diff -Nru cabal-install-1.22-1.22.6.0/bash-completion/cabal cabal-install-1.22-1.22.9.0/bash-completion/cabal --- cabal-install-1.22-1.22.6.0/bash-completion/cabal 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/bash-completion/cabal 1970-01-01 00:00:00.000000000 +0000 @@ -1,80 +0,0 @@ -# cabal command line completion -# Copyright 2007-2008 "Lennart Kolmodin" -# "Duncan Coutts" -# - -# List cabal targets by type, pass: -# - test-suite for test suites -# - benchmark for benchmarks -# - executable for executables -# - executable|test-suite|benchmark for the three -_cabal_list() -{ - cat *.cabal | - grep -Ei "^[[:space:]]*($1)[[:space:]]" | - sed -e "s/.* \([^ ]*\).*/\1/" -} - -# List possible targets depending on the command supplied as parameter. The -# ideal option would be to implement this via --list-options on cabal directly. -# This is a temporary workaround. -_cabal_targets() -{ - # If command ($*) contains build, repl, test or bench completes with - # targets of according type. - [ -f *.cabal ] || return 0 - local comp - for comp in $*; do - [ $comp == build ] && _cabal_list "executable|test-suite|benchmark" && break - [ $comp == repl ] && _cabal_list "executable|test-suite|benchmark" && break - [ $comp == run ] && _cabal_list "executable" && break - [ $comp == test ] && _cabal_list "test-suite" && break - [ $comp == bench ] && _cabal_list "benchmark" && break - done -} - -# List possible subcommands of a cabal subcommand. -# -# In example "sandbox" is a cabal subcommand that itself has subcommands. Since -# "cabal --list-options" doesn't work in such cases we have to get the list -# using other means. -_cabal_subcommands() -{ - local word - for word in "$@"; do - case "$word" in - sandbox) - # Get list of "cabal sandbox" subcommands from its help message. - # - # Following command short-circuits if it reaches flags section. - # This is to prevent any problems that might arise from unfortunate - # word combinations in flag descriptions. 
Usage section is parsed - # using simple regexp and "sandbox" subcommand is printed for each - # successful substitution. - "$1" help sandbox | - sed -rn '/Flags/q;s/^.* cabal sandbox *([^ ]*).*/\1/;t p;b;: p;p' - break # Terminate for loop. - ;; - esac - done -} - -_cabal() -{ - # get the word currently being completed - local cur - cur=${COMP_WORDS[$COMP_CWORD]} - - # create a command line to run - local cmd - # copy all words the user has entered - cmd=( ${COMP_WORDS[@]} ) - - # replace the current word with --list-options - cmd[${COMP_CWORD}]="--list-options" - - # the resulting completions should be put into this array - COMPREPLY=( $( compgen -W "$( ${cmd[@]} ) $( _cabal_targets ${cmd[@]} ) $( _cabal_subcommands ${COMP_WORDS[@]} )" -- $cur ) ) -} - -complete -F _cabal -o default cabal diff -Nru cabal-install-1.22-1.22.6.0/bootstrap.sh cabal-install-1.22-1.22.9.0/bootstrap.sh --- cabal-install-1.22-1.22.6.0/bootstrap.sh 2015-07-22 10:29:08.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/bootstrap.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,466 +0,0 @@ -#!/usr/bin/env sh - -# A script to bootstrap cabal-install. - -# It works by downloading and installing the Cabal, zlib and -# HTTP packages. It then installs cabal-install itself. -# It expects to be run inside the cabal-install directory. - -# Install settings, you can override these by setting environment vars. E.g. 
if -# you don't want profiling and dynamic versions of libraries to be installed in -# addition to vanilla, run 'EXTRA_CONFIGURE_OPTS="" ./bootstrap.sh' - -#VERBOSE -DEFAULT_CONFIGURE_OPTS="--disable-library-profiling --disable-shared" -EXTRA_CONFIGURE_OPTS=${EXTRA_CONFIGURE_OPTS-$DEFAULT_CONFIGURE_OPTS} -#EXTRA_BUILD_OPTS -#EXTRA_INSTALL_OPTS - -die () { printf "\nError during cabal-install bootstrap:\n$1\n" >&2 && exit 2 ;} - -# programs, you can override these by setting environment vars -GHC="${GHC:-ghc}" -GHC_PKG="${GHC_PKG:-ghc-pkg}" -GHC_VER="$(${GHC} --numeric-version)" -HADDOCK=${HADDOCK:-haddock} -WGET="${WGET:-wget}" -CURL="${CURL:-curl}" -FETCH="${FETCH:-fetch}" -TAR="${TAR:-tar}" -GZIP_PROGRAM="${GZIP_PROGRAM:-gzip}" - -# The variable SCOPE_OF_INSTALLATION can be set on the command line to -# use/install the libaries needed to build cabal-install to a custom package -# database instead of the user or global package database. -# -# Example: -# -# $ ghc-pkg init /my/package/database -# $ SCOPE_OF_INSTALLATION='--package-db=/my/package/database' ./bootstrap.sh -# -# You can also combine SCOPE_OF_INSTALLATION with PREFIX: -# -# $ ghc-pkg init /my/prefix/packages.conf.d -# $ SCOPE_OF_INSTALLATION='--package-db=/my/prefix/packages.conf.d' \ -# PREFIX=/my/prefix ./bootstrap.sh -# -# If you use the --global,--user or --sandbox arguments, this will -# override the SCOPE_OF_INSTALLATION setting and not use the package -# database you pass in the SCOPE_OF_INSTALLATION variable. - -SCOPE_OF_INSTALLATION="${SCOPE_OF_INSTALLATION:---user}" -DEFAULT_PREFIX="${HOME}/.cabal" - -# Try to respect $TMPDIR but override if needed - see #1710. -[ -"$TMPDIR"- = -""- ] || echo "$TMPDIR" | grep -q ld && - export TMPDIR=/tmp/cabal-$(echo $(od -XN4 -An /dev/random)) && mkdir $TMPDIR - -# Check for a C compiler. -[ ! -x "$CC" ] && for ccc in gcc clang cc icc; do - ${ccc} --version > /dev/null 2>&1 && CC=$ccc && - echo "Using $CC for C compiler. 
If this is not what you want, set CC." >&2 && - break -done - -# None found. -[ ! -x `which "$CC"` ] && - die "C compiler not found (or could not be run). - If a C compiler is installed make sure it is on your PATH, - or set the CC variable." - -# Check the C compiler/linker work. -LINK="$(for link in collect2 ld; do - echo 'main;' | ${CC} -v -x c - -o /dev/null -\#\#\# 2>&1 | grep -qw $link && - echo 'main;' | ${CC} -v -x c - -o /dev/null -\#\#\# 2>&1 | grep -w $link | - sed -e "s|\(.*$link\).*|\1|" -e 's/ //g' -e 's|"||' && break -done)" - -# They don't. -[ -z "$LINK" ] && - die "C compiler and linker could not compile a simple test program. - Please check your toolchain." - -## Warn that were's overriding $LD if set (if you want). - -[ -x "$LD" ] && [ "$LD" != "$LINK" ] && - echo "Warning: value set in $LD is not the same as C compiler's $LINK." >&2 - echo "Using $LINK instead." >&2 - -# Set LD, overriding environment if necessary. -LD=$LINK - -# Check we're in the right directory, etc. -grep "cabal-install" ./cabal-install.cabal > /dev/null 2>&1 || - die "The bootstrap.sh script must be run in the cabal-install directory" - -${GHC} --numeric-version > /dev/null 2>&1 || - die "${GHC} not found (or could not be run). - If ghc is installed, make sure it is on your PATH, - or set the GHC and GHC_PKG vars." - -${GHC_PKG} --version > /dev/null 2>&1 || die "${GHC_PKG} not found." - -GHC_VER="$(${GHC} --numeric-version)" -GHC_PKG_VER="$(${GHC_PKG} --version | cut -d' ' -f 5)" - -[ ${GHC_VER} = ${GHC_PKG_VER} ] || - die "Version mismatch between ${GHC} and ${GHC_PKG}. - If you set the GHC variable then set GHC_PKG too." - -while [ "$#" -gt 0 ]; do - case "${1}" in - "--user") - SCOPE_OF_INSTALLATION="${1}" - shift;; - "--global") - SCOPE_OF_INSTALLATION="${1}" - DEFAULT_PREFIX="/usr/local" - shift;; - "--sandbox") - shift - # check if there is another argument which doesn't start with -- - if [ "$#" -le 0 ] || [ ! 
-z $(echo "${1}" | grep "^--") ] - then - SANDBOX=".cabal-sandbox" - else - SANDBOX="${1}" - shift - fi;; - "--no-doc") - NO_DOCUMENTATION=1 - shift;; - *) - echo "Unknown argument or option, quitting: ${1}" - echo "usage: bootstrap.sh [OPTION]" - echo - echo "options:" - echo " --user Install for the local user (default)" - echo " --global Install systemwide (must be run as root)" - echo " --no-doc Do not generate documentation for installed "\ - "packages" - echo " --sandbox Install to a sandbox in the default location"\ - "(.cabal-sandbox)" - echo " --sandbox path Install to a sandbox located at path" - exit;; - esac -done - -abspath () { case "$1" in /*)printf "%s\n" "$1";; *)printf "%s\n" "$PWD/$1";; - esac; } - -if [ ! -z "$SANDBOX" ] -then # set up variables for sandbox bootstrap - # Make the sandbox path absolute since it will be used from - # different working directories when the dependency packages are - # installed. - SANDBOX=$(abspath "$SANDBOX") - # Get the name of the package database which cabal sandbox would use. - GHC_ARCH=$(ghc --info | - sed -n 's/.*"Target platform".*"\([^-]\+\)-[^-]\+-\([^"]\+\)".*/\1-\2/p') - PACKAGEDB="$SANDBOX/${GHC_ARCH}-ghc-${GHC_VER}-packages.conf.d" - # Assume that if the directory is already there, it is already a - # package database. We will get an error immediately below if it - # isn't. Uses -r to try to be compatible with Solaris, and allow - # symlinks as well as a normal dir/file. - [ ! -r "$PACKAGEDB" ] && ghc-pkg init "$PACKAGEDB" - PREFIX="$SANDBOX" - SCOPE_OF_INSTALLATION="--package-db=$PACKAGEDB" - echo Bootstrapping in sandbox at \'$SANDBOX\'. -fi - -# Check for haddock unless no documentation should be generated. -if [ ! ${NO_DOCUMENTATION} ] -then - ${HADDOCK} --version > /dev/null 2>&1 || die "${HADDOCK} not found." -fi - -PREFIX=${PREFIX:-${DEFAULT_PREFIX}} - -# Versions of the packages to install. -# The version regex says what existing installed versions are ok. 
-PARSEC_VER="3.1.7"; PARSEC_VER_REGEXP="[3]\.[01]\." - # >= 3.0 && < 3.2 -DEEPSEQ_VER="1.4.0.0"; DEEPSEQ_VER_REGEXP="1\.[1-9]\." - # >= 1.1 && < 2 -BINARY_VER="0.7.2.3"; BINARY_VER_REGEXP="[0]\.[7]\." - # == 0.7.* -TEXT_VER="1.2.0.3"; TEXT_VER_REGEXP="((1\.[012]\.)|(0\.([2-9]|(1[0-1]))\.))" - # >= 0.2 && < 1.3 -NETWORK_VER="2.6.0.2"; NETWORK_VER_REGEXP="2\.[0-6]\." - # >= 2.0 && < 2.7 -NETWORK_URI_VER="2.6.0.1"; NETWORK_URI_VER_REGEXP="2\.6\." - # >= 2.6 && < 2.7 -CABAL_VER="1.22.4.0"; CABAL_VER_REGEXP="1\.22" - # >= 1.22 && < 1.23 -TRANS_VER="0.4.2.0"; TRANS_VER_REGEXP="0\.[4]\." - # >= 0.2.* && < 0.5 -MTL_VER="2.2.1"; MTL_VER_REGEXP="[2]\." - # >= 2.0 && < 3 -HTTP_VER="4000.2.19"; HTTP_VER_REGEXP="4000\.2\.([5-9]|1[0-9]|2[0-9])" - # >= 4000.2.5 < 4000.3 -ZLIB_VER="0.5.4.2"; ZLIB_VER_REGEXP="0\.[45]\." - # == 0.4.* || == 0.5.* -TIME_VER="1.5" TIME_VER_REGEXP="1\.[12345]\.?" - # >= 1.1 && < 1.6 -RANDOM_VER="1.1" RANDOM_VER_REGEXP="1\.[01]\.?" - # >= 1 && < 1.2 -STM_VER="2.4.4"; STM_VER_REGEXP="2\." - # == 2.* -OLD_TIME_VER="1.1.0.3"; OLD_TIME_VER_REGEXP="1\.[01]\.?" - # >=1.0.0.0 && <1.2 -OLD_LOCALE_VER="1.0.0.7"; OLD_LOCALE_VER_REGEXP="1\.0\.?" - # >=1.0.0.0 && <1.1 - -HACKAGE_URL="https://hackage.haskell.org/package" - -# Haddock fails for network-2.5.0.0. -NO_DOCS_PACKAGES_VER_REGEXP="network-uri-2\.5\.[0-9]+\.[0-9]+" - -# Cache the list of packages: -echo "Checking installed packages for ghc-${GHC_VER}..." -${GHC_PKG} list --global ${SCOPE_OF_INSTALLATION} > ghc-pkg.list || - die "running '${GHC_PKG} list' failed" - -# Will we need to install this package, or is a suitable version installed? -need_pkg () { - PKG=$1 - VER_MATCH=$2 - if egrep " ${PKG}-${VER_MATCH}" ghc-pkg.list > /dev/null 2>&1 - then - return 1; - else - return 0; - fi - #Note: we cannot use "! grep" here as Solaris 9 /bin/sh doesn't like it. 
-} - -info_pkg () { - PKG=$1 - VER=$2 - VER_MATCH=$3 - - if need_pkg ${PKG} ${VER_MATCH} - then - if [ -r "${PKG}-${VER}.tar.gz" ] - then - echo "${PKG}-${VER} will be installed from local tarball." - else - echo "${PKG}-${VER} will be downloaded and installed." - fi - else - echo "${PKG} is already installed and the version is ok." - fi -} - -fetch_pkg () { - PKG=$1 - VER=$2 - - URL=${HACKAGE_URL}/${PKG}-${VER}/${PKG}-${VER}.tar.gz - if which ${CURL} > /dev/null - then - # TODO: switch back to resuming curl command once - # https://github.com/haskell/hackage-server/issues/111 is resolved - #${CURL} -L --fail -C - -O ${URL} || die "Failed to download ${PKG}." - ${CURL} -L --fail -O ${URL} || die "Failed to download ${PKG}." - elif which ${WGET} > /dev/null - then - ${WGET} -c ${URL} || die "Failed to download ${PKG}." - elif which ${FETCH} > /dev/null - then - ${FETCH} ${URL} || die "Failed to download ${PKG}." - else - die "Failed to find a downloader. 'curl', 'wget' or 'fetch' is required." - fi - [ -f "${PKG}-${VER}.tar.gz" ] || - die "Downloading ${URL} did not create ${PKG}-${VER}.tar.gz" -} - -unpack_pkg () { - PKG=$1 - VER=$2 - - rm -rf "${PKG}-${VER}.tar" "${PKG}-${VER}" - ${GZIP_PROGRAM} -d < "${PKG}-${VER}.tar.gz" | ${TAR} -xf - - [ -d "${PKG}-${VER}" ] || die "Failed to unpack ${PKG}-${VER}.tar.gz" -} - -install_pkg () { - PKG=$1 - VER=$2 - - [ -x Setup ] && ./Setup clean - [ -f Setup ] && rm Setup - - ${GHC} --make Setup -o Setup || - die "Compiling the Setup script failed." - - [ -x Setup ] || die "The Setup script does not exist or cannot be run" - - args="${SCOPE_OF_INSTALLATION} --with-compiler=${GHC}" - args="$args --with-hc-pkg=${GHC_PKG} --with-gcc=${CC} --with-ld=${LD}" - args="$args ${EXTRA_CONFIGURE_OPTS} ${VERBOSE}" - - ./Setup configure $args || die "Configuring the ${PKG} package failed." - - ./Setup build ${EXTRA_BUILD_OPTS} ${VERBOSE} || - die "Building the ${PKG} package failed." - - if [ ! 
${NO_DOCUMENTATION} ] - then - if echo "${PKG}-${VER}" | egrep ${NO_DOCS_PACKAGES_VER_REGEXP} > /dev/null 2>&1 - then - echo "Skipping documentation for the ${PKG} package." - else - ./Setup haddock --with-ghc=${GHC} --with-haddock=${HADDOCK} ${VERBOSE} || - die "Documenting the ${PKG} package failed." - fi - fi - - ./Setup install ${EXTRA_INSTALL_OPTS} ${VERBOSE} || - die "Installing the ${PKG} package failed." -} - -install_pkg2 () { - PKG=$1 - - [ -x Setup ] && ./Setup clean - [ -f Setup ] && rm Setup - - ${GHC} --make Setup -o Setup || - die "Compiling the Setup script failed." - - [ -x Setup ] || die "The Setup script does not exist or cannot be run" - - args="${SCOPE_OF_INSTALLATION} --prefix=${PREFIX} --with-compiler=${GHC}" - args="$args --with-hc-pkg=${GHC_PKG} --with-gcc=${CC} --with-ld=${LD}" - args="$args ${EXTRA_CONFIGURE_OPTS} ${VERBOSE}" - - ./Setup configure $args || die "Configuring the ${PKG} package failed." - - ./Setup build ${EXTRA_BUILD_OPTS} ${VERBOSE} || - die "Building the ${PKG} package failed." - - if [ ! ${NO_DOCUMENTATION} ] - then - ./Setup haddock --with-ghc=${GHC} --with-haddock=${HADDOCK} ${VERBOSE} || - die "Documenting the ${PKG} package failed." - fi - - ./Setup copy "--destdir=${DESTDIR}" ${EXTRA_INSTALL_OPTS} ${VERBOSE} || - die "Installing the ${PKG} package failed." -} - -do_pkg () { - PKG=$1 - VER=$2 - VER_MATCH=$3 - - if need_pkg ${PKG} ${VER_MATCH} - then - echo - if [ -r "${PKG}-${VER}.tar.gz" ] - then - echo "Using local tarball for ${PKG}-${VER}." - else - echo "Downloading ${PKG}-${VER}..." - fetch_pkg ${PKG} ${VER} - fi - unpack_pkg ${PKG} ${VER} - cd "${PKG}-${VER}" - install_pkg ${PKG} ${VER} - cd .. - fi -} - -# Replicate the flag selection logic for network-uri in the .cabal file. -do_network_uri_pkg () { - # Refresh installed package list. 
- ${GHC_PKG} list --global ${SCOPE_OF_INSTALLATION} > ghc-pkg-stage2.list \ - || die "running '${GHC_PKG} list' failed" - - NETWORK_URI_DUMMY_VER="2.5.0.0"; NETWORK_URI_DUMMY_VER_REGEXP="2\.5\." # < 2.6 - if egrep " network-2\.[6-9]\." ghc-pkg-stage2.list > /dev/null 2>&1 - then - # Use network >= 2.6 && network-uri >= 2.6 - info_pkg "network-uri" ${NETWORK_URI_VER} ${NETWORK_URI_VER_REGEXP} - do_pkg "network-uri" ${NETWORK_URI_VER} ${NETWORK_URI_VER_REGEXP} - else - # Use network < 2.6 && network-uri < 2.6 - info_pkg "network-uri" ${NETWORK_URI_DUMMY_VER} ${NETWORK_URI_DUMMY_VER_REGEXP} - do_pkg "network-uri" ${NETWORK_URI_DUMMY_VER} ${NETWORK_URI_DUMMY_VER_REGEXP} - fi -} - -# Actually do something! - -info_pkg "deepseq" ${DEEPSEQ_VER} ${DEEPSEQ_VER_REGEXP} -info_pkg "binary" ${BINARY_VER} ${BINARY_VER_REGEXP} -info_pkg "time" ${TIME_VER} ${TIME_VER_REGEXP} -info_pkg "Cabal" ${CABAL_VER} ${CABAL_VER_REGEXP} -info_pkg "transformers" ${TRANS_VER} ${TRANS_VER_REGEXP} -info_pkg "mtl" ${MTL_VER} ${MTL_VER_REGEXP} -info_pkg "text" ${TEXT_VER} ${TEXT_VER_REGEXP} -info_pkg "parsec" ${PARSEC_VER} ${PARSEC_VER_REGEXP} -info_pkg "network" ${NETWORK_VER} ${NETWORK_VER_REGEXP} -info_pkg "old-locale" ${OLD_LOCALE_VER} ${OLD_LOCALE_VER_REGEXP} -info_pkg "old-time" ${OLD_TIME_VER} ${OLD_TIME_VER_REGEXP} -info_pkg "HTTP" ${HTTP_VER} ${HTTP_VER_REGEXP} -info_pkg "zlib" ${ZLIB_VER} ${ZLIB_VER_REGEXP} -info_pkg "random" ${RANDOM_VER} ${RANDOM_VER_REGEXP} -info_pkg "stm" ${STM_VER} ${STM_VER_REGEXP} - -do_pkg "deepseq" ${DEEPSEQ_VER} ${DEEPSEQ_VER_REGEXP} -do_pkg "binary" ${BINARY_VER} ${BINARY_VER_REGEXP} -do_pkg "time" ${TIME_VER} ${TIME_VER_REGEXP} -do_pkg "Cabal" ${CABAL_VER} ${CABAL_VER_REGEXP} -do_pkg "transformers" ${TRANS_VER} ${TRANS_VER_REGEXP} -do_pkg "mtl" ${MTL_VER} ${MTL_VER_REGEXP} -do_pkg "text" ${TEXT_VER} ${TEXT_VER_REGEXP} -do_pkg "parsec" ${PARSEC_VER} ${PARSEC_VER_REGEXP} -do_pkg "network" ${NETWORK_VER} ${NETWORK_VER_REGEXP} - -# We conditionally install 
network-uri, depending on the network version. -do_network_uri_pkg - -do_pkg "old-locale" ${OLD_LOCALE_VER} ${OLD_LOCALE_VER_REGEXP} -do_pkg "old-time" ${OLD_TIME_VER} ${OLD_TIME_VER_REGEXP} -do_pkg "HTTP" ${HTTP_VER} ${HTTP_VER_REGEXP} -do_pkg "zlib" ${ZLIB_VER} ${ZLIB_VER_REGEXP} -do_pkg "random" ${RANDOM_VER} ${RANDOM_VER_REGEXP} -do_pkg "stm" ${STM_VER} ${STM_VER_REGEXP} - -install_pkg2 "cabal-install" - -# Use the newly built cabal to turn the prefix/package database into a -# legit cabal sandbox. This works because 'cabal sandbox init' will -# reuse the already existing package database and other files if they -# are in the expected locations. -[ ! -z "$SANDBOX" ] && $SANDBOX/bin/cabal sandbox init --sandbox $SANDBOX - -echo -echo "===========================================" -CABAL_BIN="$PREFIX/bin" -if [ -x "$CABAL_BIN/cabal" ] -then - echo "The 'cabal' program has been installed in $CABAL_BIN/" - echo "You should either add $CABAL_BIN to your PATH" - echo "or copy the cabal program to a directory that is on your PATH." - echo - echo "The first thing to do is to get the latest list of packages with:" - echo " cabal update" - echo "This will also create a default config file (if it does not already" - echo "exist) at $HOME/.cabal/config" - echo - echo "By default cabal will install programs to $HOME/.cabal/bin" - echo "If you do not want to add this directory to your PATH then you can" - echo "change the setting in the config file, for example you could use:" - echo "symlink-bindir: $HOME/bin" -else - echo "Sorry, something went wrong." 
- echo "The 'cabal' executable was not successfully installed into" - echo "$CABAL_BIN/" -fi -echo - -rm ghc-pkg.list diff -Nru cabal-install-1.22-1.22.6.0/build.sh cabal-install-1.22-1.22.9.0/build.sh --- cabal-install-1.22-1.22.6.0/build.sh 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/build.sh 2016-06-02 07:15:44.000000000 +0000 @@ -0,0 +1,23 @@ +#!/bin/bash -e + +HC=ghc +HC_PKG=ghc-pkg + +declare -a IPLAN +IPLAN=( $( cat src/buildplan.lst ) ) + +PKGDB=$PWD/src/package.conf.d +rm -rvf "$PKGDB" + +$HC_PKG init $PKGDB + +for PKG in ${IPLAN[@]}; do + pushd src/$PKG + + $HC --make Setup -o Setup + ./Setup configure --package-db=$PKGDB --prefix=$PREFIX --disable-library-profiling --disable-shared + ./Setup build + ./Setup install + + popd +done diff -Nru cabal-install-1.22-1.22.6.0/cabal-install.cabal cabal-install-1.22-1.22.9.0/cabal-install.cabal --- cabal-install-1.22-1.22.6.0/cabal-install.cabal 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/cabal-install.cabal 1970-01-01 00:00:00.000000000 +0000 @@ -1,263 +0,0 @@ -Name: cabal-install -Version: 1.22.6.0 -Synopsis: The command-line interface for Cabal and Hackage. -Description: - The \'cabal\' command-line program simplifies the process of managing - Haskell software by automating the fetching, configuration, compilation - and installation of Haskell libraries and programs. 
-homepage: http://www.haskell.org/cabal/ -bug-reports: https://github.com/haskell/cabal/issues -License: BSD3 -License-File: LICENSE -Author: Lemmih - Paolo Martini - Bjorn Bringert - Isaac Potoczny-Jones - Duncan Coutts -Maintainer: cabal-devel@haskell.org -Copyright: 2005 Lemmih - 2006 Paolo Martini - 2007 Bjorn Bringert - 2007 Isaac Potoczny-Jones - 2007-2012 Duncan Coutts -Category: Distribution -Build-type: Simple -Cabal-Version: >= 1.10 -Extra-Source-Files: - README.md bash-completion/cabal bootstrap.sh changelog - - -- Generated with '../Cabal/misc/gen-extra-source-files.sh | sort' - tests/PackageTests/Freeze/my.cabal - -source-repository head - type: git - location: https://github.com/haskell/cabal/ - subdir: cabal-install - -Flag old-directory - description: Use directory < 1.2 and old-time - default: False - -Flag network-uri - description: Get Network.URI from the network-uri package - default: True - -executable cabal - main-is: Main.hs - ghc-options: -Wall -fwarn-tabs - other-modules: - Distribution.Client.BuildReports.Anonymous - Distribution.Client.BuildReports.Storage - Distribution.Client.BuildReports.Types - Distribution.Client.BuildReports.Upload - Distribution.Client.Check - Distribution.Client.Config - Distribution.Client.Configure - Distribution.Client.Dependency - Distribution.Client.Dependency.TopDown - Distribution.Client.Dependency.TopDown.Constraints - Distribution.Client.Dependency.TopDown.Types - Distribution.Client.Dependency.Types - Distribution.Client.Dependency.Modular - Distribution.Client.Dependency.Modular.Assignment - Distribution.Client.Dependency.Modular.Builder - Distribution.Client.Dependency.Modular.Configured - Distribution.Client.Dependency.Modular.ConfiguredConversion - Distribution.Client.Dependency.Modular.Dependency - Distribution.Client.Dependency.Modular.Explore - Distribution.Client.Dependency.Modular.Flag - Distribution.Client.Dependency.Modular.Index - Distribution.Client.Dependency.Modular.IndexConversion - 
Distribution.Client.Dependency.Modular.Log - Distribution.Client.Dependency.Modular.Message - Distribution.Client.Dependency.Modular.Package - Distribution.Client.Dependency.Modular.Preference - Distribution.Client.Dependency.Modular.PSQ - Distribution.Client.Dependency.Modular.Solver - Distribution.Client.Dependency.Modular.Tree - Distribution.Client.Dependency.Modular.Validate - Distribution.Client.Dependency.Modular.Version - Distribution.Client.Exec - Distribution.Client.Fetch - Distribution.Client.FetchUtils - Distribution.Client.Freeze - Distribution.Client.Get - Distribution.Client.GZipUtils - Distribution.Client.Haddock - Distribution.Client.HttpUtils - Distribution.Client.IndexUtils - Distribution.Client.Init - Distribution.Client.Init.Heuristics - Distribution.Client.Init.Licenses - Distribution.Client.Init.Types - Distribution.Client.Install - Distribution.Client.InstallPlan - Distribution.Client.InstallSymlink - Distribution.Client.JobControl - Distribution.Client.List - Distribution.Client.PackageIndex - Distribution.Client.PackageUtils - Distribution.Client.ParseUtils - Distribution.Client.Run - Distribution.Client.Sandbox - Distribution.Client.Sandbox.Index - Distribution.Client.Sandbox.PackageEnvironment - Distribution.Client.Sandbox.Timestamp - Distribution.Client.Sandbox.Types - Distribution.Client.Setup - Distribution.Client.SetupWrapper - Distribution.Client.SrcDist - Distribution.Client.Tar - Distribution.Client.Targets - Distribution.Client.Types - Distribution.Client.Update - Distribution.Client.Upload - Distribution.Client.Utils - Distribution.Client.World - Distribution.Client.Win32SelfUpgrade - Distribution.Client.Compat.Environment - Distribution.Client.Compat.ExecutablePath - Distribution.Client.Compat.FilePerms - Distribution.Client.Compat.Process - Distribution.Client.Compat.Semaphore - Distribution.Client.Compat.Time - Paths_cabal_install - - -- NOTE: when updating build-depends, don't forget to update version regexps - -- in 
bootstrap.sh. - build-depends: - array >= 0.1 && < 0.6, - base >= 4.3 && < 5, - bytestring >= 0.9 && < 1, - Cabal >= 1.22.2 && < 1.23, - containers >= 0.1 && < 0.6, - filepath >= 1.0 && < 1.5, - HTTP >= 4000.2.5 && < 4000.3, - mtl >= 2.0 && < 3, - pretty >= 1 && < 1.2, - random >= 1 && < 1.2, - stm >= 2.0 && < 3, - time >= 1.1 && < 1.6, - zlib >= 0.5.3 && < 0.6 - - if flag(old-directory) - build-depends: directory >= 1 && < 1.2, old-time >= 1 && < 1.2, - process >= 1.0.1.1 && < 1.1.0.2 - else - build-depends: directory >= 1.2 && < 1.3, - process >= 1.1.0.2 && < 1.3 - - -- NOTE: you MUST include the network dependency even when network-uri - -- is pulled in, otherwise the constraint solver doesn't have enough - -- information - if flag(network-uri) - build-depends: network-uri >= 2.6, network >= 2.6 - else - build-depends: network >= 2.4 && < 2.6 - - if os(windows) - build-depends: Win32 >= 2 && < 3 - cpp-options: -DWIN32 - else - build-depends: unix >= 2.0 && < 2.8 - - if arch(arm) && impl(ghc < 7.6) - -- older ghc on arm does not support -threaded - cc-options: -DCABAL_NO_THREADED - else - ghc-options: -threaded - - c-sources: cbits/getnumcores.c - default-language: Haskell2010 - --- Small, fast running tests. -Test-Suite unit-tests - type: exitcode-stdio-1.0 - main-is: UnitTests.hs - hs-source-dirs: tests, . 
- ghc-options: -Wall -fwarn-tabs - other-modules: - UnitTests.Distribution.Client.Targets - UnitTests.Distribution.Client.Dependency.Modular.PSQ - UnitTests.Distribution.Client.Sandbox - UnitTests.Distribution.Client.UserConfig - build-depends: - base, - array, - bytestring, - Cabal, - containers, - mtl, - pretty, - process, - directory, - filepath, - stm, - time, - HTTP, - zlib, - - test-framework, - test-framework-hunit, - test-framework-quickcheck2 >= 0.3, - HUnit, - QuickCheck >= 2.5 - - if flag(old-directory) - build-depends: old-time - - if flag(network-uri) - build-depends: network-uri >= 2.6, network >= 2.6 - else - build-depends: network-uri < 2.6, network < 2.6 - - if os(windows) - build-depends: Win32 - cpp-options: -DWIN32 - else - build-depends: unix - - if arch(arm) - cc-options: -DCABAL_NO_THREADED - else - ghc-options: -threaded - default-language: Haskell2010 - --- Large, system tests that build packages. -test-suite package-tests - type: exitcode-stdio-1.0 - hs-source-dirs: tests - main-is: PackageTests.hs - other-modules: - PackageTests.Exec.Check - PackageTests.Freeze.Check - PackageTests.MultipleSource.Check - PackageTests.PackageTester - build-depends: - Cabal, - HUnit, - QuickCheck >= 2.1.0.1 && < 2.8, - base, - bytestring, - directory, - extensible-exceptions, - filepath, - process, - regex-posix, - test-framework, - test-framework-hunit, - test-framework-quickcheck2 >= 0.2.12 - - if os(windows) - build-depends: Win32 >= 2 && < 3 - cpp-options: -DWIN32 - else - build-depends: unix >= 2.0 && < 2.8 - - if arch(arm) - cc-options: -DCABAL_NO_THREADED - else - ghc-options: -threaded - - ghc-options: -Wall - default-language: Haskell2010 diff -Nru cabal-install-1.22-1.22.6.0/cbits/getnumcores.c cabal-install-1.22-1.22.9.0/cbits/getnumcores.c --- cabal-install-1.22-1.22.6.0/cbits/getnumcores.c 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/cbits/getnumcores.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,46 +0,0 @@ -#if 
defined(__GLASGOW_HASKELL__) && (__GLASGOW_HASKELL__ >= 612) && !defined(CABAL_NO_THREADED) -/* Since version 6.12, GHC's threaded RTS includes a getNumberOfProcessors - function, so we try to use that if available. cabal-install is always built - with -threaded nowadays. */ -#define HAS_GET_NUMBER_OF_PROCESSORS -#endif - - -#ifndef HAS_GET_NUMBER_OF_PROCESSORS - -#ifdef _WIN32 -#include -#elif MACOS -#include -#include -#elif __linux__ -#include -#endif - -int getNumberOfProcessors() { -#ifdef WIN32 - SYSTEM_INFO sysinfo; - GetSystemInfo(&sysinfo); - return sysinfo.dwNumberOfProcessors; -#elif MACOS - int nm[2]; - size_t len = 4; - uint32_t count; - - nm[0] = CTL_HW; nm[1] = HW_AVAILCPU; - sysctl(nm, 2, &count, &len, NULL, 0); - - if(count < 1) { - nm[1] = HW_NCPU; - sysctl(nm, 2, &count, &len, NULL, 0); - if(count < 1) { count = 1; } - } - return count; -#elif __linux__ - return sysconf(_SC_NPROCESSORS_ONLN); -#else - return 1; -#endif -} - -#endif /* HAS_GET_NUMBER_OF_PROCESSORS */ diff -Nru cabal-install-1.22-1.22.6.0/changelog cabal-install-1.22-1.22.9.0/changelog --- cabal-install-1.22-1.22.6.0/changelog 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/changelog 1970-01-01 00:00:00.000000000 +0000 @@ -1,196 +0,0 @@ --*-change-log-*- -1.22.6.0 Ryan Thomas June 2015 - * A fix for @ezyang's fix for #2502. (Mikhail Glushenkov) - -1.22.5.0 Ryan Thomas June 2015 - * Reduce temporary directory name length, fixes #2502. (Edward Z. Yang) - -1.22.4.0 Ryan Thomas May 2015 - * Force cabal upload to always use digest auth and never basic auth. 
- * Add dependency-graph information to `printPlan` output - * bootstrap.sh: fixes linker matching to avoid cases where tested linker names appear unexpectedly in compiler output (fixes #2542) - -1.22.3.0 Ryan Thomas April 2015 - * Fix bash completion for sandbox subcommands - Fixes #2513 (Mikhail Glushenkov) - * filterConfigureFlags: filter more flags (Mikhail Glushenkov) - -1.22.2.0 Ryan Thomas March 2015 - * Don't pass '--{en,dis}able-profiling' to old setup exes. - * -Wall police - * Allow filepath 1.4 - -1.21.x (current development version) - * New command: user-config (#2159). - * Implement 'cabal repl --only' (#2016). - * Fix an issue when 'cabal repl' was doing unnecessary compilation - (#1715). - * Prompt the user to specify source directory in 'cabal init' - (#1989). - * Remove the self-upgrade check (#2090). - * Don't redownload already downloaded packages when bootstrapping - (#2133). - * Support sandboxes in 'bootstrap.sh' (#2137). - * Install profiling and shared libs by default in 'bootstrap.sh' - (#2009). - -1.20.0.3 Johan Tibell June 2014 - * Don't attempt to rename dist if it is already named correctly - * Treat all flags of a package as interdependent. - * Allow template-haskell to be upgradable again - -1.20.0.2 Johan Tibell May 2014 - * Increase max-backjumps to 2000. - * Fix solver bug which led to missed install plans. - * Fix streaming test output. - * Tweak solver heuristics to avoid reinstalls. 
- -1.20.0.1 Johan Tibell May 2014 - * Fix cabal repl search path bug on Windows - * Include OS and arch in cabal-install user agent - * Revert --constraint flag behavior in configure to 1.18 behavior - -1.20.0.0 Johan Tibell April 2014 - * Build only selected executables - * Add -j flag to build/test/bench/run - * Improve install log file - * Don't symlink executables when in a sandbox - * Add --package-db flag to 'list' and 'info' - * Make upload more efficient - * Add --require-sandbox option - * Add experimental Cabal file format command - * Add haddock section to config file - * Add --main-is flag to init - -0.14.0 Andres Loeh April 2012 - * Works with ghc-7.4 - * Completely new modular dependency solver (default in most cases) - * Some tweaks to old topdown dependency solver - * Install plans are now checked for reinstalls that break packages - * Flags --constraint and --preference work for nonexisting packages - * New constraint forms for source and installed packages - * New constraint form for package-specific use flags - * New constraint form for package-specific stanza flags - * Test suite dependencies are pulled in on demand - * No longer install packages on --enable-tests when tests fail - * New "cabal bench" command - * Various "cabal init" tweaks - -0.10.0 Duncan Coutts February 2011 - * New package targets: local dirs, local and remote tarballs - * Initial support for a "world" package target - * Partial fix for situation where user packages mask global ones - * Removed cabal upgrade, new --upgrade-dependencies flag - * New cabal install --only-dependencies flag - * New cabal fetch --no-dependencies and --dry-run flags - * Improved output for cabal info - * Simpler and faster bash command line completion - * Fix for broken proxies that decompress wrongly - * Fix for cabal unpack to preserve executable permissions - * Adjusted the output for the -v verbosity level in a few places - -0.8.2 Duncan Coutts March 2010 - * Fix for cabal update on Windows - 
* On windows switch to per-user installs (rather than global) - * Handle intra-package dependencies in dependency planning - * Minor tweaks to cabal init feature - * Fix various -Wall warnings - * Fix for cabal sdist --snapshot - -0.8.0 Duncan Coutts Dec 2009 - * Works with ghc-6.12 - * New "cabal init" command for making initial project .cabal file - * New feature to maintain an index of haddock documentation - -0.6.4 Duncan Coutts Nov 2009 - * Improve the algorithm for selecting the base package version - * Hackage errors now reported by "cabal upload [--check]" - * Improved format of messages from "cabal check" - * Config file can now be selected by an env var - * Updated tar reading/writing code - * Improve instructions in the README and bootstrap output - * Fix bootstrap.sh on Solaris 9 - * Fix bootstrap for systems where network uses parsec 3 - * Fix building with ghc-6.6 - -0.6.2 Duncan Coutts Feb 2009 - * The upgrade command has been disabled in this release - * The configure and install commands now have consistent behaviour - * Reduce the tendancy to re-install already existing packages - * The --constraint= flag now works for the install command - * New --preference= flag for soft constraints / version preferences - * Improved bootstrap.sh script, smarter and better error checking - * New cabal info command to display detailed info on packages - * New cabal unpack command to download and untar a package - * HTTP-4000 package required, should fix bugs with http proxies - * Now works with authenticated proxies. 
- * On Windows can now override the proxy setting using an env var - * Fix compatability with config files generated by older versions - * Warn if the hackage package list is very old - * More helpful --help output, mention config file and examples - * Better documentation in ~/.cabal/config file - * Improved command line interface for logging and build reporting - * Minor improvements to some messages - -0.6.0 Duncan Coutts Oct 2008 - * Constraint solver can now cope with base 3 and base 4 - * Allow use of package version preferences from hackage index - * More detailed output from cabal install --dry-run -v - * Improved bootstrap.sh - -0.5.2 Duncan Coutts Aug 2008 - * Suport building haddock documentaion - * Self-reinstall now works on Windows - * Allow adding symlinks to excutables into a separate bindir - * New self-documenting config file - * New install --reinstall flag - * More helpful status messages in a couple places - * Upload failures now report full text error message from the server - * Support for local package repositories - * New build logging and reporting - * New command to upload build reports to (a compatible) server - * Allow tilde in hackage server URIs - * Internal code improvements - * Many other minor improvements and bug fixes - -0.5.1 Duncan Coutts June 2008 - * Restore minimal hugs support in dependency resolver - * Fix for disabled http proxies on Windows - * Revert to global installs on Windows by default - -0.5.0 Duncan Coutts June 2008 - * New package dependency resolver, solving diamond dep problem - * Integrate cabal-setup functionality - * Integrate cabal-upload functionality - * New cabal update and check commands - * Improved behavior for install and upgrade commands - * Full Windows support - * New command line handling - * Bash command line completion - * Allow case insensitive package names on command line - * New --dry-run flag for install, upgrade and fetch commands - * New --root-cmd flag to allow installing as root - * 
New --cabal-lib-version flag to select different Cabal lib versions - * Support for HTTP proxies - * Improved cabal list output - * Build other non-dependent packages even when some fail - * Report a summary of all build failures at the end - * Partial support for hugs - * Partial implementation of build reporting and logging - * More consistent logging and verbosity - * Significant internal code restructuring - -0.4 Duncan Coutts Oct 2007 - * Renamed executable from 'cabal-install' to 'cabal' - * Partial Windows compatability - * Do per-user installs by default - * cabal install now installs the package in the current directory - * Allow multiple remote servers - * Use zlib lib and internal tar code and rather than external tar - * Reorganised configuration files - * Significant code restructuring - * Cope with packages with conditional dependencies - -0.3 and older versions by Lemmih, Paolo Martini and others 2006-2007 - * Switch from smart-server, dumb-client model to the reverse - * New .tar.gz based index format - * New remote and local package archive format diff -Nru cabal-install-1.22-1.22.6.0/debian/changelog cabal-install-1.22-1.22.9.0/debian/changelog --- cabal-install-1.22-1.22.6.0/debian/changelog 2015-07-22 10:44:09.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/debian/changelog 2016-06-02 07:15:44.000000000 +0000 @@ -1,5 +1,5 @@ -cabal-install-1.22 (1.22.6.0-1~precise) precise; urgency=medium +cabal-install-1.22 (1.22.9.0-1~precise) precise; urgency=medium * Initial release - -- Herbert Valerio Riedel Wed, 22 Jul 2015 09:00:00 +0200 + -- Herbert Valerio Riedel Thu, 02 Jun 2016 09:15:44 +0200 diff -Nru cabal-install-1.22-1.22.6.0/debian/control cabal-install-1.22-1.22.9.0/debian/control --- cabal-install-1.22-1.22.6.0/debian/control 2015-07-22 10:28:53.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/debian/control 2016-06-02 07:15:44.000000000 +0000 @@ -2,7 +2,7 @@ Section: universe/haskell Priority: extra Maintainer: Herbert Valerio Riedel 
-Build-Depends: debhelper (>= 8.0.0), zlib1g-dev, ghc-7.10.2 +Build-Depends: debhelper (>= 8.0.0), zlib1g-dev, ghc-7.10.3 Standards-Version: 3.9.4 Homepage: http://www.haskell.org/ghc/ diff -Nru cabal-install-1.22-1.22.6.0/debian/rules cabal-install-1.22-1.22.9.0/debian/rules --- cabal-install-1.22-1.22.6.0/debian/rules 2015-07-22 10:33:53.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/debian/rules 2016-06-02 07:15:44.000000000 +0000 @@ -6,5 +6,5 @@ dh $@ override_dh_auto_install: - PATH=/opt/ghc/7.10.2/bin:$$PATH DESTDIR=$(CURDIR)/debian/cabal-install-1.22 PREFIX=/opt/cabal/1.22 $(CURDIR)/bootstrap.sh --no-doc - rm -rf $(CURDIR)/debian/opt/cabal/1.22/lib + PATH=/opt/ghc/7.10.3/bin:$$PATH PREFIX=$(CURDIR)/debian/cabal-install-1.22/opt/cabal/1.22 $(CURDIR)/build.sh + rm -rf $(CURDIR)/debian/cabal-install-1.22/opt/cabal/1.22/lib diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/BuildReports/Anonymous.hs cabal-install-1.22-1.22.9.0/Distribution/Client/BuildReports/Anonymous.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/BuildReports/Anonymous.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/BuildReports/Anonymous.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,316 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.Reporting --- Copyright : (c) David Waern 2008 --- License : BSD-like --- --- Maintainer : david.waern@gmail.com --- Stability : experimental --- Portability : portable --- --- Anonymous build report data structure, printing and parsing --- ------------------------------------------------------------------------------ -module Distribution.Client.BuildReports.Anonymous ( - BuildReport(..), - InstallOutcome(..), - Outcome(..), - - -- * Constructing and writing reports - new, - - -- * parsing and pretty printing - parse, - parseList, - show, --- showList, - ) where - -import qualified Distribution.Client.Types as BR - ( BuildResult, 
BuildFailure(..), BuildSuccess(..) - , DocsResult(..), TestsResult(..) ) -import Distribution.Client.Utils - ( mergeBy, MergeResult(..) ) -import qualified Paths_cabal_install (version) - -import Distribution.Package - ( PackageIdentifier(..), PackageName(..) ) -import Distribution.PackageDescription - ( FlagName(..), FlagAssignment ) ---import Distribution.Version --- ( Version ) -import Distribution.System - ( OS, Arch ) -import Distribution.Compiler - ( CompilerId(..) ) -import qualified Distribution.Text as Text - ( Text(disp, parse) ) -import Distribution.ParseUtils - ( FieldDescr(..), ParseResult(..), Field(..) - , simpleField, listField, ppFields, readFields - , syntaxError, locatedErrorMsg ) -import Distribution.Simple.Utils - ( comparing ) - -import qualified Distribution.Compat.ReadP as Parse - ( ReadP, pfail, munch1, skipSpaces ) -import qualified Text.PrettyPrint as Disp - ( Doc, render, char, text ) -import Text.PrettyPrint - ( (<+>), (<>) ) - -import Data.List - ( unfoldr, sortBy ) -import Data.Char as Char - ( isAlpha, isAlphaNum ) - -import Prelude hiding (show) - -data BuildReport - = BuildReport { - -- | The package this build report is about - package :: PackageIdentifier, - - -- | The OS and Arch the package was built on - os :: OS, - arch :: Arch, - - -- | The Haskell compiler (and hopefully version) used - compiler :: CompilerId, - - -- | The uploading client, ie cabal-install-x.y.z - client :: PackageIdentifier, - - -- | Which configurations flags we used - flagAssignment :: FlagAssignment, - - -- | Which dependent packages we were using exactly - dependencies :: [PackageIdentifier], - - -- | Did installing work ok? - installOutcome :: InstallOutcome, - - -- Which version of the Cabal library was used to compile the Setup.hs --- cabalVersion :: Version, - - -- Which build tools we were using (with versions) --- tools :: [PackageIdentifier], - - -- | Configure outcome, did configure work ok? 
- docsOutcome :: Outcome, - - -- | Configure outcome, did configure work ok? - testsOutcome :: Outcome - } - -data InstallOutcome - = PlanningFailed - | DependencyFailed PackageIdentifier - | DownloadFailed - | UnpackFailed - | SetupFailed - | ConfigureFailed - | BuildFailed - | TestsFailed - | InstallFailed - | InstallOk - deriving Eq - -data Outcome = NotTried | Failed | Ok - deriving Eq - -new :: OS -> Arch -> CompilerId -> PackageIdentifier -> FlagAssignment - -> [PackageIdentifier] -> BR.BuildResult -> BuildReport -new os' arch' comp pkgid flags deps result = - BuildReport { - package = pkgid, - os = os', - arch = arch', - compiler = comp, - client = cabalInstallID, - flagAssignment = flags, - dependencies = deps, - installOutcome = convertInstallOutcome, --- cabalVersion = undefined - docsOutcome = convertDocsOutcome, - testsOutcome = convertTestsOutcome - } - where - convertInstallOutcome = case result of - Left BR.PlanningFailed -> PlanningFailed - Left (BR.DependentFailed p) -> DependencyFailed p - Left (BR.DownloadFailed _) -> DownloadFailed - Left (BR.UnpackFailed _) -> UnpackFailed - Left (BR.ConfigureFailed _) -> ConfigureFailed - Left (BR.BuildFailed _) -> BuildFailed - Left (BR.TestsFailed _) -> TestsFailed - Left (BR.InstallFailed _) -> InstallFailed - Right (BR.BuildOk _ _ _) -> InstallOk - convertDocsOutcome = case result of - Left _ -> NotTried - Right (BR.BuildOk BR.DocsNotTried _ _) -> NotTried - Right (BR.BuildOk BR.DocsFailed _ _) -> Failed - Right (BR.BuildOk BR.DocsOk _ _) -> Ok - convertTestsOutcome = case result of - Left (BR.TestsFailed _) -> Failed - Left _ -> NotTried - Right (BR.BuildOk _ BR.TestsNotTried _) -> NotTried - Right (BR.BuildOk _ BR.TestsOk _) -> Ok - -cabalInstallID :: PackageIdentifier -cabalInstallID = - PackageIdentifier (PackageName "cabal-install") Paths_cabal_install.version - --- ------------------------------------------------------------ --- * External format --- 
------------------------------------------------------------ - -initialBuildReport :: BuildReport -initialBuildReport = BuildReport { - package = requiredField "package", - os = requiredField "os", - arch = requiredField "arch", - compiler = requiredField "compiler", - client = requiredField "client", - flagAssignment = [], - dependencies = [], - installOutcome = requiredField "install-outcome", --- cabalVersion = Nothing, --- tools = [], - docsOutcome = NotTried, - testsOutcome = NotTried - } - where - requiredField fname = error ("required field: " ++ fname) - --- ----------------------------------------------------------------------------- --- Parsing - -parse :: String -> Either String BuildReport -parse s = case parseFields s of - ParseFailed perror -> Left msg where (_, msg) = locatedErrorMsg perror - ParseOk _ report -> Right report - ---FIXME: this does not allow for optional or repeated fields -parseFields :: String -> ParseResult BuildReport -parseFields input = do - fields <- mapM extractField =<< readFields input - let merged = mergeBy (\desc (_,name,_) -> compare (fieldName desc) name) - sortedFieldDescrs - (sortBy (comparing (\(_,name,_) -> name)) fields) - checkMerged initialBuildReport merged - - where - extractField :: Field -> ParseResult (Int, String, String) - extractField (F line name value) = return (line, name, value) - extractField (Section line _ _ _) = syntaxError line "Unrecognized stanza" - extractField (IfBlock line _ _ _) = syntaxError line "Unrecognized stanza" - - checkMerged report [] = return report - checkMerged report (merged:remaining) = case merged of - InBoth fieldDescr (line, _name, value) -> do - report' <- fieldSet fieldDescr line value report - checkMerged report' remaining - OnlyInRight (line, name, _) -> - syntaxError line ("Unrecognized field " ++ name) - OnlyInLeft fieldDescr -> - fail ("Missing field " ++ fieldName fieldDescr) - -parseList :: String -> [BuildReport] -parseList str = - [ report | Right report <- map 
parse (split str) ] - - where - split :: String -> [String] - split = filter (not . null) . unfoldr chunk . lines - chunk [] = Nothing - chunk ls = case break null ls of - (r, rs) -> Just (unlines r, dropWhile null rs) - --- ----------------------------------------------------------------------------- --- Pretty-printing - -show :: BuildReport -> String -show = Disp.render . ppFields fieldDescrs - --- ----------------------------------------------------------------------------- --- Description of the fields, for parsing/printing - -fieldDescrs :: [FieldDescr BuildReport] -fieldDescrs = - [ simpleField "package" Text.disp Text.parse - package (\v r -> r { package = v }) - , simpleField "os" Text.disp Text.parse - os (\v r -> r { os = v }) - , simpleField "arch" Text.disp Text.parse - arch (\v r -> r { arch = v }) - , simpleField "compiler" Text.disp Text.parse - compiler (\v r -> r { compiler = v }) - , simpleField "client" Text.disp Text.parse - client (\v r -> r { client = v }) - , listField "flags" dispFlag parseFlag - flagAssignment (\v r -> r { flagAssignment = v }) - , listField "dependencies" Text.disp Text.parse - dependencies (\v r -> r { dependencies = v }) - , simpleField "install-outcome" Text.disp Text.parse - installOutcome (\v r -> r { installOutcome = v }) - , simpleField "docs-outcome" Text.disp Text.parse - docsOutcome (\v r -> r { docsOutcome = v }) - , simpleField "tests-outcome" Text.disp Text.parse - testsOutcome (\v r -> r { testsOutcome = v }) - ] - -sortedFieldDescrs :: [FieldDescr BuildReport] -sortedFieldDescrs = sortBy (comparing fieldName) fieldDescrs - -dispFlag :: (FlagName, Bool) -> Disp.Doc -dispFlag (FlagName name, True) = Disp.text name -dispFlag (FlagName name, False) = Disp.char '-' <> Disp.text name - -parseFlag :: Parse.ReadP r (FlagName, Bool) -parseFlag = do - name <- Parse.munch1 (\c -> Char.isAlphaNum c || c == '_' || c == '-') - case name of - ('-':flag) -> return (FlagName flag, False) - flag -> return (FlagName flag, 
True) - -instance Text.Text InstallOutcome where - disp PlanningFailed = Disp.text "PlanningFailed" - disp (DependencyFailed pkgid) = Disp.text "DependencyFailed" <+> Text.disp pkgid - disp DownloadFailed = Disp.text "DownloadFailed" - disp UnpackFailed = Disp.text "UnpackFailed" - disp SetupFailed = Disp.text "SetupFailed" - disp ConfigureFailed = Disp.text "ConfigureFailed" - disp BuildFailed = Disp.text "BuildFailed" - disp TestsFailed = Disp.text "TestsFailed" - disp InstallFailed = Disp.text "InstallFailed" - disp InstallOk = Disp.text "InstallOk" - - parse = do - name <- Parse.munch1 Char.isAlphaNum - case name of - "PlanningFailed" -> return PlanningFailed - "DependencyFailed" -> do Parse.skipSpaces - pkgid <- Text.parse - return (DependencyFailed pkgid) - "DownloadFailed" -> return DownloadFailed - "UnpackFailed" -> return UnpackFailed - "SetupFailed" -> return SetupFailed - "ConfigureFailed" -> return ConfigureFailed - "BuildFailed" -> return BuildFailed - "TestsFailed" -> return TestsFailed - "InstallFailed" -> return InstallFailed - "InstallOk" -> return InstallOk - _ -> Parse.pfail - -instance Text.Text Outcome where - disp NotTried = Disp.text "NotTried" - disp Failed = Disp.text "Failed" - disp Ok = Disp.text "Ok" - parse = do - name <- Parse.munch1 Char.isAlpha - case name of - "NotTried" -> return NotTried - "Failed" -> return Failed - "Ok" -> return Ok - _ -> Parse.pfail diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/BuildReports/Storage.hs cabal-install-1.22-1.22.9.0/Distribution/Client/BuildReports/Storage.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/BuildReports/Storage.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/BuildReports/Storage.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,152 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.Reporting --- Copyright : (c) David Waern 2008 --- License : BSD-like --- 
--- Maintainer : david.waern@gmail.com --- Stability : experimental --- Portability : portable --- --- Anonymous build report data structure, printing and parsing --- ------------------------------------------------------------------------------ -module Distribution.Client.BuildReports.Storage ( - - -- * Storing and retrieving build reports - storeAnonymous, - storeLocal, --- retrieve, - - -- * 'InstallPlan' support - fromInstallPlan, - fromPlanningFailure, - ) where - -import qualified Distribution.Client.BuildReports.Anonymous as BuildReport -import Distribution.Client.BuildReports.Anonymous (BuildReport) - -import Distribution.Client.Types -import qualified Distribution.Client.InstallPlan as InstallPlan -import Distribution.Client.InstallPlan - ( InstallPlan ) - -import Distribution.Package - ( PackageId, packageId ) -import Distribution.PackageDescription - ( FlagAssignment ) -import Distribution.Simple.InstallDirs - ( PathTemplate, fromPathTemplate - , initialPathTemplateEnv, substPathTemplate ) -import Distribution.System - ( Platform(Platform) ) -import Distribution.Compiler - ( CompilerId(..), CompilerInfo(..) ) -import Distribution.Simple.Utils - ( comparing, equating ) - -import Data.List - ( groupBy, sortBy ) -import Data.Maybe - ( catMaybes ) -import System.FilePath - ( (), takeDirectory ) -import System.Directory - ( createDirectoryIfMissing ) - -storeAnonymous :: [(BuildReport, Maybe Repo)] -> IO () -storeAnonymous reports = sequence_ - [ appendFile file (concatMap format reports') - | (repo, reports') <- separate reports - , let file = repoLocalDir repo "build-reports.log" ] - --TODO: make this concurrency safe, either lock the report file or make sure - -- the writes for each report are atomic (under 4k and flush at boundaries) - - where - format r = '\n' : BuildReport.show r ++ "\n" - separate :: [(BuildReport, Maybe Repo)] - -> [(Repo, [BuildReport])] - separate = map (\rs@((_,repo,_):_) -> (repo, [ r | (r,_,_) <- rs ])) - . map concat - . 
groupBy (equating (repoName . head)) - . sortBy (comparing (repoName . head)) - . groupBy (equating repoName) - . onlyRemote - repoName (_,_,rrepo) = remoteRepoName rrepo - - onlyRemote :: [(BuildReport, Maybe Repo)] -> [(BuildReport, Repo, RemoteRepo)] - onlyRemote rs = - [ (report, repo, remoteRepo) - | (report, Just repo@Repo { repoKind = Left remoteRepo }) <- rs ] - -storeLocal :: CompilerInfo -> [PathTemplate] -> [(BuildReport, Maybe Repo)] - -> Platform -> IO () -storeLocal cinfo templates reports platform = sequence_ - [ do createDirectoryIfMissing True (takeDirectory file) - appendFile file output - --TODO: make this concurrency safe, either lock the report file or make - -- sure the writes for each report are atomic - | (file, reports') <- groupByFileName - [ (reportFileName template report, report) - | template <- templates - , (report, _repo) <- reports ] - , let output = concatMap format reports' - ] - where - format r = '\n' : BuildReport.show r ++ "\n" - - reportFileName template report = - fromPathTemplate (substPathTemplate env template) - where env = initialPathTemplateEnv - (BuildReport.package report) - -- ToDo: In principle, we can support $pkgkey, but only - -- if the configure step succeeds. So add a Maybe field - -- to the build report, and either use that or make up - -- a fake identifier if it's not available. - (error "storeLocal: package key not available") - cinfo - platform - - groupByFileName = map (\grp@((filename,_):_) -> (filename, map snd grp)) - . groupBy (equating fst) - . sortBy (comparing fst) - --- ------------------------------------------------------------ --- * InstallPlan support --- ------------------------------------------------------------ - -fromInstallPlan :: InstallPlan -> [(BuildReport, Maybe Repo)] -fromInstallPlan plan = catMaybes - . map (fromPlanPackage platform comp) - . 
InstallPlan.toList - $ plan - where platform = InstallPlan.planPlatform plan - comp = compilerInfoId (InstallPlan.planCompiler plan) - -fromPlanPackage :: Platform -> CompilerId - -> InstallPlan.PlanPackage - -> Maybe (BuildReport, Maybe Repo) -fromPlanPackage (Platform arch os) comp planPackage = case planPackage of - InstallPlan.Installed (ReadyPackage srcPkg flags _ deps) result - -> Just $ ( BuildReport.new os arch comp - (packageId srcPkg) flags (map packageId deps) - (Right result) - , extractRepo srcPkg) - - InstallPlan.Failed (ConfiguredPackage srcPkg flags _ deps) result - -> Just $ ( BuildReport.new os arch comp - (packageId srcPkg) flags deps - (Left result) - , extractRepo srcPkg ) - - _ -> Nothing - - where - extractRepo (SourcePackage { packageSource = RepoTarballPackage repo _ _ }) = Just repo - extractRepo _ = Nothing - -fromPlanningFailure :: Platform -> CompilerId - -> [PackageId] -> FlagAssignment -> [(BuildReport, Maybe Repo)] -fromPlanningFailure (Platform arch os) comp pkgids flags = - [ (BuildReport.new os arch comp pkgid flags [] (Left PlanningFailed), Nothing) - | pkgid <- pkgids ] diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/BuildReports/Types.hs cabal-install-1.22-1.22.9.0/Distribution/Client/BuildReports/Types.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/BuildReports/Types.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/BuildReports/Types.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,44 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.BuildReports.Types --- Copyright : (c) Duncan Coutts 2009 --- License : BSD-like --- --- Maintainer : cabal-devel@haskell.org --- Portability : portable --- --- Types related to build reporting --- ------------------------------------------------------------------------------ -module Distribution.Client.BuildReports.Types ( - ReportLevel(..), - ) where - -import 
qualified Distribution.Text as Text - ( Text(..) ) - -import qualified Distribution.Compat.ReadP as Parse - ( pfail, munch1 ) -import qualified Text.PrettyPrint as Disp - ( text ) - -import Data.Char as Char - ( isAlpha, toLower ) - -data ReportLevel = NoReports | AnonymousReports | DetailedReports - deriving (Eq, Ord, Show) - -instance Text.Text ReportLevel where - disp NoReports = Disp.text "none" - disp AnonymousReports = Disp.text "anonymous" - disp DetailedReports = Disp.text "detailed" - parse = do - name <- Parse.munch1 Char.isAlpha - case lowercase name of - "none" -> return NoReports - "anonymous" -> return AnonymousReports - "detailed" -> return DetailedReports - _ -> Parse.pfail - -lowercase :: String -> String -lowercase = map Char.toLower diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/BuildReports/Upload.hs cabal-install-1.22-1.22.9.0/Distribution/Client/BuildReports/Upload.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/BuildReports/Upload.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/BuildReports/Upload.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,80 +0,0 @@ -{-# LANGUAGE CPP, PatternGuards #-} --- This is a quick hack for uploading build reports to Hackage. - -module Distribution.Client.BuildReports.Upload - ( BuildLog - , BuildReportId - , uploadReports - , postBuildReport - , putBuildLog - ) where - -import Network.Browser - ( BrowserAction, request, setAllowRedirects ) -import Network.HTTP - ( Header(..), HeaderName(..) - , Request(..), RequestMethod(..), Response(..) 
) -import Network.TCP (HandleStream) -import Network.URI (URI, uriPath, parseRelativeReference, relativeTo) - -import Control.Monad - ( forM_ ) -import System.FilePath.Posix - ( () ) -import qualified Distribution.Client.BuildReports.Anonymous as BuildReport -import Distribution.Client.BuildReports.Anonymous (BuildReport) -import Distribution.Text (display) - -type BuildReportId = URI -type BuildLog = String - -uploadReports :: URI -> [(BuildReport, Maybe BuildLog)] - -> BrowserAction (HandleStream BuildLog) () -uploadReports uri reports = do - forM_ reports $ \(report, mbBuildLog) -> do - buildId <- postBuildReport uri report - case mbBuildLog of - Just buildLog -> putBuildLog buildId buildLog - Nothing -> return () - -postBuildReport :: URI -> BuildReport - -> BrowserAction (HandleStream BuildLog) BuildReportId -postBuildReport uri buildReport = do - setAllowRedirects False - (_, response) <- request Request { - rqURI = uri { uriPath = "/package" display (BuildReport.package buildReport) "reports" }, - rqMethod = POST, - rqHeaders = [Header HdrContentType ("text/plain"), - Header HdrContentLength (show (length body)), - Header HdrAccept ("text/plain")], - rqBody = body - } - case rspCode response of - (3,0,3) | [Just buildId] <- [ do rel <- parseRelativeReference location -#if defined(VERSION_network_uri) - return $ relativeTo rel uri -#elif defined(VERSION_network) -#if MIN_VERSION_network(2,4,0) - return $ relativeTo rel uri -#else - relativeTo rel uri -#endif -#endif - | Header HdrLocation location <- rspHeaders response ] - -> return $ buildId - _ -> error "Unrecognised response from server." 
- where body = BuildReport.show buildReport - -putBuildLog :: BuildReportId -> BuildLog - -> BrowserAction (HandleStream BuildLog) () -putBuildLog reportId buildLog = do - --FIXME: do something if the request fails - (_, _response) <- request Request { - rqURI = reportId{uriPath = uriPath reportId "log"}, - rqMethod = PUT, - rqHeaders = [Header HdrContentType ("text/plain"), - Header HdrContentLength (show (length buildLog)), - Header HdrAccept ("text/plain")], - rqBody = buildLog - } - return () diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Check.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Check.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Check.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Check.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,85 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.Check --- Copyright : (c) Lennart Kolmodin 2008 --- License : BSD-like --- --- Maintainer : kolmodin@haskell.org --- Stability : provisional --- Portability : portable --- --- Check a package for common mistakes --- ------------------------------------------------------------------------------ -module Distribution.Client.Check ( - check - ) where - -import Control.Monad ( when, unless ) - -import Distribution.PackageDescription.Parse - ( readPackageDescription ) -import Distribution.PackageDescription.Check -import Distribution.PackageDescription.Configuration - ( flattenPackageDescription ) -import Distribution.Verbosity - ( Verbosity ) -import Distribution.Simple.Utils - ( defaultPackageDesc, toUTF8, wrapText ) - -check :: Verbosity -> IO Bool -check verbosity = do - pdfile <- defaultPackageDesc verbosity - ppd <- readPackageDescription verbosity pdfile - -- flatten the generic package description into a regular package - -- description - -- TODO: this may give more warnings than it should give; - -- consider two branches of a 
condition, one saying - -- ghc-options: -Wall - -- and the other - -- ghc-options: -Werror - -- joined into - -- ghc-options: -Wall -Werror - -- checkPackages will yield a warning on the last line, but it - -- would not on each individual branch. - -- Hovever, this is the same way hackage does it, so we will yield - -- the exact same errors as it will. - let pkg_desc = flattenPackageDescription ppd - ioChecks <- checkPackageFiles pkg_desc "." - let packageChecks = ioChecks ++ checkPackage ppd (Just pkg_desc) - buildImpossible = [ x | x@PackageBuildImpossible {} <- packageChecks ] - buildWarning = [ x | x@PackageBuildWarning {} <- packageChecks ] - distSuspicious = [ x | x@PackageDistSuspicious {} <- packageChecks ] - distInexusable = [ x | x@PackageDistInexcusable {} <- packageChecks ] - - unless (null buildImpossible) $ do - putStrLn "The package will not build sanely due to these errors:" - printCheckMessages buildImpossible - - unless (null buildWarning) $ do - putStrLn "The following warnings are likely affect your build negatively:" - printCheckMessages buildWarning - - unless (null distSuspicious) $ do - putStrLn "These warnings may cause trouble when distributing the package:" - printCheckMessages distSuspicious - - unless (null distInexusable) $ do - putStrLn "The following errors will cause portability problems on other environments:" - printCheckMessages distInexusable - - let isDistError (PackageDistSuspicious {}) = False - isDistError _ = True - errors = filter isDistError packageChecks - - unless (null errors) $ - putStrLn "Hackage would reject this package." - - when (null packageChecks) $ - putStrLn "No errors or warnings could be found in the package." - - return (null packageChecks) - - where - printCheckMessages = mapM_ (putStrLn . format . explanation) - format = toUTF8 . wrapText . 
("* "++) diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Compat/Environment.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Compat/Environment.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Compat/Environment.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Compat/Environment.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,87 +0,0 @@ -{-# LANGUAGE CPP, ForeignFunctionInterface #-} - ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.Compat.Environment --- Copyright : (c) Simon Hengel 2012 --- License : BSD-style (see the file LICENSE) --- --- Maintainer : cabal-devel@haskell.org --- Stability : provisional --- Portability : portable --- --- A cross-platform library for setting environment variables. --- ------------------------------------------------------------------------------ - -module Distribution.Client.Compat.Environment ( - lookupEnv, setEnv -) where - -#ifdef mingw32_HOST_OS -import GHC.Windows -import Foreign.C -import Control.Monad -#else -import Foreign.C.Types -import Foreign.C.String -import Foreign.C.Error (throwErrnoIfMinus1_) -import System.Posix.Internals ( withFilePath ) -#endif /* mingw32_HOST_OS */ - -#if MIN_VERSION_base(4,6,0) -import System.Environment (lookupEnv) -#else -import System.Environment (getEnv) -import Distribution.Compat.Exception (catchIO) -#endif - -#if !MIN_VERSION_base(4,6,0) --- | @lookupEnv var@ returns the value of the environment variable @var@, or --- @Nothing@ if there is no such value. -lookupEnv :: String -> IO (Maybe String) -lookupEnv name = (Just `fmap` getEnv name) `catchIO` const (return Nothing) -#endif /* !MIN_VERSION_base(4,6,0) */ - --- | @setEnv name value@ sets the specified environment variable to @value@. --- --- Throws `Control.Exception.IOException` if either @name@ or @value@ is the --- empty string or contains an equals sign. 
-setEnv :: String -> String -> IO () -setEnv key value_ - | null value = error "Distribuiton.Compat.setEnv: empty string" - | otherwise = setEnv_ key value - where - -- NOTE: Anything that follows NUL is ignored on both POSIX and Windows. We - -- still strip it manually so that the null check above succeeds if a value - -- starts with NUL. - value = takeWhile (/= '\NUL') value_ - -setEnv_ :: String -> String -> IO () - -#ifdef mingw32_HOST_OS - -setEnv_ key value = withCWString key $ \k -> withCWString value $ \v -> do - success <- c_SetEnvironmentVariable k v - unless success (throwGetLastError "setEnv") - -# if defined(i386_HOST_ARCH) -# define WINDOWS_CCONV stdcall -# elif defined(x86_64_HOST_ARCH) -# define WINDOWS_CCONV ccall -# else -# error Unknown mingw32 arch -# endif /* i386_HOST_ARCH */ - -foreign import WINDOWS_CCONV unsafe "windows.h SetEnvironmentVariableW" - c_SetEnvironmentVariable :: LPTSTR -> LPTSTR -> IO Bool -#else -setEnv_ key value = do - withFilePath key $ \ keyP -> - withFilePath value $ \ valueP -> - throwErrnoIfMinus1_ "setenv" $ - c_setenv keyP valueP (fromIntegral (fromEnum True)) - -foreign import ccall unsafe "setenv" - c_setenv :: CString -> CString -> CInt -> IO CInt -#endif /* mingw32_HOST_OS */ diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Compat/ExecutablePath.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Compat/ExecutablePath.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Compat/ExecutablePath.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Compat/ExecutablePath.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,183 +0,0 @@ -{-# LANGUAGE ForeignFunctionInterface #-} -{-# LANGUAGE CPP #-} - --- Copied verbatim from base-4.6.0.0. We can't simply import --- System.Environment.getExecutablePath because we need compatibility with older --- GHCs. 
- -module Distribution.Client.Compat.ExecutablePath ( getExecutablePath ) where - --- The imports are purposely kept completely disjoint to prevent edits --- to one OS implementation from breaking another. - -#if defined(darwin_HOST_OS) -import Data.Word -import Foreign.C -import Foreign.Marshal.Alloc -import Foreign.Ptr -import Foreign.Storable -import System.Posix.Internals -#elif defined(linux_HOST_OS) -import Foreign.C -import Foreign.Marshal.Array -import System.Posix.Internals -#elif defined(mingw32_HOST_OS) -import Data.Word -import Foreign.C -import Foreign.Marshal.Array -import Foreign.Ptr -import System.Posix.Internals -#else -import Foreign.C -import Foreign.Marshal.Alloc -import Foreign.Ptr -import Foreign.Storable -import System.Posix.Internals -#endif - --- GHC 7.0.* compatibility. 'System.Posix.Internals' in base-4.3.* doesn't --- provide 'peekFilePath' and 'peekFilePathLen'. -#if !MIN_VERSION_base(4,4,0) -#ifdef mingw32_HOST_OS - -peekFilePath :: CWString -> IO FilePath -peekFilePath = peekCWString - -#else - -peekFilePath :: CString -> IO FilePath -peekFilePath = peekCString - -peekFilePathLen :: CStringLen -> IO FilePath -peekFilePathLen = peekCStringLen - -#endif -#endif - --- The exported function is defined outside any if-guard to make sure --- every OS implements it with the same type. - --- | Returns the absolute pathname of the current executable. --- --- Note that for scripts and interactive sessions, this is the path to --- the interpreter (e.g. ghci.) --- --- /Since: 4.6.0.0/ -getExecutablePath :: IO FilePath - --------------------------------------------------------------------------------- --- Mac OS X - -#if defined(darwin_HOST_OS) - -type UInt32 = Word32 - -foreign import ccall unsafe "mach-o/dyld.h _NSGetExecutablePath" - c__NSGetExecutablePath :: CString -> Ptr UInt32 -> IO CInt - --- | Returns the path of the main executable. The path may be a --- symbolic link and not the real file. 
--- --- See dyld(3) -_NSGetExecutablePath :: IO FilePath -_NSGetExecutablePath = - allocaBytes 1024 $ \ buf -> -- PATH_MAX is 1024 on OS X - alloca $ \ bufsize -> do - poke bufsize 1024 - status <- c__NSGetExecutablePath buf bufsize - if status == 0 - then peekFilePath buf - else do reqBufsize <- fromIntegral `fmap` peek bufsize - allocaBytes reqBufsize $ \ newBuf -> do - status2 <- c__NSGetExecutablePath newBuf bufsize - if status2 == 0 - then peekFilePath newBuf - else error "_NSGetExecutablePath: buffer too small" - -foreign import ccall unsafe "stdlib.h realpath" - c_realpath :: CString -> CString -> IO CString - --- | Resolves all symbolic links, extra \/ characters, and references --- to \/.\/ and \/..\/. Returns an absolute pathname. --- --- See realpath(3) -realpath :: FilePath -> IO FilePath -realpath path = - withFilePath path $ \ fileName -> - allocaBytes 1024 $ \ resolvedName -> do - _ <- throwErrnoIfNull "realpath" $ c_realpath fileName resolvedName - peekFilePath resolvedName - -getExecutablePath = _NSGetExecutablePath >>= realpath - --------------------------------------------------------------------------------- --- Linux - -#elif defined(linux_HOST_OS) - -foreign import ccall unsafe "readlink" - c_readlink :: CString -> CString -> CSize -> IO CInt - --- | Reads the @FilePath@ pointed to by the symbolic link and returns --- it. 
--- --- See readlink(2) -readSymbolicLink :: FilePath -> IO FilePath -readSymbolicLink file = - allocaArray0 4096 $ \buf -> do - withFilePath file $ \s -> do - len <- throwErrnoPathIfMinus1 "readSymbolicLink" file $ - c_readlink s buf 4096 - peekFilePathLen (buf,fromIntegral len) - -getExecutablePath = readSymbolicLink $ "/proc/self/exe" - --------------------------------------------------------------------------------- --- Windows - -#elif defined(mingw32_HOST_OS) - -# if defined(i386_HOST_ARCH) -# define WINDOWS_CCONV stdcall -# elif defined(x86_64_HOST_ARCH) -# define WINDOWS_CCONV ccall -# else -# error Unknown mingw32 arch -# endif - -foreign import WINDOWS_CCONV unsafe "windows.h GetModuleFileNameW" - c_GetModuleFileName :: Ptr () -> CWString -> Word32 -> IO Word32 - -getExecutablePath = go 2048 -- plenty, PATH_MAX is 512 under Win32 - where - go size = allocaArray (fromIntegral size) $ \ buf -> do - ret <- c_GetModuleFileName nullPtr buf size - case ret of - 0 -> error "getExecutablePath: GetModuleFileNameW returned an error" - _ | ret < size -> peekFilePath buf - | otherwise -> go (size * 2) - --------------------------------------------------------------------------------- --- Fallback to argv[0] - -#else - -foreign import ccall unsafe "getFullProgArgv" - c_getFullProgArgv :: Ptr CInt -> Ptr (Ptr CString) -> IO () - -getExecutablePath = - alloca $ \ p_argc -> - alloca $ \ p_argv -> do - c_getFullProgArgv p_argc p_argv - argc <- peek p_argc - if argc > 0 - -- If argc > 0 then argv[0] is guaranteed by the standard - -- to be a pointer to a null-terminated string. 
- then peek p_argv >>= peek >>= peekFilePath - else error $ "getExecutablePath: " ++ msg - where msg = "no OS specific implementation and program name couldn't be " ++ - "found in argv" - --------------------------------------------------------------------------------- - -#endif diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Compat/FilePerms.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Compat/FilePerms.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Compat/FilePerms.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Compat/FilePerms.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,36 +0,0 @@ -{-# LANGUAGE CPP #-} -{-# OPTIONS_HADDOCK hide #-} -module Distribution.Client.Compat.FilePerms ( - setFileOrdinary, - setFileExecutable, - setFileHidden, - ) where - -#ifndef mingw32_HOST_OS -import System.Posix.Types - ( FileMode ) -import System.Posix.Internals - ( c_chmod ) -import Foreign.C - ( withCString ) -import Foreign.C - ( throwErrnoPathIfMinus1_ ) -#else -import System.Win32.File (setFileAttributes, fILE_ATTRIBUTE_HIDDEN) -#endif /* mingw32_HOST_OS */ - -setFileHidden, setFileOrdinary, setFileExecutable :: FilePath -> IO () -#ifndef mingw32_HOST_OS -setFileOrdinary path = setFileMode path 0o644 -- file perms -rw-r--r-- -setFileExecutable path = setFileMode path 0o755 -- file perms -rwxr-xr-x -setFileHidden _ = return () - -setFileMode :: FilePath -> FileMode -> IO () -setFileMode name m = - withCString name $ \s -> - throwErrnoPathIfMinus1_ "setFileMode" name (c_chmod s m) -#else -setFileOrdinary _ = return () -setFileExecutable _ = return () -setFileHidden path = setFileAttributes path fILE_ATTRIBUTE_HIDDEN -#endif diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Compat/Process.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Compat/Process.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Compat/Process.hs 2015-06-17 08:11:26.000000000 +0000 +++ 
cabal-install-1.22-1.22.9.0/Distribution/Client/Compat/Process.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,48 +0,0 @@ -{-# LANGUAGE CPP #-} - ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.Compat.Process --- Copyright : (c) 2013 Liu Hao, Brent Yorgey --- License : BSD-style (see the file LICENSE) --- --- Maintainer : cabal-devel@haskell.org --- Stability : provisional --- Portability : portable --- --- Cross-platform utilities for invoking processes. --- ------------------------------------------------------------------------------ - -module Distribution.Client.Compat.Process ( - readProcessWithExitCode -) where - -#if !MIN_VERSION_base(4,6,0) -import Prelude hiding (catch) -#endif - -import Control.Exception (catch, throw) -import System.Exit (ExitCode (ExitFailure)) -import System.IO.Error (isDoesNotExistError) -import qualified System.Process as P - --- | @readProcessWithExitCode@ creates an external process, reads its --- standard output and standard error strictly, waits until the --- process terminates, and then returns the @ExitCode@ of the --- process, the standard output, and the standard error. --- --- See the documentation of the version from @System.Process@ for --- more information. --- --- The version from @System.Process@ behaves inconsistently across --- platforms when an executable with the given name is not found: in --- some cases it returns an @ExitFailure@, in others it throws an --- exception. This variant catches \"does not exist\" exceptions and --- turns them into @ExitFailure@s. 
-readProcessWithExitCode :: FilePath -> [String] -> String -> IO (ExitCode, String, String) -readProcessWithExitCode cmd args input = - P.readProcessWithExitCode cmd args input - `catch` \e -> if isDoesNotExistError e - then return (ExitFailure 127, "", "") - else throw e diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Compat/Semaphore.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Compat/Semaphore.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Compat/Semaphore.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Compat/Semaphore.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,104 +0,0 @@ -{-# LANGUAGE DeriveDataTypeable #-} -{-# OPTIONS_GHC -funbox-strict-fields #-} -module Distribution.Client.Compat.Semaphore - ( QSem - , newQSem - , waitQSem - , signalQSem - ) where - -import Control.Concurrent.STM (TVar, atomically, newTVar, readTVar, retry, - writeTVar) -import Control.Exception (mask_, onException) -import Control.Monad (join, when) -import Data.Typeable (Typeable) - --- | 'QSem' is a quantity semaphore in which the resource is aqcuired --- and released in units of one. It provides guaranteed FIFO ordering --- for satisfying blocked `waitQSem` calls. --- -data QSem = QSem !(TVar Int) !(TVar [TVar Bool]) !(TVar [TVar Bool]) - deriving (Eq, Typeable) - -newQSem :: Int -> IO QSem -newQSem i = atomically $ do - q <- newTVar i - b1 <- newTVar [] - b2 <- newTVar [] - return (QSem q b1 b2) - -waitQSem :: QSem -> IO () -waitQSem s@(QSem q _b1 b2) = - mask_ $ join $ atomically $ do - -- join, because if we need to block, we have to add a TVar to - -- the block queue. - -- mask_, because we need a chance to set up an exception handler - -- after the join returns. - v <- readTVar q - if v == 0 - then do b <- newTVar False - ys <- readTVar b2 - writeTVar b2 (b:ys) - return (wait b) - else do writeTVar q $! 
v - 1 - return (return ()) - where - -- - -- very careful here: if we receive an exception, then we need to - -- (a) write True into the TVar, so that another signalQSem doesn't - -- try to wake up this thread, and - -- (b) if the TVar is *already* True, then we need to do another - -- signalQSem to avoid losing a unit of the resource. - -- - -- The 'wake' function does both (a) and (b), so we can just call - -- it here. - -- - wait t = - flip onException (wake s t) $ - atomically $ do - b <- readTVar t - when (not b) retry - - -wake :: QSem -> TVar Bool -> IO () -wake s x = join $ atomically $ do - b <- readTVar x - if b then return (signalQSem s) - else do writeTVar x True - return (return ()) - -{- - property we want: - - bracket waitQSem (\_ -> signalQSem) (\_ -> ...) - - never loses a unit of the resource. --} - -signalQSem :: QSem -> IO () -signalQSem s@(QSem q b1 b2) = - mask_ $ join $ atomically $ do - -- join, so we don't force the reverse inside the txn - -- mask_ is needed so we don't lose a wakeup - v <- readTVar q - if v /= 0 - then do writeTVar q $! 
v + 1 - return (return ()) - else do xs <- readTVar b1 - checkwake1 xs - where - checkwake1 [] = do - ys <- readTVar b2 - checkwake2 ys - checkwake1 (x:xs) = do - writeTVar b1 xs - return (wake s x) - - checkwake2 [] = do - writeTVar q 1 - return (return ()) - checkwake2 ys = do - let (z:zs) = reverse ys - writeTVar b1 zs - writeTVar b2 [] - return (wake s z) diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Compat/Time.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Compat/Time.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Compat/Time.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Compat/Time.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,142 +0,0 @@ -{-# LANGUAGE CPP, ForeignFunctionInterface #-} -module Distribution.Client.Compat.Time - (EpochTime, getModTime, getFileAge, getCurTime) - where - -import Data.Int (Int64) -import System.Directory (getModificationTime) - -#if MIN_VERSION_directory(1,2,0) -import Data.Time.Clock.POSIX (utcTimeToPOSIXSeconds, posixDayLength) -import Data.Time (getCurrentTime, diffUTCTime) -#else -import System.Time (ClockTime(..), getClockTime - ,diffClockTimes, normalizeTimeDiff, tdDay, tdHour) -#endif - -#if defined mingw32_HOST_OS - -#if MIN_VERSION_base(4,7,0) -import Data.Bits ((.|.), finiteBitSize, unsafeShiftL) -#else -import Data.Bits ((.|.), bitSize, unsafeShiftL) -#endif -import Data.Int (Int32) -import Data.Word (Word64) -import Foreign (allocaBytes, peekByteOff) -import System.IO.Error (mkIOError, doesNotExistErrorType) -import System.Win32.Types (BOOL, DWORD, LPCTSTR, LPVOID, withTString) - -#ifdef x86_64_HOST_ARCH -#define CALLCONV ccall -#else -#define CALLCONV stdcall -#endif - -foreign import CALLCONV "windows.h GetFileAttributesExW" - c_getFileAttributesEx :: LPCTSTR -> Int32 -> LPVOID -> IO BOOL - -getFileAttributesEx :: String -> LPVOID -> IO BOOL -getFileAttributesEx path lpFileInformation = - withTString path $ \c_path -> - c_getFileAttributesEx c_path 
getFileExInfoStandard lpFileInformation - -getFileExInfoStandard :: Int32 -getFileExInfoStandard = 0 - -size_WIN32_FILE_ATTRIBUTE_DATA :: Int -size_WIN32_FILE_ATTRIBUTE_DATA = 36 - -index_WIN32_FILE_ATTRIBUTE_DATA_ftLastWriteTime_dwLowDateTime :: Int -index_WIN32_FILE_ATTRIBUTE_DATA_ftLastWriteTime_dwLowDateTime = 20 - -index_WIN32_FILE_ATTRIBUTE_DATA_ftLastWriteTime_dwHighDateTime :: Int -index_WIN32_FILE_ATTRIBUTE_DATA_ftLastWriteTime_dwHighDateTime = 24 - -#else - -import Foreign.C.Types (CTime(..)) -import System.Posix.Files (getFileStatus, modificationTime) - -#endif - --- | The number of seconds since the UNIX epoch. -type EpochTime = Int64 - --- | Return modification time of given file. Works around the low clock --- resolution problem that 'getModificationTime' has on GHC < 7.8. --- --- This is a modified version of the code originally written for OpenShake by --- Neil Mitchell. See module Development.Shake.FileTime. -getModTime :: FilePath -> IO EpochTime - -#if defined mingw32_HOST_OS - --- Directly against the Win32 API. -getModTime path = allocaBytes size_WIN32_FILE_ATTRIBUTE_DATA $ \info -> do - res <- getFileAttributesEx path info - if not res - then do - let err = mkIOError doesNotExistErrorType - "Distribution.Client.Compat.Time.getModTime" - Nothing (Just path) - ioError err - else do - dwLow <- peekByteOff info - index_WIN32_FILE_ATTRIBUTE_DATA_ftLastWriteTime_dwLowDateTime - dwHigh <- peekByteOff info - index_WIN32_FILE_ATTRIBUTE_DATA_ftLastWriteTime_dwHighDateTime - return $! windowsTimeToPOSIXSeconds dwLow dwHigh - where - windowsTimeToPOSIXSeconds :: DWORD -> DWORD -> EpochTime - windowsTimeToPOSIXSeconds dwLow dwHigh = - let wINDOWS_TICK = 10000000 - sEC_TO_UNIX_EPOCH = 11644473600 -#if MIN_VERSION_base(4,7,0) - qwTime = (fromIntegral dwHigh `unsafeShiftL` finiteBitSize dwHigh) - .|. (fromIntegral dwLow) -#else - qwTime = (fromIntegral dwHigh `unsafeShiftL` bitSize dwHigh) - .|. 
(fromIntegral dwLow) -#endif - res = ((qwTime :: Word64) `div` wINDOWS_TICK) - - sEC_TO_UNIX_EPOCH - -- TODO: What if the result is not representable as POSIX seconds? - -- Probably fine to return garbage. - in fromIntegral res -#else - --- Directly against the unix library. -getModTime path = do - -- CTime is Int32 in base 4.5, Int64 in base >= 4.6, and an abstract type in - -- base < 4.5. - t <- fmap modificationTime $ getFileStatus path -#if MIN_VERSION_base(4,5,0) - let CTime i = t - return (fromIntegral i) -#else - return (read . show $ t) -#endif -#endif - --- | Return age of given file in days. -getFileAge :: FilePath -> IO Double -getFileAge file = do - t0 <- getModificationTime file -#if MIN_VERSION_directory(1,2,0) - t1 <- getCurrentTime - return $ realToFrac (t1 `diffUTCTime` t0) / realToFrac posixDayLength -#else - t1 <- getClockTime - let dt = normalizeTimeDiff (t1 `diffClockTimes` t0) - return $ fromIntegral ((24 * tdDay dt) + tdHour dt) / 24.0 -#endif - -getCurTime :: IO EpochTime -getCurTime = do -#if MIN_VERSION_directory(1,2,0) - (truncate . utcTimeToPOSIXSeconds) `fmap` getCurrentTime -#else - (TOD s _) <- getClockTime - return $! fromIntegral s -#endif diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Config.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Config.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Config.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Config.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,931 +0,0 @@ -{-# LANGUAGE CPP #-} ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.Config --- Copyright : (c) David Himmelstrup 2005 --- License : BSD-like --- --- Maintainer : lemmih@gmail.com --- Stability : provisional --- Portability : portable --- --- Utilities for handling saved state such as known packages, known servers and --- downloaded packages. 
------------------------------------------------------------------------------ -module Distribution.Client.Config ( - SavedConfig(..), - loadConfig, - - showConfig, - showConfigWithComments, - parseConfig, - - defaultCabalDir, - defaultConfigFile, - defaultCacheDir, - defaultCompiler, - defaultLogsDir, - defaultUserInstall, - - baseSavedConfig, - commentSavedConfig, - initialSavedConfig, - configFieldDescriptions, - haddockFlagsFields, - installDirsFields, - withProgramsFields, - withProgramOptionsFields, - userConfigDiff, - userConfigUpdate - ) where - -import Distribution.Client.Types - ( RemoteRepo(..), Username(..), Password(..) ) -import Distribution.Client.BuildReports.Types - ( ReportLevel(..) ) -import Distribution.Client.Setup - ( GlobalFlags(..), globalCommand, defaultGlobalFlags - , ConfigExFlags(..), configureExOptions, defaultConfigExFlags - , InstallFlags(..), installOptions, defaultInstallFlags - , UploadFlags(..), uploadCommand - , ReportFlags(..), reportCommand - , showRepo, parseRepo ) -import Distribution.Utils.NubList - ( NubList, fromNubList, toNubList) - -import Distribution.Simple.Compiler - ( DebugInfoLevel(..), OptimisationLevel(..) ) -import Distribution.Simple.Setup - ( ConfigFlags(..), configureOptions, defaultConfigFlags - , HaddockFlags(..), haddockOptions, defaultHaddockFlags - , installDirsOptions - , programConfigurationPaths', programConfigurationOptions - , Flag(..), toFlag, flagToMaybe, fromFlagOrDefault ) -import Distribution.Simple.InstallDirs - ( InstallDirs(..), defaultInstallDirs - , PathTemplate, toPathTemplate ) -import Distribution.ParseUtils - ( FieldDescr(..), liftField - , ParseResult(..), PError(..), PWarning(..) - , locatedErrorMsg, showPWarning - , readFields, warning, lineNo - , simpleField, listField, parseFilePathQ, parseTokenQ ) -import Distribution.Client.ParseUtils - ( parseFields, ppFields, ppSection ) -import qualified Distribution.ParseUtils as ParseUtils - ( Field(..) 
) -import qualified Distribution.Text as Text - ( Text(..) ) -import Distribution.Simple.Command - ( CommandUI(commandOptions), commandDefaultFlags, ShowOrParseArgs(..) - , viewAsFieldDescr ) -import Distribution.Simple.Program - ( defaultProgramConfiguration ) -import Distribution.Simple.Utils - ( die, notice, warn, lowercase, cabalVersion ) -import Distribution.Compiler - ( CompilerFlavor(..), defaultCompilerFlavor ) -import Distribution.Verbosity - ( Verbosity, normal ) - -import Data.List - ( partition, find, foldl' ) -import Data.Maybe - ( fromMaybe ) -#if !MIN_VERSION_base(4,8,0) -import Data.Monoid - ( Monoid(..) ) -#endif -import Control.Monad - ( unless, foldM, liftM, liftM2 ) -import qualified Distribution.Compat.ReadP as Parse - ( option ) -import qualified Text.PrettyPrint as Disp - ( render, text, empty ) -import Text.PrettyPrint - ( ($+$) ) -import System.Directory - ( createDirectoryIfMissing, getAppUserDataDirectory, renameFile ) -import Network.URI - ( URI(..), URIAuth(..) 
) -import System.FilePath - ( (<.>), (), takeDirectory ) -import System.IO.Error - ( isDoesNotExistError ) -import Distribution.Compat.Environment - ( getEnvironment ) -import Distribution.Compat.Exception - ( catchIO ) -import qualified Paths_cabal_install - ( version ) -import Data.Version - ( showVersion ) -import Data.Char - ( isSpace ) -import qualified Data.Map as M - --- --- * Configuration saved in the config file --- - -data SavedConfig = SavedConfig { - savedGlobalFlags :: GlobalFlags, - savedInstallFlags :: InstallFlags, - savedConfigureFlags :: ConfigFlags, - savedConfigureExFlags :: ConfigExFlags, - savedUserInstallDirs :: InstallDirs (Flag PathTemplate), - savedGlobalInstallDirs :: InstallDirs (Flag PathTemplate), - savedUploadFlags :: UploadFlags, - savedReportFlags :: ReportFlags, - savedHaddockFlags :: HaddockFlags - } - -instance Monoid SavedConfig where - mempty = SavedConfig { - savedGlobalFlags = mempty, - savedInstallFlags = mempty, - savedConfigureFlags = mempty, - savedConfigureExFlags = mempty, - savedUserInstallDirs = mempty, - savedGlobalInstallDirs = mempty, - savedUploadFlags = mempty, - savedReportFlags = mempty, - savedHaddockFlags = mempty - } - mappend a b = SavedConfig { - savedGlobalFlags = combinedSavedGlobalFlags, - savedInstallFlags = combinedSavedInstallFlags, - savedConfigureFlags = combinedSavedConfigureFlags, - savedConfigureExFlags = combinedSavedConfigureExFlags, - savedUserInstallDirs = combinedSavedUserInstallDirs, - savedGlobalInstallDirs = combinedSavedGlobalInstallDirs, - savedUploadFlags = combinedSavedUploadFlags, - savedReportFlags = combinedSavedReportFlags, - savedHaddockFlags = combinedSavedHaddockFlags - } - where - -- This is ugly, but necessary. If we're mappending two config files, we - -- want the values of the *non-empty* list fields from the second one to - -- *override* the corresponding values from the first one. 
Default - -- behaviour (concatenation) is confusing and makes some use cases (see - -- #1884) impossible. - -- - -- However, we also want to allow specifying multiple values for a list - -- field in a *single* config file. For example, we want the following to - -- continue to work: - -- - -- remote-repo: hackage.haskell.org:http://hackage.haskell.org/ - -- remote-repo: private-collection:http://hackage.local/ - -- - -- So we can't just wrap the list fields inside Flags; we have to do some - -- special-casing just for SavedConfig. - - -- NB: the signature prevents us from using 'combine' on lists. - combine' :: (SavedConfig -> flags) -> (flags -> Flag a) -> Flag a - combine' field subfield = - (subfield . field $ a) `mappend` (subfield . field $ b) - - lastNonEmpty' :: (SavedConfig -> flags) -> (flags -> [a]) -> [a] - lastNonEmpty' field subfield = - let a' = subfield . field $ a - b' = subfield . field $ b - in case b' of [] -> a' - _ -> b' - - lastNonEmptyNL' :: (SavedConfig -> flags) -> (flags -> NubList a) - -> NubList a - lastNonEmptyNL' field subfield = - let a' = subfield . field $ a - b' = subfield . 
field $ b - in case fromNubList b' of [] -> a' - _ -> b' - - combinedSavedGlobalFlags = GlobalFlags { - globalVersion = combine globalVersion, - globalNumericVersion = combine globalNumericVersion, - globalConfigFile = combine globalConfigFile, - globalSandboxConfigFile = combine globalSandboxConfigFile, - globalRemoteRepos = lastNonEmptyNL globalRemoteRepos, - globalCacheDir = combine globalCacheDir, - globalLocalRepos = lastNonEmptyNL globalLocalRepos, - globalLogsDir = combine globalLogsDir, - globalWorldFile = combine globalWorldFile, - globalRequireSandbox = combine globalRequireSandbox, - globalIgnoreSandbox = combine globalIgnoreSandbox - } - where - combine = combine' savedGlobalFlags - lastNonEmptyNL = lastNonEmptyNL' savedGlobalFlags - - combinedSavedInstallFlags = InstallFlags { - installDocumentation = combine installDocumentation, - installHaddockIndex = combine installHaddockIndex, - installDryRun = combine installDryRun, - installMaxBackjumps = combine installMaxBackjumps, - installReorderGoals = combine installReorderGoals, - installIndependentGoals = combine installIndependentGoals, - installShadowPkgs = combine installShadowPkgs, - installStrongFlags = combine installStrongFlags, - installReinstall = combine installReinstall, - installAvoidReinstalls = combine installAvoidReinstalls, - installOverrideReinstall = combine installOverrideReinstall, - installUpgradeDeps = combine installUpgradeDeps, - installOnly = combine installOnly, - installOnlyDeps = combine installOnlyDeps, - installRootCmd = combine installRootCmd, - installSummaryFile = lastNonEmptyNL installSummaryFile, - installLogFile = combine installLogFile, - installBuildReports = combine installBuildReports, - installReportPlanningFailure = combine installReportPlanningFailure, - installSymlinkBinDir = combine installSymlinkBinDir, - installOneShot = combine installOneShot, - installNumJobs = combine installNumJobs, - installRunTests = combine installRunTests - } - where - combine = 
combine' savedInstallFlags - lastNonEmptyNL = lastNonEmptyNL' savedInstallFlags - - combinedSavedConfigureFlags = ConfigFlags { - configPrograms = configPrograms . savedConfigureFlags $ b, - -- TODO: NubListify - configProgramPaths = lastNonEmpty configProgramPaths, - -- TODO: NubListify - configProgramArgs = lastNonEmpty configProgramArgs, - configProgramPathExtra = lastNonEmptyNL configProgramPathExtra, - configHcFlavor = combine configHcFlavor, - configHcPath = combine configHcPath, - configHcPkg = combine configHcPkg, - configVanillaLib = combine configVanillaLib, - configProfLib = combine configProfLib, - configSharedLib = combine configSharedLib, - configDynExe = combine configDynExe, - configProfExe = combine configProfExe, - -- TODO: NubListify - configConfigureArgs = lastNonEmpty configConfigureArgs, - configOptimization = combine configOptimization, - configDebugInfo = combine configDebugInfo, - configProgPrefix = combine configProgPrefix, - configProgSuffix = combine configProgSuffix, - -- Parametrised by (Flag PathTemplate), so safe to use 'mappend'. - configInstallDirs = - (configInstallDirs . savedConfigureFlags $ a) - `mappend` (configInstallDirs . 
savedConfigureFlags $ b), - configScratchDir = combine configScratchDir, - -- TODO: NubListify - configExtraLibDirs = lastNonEmpty configExtraLibDirs, - -- TODO: NubListify - configExtraIncludeDirs = lastNonEmpty configExtraIncludeDirs, - configDistPref = combine configDistPref, - configVerbosity = combine configVerbosity, - configUserInstall = combine configUserInstall, - -- TODO: NubListify - configPackageDBs = lastNonEmpty configPackageDBs, - configGHCiLib = combine configGHCiLib, - configSplitObjs = combine configSplitObjs, - configStripExes = combine configStripExes, - configStripLibs = combine configStripLibs, - -- TODO: NubListify - configConstraints = lastNonEmpty configConstraints, - -- TODO: NubListify - configDependencies = lastNonEmpty configDependencies, - configInstantiateWith = lastNonEmpty configInstantiateWith, - -- TODO: NubListify - configConfigurationsFlags = lastNonEmpty configConfigurationsFlags, - configTests = combine configTests, - configBenchmarks = combine configBenchmarks, - configCoverage = combine configCoverage, - configLibCoverage = combine configLibCoverage, - configExactConfiguration = combine configExactConfiguration, - configFlagError = combine configFlagError, - configRelocatable = combine configRelocatable - } - where - combine = combine' savedConfigureFlags - lastNonEmpty = lastNonEmpty' savedConfigureFlags - lastNonEmptyNL = lastNonEmptyNL' savedConfigureFlags - - combinedSavedConfigureExFlags = ConfigExFlags { - configCabalVersion = combine configCabalVersion, - -- TODO: NubListify - configExConstraints = lastNonEmpty configExConstraints, - -- TODO: NubListify - configPreferences = lastNonEmpty configPreferences, - configSolver = combine configSolver, - configAllowNewer = combine configAllowNewer - } - where - combine = combine' savedConfigureExFlags - lastNonEmpty = lastNonEmpty' savedConfigureExFlags - - -- Parametrised by (Flag PathTemplate), so safe to use 'mappend'. 
- combinedSavedUserInstallDirs = savedUserInstallDirs a - `mappend` savedUserInstallDirs b - - -- Parametrised by (Flag PathTemplate), so safe to use 'mappend'. - combinedSavedGlobalInstallDirs = savedGlobalInstallDirs a - `mappend` savedGlobalInstallDirs b - - combinedSavedUploadFlags = UploadFlags { - uploadCheck = combine uploadCheck, - uploadUsername = combine uploadUsername, - uploadPassword = combine uploadPassword, - uploadVerbosity = combine uploadVerbosity - } - where - combine = combine' savedUploadFlags - - combinedSavedReportFlags = ReportFlags { - reportUsername = combine reportUsername, - reportPassword = combine reportPassword, - reportVerbosity = combine reportVerbosity - } - where - combine = combine' savedReportFlags - - combinedSavedHaddockFlags = HaddockFlags { - -- TODO: NubListify - haddockProgramPaths = lastNonEmpty haddockProgramPaths, - -- TODO: NubListify - haddockProgramArgs = lastNonEmpty haddockProgramArgs, - haddockHoogle = combine haddockHoogle, - haddockHtml = combine haddockHtml, - haddockHtmlLocation = combine haddockHtmlLocation, - haddockExecutables = combine haddockExecutables, - haddockTestSuites = combine haddockTestSuites, - haddockBenchmarks = combine haddockBenchmarks, - haddockInternal = combine haddockInternal, - haddockCss = combine haddockCss, - haddockHscolour = combine haddockHscolour, - haddockHscolourCss = combine haddockHscolourCss, - haddockContents = combine haddockContents, - haddockDistPref = combine haddockDistPref, - haddockKeepTempFiles = combine haddockKeepTempFiles, - haddockVerbosity = combine haddockVerbosity - } - where - combine = combine' savedHaddockFlags - lastNonEmpty = lastNonEmpty' savedHaddockFlags - - -updateInstallDirs :: Flag Bool -> SavedConfig -> SavedConfig -updateInstallDirs userInstallFlag - savedConfig@SavedConfig { - savedConfigureFlags = configureFlags, - savedUserInstallDirs = userInstallDirs, - savedGlobalInstallDirs = globalInstallDirs - } = - savedConfig { - savedConfigureFlags = 
configureFlags { - configInstallDirs = installDirs - } - } - where - installDirs | userInstall = userInstallDirs - | otherwise = globalInstallDirs - userInstall = fromFlagOrDefault defaultUserInstall $ - configUserInstall configureFlags `mappend` userInstallFlag - --- --- * Default config --- - --- | These are the absolute basic defaults. The fields that must be --- initialised. When we load the config from the file we layer the loaded --- values over these ones, so any missing fields in the file take their values --- from here. --- -baseSavedConfig :: IO SavedConfig -baseSavedConfig = do - userPrefix <- defaultCabalDir - logsDir <- defaultLogsDir - worldFile <- defaultWorldFile - return mempty { - savedConfigureFlags = mempty { - configHcFlavor = toFlag defaultCompiler, - configUserInstall = toFlag defaultUserInstall, - configVerbosity = toFlag normal - }, - savedUserInstallDirs = mempty { - prefix = toFlag (toPathTemplate userPrefix) - }, - savedGlobalFlags = mempty { - globalLogsDir = toFlag logsDir, - globalWorldFile = toFlag worldFile - } - } - --- | This is the initial configuration that we write out to to the config file --- if the file does not exist (or the config we use if the file cannot be read --- for some other reason). When the config gets loaded it gets layered on top --- of 'baseSavedConfig' so we do not need to include it into the initial --- values we save into the config file. 
--- -initialSavedConfig :: IO SavedConfig -initialSavedConfig = do - cacheDir <- defaultCacheDir - logsDir <- defaultLogsDir - worldFile <- defaultWorldFile - extraPath <- defaultExtraPath - return mempty { - savedGlobalFlags = mempty { - globalCacheDir = toFlag cacheDir, - globalRemoteRepos = toNubList [defaultRemoteRepo], - globalWorldFile = toFlag worldFile - }, - savedConfigureFlags = mempty { - configProgramPathExtra = toNubList extraPath - }, - savedInstallFlags = mempty { - installSummaryFile = toNubList [toPathTemplate (logsDir "build.log")], - installBuildReports= toFlag AnonymousReports, - installNumJobs = toFlag Nothing - } - } - ---TODO: misleading, there's no way to override this default --- either make it possible or rename to simply getCabalDir. -defaultCabalDir :: IO FilePath -defaultCabalDir = getAppUserDataDirectory "cabal" - -defaultConfigFile :: IO FilePath -defaultConfigFile = do - dir <- defaultCabalDir - return $ dir "config" - -defaultCacheDir :: IO FilePath -defaultCacheDir = do - dir <- defaultCabalDir - return $ dir "packages" - -defaultLogsDir :: IO FilePath -defaultLogsDir = do - dir <- defaultCabalDir - return $ dir "logs" - --- | Default position of the world file -defaultWorldFile :: IO FilePath -defaultWorldFile = do - dir <- defaultCabalDir - return $ dir "world" - -defaultExtraPath :: IO [FilePath] -defaultExtraPath = do - dir <- defaultCabalDir - return [dir "bin"] - -defaultCompiler :: CompilerFlavor -defaultCompiler = fromMaybe GHC defaultCompilerFlavor - -defaultUserInstall :: Bool -defaultUserInstall = True --- We do per-user installs by default on all platforms. We used to default to --- global installs on Windows but that no longer works on Windows Vista or 7. 
- -defaultRemoteRepo :: RemoteRepo -defaultRemoteRepo = RemoteRepo name uri - where - name = "hackage.haskell.org" - uri = URI "http:" (Just (URIAuth "" name "")) "/packages/archive" "" "" - --- --- * Config file reading --- - -loadConfig :: Verbosity -> Flag FilePath -> Flag Bool -> IO SavedConfig -loadConfig verbosity configFileFlag userInstallFlag = addBaseConf $ do - let sources = [ - ("commandline option", return . flagToMaybe $ configFileFlag), - ("env var CABAL_CONFIG", lookup "CABAL_CONFIG" `liftM` getEnvironment), - ("default config file", Just `liftM` defaultConfigFile) ] - - getSource [] = error "no config file path candidate found." - getSource ((msg,action): xs) = - action >>= maybe (getSource xs) (return . (,) msg) - - (source, configFile) <- getSource sources - minp <- readConfigFile mempty configFile - case minp of - Nothing -> do - notice verbosity $ "Config file path source is " ++ source ++ "." - notice verbosity $ "Config file " ++ configFile ++ " not found." - notice verbosity $ "Writing default configuration to " ++ configFile - commentConf <- commentSavedConfig - initialConf <- initialSavedConfig - writeConfigFile configFile commentConf initialConf - return initialConf - Just (ParseOk ws conf) -> do - unless (null ws) $ warn verbosity $ - unlines (map (showPWarning configFile) ws) - return conf - Just (ParseFailed err) -> do - let (line, msg) = locatedErrorMsg err - die $ - "Error parsing config file " ++ configFile - ++ maybe "" (\n -> ':' : show n) line ++ ":\n" ++ msg - - where - addBaseConf body = do - base <- baseSavedConfig - extra <- body - return (updateInstallDirs userInstallFlag (base `mappend` extra)) - -readConfigFile :: SavedConfig -> FilePath -> IO (Maybe (ParseResult SavedConfig)) -readConfigFile initial file = handleNotExists $ - fmap (Just . 
parseConfig initial) (readFile file) - - where - handleNotExists action = catchIO action $ \ioe -> - if isDoesNotExistError ioe - then return Nothing - else ioError ioe - -writeConfigFile :: FilePath -> SavedConfig -> SavedConfig -> IO () -writeConfigFile file comments vals = do - let tmpFile = file <.> "tmp" - createDirectoryIfMissing True (takeDirectory file) - writeFile tmpFile $ explanation ++ showConfigWithComments comments vals ++ "\n" - renameFile tmpFile file - where - explanation = unlines - ["-- This is the configuration file for the 'cabal' command line tool." - ,"" - ,"-- The available configuration options are listed below." - ,"-- Some of them have default values listed." - ,"" - ,"-- Lines (like this one) beginning with '--' are comments." - ,"-- Be careful with spaces and indentation because they are" - ,"-- used to indicate layout for nested sections." - ,"" - ,"-- Cabal library version: " ++ showVersion cabalVersion - ,"-- cabal-install version: " ++ showVersion Paths_cabal_install.version - ,"","" - ] - --- | These are the default values that get used in Cabal if a no value is --- given. We use these here to include in comments when we write out the --- initial config file so that the user can see what default value they are --- overriding. 
--- -commentSavedConfig :: IO SavedConfig -commentSavedConfig = do - userInstallDirs <- defaultInstallDirs defaultCompiler True True - globalInstallDirs <- defaultInstallDirs defaultCompiler False True - return SavedConfig { - savedGlobalFlags = defaultGlobalFlags, - savedInstallFlags = defaultInstallFlags, - savedConfigureExFlags = defaultConfigExFlags, - savedConfigureFlags = (defaultConfigFlags defaultProgramConfiguration) { - configUserInstall = toFlag defaultUserInstall - }, - savedUserInstallDirs = fmap toFlag userInstallDirs, - savedGlobalInstallDirs = fmap toFlag globalInstallDirs, - savedUploadFlags = commandDefaultFlags uploadCommand, - savedReportFlags = commandDefaultFlags reportCommand, - savedHaddockFlags = defaultHaddockFlags - } - --- | All config file fields. --- -configFieldDescriptions :: [FieldDescr SavedConfig] -configFieldDescriptions = - - toSavedConfig liftGlobalFlag - (commandOptions (globalCommand []) ParseArgs) - ["version", "numeric-version", "config-file", "sandbox-config-file"] [] - - ++ toSavedConfig liftConfigFlag - (configureOptions ParseArgs) - (["builddir", "constraint", "dependency"] - ++ map fieldName installDirsFields) - - --FIXME: this is only here because viewAsFieldDescr gives us a parser - -- that only recognises 'ghc' etc, the case-sensitive flag names, not - -- what the normal case-insensitive parser gives us. - [simpleField "compiler" - (fromFlagOrDefault Disp.empty . fmap Text.disp) (optional Text.parse) - configHcFlavor (\v flags -> flags { configHcFlavor = v }) - -- TODO: The following is a temporary fix. The "optimization" - -- and "debug-info" fields are OptArg, and viewAsFieldDescr - -- fails on that. Instead of a hand-written hackaged parser - -- and printer, we should handle this case properly in the - -- library. 
- ,liftField configOptimization (\v flags -> flags { configOptimization = v }) $ - let name = "optimization" in - FieldDescr name - (\f -> case f of - Flag NoOptimisation -> Disp.text "False" - Flag NormalOptimisation -> Disp.text "True" - Flag MaximumOptimisation -> Disp.text "2" - _ -> Disp.empty) - (\line str _ -> case () of - _ | str == "False" -> ParseOk [] (Flag NoOptimisation) - | str == "True" -> ParseOk [] (Flag NormalOptimisation) - | str == "0" -> ParseOk [] (Flag NoOptimisation) - | str == "1" -> ParseOk [] (Flag NormalOptimisation) - | str == "2" -> ParseOk [] (Flag MaximumOptimisation) - | lstr == "false" -> ParseOk [caseWarning] (Flag NoOptimisation) - | lstr == "true" -> ParseOk [caseWarning] (Flag NormalOptimisation) - | otherwise -> ParseFailed (NoParse name line) - where - lstr = lowercase str - caseWarning = PWarning $ - "The '" ++ name ++ "' field is case sensitive, use 'True' or 'False'.") - ,liftField configDebugInfo (\v flags -> flags { configDebugInfo = v }) $ - let name = "debug-info" in - FieldDescr name - (\f -> case f of - Flag NoDebugInfo -> Disp.text "False" - Flag MinimalDebugInfo -> Disp.text "1" - Flag NormalDebugInfo -> Disp.text "True" - Flag MaximalDebugInfo -> Disp.text "3" - _ -> Disp.empty) - (\line str _ -> case () of - _ | str == "False" -> ParseOk [] (Flag NoDebugInfo) - | str == "True" -> ParseOk [] (Flag NormalDebugInfo) - | str == "0" -> ParseOk [] (Flag NoDebugInfo) - | str == "1" -> ParseOk [] (Flag MinimalDebugInfo) - | str == "2" -> ParseOk [] (Flag NormalDebugInfo) - | str == "3" -> ParseOk [] (Flag MaximalDebugInfo) - | lstr == "false" -> ParseOk [caseWarning] (Flag NoDebugInfo) - | lstr == "true" -> ParseOk [caseWarning] (Flag NormalDebugInfo) - | otherwise -> ParseFailed (NoParse name line) - where - lstr = lowercase str - caseWarning = PWarning $ - "The '" ++ name ++ "' field is case sensitive, use 'True' or 'False'.") - ] - - ++ toSavedConfig liftConfigExFlag - (configureExOptions ParseArgs) - [] [] - - ++ 
toSavedConfig liftInstallFlag - (installOptions ParseArgs) - ["dry-run", "only", "only-dependencies", "dependencies-only"] [] - - ++ toSavedConfig liftUploadFlag - (commandOptions uploadCommand ParseArgs) - ["verbose", "check"] [] - - ++ toSavedConfig liftReportFlag - (commandOptions reportCommand ParseArgs) - ["verbose", "username", "password"] [] - --FIXME: this is a hack, hiding the user name and password. - -- But otherwise it masks the upload ones. Either need to - -- share the options or make then distinct. In any case - -- they should probably be per-server. - - where - toSavedConfig lift options exclusions replacements = - [ lift (fromMaybe field replacement) - | opt <- options - , let field = viewAsFieldDescr opt - name = fieldName field - replacement = find ((== name) . fieldName) replacements - , name `notElem` exclusions ] - optional = Parse.option mempty . fmap toFlag - --- TODO: next step, make the deprecated fields elicit a warning. --- -deprecatedFieldDescriptions :: [FieldDescr SavedConfig] -deprecatedFieldDescriptions = - [ liftGlobalFlag $ - listField "repos" - (Disp.text . showRepo) parseRepo - (fromNubList . globalRemoteRepos) - (\rs cfg -> cfg { globalRemoteRepos = toNubList rs }) - , liftGlobalFlag $ - simpleField "cachedir" - (Disp.text . fromFlagOrDefault "") (optional parseFilePathQ) - globalCacheDir (\d cfg -> cfg { globalCacheDir = d }) - , liftUploadFlag $ - simpleField "hackage-username" - (Disp.text . fromFlagOrDefault "" . fmap unUsername) - (optional (fmap Username parseTokenQ)) - uploadUsername (\d cfg -> cfg { uploadUsername = d }) - , liftUploadFlag $ - simpleField "hackage-password" - (Disp.text . fromFlagOrDefault "" . fmap unPassword) - (optional (fmap Password parseTokenQ)) - uploadPassword (\d cfg -> cfg { uploadPassword = d }) - ] - ++ map (modifyFieldName ("user-"++) . liftUserInstallDirs) installDirsFields - ++ map (modifyFieldName ("global-"++) . 
liftGlobalInstallDirs) installDirsFields - where - optional = Parse.option mempty . fmap toFlag - modifyFieldName :: (String -> String) -> FieldDescr a -> FieldDescr a - modifyFieldName f d = d { fieldName = f (fieldName d) } - -liftUserInstallDirs :: FieldDescr (InstallDirs (Flag PathTemplate)) - -> FieldDescr SavedConfig -liftUserInstallDirs = liftField - savedUserInstallDirs (\flags conf -> conf { savedUserInstallDirs = flags }) - -liftGlobalInstallDirs :: FieldDescr (InstallDirs (Flag PathTemplate)) - -> FieldDescr SavedConfig -liftGlobalInstallDirs = liftField - savedGlobalInstallDirs (\flags conf -> conf { savedGlobalInstallDirs = flags }) - -liftGlobalFlag :: FieldDescr GlobalFlags -> FieldDescr SavedConfig -liftGlobalFlag = liftField - savedGlobalFlags (\flags conf -> conf { savedGlobalFlags = flags }) - -liftConfigFlag :: FieldDescr ConfigFlags -> FieldDescr SavedConfig -liftConfigFlag = liftField - savedConfigureFlags (\flags conf -> conf { savedConfigureFlags = flags }) - -liftConfigExFlag :: FieldDescr ConfigExFlags -> FieldDescr SavedConfig -liftConfigExFlag = liftField - savedConfigureExFlags (\flags conf -> conf { savedConfigureExFlags = flags }) - -liftInstallFlag :: FieldDescr InstallFlags -> FieldDescr SavedConfig -liftInstallFlag = liftField - savedInstallFlags (\flags conf -> conf { savedInstallFlags = flags }) - -liftUploadFlag :: FieldDescr UploadFlags -> FieldDescr SavedConfig -liftUploadFlag = liftField - savedUploadFlags (\flags conf -> conf { savedUploadFlags = flags }) - -liftReportFlag :: FieldDescr ReportFlags -> FieldDescr SavedConfig -liftReportFlag = liftField - savedReportFlags (\flags conf -> conf { savedReportFlags = flags }) - -parseConfig :: SavedConfig -> String -> ParseResult SavedConfig -parseConfig initial = \str -> do - fields <- readFields str - let (knownSections, others) = partition isKnownSection fields - config <- parse others - let user0 = savedUserInstallDirs config - global0 = savedGlobalInstallDirs config - 
(haddockFlags, user, global, paths, args) <- - foldM parseSections - (savedHaddockFlags config, user0, global0, [], []) - knownSections - return config { - savedConfigureFlags = (savedConfigureFlags config) { - configProgramPaths = paths, - configProgramArgs = args - }, - savedHaddockFlags = haddockFlags, - savedUserInstallDirs = user, - savedGlobalInstallDirs = global - } - - where - isKnownSection (ParseUtils.Section _ "haddock" _ _) = True - isKnownSection (ParseUtils.Section _ "install-dirs" _ _) = True - isKnownSection (ParseUtils.Section _ "program-locations" _ _) = True - isKnownSection (ParseUtils.Section _ "program-default-options" _ _) = True - isKnownSection _ = False - - parse = parseFields (configFieldDescriptions - ++ deprecatedFieldDescriptions) initial - - parseSections accum@(h,u,g,p,a) - (ParseUtils.Section _ "haddock" name fs) - | name == "" = do h' <- parseFields haddockFlagsFields h fs - return (h', u, g, p, a) - | otherwise = do - warning "The 'haddock' section should be unnamed" - return accum - parseSections accum@(h,u,g,p,a) - (ParseUtils.Section _ "install-dirs" name fs) - | name' == "user" = do u' <- parseFields installDirsFields u fs - return (h, u', g, p, a) - | name' == "global" = do g' <- parseFields installDirsFields g fs - return (h, u, g', p, a) - | otherwise = do - warning "The 'install-paths' section should be for 'user' or 'global'" - return accum - where name' = lowercase name - parseSections accum@(h,u,g,p,a) - (ParseUtils.Section _ "program-locations" name fs) - | name == "" = do p' <- parseFields withProgramsFields p fs - return (h, u, g, p', a) - | otherwise = do - warning "The 'program-locations' section should be unnamed" - return accum - parseSections accum@(h, u, g, p, a) - (ParseUtils.Section _ "program-default-options" name fs) - | name == "" = do a' <- parseFields withProgramOptionsFields a fs - return (h, u, g, p, a') - | otherwise = do - warning "The 'program-default-options' section should be unnamed" - return 
accum - parseSections accum f = do - warning $ "Unrecognized stanza on line " ++ show (lineNo f) - return accum - -showConfig :: SavedConfig -> String -showConfig = showConfigWithComments mempty - -showConfigWithComments :: SavedConfig -> SavedConfig -> String -showConfigWithComments comment vals = Disp.render $ - ppFields configFieldDescriptions mcomment vals - $+$ Disp.text "" - $+$ ppSection "haddock" "" haddockFlagsFields - (fmap savedHaddockFlags mcomment) (savedHaddockFlags vals) - $+$ Disp.text "" - $+$ installDirsSection "user" savedUserInstallDirs - $+$ Disp.text "" - $+$ installDirsSection "global" savedGlobalInstallDirs - $+$ Disp.text "" - $+$ configFlagsSection "program-locations" withProgramsFields - configProgramPaths - $+$ Disp.text "" - $+$ configFlagsSection "program-default-options" withProgramOptionsFields - configProgramArgs - where - mcomment = Just comment - installDirsSection name field = - ppSection "install-dirs" name installDirsFields - (fmap field mcomment) (field vals) - configFlagsSection name fields field = - ppSection name "" fields - (fmap (field . savedConfigureFlags) mcomment) - ((field . savedConfigureFlags) vals) - --- | Fields for the 'install-dirs' sections. -installDirsFields :: [FieldDescr (InstallDirs (Flag PathTemplate))] -installDirsFields = map viewAsFieldDescr installDirsOptions - --- | Fields for the 'haddock' section. -haddockFlagsFields :: [FieldDescr HaddockFlags] -haddockFlagsFields = [ field - | opt <- haddockOptions ParseArgs - , let field = viewAsFieldDescr opt - name = fieldName field - , name `notElem` exclusions ] - where - exclusions = ["verbose", "builddir"] - --- | Fields for the 'program-locations' section. -withProgramsFields :: [FieldDescr [(String, FilePath)]] -withProgramsFields = - map viewAsFieldDescr $ - programConfigurationPaths' (++ "-location") defaultProgramConfiguration - ParseArgs id (++) - --- | Fields for the 'program-default-options' section. 
-withProgramOptionsFields :: [FieldDescr [(String, [String])]] -withProgramOptionsFields = - map viewAsFieldDescr $ - programConfigurationOptions defaultProgramConfiguration ParseArgs id (++) - --- | Get the differences (as a pseudo code diff) between the user's --- '~/.cabal/config' and the one that cabal would generate if it didn't exist. -userConfigDiff :: GlobalFlags -> IO [String] -userConfigDiff globalFlags = do - userConfig <- loadConfig normal (globalConfigFile globalFlags) mempty - testConfig <- liftM2 mappend baseSavedConfig initialSavedConfig - return $ reverse . foldl' createDiff [] . M.toList - $ M.unionWith combine - (M.fromList . map justFst $ filterShow testConfig) - (M.fromList . map justSnd $ filterShow userConfig) - where - justFst (a, b) = (a, (Just b, Nothing)) - justSnd (a, b) = (a, (Nothing, Just b)) - - combine (Nothing, Just b) (Just a, Nothing) = (Just a, Just b) - combine (Just a, Nothing) (Nothing, Just b) = (Just a, Just b) - combine x y = error $ "Can't happen : userConfigDiff " ++ show x ++ " " ++ show y - - createDiff :: [String] -> (String, (Maybe String, Maybe String)) -> [String] - createDiff acc (key, (Just a, Just b)) - | a == b = acc - | otherwise = ("+ " ++ key ++ ": " ++ b) : ("- " ++ key ++ ": " ++ a) : acc - createDiff acc (key, (Nothing, Just b)) = ("+ " ++ key ++ ": " ++ b) : acc - createDiff acc (key, (Just a, Nothing)) = ("- " ++ key ++ ": " ++ a) : acc - createDiff acc (_, (Nothing, Nothing)) = acc - - filterShow :: SavedConfig -> [(String, String)] - filterShow cfg = map keyValueSplit - . filter (\s -> not (null s) && any (== ':') s) - . map nonComment - . lines - $ showConfig cfg - - nonComment [] = [] - nonComment ('-':'-':_) = [] - nonComment (x:xs) = x : nonComment xs - - topAndTail = reverse . dropWhile isSpace . reverse . 
dropWhile isSpace - - keyValueSplit s = - let (left, right) = break (== ':') s - in (topAndTail left, topAndTail (drop 1 right)) - - --- | Update the user's ~/.cabal/config' keeping the user's customizations. -userConfigUpdate :: Verbosity -> GlobalFlags -> IO () -userConfigUpdate verbosity globalFlags = do - userConfig <- loadConfig normal (globalConfigFile globalFlags) mempty - newConfig <- liftM2 mappend baseSavedConfig initialSavedConfig - commentConf <- commentSavedConfig - cabalFile <- defaultConfigFile - let backup = cabalFile ++ ".backup" - notice verbosity $ "Renaming " ++ cabalFile ++ " to " ++ backup ++ "." - renameFile cabalFile backup - notice verbosity $ "Writing merged config to " ++ cabalFile ++ "." - writeConfigFile cabalFile commentConf (newConfig `mappend` userConfig) diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Configure.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Configure.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Configure.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Configure.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,254 +0,0 @@ -{-# LANGUAGE CPP #-} ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.Configure --- Copyright : (c) David Himmelstrup 2005, --- Duncan Coutts 2005 --- License : BSD-like --- --- Maintainer : cabal-devel@haskell.org --- Portability : portable --- --- High level interface to configuring a package. 
------------------------------------------------------------------------------ -module Distribution.Client.Configure ( - configure, - chooseCabalVersion, - ) where - -import Distribution.Client.Dependency -import Distribution.Client.Dependency.Types (AllowNewer(..), isAllowNewer) -import qualified Distribution.Client.InstallPlan as InstallPlan -import Distribution.Client.InstallPlan (InstallPlan) -import Distribution.Client.IndexUtils as IndexUtils - ( getSourcePackages, getInstalledPackages ) -import Distribution.Client.Setup - ( ConfigExFlags(..), configureCommand, filterConfigureFlags ) -import Distribution.Client.Types as Source -import Distribution.Client.SetupWrapper - ( setupWrapper, SetupScriptOptions(..), defaultSetupScriptOptions ) -import Distribution.Client.Targets - ( userToPackageConstraint ) - -import Distribution.Simple.Compiler - ( Compiler, CompilerInfo, compilerInfo, PackageDB(..), PackageDBStack ) -import Distribution.Simple.Program (ProgramConfiguration ) -import Distribution.Simple.Setup - ( ConfigFlags(..), fromFlag, toFlag, flagToMaybe, fromFlagOrDefault ) -import Distribution.Simple.PackageIndex (InstalledPackageIndex) -import Distribution.Simple.Utils - ( defaultPackageDesc ) -import qualified Distribution.InstalledPackageInfo as Installed -import Distribution.Package - ( Package(..), packageName, Dependency(..), thisPackageVersion ) -import Distribution.PackageDescription.Parse - ( readPackageDescription ) -import Distribution.PackageDescription.Configuration - ( finalizePackageDescription ) -import Distribution.Version - ( anyVersion, thisVersion ) -import Distribution.Simple.Utils as Utils - ( notice, info, debug, die ) -import Distribution.System - ( Platform ) -import Distribution.Verbosity as Verbosity - ( Verbosity ) -import Distribution.Version - ( Version(..), VersionRange, orLaterVersion ) - -#if !MIN_VERSION_base(4,8,0) -import Data.Monoid (Monoid(..)) -#endif - --- | Choose the Cabal version such that the setup scripts compiled 
against this --- version will support the given command-line flags. -chooseCabalVersion :: ConfigExFlags -> Maybe Version -> VersionRange -chooseCabalVersion configExFlags maybeVersion = - maybe defaultVersionRange thisVersion maybeVersion - where - -- Cabal < 1.19.2 doesn't support '--exact-configuration' which is needed - -- for '--allow-newer' to work. - allowNewer = fromFlagOrDefault False $ - fmap isAllowNewer (configAllowNewer configExFlags) - - defaultVersionRange = if allowNewer - then orLaterVersion (Version [1,19,2] []) - else anyVersion - --- | Configure the package found in the local directory -configure :: Verbosity - -> PackageDBStack - -> [Repo] - -> Compiler - -> Platform - -> ProgramConfiguration - -> ConfigFlags - -> ConfigExFlags - -> [String] - -> IO () -configure verbosity packageDBs repos comp platform conf - configFlags configExFlags extraArgs = do - - installedPkgIndex <- getInstalledPackages verbosity comp packageDBs conf - sourcePkgDb <- getSourcePackages verbosity repos - - progress <- planLocalPackage verbosity comp platform configFlags configExFlags - installedPkgIndex sourcePkgDb - - notice verbosity "Resolving dependencies..." - maybePlan <- foldProgress logMsg (return . Left) (return . Right) - progress - case maybePlan of - Left message -> do - info verbosity message - setupWrapper verbosity (setupScriptOptions installedPkgIndex) Nothing - configureCommand (const configFlags) extraArgs - - Right installPlan -> case InstallPlan.ready installPlan of - [pkg@(ReadyPackage (SourcePackage _ _ (LocalUnpackedPackage _) _) _ _ _)] -> - configurePackage verbosity - (InstallPlan.planPlatform installPlan) - (InstallPlan.planCompiler installPlan) - (setupScriptOptions installedPkgIndex) - configFlags pkg extraArgs - - _ -> die $ "internal error: configure install plan should have exactly " - ++ "one local ready package." 
- - where - setupScriptOptions index = SetupScriptOptions { - useCabalVersion = chooseCabalVersion configExFlags - (flagToMaybe (configCabalVersion configExFlags)), - useCompiler = Just comp, - usePlatform = Just platform, - usePackageDB = packageDBs', - usePackageIndex = index', - useProgramConfig = conf, - useDistPref = fromFlagOrDefault - (useDistPref defaultSetupScriptOptions) - (configDistPref configFlags), - useLoggingHandle = Nothing, - useWorkingDir = Nothing, - useWin32CleanHack = False, - forceExternalSetupMethod = False, - setupCacheLock = Nothing - } - where - -- Hack: we typically want to allow the UserPackageDB for finding the - -- Cabal lib when compiling any Setup.hs even if we're doing a global - -- install. However we also allow looking in a specific package db. - (packageDBs', index') = - case packageDBs of - (GlobalPackageDB:dbs) | UserPackageDB `notElem` dbs - -> (GlobalPackageDB:UserPackageDB:dbs, Nothing) - -- but if the user is using an odd db stack, don't touch it - dbs -> (dbs, Just index) - - logMsg message rest = debug verbosity message >> rest - --- | Make an 'InstallPlan' for the unpacked package in the current directory, --- and all its dependencies. 
--- -planLocalPackage :: Verbosity -> Compiler - -> Platform - -> ConfigFlags -> ConfigExFlags - -> InstalledPackageIndex - -> SourcePackageDb - -> IO (Progress String String InstallPlan) -planLocalPackage verbosity comp platform configFlags configExFlags installedPkgIndex - (SourcePackageDb _ packagePrefs) = do - pkg <- readPackageDescription verbosity =<< defaultPackageDesc verbosity - solver <- chooseSolver verbosity (fromFlag $ configSolver configExFlags) (compilerInfo comp) - - let -- We create a local package and ask to resolve a dependency on it - localPkg = SourcePackage { - packageInfoId = packageId pkg, - Source.packageDescription = pkg, - packageSource = LocalUnpackedPackage ".", - packageDescrOverride = Nothing - } - - testsEnabled = fromFlagOrDefault False $ configTests configFlags - benchmarksEnabled = - fromFlagOrDefault False $ configBenchmarks configFlags - - resolverParams = - removeUpperBounds (fromFlagOrDefault AllowNewerNone $ - configAllowNewer configExFlags) - - . addPreferences - -- preferences from the config file or command line - [ PackageVersionPreference name ver - | Dependency name ver <- configPreferences configExFlags ] - - . addConstraints - -- version constraints from the config file or command line - -- TODO: should warn or error on constraints that are not on direct - -- deps or flag constraints not on the package in question. - (map userToPackageConstraint (configExConstraints configExFlags)) - - . addConstraints - -- package flags from the config file or command line - [ PackageConstraintFlags (packageName pkg) - (configConfigurationsFlags configFlags) ] - - . 
addConstraints - -- '--enable-tests' and '--enable-benchmarks' constraints from - -- command line - [ PackageConstraintStanzas (packageName pkg) $ - [ TestStanzas | testsEnabled ] ++ - [ BenchStanzas | benchmarksEnabled ] - ] - - $ standardInstallPolicy - installedPkgIndex - (SourcePackageDb mempty packagePrefs) - [SpecificSourcePackage localPkg] - - return (resolveDependencies platform (compilerInfo comp) solver resolverParams) - - --- | Call an installer for an 'SourcePackage' but override the configure --- flags with the ones given by the 'ReadyPackage'. In particular the --- 'ReadyPackage' specifies an exact 'FlagAssignment' and exactly --- versioned package dependencies. So we ignore any previous partial flag --- assignment or dependency constraints and use the new ones. --- --- NB: when updating this function, don't forget to also update --- 'installReadyPackage' in D.C.Install. -configurePackage :: Verbosity - -> Platform -> CompilerInfo - -> SetupScriptOptions - -> ConfigFlags - -> ReadyPackage - -> [String] - -> IO () -configurePackage verbosity platform comp scriptOptions configFlags - (ReadyPackage (SourcePackage _ gpkg _ _) flags stanzas deps) extraArgs = - - setupWrapper verbosity - scriptOptions (Just pkg) configureCommand configureFlags extraArgs - - where - configureFlags = filterConfigureFlags configFlags { - configConfigurationsFlags = flags, - -- We generate the legacy constraints as well as the new style precise - -- deps. In the end only one set gets passed to Setup.hs configure, - -- depending on the Cabal version we are talking to. - configConstraints = [ thisPackageVersion (packageId deppkg) - | deppkg <- deps ], - configDependencies = [ (packageName (Installed.sourcePackageId deppkg), - Installed.installedPackageId deppkg) - | deppkg <- deps ], - -- Use '--exact-configuration' if supported. 
- configExactConfiguration = toFlag True, - configVerbosity = toFlag verbosity, - configBenchmarks = toFlag (BenchStanzas `elem` stanzas), - configTests = toFlag (TestStanzas `elem` stanzas) - } - - pkg = case finalizePackageDescription flags - (const True) - platform comp [] (enableStanzas stanzas gpkg) of - Left _ -> error "finalizePackageDescription ReadyPackage failed" - Right (desc, _) -> desc diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/Assignment.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/Assignment.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/Assignment.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/Assignment.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,154 +0,0 @@ -module Distribution.Client.Dependency.Modular.Assignment where - -import Control.Applicative -import Control.Monad -import Data.Array as A -import Data.List as L -import Data.Map as M -import Data.Maybe -import Data.Graph -import Prelude hiding (pi) - -import Distribution.PackageDescription (FlagAssignment) -- from Cabal -import Distribution.Client.Types (OptionalStanza) - -import Distribution.Client.Dependency.Modular.Configured -import Distribution.Client.Dependency.Modular.Dependency -import Distribution.Client.Dependency.Modular.Flag -import Distribution.Client.Dependency.Modular.Index -import Distribution.Client.Dependency.Modular.Package -import Distribution.Client.Dependency.Modular.Version - --- | A (partial) package assignment. Qualified package names --- are associated with instances. -type PAssignment = Map QPN I - --- | A (partial) package preassignment. Qualified package names --- are associated with constrained instances. Constrained instances --- record constraints about the instances that can still be chosen, --- and in the extreme case fix a concrete instance. 
-type PPreAssignment = Map QPN (CI QPN) -type FAssignment = Map QFN Bool -type SAssignment = Map QSN Bool - --- | A (partial) assignment of variables. -data Assignment = A PAssignment FAssignment SAssignment - deriving (Show, Eq) - --- | A preassignment comprises knowledge about variables, but not --- necessarily fixed values. -data PreAssignment = PA PPreAssignment FAssignment SAssignment - --- | Extend a package preassignment. --- --- Takes the variable that causes the new constraints, a current preassignment --- and a set of new dependency constraints. --- --- We're trying to extend the preassignment with each dependency one by one. --- Each dependency is for a particular variable. We check if we already have --- constraints for that variable in the current preassignment. If so, we're --- trying to merge the constraints. --- --- Either returns a witness of the conflict that would arise during the merge, --- or the successfully extended assignment. -extend :: Var QPN -> PPreAssignment -> [Dep QPN] -> Either (ConflictSet QPN, [Dep QPN]) PPreAssignment -extend var pa qa = foldM (\ a (Dep qpn ci) -> - let ci' = M.findWithDefault (Constrained []) qpn a - in case (\ x -> M.insert qpn x a) <$> merge ci' ci of - Left (c, (d, d')) -> Left (c, L.map (Dep qpn) (simplify (P qpn) d d')) - Right x -> Right x) - pa qa - where - -- We're trying to remove trivial elements of the conflict. If we're just - -- making a choice pkg == instance, and pkg => pkg == instance is a part - -- of the conflict, then this info is clear from the context and does not - -- have to be repeated. - simplify v (Fixed _ (Goal var' _)) c | v == var && var' == var = [c] - simplify v c (Fixed _ (Goal var' _)) | v == var && var' == var = [c] - simplify _ c d = [c, d] - --- | Delivers an ordered list of fully configured packages. --- --- TODO: This function is (sort of) ok. However, there's an open bug --- w.r.t. unqualification. 
There might be several different instances --- of one package version chosen by the solver, which will lead to --- clashes. -toCPs :: Assignment -> RevDepMap -> [CP QPN] -toCPs (A pa fa sa) rdm = - let - -- get hold of the graph - g :: Graph - vm :: Vertex -> ((), QPN, [QPN]) - cvm :: QPN -> Maybe Vertex - -- Note that the RevDepMap contains duplicate dependencies. Therefore the nub. - (g, vm, cvm) = graphFromEdges (L.map (\ (x, xs) -> ((), x, nub xs)) - (M.toList rdm)) - tg :: Graph - tg = transposeG g - -- Topsort the dependency graph, yielding a list of pkgs in the right order. - -- The graph will still contain all the installed packages, and it might - -- contain duplicates, because several variables might actually resolve to - -- the same package in the presence of qualified package names. - ps :: [PI QPN] - ps = L.map ((\ (_, x, _) -> PI x (pa M.! x)) . vm) $ - topSort g - -- Determine the flags per package, by walking over and regrouping the - -- complete flag assignment by package. - fapp :: Map QPN FlagAssignment - fapp = M.fromListWith (++) $ - L.map (\ ((FN (PI qpn _) fn), b) -> (qpn, [(fn, b)])) $ - M.toList $ - fa - -- Stanzas per package. - sapp :: Map QPN [OptionalStanza] - sapp = M.fromListWith (++) $ - L.map (\ ((SN (PI qpn _) sn), b) -> (qpn, if b then [sn] else [])) $ - M.toList $ - sa - -- Dependencies per package. - depp :: QPN -> [PI QPN] - depp qpn = let v :: Vertex - v = fromJust (cvm qpn) - dvs :: [Vertex] - dvs = tg A.! v - in L.map (\ dv -> case vm dv of (_, x, _) -> PI x (pa M.! x)) dvs - in - L.map (\ pi@(PI qpn _) -> CP pi - (M.findWithDefault [] qpn fapp) - (M.findWithDefault [] qpn sapp) - (depp qpn)) - ps - --- | Finalize an assignment and a reverse dependency map. --- --- This is preliminary, and geared towards output right now. 
-finalize :: Index -> Assignment -> RevDepMap -> IO () -finalize idx (A pa fa _) rdm = - let - -- get hold of the graph - g :: Graph - vm :: Vertex -> ((), QPN, [QPN]) - (g, vm) = graphFromEdges' (L.map (\ (x, xs) -> ((), x, xs)) (M.toList rdm)) - -- topsort the dependency graph, yielding a list of pkgs in the right order - f :: [PI QPN] - f = L.filter (not . instPI) (L.map ((\ (_, x, _) -> PI x (pa M.! x)) . vm) (topSort g)) - fapp :: Map QPN [(QFN, Bool)] -- flags per package - fapp = M.fromListWith (++) $ - L.map (\ (qfn@(FN (PI qpn _) _), b) -> (qpn, [(qfn, b)])) $ M.toList $ fa - -- print one instance - ppi pi@(PI qpn _) = showPI pi ++ status pi ++ " " ++ pflags (M.findWithDefault [] qpn fapp) - -- print install status - status :: PI QPN -> String - status (PI (Q _ pn) _) = - case insts of - [] -> " (new)" - vs -> " (" ++ intercalate ", " (L.map showVer vs) ++ ")" - where insts = L.map (\ (I v _) -> v) $ L.filter isInstalled $ - M.keys (M.findWithDefault M.empty pn idx) - isInstalled (I _ (Inst _ )) = True - isInstalled _ = False - -- print flag assignment - pflags = unwords . L.map (uncurry showFBool) - in - -- show packages with associated flag assignments - putStr (unlines (L.map ppi f)) diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/Builder.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/Builder.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/Builder.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/Builder.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,170 +0,0 @@ -module Distribution.Client.Dependency.Modular.Builder where - --- Building the search tree. --- --- In this phase, we build a search tree that is too large, i.e, it contains --- invalid solutions. We keep track of the open goals at each point. 
We --- nondeterministically pick an open goal (via a goal choice node), create --- subtrees according to the index and the available solutions, and extend the --- set of open goals by superficially looking at the dependencies recorded in --- the index. --- --- For each goal, we keep track of all the *reasons* why it is being --- introduced. These are for debugging and error messages, mainly. A little bit --- of care has to be taken due to the way we treat flags. If a package has --- flag-guarded dependencies, we cannot introduce them immediately. Instead, we --- store the entire dependency. - -import Control.Monad.Reader hiding (sequence, mapM) -import Data.List as L -import Data.Map as M -import Prelude hiding (sequence, mapM) - -import Distribution.Client.Dependency.Modular.Dependency -import Distribution.Client.Dependency.Modular.Flag -import Distribution.Client.Dependency.Modular.Index -import Distribution.Client.Dependency.Modular.Package -import Distribution.Client.Dependency.Modular.PSQ as P -import Distribution.Client.Dependency.Modular.Tree - --- | The state needed during the build phase of the search tree. -data BuildState = BS { - index :: Index, -- ^ information about packages and their dependencies - scope :: Scope, -- ^ information about encapsulations - rdeps :: RevDepMap, -- ^ set of all package goals, completed and open, with reverse dependencies - open :: PSQ OpenGoal (), -- ^ set of still open goals (flag and package goals) - next :: BuildType -- ^ kind of node to generate next -} - --- | Extend the set of open goals with the new goals listed. --- --- We also adjust the map of overall goals, and keep track of the --- reverse dependencies of each of the goals. 
-extendOpen :: QPN -> [OpenGoal] -> BuildState -> BuildState -extendOpen qpn' gs s@(BS { rdeps = gs', open = o' }) = go gs' o' gs - where - go :: RevDepMap -> PSQ OpenGoal () -> [OpenGoal] -> BuildState - go g o [] = s { rdeps = g, open = o } - go g o (ng@(OpenGoal (Flagged _ _ _ _) _gr) : ngs) = go g (cons ng () o) ngs - -- Note: for 'Flagged' goals, we always insert, so later additions win. - -- This is important, because in general, if a goal is inserted twice, - -- the later addition will have better dependency information. - go g o (ng@(OpenGoal (Stanza _ _ ) _gr) : ngs) = go g (cons ng () o) ngs - go g o (ng@(OpenGoal (Simple (Dep qpn _)) _gr) : ngs) - | qpn == qpn' = go g o ngs - -- we ignore self-dependencies at this point; TODO: more care may be needed - | qpn `M.member` g = go (M.adjust (qpn':) qpn g) o ngs - | otherwise = go (M.insert qpn [qpn'] g) (cons ng () o) ngs - -- code above is correct; insert/adjust have different arg order - --- | Update the current scope by taking into account the encapsulations that --- are defined for the current package. -establishScope :: QPN -> Encaps -> BuildState -> BuildState -establishScope (Q pp pn) ecs s = - s { scope = L.foldl (\ m e -> M.insert e pp' m) (scope s) ecs } - where - pp' = pn : pp -- new path - --- | Given the current scope, qualify all the package names in the given set of --- dependencies and then extend the set of open goals accordingly. 
-scopedExtendOpen :: QPN -> I -> QGoalReasonChain -> FlaggedDeps PN -> FlagInfo -> - BuildState -> BuildState -scopedExtendOpen qpn i gr fdeps fdefs s = extendOpen qpn gs s - where - sc = scope s - -- Qualify all package names - qfdeps = L.map (fmap (qualify sc)) fdeps -- qualify all the package names - -- Introduce all package flags - qfdefs = L.map (\ (fn, b) -> Flagged (FN (PI qpn i) fn) b [] []) $ M.toList fdefs - -- Combine new package and flag goals - gs = L.map (flip OpenGoal gr) (qfdefs ++ qfdeps) - -- NOTE: - -- - -- In the expression @qfdefs ++ qfdeps@ above, flags occur potentially - -- multiple times, both via the flag declaration and via dependencies. - -- The order is potentially important, because the occurrences via - -- dependencies may record flag-dependency information. After a number - -- of bugs involving computing this information incorrectly, however, - -- we're currently not using carefully computed inter-flag dependencies - -- anymore, but instead use 'simplifyVar' when computing conflict sets - -- to map all flags of one package to a single flag for conflict set - -- purposes, thereby treating them all as interdependent. - -- - -- If we ever move to a more clever algorithm again, then the line above - -- needs to be looked at very carefully, and probably be replaced by - -- more systematically computed flag dependency information. - --- | Datatype that encodes what to build next -data BuildType = - Goals -- ^ build a goal choice node - | OneGoal OpenGoal -- ^ build a node for this goal - | Instance QPN I PInfo QGoalReasonChain -- ^ build a tree for a concrete instance - deriving Show - -build :: BuildState -> Tree (QGoalReasonChain, Scope) -build = ana go - where - go :: BuildState -> TreeF (QGoalReasonChain, Scope) BuildState - - -- If we have a choice between many goals, we just record the choice in - -- the tree. We select each open goal in turn, and before we descend, remove - -- it from the queue of open goals. 
- go bs@(BS { rdeps = rds, open = gs, next = Goals }) - | P.null gs = DoneF rds - | otherwise = GoalChoiceF (P.mapWithKey (\ g (_sc, gs') -> bs { next = OneGoal g, open = gs' }) - (P.splits gs)) - - -- If we have already picked a goal, then the choice depends on the kind - -- of goal. - -- - -- For a package, we look up the instances available in the global info, - -- and then handle each instance in turn. - go bs@(BS { index = idx, scope = sc, next = OneGoal (OpenGoal (Simple (Dep qpn@(Q _ pn) _)) gr) }) = - case M.lookup pn idx of - Nothing -> FailF (toConflictSet (Goal (P qpn) gr)) (BuildFailureNotInIndex pn) - Just pis -> PChoiceF qpn (gr, sc) (P.fromList (L.map (\ (i, info) -> - (i, bs { next = Instance qpn i info gr })) - (M.toList pis))) - -- TODO: data structure conversion is rather ugly here - - -- For a flag, we create only two subtrees, and we create them in the order - -- that is indicated by the flag default. - -- - -- TODO: Should we include the flag default in the tree? - go bs@(BS { scope = sc, next = OneGoal (OpenGoal (Flagged qfn@(FN (PI qpn _) _) (FInfo b m w) t f) gr) }) = - FChoiceF qfn (gr, sc) (w || trivial) m (P.fromList (reorder b - [(True, (extendOpen qpn (L.map (flip OpenGoal (FDependency qfn True : gr)) t) bs) { next = Goals }), - (False, (extendOpen qpn (L.map (flip OpenGoal (FDependency qfn False : gr)) f) bs) { next = Goals })])) - where - reorder True = id - reorder False = reverse - trivial = L.null t && L.null f - - go bs@(BS { scope = sc, next = OneGoal (OpenGoal (Stanza qsn@(SN (PI qpn _) _) t) gr) }) = - SChoiceF qsn (gr, sc) trivial (P.fromList - [(False, bs { next = Goals }), - (True, (extendOpen qpn (L.map (flip OpenGoal (SDependency qsn : gr)) t) bs) { next = Goals })]) - where - trivial = L.null t - - -- For a particular instance, we change the state: we update the scope, - -- and furthermore we update the set of goals. - -- - -- TODO: We could inline this above. 
- go bs@(BS { next = Instance qpn i (PInfo fdeps fdefs ecs _) gr }) = - go ((establishScope qpn ecs - (scopedExtendOpen qpn i (PDependency (PI qpn i) : gr) fdeps fdefs bs)) - { next = Goals }) - --- | Interface to the tree builder. Just takes an index and a list of package names, --- and computes the initial state and then the tree from there. -buildTree :: Index -> Bool -> [PN] -> Tree (QGoalReasonChain, Scope) -buildTree idx ind igs = - build (BS idx sc - (M.fromList (L.map (\ qpn -> (qpn, [])) qpns)) - (P.fromList (L.map (\ qpn -> (OpenGoal (Simple (Dep qpn (Constrained []))) [UserGoal], ())) qpns)) - Goals) - where - sc | ind = makeIndependent igs - | otherwise = emptyScope - qpns = L.map (qualify sc) igs diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/ConfiguredConversion.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/ConfiguredConversion.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/ConfiguredConversion.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/ConfiguredConversion.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,40 +0,0 @@ -module Distribution.Client.Dependency.Modular.ConfiguredConversion where - -import Data.Maybe -import Prelude hiding (pi) - -import Distribution.Client.InstallPlan -import Distribution.Client.Types -import Distribution.Compiler -import qualified Distribution.Client.PackageIndex as CI -import qualified Distribution.Simple.PackageIndex as SI -import Distribution.System - -import Distribution.Client.Dependency.Modular.Configured -import Distribution.Client.Dependency.Modular.Package - -mkPlan :: Platform -> CompilerInfo -> - SI.InstalledPackageIndex -> CI.PackageIndex SourcePackage -> - [CP QPN] -> Either [PlanProblem] InstallPlan -mkPlan plat comp iidx sidx cps = - new plat comp (SI.fromList (map (convCP iidx sidx) cps)) - -convCP :: SI.InstalledPackageIndex -> CI.PackageIndex SourcePackage -> - CP 
QPN -> PlanPackage -convCP iidx sidx (CP qpi fa es ds) = - case convPI qpi of - Left pi -> PreExisting $ InstalledPackage - (fromJust $ SI.lookupInstalledPackageId iidx pi) - (map convPI' ds) - Right pi -> Configured $ ConfiguredPackage - (fromJust $ CI.lookupPackageId sidx pi) - fa - es - (map convPI' ds) - -convPI :: PI QPN -> Either InstalledPackageId PackageId -convPI (PI _ (I _ (Inst pi))) = Left pi -convPI qpi = Right $ convPI' qpi - -convPI' :: PI QPN -> PackageId -convPI' (PI (Q _ pn) (I v _)) = PackageIdentifier pn v diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/Configured.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/Configured.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/Configured.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/Configured.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,10 +0,0 @@ -module Distribution.Client.Dependency.Modular.Configured where - -import Distribution.PackageDescription (FlagAssignment) -- from Cabal -import Distribution.Client.Types (OptionalStanza) - -import Distribution.Client.Dependency.Modular.Package - --- | A configured package is a package instance together with --- a flag assignment and complete dependencies. 
-data CP qpn = CP (PI qpn) FlagAssignment [OptionalStanza] [PI qpn] diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/Dependency.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/Dependency.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/Dependency.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/Dependency.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,181 +0,0 @@ -{-# LANGUAGE DeriveFunctor #-} -module Distribution.Client.Dependency.Modular.Dependency where - -import Prelude hiding (pi) - -import Data.List as L -import Data.Map as M -import Data.Set as S - -import Distribution.Client.Dependency.Modular.Flag -import Distribution.Client.Dependency.Modular.Package -import Distribution.Client.Dependency.Modular.Version - --- | The type of variables that play a role in the solver. --- Note that the tree currently does not use this type directly, --- and rather has separate tree nodes for the different types of --- variables. This fits better with the fact that in most cases, --- these have to be treated differently. --- --- TODO: This isn't the ideal location to declare the type, --- but we need them for constrained instances. -data Var qpn = P qpn | F (FN qpn) | S (SN qpn) - deriving (Eq, Ord, Show, Functor) - --- | For computing conflict sets, we map flag choice vars to a --- single flag choice. This means that all flag choices are treated --- as interdependent. So if one flag of a package ends up in a --- conflict set, then all flags are being treated as being part of --- the conflict set. 
-simplifyVar :: Var qpn -> Var qpn -simplifyVar (P qpn) = P qpn -simplifyVar (F (FN pi _)) = F (FN pi (mkFlag "flag")) -simplifyVar (S qsn) = S qsn - -showVar :: Var QPN -> String -showVar (P qpn) = showQPN qpn -showVar (F qfn) = showQFN qfn -showVar (S qsn) = showQSN qsn - -type ConflictSet qpn = Set (Var qpn) - -showCS :: ConflictSet QPN -> String -showCS = intercalate ", " . L.map showVar . S.toList - --- | Constrained instance. If the choice has already been made, this is --- a fixed instance, and we record the package name for which the choice --- is for convenience. Otherwise, it is a list of version ranges paired with --- the goals / variables that introduced them. -data CI qpn = Fixed I (Goal qpn) | Constrained [VROrigin qpn] - deriving (Eq, Show, Functor) - -instance ResetGoal CI where - resetGoal g (Fixed i _) = Fixed i g - resetGoal g (Constrained vrs) = Constrained (L.map (\ (x, y) -> (x, resetGoal g y)) vrs) - -type VROrigin qpn = (VR, Goal qpn) - --- | Helper function to collapse a list of version ranges with origins into --- a single, simplified, version range. -collapse :: [VROrigin qpn] -> VR -collapse = simplifyVR . L.foldr (.&&.) anyVR . L.map fst - -showCI :: CI QPN -> String -showCI (Fixed i _) = "==" ++ showI i -showCI (Constrained vr) = showVR (collapse vr) - --- | Merge constrained instances. We currently adopt a lazy strategy for --- merging, i.e., we only perform actual checking if one of the two choices --- is fixed. If the merge fails, we return a conflict set indicating the --- variables responsible for the failure, as well as the two conflicting --- fragments. --- --- Note that while there may be more than one conflicting pair of version --- ranges, we only return the first we find. --- --- TODO: Different pairs might have different conflict sets. We're --- obviously interested to return a conflict that has a "better" conflict --- set in the sense the it contains variables that allow us to backjump --- further. 
We might apply some heuristics here, such as to change the --- order in which we check the constraints. -merge :: Ord qpn => CI qpn -> CI qpn -> Either (ConflictSet qpn, (CI qpn, CI qpn)) (CI qpn) -merge c@(Fixed i g1) d@(Fixed j g2) - | i == j = Right c - | otherwise = Left (S.union (toConflictSet g1) (toConflictSet g2), (c, d)) -merge c@(Fixed (I v _) g1) (Constrained rs) = go rs -- I tried "reverse rs" here, but it seems to slow things down ... - where - go [] = Right c - go (d@(vr, g2) : vrs) - | checkVR vr v = go vrs - | otherwise = Left (S.union (toConflictSet g1) (toConflictSet g2), (c, Constrained [d])) -merge c@(Constrained _) d@(Fixed _ _) = merge d c -merge (Constrained rs) (Constrained ss) = Right (Constrained (rs ++ ss)) - - -type FlaggedDeps qpn = [FlaggedDep qpn] - --- | Flagged dependencies can either be plain dependency constraints, --- or flag-dependent dependency trees. -data FlaggedDep qpn = - Flagged (FN qpn) FInfo (TrueFlaggedDeps qpn) (FalseFlaggedDeps qpn) - | Stanza (SN qpn) (TrueFlaggedDeps qpn) - | Simple (Dep qpn) - deriving (Eq, Show, Functor) - -type TrueFlaggedDeps qpn = FlaggedDeps qpn -type FalseFlaggedDeps qpn = FlaggedDeps qpn - --- | A dependency (constraint) associates a package name with a --- constrained instance. -data Dep qpn = Dep qpn (CI qpn) - deriving (Eq, Show, Functor) - -showDep :: Dep QPN -> String -showDep (Dep qpn (Fixed i (Goal v _)) ) = - (if P qpn /= v then showVar v ++ " => " else "") ++ - showQPN qpn ++ "==" ++ showI i -showDep (Dep qpn (Constrained [(vr, Goal v _)])) = - showVar v ++ " => " ++ showQPN qpn ++ showVR vr -showDep (Dep qpn ci ) = - showQPN qpn ++ showCI ci - -instance ResetGoal Dep where - resetGoal g (Dep qpn ci) = Dep qpn (resetGoal g ci) - --- | A map containing reverse dependencies between qualified --- package names. -type RevDepMap = Map QPN [QPN] - --- | Goals are solver variables paired with information about --- why they have been introduced. 
-data Goal qpn = Goal (Var qpn) (GoalReasonChain qpn) - deriving (Eq, Show, Functor) - -class ResetGoal f where - resetGoal :: Goal qpn -> f qpn -> f qpn - -instance ResetGoal Goal where - resetGoal = const - --- | For open goals as they occur during the build phase, we need to store --- additional information about flags. -data OpenGoal = OpenGoal (FlaggedDep QPN) QGoalReasonChain - deriving (Eq, Show) - --- | Reasons why a goal can be added to a goal set. -data GoalReason qpn = - UserGoal - | PDependency (PI qpn) - | FDependency (FN qpn) Bool - | SDependency (SN qpn) - deriving (Eq, Show, Functor) - --- | The first element is the immediate reason. The rest are the reasons --- for the reasons ... -type GoalReasonChain qpn = [GoalReason qpn] - -type QGoalReasonChain = GoalReasonChain QPN - -goalReasonToVars :: GoalReason qpn -> ConflictSet qpn -goalReasonToVars UserGoal = S.empty -goalReasonToVars (PDependency (PI qpn _)) = S.singleton (P qpn) -goalReasonToVars (FDependency qfn _) = S.singleton (simplifyVar (F qfn)) -goalReasonToVars (SDependency qsn) = S.singleton (S qsn) - -goalReasonChainToVars :: Ord qpn => GoalReasonChain qpn -> ConflictSet qpn -goalReasonChainToVars = S.unions . L.map goalReasonToVars - -goalReasonChainsToVars :: Ord qpn => [GoalReasonChain qpn] -> ConflictSet qpn -goalReasonChainsToVars = S.unions . L.map goalReasonChainToVars - --- | Closes a goal, i.e., removes all the extraneous information that we --- need only during the build phase. -close :: OpenGoal -> Goal QPN -close (OpenGoal (Simple (Dep qpn _)) gr) = Goal (P qpn) gr -close (OpenGoal (Flagged qfn _ _ _ ) gr) = Goal (F qfn) gr -close (OpenGoal (Stanza qsn _) gr) = Goal (S qsn) gr - --- | Compute a conflic set from a goal. The conflict set contains the --- closure of goal reasons as well as the variable of the goal itself. 
-toConflictSet :: Ord qpn => Goal qpn -> ConflictSet qpn -toConflictSet (Goal g grs) = S.insert (simplifyVar g) (goalReasonChainToVars grs) diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/Explore.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/Explore.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/Explore.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/Explore.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,149 +0,0 @@ -module Distribution.Client.Dependency.Modular.Explore where - -import Control.Applicative as A -import Data.Foldable -import Data.List as L -import Data.Map as M -import Data.Set as S - -import Distribution.Client.Dependency.Modular.Assignment -import Distribution.Client.Dependency.Modular.Dependency -import Distribution.Client.Dependency.Modular.Log -import Distribution.Client.Dependency.Modular.Message -import Distribution.Client.Dependency.Modular.Package -import Distribution.Client.Dependency.Modular.PSQ as P -import Distribution.Client.Dependency.Modular.Tree - --- | Backjumping. --- --- A tree traversal that tries to propagate conflict sets --- up the tree from the leaves, and thereby cut branches. --- All the tricky things are done in the function 'combine'. -backjump :: Tree a -> Tree (Maybe (ConflictSet QPN)) -backjump = snd . 
cata go - where - go (FailF c fr) = (Just c, Fail c fr) - go (DoneF rdm ) = (Nothing, Done rdm) - go (PChoiceF qpn _ ts) = (c, PChoice qpn c (P.fromList ts')) - where - ~(c, ts') = combine (P qpn) (P.toList ts) S.empty - go (FChoiceF qfn _ b m ts) = (c, FChoice qfn c b m (P.fromList ts')) - where - ~(c, ts') = combine (F qfn) (P.toList ts) S.empty - go (SChoiceF qsn _ b ts) = (c, SChoice qsn c b (P.fromList ts')) - where - ~(c, ts') = combine (S qsn) (P.toList ts) S.empty - go (GoalChoiceF ts) = (c, GoalChoice (P.fromList ts')) - where - ~(cs, ts') = unzip $ L.map (\ (k, (x, v)) -> (x, (k, v))) $ P.toList ts - c = case cs of [] -> Nothing - d : _ -> d - --- | The 'combine' function is at the heart of backjumping. It takes --- the variable we're currently considering, and a list of children --- annotated with their respective conflict sets, and an accumulator --- for the result conflict set. It returns a combined conflict set --- for the parent node, and a (potentially shortened) list of children --- with the annotations removed. --- --- It is *essential* that we produce the results as early as possible. --- In particular, we have to produce the list of children prior to --- traversing the entire list -- otherwise we lose the desired behaviour --- of being able to traverse the tree from left to right incrementally. --- --- We can shorten the list of children if we find an individual conflict --- set that does not contain the current variable. In this case, we can --- just lift the conflict set to the current level, because the current --- level cannot possibly have contributed to this conflict, so no other --- choice at the current level would avoid the conflict. --- --- If any of the children might contain a successful solution --- (indicated by Nothing), then Nothing will be the combined --- conflict set. If all children contain conflict sets, we can --- take the union as the combined conflict set. 
-combine :: Var QPN -> [(a, (Maybe (ConflictSet QPN), b))] -> - ConflictSet QPN -> (Maybe (ConflictSet QPN), [(a, b)]) -combine _ [] c = (Just c, []) -combine var ((k, ( d, v)) : xs) c = (\ ~(e, ys) -> (e, (k, v) : ys)) $ - case d of - Just e | not (simplifyVar var `S.member` e) -> (Just e, []) - | otherwise -> combine var xs (e `S.union` c) - Nothing -> (Nothing, snd $ combine var xs S.empty) - --- | Naive backtracking exploration of the search tree. This will yield correct --- assignments only once the tree itself is validated. -explore :: Alternative m => Tree a -> (Assignment -> m (Assignment, RevDepMap)) -explore = cata go - where - go (FailF _ _) _ = A.empty - go (DoneF rdm) a = pure (a, rdm) - go (PChoiceF qpn _ ts) (A pa fa sa) = - asum $ -- try children in order, - P.mapWithKey -- when descending ... - (\ k r -> r (A (M.insert qpn k pa) fa sa)) -- record the pkg choice - ts - go (FChoiceF qfn _ _ _ ts) (A pa fa sa) = - asum $ -- try children in order, - P.mapWithKey -- when descending ... - (\ k r -> r (A pa (M.insert qfn k fa) sa)) -- record the flag choice - ts - go (SChoiceF qsn _ _ ts) (A pa fa sa) = - asum $ -- try children in order, - P.mapWithKey -- when descending ... - (\ k r -> r (A pa fa (M.insert qsn k sa))) -- record the flag choice - ts - go (GoalChoiceF ts) a = - casePSQ ts A.empty -- empty goal choice is an internal error - (\ _k v _xs -> v a) -- commit to the first goal choice - --- | Version of 'explore' that returns a 'Log'. -exploreLog :: Tree (Maybe (ConflictSet QPN)) -> - (Assignment -> Log Message (Assignment, RevDepMap)) -exploreLog = cata go - where - go (FailF c fr) _ = failWith (Failure c fr) - go (DoneF rdm) a = succeedWith Success (a, rdm) - go (PChoiceF qpn c ts) (A pa fa sa) = - backjumpInfo c $ - asum $ -- try children in order, - P.mapWithKey -- when descending ... - (\ k r -> tryWith (TryP (PI qpn k)) $ -- log and ... 
- r (A (M.insert qpn k pa) fa sa)) -- record the pkg choice - ts - go (FChoiceF qfn c _ _ ts) (A pa fa sa) = - backjumpInfo c $ - asum $ -- try children in order, - P.mapWithKey -- when descending ... - (\ k r -> tryWith (TryF qfn k) $ -- log and ... - r (A pa (M.insert qfn k fa) sa)) -- record the pkg choice - ts - go (SChoiceF qsn c _ ts) (A pa fa sa) = - backjumpInfo c $ - asum $ -- try children in order, - P.mapWithKey -- when descending ... - (\ k r -> tryWith (TryS qsn k) $ -- log and ... - r (A pa fa (M.insert qsn k sa))) -- record the pkg choice - ts - go (GoalChoiceF ts) a = - casePSQ ts - (failWith (Failure S.empty EmptyGoalChoice)) -- empty goal choice is an internal error - (\ k v _xs -> continueWith (Next (close k)) (v a)) -- commit to the first goal choice - --- | Add in information about pruned trees. --- --- TODO: This isn't quite optimal, because we do not merely report the shape of the --- tree, but rather make assumptions about where that shape originated from. It'd be --- better if the pruning itself would leave information that we could pick up at this --- point. -backjumpInfo :: Maybe (ConflictSet QPN) -> Log Message a -> Log Message a -backjumpInfo c m = m <|> case c of -- important to produce 'm' before matching on 'c'! - Nothing -> A.empty - Just cs -> failWith (Failure cs Backjump) - --- | Interface. -exploreTree :: Alternative m => Tree a -> m (Assignment, RevDepMap) -exploreTree t = explore t (A M.empty M.empty M.empty) - --- | Interface. 
-exploreTreeLog :: Tree (Maybe (ConflictSet QPN)) -> Log Message (Assignment, RevDepMap) -exploreTreeLog t = exploreLog t (A M.empty M.empty M.empty) diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/Flag.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/Flag.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/Flag.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/Flag.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,70 +0,0 @@ -{-# LANGUAGE DeriveFunctor #-} -module Distribution.Client.Dependency.Modular.Flag where - -import Data.Map as M -import Prelude hiding (pi) - -import Distribution.PackageDescription hiding (Flag) -- from Cabal - -import Distribution.Client.Dependency.Modular.Package -import Distribution.Client.Types (OptionalStanza(..)) - --- | Flag name. Consists of a package instance and the flag identifier itself. -data FN qpn = FN (PI qpn) Flag - deriving (Eq, Ord, Show, Functor) - --- | Extract the package name from a flag name. -getPN :: FN qpn -> qpn -getPN (FN (PI qpn _) _) = qpn - --- | Flag identifier. Just a string. -type Flag = FlagName - -unFlag :: Flag -> String -unFlag (FlagName fn) = fn - -mkFlag :: String -> Flag -mkFlag fn = FlagName fn - --- | Flag info. Default value, whether the flag is manual, and --- whether the flag is weak. Manual flags can only be set explicitly. --- Weak flags are typically deferred by the solver. -data FInfo = FInfo { fdefault :: Bool, fmanual :: Bool, fweak :: Bool } - deriving (Eq, Ord, Show) - --- | Flag defaults. -type FlagInfo = Map Flag FInfo - --- | Qualified flag name. -type QFN = FN QPN - --- | Stanza name. Paired with a package name, much like a flag. -data SN qpn = SN (PI qpn) OptionalStanza - deriving (Eq, Ord, Show, Functor) - --- | Qualified stanza name. 
-type QSN = SN QPN - -unStanza :: OptionalStanza -> String -unStanza TestStanzas = "test" -unStanza BenchStanzas = "bench" - -showQFNBool :: QFN -> Bool -> String -showQFNBool qfn@(FN pi _f) b = showPI pi ++ ":" ++ showFBool qfn b - -showQSNBool :: QSN -> Bool -> String -showQSNBool qsn@(SN pi _f) b = showPI pi ++ ":" ++ showSBool qsn b - -showFBool :: FN qpn -> Bool -> String -showFBool (FN _ f) True = "+" ++ unFlag f -showFBool (FN _ f) False = "-" ++ unFlag f - -showSBool :: SN qpn -> Bool -> String -showSBool (SN _ s) True = "*" ++ unStanza s -showSBool (SN _ s) False = "!" ++ unStanza s - -showQFN :: QFN -> String -showQFN (FN pi f) = showPI pi ++ ":" ++ unFlag f - -showQSN :: QSN -> String -showQSN (SN pi f) = showPI pi ++ ":" ++ unStanza f diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/IndexConversion.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/IndexConversion.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/IndexConversion.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/IndexConversion.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,200 +0,0 @@ -module Distribution.Client.Dependency.Modular.IndexConversion where - -import Data.List as L -import Data.Map as M -import Data.Maybe -import Prelude hiding (pi) - -import qualified Distribution.Client.PackageIndex as CI -import Distribution.Client.Types -import Distribution.Compiler -import Distribution.InstalledPackageInfo as IPI -import Distribution.Package -- from Cabal -import Distribution.PackageDescription as PD -- from Cabal -import qualified Distribution.Simple.PackageIndex as SI -import Distribution.System - -import Distribution.Client.Dependency.Modular.Dependency as D -import Distribution.Client.Dependency.Modular.Flag as F -import Distribution.Client.Dependency.Modular.Index -import Distribution.Client.Dependency.Modular.Package -import 
Distribution.Client.Dependency.Modular.Tree -import Distribution.Client.Dependency.Modular.Version - --- | Convert both the installed package index and the source package --- index into one uniform solver index. --- --- We use 'allPackagesBySourcePackageId' for the installed package index --- because that returns us several instances of the same package and version --- in order of preference. This allows us in principle to \"shadow\" --- packages if there are several installed packages of the same version. --- There are currently some shortcomings in both GHC and Cabal in --- resolving these situations. However, the right thing to do is to --- fix the problem there, so for now, shadowing is only activated if --- explicitly requested. -convPIs :: OS -> Arch -> CompilerInfo -> Bool -> Bool -> - SI.InstalledPackageIndex -> CI.PackageIndex SourcePackage -> Index -convPIs os arch comp sip strfl iidx sidx = - mkIndex (convIPI' sip iidx ++ convSPI' os arch comp strfl sidx) - --- | Convert a Cabal installed package index to the simpler, --- more uniform index format of the solver. -convIPI' :: Bool -> SI.InstalledPackageIndex -> [(PN, I, PInfo)] -convIPI' sip idx = - -- apply shadowing whenever there are multiple installed packages with - -- the same version - [ maybeShadow (convIP idx pkg) - | (_pkgid, pkgs) <- SI.allPackagesBySourcePackageId idx - , (maybeShadow, pkg) <- zip (id : repeat shadow) pkgs ] - where - - -- shadowing is recorded in the package info - shadow (pn, i, PInfo fdeps fds encs _) | sip = (pn, i, PInfo fdeps fds encs (Just Shadowed)) - shadow x = x - -convIPI :: Bool -> SI.InstalledPackageIndex -> Index -convIPI sip = mkIndex . convIPI' sip - --- | Convert a single installed package into the solver-specific format. 
-convIP :: SI.InstalledPackageIndex -> InstalledPackageInfo -> (PN, I, PInfo) -convIP idx ipi = - let ipid = IPI.installedPackageId ipi - i = I (pkgVersion (sourcePackageId ipi)) (Inst ipid) - pn = pkgName (sourcePackageId ipi) - in case mapM (convIPId pn idx) (IPI.depends ipi) of - Nothing -> (pn, i, PInfo [] M.empty [] (Just Broken)) - Just fds -> (pn, i, PInfo fds M.empty [] Nothing) --- TODO: Installed packages should also store their encapsulations! - --- | Convert dependencies specified by an installed package id into --- flagged dependencies of the solver. --- --- May return Nothing if the package can't be found in the index. That --- indicates that the original package having this dependency is broken --- and should be ignored. -convIPId :: PN -> SI.InstalledPackageIndex -> InstalledPackageId -> Maybe (FlaggedDep PN) -convIPId pn' idx ipid = - case SI.lookupInstalledPackageId idx ipid of - Nothing -> Nothing - Just ipi -> let i = I (pkgVersion (sourcePackageId ipi)) (Inst ipid) - pn = pkgName (sourcePackageId ipi) - in Just (D.Simple (Dep pn (Fixed i (Goal (P pn') [])))) - --- | Convert a cabal-install source package index to the simpler, --- more uniform index format of the solver. -convSPI' :: OS -> Arch -> CompilerInfo -> Bool -> - CI.PackageIndex SourcePackage -> [(PN, I, PInfo)] -convSPI' os arch cinfo strfl = L.map (convSP os arch cinfo strfl) . CI.allPackages - -convSPI :: OS -> Arch -> CompilerInfo -> Bool -> - CI.PackageIndex SourcePackage -> Index -convSPI os arch cinfo strfl = mkIndex . convSPI' os arch cinfo strfl - --- | Convert a single source package into the solver-specific format. 
-convSP :: OS -> Arch -> CompilerInfo -> Bool -> SourcePackage -> (PN, I, PInfo) -convSP os arch cinfo strfl (SourcePackage (PackageIdentifier pn pv) gpd _ _pl) = - let i = I pv InRepo - in (pn, i, convGPD os arch cinfo strfl (PI pn i) gpd) - --- We do not use 'flattenPackageDescription' or 'finalizePackageDescription' --- from 'Distribution.PackageDescription.Configuration' here, because we --- want to keep the condition tree, but simplify much of the test. - --- | Convert a generic package description to a solver-specific 'PInfo'. --- --- TODO: We currently just take all dependencies from all specified library, --- executable and test components. This does not quite seem fair. -convGPD :: OS -> Arch -> CompilerInfo -> Bool -> - PI PN -> GenericPackageDescription -> PInfo -convGPD os arch comp strfl pi - (GenericPackageDescription _ flags libs exes tests benchs) = - let - fds = flagInfo strfl flags - in - PInfo - (maybe [] (convCondTree os arch comp pi fds (const True) ) libs ++ - concatMap (convCondTree os arch comp pi fds (const True) . snd) exes ++ - prefix (Stanza (SN pi TestStanzas)) - (L.map (convCondTree os arch comp pi fds (const True) . snd) tests) ++ - prefix (Stanza (SN pi BenchStanzas)) - (L.map (convCondTree os arch comp pi fds (const True) . snd) benchs)) - fds - [] -- TODO: add encaps - Nothing - -prefix :: (FlaggedDeps qpn -> FlaggedDep qpn) -> [FlaggedDeps qpn] -> FlaggedDeps qpn -prefix _ [] = [] -prefix f fds = [f (concat fds)] - --- | Convert flag information. Automatic flags are now considered weak --- unless strong flags have been selected explicitly. -flagInfo :: Bool -> [PD.Flag] -> FlagInfo -flagInfo strfl = M.fromList . L.map (\ (MkFlag fn _ b m) -> (fn, FInfo b m (not (strfl || m)))) - --- | Convert condition trees to flagged dependencies. 
-convCondTree :: OS -> Arch -> CompilerInfo -> PI PN -> FlagInfo -> - (a -> Bool) -> -- how to detect if a branch is active - CondTree ConfVar [Dependency] a -> FlaggedDeps PN -convCondTree os arch comp pi@(PI pn _) fds p (CondNode info ds branches) - | p info = L.map (D.Simple . convDep pn) ds -- unconditional dependencies - ++ concatMap (convBranch os arch comp pi fds p) branches - | otherwise = [] - --- | Branch interpreter. --- --- Here, we try to simplify one of Cabal's condition tree branches into the --- solver's flagged dependency format, which is weaker. Condition trees can --- contain complex logical expression composed from flag choices and special --- flags (such as architecture, or compiler flavour). We try to evaluate the --- special flags and subsequently simplify to a tree that only depends on --- simple flag choices. -convBranch :: OS -> Arch -> CompilerInfo -> - PI PN -> FlagInfo -> - (a -> Bool) -> -- how to detect if a branch is active - (Condition ConfVar, - CondTree ConfVar [Dependency] a, - Maybe (CondTree ConfVar [Dependency] a)) -> FlaggedDeps PN -convBranch os arch cinfo pi fds p (c', t', mf') = - go c' ( convCondTree os arch cinfo pi fds p t') - (maybe [] (convCondTree os arch cinfo pi fds p) mf') - where - go :: Condition ConfVar -> - FlaggedDeps PN -> FlaggedDeps PN -> FlaggedDeps PN - go (Lit True) t _ = t - go (Lit False) _ f = f - go (CNot c) t f = go c f t - go (CAnd c d) t f = go c (go d t f) f - go (COr c d) t f = go c t (go d t f) - go (Var (Flag fn)) t f = extractCommon t f ++ [Flagged (FN pi fn) (fds ! fn) t f] - go (Var (OS os')) t f - | os == os' = t - | otherwise = f - go (Var (Arch arch')) t f - | arch == arch' = t - | otherwise = f - go (Var (Impl cf cvr)) t f - | matchImpl (compilerInfoId cinfo) || - -- fixme: Nothing should be treated as unknown, rather than empty - -- list. 
This code should eventually be changed to either - -- support partial resolution of compiler flags or to - -- complain about incompletely configured compilers. - any matchImpl (fromMaybe [] $ compilerInfoCompat cinfo) = t - | otherwise = f - where - matchImpl (CompilerId cf' cv) = cf == cf' && checkVR cvr cv - - -- If both branches contain the same package as a simple dep, we lift it to - -- the next higher-level, but without constraints. This heuristic together - -- with deferring flag choices will then usually first resolve this package, - -- and try an already installed version before imposing a default flag choice - -- that might not be what we want. - extractCommon :: FlaggedDeps PN -> FlaggedDeps PN -> FlaggedDeps PN - extractCommon ps ps' = [ D.Simple (Dep pn (Constrained [])) | D.Simple (Dep pn _) <- ps, D.Simple (Dep pn' _) <- ps', pn == pn' ] - --- | Convert a Cabal dependency to a solver-specific dependency. -convDep :: PN -> Dependency -> Dep PN -convDep pn' (Dependency pn vr) = Dep pn (Constrained [(vr, Goal (P pn') [])]) - --- | Convert a Cabal package identifier to a solver-specific dependency. 
-convPI :: PN -> PackageIdentifier -> Dep PN -convPI pn' (PackageIdentifier pn v) = Dep pn (Constrained [(eqVR v, Goal (P pn') [])]) diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/Index.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/Index.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/Index.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/Index.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,33 +0,0 @@ -module Distribution.Client.Dependency.Modular.Index where - -import Data.List as L -import Data.Map as M -import Prelude hiding (pi) - -import Distribution.Client.Dependency.Modular.Dependency -import Distribution.Client.Dependency.Modular.Flag -import Distribution.Client.Dependency.Modular.Package -import Distribution.Client.Dependency.Modular.Tree - --- | An index contains information about package instances. This is a nested --- dictionary. Package names are mapped to instances, which in turn is mapped --- to info. -type Index = Map PN (Map I PInfo) - --- | Info associated with a package instance. --- Currently, dependencies, flags, encapsulations and failure reasons. --- Packages that have a failure reason recorded for them are disabled --- globally, for reasons external to the solver. We currently use this --- for shadowing which essentially is a GHC limitation, and for --- installed packages that are broken. -data PInfo = PInfo (FlaggedDeps PN) FlagInfo Encaps (Maybe FailReason) - deriving (Show) - --- | Encapsulations. A list of package names. 
-type Encaps = [PN] - -mkIndex :: [(PN, I, PInfo)] -> Index -mkIndex xs = M.map M.fromList (groupMap (L.map (\ (pn, i, pi) -> (pn, (i, pi))) xs)) - -groupMap :: Ord a => [(a, b)] -> Map a [b] -groupMap xs = M.fromListWith (flip (++)) (L.map (\ (x, y) -> (x, [y])) xs) diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/Log.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/Log.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/Log.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/Log.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,121 +0,0 @@ -module Distribution.Client.Dependency.Modular.Log where - -import Control.Applicative -import Data.List as L -import Data.Set as S - -import Distribution.Client.Dependency.Types -- from Cabal - -import Distribution.Client.Dependency.Modular.Dependency -import Distribution.Client.Dependency.Modular.Message -import Distribution.Client.Dependency.Modular.Package -import Distribution.Client.Dependency.Modular.Tree (FailReason(..)) - --- | The 'Log' datatype. --- --- Represents the progress of a computation lazily. --- --- Parameterized over the type of actual messages and the final result. -type Log m a = Progress m () a - --- | Turns a log into a list of messages paired with a final result. A final result --- of 'Nothing' indicates failure. A final result of 'Just' indicates success. --- Keep in mind that forcing the second component of the returned pair will force the --- entire log. -runLog :: Log m a -> ([m], Maybe a) -runLog (Done x) = ([], Just x) -runLog (Fail _) = ([], Nothing) -runLog (Step m p) = let - (ms, r) = runLog p - in - (m : ms, r) - --- | Postprocesses a log file. Takes as an argument a limit on allowed backjumps. --- If the limit is 'Nothing', then infinitely many backjumps are allowed. If the --- limit is 'Just 0', backtracking is completely disabled. 
-logToProgress :: Maybe Int -> Log Message a -> Progress String String a -logToProgress mbj l = let - (ms, s) = runLog l - -- 'Nothing' for 's' means search tree exhaustively searched and failed - (es, e) = proc 0 ms -- catch first error (always) - -- 'Nothing' in 'e' means no backjump found - (ns, t) = case mbj of - Nothing -> (ms, Nothing) - Just n -> proc n ms - -- 'Nothing' in 't' means backjump limit not reached - -- prefer first error over later error - (exh, r) = case t of - -- backjump limit not reached - Nothing -> case s of - Nothing -> (True, e) -- failed after exhaustive search - Just _ -> (True, Nothing) -- success - -- backjump limit reached; prefer first error - Just _ -> (False, e) -- failed after backjump limit was reached - in go es es -- trace for first error - (showMessages (const True) True ns) -- shortened run - r s exh - where - -- Proc takes the allowed number of backjumps and a list of messages and explores the - -- message list until the maximum number of backjumps has been reached. The log until - -- that point as well as whether we have encountered an error or not are returned. - proc :: Int -> [Message] -> ([Message], Maybe (ConflictSet QPN)) - proc _ [] = ([], Nothing) - proc n ( Failure cs Backjump : xs@(Leave : Failure cs' Backjump : _)) - | cs == cs' = proc n xs -- repeated backjumps count as one - proc 0 ( Failure cs Backjump : _ ) = ([], Just cs) - proc n (x@(Failure _ Backjump) : xs) = (\ ~(ys, r) -> (x : ys, r)) (proc (n - 1) xs) - proc n (x : xs) = (\ ~(ys, r) -> (x : ys, r)) (proc n xs) - - -- This function takes a lot of arguments. The first two are both supposed to be - -- the log up to the first error. That's the error that will always be printed in - -- case we do not find a solution. We pass this log twice, because we evaluate it - -- in parallel with the full log, but we also want to retain the reference to its - -- beginning for when we print it. This trick prevents a space leak! 
- -- - -- The third argument is the full log, the fifth and six error conditions. - -- The seventh argument indicates whether the search was exhaustive. - -- - -- The order of arguments is important! In particular 's' must not be evaluated - -- unless absolutely necessary. It contains the final result, and if we shortcut - -- with an error due to backjumping, evaluating 's' would still require traversing - -- the entire tree. - go ms (_ : ns) (x : xs) r s exh = Step x (go ms ns xs r s exh) - go ms [] (x : xs) r s exh = Step x (go ms [] xs r s exh) - go ms _ [] (Just cs) _ exh = Fail $ - "Could not resolve dependencies:\n" ++ - unlines (showMessages (L.foldr (\ v _ -> v `S.member` cs) True) False ms) ++ - (if exh then "Dependency tree exhaustively searched.\n" - else "Backjump limit reached (change with --max-backjumps).\n") - go _ _ [] _ (Just s) _ = Done s - go _ _ [] _ _ _ = Fail ("Could not resolve dependencies; something strange happened.") -- should not happen - -logToProgress' :: Log Message a -> Progress String String a -logToProgress' l = let - (ms, r) = runLog l - xs = showMessages (const True) True ms - in go xs r - where - go [x] Nothing = Fail x - go [] Nothing = Fail "" - go [] (Just r) = Done r - go (x:xs) r = Step x (go xs r) - - -runLogIO :: Log Message a -> IO (Maybe a) -runLogIO x = - do - let (ms, r) = runLog x - putStr (unlines $ showMessages (const True) True ms) - return r - -failWith :: m -> Log m a -failWith m = Step m (Fail ()) - -succeedWith :: m -> a -> Log m a -succeedWith m x = Step m (Done x) - -continueWith :: m -> Log m a -> Log m a -continueWith = Step - -tryWith :: Message -> Log Message a -> Log Message a -tryWith m x = Step m (Step Enter x) <|> failWith Leave diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/Message.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/Message.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/Message.hs 2015-06-17 
08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/Message.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,101 +0,0 @@ -module Distribution.Client.Dependency.Modular.Message where - -import qualified Data.List as L -import Prelude hiding (pi) - -import Distribution.Text -- from Cabal - -import Distribution.Client.Dependency.Modular.Dependency -import Distribution.Client.Dependency.Modular.Flag -import Distribution.Client.Dependency.Modular.Package -import Distribution.Client.Dependency.Modular.Tree - -data Message = - Enter -- ^ increase indentation level - | Leave -- ^ decrease indentation level - | TryP (PI QPN) - | TryF QFN Bool - | TryS QSN Bool - | Next (Goal QPN) - | Success - | Failure (ConflictSet QPN) FailReason - --- | Transforms the structured message type to actual messages (strings). --- --- Takes an additional relevance predicate. The predicate gets a stack of goal --- variables and can decide whether messages regarding these goals are relevant. --- You can plug in 'const True' if you're interested in a full trace. If you --- want a slice of the trace concerning a particular conflict set, then plug in --- a predicate returning 'True' on the empty stack and if the head is in the --- conflict set. --- --- The second argument indicates if the level numbers should be shown. This is --- recommended for any trace that involves backtracking, because only the level --- numbers will allow to keep track of backjumps. 
-showMessages :: ([Var QPN] -> Bool) -> Bool -> [Message] -> [String] -showMessages p sl = go [] 0 - where - go :: [Var QPN] -> Int -> [Message] -> [String] - go _ _ [] = [] - -- complex patterns - go v l (TryP (PI qpn i) : Enter : Failure c fr : Leave : ms) = goPReject v l qpn [i] c fr ms - go v l (TryF qfn b : Enter : Failure c fr : Leave : ms) = (atLevel (add (F qfn) v) l $ "rejecting: " ++ showQFNBool qfn b ++ showFR c fr) (go v l ms) - go v l (TryS qsn b : Enter : Failure c fr : Leave : ms) = (atLevel (add (S qsn) v) l $ "rejecting: " ++ showQSNBool qsn b ++ showFR c fr) (go v l ms) - go v l (Next (Goal (P qpn) gr) : TryP pi : ms@(Enter : Next _ : _)) = (atLevel (add (P qpn) v) l $ "trying: " ++ showPI pi ++ showGRs gr) (go (add (P qpn) v) l ms) - go v l (Failure c Backjump : ms@(Leave : Failure c' Backjump : _)) | c == c' = go v l ms - -- standard display - go v l (Enter : ms) = go v (l+1) ms - go v l (Leave : ms) = go (drop 1 v) (l-1) ms - go v l (TryP pi@(PI qpn _) : ms) = (atLevel (add (P qpn) v) l $ "trying: " ++ showPI pi) (go (add (P qpn) v) l ms) - go v l (TryF qfn b : ms) = (atLevel (add (F qfn) v) l $ "trying: " ++ showQFNBool qfn b) (go (add (F qfn) v) l ms) - go v l (TryS qsn b : ms) = (atLevel (add (S qsn) v) l $ "trying: " ++ showQSNBool qsn b) (go (add (S qsn) v) l ms) - go v l (Next (Goal (P qpn) gr) : ms) = (atLevel (add (P qpn) v) l $ "next goal: " ++ showQPN qpn ++ showGRs gr) (go v l ms) - go v l (Next _ : ms) = go v l ms -- ignore flag goals in the log - go v l (Success : ms) = (atLevel v l $ "done") (go v l ms) - go v l (Failure c fr : ms) = (atLevel v l $ "fail" ++ showFR c fr) (go v l ms) - - add :: Var QPN -> [Var QPN] -> [Var QPN] - add v vs = simplifyVar v : vs - - -- special handler for many subsequent package rejections - goPReject :: [Var QPN] -> Int -> QPN -> [I] -> ConflictSet QPN -> FailReason -> [Message] -> [String] - goPReject v l qpn is c fr (TryP (PI qpn' i) : Enter : Failure _ fr' : Leave : ms) | qpn == qpn' && fr == fr' 
= goPReject v l qpn (i : is) c fr ms - goPReject v l qpn is c fr ms = (atLevel (P qpn : v) l $ "rejecting: " ++ showQPN qpn ++ "-" ++ L.intercalate ", " (map showI (reverse is)) ++ showFR c fr) (go v l ms) - - -- write a message, but only if it's relevant; we can also enable or disable the display of the current level - atLevel v l x xs - | sl && p v = let s = show l - in ("[" ++ replicate (3 - length s) '_' ++ s ++ "] " ++ x) : xs - | p v = x : xs - | otherwise = xs - -showGRs :: QGoalReasonChain -> String -showGRs (gr : _) = showGR gr -showGRs [] = "" - -showGR :: GoalReason QPN -> String -showGR UserGoal = " (user goal)" -showGR (PDependency pi) = " (dependency of " ++ showPI pi ++ ")" -showGR (FDependency qfn b) = " (dependency of " ++ showQFNBool qfn b ++ ")" -showGR (SDependency qsn) = " (dependency of " ++ showQSNBool qsn True ++ ")" - -showFR :: ConflictSet QPN -> FailReason -> String -showFR _ InconsistentInitialConstraints = " (inconsistent initial constraints)" -showFR _ (Conflicting ds) = " (conflict: " ++ L.intercalate ", " (map showDep ds) ++ ")" -showFR _ CannotInstall = " (only already installed instances can be used)" -showFR _ CannotReinstall = " (avoiding to reinstall a package with same version but new dependencies)" -showFR _ Shadowed = " (shadowed by another installed package with same version)" -showFR _ Broken = " (package is broken)" -showFR _ (GlobalConstraintVersion vr) = " (global constraint requires " ++ display vr ++ ")" -showFR _ GlobalConstraintInstalled = " (global constraint requires installed instance)" -showFR _ GlobalConstraintSource = " (global constraint requires source instance)" -showFR _ GlobalConstraintFlag = " (global constraint requires opposite flag selection)" -showFR _ ManualFlag = " (manual flag can only be changed explicitly)" -showFR _ (BuildFailureNotInIndex pn) = " (unknown package: " ++ display pn ++ ")" -showFR c Backjump = " (backjumping, conflict set: " ++ showCS c ++ ")" --- The following are internal 
failures. They should not occur. In the --- interest of not crashing unnecessarily, we still just print an error --- message though. -showFR _ (MalformedFlagChoice qfn) = " (INTERNAL ERROR: MALFORMED FLAG CHOICE: " ++ showQFN qfn ++ ")" -showFR _ (MalformedStanzaChoice qsn) = " (INTERNAL ERROR: MALFORMED STANZA CHOICE: " ++ showQSN qsn ++ ")" -showFR _ EmptyGoalChoice = " (INTERNAL ERROR: EMPTY GOAL CHOICE)" diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/Package.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/Package.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/Package.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/Package.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,111 +0,0 @@ -{-# LANGUAGE DeriveFunctor #-} -module Distribution.Client.Dependency.Modular.Package - (module Distribution.Client.Dependency.Modular.Package, - module Distribution.Package) where - -import Data.List as L -import Data.Map as M - -import Distribution.Package -- from Cabal -import Distribution.Text -- from Cabal - -import Distribution.Client.Dependency.Modular.Version - --- | A package name. -type PN = PackageName - --- | Unpacking a package name. -unPN :: PN -> String -unPN (PackageName pn) = pn - --- | Package version. A package name plus a version number. -type PV = PackageId - --- | Qualified package version. -type QPV = Q PV - --- | Package id. Currently just a black-box string. -type PId = InstalledPackageId - --- | Location. Info about whether a package is installed or not, and where --- exactly it is located. For installed packages, uniquely identifies the --- package instance via its 'PId'. --- --- TODO: More information is needed about the repo. -data Loc = Inst PId | InRepo - deriving (Eq, Ord, Show) - --- | Instance. A version number and a location. 
-data I = I Ver Loc - deriving (Eq, Ord, Show) - --- | String representation of an instance. -showI :: I -> String -showI (I v InRepo) = showVer v -showI (I v (Inst (InstalledPackageId i))) = showVer v ++ "/installed" ++ shortId i - where - -- A hack to extract the beginning of the package ABI hash - shortId = snip (splitAt 4) (++ "...") . - snip ((\ (x, y) -> (reverse x, y)) . break (=='-') . reverse) ('-':) - snip p f xs = case p xs of - (ys, zs) -> (if L.null zs then id else f) ys - --- | Package instance. A package name and an instance. -data PI qpn = PI qpn I - deriving (Eq, Ord, Show, Functor) - --- | String representation of a package instance. -showPI :: PI QPN -> String -showPI (PI qpn i) = showQPN qpn ++ "-" ++ showI i - --- | Checks if a package instance corresponds to an installed package. -instPI :: PI qpn -> Bool -instPI (PI _ (I _ (Inst _))) = True -instPI _ = False - -instI :: I -> Bool -instI (I _ (Inst _)) = True -instI _ = False - --- | Package path. (Stored in "reverse" order.) -type PP = [PN] - --- | String representation of a package path. -showPP :: PP -> String -showPP = intercalate "." . L.map display . reverse - - --- | A qualified entity. Pairs a package path with the entity. -data Q a = Q PP a - deriving (Eq, Ord, Show) - --- | Standard string representation of a qualified entity. -showQ :: (a -> String) -> (Q a -> String) -showQ showa (Q [] x) = showa x -showQ showa (Q pp x) = showPP pp ++ "." ++ showa x - --- | Qualified package name. -type QPN = Q PN - --- | String representation of a qualified package path. -showQPN :: QPN -> String -showQPN = showQ display - --- | The scope associates every package with a path. The convention is that packages --- not in the data structure have an empty path associated with them. -type Scope = Map PN PP - --- | An empty scope structure, for initialization. -emptyScope :: Scope -emptyScope = M.empty - --- | Create artificial parents for each of the package names, making --- them all independent. 
-makeIndependent :: [PN] -> Scope -makeIndependent ps = L.foldl (\ sc (n, p) -> M.insert p [PackageName (show n)] sc) emptyScope (zip ([0..] :: [Int]) ps) - -qualify :: Scope -> PN -> QPN -qualify sc pn = Q (findWithDefault [] pn sc) pn - -unQualify :: Q a -> a -unQualify (Q _ x) = x diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/Preference.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/Preference.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/Preference.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/Preference.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,281 +0,0 @@ -{-# LANGUAGE CPP #-} -module Distribution.Client.Dependency.Modular.Preference where - --- Reordering or pruning the tree in order to prefer or make certain choices. - -import qualified Data.List as L -import qualified Data.Map as M -#if !MIN_VERSION_base(4,8,0) -import Data.Monoid -#endif -import Data.Ord - -import Distribution.Client.Dependency.Types - ( PackageConstraint(..), PackagePreferences(..), InstalledPreference(..) ) -import Distribution.Client.Types - ( OptionalStanza(..) ) - -import Distribution.Client.Dependency.Modular.Dependency -import Distribution.Client.Dependency.Modular.Flag -import Distribution.Client.Dependency.Modular.Package -import Distribution.Client.Dependency.Modular.PSQ as P -import Distribution.Client.Dependency.Modular.Tree -import Distribution.Client.Dependency.Modular.Version - --- | Generic abstraction for strategies that just rearrange the package order. --- Only packages that match the given predicate are reordered. 
-packageOrderFor :: (PN -> Bool) -> (PN -> I -> I -> Ordering) -> Tree a -> Tree a -packageOrderFor p cmp = trav go - where - go (PChoiceF v@(Q _ pn) r cs) - | p pn = PChoiceF v r (P.sortByKeys (flip (cmp pn)) cs) - | otherwise = PChoiceF v r cs - go x = x - --- | Ordering that treats preferred versions as greater than non-preferred --- versions. -preferredVersionsOrdering :: VR -> Ver -> Ver -> Ordering -preferredVersionsOrdering vr v1 v2 = - compare (checkVR vr v1) (checkVR vr v2) - --- | Traversal that tries to establish package preferences (not constraints). --- Works by reordering choice nodes. -preferPackagePreferences :: (PN -> PackagePreferences) -> Tree a -> Tree a -preferPackagePreferences pcs = packageOrderFor (const True) preference - where - preference pn i1@(I v1 _) i2@(I v2 _) = - let PackagePreferences vr ipref = pcs pn - in preferredVersionsOrdering vr v1 v2 `mappend` -- combines lexically - locationsOrdering ipref i1 i2 - - -- Note that we always rank installed before uninstalled, and later - -- versions before earlier, but we can change the priority of the - -- two orderings. - locationsOrdering PreferInstalled v1 v2 = - preferInstalledOrdering v1 v2 `mappend` preferLatestOrdering v1 v2 - locationsOrdering PreferLatest v1 v2 = - preferLatestOrdering v1 v2 `mappend` preferInstalledOrdering v1 v2 - --- | Ordering that treats installed instances as greater than uninstalled ones. -preferInstalledOrdering :: I -> I -> Ordering -preferInstalledOrdering (I _ (Inst _)) (I _ (Inst _)) = EQ -preferInstalledOrdering (I _ (Inst _)) _ = GT -preferInstalledOrdering _ (I _ (Inst _)) = LT -preferInstalledOrdering _ _ = EQ - --- | Compare instances by their version numbers. -preferLatestOrdering :: I -> I -> Ordering -preferLatestOrdering (I v1 _) (I v2 _) = compare v1 v2 - --- | Helper function that tries to enforce a single package constraint on a --- given instance for a P-node. 
Translates the constraint into a --- tree-transformer that either leaves the subtree untouched, or replaces it --- with an appropriate failure node. -processPackageConstraintP :: ConflictSet QPN -> I -> PackageConstraint -> Tree a -> Tree a -processPackageConstraintP c (I v _) (PackageConstraintVersion _ vr) r - | checkVR vr v = r - | otherwise = Fail c (GlobalConstraintVersion vr) -processPackageConstraintP c i (PackageConstraintInstalled _) r - | instI i = r - | otherwise = Fail c GlobalConstraintInstalled -processPackageConstraintP c i (PackageConstraintSource _) r - | not (instI i) = r - | otherwise = Fail c GlobalConstraintSource -processPackageConstraintP _ _ _ r = r - --- | Helper function that tries to enforce a single package constraint on a --- given flag setting for an F-node. Translates the constraint into a --- tree-transformer that either leaves the subtree untouched, or replaces it --- with an appropriate failure node. -processPackageConstraintF :: Flag -> ConflictSet QPN -> Bool -> PackageConstraint -> Tree a -> Tree a -processPackageConstraintF f c b' (PackageConstraintFlags _ fa) r = - case L.lookup f fa of - Nothing -> r - Just b | b == b' -> r - | otherwise -> Fail c GlobalConstraintFlag -processPackageConstraintF _ _ _ _ r = r - --- | Helper function that tries to enforce a single package constraint on a --- given flag setting for an F-node. Translates the constraint into a --- tree-transformer that either leaves the subtree untouched, or replaces it --- with an appropriate failure node. -processPackageConstraintS :: OptionalStanza -> ConflictSet QPN -> Bool -> PackageConstraint -> Tree a -> Tree a -processPackageConstraintS s c b' (PackageConstraintStanzas _ ss) r = - if not b' && s `elem` ss then Fail c GlobalConstraintFlag - else r -processPackageConstraintS _ _ _ _ r = r - --- | Traversal that tries to establish various kinds of user constraints. 
Works --- by selectively disabling choices that have been ruled out by global user --- constraints. -enforcePackageConstraints :: M.Map PN [PackageConstraint] -> Tree QGoalReasonChain -> Tree QGoalReasonChain -enforcePackageConstraints pcs = trav go - where - go (PChoiceF qpn@(Q _ pn) gr ts) = - let c = toConflictSet (Goal (P qpn) gr) - -- compose the transformation functions for each of the relevant constraint - g = \ i -> foldl (\ h pc -> h . processPackageConstraintP c i pc) id - (M.findWithDefault [] pn pcs) - in PChoiceF qpn gr (P.mapWithKey g ts) - go (FChoiceF qfn@(FN (PI (Q _ pn) _) f) gr tr m ts) = - let c = toConflictSet (Goal (F qfn) gr) - -- compose the transformation functions for each of the relevant constraint - g = \ b -> foldl (\ h pc -> h . processPackageConstraintF f c b pc) id - (M.findWithDefault [] pn pcs) - in FChoiceF qfn gr tr m (P.mapWithKey g ts) - go (SChoiceF qsn@(SN (PI (Q _ pn) _) f) gr tr ts) = - let c = toConflictSet (Goal (S qsn) gr) - -- compose the transformation functions for each of the relevant constraint - g = \ b -> foldl (\ h pc -> h . processPackageConstraintS f c b pc) id - (M.findWithDefault [] pn pcs) - in SChoiceF qsn gr tr (P.mapWithKey g ts) - go x = x - --- | Transformation that tries to enforce manual flags. Manual flags --- can only be re-set explicitly by the user. This transformation should --- be run after user preferences have been enforced. For manual flags, --- it checks if a user choice has been made. If not, it disables all but --- the first choice. 
-enforceManualFlags :: Tree QGoalReasonChain -> Tree QGoalReasonChain -enforceManualFlags = trav go - where - go (FChoiceF qfn gr tr True ts) = FChoiceF qfn gr tr True $ - let c = toConflictSet (Goal (F qfn) gr) - in case span isDisabled (P.toList ts) of - ([], y : ys) -> P.fromList (y : L.map (\ (b, _) -> (b, Fail c ManualFlag)) ys) - _ -> ts -- something has been manually selected, leave things alone - where - isDisabled (_, Fail _ GlobalConstraintFlag) = True - isDisabled _ = False - go x = x - --- | Prefer installed packages over non-installed packages, generally. --- All installed packages or non-installed packages are treated as --- equivalent. -preferInstalled :: Tree a -> Tree a -preferInstalled = packageOrderFor (const True) (const preferInstalledOrdering) - --- | Prefer packages with higher version numbers over packages with --- lower version numbers, for certain packages. -preferLatestFor :: (PN -> Bool) -> Tree a -> Tree a -preferLatestFor p = packageOrderFor p (const preferLatestOrdering) - --- | Prefer packages with higher version numbers over packages with --- lower version numbers, for all packages. -preferLatest :: Tree a -> Tree a -preferLatest = preferLatestFor (const True) - --- | Require installed packages. -requireInstalled :: (PN -> Bool) -> Tree (QGoalReasonChain, a) -> Tree (QGoalReasonChain, a) -requireInstalled p = trav go - where - go (PChoiceF v@(Q _ pn) i@(gr, _) cs) - | p pn = PChoiceF v i (P.mapWithKey installed cs) - | otherwise = PChoiceF v i cs - where - installed (I _ (Inst _)) x = x - installed _ _ = Fail (toConflictSet (Goal (P v) gr)) CannotInstall - go x = x - --- | Avoid reinstalls. --- --- This is a tricky strategy. If a package version is installed already and the --- same version is available from a repo, the repo version will never be chosen. --- This would result in a reinstall (either destructively, or potentially, --- shadowing). 
The old instance won't be visible or even present anymore, but --- other packages might have depended on it. --- --- TODO: It would be better to actually check the reverse dependencies of installed --- packages. If they're not depended on, then reinstalling should be fine. Even if --- they are, perhaps this should just result in trying to reinstall those other --- packages as well. However, doing this all neatly in one pass would require to --- change the builder, or at least to change the goal set after building. -avoidReinstalls :: (PN -> Bool) -> Tree (QGoalReasonChain, a) -> Tree (QGoalReasonChain, a) -avoidReinstalls p = trav go - where - go (PChoiceF qpn@(Q _ pn) i@(gr, _) cs) - | p pn = PChoiceF qpn i disableReinstalls - | otherwise = PChoiceF qpn i cs - where - disableReinstalls = - let installed = [ v | (I v (Inst _), _) <- toList cs ] - in P.mapWithKey (notReinstall installed) cs - - notReinstall vs (I v InRepo) _ - | v `elem` vs = Fail (toConflictSet (Goal (P qpn) gr)) CannotReinstall - notReinstall _ _ x = x - go x = x - --- | Always choose the first goal in the list next, abandoning all --- other choices. --- --- This is unnecessary for the default search strategy, because --- it descends only into the first goal choice anyway, --- but may still make sense to just reduce the tree size a bit. -firstGoal :: Tree a -> Tree a -firstGoal = trav go - where - go (GoalChoiceF xs) = -- casePSQ xs (GoalChoiceF xs) (\ _ t _ -> out t) -- more space efficient, but removes valuable debug info - casePSQ xs (GoalChoiceF (fromList [])) (\ g t _ -> GoalChoiceF (fromList [(g, t)])) - go x = x - -- Note that we keep empty choice nodes, because they mean success. - --- | Transformation that tries to make a decision on base as early as --- possible. In nearly all cases, there's a single choice for the base --- package. Also, fixing base early should lead to better error messages. 
-preferBaseGoalChoice :: Tree a -> Tree a -preferBaseGoalChoice = trav go - where - go (GoalChoiceF xs) = GoalChoiceF (P.sortByKeys preferBase xs) - go x = x - - preferBase :: OpenGoal -> OpenGoal -> Ordering - preferBase (OpenGoal (Simple (Dep (Q [] pn) _)) _) _ | unPN pn == "base" = LT - preferBase _ (OpenGoal (Simple (Dep (Q [] pn) _)) _) | unPN pn == "base" = GT - preferBase _ _ = EQ - --- | Transformation that sorts choice nodes so that --- child nodes with a small branching degree are preferred. As a --- special case, choices with 0 branches will be preferred (as they --- are immediately considered inconsistent), and choices with 1 --- branch will also be preferred (as they don't involve choice). -preferEasyGoalChoices :: Tree a -> Tree a -preferEasyGoalChoices = trav go - where - go (GoalChoiceF xs) = GoalChoiceF (P.sortBy (comparing choices) xs) - go x = x - --- | Transformation that tries to avoid making weak flag choices early. --- Weak flags are trivial flags (not influencing dependencies) or such --- flags that are explicitly declared to be weak in the index. -deferWeakFlagChoices :: Tree a -> Tree a -deferWeakFlagChoices = trav go - where - go (GoalChoiceF xs) = GoalChoiceF (P.sortBy defer xs) - go x = x - - defer :: Tree a -> Tree a -> Ordering - defer (FChoice _ _ True _ _) _ = GT - defer _ (FChoice _ _ True _ _) = LT - defer _ _ = EQ - --- | Variant of 'preferEasyGoalChoices'. --- --- Only approximates the number of choices in the branches. Less accurate, --- more efficient. -lpreferEasyGoalChoices :: Tree a -> Tree a -lpreferEasyGoalChoices = trav go - where - go (GoalChoiceF xs) = GoalChoiceF (P.sortBy (comparing lchoices) xs) - go x = x - --- | Variant of 'preferEasyGoalChoices'. --- --- I first thought that using a paramorphism might be faster here, --- but it doesn't seem to make any difference. -preferEasyGoalChoices' :: Tree a -> Tree a -preferEasyGoalChoices' = para (inn . 
go) - where - go (GoalChoiceF xs) = GoalChoiceF (P.map fst (P.sortBy (comparing (choices . snd)) xs)) - go x = fmap fst x - diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/PSQ.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/PSQ.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/PSQ.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/PSQ.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,94 +0,0 @@ -{-# LANGUAGE DeriveFunctor, DeriveFoldable, DeriveFoldable, DeriveTraversable #-} -module Distribution.Client.Dependency.Modular.PSQ where - --- Priority search queues. --- --- I am not yet sure what exactly is needed. But we need a data structure with --- key-based lookup that can be sorted. We're using a sequence right now with --- (inefficiently implemented) lookup, because I think that queue-based --- operations and sorting turn out to be more efficiency-critical in practice. - -import Data.Foldable -import Data.Function -import Data.List as S hiding (foldr) -import Data.Traversable -import Prelude hiding (foldr) - -newtype PSQ k v = PSQ [(k, v)] - deriving (Eq, Show, Functor, Foldable, Traversable) - -keys :: PSQ k v -> [k] -keys (PSQ xs) = fmap fst xs - -lookup :: Eq k => k -> PSQ k v -> Maybe v -lookup k (PSQ xs) = S.lookup k xs - -map :: (v1 -> v2) -> PSQ k v1 -> PSQ k v2 -map f (PSQ xs) = PSQ (fmap (\ (k, v) -> (k, f v)) xs) - -mapKeys :: (k1 -> k2) -> PSQ k1 v -> PSQ k2 v -mapKeys f (PSQ xs) = PSQ (fmap (\ (k, v) -> (f k, v)) xs) - -mapWithKey :: (k -> a -> b) -> PSQ k a -> PSQ k b -mapWithKey f (PSQ xs) = PSQ (fmap (\ (k, v) -> (k, f k v)) xs) - -mapWithKeyState :: (s -> k -> a -> (b, s)) -> PSQ k a -> s -> PSQ k b -mapWithKeyState p (PSQ xs) s0 = - PSQ (foldr (\ (k, v) r s -> case p s k v of - (w, n) -> (k, w) : (r n)) - (const []) xs s0) - -delete :: Eq k => k -> PSQ k a -> PSQ k a -delete k (PSQ xs) = PSQ (snd (partition ((== k) . 
fst) xs)) - -fromList :: [(k, a)] -> PSQ k a -fromList = PSQ - -cons :: k -> a -> PSQ k a -> PSQ k a -cons k x (PSQ xs) = PSQ ((k, x) : xs) - -snoc :: PSQ k a -> k -> a -> PSQ k a -snoc (PSQ xs) k x = PSQ (xs ++ [(k, x)]) - -casePSQ :: PSQ k a -> r -> (k -> a -> PSQ k a -> r) -> r -casePSQ (PSQ xs) n c = - case xs of - [] -> n - (k, v) : ys -> c k v (PSQ ys) - -splits :: PSQ k a -> PSQ k (a, PSQ k a) -splits = go id - where - go f xs = casePSQ xs - (PSQ []) - (\ k v ys -> cons k (v, f ys) (go (f . cons k v) ys)) - -sortBy :: (a -> a -> Ordering) -> PSQ k a -> PSQ k a -sortBy cmp (PSQ xs) = PSQ (S.sortBy (cmp `on` snd) xs) - -sortByKeys :: (k -> k -> Ordering) -> PSQ k a -> PSQ k a -sortByKeys cmp (PSQ xs) = PSQ (S.sortBy (cmp `on` fst) xs) - -filterKeys :: (k -> Bool) -> PSQ k a -> PSQ k a -filterKeys p (PSQ xs) = PSQ (S.filter (p . fst) xs) - -filter :: (a -> Bool) -> PSQ k a -> PSQ k a -filter p (PSQ xs) = PSQ (S.filter (p . snd) xs) - -length :: PSQ k a -> Int -length (PSQ xs) = S.length xs - --- | "Lazy length". --- --- Only approximates the length, but doesn't force the list. 
-llength :: PSQ k a -> Int -llength (PSQ []) = 0 -llength (PSQ (_:[])) = 1 -llength (PSQ (_:_:[])) = 2 -llength (PSQ _) = 3 - -null :: PSQ k a -> Bool -null (PSQ xs) = S.null xs - -toList :: PSQ k a -> [(k, a)] -toList (PSQ xs) = xs diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/Solver.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/Solver.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/Solver.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/Solver.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,60 +0,0 @@ -module Distribution.Client.Dependency.Modular.Solver where - -import Data.Map as M - -import Distribution.Client.Dependency.Types - -import Distribution.Client.Dependency.Modular.Assignment -import Distribution.Client.Dependency.Modular.Builder -import Distribution.Client.Dependency.Modular.Dependency -import Distribution.Client.Dependency.Modular.Explore -import Distribution.Client.Dependency.Modular.Index -import Distribution.Client.Dependency.Modular.Log -import Distribution.Client.Dependency.Modular.Message -import Distribution.Client.Dependency.Modular.Package -import qualified Distribution.Client.Dependency.Modular.Preference as P -import Distribution.Client.Dependency.Modular.Validate - --- | Various options for the modular solver. 
-data SolverConfig = SolverConfig { - preferEasyGoalChoices :: Bool, - independentGoals :: Bool, - avoidReinstalls :: Bool, - shadowPkgs :: Bool, - strongFlags :: Bool, - maxBackjumps :: Maybe Int -} - -solve :: SolverConfig -> -- solver parameters - Index -> -- all available packages as an index - (PN -> PackagePreferences) -> -- preferences - Map PN [PackageConstraint] -> -- global constraints - [PN] -> -- global goals - Log Message (Assignment, RevDepMap) -solve sc idx userPrefs userConstraints userGoals = - explorePhase $ - heuristicsPhase $ - preferencesPhase $ - validationPhase $ - prunePhase $ - buildPhase - where - explorePhase = exploreTreeLog . backjump - heuristicsPhase = P.firstGoal . -- after doing goal-choice heuristics, commit to the first choice (saves space) - P.deferWeakFlagChoices . - P.preferBaseGoalChoice . - if preferEasyGoalChoices sc - then P.lpreferEasyGoalChoices - else id - preferencesPhase = P.preferPackagePreferences userPrefs - validationPhase = P.enforceManualFlags . -- can only be done after user constraints - P.enforcePackageConstraints userConstraints . - validateTree idx - prunePhase = (if avoidReinstalls sc then P.avoidReinstalls (const True) else id) . 
- -- packages that can never be "upgraded": - P.requireInstalled (`elem` [ PackageName "base" - , PackageName "ghc-prim" - , PackageName "integer-gmp" - , PackageName "integer-simple" - ]) - buildPhase = buildTree idx (independentGoals sc) userGoals diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/Tree.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/Tree.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/Tree.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/Tree.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,121 +0,0 @@ -{-# LANGUAGE DeriveFunctor, DeriveFoldable, DeriveTraversable #-} -module Distribution.Client.Dependency.Modular.Tree where - -import Control.Monad hiding (mapM) -import Data.Foldable -import Data.Traversable -import Prelude hiding (foldr, mapM) - -import Distribution.Client.Dependency.Modular.Dependency -import Distribution.Client.Dependency.Modular.Flag -import Distribution.Client.Dependency.Modular.Package -import Distribution.Client.Dependency.Modular.PSQ as P -import Distribution.Client.Dependency.Modular.Version - --- | Type of the search tree. Inlining the choice nodes for now. -data Tree a = - PChoice QPN a (PSQ I (Tree a)) - | FChoice QFN a Bool Bool (PSQ Bool (Tree a)) -- Bool indicates whether it's weak/trivial, second Bool whether it's manual - | SChoice QSN a Bool (PSQ Bool (Tree a)) -- Bool indicates whether it's trivial - | GoalChoice (PSQ OpenGoal (Tree a)) -- PSQ should never be empty - | Done RevDepMap - | Fail (ConflictSet QPN) FailReason - deriving (Eq, Show, Functor) - -- Above, a choice is called trivial if it clearly does not matter. The - -- special case of triviality we actually consider is if there are no new - -- dependencies introduced by this node. - -- - -- A (flag) choice is called weak if we do want to defer it. 
This is the - -- case for flags that should be implied by what's currently installed on - -- the system, as opposed to flags that are used to explicitly enable or - -- disable some functionality. - -data FailReason = InconsistentInitialConstraints - | Conflicting [Dep QPN] - | CannotInstall - | CannotReinstall - | Shadowed - | Broken - | GlobalConstraintVersion VR - | GlobalConstraintInstalled - | GlobalConstraintSource - | GlobalConstraintFlag - | ManualFlag - | BuildFailureNotInIndex PN - | MalformedFlagChoice QFN - | MalformedStanzaChoice QSN - | EmptyGoalChoice - | Backjump - deriving (Eq, Show) - --- | Functor for the tree type. -data TreeF a b = - PChoiceF QPN a (PSQ I b) - | FChoiceF QFN a Bool Bool (PSQ Bool b) - | SChoiceF QSN a Bool (PSQ Bool b) - | GoalChoiceF (PSQ OpenGoal b) - | DoneF RevDepMap - | FailF (ConflictSet QPN) FailReason - deriving (Functor, Foldable, Traversable) - -out :: Tree a -> TreeF a (Tree a) -out (PChoice p i ts) = PChoiceF p i ts -out (FChoice p i b m ts) = FChoiceF p i b m ts -out (SChoice p i b ts) = SChoiceF p i b ts -out (GoalChoice ts) = GoalChoiceF ts -out (Done x ) = DoneF x -out (Fail c x ) = FailF c x - -inn :: TreeF a (Tree a) -> Tree a -inn (PChoiceF p i ts) = PChoice p i ts -inn (FChoiceF p i b m ts) = FChoice p i b m ts -inn (SChoiceF p i b ts) = SChoice p i b ts -inn (GoalChoiceF ts) = GoalChoice ts -inn (DoneF x ) = Done x -inn (FailF c x ) = Fail c x - --- | Determines whether a tree is active, i.e., isn't a failure node. -active :: Tree a -> Bool -active (Fail _ _) = False -active _ = True - --- | Determines how many active choices are available in a node. Note that we --- count goal choices as having one choice, always. 
-choices :: Tree a -> Int -choices (PChoice _ _ ts) = P.length (P.filter active ts) -choices (FChoice _ _ _ _ ts) = P.length (P.filter active ts) -choices (SChoice _ _ _ ts) = P.length (P.filter active ts) -choices (GoalChoice _ ) = 1 -choices (Done _ ) = 1 -choices (Fail _ _ ) = 0 - --- | Variant of 'choices' that only approximates the number of choices, --- using 'llength'. -lchoices :: Tree a -> Int -lchoices (PChoice _ _ ts) = P.llength (P.filter active ts) -lchoices (FChoice _ _ _ _ ts) = P.llength (P.filter active ts) -lchoices (SChoice _ _ _ ts) = P.llength (P.filter active ts) -lchoices (GoalChoice _ ) = 1 -lchoices (Done _ ) = 1 -lchoices (Fail _ _ ) = 0 - --- | Catamorphism on trees. -cata :: (TreeF a b -> b) -> Tree a -> b -cata phi x = (phi . fmap (cata phi) . out) x - -trav :: (TreeF a (Tree b) -> TreeF b (Tree b)) -> Tree a -> Tree b -trav psi x = cata (inn . psi) x - --- | Paramorphism on trees. -para :: (TreeF a (b, Tree a) -> b) -> Tree a -> b -para phi = phi . fmap (\ x -> (para phi x, x)) . out - -cataM :: Monad m => (TreeF a b -> m b) -> Tree a -> m b -cataM phi = phi <=< mapM (cataM phi) <=< return . out - --- | Anamorphism on trees. -ana :: (b -> TreeF a b) -> b -> Tree a -ana psi = inn . fmap (ana psi) . psi - -anaM :: Monad m => (b -> m (TreeF a b)) -> b -> m (Tree a) -anaM psi = return . inn <=< mapM (anaM psi) <=< psi diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/Validate.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/Validate.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/Validate.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/Validate.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,232 +0,0 @@ -module Distribution.Client.Dependency.Modular.Validate where - --- Validation of the tree. --- --- The task here is to make sure all constraints hold. 
After validation, any --- assignment returned by exploration of the tree should be a complete valid --- assignment, i.e., actually constitute a solution. - -import Control.Applicative -import Control.Monad.Reader hiding (sequence) -import Data.List as L -import Data.Map as M -import Data.Traversable -import Prelude hiding (sequence) - -import Distribution.Client.Dependency.Modular.Assignment -import Distribution.Client.Dependency.Modular.Dependency -import Distribution.Client.Dependency.Modular.Flag -import Distribution.Client.Dependency.Modular.Index -import Distribution.Client.Dependency.Modular.Package -import Distribution.Client.Dependency.Modular.PSQ as P -import Distribution.Client.Dependency.Modular.Tree - --- In practice, most constraints are implication constraints (IF we have made --- a number of choices, THEN we also have to ensure that). We call constraints --- that for which the preconditions are fulfilled ACTIVE. We maintain a set --- of currently active constraints that we pass down the node. --- --- We aim at detecting inconsistent states as early as possible. --- --- Whenever we make a choice, there are two things that need to happen: --- --- (1) We must check that the choice is consistent with the currently --- active constraints. --- --- (2) The choice increases the set of active constraints. For the new --- active constraints, we must check that they are consistent with --- the current state. --- --- We can actually merge (1) and (2) by saying the the current choice is --- a new active constraint, fixing the choice. --- --- If a test fails, we have detected an inconsistent state. We can --- disable the current subtree and do not have to traverse it any further. --- --- We need a good way to represent the current state, i.e., the current --- set of active constraints. Since the main situation where we have to --- search in it is (1), it seems best to store the state by package: for --- every package, we store which versions are still allowed. 
If for any --- package, we have inconsistent active constraints, we can also stop. --- This is a particular way to read task (2): --- --- (2, weak) We only check if the new constraints are consistent with --- the choices we've already made, and add them to the active set. --- --- (2, strong) We check if the new constraints are consistent with the --- choices we've already made, and the constraints we already have. --- --- It currently seems as if we're implementing the weak variant. However, --- when used together with 'preferEasyGoalChoices', we will find an --- inconsistent state in the very next step. --- --- What do we do about flags? --- --- Like for packages, we store the flag choices we have already made. --- Now, regarding (1), we only have to test whether we've decided the --- current flag before. Regarding (2), the interesting bit is in discovering --- the new active constraints. To this end, we look up the constraints for --- the package the flag belongs to, and traverse its flagged dependencies. --- Wherever we find the flag in question, we start recording dependencies --- underneath as new active dependencies. If we encounter other flags, we --- check if we've chosen them already and either proceed or stop. - --- | The state needed during validation. -data ValidateState = VS { - index :: Index, - saved :: Map QPN (FlaggedDeps QPN), -- saved, scoped, dependencies - pa :: PreAssignment -} - -type Validate = Reader ValidateState - -validate :: Tree (QGoalReasonChain, Scope) -> Validate (Tree QGoalReasonChain) -validate = cata go - where - go :: TreeF (QGoalReasonChain, Scope) (Validate (Tree QGoalReasonChain)) -> Validate (Tree QGoalReasonChain) - - go (PChoiceF qpn (gr, sc) ts) = PChoice qpn gr <$> sequence (P.mapWithKey (goP qpn gr sc) ts) - go (FChoiceF qfn (gr, _sc) b m ts) = - do - -- Flag choices may occur repeatedly (because they can introduce new constraints - -- in various places). However, subsequent choices must be consistent. 
We thereby - -- collapse repeated flag choice nodes. - PA _ pfa _ <- asks pa -- obtain current flag-preassignment - case M.lookup qfn pfa of - Just rb -> -- flag has already been assigned; collapse choice to the correct branch - case P.lookup rb ts of - Just t -> goF qfn gr rb t - Nothing -> return $ Fail (toConflictSet (Goal (F qfn) gr)) (MalformedFlagChoice qfn) - Nothing -> -- flag choice is new, follow both branches - FChoice qfn gr b m <$> sequence (P.mapWithKey (goF qfn gr) ts) - go (SChoiceF qsn (gr, _sc) b ts) = - do - -- Optional stanza choices are very similar to flag choices. - PA _ _ psa <- asks pa -- obtain current stanza-preassignment - case M.lookup qsn psa of - Just rb -> -- stanza choice has already been made; collapse choice to the correct branch - case P.lookup rb ts of - Just t -> goS qsn gr rb t - Nothing -> return $ Fail (toConflictSet (Goal (S qsn) gr)) (MalformedStanzaChoice qsn) - Nothing -> -- stanza choice is new, follow both branches - SChoice qsn gr b <$> sequence (P.mapWithKey (goS qsn gr) ts) - - -- We don't need to do anything for goal choices or failure nodes. - go (GoalChoiceF ts) = GoalChoice <$> sequence ts - go (DoneF rdm ) = pure (Done rdm) - go (FailF c fr ) = pure (Fail c fr) - - -- What to do for package nodes ... - goP :: QPN -> QGoalReasonChain -> Scope -> I -> Validate (Tree QGoalReasonChain) -> Validate (Tree QGoalReasonChain) - goP qpn@(Q _pp pn) gr sc i r = do - PA ppa pfa psa <- asks pa -- obtain current preassignment - idx <- asks index -- obtain the index - svd <- asks saved -- obtain saved dependencies - let (PInfo deps _ _ mfr) = idx ! pn ! 
i -- obtain dependencies and index-dictated exclusions introduced by the choice - let qdeps = L.map (fmap (qualify sc)) deps -- qualify the deps in the current scope - -- the new active constraints are given by the instance we have chosen, - -- plus the dependency information we have for that instance - let goal = Goal (P qpn) gr - let newactives = Dep qpn (Fixed i goal) : L.map (resetGoal goal) (extractDeps pfa psa qdeps) - -- We now try to extend the partial assignment with the new active constraints. - let mnppa = extend (P qpn) ppa newactives - -- In case we continue, we save the scoped dependencies - let nsvd = M.insert qpn qdeps svd - case mfr of - Just fr -> -- The index marks this as an invalid choice. We can stop. - return (Fail (toConflictSet goal) fr) - _ -> case mnppa of - Left (c, d) -> -- We have an inconsistency. We can stop. - return (Fail c (Conflicting d)) - Right nppa -> -- We have an updated partial assignment for the recursive validation. - local (\ s -> s { pa = PA nppa pfa psa, saved = nsvd }) r - - -- What to do for flag nodes ... - goF :: QFN -> QGoalReasonChain -> Bool -> Validate (Tree QGoalReasonChain) -> Validate (Tree QGoalReasonChain) - goF qfn@(FN (PI qpn _i) _f) gr b r = do - PA ppa pfa psa <- asks pa -- obtain current preassignment - svd <- asks saved -- obtain saved dependencies - -- Note that there should be saved dependencies for the package in question, - -- because while building, we do not choose flags before we see the packages - -- that define them. - let qdeps = svd ! qpn - -- We take the *saved* dependencies, because these have been qualified in the - -- correct scope. - -- - -- Extend the flag assignment - let npfa = M.insert qfn b pfa - -- We now try to get the new active dependencies we might learn about because - -- we have chosen a new flag. - let newactives = extractNewDeps (F qfn) gr b npfa psa qdeps - -- As in the package case, we try to extend the partial assignment. 
- case extend (F qfn) ppa newactives of - Left (c, d) -> return (Fail c (Conflicting d)) -- inconsistency found - Right nppa -> local (\ s -> s { pa = PA nppa npfa psa }) r - - -- What to do for stanza nodes (similar to flag nodes) ... - goS :: QSN -> QGoalReasonChain -> Bool -> Validate (Tree QGoalReasonChain) -> Validate (Tree QGoalReasonChain) - goS qsn@(SN (PI qpn _i) _f) gr b r = do - PA ppa pfa psa <- asks pa -- obtain current preassignment - svd <- asks saved -- obtain saved dependencies - -- Note that there should be saved dependencies for the package in question, - -- because while building, we do not choose flags before we see the packages - -- that define them. - let qdeps = svd ! qpn - -- We take the *saved* dependencies, because these have been qualified in the - -- correct scope. - -- - -- Extend the flag assignment - let npsa = M.insert qsn b psa - -- We now try to get the new active dependencies we might learn about because - -- we have chosen a new flag. - let newactives = extractNewDeps (S qsn) gr b pfa npsa qdeps - -- As in the package case, we try to extend the partial assignment. - case extend (S qsn) ppa newactives of - Left (c, d) -> return (Fail c (Conflicting d)) -- inconsistency found - Right nppa -> local (\ s -> s { pa = PA nppa pfa npsa }) r - --- | We try to extract as many concrete dependencies from the given flagged --- dependencies as possible. We make use of all the flag knowledge we have --- already acquired. -extractDeps :: FAssignment -> SAssignment -> FlaggedDeps QPN -> [Dep QPN] -extractDeps fa sa deps = do - d <- deps - case d of - Simple sd -> return sd - Flagged qfn _ td fd -> case M.lookup qfn fa of - Nothing -> mzero - Just True -> extractDeps fa sa td - Just False -> extractDeps fa sa fd - Stanza qsn td -> case M.lookup qsn sa of - Nothing -> mzero - Just True -> extractDeps fa sa td - Just False -> [] - --- | We try to find new dependencies that become available due to the given --- flag or stanza choice. 
We therefore look for the choice in question, and then call --- 'extractDeps' for everything underneath. -extractNewDeps :: Var QPN -> QGoalReasonChain -> Bool -> FAssignment -> SAssignment -> FlaggedDeps QPN -> [Dep QPN] -extractNewDeps v gr b fa sa = go - where - go deps = do - d <- deps - case d of - Simple _ -> mzero - Flagged qfn' _ td fd - | v == F qfn' -> L.map (resetGoal (Goal v gr)) $ - if b then extractDeps fa sa td else extractDeps fa sa fd - | otherwise -> case M.lookup qfn' fa of - Nothing -> mzero - Just True -> go td - Just False -> go fd - Stanza qsn' td - | v == S qsn' -> L.map (resetGoal (Goal v gr)) $ - if b then extractDeps fa sa td else [] - | otherwise -> case M.lookup qsn' sa of - Nothing -> mzero - Just True -> go td - Just False -> [] - --- | Interface. -validateTree :: Index -> Tree (QGoalReasonChain, Scope) -> Tree QGoalReasonChain -validateTree idx t = runReader (validate t) (VS idx M.empty (PA M.empty M.empty M.empty)) diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/Version.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/Version.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular/Version.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular/Version.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,42 +0,0 @@ -module Distribution.Client.Dependency.Modular.Version where - -import qualified Distribution.Version as CV -- from Cabal -import Distribution.Text -- from Cabal - --- | Preliminary type for versions. -type Ver = CV.Version - --- | String representation of a version. -showVer :: Ver -> String -showVer = display - --- | Version range. Consists of a lower and upper bound. -type VR = CV.VersionRange - --- | String representation of a version range. -showVR :: VR -> String -showVR = display - --- | Unconstrained version range. -anyVR :: VR -anyVR = CV.anyVersion - --- | Version range fixing a single version. 
-eqVR :: Ver -> VR -eqVR = CV.thisVersion - --- | Intersect two version ranges. -(.&&.) :: VR -> VR -> VR -(.&&.) = CV.intersectVersionRanges - --- | Simplify a version range. -simplifyVR :: VR -> VR -simplifyVR = CV.simplifyVersionRange - --- | Checking a version against a version range. -checkVR :: VR -> Ver -> Bool -checkVR = flip CV.withinRange - --- | Make a version number. -mkV :: [Int] -> Ver -mkV xs = CV.Version xs [] diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Modular.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Modular.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,58 +0,0 @@ -module Distribution.Client.Dependency.Modular - ( modularResolver, SolverConfig(..)) where - --- Here, we try to map between the external cabal-install solver --- interface and the internal interface that the solver actually --- expects. There are a number of type conversions to perform: we --- have to convert the package indices to the uniform index used --- by the solver; we also have to convert the initial constraints; --- and finally, we have to convert back the resulting install --- plan. - -import Data.Map as M - ( fromListWith ) -import Distribution.Client.Dependency.Modular.Assignment - ( Assignment, toCPs ) -import Distribution.Client.Dependency.Modular.Dependency - ( RevDepMap ) -import Distribution.Client.Dependency.Modular.ConfiguredConversion - ( convCP ) -import Distribution.Client.Dependency.Modular.IndexConversion - ( convPIs ) -import Distribution.Client.Dependency.Modular.Log - ( logToProgress ) -import Distribution.Client.Dependency.Modular.Package - ( PN ) -import Distribution.Client.Dependency.Modular.Solver - ( SolverConfig(..), solve ) -import Distribution.Client.Dependency.Types - ( DependencyResolver, PackageConstraint(..) 
) -import Distribution.Client.InstallPlan - ( PlanPackage ) -import Distribution.System - ( Platform(..) ) - --- | Ties the two worlds together: classic cabal-install vs. the modular --- solver. Performs the necessary translations before and after. -modularResolver :: SolverConfig -> DependencyResolver -modularResolver sc (Platform arch os) cinfo iidx sidx pprefs pcs pns = - fmap (uncurry postprocess) $ -- convert install plan - logToProgress (maxBackjumps sc) $ -- convert log format into progress format - solve sc idx pprefs gcs pns - where - -- Indices have to be converted into solver-specific uniform index. - idx = convPIs os arch cinfo (shadowPkgs sc) (strongFlags sc) iidx sidx - -- Constraints have to be converted into a finite map indexed by PN. - gcs = M.fromListWith (++) (map (\ pc -> (pcName pc, [pc])) pcs) - - -- Results have to be converted into an install plan. - postprocess :: Assignment -> RevDepMap -> [PlanPackage] - postprocess a rdm = map (convCP iidx sidx) (toCPs a rdm) - - -- Helper function to extract the PN from a constraint. 
- pcName :: PackageConstraint -> PN - pcName (PackageConstraintVersion pn _) = pn - pcName (PackageConstraintInstalled pn ) = pn - pcName (PackageConstraintSource pn ) = pn - pcName (PackageConstraintFlags pn _) = pn - pcName (PackageConstraintStanzas pn _) = pn diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/TopDown/Constraints.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/TopDown/Constraints.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/TopDown/Constraints.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/TopDown/Constraints.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,603 +0,0 @@ -{-# LANGUAGE CPP #-} ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.Dependency.TopDown.Constraints --- Copyright : (c) Duncan Coutts 2008 --- License : BSD-like --- --- Maintainer : duncan@community.haskell.org --- Stability : provisional --- Portability : portable --- --- A set of satisfiable constraints on a set of packages. ------------------------------------------------------------------------------ -module Distribution.Client.Dependency.TopDown.Constraints ( - Constraints, - empty, - packages, - choices, - isPaired, - - addTarget, - constrain, - Satisfiable(..), - conflicting, - ) where - -import Distribution.Client.Dependency.TopDown.Types -import qualified Distribution.Client.PackageIndex as PackageIndex -import Distribution.Client.PackageIndex (PackageIndex) -import Distribution.Package - ( PackageName, PackageId, PackageIdentifier(..) - , Package(packageId), packageName, packageVersion - , Dependency, PackageFixedDeps(depends) ) -import Distribution.Version - ( Version ) -import Distribution.Client.Utils - ( mergeBy, MergeResult(..) 
) - -#if !MIN_VERSION_base(4,8,0) -import Data.Monoid - ( Monoid(mempty) ) -#endif -import Data.Either - ( partitionEithers ) -import qualified Data.Map as Map -import Data.Map (Map) -import qualified Data.Set as Set -import Data.Set (Set) -import Control.Exception - ( assert ) - - --- | A set of satisfiable constraints on a set of packages. --- --- The 'Constraints' type keeps track of a set of targets (identified by --- package name) that we know that we need. It also keeps track of a set of --- constraints over all packages in the environment. --- --- It maintains the guarantee that, for the target set, the constraints are --- satisfiable, meaning that there is at least one instance available for each --- package name that satisfies the constraints on that package name. --- --- Note that it is possible to over-constrain a package in the environment that --- is not in the target set -- the satisfiability guarantee is only maintained --- for the target set. This is useful because it allows us to exclude packages --- without needing to know if it would ever be needed or not (e.g. allows --- excluding broken installed packages). --- --- Adding a constraint for a target package can fail if it would mean that --- there are no remaining choices. --- --- Adding a constraint for package that is not a target never fails. --- --- Adding a new target package can fail if that package already has conflicting --- constraints. --- -data Constraints installed source reason - = Constraints - - -- | Targets that we know we need. This is the set for which we - -- guarantee the constraints are satisfiable. - !(Set PackageName) - - -- | The available/remaining set. These are packages that have available - -- choices remaining. This is guaranteed to cover the target packages, - -- but can also cover other packages in the environment. New targets can - -- only be added if there are available choices remaining for them. 
- !(PackageIndex (InstalledOrSource installed source)) - - -- | The excluded set. Choices that we have excluded by applying - -- constraints. Excluded choices are tagged with the reason. - !(PackageIndex (ExcludedPkg (InstalledOrSource installed source) reason)) - - -- | Paired choices, this is an ugly hack. - !(Map PackageName (Version, Version)) - - -- | Purely for the invariant, we keep a copy of the original index - !(PackageIndex (InstalledOrSource installed source)) - - --- | Reasons for excluding all, or some choices for a package version. --- --- Each package version can have a source instance, an installed instance or --- both. We distinguish reasons for constraints that excluded both instances, --- from reasons for constraints that excluded just one instance. --- -data ExcludedPkg pkg reason - = ExcludedPkg pkg - [reason] -- ^ reasons for excluding both source and installed instances - [reason] -- ^ reasons for excluding the installed instance - [reason] -- ^ reasons for excluding the source instance - -instance Package pkg => Package (ExcludedPkg pkg reason) where - packageId (ExcludedPkg p _ _ _) = packageId p - - --- | There is a conservation of packages property. Packages are never gained or --- lost, they just transfer from the remaining set to the excluded set. 
--- -invariant :: (Package installed, Package source) - => Constraints installed source a -> Bool -invariant (Constraints targets available excluded _ original) = - - -- Relationship between available, excluded and original - all check merged - - -- targets is a subset of available - && all (PackageIndex.elemByPackageName available) (Set.elems targets) - - where - merged = mergeBy (\a b -> packageId a `compare` mergedPackageId b) - (PackageIndex.allPackages original) - (mergeBy (\a b -> packageId a `compare` packageId b) - (PackageIndex.allPackages available) - (PackageIndex.allPackages excluded)) - where - mergedPackageId (OnlyInLeft p ) = packageId p - mergedPackageId (OnlyInRight p) = packageId p - mergedPackageId (InBoth p _) = packageId p - - -- If the package was originally installed only, then - check (InBoth (InstalledOnly _) cur) = case cur of - -- now it's either still remaining as installed only - OnlyInLeft (InstalledOnly _) -> True - -- or it has been excluded - OnlyInRight (ExcludedPkg (InstalledOnly _) [] (_:_) []) -> True - _ -> False - - -- If the package was originally available only, then - check (InBoth (SourceOnly _) cur) = case cur of - -- now it's either still remaining as source only - OnlyInLeft (SourceOnly _) -> True - -- or it has been excluded - OnlyInRight (ExcludedPkg (SourceOnly _) [] [] (_:_)) -> True - _ -> False - - -- If the package was originally installed and source, then - check (InBoth (InstalledAndSource _ _) cur) = case cur of - -- We can have both remaining: - OnlyInLeft (InstalledAndSource _ _) -> True - - -- both excluded, in particular it can have had the just source or - -- installed excluded and later had both excluded so we do not mind if - -- the source or installed excluded is empty or non-empty. 
- OnlyInRight (ExcludedPkg (InstalledAndSource _ _) _ _ _) -> True - - -- the installed remaining and the source excluded: - InBoth (InstalledOnly _) - (ExcludedPkg (SourceOnly _) [] [] (_:_)) -> True - - -- the source remaining and the installed excluded: - InBoth (SourceOnly _) - (ExcludedPkg (InstalledOnly _) [] (_:_) []) -> True - _ -> False - - check _ = False - - --- | An update to the constraints can move packages between the two piles --- but not gain or loose packages. -transitionsTo :: (Package installed, Package source) - => Constraints installed source a - -> Constraints installed source a -> Bool -transitionsTo constraints @(Constraints _ available excluded _ _) - constraints'@(Constraints _ available' excluded' _ _) = - - invariant constraints && invariant constraints' - && null availableGained && null excludedLost - && map (mapInstalledOrSource packageId packageId) availableLost - == map (mapInstalledOrSource packageId packageId) excludedGained - - where - (availableLost, availableGained) - = partitionEithers (foldr lostAndGained [] availableChange) - - (excludedLost, excludedGained) - = partitionEithers (foldr lostAndGained [] excludedChange) - - availableChange = - mergeBy (\a b -> packageId a `compare` packageId b) - (PackageIndex.allPackages available) - (PackageIndex.allPackages available') - - excludedChange = - mergeBy (\a b -> packageId a `compare` packageId b) - [ pkg | ExcludedPkg pkg _ _ _ <- PackageIndex.allPackages excluded ] - [ pkg | ExcludedPkg pkg _ _ _ <- PackageIndex.allPackages excluded' ] - - lostAndGained mr rest = case mr of - OnlyInLeft pkg -> Left pkg : rest - InBoth (InstalledAndSource pkg _) - (SourceOnly _) -> Left (InstalledOnly pkg) : rest - InBoth (InstalledAndSource _ pkg) - (InstalledOnly _) -> Left (SourceOnly pkg) : rest - InBoth (SourceOnly _) - (InstalledAndSource pkg _) -> Right (InstalledOnly pkg) : rest - InBoth (InstalledOnly _) - (InstalledAndSource _ pkg) -> Right (SourceOnly pkg) : rest - OnlyInRight pkg -> 
Right pkg : rest - _ -> rest - - mapInstalledOrSource f g pkg = case pkg of - InstalledOnly a -> InstalledOnly (f a) - SourceOnly b -> SourceOnly (g b) - InstalledAndSource a b -> InstalledAndSource (f a) (g b) - - --- | We construct 'Constraints' with an initial 'PackageIndex' of all the --- packages available. --- -empty :: (PackageFixedDeps installed, Package source) - => PackageIndex installed - -> PackageIndex source - -> Constraints installed source reason -empty installed source = - Constraints targets pkgs excluded pairs pkgs - where - targets = mempty - excluded = mempty - pkgs = PackageIndex.fromList - . map toInstalledOrSource - $ mergeBy (\a b -> packageId a `compare` packageId b) - (PackageIndex.allPackages installed) - (PackageIndex.allPackages source) - toInstalledOrSource (OnlyInLeft i ) = InstalledOnly i - toInstalledOrSource (OnlyInRight a) = SourceOnly a - toInstalledOrSource (InBoth i a) = InstalledAndSource i a - - -- pick up cases like base-3 and 4 where one version depends on the other: - pairs = Map.fromList - [ (name, (packageVersion pkgid1, packageVersion pkgid2)) - | [pkg1, pkg2] <- PackageIndex.allPackagesByName installed - , let name = packageName pkg1 - pkgid1 = packageId pkg1 - pkgid2 = packageId pkg2 - , any ((pkgid1==) . packageId) (depends pkg2) - || any ((pkgid2==) . packageId) (depends pkg1) ] - - --- | The package targets. --- -packages :: (Package installed, Package source) - => Constraints installed source reason - -> Set PackageName -packages (Constraints ts _ _ _ _) = ts - - --- | The package choices that are still available. 
--- -choices :: (Package installed, Package source) - => Constraints installed source reason - -> PackageIndex (InstalledOrSource installed source) -choices (Constraints _ available _ _ _) = available - -isPaired :: (Package installed, Package source) - => Constraints installed source reason - -> PackageId -> Maybe PackageId -isPaired (Constraints _ _ _ pairs _) (PackageIdentifier name version) = - case Map.lookup name pairs of - Just (v1, v2) - | version == v1 -> Just (PackageIdentifier name v2) - | version == v2 -> Just (PackageIdentifier name v1) - _ -> Nothing - - -data Satisfiable constraints discarded reason - = Satisfiable constraints discarded - | Unsatisfiable - | ConflictsWith [(PackageId, [reason])] - - -addTarget :: (Package installed, Package source) - => PackageName - -> Constraints installed source reason - -> Satisfiable (Constraints installed source reason) - () reason -addTarget pkgname - constraints@(Constraints targets available excluded paired original) - - -- If it's already a target then there's no change - | pkgname `Set.member` targets - = Satisfiable constraints () - - -- If there is some possible choice available for this target then we're ok - | PackageIndex.elemByPackageName available pkgname - = let targets' = Set.insert pkgname targets - constraints' = Constraints targets' available excluded paired original - in assert (constraints `transitionsTo` constraints') $ - Satisfiable constraints' () - - -- If it's not available and it is excluded then we return the conflicts - | PackageIndex.elemByPackageName excluded pkgname - = ConflictsWith conflicts - - -- Otherwise, it's not available and it has not been excluded so the - -- package is simply completely unknown. 
- | otherwise - = Unsatisfiable - - where - conflicts = - [ (packageId pkg, reasons) - | let excludedChoices = PackageIndex.lookupPackageName excluded pkgname - , ExcludedPkg pkg isReasons iReasons sReasons <- excludedChoices - , let reasons = isReasons ++ iReasons ++ sReasons ] - - -constrain :: (Package installed, Package source) - => PackageName -- ^ which package to constrain - -> (Version -> Bool -> Bool) -- ^ the constraint test - -> reason -- ^ the reason for the constraint - -> Constraints installed source reason - -> Satisfiable (Constraints installed source reason) - [PackageId] reason -constrain pkgname constraint reason - constraints@(Constraints targets available excluded paired original) - - | pkgname `Set.member` targets && not anyRemaining - = if null conflicts then Unsatisfiable - else ConflictsWith conflicts - - | otherwise - = let constraints' = Constraints targets available' excluded' paired original - in assert (constraints `transitionsTo` constraints') $ - Satisfiable constraints' (map packageId newExcluded) - - where - -- This tells us if any packages would remain at all for this package name if - -- we applied this constraint. This amounts to checking if any package - -- satisfies the given constraint, including version range and installation - -- status. 
- -- - (available', excluded', newExcluded, anyRemaining, conflicts) = - updatePkgsStatus - available excluded - [] False [] - (mergeBy (\pkg pkg' -> packageVersion pkg `compare` packageVersion pkg') - (PackageIndex.lookupPackageName available pkgname) - (PackageIndex.lookupPackageName excluded pkgname)) - - testConstraint pkg = - let ver = packageVersion pkg in - case Map.lookup (packageName pkg) paired of - - Just (v1, v2) - | ver == v1 || ver == v2 - -> case pkg of - InstalledOnly ipkg -> InstalledOnly (ipkg, iOk) - SourceOnly spkg -> SourceOnly (spkg, sOk) - InstalledAndSource ipkg spkg -> - InstalledAndSource (ipkg, iOk) (spkg, sOk) - where - iOk = constraint v1 True || constraint v2 True - sOk = constraint v1 False || constraint v2 False - - _ -> case pkg of - InstalledOnly ipkg -> InstalledOnly (ipkg, iOk) - SourceOnly spkg -> SourceOnly (spkg, sOk) - InstalledAndSource ipkg spkg -> - InstalledAndSource (ipkg, iOk) (spkg, sOk) - where - iOk = constraint ver True - sOk = constraint ver False - - -- For the info about available and excluded versions of the package in - -- question, update the info given the current constraint - -- - -- We update the available package map and the excluded package map - -- we also collect: - -- * the change in available packages (for logging) - -- * whether there are any remaining choices - -- * any constraints that conflict with the current constraint - - updatePkgsStatus _ _ nePkgs ok cs _ - | seq nePkgs $ seq ok $ seq cs False = undefined - - updatePkgsStatus aPkgs ePkgs nePkgs ok cs [] - = (aPkgs, ePkgs, reverse nePkgs, ok, reverse cs) - - updatePkgsStatus aPkgs ePkgs nePkgs ok cs (pkg:pkgs) = - let (aPkgs', ePkgs', mnePkg, ok', mc) = updatePkgStatus aPkgs ePkgs pkg - nePkgs' = maybeCons mnePkg nePkgs - cs' = maybeCons mc cs - in updatePkgsStatus aPkgs' ePkgs' nePkgs' (ok' || ok) cs' pkgs - - maybeCons Nothing xs = xs - maybeCons (Just x) xs = x:xs - - - -- For the info about an available or excluded version of the package 
in - -- question, update the info given the current constraint. - -- - updatePkgStatus aPkgs ePkgs pkg = - case viewPackageStatus pkg of - AllAvailable (InstalledOnly (aiPkg, False)) -> - removeAvailable False - (InstalledOnly aiPkg) - (PackageIndex.deletePackageId pkgid) - (ExcludedPkg (InstalledOnly aiPkg) [] [reason] []) - Nothing - - AllAvailable (SourceOnly (asPkg, False)) -> - removeAvailable False - (SourceOnly asPkg) - (PackageIndex.deletePackageId pkgid) - (ExcludedPkg (SourceOnly asPkg) [] [] [reason]) - Nothing - - AllAvailable (InstalledAndSource (aiPkg, False) (asPkg, False)) -> - removeAvailable False - (InstalledAndSource aiPkg asPkg) - (PackageIndex.deletePackageId pkgid) - (ExcludedPkg (InstalledAndSource aiPkg asPkg) [reason] [] []) - Nothing - - AllAvailable (InstalledAndSource (aiPkg, True) (asPkg, False)) -> - removeAvailable True - (SourceOnly asPkg) - (PackageIndex.insert (InstalledOnly aiPkg)) - (ExcludedPkg (SourceOnly asPkg) [] [] [reason]) - Nothing - - AllAvailable (InstalledAndSource (aiPkg, False) (asPkg, True)) -> - removeAvailable True - (InstalledOnly aiPkg) - (PackageIndex.insert (SourceOnly asPkg)) - (ExcludedPkg (InstalledOnly aiPkg) [] [reason] []) - Nothing - - AllAvailable _ -> noChange True Nothing - - AvailableExcluded (aiPkg, False) (ExcludedPkg (esPkg, False) _ _ srs) -> - removeAvailable False - (InstalledOnly aiPkg) - (PackageIndex.deletePackageId pkgid) - (ExcludedPkg (InstalledAndSource aiPkg esPkg) [reason] [] srs) - Nothing - - AvailableExcluded (_aiPkg, True) (ExcludedPkg (esPkg, False) _ _ srs) -> - addExtraExclusion True - (ExcludedPkg (SourceOnly esPkg) [] [] (reason:srs)) - Nothing - - AvailableExcluded (aiPkg, False) (ExcludedPkg (esPkg, True) _ _ srs) -> - removeAvailable True - (InstalledOnly aiPkg) - (PackageIndex.deletePackageId pkgid) - (ExcludedPkg (InstalledAndSource aiPkg esPkg) [] [reason] srs) - (Just (pkgid, srs)) - - AvailableExcluded (_aiPkg, True) (ExcludedPkg (_esPkg, True) _ _ srs) -> - noChange 
True - (Just (pkgid, srs)) - - ExcludedAvailable (ExcludedPkg (eiPkg, False) _ irs _) (asPkg, False) -> - removeAvailable False - (SourceOnly asPkg) - (PackageIndex.deletePackageId pkgid) - (ExcludedPkg (InstalledAndSource eiPkg asPkg) [reason] irs []) - Nothing - - ExcludedAvailable (ExcludedPkg (eiPkg, True) _ irs _) (asPkg, False) -> - removeAvailable False - (SourceOnly asPkg) - (PackageIndex.deletePackageId pkgid) - (ExcludedPkg (InstalledAndSource eiPkg asPkg) [] irs [reason]) - (Just (pkgid, irs)) - - ExcludedAvailable (ExcludedPkg (eiPkg, False) _ irs _) (_asPkg, True) -> - addExtraExclusion True - (ExcludedPkg (InstalledOnly eiPkg) [] (reason:irs) []) - Nothing - - ExcludedAvailable (ExcludedPkg (_eiPkg, True) _ irs _) (_asPkg, True) -> - noChange True - (Just (pkgid, irs)) - - AllExcluded (ExcludedPkg (InstalledOnly (eiPkg, False)) _ irs _) -> - addExtraExclusion False - (ExcludedPkg (InstalledOnly eiPkg) [] (reason:irs) []) - Nothing - - AllExcluded (ExcludedPkg (InstalledOnly (_eiPkg, True)) _ irs _) -> - noChange False - (Just (pkgid, irs)) - - AllExcluded (ExcludedPkg (SourceOnly (esPkg, False)) _ _ srs) -> - addExtraExclusion False - (ExcludedPkg (SourceOnly esPkg) [] [] (reason:srs)) - Nothing - - AllExcluded (ExcludedPkg (SourceOnly (_esPkg, True)) _ _ srs) -> - noChange False - (Just (pkgid, srs)) - - AllExcluded (ExcludedPkg (InstalledAndSource (eiPkg, False) (esPkg, False)) isrs irs srs) -> - addExtraExclusion False - (ExcludedPkg (InstalledAndSource eiPkg esPkg) (reason:isrs) irs srs) - Nothing - - AllExcluded (ExcludedPkg (InstalledAndSource (eiPkg, True) (esPkg, False)) isrs irs srs) -> - addExtraExclusion False - (ExcludedPkg (InstalledAndSource eiPkg esPkg) isrs irs (reason:srs)) - (Just (pkgid, irs)) - - AllExcluded (ExcludedPkg (InstalledAndSource (eiPkg, False) (esPkg, True)) isrs irs srs) -> - addExtraExclusion False - (ExcludedPkg (InstalledAndSource eiPkg esPkg) isrs (reason:irs) srs) - (Just (pkgid, srs)) - - AllExcluded (ExcludedPkg 
(InstalledAndSource (_eiPkg, True) (_esPkg, True)) isrs irs srs) -> - noChange False - (Just (pkgid, isrs ++ irs ++ srs)) - - where - removeAvailable ok nePkg adjustAvailable ePkg c = - let aPkgs' = adjustAvailable aPkgs - ePkgs' = PackageIndex.insert ePkg ePkgs - in aPkgs' `seq` ePkgs' `seq` - (aPkgs', ePkgs', Just nePkg, ok, c) - - addExtraExclusion ok ePkg c = - let ePkgs' = PackageIndex.insert ePkg ePkgs - in ePkgs' `seq` - (aPkgs, ePkgs', Nothing, ok, c) - - noChange ok c = - (aPkgs, ePkgs, Nothing, ok, c) - - pkgid = case pkg of OnlyInLeft p -> packageId p - OnlyInRight p -> packageId p - InBoth p _ -> packageId p - - - viewPackageStatus - :: (Package installed, Package source) - => MergeResult (InstalledOrSource installed source) - (ExcludedPkg (InstalledOrSource installed source) reason) - -> PackageStatus (installed, Bool) (source, Bool) reason - viewPackageStatus merged = - case merged of - OnlyInLeft aPkg -> - AllAvailable (testConstraint aPkg) - - OnlyInRight (ExcludedPkg ePkg isrs irs srs) -> - AllExcluded (ExcludedPkg (testConstraint ePkg) isrs irs srs) - - InBoth (InstalledOnly aiPkg) - (ExcludedPkg (SourceOnly esPkg) [] [] srs) -> - case testConstraint (InstalledAndSource aiPkg esPkg) of - InstalledAndSource (aiPkg', iOk) (esPkg', sOk) -> - AvailableExcluded (aiPkg', iOk) (ExcludedPkg (esPkg', sOk) [] [] srs) - _ -> impossible - - InBoth (SourceOnly asPkg) - (ExcludedPkg (InstalledOnly eiPkg) [] irs []) -> - case testConstraint (InstalledAndSource eiPkg asPkg) of - InstalledAndSource (eiPkg', iOk) (asPkg', sOk) -> - ExcludedAvailable (ExcludedPkg (eiPkg', iOk) [] irs []) (asPkg', sOk) - _ -> impossible - _ -> impossible - where - impossible = error "impossible: viewPackageStatus invariant violation" - --- A intermediate structure that enumerates all the possible cases given the --- invariant. 
This helps us to get simpler and complete pattern matching in --- updatePkg above --- -data PackageStatus installed source reason - = AllAvailable (InstalledOrSource installed source) - | AllExcluded (ExcludedPkg (InstalledOrSource installed source) reason) - | AvailableExcluded installed (ExcludedPkg source reason) - | ExcludedAvailable (ExcludedPkg installed reason) source - - -conflicting :: (Package installed, Package source) - => Constraints installed source reason - -> Dependency - -> [(PackageId, [reason])] -conflicting (Constraints _ _ excluded _ _) dep = - [ (packageId pkg, reasonsAll ++ reasonsAvail ++ reasonsInstalled) --TODO - | ExcludedPkg pkg reasonsAll reasonsAvail reasonsInstalled <- - PackageIndex.lookupDependency excluded dep ] diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/TopDown/Types.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/TopDown/Types.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/TopDown/Types.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/TopDown/Types.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,91 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.Dependency.TopDown.Types --- Copyright : (c) Duncan Coutts 2008 --- License : BSD-like --- --- Maintainer : cabal-devel@haskell.org --- Stability : provisional --- Portability : portable --- --- Types for the top-down dependency resolver. 
------------------------------------------------------------------------------ -module Distribution.Client.Dependency.TopDown.Types where - -import Distribution.Client.Types - ( SourcePackage(..), InstalledPackage, OptionalStanza ) - -import Distribution.Package - ( PackageIdentifier, Dependency - , Package(packageId), PackageFixedDeps(depends) ) -import Distribution.PackageDescription - ( FlagAssignment ) - --- ------------------------------------------------------------ --- * The various kinds of packages --- ------------------------------------------------------------ - -type SelectablePackage - = InstalledOrSource InstalledPackageEx UnconfiguredPackage - -type SelectedPackage - = InstalledOrSource InstalledPackageEx SemiConfiguredPackage - -data InstalledOrSource installed source - = InstalledOnly installed - | SourceOnly source - | InstalledAndSource installed source - deriving Eq - -type TopologicalSortNumber = Int - -data InstalledPackageEx - = InstalledPackageEx - InstalledPackage - !TopologicalSortNumber - [PackageIdentifier] -- transitive closure of installed deps - -data UnconfiguredPackage - = UnconfiguredPackage - SourcePackage - !TopologicalSortNumber - FlagAssignment - [OptionalStanza] - -data SemiConfiguredPackage - = SemiConfiguredPackage - SourcePackage -- package info - FlagAssignment -- total flag assignment for the package - [OptionalStanza] -- enabled optional stanzas - [Dependency] -- dependencies we end up with when we apply - -- the flag assignment - -instance Package InstalledPackageEx where - packageId (InstalledPackageEx p _ _) = packageId p - -instance PackageFixedDeps InstalledPackageEx where - depends (InstalledPackageEx _ _ deps) = deps - -instance Package UnconfiguredPackage where - packageId (UnconfiguredPackage p _ _ _) = packageId p - -instance Package SemiConfiguredPackage where - packageId (SemiConfiguredPackage p _ _ _) = packageId p - -instance (Package installed, Package source) - => Package (InstalledOrSource installed 
source) where - packageId (InstalledOnly p ) = packageId p - packageId (SourceOnly p ) = packageId p - packageId (InstalledAndSource p _) = packageId p - - --- | We can have constraints on selecting just installed or just source --- packages. --- --- In particular, installed packages can only depend on other installed --- packages while packages that are not yet installed but which we plan to --- install can depend on installed or other not-yet-installed packages. --- -data InstalledConstraint = InstalledConstraint - | SourceConstraint - deriving (Eq, Show) diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/TopDown.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/TopDown.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/TopDown.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/TopDown.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,946 +0,0 @@ -{-# LANGUAGE CPP #-} ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.Dependency.Types --- Copyright : (c) Duncan Coutts 2008 --- License : BSD-like --- --- Maintainer : cabal-devel@haskell.org --- Stability : provisional --- Portability : portable --- --- Common types for dependency resolution. ------------------------------------------------------------------------------ -module Distribution.Client.Dependency.TopDown ( - topDownResolver - ) where - -import Distribution.Client.Dependency.TopDown.Types -import qualified Distribution.Client.Dependency.TopDown.Constraints as Constraints -import Distribution.Client.Dependency.TopDown.Constraints - ( Satisfiable(..) ) -import Distribution.Client.IndexUtils - ( convert ) -import qualified Distribution.Client.InstallPlan as InstallPlan -import Distribution.Client.InstallPlan - ( PlanPackage(..) ) -import Distribution.Client.Types - ( SourcePackage(..), ConfiguredPackage(..), InstalledPackage(..) 
- , enableStanzas ) -import Distribution.Client.Dependency.Types - ( DependencyResolver, PackageConstraint(..) - , PackagePreferences(..), InstalledPreference(..) - , Progress(..), foldProgress ) - -import qualified Distribution.Client.PackageIndex as PackageIndex -import Distribution.Client.PackageIndex (PackageIndex) -import Distribution.Package - ( PackageName(..), PackageId, Package(..), packageVersion, packageName - , Dependency(Dependency), thisPackageVersion - , simplifyDependency, PackageFixedDeps(depends) ) -import Distribution.PackageDescription - ( PackageDescription(buildDepends) ) -import Distribution.Client.PackageUtils - ( externalBuildDepends ) -import Distribution.PackageDescription.Configuration - ( finalizePackageDescription, flattenPackageDescription ) -import Distribution.Version - ( VersionRange, withinRange, simplifyVersionRange - , UpperBound(..), asVersionIntervals ) -import Distribution.Compiler - ( CompilerInfo ) -import Distribution.System - ( Platform ) -import Distribution.Simple.Utils - ( equating, comparing ) -import Distribution.Text - ( display ) - -import Data.List - ( foldl', maximumBy, minimumBy, nub, sort, sortBy, groupBy ) -import Data.Maybe - ( fromJust, fromMaybe, catMaybes ) -#if !MIN_VERSION_base(4,8,0) -import Data.Monoid - ( Monoid(mempty) ) -#endif -import Control.Monad - ( guard ) -import qualified Data.Set as Set -import Data.Set (Set) -import qualified Data.Map as Map -import qualified Data.Graph as Graph -import qualified Data.Array as Array -import Control.Exception - ( assert ) - --- ------------------------------------------------------------ --- * Search state types --- ------------------------------------------------------------ - -type Constraints = Constraints.Constraints - InstalledPackageEx UnconfiguredPackage ExclusionReason -type SelectedPackages = PackageIndex SelectedPackage - --- ------------------------------------------------------------ --- * The search tree type --- 
------------------------------------------------------------ - -data SearchSpace inherited pkg - = ChoiceNode inherited [[(pkg, SearchSpace inherited pkg)]] - | Failure Failure - --- ------------------------------------------------------------ --- * Traverse a search tree --- ------------------------------------------------------------ - -explore :: (PackageName -> PackagePreferences) - -> SearchSpace (SelectedPackages, Constraints, SelectionChanges) - SelectablePackage - -> Progress Log Failure (SelectedPackages, Constraints) - -explore _ (Failure failure) = Fail failure -explore _ (ChoiceNode (s,c,_) []) = Done (s,c) -explore pref (ChoiceNode _ choices) = - case [ choice | [choice] <- choices ] of - ((_, node'):_) -> Step (logInfo node') (explore pref node') - [] -> Step (logInfo node') (explore pref node') - where - choice = minimumBy (comparing topSortNumber) choices - pkgname = packageName . fst . head $ choice - (_, node') = maximumBy (bestByPref pkgname) choice - where - topSortNumber choice = case fst (head choice) of - InstalledOnly (InstalledPackageEx _ i _) -> i - SourceOnly (UnconfiguredPackage _ i _ _) -> i - InstalledAndSource _ (UnconfiguredPackage _ i _ _) -> i - - bestByPref pkgname = case packageInstalledPreference of - PreferLatest -> - comparing (\(p,_) -> ( isPreferred p, packageId p)) - PreferInstalled -> - comparing (\(p,_) -> (isInstalled p, isPreferred p, packageId p)) - where - isInstalled (SourceOnly _) = False - isInstalled _ = True - isPreferred p = packageVersion p `withinRange` preferredVersions - (PackagePreferences preferredVersions packageInstalledPreference) - = pref pkgname - - logInfo node = Select selected discarded - where (selected, discarded) = case node of - Failure _ -> ([], []) - ChoiceNode (_,_,changes) _ -> changes - --- ------------------------------------------------------------ --- * Generate a search tree --- ------------------------------------------------------------ - -type ConfigurePackage = PackageIndex 
SelectablePackage - -> SelectablePackage - -> Either [Dependency] SelectedPackage - --- | (packages selected, packages discarded) -type SelectionChanges = ([SelectedPackage], [PackageId]) - -searchSpace :: ConfigurePackage - -> Constraints - -> SelectedPackages - -> SelectionChanges - -> Set PackageName - -> SearchSpace (SelectedPackages, Constraints, SelectionChanges) - SelectablePackage -searchSpace configure constraints selected changes next = - assert (Set.null (selectedSet `Set.intersection` next)) $ - assert (selectedSet `Set.isSubsetOf` Constraints.packages constraints) $ - assert (next `Set.isSubsetOf` Constraints.packages constraints) $ - - ChoiceNode (selected, constraints, changes) - [ [ (pkg, select name pkg) - | pkg <- PackageIndex.lookupPackageName available name ] - | name <- Set.elems next ] - where - available = Constraints.choices constraints - - selectedSet = Set.fromList (map packageName (PackageIndex.allPackages selected)) - - select name pkg = case configure available pkg of - Left missing -> Failure $ ConfigureFailed pkg - [ (dep, Constraints.conflicting constraints dep) - | dep <- missing ] - Right pkg' -> - case constrainDeps pkg' newDeps (addDeps constraints newPkgs) [] of - Left failure -> Failure failure - Right (constraints', newDiscarded) -> - searchSpace configure - constraints' selected' (newSelected, newDiscarded) next' - where - selected' = foldl' (flip PackageIndex.insert) selected newSelected - newSelected = - case Constraints.isPaired constraints (packageId pkg) of - Nothing -> [pkg'] - Just pkgid' -> [pkg', pkg''] - where - Just pkg'' = fmap (\(InstalledOnly p) -> InstalledOnly p) - (PackageIndex.lookupPackageId available pkgid') - - newPkgs = [ name' - | (Dependency name' _, _) <- newDeps - , null (PackageIndex.lookupPackageName selected' name') ] - newDeps = concatMap packageConstraints newSelected - next' = Set.delete name - $ foldl' (flip Set.insert) next newPkgs - -packageConstraints :: SelectedPackage -> [(Dependency, 
Bool)] -packageConstraints = either installedConstraints availableConstraints - . preferSource - where - preferSource (InstalledOnly pkg) = Left pkg - preferSource (SourceOnly pkg) = Right pkg - preferSource (InstalledAndSource _ pkg) = Right pkg - installedConstraints (InstalledPackageEx _ _ deps) = - [ (thisPackageVersion dep, True) - | dep <- deps ] - availableConstraints (SemiConfiguredPackage _ _ _ deps) = - [ (dep, False) | dep <- deps ] - -addDeps :: Constraints -> [PackageName] -> Constraints -addDeps = - foldr $ \pkgname cs -> - case Constraints.addTarget pkgname cs of - Satisfiable cs' () -> cs' - _ -> impossible "addDeps unsatisfiable" - -constrainDeps :: SelectedPackage -> [(Dependency, Bool)] -> Constraints - -> [PackageId] - -> Either Failure (Constraints, [PackageId]) -constrainDeps pkg [] cs discard = - case addPackageSelectConstraint (packageId pkg) cs of - Satisfiable cs' discard' -> Right (cs', discard' ++ discard) - _ -> impossible "constrainDeps unsatisfiable(1)" -constrainDeps pkg ((dep, installedConstraint):deps) cs discard = - case addPackageDependencyConstraint (packageId pkg) dep installedConstraint cs of - Satisfiable cs' discard' -> constrainDeps pkg deps cs' (discard' ++ discard) - Unsatisfiable -> impossible "constrainDeps unsatisfiable(2)" - ConflictsWith conflicts -> - Left (DependencyConflict pkg dep installedConstraint conflicts) - --- ------------------------------------------------------------ --- * The main algorithm --- ------------------------------------------------------------ - -search :: ConfigurePackage - -> (PackageName -> PackagePreferences) - -> Constraints - -> Set PackageName - -> Progress Log Failure (SelectedPackages, Constraints) -search configure pref constraints = - explore pref . 
searchSpace configure constraints mempty ([], []) - --- ------------------------------------------------------------ --- * The top level resolver --- ------------------------------------------------------------ - --- | The main exported resolver, with string logging and failure types to fit --- the standard 'DependencyResolver' interface. --- -topDownResolver :: DependencyResolver -topDownResolver platform cinfo installedPkgIndex sourcePkgIndex - preferences constraints targets = - mapMessages (topDownResolver' platform cinfo - (convert installedPkgIndex) sourcePkgIndex - preferences constraints targets) - where - mapMessages :: Progress Log Failure a -> Progress String String a - mapMessages = foldProgress (Step . showLog) (Fail . showFailure) Done - --- | The native resolver with detailed structured logging and failure types. --- -topDownResolver' :: Platform -> CompilerInfo - -> PackageIndex InstalledPackage - -> PackageIndex SourcePackage - -> (PackageName -> PackagePreferences) - -> [PackageConstraint] - -> [PackageName] - -> Progress Log Failure [PlanPackage] -topDownResolver' platform cinfo installedPkgIndex sourcePkgIndex - preferences constraints targets = - fmap (uncurry finalise) - . (\cs -> search configure preferences cs initialPkgNames) - =<< pruneBottomUp platform cinfo - =<< addTopLevelConstraints constraints - =<< addTopLevelTargets targets emptyConstraintSet - - where - configure = configurePackage platform cinfo - emptyConstraintSet :: Constraints - emptyConstraintSet = Constraints.empty - (annotateInstalledPackages topSortNumber installedPkgIndex') - (annotateSourcePackages constraints topSortNumber sourcePkgIndex') - (installedPkgIndex', sourcePkgIndex') = - selectNeededSubset installedPkgIndex sourcePkgIndex initialPkgNames - topSortNumber = topologicalSortNumbering installedPkgIndex' sourcePkgIndex' - - initialPkgNames = Set.fromList targets - - finalise selected' constraints' = - PackageIndex.allPackages - . fst . 
improvePlan installedPkgIndex' constraints' - . PackageIndex.fromList - $ finaliseSelectedPackages preferences selected' constraints' - - -addTopLevelTargets :: [PackageName] - -> Constraints - -> Progress a Failure Constraints -addTopLevelTargets [] cs = Done cs -addTopLevelTargets (pkg:pkgs) cs = - case Constraints.addTarget pkg cs of - Satisfiable cs' () -> addTopLevelTargets pkgs cs' - Unsatisfiable -> Fail (NoSuchPackage pkg) - ConflictsWith _conflicts -> impossible "addTopLevelTargets conflicts" - - -addTopLevelConstraints :: [PackageConstraint] -> Constraints - -> Progress Log Failure Constraints -addTopLevelConstraints [] cs = Done cs -addTopLevelConstraints (PackageConstraintFlags _ _ :deps) cs = - addTopLevelConstraints deps cs - -addTopLevelConstraints (PackageConstraintVersion pkg ver:deps) cs = - case addTopLevelVersionConstraint pkg ver cs of - Satisfiable cs' pkgids -> - Step (AppliedVersionConstraint pkg ver pkgids) - (addTopLevelConstraints deps cs') - - Unsatisfiable -> - Fail (TopLevelVersionConstraintUnsatisfiable pkg ver) - - ConflictsWith conflicts -> - Fail (TopLevelVersionConstraintConflict pkg ver conflicts) - -addTopLevelConstraints (PackageConstraintInstalled pkg:deps) cs = - case addTopLevelInstalledConstraint pkg cs of - Satisfiable cs' pkgids -> - Step (AppliedInstalledConstraint pkg InstalledConstraint pkgids) - (addTopLevelConstraints deps cs') - - Unsatisfiable -> - Fail (TopLevelInstallConstraintUnsatisfiable pkg InstalledConstraint) - - ConflictsWith conflicts -> - Fail (TopLevelInstallConstraintConflict pkg InstalledConstraint conflicts) - -addTopLevelConstraints (PackageConstraintSource pkg:deps) cs = - case addTopLevelSourceConstraint pkg cs of - Satisfiable cs' pkgids -> - Step (AppliedInstalledConstraint pkg SourceConstraint pkgids) - (addTopLevelConstraints deps cs') - - Unsatisfiable -> - Fail (TopLevelInstallConstraintUnsatisfiable pkg SourceConstraint) - - ConflictsWith conflicts -> - Fail 
(TopLevelInstallConstraintConflict pkg SourceConstraint conflicts) - -addTopLevelConstraints (PackageConstraintStanzas _ _ : deps) cs = - addTopLevelConstraints deps cs - --- | Add exclusion on available packages that cannot be configured. --- -pruneBottomUp :: Platform -> CompilerInfo - -> Constraints -> Progress Log Failure Constraints -pruneBottomUp platform comp constraints = - foldr prune Done (initialPackages constraints) constraints - - where - prune pkgs rest cs = foldr addExcludeConstraint rest unconfigurable cs - where - unconfigurable = - [ (pkg, missing) -- if necessary we could look up missing reasons - | (Just pkg', pkg) <- zip (map getSourcePkg pkgs) pkgs - , Left missing <- [configure cs pkg'] ] - - addExcludeConstraint (pkg, missing) rest cs = - let reason = ExcludedByConfigureFail missing in - case addPackageExcludeConstraint (packageId pkg) reason cs of - Satisfiable cs' [pkgid]| packageId pkg == pkgid - -> Step (ExcludeUnconfigurable pkgid) (rest cs') - Satisfiable _ _ -> impossible "pruneBottomUp satisfiable" - _ -> Fail $ ConfigureFailed pkg - [ (dep, Constraints.conflicting cs dep) - | dep <- missing ] - - configure cs (UnconfiguredPackage (SourcePackage _ pkg _ _) _ flags stanzas) = - finalizePackageDescription flags (dependencySatisfiable cs) - platform comp [] (enableStanzas stanzas pkg) - dependencySatisfiable cs = - not . null . PackageIndex.lookupDependency (Constraints.choices cs) - - -- collect each group of packages (by name) in reverse topsort order - initialPackages = - reverse - . sortBy (comparing (topSortNumber . head)) - . PackageIndex.allPackagesByName - . 
Constraints.choices - - topSortNumber (InstalledOnly (InstalledPackageEx _ i _)) = i - topSortNumber (SourceOnly (UnconfiguredPackage _ i _ _)) = i - topSortNumber (InstalledAndSource _ (UnconfiguredPackage _ i _ _)) = i - - getSourcePkg (InstalledOnly _ ) = Nothing - getSourcePkg (SourceOnly spkg) = Just spkg - getSourcePkg (InstalledAndSource _ spkg) = Just spkg - - -configurePackage :: Platform -> CompilerInfo -> ConfigurePackage -configurePackage platform cinfo available spkg = case spkg of - InstalledOnly ipkg -> Right (InstalledOnly ipkg) - SourceOnly apkg -> fmap SourceOnly (configure apkg) - InstalledAndSource ipkg apkg -> fmap (InstalledAndSource ipkg) - (configure apkg) - where - configure (UnconfiguredPackage apkg@(SourcePackage _ p _ _) _ flags stanzas) = - case finalizePackageDescription flags dependencySatisfiable - platform cinfo [] - (enableStanzas stanzas p) of - Left missing -> Left missing - Right (pkg, flags') -> Right $ - SemiConfiguredPackage apkg flags' stanzas (externalBuildDepends pkg) - - dependencySatisfiable = not . null . PackageIndex.lookupDependency available - --- | Annotate each installed packages with its set of transitive dependencies --- and its topological sort number. --- -annotateInstalledPackages :: (PackageName -> TopologicalSortNumber) - -> PackageIndex InstalledPackage - -> PackageIndex InstalledPackageEx -annotateInstalledPackages dfsNumber installed = PackageIndex.fromList - [ InstalledPackageEx pkg (dfsNumber (packageName pkg)) (transitiveDepends pkg) - | pkg <- PackageIndex.allPackages installed ] - where - transitiveDepends :: InstalledPackage -> [PackageId] - transitiveDepends = map (packageId . toPkg) . tail . Graph.reachable graph - . fromJust . toVertex . packageId - (graph, toPkg, toVertex) = PackageIndex.dependencyGraph installed - - --- | Annotate each available packages with its topological sort number and any --- user-supplied partial flag assignment. 
--- -annotateSourcePackages :: [PackageConstraint] - -> (PackageName -> TopologicalSortNumber) - -> PackageIndex SourcePackage - -> PackageIndex UnconfiguredPackage -annotateSourcePackages constraints dfsNumber sourcePkgIndex = - PackageIndex.fromList - [ UnconfiguredPackage pkg (dfsNumber name) (flagsFor name) (stanzasFor name) - | pkg <- PackageIndex.allPackages sourcePkgIndex - , let name = packageName pkg ] - where - flagsFor = fromMaybe [] . flip Map.lookup flagsMap - flagsMap = Map.fromList - [ (name, flags) - | PackageConstraintFlags name flags <- constraints ] - stanzasFor = fromMaybe [] . flip Map.lookup stanzasMap - stanzasMap = Map.fromListWith (++) - [ (name, stanzas) - | PackageConstraintStanzas name stanzas <- constraints ] - --- | One of the heuristics we use when guessing which path to take in the --- search space is an ordering on the choices we make. It's generally better --- to make decisions about packages higer in the dep graph first since they --- place constraints on packages lower in the dep graph. --- --- To pick them in that order we annotate each package with its topological --- sort number. So if package A depends on package B then package A will have --- a lower topological sort number than B and we'll make a choice about which --- version of A to pick before we make a choice about B (unless there is only --- one possible choice for B in which case we pick that immediately). --- --- To construct these topological sort numbers we combine and flatten the --- installed and source package sets. We consider only dependencies between --- named packages, not including versions and for not-yet-configured packages --- we look at all the possible dependencies, not just those under any single --- flag assignment. This means we can actually get impossible combinations of --- edges and even cycles, but that doesn't really matter here, it's only a --- heuristic. 
--- -topologicalSortNumbering :: PackageIndex InstalledPackage - -> PackageIndex SourcePackage - -> (PackageName -> TopologicalSortNumber) -topologicalSortNumbering installedPkgIndex sourcePkgIndex = - \pkgname -> let Just vertex = toVertex pkgname - in topologicalSortNumbers Array.! vertex - where - topologicalSortNumbers = Array.array (Array.bounds graph) - (zip (Graph.topSort graph) [0..]) - (graph, _, toVertex) = Graph.graphFromEdges $ - [ ((), packageName pkg, nub deps) - | pkgs@(pkg:_) <- PackageIndex.allPackagesByName installedPkgIndex - , let deps = [ packageName dep - | pkg' <- pkgs - , dep <- depends pkg' ] ] - ++ [ ((), packageName pkg, nub deps) - | pkgs@(pkg:_) <- PackageIndex.allPackagesByName sourcePkgIndex - , let deps = [ depName - | SourcePackage _ pkg' _ _ <- pkgs - , Dependency depName _ <- - buildDepends (flattenPackageDescription pkg') ] ] - --- | We don't need the entire index (which is rather large and costly if we --- force it by examining the whole thing). So trace out the maximul subset of --- each index that we could possibly ever need. Do this by flattening packages --- and looking at the names of all possible dependencies. 
--- -selectNeededSubset :: PackageIndex InstalledPackage - -> PackageIndex SourcePackage - -> Set PackageName - -> (PackageIndex InstalledPackage - ,PackageIndex SourcePackage) -selectNeededSubset installedPkgIndex sourcePkgIndex = select mempty mempty - where - select :: PackageIndex InstalledPackage - -> PackageIndex SourcePackage - -> Set PackageName - -> (PackageIndex InstalledPackage - ,PackageIndex SourcePackage) - select installedPkgIndex' sourcePkgIndex' remaining - | Set.null remaining = (installedPkgIndex', sourcePkgIndex') - | otherwise = select installedPkgIndex'' sourcePkgIndex'' remaining'' - where - (next, remaining') = Set.deleteFindMin remaining - moreInstalled = PackageIndex.lookupPackageName installedPkgIndex next - moreSource = PackageIndex.lookupPackageName sourcePkgIndex next - moreRemaining = -- we filter out packages already included in the indexes - -- this avoids an infinite loop if a package depends on itself - -- like base-3.0.3.0 with base-4.0.0.0 - filter notAlreadyIncluded - $ [ packageName dep - | pkg <- moreInstalled - , dep <- depends pkg ] - ++ [ name - | SourcePackage _ pkg _ _ <- moreSource - , Dependency name _ <- - buildDepends (flattenPackageDescription pkg) ] - installedPkgIndex'' = foldl' (flip PackageIndex.insert) - installedPkgIndex' moreInstalled - sourcePkgIndex'' = foldl' (flip PackageIndex.insert) - sourcePkgIndex' moreSource - remaining'' = foldl' (flip Set.insert) - remaining' moreRemaining - notAlreadyIncluded name = - null (PackageIndex.lookupPackageName installedPkgIndex' name) - && null (PackageIndex.lookupPackageName sourcePkgIndex' name) - --- ------------------------------------------------------------ --- * Post processing the solution --- ------------------------------------------------------------ - -finaliseSelectedPackages :: (PackageName -> PackagePreferences) - -> SelectedPackages - -> Constraints - -> [PlanPackage] -finaliseSelectedPackages pref selected constraints = - map finaliseSelected 
(PackageIndex.allPackages selected) - where - remainingChoices = Constraints.choices constraints - finaliseSelected (InstalledOnly ipkg ) = finaliseInstalled ipkg - finaliseSelected (SourceOnly apkg) = finaliseSource Nothing apkg - finaliseSelected (InstalledAndSource ipkg apkg) = - case PackageIndex.lookupPackageId remainingChoices (packageId ipkg) of - --picked package not in constraints - Nothing -> impossible "finaliseSelected no pkg" - -- to constrain to avail only: - Just (SourceOnly _) -> impossible "finaliseSelected src only" - Just (InstalledOnly _) -> finaliseInstalled ipkg - Just (InstalledAndSource _ _) -> finaliseSource (Just ipkg) apkg - - finaliseInstalled (InstalledPackageEx pkg _ _) = InstallPlan.PreExisting pkg - finaliseSource mipkg (SemiConfiguredPackage pkg flags stanzas deps) = - InstallPlan.Configured (ConfiguredPackage pkg flags stanzas deps') - where - deps' = map (packageId . pickRemaining mipkg) deps - - pickRemaining mipkg dep@(Dependency _name versionRange) = - case PackageIndex.lookupDependency remainingChoices dep of - [] -> impossible "pickRemaining no pkg" - [pkg'] -> pkg' - remaining -> assert (checkIsPaired remaining) - $ maximumBy bestByPref remaining - where - -- We order candidate packages to pick for a dependency by these - -- three factors. The last factor is just highest version wins. - bestByPref = - comparing (\p -> (isCurrent p, isPreferred p, packageVersion p)) - -- Is the package already used by the installed version of this - -- package? If so we should pick that first. This stops us from doing - -- silly things like deciding to rebuild haskell98 against base 3. - isCurrent = case mipkg :: Maybe InstalledPackageEx of - Nothing -> \_ -> False - Just ipkg -> \p -> packageId p `elem` depends ipkg - -- If there is no upper bound on the version range then we apply a - -- preferred version according to the hackage or user's suggested - -- version constraints. 
TODO: distinguish hacks from prefs - bounded = boundedAbove versionRange - isPreferred p - | bounded = True -- any constant will do - | otherwise = packageVersion p `withinRange` preferredVersions - where (PackagePreferences preferredVersions _) = pref (packageName p) - - boundedAbove :: VersionRange -> Bool - boundedAbove vr = case asVersionIntervals vr of - [] -> True -- this is the inconsistent version range. - intervals -> case last intervals of - (_, UpperBound _ _) -> True - (_, NoUpperBound ) -> False - - -- We really only expect to find more than one choice remaining when - -- we're finalising a dependency on a paired package. - checkIsPaired [p1, p2] = - case Constraints.isPaired constraints (packageId p1) of - Just p2' -> packageId p2' == packageId p2 - Nothing -> False - checkIsPaired _ = False - --- | Improve an existing installation plan by, where possible, swapping --- packages we plan to install with ones that are already installed. --- This may add additional constraints due to the dependencies of installed --- packages on other installed packages. --- -improvePlan :: PackageIndex InstalledPackage - -> Constraints - -> PackageIndex PlanPackage - -> (PackageIndex PlanPackage, Constraints) -improvePlan installed constraints0 selected0 = - foldl' improve (selected0, constraints0) (reverseTopologicalOrder selected0) - where - improve (selected, constraints) = fromMaybe (selected, constraints) - . improvePkg selected constraints - - -- The idea is to improve the plan by swapping a configured package for - -- an equivalent installed one. 
For a particular package the condition is - -- that the package be in a configured state, that a the same version be - -- already installed with the exact same dependencies and all the packages - -- in the plan that it depends on are in the installed state - improvePkg selected constraints pkgid = do - Configured pkg <- PackageIndex.lookupPackageId selected pkgid - ipkg <- PackageIndex.lookupPackageId installed pkgid - guard $ all (isInstalled selected) (depends pkg) - tryInstalled selected constraints [ipkg] - - isInstalled selected pkgid = - case PackageIndex.lookupPackageId selected pkgid of - Just (PreExisting _) -> True - _ -> False - - tryInstalled :: PackageIndex PlanPackage -> Constraints - -> [InstalledPackage] - -> Maybe (PackageIndex PlanPackage, Constraints) - tryInstalled selected constraints [] = Just (selected, constraints) - tryInstalled selected constraints (pkg:pkgs) = - case constraintsOk (packageId pkg) (depends pkg) constraints of - Nothing -> Nothing - Just constraints' -> tryInstalled selected' constraints' pkgs' - where - selected' = PackageIndex.insert (PreExisting pkg) selected - pkgs' = catMaybes (map notSelected (depends pkg)) ++ pkgs - notSelected pkgid = - case (PackageIndex.lookupPackageId installed pkgid - ,PackageIndex.lookupPackageId selected pkgid) of - (Just pkg', Nothing) -> Just pkg' - _ -> Nothing - - constraintsOk _ [] constraints = Just constraints - constraintsOk pkgid (pkgid':pkgids) constraints = - case addPackageDependencyConstraint pkgid dep True constraints of - Satisfiable constraints' _ -> constraintsOk pkgid pkgids constraints' - _ -> Nothing - where - dep = thisPackageVersion pkgid' - - reverseTopologicalOrder :: PackageFixedDeps pkg - => PackageIndex pkg -> [PackageId] - reverseTopologicalOrder index = map (packageId . toPkg) - . Graph.topSort - . 
Graph.transposeG - $ graph - where (graph, toPkg, _) = PackageIndex.dependencyGraph index - --- ------------------------------------------------------------ --- * Adding and recording constraints --- ------------------------------------------------------------ - -addPackageSelectConstraint :: PackageId -> Constraints - -> Satisfiable Constraints - [PackageId] ExclusionReason -addPackageSelectConstraint pkgid = - Constraints.constrain pkgname constraint reason - where - pkgname = packageName pkgid - constraint ver _ = ver == packageVersion pkgid - reason = SelectedOther pkgid - -addPackageExcludeConstraint :: PackageId -> ExclusionReason - -> Constraints - -> Satisfiable Constraints - [PackageId] ExclusionReason -addPackageExcludeConstraint pkgid reason = - Constraints.constrain pkgname constraint reason - where - pkgname = packageName pkgid - constraint ver installed - | ver == packageVersion pkgid = installed - | otherwise = True - -addPackageDependencyConstraint :: PackageId -> Dependency -> Bool - -> Constraints - -> Satisfiable Constraints - [PackageId] ExclusionReason -addPackageDependencyConstraint pkgid dep@(Dependency pkgname verrange) - installedConstraint = - Constraints.constrain pkgname constraint reason - where - constraint ver installed = ver `withinRange` verrange - && if installedConstraint then installed else True - reason = ExcludedByPackageDependency pkgid dep installedConstraint - -addTopLevelVersionConstraint :: PackageName -> VersionRange - -> Constraints - -> Satisfiable Constraints - [PackageId] ExclusionReason -addTopLevelVersionConstraint pkgname verrange = - Constraints.constrain pkgname constraint reason - where - constraint ver _installed = ver `withinRange` verrange - reason = ExcludedByTopLevelConstraintVersion pkgname verrange - -addTopLevelInstalledConstraint, - addTopLevelSourceConstraint :: PackageName - -> Constraints - -> Satisfiable Constraints - [PackageId] ExclusionReason -addTopLevelInstalledConstraint pkgname = - 
Constraints.constrain pkgname constraint reason - where - constraint _ver installed = installed - reason = ExcludedByTopLevelConstraintInstalled pkgname - -addTopLevelSourceConstraint pkgname = - Constraints.constrain pkgname constraint reason - where - constraint _ver installed = not installed - reason = ExcludedByTopLevelConstraintSource pkgname - - --- ------------------------------------------------------------ --- * Reasons for constraints --- ------------------------------------------------------------ - --- | For every constraint we record we also record the reason that constraint --- is needed. So if we end up failing due to conflicting constraints then we --- can give an explnanation as to what was conflicting and why. --- -data ExclusionReason = - - -- | We selected this other version of the package. That means we exclude - -- all the other versions. - SelectedOther PackageId - - -- | We excluded this version of the package because it failed to - -- configure probably because of unsatisfiable deps. - | ExcludedByConfigureFail [Dependency] - - -- | We excluded this version of the package because another package that - -- we selected imposed a dependency which this package did not satisfy. - | ExcludedByPackageDependency PackageId Dependency Bool - - -- | We excluded this version of the package because it did not satisfy - -- a dependency given as an original top level input. - -- - | ExcludedByTopLevelConstraintVersion PackageName VersionRange - | ExcludedByTopLevelConstraintInstalled PackageName - | ExcludedByTopLevelConstraintSource PackageName - - deriving Eq - --- | Given an excluded package and the reason it was excluded, produce a human --- readable explanation. 
--- -showExclusionReason :: PackageId -> ExclusionReason -> String -showExclusionReason pkgid (SelectedOther pkgid') = - display pkgid ++ " was excluded because " ++ - display pkgid' ++ " was selected instead" -showExclusionReason pkgid (ExcludedByConfigureFail missingDeps) = - display pkgid ++ " was excluded because it could not be configured. " - ++ "It requires " ++ listOf displayDep missingDeps -showExclusionReason pkgid (ExcludedByPackageDependency pkgid' dep installedConstraint) - = display pkgid ++ " was excluded because " ++ display pkgid' ++ " requires " - ++ (if installedConstraint then "an installed instance of " else "") - ++ displayDep dep -showExclusionReason pkgid (ExcludedByTopLevelConstraintVersion pkgname verRange) = - display pkgid ++ " was excluded because of the top level constraint " ++ - displayDep (Dependency pkgname verRange) -showExclusionReason pkgid (ExcludedByTopLevelConstraintInstalled pkgname) - = display pkgid ++ " was excluded because of the top level constraint '" - ++ display pkgname ++ " installed' which means that only installed instances " - ++ "of the package may be selected." -showExclusionReason pkgid (ExcludedByTopLevelConstraintSource pkgname) - = display pkgid ++ " was excluded because of the top level constraint '" - ++ display pkgname ++ " source' which means that only source versions " - ++ "of the package may be selected." 
- - --- ------------------------------------------------------------ --- * Logging progress and failures --- ------------------------------------------------------------ - -data Log = Select [SelectedPackage] [PackageId] - | AppliedVersionConstraint PackageName VersionRange [PackageId] - | AppliedInstalledConstraint PackageName InstalledConstraint [PackageId] - | ExcludeUnconfigurable PackageId - -data Failure - = NoSuchPackage - PackageName - | ConfigureFailed - SelectablePackage - [(Dependency, [(PackageId, [ExclusionReason])])] - | DependencyConflict - SelectedPackage Dependency Bool - [(PackageId, [ExclusionReason])] - | TopLevelVersionConstraintConflict - PackageName VersionRange - [(PackageId, [ExclusionReason])] - | TopLevelVersionConstraintUnsatisfiable - PackageName VersionRange - | TopLevelInstallConstraintConflict - PackageName InstalledConstraint - [(PackageId, [ExclusionReason])] - | TopLevelInstallConstraintUnsatisfiable - PackageName InstalledConstraint - -showLog :: Log -> String -showLog (Select selected discarded) = case (selectedMsg, discardedMsg) of - ("", y) -> y - (x, "") -> x - (x, y) -> x ++ " and " ++ y - - where - selectedMsg = "selecting " ++ case selected of - [] -> "" - [s] -> display (packageId s) ++ " " ++ kind s - (s:ss) -> listOf id - $ (display (packageId s) ++ " " ++ kind s) - : [ display (packageVersion s') ++ " " ++ kind s' - | s' <- ss ] - - kind (InstalledOnly _) = "(installed)" - kind (SourceOnly _) = "(source)" - kind (InstalledAndSource _ _) = "(installed or source)" - - discardedMsg = case discarded of - [] -> "" - _ -> "discarding " ++ listOf id - [ element - | (pkgid:pkgids) <- groupBy (equating packageName) (sort discarded) - , element <- display pkgid : map (display . 
packageVersion) pkgids ] -showLog (AppliedVersionConstraint pkgname ver pkgids) = - "applying constraint " ++ display (Dependency pkgname ver) - ++ if null pkgids - then "" - else " which excludes " ++ listOf display pkgids -showLog (AppliedInstalledConstraint pkgname inst pkgids) = - "applying constraint " ++ display pkgname ++ " '" - ++ (case inst of InstalledConstraint -> "installed"; _ -> "source") ++ "' " - ++ if null pkgids - then "" - else "which excludes " ++ listOf display pkgids -showLog (ExcludeUnconfigurable pkgid) = - "excluding " ++ display pkgid ++ " (it cannot be configured)" - -showFailure :: Failure -> String -showFailure (NoSuchPackage pkgname) = - "The package " ++ display pkgname ++ " is unknown." -showFailure (ConfigureFailed pkg missingDeps) = - "cannot configure " ++ displayPkg pkg ++ ". It requires " - ++ listOf (displayDep . fst) missingDeps - ++ '\n' : unlines (map (uncurry whyNot) missingDeps) - - where - whyNot (Dependency name ver) [] = - "There is no available version of " ++ display name - ++ " that satisfies " ++ displayVer ver - - whyNot dep conflicts = - "For the dependency on " ++ displayDep dep - ++ " there are these packages: " ++ listOf display pkgs - ++ ". 
However none of them are available.\n" - ++ unlines [ showExclusionReason (packageId pkg') reason - | (pkg', reasons) <- conflicts, reason <- reasons ] - - where pkgs = map fst conflicts - -showFailure (DependencyConflict pkg dep installedConstraint conflicts) = - "dependencies conflict: " - ++ displayPkg pkg ++ " requires " - ++ (if installedConstraint then "an installed instance of " else "") - ++ displayDep dep ++ " however:\n" - ++ unlines [ showExclusionReason (packageId pkg') reason - | (pkg', reasons) <- conflicts, reason <- reasons ] - -showFailure (TopLevelVersionConstraintConflict name ver conflicts) = - "constraints conflict: we have the top level constraint " - ++ displayDep (Dependency name ver) ++ ", but\n" - ++ unlines [ showExclusionReason (packageId pkg') reason - | (pkg', reasons) <- conflicts, reason <- reasons ] - -showFailure (TopLevelVersionConstraintUnsatisfiable name ver) = - "There is no available version of " ++ display name - ++ " that satisfies " ++ displayVer ver - -showFailure (TopLevelInstallConstraintConflict name InstalledConstraint conflicts) = - "constraints conflict: " - ++ "top level constraint '" ++ display name ++ " installed' however\n" - ++ unlines [ showExclusionReason (packageId pkg') reason - | (pkg', reasons) <- conflicts, reason <- reasons ] - -showFailure (TopLevelInstallConstraintUnsatisfiable name InstalledConstraint) = - "There is no installed version of " ++ display name - -showFailure (TopLevelInstallConstraintConflict name SourceConstraint conflicts) = - "constraints conflict: " - ++ "top level constraint '" ++ display name ++ " source' however\n" - ++ unlines [ showExclusionReason (packageId pkg') reason - | (pkg', reasons) <- conflicts, reason <- reasons ] - -showFailure (TopLevelInstallConstraintUnsatisfiable name SourceConstraint) = - "There is no available source version of " ++ display name - -displayVer :: VersionRange -> String -displayVer = display . 
simplifyVersionRange - -displayDep :: Dependency -> String -displayDep = display . simplifyDependency - - --- ------------------------------------------------------------ --- * Utils --- ------------------------------------------------------------ - -impossible :: String -> a -impossible msg = internalError $ "assertion failure: " ++ msg - -internalError :: String -> a -internalError msg = error $ "internal error: " ++ msg - -displayPkg :: Package pkg => pkg -> String -displayPkg = display . packageId - -listOf :: (a -> String) -> [a] -> String -listOf _ [] = [] -listOf disp [x0] = disp x0 -listOf disp (x0:x1:xs) = disp x0 ++ go x1 xs - where go x [] = " and " ++ disp x - go x (x':xs') = ", " ++ disp x ++ go x' xs' diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Types.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Types.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency/Types.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency/Types.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,258 +0,0 @@ -{-# LANGUAGE CPP #-} -{-# LANGUAGE DeriveFunctor #-} ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.Dependency.Types --- Copyright : (c) Duncan Coutts 2008 --- License : BSD-like --- --- Maintainer : cabal-devel@haskell.org --- Stability : provisional --- Portability : portable --- --- Common types for dependency resolution. 
------------------------------------------------------------------------------ -module Distribution.Client.Dependency.Types ( - ExtDependency(..), - - PreSolver(..), - Solver(..), - DependencyResolver, - - AllowNewer(..), isAllowNewer, - PackageConstraint(..), - debugPackageConstraint, - PackagePreferences(..), - InstalledPreference(..), - PackagesPreferenceDefault(..), - - Progress(..), - foldProgress, - ) where - -#if !MIN_VERSION_base(4,8,0) -import Control.Applicative - ( Applicative(..) ) -#endif -import Control.Applicative - ( Alternative(..) ) - - -import Data.Char - ( isAlpha, toLower ) -#if !MIN_VERSION_base(4,8,0) -import Data.Monoid - ( Monoid(..) ) -#endif - -import Distribution.Client.Types - ( OptionalStanza(..), SourcePackage(..) ) -import qualified Distribution.Client.InstallPlan as InstallPlan - -import Distribution.Compat.ReadP - ( (<++) ) - -import qualified Distribution.Compat.ReadP as Parse - ( pfail, munch1 ) -import Distribution.PackageDescription - ( FlagAssignment, FlagName(..) ) -import qualified Distribution.Client.PackageIndex as PackageIndex - ( PackageIndex ) -import Distribution.Simple.PackageIndex ( InstalledPackageIndex ) -import Distribution.Package - ( Dependency, PackageName, InstalledPackageId ) -import Distribution.Version - ( VersionRange, simplifyVersionRange ) -import Distribution.Compiler - ( CompilerInfo ) -import Distribution.System - ( Platform ) -import Distribution.Text - ( Text(..), display ) - -import Text.PrettyPrint - ( text ) - -import Prelude hiding (fail) - --- | Covers source dependencies and installed dependencies in --- one type. -data ExtDependency = SourceDependency Dependency - | InstalledDependency InstalledPackageId - -instance Text ExtDependency where - disp (SourceDependency dep) = disp dep - disp (InstalledDependency dep) = disp dep - - parse = (SourceDependency `fmap` parse) <++ (InstalledDependency `fmap` parse) - --- | All the solvers that can be selected. 
-data PreSolver = AlwaysTopDown | AlwaysModular | Choose - deriving (Eq, Ord, Show, Bounded, Enum) - --- | All the solvers that can be used. -data Solver = TopDown | Modular - deriving (Eq, Ord, Show, Bounded, Enum) - -instance Text PreSolver where - disp AlwaysTopDown = text "topdown" - disp AlwaysModular = text "modular" - disp Choose = text "choose" - parse = do - name <- Parse.munch1 isAlpha - case map toLower name of - "topdown" -> return AlwaysTopDown - "modular" -> return AlwaysModular - "choose" -> return Choose - _ -> Parse.pfail - --- | A dependency resolver is a function that works out an installation plan --- given the set of installed and available packages and a set of deps to --- solve for. --- --- The reason for this interface is because there are dozens of approaches to --- solving the package dependency problem and we want to make it easy to swap --- in alternatives. --- -type DependencyResolver = Platform - -> CompilerInfo - -> InstalledPackageIndex - -> PackageIndex.PackageIndex SourcePackage - -> (PackageName -> PackagePreferences) - -> [PackageConstraint] - -> [PackageName] - -> Progress String String [InstallPlan.PlanPackage] - --- | Per-package constraints. Package constraints must be respected by the --- solver. Multiple constraints for each package can be given, though obviously --- it is possible to construct conflicting constraints (eg impossible version --- range or inconsistent flag assignment). --- -data PackageConstraint - = PackageConstraintVersion PackageName VersionRange - | PackageConstraintInstalled PackageName - | PackageConstraintSource PackageName - | PackageConstraintFlags PackageName FlagAssignment - | PackageConstraintStanzas PackageName [OptionalStanza] - deriving (Show,Eq) - --- | Provide a textual representation of a package constraint --- for debugging purposes. 
--- -debugPackageConstraint :: PackageConstraint -> String -debugPackageConstraint (PackageConstraintVersion pn vr) = - display pn ++ " " ++ display (simplifyVersionRange vr) -debugPackageConstraint (PackageConstraintInstalled pn) = - display pn ++ " installed" -debugPackageConstraint (PackageConstraintSource pn) = - display pn ++ " source" -debugPackageConstraint (PackageConstraintFlags pn fs) = - "flags " ++ display pn ++ " " ++ unwords (map (uncurry showFlag) fs) - where - showFlag (FlagName f) True = "+" ++ f - showFlag (FlagName f) False = "-" ++ f -debugPackageConstraint (PackageConstraintStanzas pn ss) = - "stanzas " ++ display pn ++ " " ++ unwords (map showStanza ss) - where - showStanza TestStanzas = "test" - showStanza BenchStanzas = "bench" - --- | A per-package preference on the version. It is a soft constraint that the --- 'DependencyResolver' should try to respect where possible. It consists of --- a 'InstalledPreference' which says if we prefer versions of packages --- that are already installed. It also has a 'PackageVersionPreference' which --- is a suggested constraint on the version number. The resolver should try to --- use package versions that satisfy the suggested version constraint. --- --- It is not specified if preferences on some packages are more important than --- others. --- -data PackagePreferences = PackagePreferences VersionRange InstalledPreference - --- | Whether we prefer an installed version of a package or simply the latest --- version. --- -data InstalledPreference = PreferInstalled | PreferLatest - deriving Show - --- | Global policy for all packages to say if we prefer package versions that --- are already installed locally or if we just prefer the latest available. --- -data PackagesPreferenceDefault = - - -- | Always prefer the latest version irrespective of any existing - -- installed version. - -- - -- * This is the standard policy for upgrade. 
- -- - PreferAllLatest - - -- | Always prefer the installed versions over ones that would need to be - -- installed. Secondarily, prefer latest versions (eg the latest installed - -- version or if there are none then the latest source version). - | PreferAllInstalled - - -- | Prefer the latest version for packages that are explicitly requested - -- but prefers the installed version for any other packages. - -- - -- * This is the standard policy for install. - -- - | PreferLatestForSelected - deriving Show - --- | Policy for relaxing upper bounds in dependencies. For example, given --- 'build-depends: array >= 0.3 && < 0.5', are we allowed to relax the upper --- bound and choose a version of 'array' that is greater or equal to 0.5? By --- default the upper bounds are always strictly honored. -data AllowNewer = - - -- | Default: honor the upper bounds in all dependencies, never choose - -- versions newer than allowed. - AllowNewerNone - - -- | Ignore upper bounds in dependencies on the given packages. - | AllowNewerSome [PackageName] - - -- | Ignore upper bounds in dependencies on all packages. - | AllowNewerAll - --- | Convert 'AllowNewer' to a boolean. -isAllowNewer :: AllowNewer -> Bool -isAllowNewer AllowNewerNone = False -isAllowNewer (AllowNewerSome _) = True -isAllowNewer AllowNewerAll = True - --- | A type to represent the unfolding of an expensive long running --- calculation that may fail. We may get intermediate steps before the final --- result which may be used to indicate progress and\/or logging messages. --- -data Progress step fail done = Step step (Progress step fail done) - | Fail fail - | Done done - deriving Functor - --- | Consume a 'Progress' calculation. Much like 'foldr' for lists but with two --- base cases, one for a final result and one for failure. 
--- --- Eg to convert into a simple 'Either' result use: --- --- > foldProgress (flip const) Left Right --- -foldProgress :: (step -> a -> a) -> (fail -> a) -> (done -> a) - -> Progress step fail done -> a -foldProgress step fail done = fold - where fold (Step s p) = step s (fold p) - fold (Fail f) = fail f - fold (Done r) = done r - -instance Monad (Progress step fail) where - return a = Done a - p >>= f = foldProgress Step Fail f p - -instance Applicative (Progress step fail) where - pure a = Done a - p <*> x = foldProgress Step Fail (flip fmap x) p - -instance Monoid fail => Alternative (Progress step fail) where - empty = Fail mempty - p <|> q = foldProgress Step (const q) Done p diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Dependency.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Dependency.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,687 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.Dependency --- Copyright : (c) David Himmelstrup 2005, --- Bjorn Bringert 2007 --- Duncan Coutts 2008 --- License : BSD-like --- --- Maintainer : cabal-devel@gmail.com --- Stability : provisional --- Portability : portable --- --- Top level interface to dependency resolution. 
------------------------------------------------------------------------------ -module Distribution.Client.Dependency ( - -- * The main package dependency resolver - chooseSolver, - resolveDependencies, - Progress(..), - foldProgress, - - -- * Alternate, simple resolver that does not do dependencies recursively - resolveWithoutDependencies, - - -- * Constructing resolver policies - DepResolverParams(..), - PackageConstraint(..), - PackagesPreferenceDefault(..), - PackagePreference(..), - InstalledPreference(..), - - -- ** Standard policy - standardInstallPolicy, - PackageSpecifier(..), - - -- ** Sandbox policy - applySandboxInstallPolicy, - - -- ** Extra policy options - dontUpgradeNonUpgradeablePackages, - hideBrokenInstalledPackages, - upgradeDependencies, - reinstallTargets, - - -- ** Policy utils - addConstraints, - addPreferences, - setPreferenceDefault, - setReorderGoals, - setIndependentGoals, - setAvoidReinstalls, - setShadowPkgs, - setStrongFlags, - setMaxBackjumps, - addSourcePackages, - hideInstalledPackagesSpecificByInstalledPackageId, - hideInstalledPackagesSpecificBySourcePackageId, - hideInstalledPackagesAllVersions, - removeUpperBounds - ) where - -import Distribution.Client.Dependency.TopDown - ( topDownResolver ) -import Distribution.Client.Dependency.Modular - ( modularResolver, SolverConfig(..) ) -import qualified Distribution.Client.PackageIndex as PackageIndex -import Distribution.Simple.PackageIndex (InstalledPackageIndex) -import qualified Distribution.Simple.PackageIndex as InstalledPackageIndex -import qualified Distribution.Client.InstallPlan as InstallPlan -import Distribution.Client.InstallPlan (InstallPlan) -import Distribution.Client.Types - ( SourcePackageDb(SourcePackageDb) - , SourcePackage(..) ) -import Distribution.Client.Dependency.Types - ( PreSolver(..), Solver(..), DependencyResolver, PackageConstraint(..) - , debugPackageConstraint - , AllowNewer(..), PackagePreferences(..), InstalledPreference(..) 
- , PackagesPreferenceDefault(..) - , Progress(..), foldProgress ) -import Distribution.Client.Sandbox.Types - ( SandboxPackageInfo(..) ) -import Distribution.Client.Targets -import qualified Distribution.InstalledPackageInfo as Installed -import Distribution.Package - ( PackageName(..), PackageId, Package(..), packageName, packageVersion - , InstalledPackageId, Dependency(Dependency)) -import qualified Distribution.PackageDescription as PD - ( PackageDescription(..), GenericPackageDescription(..) - , Library(..), Executable(..), TestSuite(..), Benchmark(..), CondTree) -import Distribution.PackageDescription (BuildInfo(targetBuildDepends)) -import Distribution.PackageDescription.Configuration (mapCondTree) -import Distribution.Version - ( Version(..), VersionRange, anyVersion, thisVersion, withinRange - , removeUpperBound, simplifyVersionRange ) -import Distribution.Compiler - ( CompilerId(..), CompilerInfo(..), CompilerFlavor(..) ) -import Distribution.System - ( Platform ) -import Distribution.Simple.Utils - ( comparing, warn, info ) -import Distribution.Text - ( display ) -import Distribution.Verbosity - ( Verbosity ) - -import Data.List (maximumBy, foldl', intercalate) -import Data.Maybe (fromMaybe) -import qualified Data.Map as Map -import qualified Data.Set as Set -import Data.Set (Set) - --- ------------------------------------------------------------ --- * High level planner policy --- ------------------------------------------------------------ - --- | The set of parameters to the dependency resolver. These parameters are --- relatively low level but many kinds of high level policies can be --- implemented in terms of adjustments to the parameters. 
--- -data DepResolverParams = DepResolverParams { - depResolverTargets :: [PackageName], - depResolverConstraints :: [PackageConstraint], - depResolverPreferences :: [PackagePreference], - depResolverPreferenceDefault :: PackagesPreferenceDefault, - depResolverInstalledPkgIndex :: InstalledPackageIndex, - depResolverSourcePkgIndex :: PackageIndex.PackageIndex SourcePackage, - depResolverReorderGoals :: Bool, - depResolverIndependentGoals :: Bool, - depResolverAvoidReinstalls :: Bool, - depResolverShadowPkgs :: Bool, - depResolverStrongFlags :: Bool, - depResolverMaxBackjumps :: Maybe Int - } - -debugDepResolverParams :: DepResolverParams -> String -debugDepResolverParams p = - "targets: " ++ intercalate ", " (map display (depResolverTargets p)) - ++ "\nconstraints: " - ++ concatMap (("\n " ++) . debugPackageConstraint) (depResolverConstraints p) - ++ "\npreferences: " - ++ concatMap (("\n " ++) . debugPackagePreference) (depResolverPreferences p) - ++ "\nstrategy: " ++ show (depResolverPreferenceDefault p) - --- | A package selection preference for a particular package. --- --- Preferences are soft constraints that the dependency resolver should try to --- respect where possible. It is not specified if preferences on some packages --- are more important than others. --- -data PackagePreference = - - -- | A suggested constraint on the version number. - PackageVersionPreference PackageName VersionRange - - -- | If we prefer versions of packages that are already installed. - | PackageInstalledPreference PackageName InstalledPreference - --- | Provide a textual representation of a package preference --- for debugging purposes. 
--- -debugPackagePreference :: PackagePreference -> String -debugPackagePreference (PackageVersionPreference pn vr) = - display pn ++ " " ++ display (simplifyVersionRange vr) -debugPackagePreference (PackageInstalledPreference pn ip) = - display pn ++ " " ++ show ip - -basicDepResolverParams :: InstalledPackageIndex - -> PackageIndex.PackageIndex SourcePackage - -> DepResolverParams -basicDepResolverParams installedPkgIndex sourcePkgIndex = - DepResolverParams { - depResolverTargets = [], - depResolverConstraints = [], - depResolverPreferences = [], - depResolverPreferenceDefault = PreferLatestForSelected, - depResolverInstalledPkgIndex = installedPkgIndex, - depResolverSourcePkgIndex = sourcePkgIndex, - depResolverReorderGoals = False, - depResolverIndependentGoals = False, - depResolverAvoidReinstalls = False, - depResolverShadowPkgs = False, - depResolverStrongFlags = False, - depResolverMaxBackjumps = Nothing - } - -addTargets :: [PackageName] - -> DepResolverParams -> DepResolverParams -addTargets extraTargets params = - params { - depResolverTargets = extraTargets ++ depResolverTargets params - } - -addConstraints :: [PackageConstraint] - -> DepResolverParams -> DepResolverParams -addConstraints extraConstraints params = - params { - depResolverConstraints = extraConstraints - ++ depResolverConstraints params - } - -addPreferences :: [PackagePreference] - -> DepResolverParams -> DepResolverParams -addPreferences extraPreferences params = - params { - depResolverPreferences = extraPreferences - ++ depResolverPreferences params - } - -setPreferenceDefault :: PackagesPreferenceDefault - -> DepResolverParams -> DepResolverParams -setPreferenceDefault preferenceDefault params = - params { - depResolverPreferenceDefault = preferenceDefault - } - -setReorderGoals :: Bool -> DepResolverParams -> DepResolverParams -setReorderGoals b params = - params { - depResolverReorderGoals = b - } - -setIndependentGoals :: Bool -> DepResolverParams -> DepResolverParams 
-setIndependentGoals b params = - params { - depResolverIndependentGoals = b - } - -setAvoidReinstalls :: Bool -> DepResolverParams -> DepResolverParams -setAvoidReinstalls b params = - params { - depResolverAvoidReinstalls = b - } - -setShadowPkgs :: Bool -> DepResolverParams -> DepResolverParams -setShadowPkgs b params = - params { - depResolverShadowPkgs = b - } - -setStrongFlags :: Bool -> DepResolverParams -> DepResolverParams -setStrongFlags b params = - params { - depResolverStrongFlags = b - } - -setMaxBackjumps :: Maybe Int -> DepResolverParams -> DepResolverParams -setMaxBackjumps n params = - params { - depResolverMaxBackjumps = n - } - --- | Some packages are specific to a given compiler version and should never be --- upgraded. -dontUpgradeNonUpgradeablePackages :: DepResolverParams -> DepResolverParams -dontUpgradeNonUpgradeablePackages params = - addConstraints extraConstraints params - where - extraConstraints = - [ PackageConstraintInstalled pkgname - | all (/=PackageName "base") (depResolverTargets params) - , pkgname <- map PackageName [ "base", "ghc-prim", "integer-gmp" - , "integer-simple" ] - , isInstalled pkgname ] - -- TODO: the top down resolver chokes on the base constraints - -- below when there are no targets and thus no dep on base. - -- Need to refactor constraints separate from needing packages. - isInstalled = not . null - . 
InstalledPackageIndex.lookupPackageName - (depResolverInstalledPkgIndex params) - -addSourcePackages :: [SourcePackage] - -> DepResolverParams -> DepResolverParams -addSourcePackages pkgs params = - params { - depResolverSourcePkgIndex = - foldl (flip PackageIndex.insert) - (depResolverSourcePkgIndex params) pkgs - } - -hideInstalledPackagesSpecificByInstalledPackageId :: [InstalledPackageId] - -> DepResolverParams - -> DepResolverParams -hideInstalledPackagesSpecificByInstalledPackageId pkgids params = - --TODO: this should work using exclude constraints instead - params { - depResolverInstalledPkgIndex = - foldl' (flip InstalledPackageIndex.deleteInstalledPackageId) - (depResolverInstalledPkgIndex params) pkgids - } - -hideInstalledPackagesSpecificBySourcePackageId :: [PackageId] - -> DepResolverParams - -> DepResolverParams -hideInstalledPackagesSpecificBySourcePackageId pkgids params = - --TODO: this should work using exclude constraints instead - params { - depResolverInstalledPkgIndex = - foldl' (flip InstalledPackageIndex.deleteSourcePackageId) - (depResolverInstalledPkgIndex params) pkgids - } - -hideInstalledPackagesAllVersions :: [PackageName] - -> DepResolverParams -> DepResolverParams -hideInstalledPackagesAllVersions pkgnames params = - --TODO: this should work using exclude constraints instead - params { - depResolverInstalledPkgIndex = - foldl' (flip InstalledPackageIndex.deletePackageName) - (depResolverInstalledPkgIndex params) pkgnames - } - - -hideBrokenInstalledPackages :: DepResolverParams -> DepResolverParams -hideBrokenInstalledPackages params = - hideInstalledPackagesSpecificByInstalledPackageId pkgids params - where - pkgids = map Installed.installedPackageId - . InstalledPackageIndex.reverseDependencyClosure - (depResolverInstalledPkgIndex params) - . map (Installed.installedPackageId . fst) - . 
InstalledPackageIndex.brokenPackages - $ depResolverInstalledPkgIndex params - --- | Remove upper bounds in dependencies using the policy specified by the --- 'AllowNewer' argument (all/some/none). -removeUpperBounds :: AllowNewer -> DepResolverParams -> DepResolverParams -removeUpperBounds allowNewer params = - params { - -- NB: It's important to apply 'removeUpperBounds' after - -- 'addSourcePackages'. Otherwise, the packages inserted by - -- 'addSourcePackages' won't have upper bounds in dependencies relaxed. - - depResolverSourcePkgIndex = sourcePkgIndex' - } - where - sourcePkgIndex = depResolverSourcePkgIndex params - sourcePkgIndex' = case allowNewer of - AllowNewerNone -> sourcePkgIndex - AllowNewerAll -> fmap relaxAllPackageDeps sourcePkgIndex - AllowNewerSome pkgs -> fmap (relaxSomePackageDeps pkgs) sourcePkgIndex - - relaxAllPackageDeps :: SourcePackage -> SourcePackage - relaxAllPackageDeps = onAllBuildDepends doRelax - where - doRelax (Dependency pkgName verRange) = - Dependency pkgName (removeUpperBound verRange) - - relaxSomePackageDeps :: [PackageName] -> SourcePackage -> SourcePackage - relaxSomePackageDeps pkgNames = onAllBuildDepends doRelax - where - doRelax d@(Dependency pkgName verRange) - | pkgName `elem` pkgNames = Dependency pkgName - (removeUpperBound verRange) - | otherwise = d - - -- Walk a 'GenericPackageDescription' and apply 'f' to all 'build-depends' - -- fields. 
- onAllBuildDepends :: (Dependency -> Dependency) - -> SourcePackage -> SourcePackage - onAllBuildDepends f srcPkg = srcPkg' - where - gpd = packageDescription srcPkg - pd = PD.packageDescription gpd - condLib = PD.condLibrary gpd - condExes = PD.condExecutables gpd - condTests = PD.condTestSuites gpd - condBenchs = PD.condBenchmarks gpd - - f' = onBuildInfo f - onBuildInfo g bi = bi - { targetBuildDepends = map g (targetBuildDepends bi) } - - onLibrary lib = lib { PD.libBuildInfo = f' $ PD.libBuildInfo lib } - onExecutable exe = exe { PD.buildInfo = f' $ PD.buildInfo exe } - onTestSuite tst = tst { PD.testBuildInfo = f' $ PD.testBuildInfo tst } - onBenchmark bmk = bmk { PD.benchmarkBuildInfo = - f' $ PD.benchmarkBuildInfo bmk } - - srcPkg' = srcPkg { packageDescription = gpd' } - gpd' = gpd { - PD.packageDescription = pd', - PD.condLibrary = condLib', - PD.condExecutables = condExes', - PD.condTestSuites = condTests', - PD.condBenchmarks = condBenchs' - } - pd' = pd { - PD.buildDepends = map f (PD.buildDepends pd), - PD.library = fmap onLibrary (PD.library pd), - PD.executables = map onExecutable (PD.executables pd), - PD.testSuites = map onTestSuite (PD.testSuites pd), - PD.benchmarks = map onBenchmark (PD.benchmarks pd) - } - condLib' = fmap (onCondTree onLibrary) condLib - condExes' = map (mapSnd $ onCondTree onExecutable) condExes - condTests' = map (mapSnd $ onCondTree onTestSuite) condTests - condBenchs' = map (mapSnd $ onCondTree onBenchmark) condBenchs - - mapSnd :: (a -> b) -> (c,a) -> (c,b) - mapSnd = fmap - - onCondTree :: (a -> b) -> PD.CondTree v [Dependency] a - -> PD.CondTree v [Dependency] b - onCondTree g = mapCondTree g (map f) id - - -upgradeDependencies :: DepResolverParams -> DepResolverParams -upgradeDependencies = setPreferenceDefault PreferAllLatest - - -reinstallTargets :: DepResolverParams -> DepResolverParams -reinstallTargets params = - hideInstalledPackagesAllVersions (depResolverTargets params) params - - -standardInstallPolicy :: 
InstalledPackageIndex - -> SourcePackageDb - -> [PackageSpecifier SourcePackage] - -> DepResolverParams -standardInstallPolicy - installedPkgIndex (SourcePackageDb sourcePkgIndex sourcePkgPrefs) - pkgSpecifiers - - = addPreferences - [ PackageVersionPreference name ver - | (name, ver) <- Map.toList sourcePkgPrefs ] - - . addConstraints - (concatMap pkgSpecifierConstraints pkgSpecifiers) - - . addTargets - (map pkgSpecifierTarget pkgSpecifiers) - - . hideInstalledPackagesSpecificBySourcePackageId - [ packageId pkg | SpecificSourcePackage pkg <- pkgSpecifiers ] - - . addSourcePackages - [ pkg | SpecificSourcePackage pkg <- pkgSpecifiers ] - - $ basicDepResolverParams - installedPkgIndex sourcePkgIndex - -applySandboxInstallPolicy :: SandboxPackageInfo - -> DepResolverParams - -> DepResolverParams -applySandboxInstallPolicy - (SandboxPackageInfo modifiedDeps otherDeps allSandboxPkgs _allDeps) - params - - = addPreferences [ PackageInstalledPreference n PreferInstalled - | n <- installedNotModified ] - - . addTargets installedNotModified - - . addPreferences - [ PackageVersionPreference (packageName pkg) - (thisVersion (packageVersion pkg)) | pkg <- otherDeps ] - - . addConstraints - [ PackageConstraintVersion (packageName pkg) - (thisVersion (packageVersion pkg)) | pkg <- modifiedDeps ] - - . addTargets [ packageName pkg | pkg <- modifiedDeps ] - - . hideInstalledPackagesSpecificBySourcePackageId - [ packageId pkg | pkg <- modifiedDeps ] - - -- We don't need to add source packages for add-source deps to the - -- 'installedPkgIndex' since 'getSourcePackages' did that for us. - - $ params - - where - installedPkgIds = - map fst . 
InstalledPackageIndex.allPackagesBySourcePackageId - $ allSandboxPkgs - modifiedPkgIds = map packageId modifiedDeps - installedNotModified = [ packageName pkg | pkg <- installedPkgIds, - pkg `notElem` modifiedPkgIds ] - --- ------------------------------------------------------------ --- * Interface to the standard resolver --- ------------------------------------------------------------ - -chooseSolver :: Verbosity -> PreSolver -> CompilerInfo -> IO Solver -chooseSolver _ AlwaysTopDown _ = return TopDown -chooseSolver _ AlwaysModular _ = return Modular -chooseSolver verbosity Choose cinfo = do - let (CompilerId f v) = compilerInfoId cinfo - chosenSolver | f == GHC && v <= Version [7] [] = TopDown - | otherwise = Modular - msg TopDown = warn verbosity "Falling back to topdown solver for GHC < 7." - msg Modular = info verbosity "Choosing modular solver." - msg chosenSolver - return chosenSolver - -runSolver :: Solver -> SolverConfig -> DependencyResolver -runSolver TopDown = const topDownResolver -- TODO: warn about unsupported options -runSolver Modular = modularResolver - --- | Run the dependency solver. --- --- Since this is potentially an expensive operation, the result is wrapped in a --- a 'Progress' structure that can be unfolded to provide progress information, --- logging messages and the final result or an error. --- -resolveDependencies :: Platform - -> CompilerInfo - -> Solver - -> DepResolverParams - -> Progress String String InstallPlan - - --TODO: is this needed here? 
see dontUpgradeNonUpgradeablePackages -resolveDependencies platform comp _solver params - | null (depResolverTargets params) - = return (mkInstallPlan platform comp []) - -resolveDependencies platform comp solver params = - - Step (debugDepResolverParams finalparams) - $ fmap (mkInstallPlan platform comp) - $ runSolver solver (SolverConfig reorderGoals indGoals noReinstalls - shadowing strFlags maxBkjumps) - platform comp installedPkgIndex sourcePkgIndex - preferences constraints targets - where - - finalparams @ (DepResolverParams - targets constraints - prefs defpref - installedPkgIndex - sourcePkgIndex - reorderGoals - indGoals - noReinstalls - shadowing - strFlags - maxBkjumps) = dontUpgradeNonUpgradeablePackages - -- TODO: - -- The modular solver can properly deal with broken - -- packages and won't select them. So the - -- 'hideBrokenInstalledPackages' function should be moved - -- into a module that is specific to the top-down solver. - . (if solver /= Modular then hideBrokenInstalledPackages - else id) - $ params - - preferences = interpretPackagesPreference - (Set.fromList targets) defpref prefs - --- | Make an install plan from the output of the dep resolver. --- It checks that the plan is valid, or it's an error in the dep resolver. --- -mkInstallPlan :: Platform - -> CompilerInfo - -> [InstallPlan.PlanPackage] -> InstallPlan -mkInstallPlan platform comp pkgIndex = - let index = InstalledPackageIndex.fromList pkgIndex in - case InstallPlan.new platform comp index of - Right plan -> plan - Left problems -> error $ unlines $ - "internal error: could not construct a valid install plan." - : "The proposed (invalid) plan contained the following problems:" - : map InstallPlan.showPlanProblem problems - ++ "Proposed plan:" - : [InstallPlan.showPlanIndex index] - - --- | Give an interpretation to the global 'PackagesPreference' as --- specific per-package 'PackageVersionPreference'. 
--- -interpretPackagesPreference :: Set PackageName - -> PackagesPreferenceDefault - -> [PackagePreference] - -> (PackageName -> PackagePreferences) -interpretPackagesPreference selected defaultPref prefs = - \pkgname -> PackagePreferences (versionPref pkgname) (installPref pkgname) - - where - versionPref pkgname = - fromMaybe anyVersion (Map.lookup pkgname versionPrefs) - versionPrefs = Map.fromList - [ (pkgname, pref) - | PackageVersionPreference pkgname pref <- prefs ] - - installPref pkgname = - fromMaybe (installPrefDefault pkgname) (Map.lookup pkgname installPrefs) - installPrefs = Map.fromList - [ (pkgname, pref) - | PackageInstalledPreference pkgname pref <- prefs ] - installPrefDefault = case defaultPref of - PreferAllLatest -> \_ -> PreferLatest - PreferAllInstalled -> \_ -> PreferInstalled - PreferLatestForSelected -> \pkgname -> - -- When you say cabal install foo, what you really mean is, prefer the - -- latest version of foo, but the installed version of everything else - if pkgname `Set.member` selected then PreferLatest - else PreferInstalled - --- ------------------------------------------------------------ --- * Simple resolver that ignores dependencies --- ------------------------------------------------------------ - --- | A simplistic method of resolving a list of target package names to --- available packages. --- --- Specifically, it does not consider package dependencies at all. Unlike --- 'resolveDependencies', no attempt is made to ensure that the selected --- packages have dependencies that are satisfiable or consistent with --- each other. --- --- It is suitable for tasks such as selecting packages to download for user --- inspection. It is not suitable for selecting packages to install. --- --- Note: if no installed package index is available, it is OK to pass 'mempty'. --- It simply means preferences for installed packages will be ignored. 
--- -resolveWithoutDependencies :: DepResolverParams - -> Either [ResolveNoDepsError] [SourcePackage] -resolveWithoutDependencies (DepResolverParams targets constraints - prefs defpref installedPkgIndex sourcePkgIndex - _reorderGoals _indGoals _avoidReinstalls - _shadowing _strFlags _maxBjumps) = - collectEithers (map selectPackage targets) - where - selectPackage :: PackageName -> Either ResolveNoDepsError SourcePackage - selectPackage pkgname - | null choices = Left $! ResolveUnsatisfiable pkgname requiredVersions - | otherwise = Right $! maximumBy bestByPrefs choices - - where - -- Constraints - requiredVersions = packageConstraints pkgname - pkgDependency = Dependency pkgname requiredVersions - choices = PackageIndex.lookupDependency sourcePkgIndex - pkgDependency - - -- Preferences - PackagePreferences preferredVersions preferInstalled - = packagePreferences pkgname - - bestByPrefs = comparing $ \pkg -> - (installPref pkg, versionPref pkg, packageVersion pkg) - installPref = case preferInstalled of - PreferLatest -> const False - PreferInstalled -> not . null - . InstalledPackageIndex.lookupSourcePackageId - installedPkgIndex - . packageId - versionPref pkg = packageVersion pkg `withinRange` preferredVersions - - packageConstraints :: PackageName -> VersionRange - packageConstraints pkgname = - Map.findWithDefault anyVersion pkgname packageVersionConstraintMap - packageVersionConstraintMap = - Map.fromList [ (name, range) - | PackageConstraintVersion name range <- constraints ] - - packagePreferences :: PackageName -> PackagePreferences - packagePreferences = interpretPackagesPreference - (Set.fromList targets) defpref prefs - - -collectEithers :: [Either a b] -> Either [a] [b] -collectEithers = collect . 
partitionEithers - where - collect ([], xs) = Right xs - collect (errs,_) = Left errs - partitionEithers :: [Either a b] -> ([a],[b]) - partitionEithers = foldr (either left right) ([],[]) - where - left a (l, r) = (a:l, r) - right a (l, r) = (l, a:r) - --- | Errors for 'resolveWithoutDependencies'. --- -data ResolveNoDepsError = - - -- | A package name which cannot be resolved to a specific package. - -- Also gives the constraint on the version and whether there was - -- a constraint on the package being installed. - ResolveUnsatisfiable PackageName VersionRange - -instance Show ResolveNoDepsError where - show (ResolveUnsatisfiable name ver) = - "There is no available version of " ++ display name - ++ " that satisfies " ++ display (simplifyVersionRange ver) diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Exec.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Exec.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Exec.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Exec.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,122 +0,0 @@ -{-# LANGUAGE CPP #-} ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.Exec --- Maintainer : cabal-devel@haskell.org --- Portability : portable --- --- Implementation of the 'exec' command. Runs an arbitrary executable in an --- environment suitable for making use of the sandbox. 
------------------------------------------------------------------------------ - -module Distribution.Client.Exec ( exec - ) where - -import qualified Distribution.Simple.GHC as GHC -import qualified Distribution.Simple.GHCJS as GHCJS - -import Distribution.Client.Sandbox (getSandboxConfigFilePath) -import Distribution.Client.Sandbox.PackageEnvironment (sandboxPackageDBPath) -import Distribution.Client.Sandbox.Types (UseSandbox (..)) - -import Distribution.Simple.Compiler (Compiler, CompilerFlavor(..), compilerFlavor) -import Distribution.Simple.Program (ghcProgram, ghcjsProgram, lookupProgram) -import Distribution.Simple.Program.Db (ProgramDb, requireProgram, modifyProgramSearchPath) -import Distribution.Simple.Program.Find (ProgramSearchPathEntry(..)) -import Distribution.Simple.Program.Run (programInvocation, runProgramInvocation) -import Distribution.Simple.Program.Types ( simpleProgram, ConfiguredProgram(..) ) -import Distribution.Simple.Utils (die) - -import Distribution.System (Platform) -import Distribution.Verbosity (Verbosity) - -import System.FilePath (searchPathSeparator, ()) -#if !MIN_VERSION_base(4,8,0) -import Control.Applicative ((<$>)) -import Data.Monoid (mempty) -#endif - - --- | Execute the given command in the package's environment. --- --- The given command is executed with GHC configured to use the correct --- package database and with the sandbox bin directory added to the PATH. 
-exec :: Verbosity - -> UseSandbox - -> Compiler - -> Platform - -> ProgramDb - -> [String] - -> IO () -exec verbosity useSandbox comp platform programDb extraArgs = - case extraArgs of - (exe:args) -> do - program <- requireProgram' verbosity useSandbox programDb exe - env <- ((++) (programOverrideEnv program)) <$> environmentOverrides - let invocation = programInvocation - program { programOverrideEnv = env } - args - runProgramInvocation verbosity invocation - - [] -> die "Please specify an executable to run" - where - environmentOverrides = - case useSandbox of - NoSandbox -> return [] - (UseSandbox sandboxDir) -> - sandboxEnvironment verbosity sandboxDir comp platform programDb - - --- | Return the package's sandbox environment. --- --- The environment sets GHC_PACKAGE_PATH so that GHC will use the sandbox. -sandboxEnvironment :: Verbosity - -> FilePath - -> Compiler - -> Platform - -> ProgramDb - -> IO [(String, Maybe String)] -sandboxEnvironment verbosity sandboxDir comp platform programDb = - case compilerFlavor comp of - GHC -> env GHC.getGlobalPackageDB ghcProgram "GHC_PACKAGE_PATH" - GHCJS -> env GHCJS.getGlobalPackageDB ghcjsProgram "GHCJS_PACKAGE_PATH" - _ -> die "exec only works with GHC and GHCJS" - where - env getGlobalPackageDB hcProgram packagePathEnvVar = do - let Just program = lookupProgram hcProgram programDb - gDb <- getGlobalPackageDB verbosity program - sandboxConfigFilePath <- getSandboxConfigFilePath mempty - let compilerPackagePath = hcPackagePath gDb - return [ (packagePathEnvVar, compilerPackagePath) - , ("CABAL_SANDBOX_PACKAGE_PATH", compilerPackagePath) - , ("CABAL_SANDBOX_CONFIG", Just sandboxConfigFilePath) - ] - - hcPackagePath gDb = - let s = sandboxPackageDBPath sandboxDir comp platform - in Just $ prependToSearchPath gDb s - - prependToSearchPath path newValue = - newValue ++ [searchPathSeparator] ++ path - - --- | Check that a program is configured and available to be run. 
If --- a sandbox is available check in the sandbox's directory. -requireProgram' :: Verbosity - -> UseSandbox - -> ProgramDb - -> String - -> IO ConfiguredProgram -requireProgram' verbosity useSandbox programDb exe = do - (program, _) <- requireProgram - verbosity - (simpleProgram exe) - updateSearchPath - return program - where - updateSearchPath = - flip modifyProgramSearchPath programDb $ \searchPath -> - case useSandbox of - NoSandbox -> searchPath - UseSandbox sandboxDir -> - ProgramSearchPathDir (sandboxDir "bin") : searchPath diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Fetch.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Fetch.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Fetch.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Fetch.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,195 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.Fetch --- Copyright : (c) David Himmelstrup 2005 --- Duncan Coutts 2011 --- License : BSD-like --- --- Maintainer : cabal-devel@gmail.com --- Stability : provisional --- Portability : portable --- --- The cabal fetch command ------------------------------------------------------------------------------ -module Distribution.Client.Fetch ( - fetch, - ) where - -import Distribution.Client.Types -import Distribution.Client.Targets -import Distribution.Client.FetchUtils hiding (fetchPackage) -import Distribution.Client.Dependency -import Distribution.Client.IndexUtils as IndexUtils - ( getSourcePackages, getInstalledPackages ) -import qualified Distribution.Client.InstallPlan as InstallPlan -import Distribution.Client.Setup - ( GlobalFlags(..), FetchFlags(..) 
) - -import Distribution.Package - ( packageId ) -import Distribution.Simple.Compiler - ( Compiler, compilerInfo, PackageDBStack ) -import Distribution.Simple.PackageIndex (InstalledPackageIndex) -import Distribution.Simple.Program - ( ProgramConfiguration ) -import Distribution.Simple.Setup - ( fromFlag ) -import Distribution.Simple.Utils - ( die, notice, debug ) -import Distribution.System - ( Platform ) -import Distribution.Text - ( display ) -import Distribution.Verbosity - ( Verbosity ) - -import Control.Monad - ( filterM ) - --- ------------------------------------------------------------ --- * The fetch command --- ------------------------------------------------------------ - ---TODO: --- * add fetch -o support --- * support tarball URLs via ad-hoc download cache (or in -o mode?) --- * suggest using --no-deps, unpack or fetch -o if deps cannot be satisfied --- * Port various flags from install: --- * --updage-dependencies --- * --constraint and --preference --- * --only-dependencies, but note it conflicts with --no-deps - - --- | Fetch a list of packages and their dependencies. --- -fetch :: Verbosity - -> PackageDBStack - -> [Repo] - -> Compiler - -> Platform - -> ProgramConfiguration - -> GlobalFlags - -> FetchFlags - -> [UserTarget] - -> IO () -fetch verbosity _ _ _ _ _ _ _ [] = - notice verbosity "No packages requested. Nothing to do." - -fetch verbosity packageDBs repos comp platform conf - globalFlags fetchFlags userTargets = do - - mapM_ checkTarget userTargets - - installedPkgIndex <- getInstalledPackages verbosity comp packageDBs conf - sourcePkgDb <- getSourcePackages verbosity repos - - pkgSpecifiers <- resolveUserTargets verbosity - (fromFlag $ globalWorldFile globalFlags) - (packageIndex sourcePkgDb) - userTargets - - pkgs <- planPackages - verbosity comp platform fetchFlags - installedPkgIndex sourcePkgDb pkgSpecifiers - - pkgs' <- filterM (fmap not . isFetched . 
packageSource) pkgs - if null pkgs' - --TODO: when we add support for remote tarballs then this message - -- will need to be changed because for remote tarballs we fetch them - -- at the earlier phase. - then notice verbosity $ "No packages need to be fetched. " - ++ "All the requested packages are already local " - ++ "or cached locally." - else if dryRun - then notice verbosity $ unlines $ - "The following packages would be fetched:" - : map (display . packageId) pkgs' - - else mapM_ (fetchPackage verbosity . packageSource) pkgs' - - where - dryRun = fromFlag (fetchDryRun fetchFlags) - -planPackages :: Verbosity - -> Compiler - -> Platform - -> FetchFlags - -> InstalledPackageIndex - -> SourcePackageDb - -> [PackageSpecifier SourcePackage] - -> IO [SourcePackage] -planPackages verbosity comp platform fetchFlags - installedPkgIndex sourcePkgDb pkgSpecifiers - - | includeDependencies = do - solver <- chooseSolver verbosity - (fromFlag (fetchSolver fetchFlags)) (compilerInfo comp) - notice verbosity "Resolving dependencies..." - installPlan <- foldProgress logMsg die return $ - resolveDependencies - platform (compilerInfo comp) - solver - resolverParams - - -- The packages we want to fetch are those packages the 'InstallPlan' - -- that are in the 'InstallPlan.Configured' state. - return - [ pkg - | (InstallPlan.Configured (InstallPlan.ConfiguredPackage pkg _ _ _)) - <- InstallPlan.toList installPlan ] - - | otherwise = - either (die . unlines . map show) return $ - resolveWithoutDependencies resolverParams - - where - resolverParams = - - setMaxBackjumps (if maxBackjumps < 0 then Nothing - else Just maxBackjumps) - - . setIndependentGoals independentGoals - - . setReorderGoals reorderGoals - - . setShadowPkgs shadowPkgs - - . setStrongFlags strongFlags - - -- Reinstall the targets given on the command line so that the dep - -- resolver will decide that they need fetching, even if they're - -- already installed. 
Since we want to get the source packages of - -- things we might have installed (but not have the sources for). - . reinstallTargets - - $ standardInstallPolicy installedPkgIndex sourcePkgDb pkgSpecifiers - - includeDependencies = fromFlag (fetchDeps fetchFlags) - logMsg message rest = debug verbosity message >> rest - - reorderGoals = fromFlag (fetchReorderGoals fetchFlags) - independentGoals = fromFlag (fetchIndependentGoals fetchFlags) - shadowPkgs = fromFlag (fetchShadowPkgs fetchFlags) - strongFlags = fromFlag (fetchStrongFlags fetchFlags) - maxBackjumps = fromFlag (fetchMaxBackjumps fetchFlags) - - -checkTarget :: UserTarget -> IO () -checkTarget target = case target of - UserTargetRemoteTarball _uri - -> die $ "The 'fetch' command does not yet support remote tarballs. " - ++ "In the meantime you can use the 'unpack' commands." - _ -> return () - -fetchPackage :: Verbosity -> PackageLocation a -> IO () -fetchPackage verbosity pkgsrc = case pkgsrc of - LocalUnpackedPackage _dir -> return () - LocalTarballPackage _file -> return () - - RemoteTarballPackage _uri _ -> - die $ "The 'fetch' command does not yet support remote tarballs. " - ++ "In the meantime you can use the 'unpack' commands." 
- - RepoTarballPackage repo pkgid _ -> do - _ <- fetchRepoTarball verbosity repo pkgid - return () diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/FetchUtils.hs cabal-install-1.22-1.22.9.0/Distribution/Client/FetchUtils.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/FetchUtils.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/FetchUtils.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,192 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.FetchUtils --- Copyright : (c) David Himmelstrup 2005 --- Duncan Coutts 2011 --- License : BSD-like --- --- Maintainer : cabal-devel@gmail.com --- Stability : provisional --- Portability : portable --- --- Functions for fetching packages ------------------------------------------------------------------------------ -module Distribution.Client.FetchUtils ( - - -- * fetching packages - fetchPackage, - isFetched, - checkFetched, - - -- ** specifically for repo packages - fetchRepoTarball, - - -- * fetching other things - downloadIndex, - ) where - -import Distribution.Client.Types -import Distribution.Client.HttpUtils - ( downloadURI, isOldHackageURI, DownloadResult(..) 
) - -import Distribution.Package - ( PackageId, packageName, packageVersion ) -import Distribution.Simple.Utils - ( notice, info, setupMessage ) -import Distribution.Text - ( display ) -import Distribution.Verbosity - ( Verbosity ) - -import Data.Maybe -import System.Directory - ( doesFileExist, createDirectoryIfMissing, getTemporaryDirectory ) -import System.IO - ( openTempFile, hClose ) -import System.FilePath - ( (), (<.>) ) -import qualified System.FilePath.Posix as FilePath.Posix - ( combine, joinPath ) -import Network.URI - ( URI(uriPath) ) - --- ------------------------------------------------------------ --- * Actually fetch things --- ------------------------------------------------------------ - --- | Returns @True@ if the package has already been fetched --- or does not need fetching. --- -isFetched :: PackageLocation (Maybe FilePath) -> IO Bool -isFetched loc = case loc of - LocalUnpackedPackage _dir -> return True - LocalTarballPackage _file -> return True - RemoteTarballPackage _uri local -> return (isJust local) - RepoTarballPackage repo pkgid _ -> doesFileExist (packageFile repo pkgid) - - -checkFetched :: PackageLocation (Maybe FilePath) - -> IO (Maybe (PackageLocation FilePath)) -checkFetched loc = case loc of - LocalUnpackedPackage dir -> - return (Just $ LocalUnpackedPackage dir) - LocalTarballPackage file -> - return (Just $ LocalTarballPackage file) - RemoteTarballPackage uri (Just file) -> - return (Just $ RemoteTarballPackage uri file) - RepoTarballPackage repo pkgid (Just file) -> - return (Just $ RepoTarballPackage repo pkgid file) - - RemoteTarballPackage _uri Nothing -> return Nothing - RepoTarballPackage repo pkgid Nothing -> do - let file = packageFile repo pkgid - exists <- doesFileExist file - if exists - then return (Just $ RepoTarballPackage repo pkgid file) - else return Nothing - - --- | Fetch a package if we don't have it already. 
--- -fetchPackage :: Verbosity - -> PackageLocation (Maybe FilePath) - -> IO (PackageLocation FilePath) -fetchPackage verbosity loc = case loc of - LocalUnpackedPackage dir -> - return (LocalUnpackedPackage dir) - LocalTarballPackage file -> - return (LocalTarballPackage file) - RemoteTarballPackage uri (Just file) -> - return (RemoteTarballPackage uri file) - RepoTarballPackage repo pkgid (Just file) -> - return (RepoTarballPackage repo pkgid file) - - RemoteTarballPackage uri Nothing -> do - path <- downloadTarballPackage uri - return (RemoteTarballPackage uri path) - RepoTarballPackage repo pkgid Nothing -> do - local <- fetchRepoTarball verbosity repo pkgid - return (RepoTarballPackage repo pkgid local) - where - downloadTarballPackage uri = do - notice verbosity ("Downloading " ++ show uri) - tmpdir <- getTemporaryDirectory - (path, hnd) <- openTempFile tmpdir "cabal-.tar.gz" - hClose hnd - _ <- downloadURI verbosity uri path - return path - - --- | Fetch a repo package if we don't have it already. --- -fetchRepoTarball :: Verbosity -> Repo -> PackageId -> IO FilePath -fetchRepoTarball verbosity repo pkgid = do - fetched <- doesFileExist (packageFile repo pkgid) - if fetched - then do info verbosity $ display pkgid ++ " has already been downloaded." - return (packageFile repo pkgid) - else do setupMessage verbosity "Downloading" pkgid - downloadRepoPackage - where - downloadRepoPackage = case repoKind repo of - Right LocalRepo -> return (packageFile repo pkgid) - - Left remoteRepo -> do - let uri = packageURI remoteRepo pkgid - dir = packageDir repo pkgid - path = packageFile repo pkgid - createDirectoryIfMissing True dir - _ <- downloadURI verbosity uri path - return path - --- | Downloads an index file to [config-dir/packages/serv-id]. 
--- -downloadIndex :: Verbosity -> RemoteRepo -> FilePath -> IO DownloadResult -downloadIndex verbosity repo cacheDir = do - let uri = (remoteRepoURI repo) { - uriPath = uriPath (remoteRepoURI repo) - `FilePath.Posix.combine` "00-index.tar.gz" - } - path = cacheDir "00-index" <.> "tar.gz" - createDirectoryIfMissing True cacheDir - downloadURI verbosity uri path - - --- ------------------------------------------------------------ --- * Path utilities --- ------------------------------------------------------------ - --- | Generate the full path to the locally cached copy of --- the tarball for a given @PackageIdentifer@. --- -packageFile :: Repo -> PackageId -> FilePath -packageFile repo pkgid = packageDir repo pkgid - display pkgid - <.> "tar.gz" - --- | Generate the full path to the directory where the local cached copy of --- the tarball for a given @PackageIdentifer@ is stored. --- -packageDir :: Repo -> PackageId -> FilePath -packageDir repo pkgid = repoLocalDir repo - display (packageName pkgid) - display (packageVersion pkgid) - --- | Generate the URI of the tarball for a given package. 
--- -packageURI :: RemoteRepo -> PackageId -> URI -packageURI repo pkgid | isOldHackageURI (remoteRepoURI repo) = - (remoteRepoURI repo) { - uriPath = FilePath.Posix.joinPath - [uriPath (remoteRepoURI repo) - ,display (packageName pkgid) - ,display (packageVersion pkgid) - ,display pkgid <.> "tar.gz"] - } -packageURI repo pkgid = - (remoteRepoURI repo) { - uriPath = FilePath.Posix.joinPath - [uriPath (remoteRepoURI repo) - ,"package" - ,display pkgid <.> "tar.gz"] - } diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Freeze.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Freeze.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Freeze.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Freeze.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,237 +0,0 @@ -{-# LANGUAGE CPP #-} ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.Freeze --- Copyright : (c) David Himmelstrup 2005 --- Duncan Coutts 2011 --- License : BSD-like --- --- Maintainer : cabal-devel@gmail.com --- Stability : provisional --- Portability : portable --- --- The cabal freeze command ------------------------------------------------------------------------------ -module Distribution.Client.Freeze ( - freeze, - ) where - -import Distribution.Client.Config ( SavedConfig(..) ) -import Distribution.Client.Types -import Distribution.Client.Targets -import Distribution.Client.Dependency -import Distribution.Client.IndexUtils as IndexUtils - ( getSourcePackages, getInstalledPackages ) -import Distribution.Client.InstallPlan - ( PlanPackage ) -import qualified Distribution.Client.InstallPlan as InstallPlan -import Distribution.Client.Setup - ( GlobalFlags(..), FreezeFlags(..), ConfigExFlags(..) 
) -import Distribution.Client.Sandbox.PackageEnvironment - ( loadUserConfig, pkgEnvSavedConfig, showPackageEnvironment, - userPackageEnvironmentFile ) -import Distribution.Client.Sandbox.Types - ( SandboxPackageInfo(..) ) - -import Distribution.Package - ( Package, PackageIdentifier, packageId, packageName, packageVersion ) -import Distribution.Simple.Compiler - ( Compiler, compilerInfo, PackageDBStack ) -import Distribution.Simple.PackageIndex (InstalledPackageIndex) -import qualified Distribution.Client.PackageIndex as PackageIndex -import Distribution.Simple.Program - ( ProgramConfiguration ) -import Distribution.Simple.Setup - ( fromFlag, fromFlagOrDefault ) -import Distribution.Simple.Utils - ( die, notice, debug, writeFileAtomic ) -import Distribution.System - ( Platform ) -import Distribution.Text - ( display ) -import Distribution.Verbosity - ( Verbosity ) - -import Control.Monad - ( when ) -import qualified Data.ByteString.Lazy.Char8 as BS.Char8 -#if !MIN_VERSION_base(4,8,0) -import Data.Monoid - ( mempty ) -#endif -import Data.Version - ( showVersion ) -import Distribution.Version - ( thisVersion ) - --- ------------------------------------------------------------ --- * The freeze command --- ------------------------------------------------------------ - --- | Freeze all of the dependencies by writing a constraints section --- constraining each dependency to an exact version. 
--- -freeze :: Verbosity - -> PackageDBStack - -> [Repo] - -> Compiler - -> Platform - -> ProgramConfiguration - -> Maybe SandboxPackageInfo - -> GlobalFlags - -> FreezeFlags - -> IO () -freeze verbosity packageDBs repos comp platform conf mSandboxPkgInfo - globalFlags freezeFlags = do - - installedPkgIndex <- getInstalledPackages verbosity comp packageDBs conf - sourcePkgDb <- getSourcePackages verbosity repos - - pkgSpecifiers <- resolveUserTargets verbosity - (fromFlag $ globalWorldFile globalFlags) - (packageIndex sourcePkgDb) - [UserTargetLocalDir "."] - - sanityCheck pkgSpecifiers - pkgs <- planPackages - verbosity comp platform mSandboxPkgInfo freezeFlags - installedPkgIndex sourcePkgDb pkgSpecifiers - - if null pkgs - then notice verbosity $ "No packages to be frozen. " - ++ "As this package has no dependencies." - else if dryRun - then notice verbosity $ unlines $ - "The following packages would be frozen:" - : formatPkgs pkgs - - else freezePackages verbosity pkgs - - where - dryRun = fromFlag (freezeDryRun freezeFlags) - - sanityCheck pkgSpecifiers = do - when (not . null $ [n | n@(NamedPackage _ _) <- pkgSpecifiers]) $ - die $ "internal error: 'resolveUserTargets' returned " - ++ "unexpected named package specifiers!" - when (length pkgSpecifiers /= 1) $ - die $ "internal error: 'resolveUserTargets' returned " - ++ "unexpected source package specifiers!" - -planPackages :: Verbosity - -> Compiler - -> Platform - -> Maybe SandboxPackageInfo - -> FreezeFlags - -> InstalledPackageIndex - -> SourcePackageDb - -> [PackageSpecifier SourcePackage] - -> IO [PlanPackage] -planPackages verbosity comp platform mSandboxPkgInfo freezeFlags - installedPkgIndex sourcePkgDb pkgSpecifiers = do - - solver <- chooseSolver verbosity - (fromFlag (freezeSolver freezeFlags)) (compilerInfo comp) - notice verbosity "Resolving dependencies..." 
- - installPlan <- foldProgress logMsg die return $ - resolveDependencies - platform (compilerInfo comp) - solver - resolverParams - - return $ either id - (error "planPackages: installPlan contains broken packages") - (pruneInstallPlan installPlan pkgSpecifiers) - - where - resolverParams = - - setMaxBackjumps (if maxBackjumps < 0 then Nothing - else Just maxBackjumps) - - . setIndependentGoals independentGoals - - . setReorderGoals reorderGoals - - . setShadowPkgs shadowPkgs - - . setStrongFlags strongFlags - - . addConstraints - [ PackageConstraintStanzas (pkgSpecifierTarget pkgSpecifier) stanzas - | pkgSpecifier <- pkgSpecifiers ] - - . maybe id applySandboxInstallPolicy mSandboxPkgInfo - - $ standardInstallPolicy installedPkgIndex sourcePkgDb pkgSpecifiers - - logMsg message rest = debug verbosity message >> rest - - stanzas = concat - [ if testsEnabled then [TestStanzas] else [] - , if benchmarksEnabled then [BenchStanzas] else [] - ] - testsEnabled = fromFlagOrDefault False $ freezeTests freezeFlags - benchmarksEnabled = fromFlagOrDefault False $ freezeBenchmarks freezeFlags - - reorderGoals = fromFlag (freezeReorderGoals freezeFlags) - independentGoals = fromFlag (freezeIndependentGoals freezeFlags) - shadowPkgs = fromFlag (freezeShadowPkgs freezeFlags) - strongFlags = fromFlag (freezeStrongFlags freezeFlags) - maxBackjumps = fromFlag (freezeMaxBackjumps freezeFlags) - - --- | Remove all unneeded packages from an install plan. --- --- A package is unneeded if it is either --- --- 1) the package that we are freezing, or --- --- 2) not a dependency (directly or transitively) of the package we are --- freezing. This is useful for removing previously installed packages --- which are no longer required from the install plan. -pruneInstallPlan :: InstallPlan.InstallPlan - -> [PackageSpecifier SourcePackage] - -> Either [PlanPackage] [(PlanPackage, [PackageIdentifier])] -pruneInstallPlan installPlan pkgSpecifiers = - mapLeft (removeSelf pkgIds . 
PackageIndex.allPackages) $ - PackageIndex.dependencyClosure pkgIdx pkgIds - where - pkgIdx = PackageIndex.fromList $ InstallPlan.toList installPlan - pkgIds = [ packageId pkg | SpecificSourcePackage pkg <- pkgSpecifiers ] - mapLeft f (Left v) = Left $ f v - mapLeft _ (Right v) = Right v - removeSelf [thisPkg] = filter (\pp -> packageId pp /= thisPkg) - removeSelf _ = - error $ "internal error: 'pruneInstallPlan' given " - ++ "unexpected package specifiers!" - - -freezePackages :: Package pkg => Verbosity -> [pkg] -> IO () -freezePackages verbosity pkgs = do - pkgEnv <- fmap (createPkgEnv . addFrozenConstraints) $ - loadUserConfig verbosity "" - writeFileAtomic userPackageEnvironmentFile $ showPkgEnv pkgEnv - where - addFrozenConstraints config = - config { - savedConfigureExFlags = (savedConfigureExFlags config) { - configExConstraints = constraints pkgs - } - } - constraints = map $ pkgIdToConstraint . packageId - where - pkgIdToConstraint pkg = - UserConstraintVersion (packageName pkg) - (thisVersion $ packageVersion pkg) - createPkgEnv config = mempty { pkgEnvSavedConfig = config } - showPkgEnv = BS.Char8.pack . showPackageEnvironment - - -formatPkgs :: Package pkg => [pkg] -> [String] -formatPkgs = map $ showPkg . packageId - where - showPkg pid = name pid ++ " == " ++ version pid - name = display . packageName - version = showVersion . 
packageVersion diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Get.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Get.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Get.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Get.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,355 +0,0 @@ -{-# LANGUAGE CPP #-} ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.Get --- Copyright : (c) Andrea Vezzosi 2008 --- Duncan Coutts 2011 --- John Millikin 2012 --- License : BSD-like --- --- Maintainer : cabal-devel@haskell.org --- Stability : provisional --- Portability : portable --- --- The 'cabal get' command. ------------------------------------------------------------------------------ - -module Distribution.Client.Get ( - get - ) where - -import Distribution.Package - ( PackageId, packageId, packageName ) -import Distribution.Simple.Setup - ( Flag(..), fromFlag, fromFlagOrDefault ) -import Distribution.Simple.Utils - ( notice, die, info, writeFileAtomic ) -import Distribution.Verbosity - ( Verbosity ) -import Distribution.Text(display) -import qualified Distribution.PackageDescription as PD - -import Distribution.Client.Setup - ( GlobalFlags(..), GetFlags(..) 
) -import Distribution.Client.Types -import Distribution.Client.Targets -import Distribution.Client.Dependency -import Distribution.Client.FetchUtils -import qualified Distribution.Client.Tar as Tar (extractTarGzFile) -import Distribution.Client.IndexUtils as IndexUtils - ( getSourcePackages ) -import Distribution.Client.Compat.Process - ( readProcessWithExitCode ) -import Distribution.Compat.Exception - ( catchIO ) - -import Control.Exception - ( finally ) -import Control.Monad - ( filterM, forM_, unless, when ) -import Data.List - ( sortBy ) -import qualified Data.Map -import Data.Maybe - ( listToMaybe, mapMaybe ) -#if !MIN_VERSION_base(4,8,0) -import Data.Monoid - ( mempty ) -#endif -import Data.Ord - ( comparing ) -import System.Directory - ( createDirectoryIfMissing, doesDirectoryExist, doesFileExist - , getCurrentDirectory, setCurrentDirectory - ) -import System.Exit - ( ExitCode(..) ) -import System.FilePath - ( (), (<.>), addTrailingPathSeparator ) -import System.Process - ( rawSystem ) - - --- | Entry point for the 'cabal get' command. -get :: Verbosity - -> [Repo] - -> GlobalFlags - -> GetFlags - -> [UserTarget] - -> IO () -get verbosity _ _ _ [] = - notice verbosity "No packages requested. Nothing to do." - -get verbosity repos globalFlags getFlags userTargets = do - let useFork = case (getSourceRepository getFlags) of - NoFlag -> False - _ -> True - - unless useFork $ - mapM_ checkTarget userTargets - - sourcePkgDb <- getSourcePackages verbosity repos - - pkgSpecifiers <- resolveUserTargets verbosity - (fromFlag $ globalWorldFile globalFlags) - (packageIndex sourcePkgDb) - userTargets - - pkgs <- either (die . unlines . 
map show) return $ - resolveWithoutDependencies - (resolverParams sourcePkgDb pkgSpecifiers) - - unless (null prefix) $ - createDirectoryIfMissing True prefix - - if useFork - then fork pkgs - else unpack pkgs - - where - resolverParams sourcePkgDb pkgSpecifiers = - --TODO: add command-line constraint and preference args for unpack - standardInstallPolicy mempty sourcePkgDb pkgSpecifiers - - prefix = fromFlagOrDefault "" (getDestDir getFlags) - - fork :: [SourcePackage] -> IO () - fork pkgs = do - let kind = fromFlag . getSourceRepository $ getFlags - branchers <- findUsableBranchers - mapM_ (forkPackage verbosity branchers prefix kind) pkgs - - unpack :: [SourcePackage] -> IO () - unpack pkgs = do - forM_ pkgs $ \pkg -> do - location <- fetchPackage verbosity (packageSource pkg) - let pkgid = packageId pkg - descOverride | usePristine = Nothing - | otherwise = packageDescrOverride pkg - case location of - LocalTarballPackage tarballPath -> - unpackPackage verbosity prefix pkgid descOverride tarballPath - - RemoteTarballPackage _tarballURL tarballPath -> - unpackPackage verbosity prefix pkgid descOverride tarballPath - - RepoTarballPackage _repo _pkgid tarballPath -> - unpackPackage verbosity prefix pkgid descOverride tarballPath - - LocalUnpackedPackage _ -> - error "Distribution.Client.Get.unpack: the impossible happened." - where - usePristine = fromFlagOrDefault False (getPristine getFlags) - -checkTarget :: UserTarget -> IO () -checkTarget target = case target of - UserTargetLocalDir dir -> die (notTarball dir) - UserTargetLocalCabalFile file -> die (notTarball file) - _ -> return () - where - notTarball t = - "The 'get' command is for tarball packages. " - ++ "The target '" ++ t ++ "' is not a tarball." 
- --- ------------------------------------------------------------ --- * Unpacking the source tarball --- ------------------------------------------------------------ - -unpackPackage :: Verbosity -> FilePath -> PackageId - -> PackageDescriptionOverride - -> FilePath -> IO () -unpackPackage verbosity prefix pkgid descOverride pkgPath = do - let pkgdirname = display pkgid - pkgdir = prefix pkgdirname - pkgdir' = addTrailingPathSeparator pkgdir - existsDir <- doesDirectoryExist pkgdir - when existsDir $ die $ - "The directory \"" ++ pkgdir' ++ "\" already exists, not unpacking." - existsFile <- doesFileExist pkgdir - when existsFile $ die $ - "A file \"" ++ pkgdir ++ "\" is in the way, not unpacking." - notice verbosity $ "Unpacking to " ++ pkgdir' - Tar.extractTarGzFile prefix pkgdirname pkgPath - - case descOverride of - Nothing -> return () - Just pkgtxt -> do - let descFilePath = pkgdir display (packageName pkgid) <.> "cabal" - info verbosity $ - "Updating " ++ descFilePath - ++ " with the latest revision from the index." - writeFileAtomic descFilePath pkgtxt - - --- ------------------------------------------------------------ --- * Forking the source repository --- ------------------------------------------------------------ - -data BranchCmd = BranchCmd (Verbosity -> FilePath -> IO ExitCode) - -data Brancher = Brancher - { brancherBinary :: String - , brancherBuildCmd :: PD.SourceRepo -> Maybe BranchCmd - } - --- | The set of all supported branch drivers. -allBranchers :: [(PD.RepoType, Brancher)] -allBranchers = - [ (PD.Bazaar, branchBzr) - , (PD.Darcs, branchDarcs) - , (PD.Git, branchGit) - , (PD.Mercurial, branchHg) - , (PD.SVN, branchSvn) - ] - --- | Find which usable branch drivers (selected from 'allBranchers') are --- available and usable on the local machine. --- --- Each driver's main command is run with @--help@, and if the child process --- exits successfully, that brancher is considered usable. 
-findUsableBranchers :: IO (Data.Map.Map PD.RepoType Brancher) -findUsableBranchers = do - let usable (_, brancher) = flip catchIO (const (return False)) $ do - let cmd = brancherBinary brancher - (exitCode, _, _) <- readProcessWithExitCode cmd ["--help"] "" - return (exitCode == ExitSuccess) - pairs <- filterM usable allBranchers - return (Data.Map.fromList pairs) - --- | Fork a single package from a remote source repository to the local --- file system. -forkPackage :: Verbosity - -> Data.Map.Map PD.RepoType Brancher - -- ^ Branchers supported by the local machine. - -> FilePath - -- ^ The directory in which new branches or repositories will - -- be created. - -> (Maybe PD.RepoKind) - -- ^ Which repo to choose. - -> SourcePackage - -- ^ The package to fork. - -> IO () -forkPackage verbosity branchers prefix kind src = do - let desc = PD.packageDescription (packageDescription src) - pkgid = display (packageId src) - pkgname = display (packageName src) - destdir = prefix pkgname - - destDirExists <- doesDirectoryExist destdir - when destDirExists $ do - die ("The directory " ++ show destdir ++ " already exists, not forking.") - - destFileExists <- doesFileExist destdir - when destFileExists $ do - die ("A file " ++ show destdir ++ " is in the way, not forking.") - - let repos = PD.sourceRepos desc - case findBranchCmd branchers repos kind of - Just (BranchCmd io) -> do - exitCode <- io verbosity destdir - case exitCode of - ExitSuccess -> return () - ExitFailure _ -> die ("Couldn't fork package " ++ pkgid) - Nothing -> case repos of - [] -> die ("Package " ++ pkgid - ++ " does not have any source repositories.") - _ -> die ("Package " ++ pkgid - ++ " does not have any usable source repositories.") - --- | Given a set of possible branchers, and a set of possible source --- repositories, find a repository that is both 1) likely to be specific to --- this source version and 2) is supported by the local machine. 
-findBranchCmd :: Data.Map.Map PD.RepoType Brancher -> [PD.SourceRepo] - -> (Maybe PD.RepoKind) -> Maybe BranchCmd -findBranchCmd branchers allRepos maybeKind = cmd where - -- Sort repositories by kind, from This to Head to Unknown. Repositories - -- with equivalent kinds are selected based on the order they appear in - -- the Cabal description file. - repos' = sortBy (comparing thisFirst) allRepos - thisFirst r = case PD.repoKind r of - PD.RepoThis -> 0 :: Int - PD.RepoHead -> case PD.repoTag r of - -- If the type is 'head' but the author specified a tag, they - -- probably meant to create a 'this' repository but screwed up. - Just _ -> 0 - Nothing -> 1 - PD.RepoKindUnknown _ -> 2 - - -- If the user has specified the repo kind, filter out the repositories - -- she's not interested in. - repos = maybe repos' (\k -> filter ((==) k . PD.repoKind) repos') maybeKind - - repoBranchCmd repo = do - t <- PD.repoType repo - brancher <- Data.Map.lookup t branchers - brancherBuildCmd brancher repo - - cmd = listToMaybe (mapMaybe repoBranchCmd repos) - --- | Branch driver for Bazaar. -branchBzr :: Brancher -branchBzr = Brancher "bzr" $ \repo -> do - src <- PD.repoLocation repo - let args dst = case PD.repoTag repo of - Just tag -> ["branch", src, dst, "-r", "tag:" ++ tag] - Nothing -> ["branch", src, dst] - return $ BranchCmd $ \verbosity dst -> do - notice verbosity ("bzr: branch " ++ show src) - rawSystem "bzr" (args dst) - --- | Branch driver for Darcs. -branchDarcs :: Brancher -branchDarcs = Brancher "darcs" $ \repo -> do - src <- PD.repoLocation repo - let args dst = case PD.repoTag repo of - Just tag -> ["get", src, dst, "-t", tag] - Nothing -> ["get", src, dst] - return $ BranchCmd $ \verbosity dst -> do - notice verbosity ("darcs: get " ++ show src) - rawSystem "darcs" (args dst) - --- | Branch driver for Git. 
-branchGit :: Brancher -branchGit = Brancher "git" $ \repo -> do - src <- PD.repoLocation repo - let branchArgs = case PD.repoBranch repo of - Just b -> ["--branch", b] - Nothing -> [] - let postClone dst = case PD.repoTag repo of - Just t -> do - cwd <- getCurrentDirectory - setCurrentDirectory dst - finally - (rawSystem "git" (["checkout", t] ++ branchArgs)) - (setCurrentDirectory cwd) - Nothing -> return ExitSuccess - return $ BranchCmd $ \verbosity dst -> do - notice verbosity ("git: clone " ++ show src) - code <- rawSystem "git" (["clone", src, dst] ++ branchArgs) - case code of - ExitFailure _ -> return code - ExitSuccess -> postClone dst - --- | Branch driver for Mercurial. -branchHg :: Brancher -branchHg = Brancher "hg" $ \repo -> do - src <- PD.repoLocation repo - let branchArgs = case PD.repoBranch repo of - Just b -> ["--branch", b] - Nothing -> [] - let tagArgs = case PD.repoTag repo of - Just t -> ["--rev", t] - Nothing -> [] - let args dst = ["clone", src, dst] ++ branchArgs ++ tagArgs - return $ BranchCmd $ \verbosity dst -> do - notice verbosity ("hg: clone " ++ show src) - rawSystem "hg" (args dst) - --- | Branch driver for Subversion. 
-branchSvn :: Brancher -branchSvn = Brancher "svn" $ \repo -> do - src <- PD.repoLocation repo - let args dst = ["checkout", src, dst] - return $ BranchCmd $ \verbosity dst -> do - notice verbosity ("svn: checkout " ++ show src) - rawSystem "svn" (args dst) diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/GZipUtils.hs cabal-install-1.22-1.22.9.0/Distribution/Client/GZipUtils.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/GZipUtils.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/GZipUtils.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,44 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.GZipUtils --- Copyright : (c) Dmitry Astapov 2010 --- License : BSD-like --- --- Maintainer : cabal-devel@gmail.com --- Stability : provisional --- Portability : portable --- --- Provides a convenience functions for working with files that may or may not --- be zipped. ------------------------------------------------------------------------------ -module Distribution.Client.GZipUtils ( - maybeDecompress, - ) where - -import qualified Data.ByteString.Lazy.Internal as BS (ByteString(..)) -import Data.ByteString.Lazy (ByteString) -import Codec.Compression.GZip -import Codec.Compression.Zlib.Internal - --- | Attempts to decompress the `bytes' under the assumption that --- "data format" error at the very beginning of the stream means --- that it is already decompressed. Caller should make sanity checks --- to verify that it is not, in fact, garbage. --- --- This is to deal with http proxies that lie to us and transparently --- decompress without removing the content-encoding header. See: --- --- -maybeDecompress :: ByteString -> ByteString -maybeDecompress bytes = foldStream $ decompressWithErrors gzipOrZlibFormat defaultDecompressParams bytes - where - -- DataError at the beginning of the stream probably means that stream is not compressed. 
- -- Returning it as-is. - -- TODO: alternatively, we might consider looking for the two magic bytes - -- at the beginning of the gzip header. - foldStream (StreamError DataError _) = bytes - foldStream somethingElse = doFold somethingElse - - doFold StreamEnd = BS.Empty - doFold (StreamChunk bs stream) = BS.Chunk bs (doFold stream) - doFold (StreamError _ msg) = error $ "Codec.Compression.Zlib: " ++ msg diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Haddock.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Haddock.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Haddock.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Haddock.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,69 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.Haddock --- Copyright : (c) Andrea Vezzosi 2009 --- License : BSD-like --- --- Maintainer : cabal-devel@haskell.org --- Portability : portable --- --- Interfacing with Haddock --- ------------------------------------------------------------------------------ -module Distribution.Client.Haddock - ( - regenerateHaddockIndex - ) - where - -import Data.List (maximumBy) -import System.Directory (createDirectoryIfMissing, renameFile) -import System.FilePath ((), splitFileName) -import Distribution.Package - ( packageVersion ) -import Distribution.Simple.Haddock (haddockPackagePaths) -import Distribution.Simple.Program (haddockProgram, ProgramConfiguration - , rawSystemProgram, requireProgramVersion) -import Distribution.Version (Version(Version), orLaterVersion) -import Distribution.Verbosity (Verbosity) -import Distribution.Simple.PackageIndex - ( InstalledPackageIndex, allPackagesByName ) -import Distribution.Simple.Utils - ( comparing, debug, installDirectoryContents, withTempDirectory ) -import Distribution.InstalledPackageInfo as InstalledPackageInfo - ( InstalledPackageInfo_(exposed) ) - -regenerateHaddockIndex 
:: Verbosity - -> InstalledPackageIndex -> ProgramConfiguration -> FilePath - -> IO () -regenerateHaddockIndex verbosity pkgs conf index = do - (paths, warns) <- haddockPackagePaths pkgs' Nothing - let paths' = [ (interface, html) | (interface, Just html) <- paths] - case warns of - Nothing -> return () - Just m -> debug verbosity m - - (confHaddock, _, _) <- - requireProgramVersion verbosity haddockProgram - (orLaterVersion (Version [0,6] [])) conf - - createDirectoryIfMissing True destDir - - withTempDirectory verbosity destDir "tmphaddock" $ \tempDir -> do - - let flags = [ "--gen-contents" - , "--gen-index" - , "--odir=" ++ tempDir - , "--title=Haskell modules on this system" ] - ++ [ "--read-interface=" ++ html ++ "," ++ interface - | (interface, html) <- paths' ] - rawSystemProgram verbosity confHaddock flags - renameFile (tempDir "index.html") (tempDir destFile) - installDirectoryContents verbosity tempDir destDir - - where - (destDir,destFile) = splitFileName index - pkgs' = [ maximumBy (comparing packageVersion) pkgvers' - | (_pname, pkgvers) <- allPackagesByName pkgs - , let pkgvers' = filter exposed pkgvers - , not (null pkgvers') ] diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/HttpUtils.hs cabal-install-1.22-1.22.9.0/Distribution/Client/HttpUtils.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/HttpUtils.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/HttpUtils.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,159 +0,0 @@ ------------------------------------------------------------------------------ --- | Separate module for HTTP actions, using a proxy server if one exists ------------------------------------------------------------------------------ -module Distribution.Client.HttpUtils ( - DownloadResult(..), - downloadURI, - getHTTP, - cabalBrowse, - proxy, - isOldHackageURI - ) where - -import Network.HTTP - ( Request (..), Response (..), RequestMethod (..) 
- , Header(..), HeaderName(..), lookupHeader ) -import Network.HTTP.Proxy ( Proxy(..), fetchProxy) -import Network.URI - ( URI (..), URIAuth (..) ) -import Network.Browser - ( BrowserAction, browse, setAllowBasicAuth, setAuthorityGen - , setOutHandler, setErrHandler, setProxy, request) -import Network.Stream - ( Result, ConnError(..) ) -import Control.Monad - ( liftM ) -import qualified Data.ByteString.Lazy.Char8 as ByteString -import Data.ByteString.Lazy (ByteString) - -import qualified Paths_cabal_install (version) -import Distribution.Verbosity (Verbosity) -import Distribution.Simple.Utils - ( die, info, warn, debug, notice - , copyFileVerbose, writeFileAtomic ) -import Distribution.System - ( buildOS, buildArch ) -import Distribution.Text - ( display ) -import Data.Char ( isSpace ) -import qualified System.FilePath.Posix as FilePath.Posix - ( splitDirectories ) -import System.FilePath - ( (<.>) ) -import System.Directory - ( doesFileExist ) - -data DownloadResult = FileAlreadyInCache | FileDownloaded FilePath deriving (Eq) - --- Trim -trim :: String -> String -trim = f . f - where f = reverse . dropWhile isSpace - --- |Get the local proxy settings ---TODO: print info message when we're using a proxy based on verbosity -proxy :: Verbosity -> IO Proxy -proxy _verbosity = do - p <- fetchProxy True - -- Handle empty proxy strings - return $ case p of - Proxy uri auth -> - let uri' = trim uri in - if uri' == "" then NoProxy else Proxy uri' auth - _ -> p - -mkRequest :: URI - -> Maybe String -- ^ Optional etag to be set in the If-None-Match HTTP header. 
- -> Request ByteString -mkRequest uri etag = Request{ rqURI = uri - , rqMethod = GET - , rqHeaders = Header HdrUserAgent userAgent : ifNoneMatchHdr - , rqBody = ByteString.empty } - where userAgent = concat [ "cabal-install/", display Paths_cabal_install.version - , " (", display buildOS, "; ", display buildArch, ")" - ] - ifNoneMatchHdr = maybe [] (\t -> [Header HdrIfNoneMatch t]) etag - --- |Carry out a GET request, using the local proxy settings -getHTTP :: Verbosity - -> URI - -> Maybe String -- ^ Optional etag to check if we already have the latest file. - -> IO (Result (Response ByteString)) -getHTTP verbosity uri etag = liftM (\(_, resp) -> Right resp) $ - cabalBrowse verbosity Nothing (request (mkRequest uri etag)) - -cabalBrowse :: Verbosity - -> Maybe (String, String) - -> BrowserAction s a - -> IO a -cabalBrowse verbosity auth act = do - p <- proxy verbosity - browse $ do - setProxy p - setErrHandler (warn verbosity . ("http error: "++)) - setOutHandler (debug verbosity) - setAllowBasicAuth False - setAuthorityGen (\_ _ -> return auth) - act - -downloadURI :: Verbosity - -> URI -- ^ What to download - -> FilePath -- ^ Where to put it - -> IO DownloadResult -downloadURI verbosity uri path | uriScheme uri == "file:" = do - copyFileVerbose verbosity (uriPath uri) path - return (FileDownloaded path) - -- Can we store the hash of the file so we can safely return path when the - -- hash matches to avoid unnecessary computation? -downloadURI verbosity uri path = do - let etagPath = path <.> "etag" - targetExists <- doesFileExist path - etagPathExists <- doesFileExist etagPath - -- In rare cases the target file doesn't exist, but the etag does. 
- etag <- if targetExists && etagPathExists - then liftM Just $ readFile etagPath - else return Nothing - - result <- getHTTP verbosity uri etag - let result' = case result of - Left err -> Left err - Right rsp -> case rspCode rsp of - (2,0,0) -> Right rsp - (3,0,4) -> Right rsp - (a,b,c) -> Left err - where - err = ErrorMisc $ "Error HTTP code: " - ++ concatMap show [a,b,c] - - -- Only write the etag if we get a 200 response code. - -- A 304 still sends us an etag header. - case result' of - Left _ -> return () - Right rsp -> case rspCode rsp of - (2,0,0) -> case lookupHeader HdrETag (rspHeaders rsp) of - Nothing -> return () - Just newEtag -> writeFile etagPath newEtag - (_,_,_) -> return () - - case result' of - Left err -> die $ "Failed to download " ++ show uri ++ " : " ++ show err - Right rsp -> case rspCode rsp of - (2,0,0) -> do - info verbosity ("Downloaded to " ++ path) - writeFileAtomic path $ rspBody rsp - return (FileDownloaded path) - (3,0,4) -> do - notice verbosity "Skipping download: Local and remote files match." - return FileAlreadyInCache - (_,_,_) -> return (FileDownloaded path) - --FIXME: check the content-length header matches the body length. - --TODO: stream the download into the file rather than buffering the whole - -- thing in memory. - --- Utility function for legacy support. 
-isOldHackageURI :: URI -> Bool -isOldHackageURI uri - = case uriAuthority uri of - Just (URIAuth {uriRegName = "hackage.haskell.org"}) -> - FilePath.Posix.splitDirectories (uriPath uri) == ["/","packages","archive"] - _ -> False diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/IndexUtils.hs cabal-install-1.22-1.22.9.0/Distribution/Client/IndexUtils.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/IndexUtils.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/IndexUtils.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,591 +0,0 @@ -{-# LANGUAGE CPP #-} ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.IndexUtils --- Copyright : (c) Duncan Coutts 2008 --- License : BSD-like --- --- Maintainer : duncan@community.haskell.org --- Stability : provisional --- Portability : portable --- --- Extra utils related to the package indexes. ------------------------------------------------------------------------------ -module Distribution.Client.IndexUtils ( - getIndexFileAge, - getInstalledPackages, - getSourcePackages, - getSourcePackagesStrict, - convert, - - readPackageIndexFile, - parsePackageIndex, - readRepoIndex, - updateRepoIndexCache, - updatePackageIndexCacheFile, - - BuildTreeRefType(..), refTypeFromTypeCode, typeCodeFromRefType - ) where - -import qualified Distribution.Client.Tar as Tar -import Distribution.Client.Types - -import Distribution.Package - ( PackageId, PackageIdentifier(..), PackageName(..) - , Package(..), packageVersion, packageName - , Dependency(Dependency), InstalledPackageId(..) 
) -import Distribution.Client.PackageIndex (PackageIndex) -import qualified Distribution.Client.PackageIndex as PackageIndex -import Distribution.Simple.PackageIndex (InstalledPackageIndex) -import qualified Distribution.Simple.PackageIndex as InstalledPackageIndex -import qualified Distribution.InstalledPackageInfo as InstalledPackageInfo -import qualified Distribution.PackageDescription.Parse as PackageDesc.Parse -import Distribution.PackageDescription - ( GenericPackageDescription ) -import Distribution.PackageDescription.Parse - ( parsePackageDescription ) -import Distribution.Simple.Compiler - ( Compiler, PackageDBStack ) -import Distribution.Simple.Program - ( ProgramConfiguration ) -import qualified Distribution.Simple.Configure as Configure - ( getInstalledPackages ) -import Distribution.ParseUtils - ( ParseResult(..) ) -import Distribution.Version - ( Version(Version), intersectVersionRanges ) -import Distribution.Text - ( display, simpleParse ) -import Distribution.Verbosity - ( Verbosity, normal, lessVerbose ) -import Distribution.Simple.Utils - ( die, warn, info, fromUTF8 ) - -import Data.Char (isAlphaNum) -import Data.Maybe (mapMaybe, fromMaybe) -import Data.List (isPrefixOf) -#if !MIN_VERSION_base(4,8,0) -import Data.Monoid (Monoid(..)) -#endif -import qualified Data.Map as Map -import Control.Monad (MonadPlus(mplus), when, liftM) -import Control.Exception (evaluate) -import qualified Data.ByteString.Lazy as BS -import qualified Data.ByteString.Lazy.Char8 as BS.Char8 -import qualified Data.ByteString.Char8 as BSS -import Data.ByteString.Lazy (ByteString) -import Distribution.Client.GZipUtils (maybeDecompress) -import Distribution.Client.Utils ( byteStringToFilePath - , tryFindAddSourcePackageDesc ) -import Distribution.Compat.Exception (catchIO) -import Distribution.Client.Compat.Time (getFileAge, getModTime) -import System.Directory (doesFileExist) -import System.FilePath ((), takeExtension, splitDirectories, normalise) -import System.FilePath.Posix 
as FilePath.Posix - ( takeFileName ) -import System.IO -import System.IO.Unsafe (unsafeInterleaveIO) -import System.IO.Error (isDoesNotExistError) -import Numeric (showFFloat) - - -getInstalledPackages :: Verbosity -> Compiler - -> PackageDBStack -> ProgramConfiguration - -> IO InstalledPackageIndex -getInstalledPackages verbosity comp packageDbs conf = - Configure.getInstalledPackages verbosity' comp packageDbs conf - where - --FIXME: make getInstalledPackages use sensible verbosity in the first place - verbosity' = lessVerbose verbosity - -convert :: InstalledPackageIndex -> PackageIndex InstalledPackage -convert index' = PackageIndex.fromList - -- There can be multiple installed instances of each package version, - -- like when the same package is installed in the global & user DBs. - -- InstalledPackageIndex.allPackagesBySourcePackageId gives us the - -- installed packages with the most preferred instances first, so by - -- picking the first we should get the user one. This is almost but not - -- quite the same as what ghc does. - [ InstalledPackage ipkg (sourceDeps index' ipkg) - | (_,ipkg:_) <- InstalledPackageIndex.allPackagesBySourcePackageId index' ] - where - -- The InstalledPackageInfo only lists dependencies by the - -- InstalledPackageId, which means we do not directly know the corresponding - -- source dependency. The only way to find out is to lookup the - -- InstalledPackageId to get the InstalledPackageInfo and look at its - -- source PackageId. But if the package is broken because it depends on - -- other packages that do not exist then we have a problem we cannot find - -- the original source package id. Instead we make up a bogus package id. - -- This should have the same effect since it should be a dependency on a - -- nonexistent package. 
- sourceDeps index ipkg = - [ maybe (brokenPackageId depid) packageId mdep - | let depids = InstalledPackageInfo.depends ipkg - getpkg = InstalledPackageIndex.lookupInstalledPackageId index - , (depid, mdep) <- zip depids (map getpkg depids) ] - - brokenPackageId (InstalledPackageId str) = - PackageIdentifier (PackageName (str ++ "-broken")) (Version [] []) - ------------------------------------------------------------------------- --- Reading the source package index --- - --- | Read a repository index from disk, from the local files specified by --- a list of 'Repo's. --- --- All the 'SourcePackage's are marked as having come from the appropriate --- 'Repo'. --- --- This is a higher level wrapper used internally in cabal-install. --- -getSourcePackages :: Verbosity -> [Repo] -> IO SourcePackageDb -getSourcePackages verbosity repos = getSourcePackages' verbosity repos - ReadPackageIndexLazyIO - --- | Like 'getSourcePackages', but reads the package index strictly. Useful if --- you want to write to the package index after having read it. -getSourcePackagesStrict :: Verbosity -> [Repo] -> IO SourcePackageDb -getSourcePackagesStrict verbosity repos = getSourcePackages' verbosity repos - ReadPackageIndexStrict - --- | Common implementation used by getSourcePackages and --- getSourcePackagesStrict. -getSourcePackages' :: Verbosity -> [Repo] -> ReadPackageIndexMode - -> IO SourcePackageDb -getSourcePackages' verbosity [] _mode = do - warn verbosity $ "No remote package servers have been specified. Usually " - ++ "you would have one specified in the config file." - return SourcePackageDb { - packageIndex = mempty, - packagePreferences = mempty - } -getSourcePackages' verbosity repos mode = do - info verbosity "Reading available packages..." 
- pkgss <- mapM (\r -> readRepoIndex verbosity r mode) repos - let (pkgs, prefs) = mconcat pkgss - prefs' = Map.fromListWith intersectVersionRanges - [ (name, range) | Dependency name range <- prefs ] - _ <- evaluate pkgs - _ <- evaluate prefs' - return SourcePackageDb { - packageIndex = pkgs, - packagePreferences = prefs' - } - --- | Read a repository index from disk, from the local file specified by --- the 'Repo'. --- --- All the 'SourcePackage's are marked as having come from the given 'Repo'. --- --- This is a higher level wrapper used internally in cabal-install. --- -readRepoIndex :: Verbosity -> Repo -> ReadPackageIndexMode - -> IO (PackageIndex SourcePackage, [Dependency]) -readRepoIndex verbosity repo mode = - let indexFile = repoLocalDir repo "00-index.tar" - cacheFile = repoLocalDir repo "00-index.cache" - in handleNotFound $ do - warnIfIndexIsOld =<< getIndexFileAge repo - whenCacheOutOfDate indexFile cacheFile $ do - updatePackageIndexCacheFile verbosity indexFile cacheFile - readPackageIndexCacheFile mkAvailablePackage indexFile cacheFile mode - - where - mkAvailablePackage pkgEntry = - SourcePackage { - packageInfoId = pkgid, - packageDescription = packageDesc pkgEntry, - packageSource = case pkgEntry of - NormalPackage _ _ _ _ -> RepoTarballPackage repo pkgid Nothing - BuildTreeRef _ _ _ path _ -> LocalUnpackedPackage path, - packageDescrOverride = case pkgEntry of - NormalPackage _ _ pkgtxt _ -> Just pkgtxt - _ -> Nothing - } - where - pkgid = packageId pkgEntry - - handleNotFound action = catchIO action $ \e -> if isDoesNotExistError e - then do - case repoKind repo of - Left remoteRepo -> warn verbosity $ - "The package list for '" ++ remoteRepoName remoteRepo - ++ "' does not exist. Run 'cabal update' to download it." - Right _localRepo -> warn verbosity $ - "The package list for the local repo '" ++ repoLocalDir repo - ++ "' is missing. The repo is invalid." 
- return mempty - else ioError e - - isOldThreshold = 15 --days - warnIfIndexIsOld dt = do - when (dt >= isOldThreshold) $ case repoKind repo of - Left remoteRepo -> warn verbosity $ - "The package list for '" ++ remoteRepoName remoteRepo - ++ "' is " ++ showFFloat (Just 1) dt " days old.\nRun " - ++ "'cabal update' to get the latest list of available packages." - Right _localRepo -> return () - - --- | Return the age of the index file in days (as a Double). -getIndexFileAge :: Repo -> IO Double -getIndexFileAge repo = getFileAge $ repoLocalDir repo "00-index.tar" - - --- | It is not necessary to call this, as the cache will be updated when the --- index is read normally. However you can do the work earlier if you like. --- -updateRepoIndexCache :: Verbosity -> Repo -> IO () -updateRepoIndexCache verbosity repo = - whenCacheOutOfDate indexFile cacheFile $ do - updatePackageIndexCacheFile verbosity indexFile cacheFile - where - indexFile = repoLocalDir repo "00-index.tar" - cacheFile = repoLocalDir repo "00-index.cache" - -whenCacheOutOfDate :: FilePath -> FilePath -> IO () -> IO () -whenCacheOutOfDate origFile cacheFile action = do - exists <- doesFileExist cacheFile - if not exists - then action - else do - origTime <- getModTime origFile - cacheTime <- getModTime cacheFile - when (origTime > cacheTime) action - ------------------------------------------------------------------------- --- Reading the index file --- - --- | An index entry is either a normal package, or a local build tree reference. -data PackageEntry = - NormalPackage PackageId GenericPackageDescription ByteString BlockNo - | BuildTreeRef BuildTreeRefType - PackageId GenericPackageDescription FilePath BlockNo - --- | A build tree reference is either a link or a snapshot. 
-data BuildTreeRefType = SnapshotRef | LinkRef - deriving Eq - -refTypeFromTypeCode :: Tar.TypeCode -> BuildTreeRefType -refTypeFromTypeCode t - | t == Tar.buildTreeRefTypeCode = LinkRef - | t == Tar.buildTreeSnapshotTypeCode = SnapshotRef - | otherwise = - error "Distribution.Client.IndexUtils.refTypeFromTypeCode: unknown type code" - -typeCodeFromRefType :: BuildTreeRefType -> Tar.TypeCode -typeCodeFromRefType LinkRef = Tar.buildTreeRefTypeCode -typeCodeFromRefType SnapshotRef = Tar.buildTreeSnapshotTypeCode - -type MkPackageEntry = IO PackageEntry - -instance Package PackageEntry where - packageId (NormalPackage pkgid _ _ _) = pkgid - packageId (BuildTreeRef _ pkgid _ _ _) = pkgid - -packageDesc :: PackageEntry -> GenericPackageDescription -packageDesc (NormalPackage _ descr _ _) = descr -packageDesc (BuildTreeRef _ _ descr _ _) = descr - --- | Read a compressed \"00-index.tar.gz\" file into a 'PackageIndex'. --- --- This is supposed to be an \"all in one\" way to easily get at the info in --- the Hackage package index. --- --- It takes a function to map a 'GenericPackageDescription' into any more --- specific instance of 'Package' that you might want to use. In the simple --- case you can just use @\_ p -> p@ here. --- -readPackageIndexFile :: Package pkg - => (PackageEntry -> pkg) - -> FilePath - -> IO (PackageIndex pkg, [Dependency]) -readPackageIndexFile mkPkg indexFile = do - (mkPkgs, prefs) <- either fail return - . parsePackageIndex - . maybeDecompress - =<< BS.readFile indexFile - - pkgEntries <- sequence mkPkgs - pkgs <- evaluate $ PackageIndex.fromList (map mkPkg pkgEntries) - return (pkgs, prefs) - --- | Parse an uncompressed \"00-index.tar\" repository index file represented --- as a 'ByteString'. --- -parsePackageIndex :: ByteString - -> Either String ([MkPackageEntry], [Dependency]) -parsePackageIndex = accum 0 [] [] . 
Tar.read - where - accum blockNo pkgs prefs es = case es of - Tar.Fail err -> Left err - Tar.Done -> Right (reverse pkgs, reverse prefs) - Tar.Next e es' -> accum blockNo' pkgs' prefs' es' - where - (pkgs', prefs') = extract blockNo pkgs prefs e - blockNo' = blockNo + Tar.entrySizeInBlocks e - - extract blockNo pkgs prefs entry = - fromMaybe (pkgs, prefs) $ - tryExtractPkg - `mplus` tryExtractPrefs - where - tryExtractPkg = do - mkPkgEntry <- extractPkg entry blockNo - return (mkPkgEntry:pkgs, prefs) - - tryExtractPrefs = do - prefs' <- extractPrefs entry - return (pkgs, prefs'++prefs) - -extractPkg :: Tar.Entry -> BlockNo -> Maybe MkPackageEntry -extractPkg entry blockNo = case Tar.entryContent entry of - Tar.NormalFile content _ - | takeExtension fileName == ".cabal" - -> case splitDirectories (normalise fileName) of - [pkgname,vers,_] -> case simpleParse vers of - Just ver -> Just $ return (NormalPackage pkgid descr content blockNo) - where - pkgid = PackageIdentifier (PackageName pkgname) ver - parsed = parsePackageDescription . fromUTF8 . BS.Char8.unpack - $ content - descr = case parsed of - ParseOk _ d -> d - _ -> error $ "Couldn't read cabal file " - ++ show fileName - _ -> Nothing - _ -> Nothing - - Tar.OtherEntryType typeCode content _ - | Tar.isBuildTreeRefTypeCode typeCode -> - Just $ do - let path = byteStringToFilePath content - err = "Error reading package index." - cabalFile <- tryFindAddSourcePackageDesc path err - descr <- PackageDesc.Parse.readPackageDescription normal cabalFile - return $ BuildTreeRef (refTypeFromTypeCode typeCode) (packageId descr) - descr path blockNo - - _ -> Nothing - - where - fileName = Tar.entryPath entry - -extractPrefs :: Tar.Entry -> Maybe [Dependency] -extractPrefs entry = case Tar.entryContent entry of - Tar.NormalFile content _ - | takeFileName (Tar.entryPath entry) == "preferred-versions" - -> Just . parsePreferredVersions - . 
BS.Char8.unpack $ content - _ -> Nothing - -parsePreferredVersions :: String -> [Dependency] -parsePreferredVersions = mapMaybe simpleParse - . filter (not . isPrefixOf "--") - . lines - ------------------------------------------------------------------------- --- Reading and updating the index cache --- - -updatePackageIndexCacheFile :: Verbosity -> FilePath -> FilePath -> IO () -updatePackageIndexCacheFile verbosity indexFile cacheFile = do - info verbosity "Updating the index cache file..." - (mkPkgs, prefs) <- either fail return - . parsePackageIndex - . maybeDecompress - =<< BS.readFile indexFile - pkgEntries <- sequence mkPkgs - let cache = mkCache pkgEntries prefs - writeFile cacheFile (showIndexCache cache) - where - mkCache pkgs prefs = - [ CachePreference pref | pref <- prefs ] - ++ [ CachePackageId pkgid blockNo - | (NormalPackage pkgid _ _ blockNo) <- pkgs ] - ++ [ CacheBuildTreeRef refType blockNo - | (BuildTreeRef refType _ _ _ blockNo) <- pkgs] - -data ReadPackageIndexMode = ReadPackageIndexStrict - | ReadPackageIndexLazyIO - -readPackageIndexCacheFile :: Package pkg - => (PackageEntry -> pkg) - -> FilePath - -> FilePath - -> ReadPackageIndexMode - -> IO (PackageIndex pkg, [Dependency]) -readPackageIndexCacheFile mkPkg indexFile cacheFile mode = do - cache <- liftM readIndexCache (BSS.readFile cacheFile) - myWithFile indexFile ReadMode $ \indexHnd -> - packageIndexFromCache mkPkg indexHnd cache mode - where - myWithFile f m act = case mode of - ReadPackageIndexStrict -> withFile f m act - ReadPackageIndexLazyIO -> do indexHnd <- openFile f m - act indexHnd - - -packageIndexFromCache :: Package pkg - => (PackageEntry -> pkg) - -> Handle - -> [IndexCacheEntry] - -> ReadPackageIndexMode - -> IO (PackageIndex pkg, [Dependency]) -packageIndexFromCache mkPkg hnd entrs mode = accum mempty [] entrs - where - accum srcpkgs prefs [] = do - -- Have to reverse entries, since in a tar file, later entries mask - -- earlier ones, and PackageIndex.fromList does the 
same, but we - -- accumulate the list of entries in reverse order, so need to reverse. - pkgIndex <- evaluate $ PackageIndex.fromList (reverse srcpkgs) - return (pkgIndex, prefs) - - accum srcpkgs prefs (CachePackageId pkgid blockno : entries) = do - -- Given the cache entry, make a package index entry. - -- The magic here is that we use lazy IO to read the .cabal file - -- from the index tarball if it turns out that we need it. - -- Most of the time we only need the package id. - ~(pkg, pkgtxt) <- unsafeInterleaveIO $ do - pkgtxt <- getEntryContent blockno - pkg <- readPackageDescription pkgtxt - return (pkg, pkgtxt) - let srcpkg = case mode of - ReadPackageIndexLazyIO -> - mkPkg (NormalPackage pkgid pkg pkgtxt blockno) - ReadPackageIndexStrict -> - pkg `seq` pkgtxt `seq` mkPkg (NormalPackage pkgid pkg - pkgtxt blockno) - accum (srcpkg:srcpkgs) prefs entries - - accum srcpkgs prefs (CacheBuildTreeRef refType blockno : entries) = do - -- We have to read the .cabal file eagerly here because we can't cache the - -- package id for build tree references - the user might edit the .cabal - -- file after the reference was added to the index. - path <- liftM byteStringToFilePath . getEntryContent $ blockno - pkg <- do let err = "Error reading package index from cache." 
- file <- tryFindAddSourcePackageDesc path err - PackageDesc.Parse.readPackageDescription normal file - let srcpkg = mkPkg (BuildTreeRef refType (packageId pkg) pkg path blockno) - accum (srcpkg:srcpkgs) prefs entries - - accum srcpkgs prefs (CachePreference pref : entries) = - accum srcpkgs (pref:prefs) entries - - getEntryContent :: BlockNo -> IO ByteString - getEntryContent blockno = do - hSeek hnd AbsoluteSeek (fromIntegral (blockno * 512)) - header <- BS.hGet hnd 512 - size <- getEntrySize header - BS.hGet hnd (fromIntegral size) - - getEntrySize :: ByteString -> IO Tar.FileSize - getEntrySize header = - case Tar.read header of - Tar.Next e _ -> - case Tar.entryContent e of - Tar.NormalFile _ size -> return size - Tar.OtherEntryType typecode _ size - | Tar.isBuildTreeRefTypeCode typecode - -> return size - _ -> interror "unexpected tar entry type" - _ -> interror "could not read tar file entry" - - readPackageDescription :: ByteString -> IO GenericPackageDescription - readPackageDescription content = - case parsePackageDescription . fromUTF8 . BS.Char8.unpack $ content of - ParseOk _ d -> return d - _ -> interror "failed to parse .cabal file" - - interror msg = die $ "internal error when reading package index: " ++ msg - ++ "The package index or index cache is probably " - ++ "corrupt. Running cabal update might fix it." - ------------------------------------------------------------------------- --- Index cache data structure --- - --- | Tar files are block structured with 512 byte blocks. Every header and file --- content starts on a block boundary. 
--- -type BlockNo = Int - -data IndexCacheEntry = CachePackageId PackageId BlockNo - | CacheBuildTreeRef BuildTreeRefType BlockNo - | CachePreference Dependency - deriving (Eq) - -packageKey, blocknoKey, buildTreeRefKey, preferredVersionKey :: String -packageKey = "pkg:" -blocknoKey = "b#" -buildTreeRefKey = "build-tree-ref:" -preferredVersionKey = "pref-ver:" - -readIndexCacheEntry :: BSS.ByteString -> Maybe IndexCacheEntry -readIndexCacheEntry = \line -> - case BSS.words line of - [key, pkgnamestr, pkgverstr, sep, blocknostr] - | key == BSS.pack packageKey && sep == BSS.pack blocknoKey -> - case (parseName pkgnamestr, parseVer pkgverstr [], - parseBlockNo blocknostr) of - (Just pkgname, Just pkgver, Just blockno) - -> Just (CachePackageId (PackageIdentifier pkgname pkgver) blockno) - _ -> Nothing - [key, typecodestr, blocknostr] | key == BSS.pack buildTreeRefKey -> - case (parseRefType typecodestr, parseBlockNo blocknostr) of - (Just refType, Just blockno) - -> Just (CacheBuildTreeRef refType blockno) - _ -> Nothing - - (key: remainder) | key == BSS.pack preferredVersionKey -> - fmap CachePreference (simpleParse (BSS.unpack (BSS.unwords remainder))) - _ -> Nothing - where - parseName str - | BSS.all (\c -> isAlphaNum c || c == '-') str - = Just (PackageName (BSS.unpack str)) - | otherwise = Nothing - - parseVer str vs = - case BSS.readInt str of - Nothing -> Nothing - Just (v, str') -> case BSS.uncons str' of - Just ('.', str'') -> parseVer str'' (v:vs) - Just _ -> Nothing - Nothing -> Just (Version (reverse (v:vs)) []) - - parseBlockNo str = - case BSS.readInt str of - Just (blockno, remainder) | BSS.null remainder -> Just blockno - _ -> Nothing - - parseRefType str = - case BSS.uncons str of - Just (typeCode, remainder) - | BSS.null remainder && Tar.isBuildTreeRefTypeCode typeCode - -> Just (refTypeFromTypeCode typeCode) - _ -> Nothing - -showIndexCacheEntry :: IndexCacheEntry -> String -showIndexCacheEntry entry = unwords $ case entry of - CachePackageId pkgid 
b -> [ packageKey - , display (packageName pkgid) - , display (packageVersion pkgid) - , blocknoKey - , show b - ] - CacheBuildTreeRef t b -> [ buildTreeRefKey - , [typeCodeFromRefType t] - , show b - ] - CachePreference dep -> [ preferredVersionKey - , display dep - ] - -readIndexCache :: BSS.ByteString -> [IndexCacheEntry] -readIndexCache = mapMaybe readIndexCacheEntry . BSS.lines - -showIndexCache :: [IndexCacheEntry] -> String -showIndexCache = unlines . map showIndexCacheEntry diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Init/Heuristics.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Init/Heuristics.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Init/Heuristics.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Init/Heuristics.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,386 +0,0 @@ -{-# LANGUAGE CPP #-} ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.Init.Heuristics --- Copyright : (c) Benedikt Huber 2009 --- License : BSD-like --- --- Maintainer : cabal-devel@haskell.org --- Stability : provisional --- Portability : portable --- --- Heuristics for creating initial cabal files. 
--- ------------------------------------------------------------------------------ -module Distribution.Client.Init.Heuristics ( - guessPackageName, - scanForModules, SourceFileEntry(..), - neededBuildPrograms, - guessMainFileCandidates, - guessAuthorNameMail, - knownCategories, -) where -import Distribution.Text (simpleParse) -import Distribution.Simple.Setup (Flag(..), flagToMaybe) -import Distribution.ModuleName - ( ModuleName, toFilePath ) -import Distribution.Client.PackageIndex - ( allPackagesByName ) -import qualified Distribution.Package as P -import qualified Distribution.PackageDescription as PD - ( category, packageDescription ) -import Distribution.Simple.Utils - ( intercalate ) -import Distribution.Client.Utils - ( tryCanonicalizePath ) -import Language.Haskell.Extension ( Extension ) - -import Distribution.Client.Types ( packageDescription, SourcePackageDb(..) ) -#if !MIN_VERSION_base(4,8,0) -import Control.Applicative ( pure, (<$>), (<*>) ) -#endif -import Control.Arrow ( first ) -import Control.Monad ( liftM ) -import Data.Char ( isAlphaNum, isNumber, isUpper, isLower, isSpace ) -import Data.Either ( partitionEithers ) -import Data.List ( isInfixOf, isPrefixOf, isSuffixOf, sortBy ) -import Data.Maybe ( mapMaybe, catMaybes, maybeToList ) -#if !MIN_VERSION_base(4,8,0) -import Data.Monoid ( mempty, mappend, mconcat, ) -#endif -import Data.Ord ( comparing ) -import qualified Data.Set as Set ( fromList, toList ) -import System.Directory ( getCurrentDirectory, getDirectoryContents, - doesDirectoryExist, doesFileExist, getHomeDirectory, ) -import Distribution.Compat.Environment ( getEnvironment ) -import System.FilePath ( takeExtension, takeBaseName, dropExtension, - (), (<.>), splitDirectories, makeRelative ) - -import Distribution.Client.Init.Types ( InitFlags(..) ) -import Distribution.Client.Compat.Process ( readProcessWithExitCode ) -import System.Exit ( ExitCode(..) 
) - --- | Return a list of candidate main files for this executable: top-level --- modules including the word 'Main' in the file name. The list is sorted in --- order of preference, shorter file names are preferred. 'Right's are existing --- candidates and 'Left's are those that do not yet exist. -guessMainFileCandidates :: InitFlags -> IO [Either FilePath FilePath] -guessMainFileCandidates flags = do - dir <- - maybe getCurrentDirectory return (flagToMaybe $ packageDir flags) - files <- getDirectoryContents dir - let existingCandidates = filter isMain files - -- We always want to give the user at least one default choice. If either - -- Main.hs or Main.lhs has already been created, then we don't want to - -- suggest the other; however, if neither has been created, then we - -- suggest both. - newCandidates = - if any (`elem` existingCandidates) ["Main.hs", "Main.lhs"] - then [] - else ["Main.hs", "Main.lhs"] - candidates = - sortBy (\x y -> comparing (length . either id id) x y - `mappend` compare x y) - (map Left newCandidates ++ map Right existingCandidates) - return candidates - - where - isMain f = (isInfixOf "Main" f || isInfixOf "main" f) - && (isSuffixOf ".hs" f || isSuffixOf ".lhs" f) - --- | Guess the package name based on the given root directory. -guessPackageName :: FilePath -> IO P.PackageName -guessPackageName = liftM (P.PackageName . repair . last . splitDirectories) - . tryCanonicalizePath - where - -- Treat each span of non-alphanumeric characters as a hyphen. Each - -- hyphenated component of a package name must contain at least one - -- alphabetic character. An arbitrary character ('x') will be prepended if - -- this is not the case for the first component, and subsequent components - -- will simply be run together. For example, "1+2_foo-3" will become - -- "x12-foo3". - repair = repair' ('x' :) id - repair' invalid valid x = case dropWhile (not . 
isAlphaNum) x of - "" -> repairComponent "" - x' -> let (c, r) = first repairComponent $ break (not . isAlphaNum) x' - in c ++ repairRest r - where - repairComponent c | all isNumber c = invalid c - | otherwise = valid c - repairRest = repair' id ('-' :) - --- |Data type of source files found in the working directory -data SourceFileEntry = SourceFileEntry - { relativeSourcePath :: FilePath - , moduleName :: ModuleName - , fileExtension :: String - , imports :: [ModuleName] - , extensions :: [Extension] - } deriving Show - -sfToFileName :: FilePath -> SourceFileEntry -> FilePath -sfToFileName projectRoot (SourceFileEntry relPath m ext _ _) - = projectRoot relPath toFilePath m <.> ext - --- |Search for source files in the given directory --- and return pairs of guessed Haskell source path and --- module names. -scanForModules :: FilePath -> IO [SourceFileEntry] -scanForModules rootDir = scanForModulesIn rootDir rootDir - -scanForModulesIn :: FilePath -> FilePath -> IO [SourceFileEntry] -scanForModulesIn projectRoot srcRoot = scan srcRoot [] - where - scan dir hierarchy = do - entries <- getDirectoryContents (projectRoot dir) - (files, dirs) <- liftM partitionEithers (mapM (tagIsDir dir) entries) - let modules = catMaybes [ guessModuleName hierarchy file - | file <- files - , isUpper (head file) ] - modules' <- mapM (findImportsAndExts projectRoot) modules - recMods <- mapM (scanRecursive dir hierarchy) dirs - return $ concat (modules' : recMods) - tagIsDir parent entry = do - isDir <- doesDirectoryExist (parent entry) - return $ (if isDir then Right else Left) entry - guessModuleName hierarchy entry - | takeBaseName entry == "Setup" = Nothing - | ext `elem` sourceExtensions = - SourceFileEntry <$> pure relRoot <*> modName <*> pure ext <*> pure [] <*> pure [] - | otherwise = Nothing - where - relRoot = makeRelative projectRoot srcRoot - unqualModName = dropExtension entry - modName = simpleParse - $ intercalate "." . 
reverse $ (unqualModName : hierarchy) - ext = case takeExtension entry of '.':e -> e; e -> e - scanRecursive parent hierarchy entry - | isUpper (head entry) = scan (parent entry) (entry : hierarchy) - | isLower (head entry) && not (ignoreDir entry) = - scanForModulesIn projectRoot $ foldl () srcRoot (reverse (entry : hierarchy)) - | otherwise = return [] - ignoreDir ('.':_) = True - ignoreDir dir = dir `elem` ["dist", "_darcs"] - -findImportsAndExts :: FilePath -> SourceFileEntry -> IO SourceFileEntry -findImportsAndExts projectRoot sf = do - s <- readFile (sfToFileName projectRoot sf) - - let modules = mapMaybe - ( getModName - . drop 1 - . filter (not . null) - . dropWhile (/= "import") - . words - ) - . filter (not . ("--" `isPrefixOf`)) -- poor man's comment filtering - . lines - $ s - - -- XXX we should probably make a better attempt at parsing - -- comments above. Unfortunately we can't use a full-fledged - -- Haskell parser since cabal's dependencies must be kept at a - -- minimum. - - -- A poor man's LANGUAGE pragma parser. - exts = mapMaybe simpleParse - . concatMap getPragmas - . filter isLANGUAGEPragma - . map fst - . drop 1 - . takeWhile (not . null . snd) - . iterate (takeBraces . snd) - $ ("",s) - - takeBraces = break (== '}') . dropWhile (/= '{') - - isLANGUAGEPragma = ("{-# LANGUAGE " `isPrefixOf`) - - getPragmas = map trim . splitCommas . takeWhile (/= '#') . 
drop 13 - - splitCommas "" = [] - splitCommas xs = x : splitCommas (drop 1 y) - where (x,y) = break (==',') xs - - return sf { imports = modules - , extensions = exts - } - - where getModName :: [String] -> Maybe ModuleName - getModName [] = Nothing - getModName ("qualified":ws) = getModName ws - getModName (ms:_) = simpleParse ms - - - --- Unfortunately we cannot use the version exported by Distribution.Simple.Program -knownSuffixHandlers :: [(String,String)] -knownSuffixHandlers = - [ ("gc", "greencard") - , ("chs", "chs") - , ("hsc", "hsc2hs") - , ("x", "alex") - , ("y", "happy") - , ("ly", "happy") - , ("cpphs", "cpp") - ] - -sourceExtensions :: [String] -sourceExtensions = "hs" : "lhs" : map fst knownSuffixHandlers - -neededBuildPrograms :: [SourceFileEntry] -> [String] -neededBuildPrograms entries = - [ handler - | ext <- nubSet (map fileExtension entries) - , handler <- maybeToList (lookup ext knownSuffixHandlers) - ] - --- | Guess author and email using darcs and git configuration options. Use --- the following in decreasing order of preference: --- --- 1. vcs env vars ($DARCS_EMAIL, $GIT_AUTHOR_*) --- 2. Local repo configs --- 3. Global vcs configs --- 4. The generic $EMAIL --- --- Name and email are processed separately, so the guess might end up being --- a name from DARCS_EMAIL and an email from git config. --- --- Darcs has preference, for tradition's sake. -guessAuthorNameMail :: IO (Flag String, Flag String) -guessAuthorNameMail = fmap authorGuessPure authorGuessIO - --- Ordered in increasing preference, since Flag-as-monoid is identical to --- Last. 
-authorGuessPure :: AuthorGuessIO -> AuthorGuess -authorGuessPure (AuthorGuessIO env darcsLocalF darcsGlobalF gitLocal gitGlobal) - = mconcat - [ emailEnv env - , gitGlobal - , darcsCfg darcsGlobalF - , gitLocal - , darcsCfg darcsLocalF - , gitEnv env - , darcsEnv env - ] - -authorGuessIO :: IO AuthorGuessIO -authorGuessIO = AuthorGuessIO - <$> getEnvironment - <*> (maybeReadFile $ "_darcs" "prefs" "author") - <*> (maybeReadFile =<< liftM ( (".darcs" "author")) getHomeDirectory) - <*> gitCfg Local - <*> gitCfg Global - --- Types and functions used for guessing the author are now defined: - -type AuthorGuess = (Flag String, Flag String) -type Enviro = [(String, String)] -data GitLoc = Local | Global -data AuthorGuessIO = AuthorGuessIO - Enviro -- ^ Environment lookup table - (Maybe String) -- ^ Contents of local darcs author info - (Maybe String) -- ^ Contents of global darcs author info - AuthorGuess -- ^ Git config --local - AuthorGuess -- ^ Git config --global - -darcsEnv :: Enviro -> AuthorGuess -darcsEnv = maybe mempty nameAndMail . lookup "DARCS_EMAIL" - -gitEnv :: Enviro -> AuthorGuess -gitEnv env = (name, mail) - where - name = maybeFlag "GIT_AUTHOR_NAME" env - mail = maybeFlag "GIT_AUTHOR_EMAIL" env - -darcsCfg :: Maybe String -> AuthorGuess -darcsCfg = maybe mempty nameAndMail - -emailEnv :: Enviro -> AuthorGuess -emailEnv env = (mempty, mail) - where - mail = maybeFlag "EMAIL" env - -gitCfg :: GitLoc -> IO AuthorGuess -gitCfg which = do - name <- gitVar which "user.name" - mail <- gitVar which "user.email" - return (name, mail) - -gitVar :: GitLoc -> String -> IO (Flag String) -gitVar which = fmap happyOutput . 
gitConfigQuery which - -happyOutput :: (ExitCode, a, t) -> Flag a -happyOutput v = case v of - (ExitSuccess, s, _) -> Flag s - _ -> mempty - -gitConfigQuery :: GitLoc -> String -> IO (ExitCode, String, String) -gitConfigQuery which key = - fmap trim' $ readProcessWithExitCode "git" ["config", w, key] "" - where - w = case which of - Local -> "--local" - Global -> "--global" - trim' (a, b, c) = (a, trim b, c) - -maybeFlag :: String -> Enviro -> Flag String -maybeFlag k = maybe mempty Flag . lookup k - -maybeReadFile :: String -> IO (Maybe String) -maybeReadFile f = do - exists <- doesFileExist f - if exists - then fmap Just $ readFile f - else return Nothing - --- |Get list of categories used in Hackage. NOTE: Very slow, needs to be cached -knownCategories :: SourcePackageDb -> [String] -knownCategories (SourcePackageDb sourcePkgIndex _) = nubSet - [ cat | pkg <- map head (allPackagesByName sourcePkgIndex) - , let catList = (PD.category . PD.packageDescription . packageDescription) pkg - , cat <- splitString ',' catList - ] - --- Parse name and email, from darcs pref files or environment variable -nameAndMail :: String -> (Flag String, Flag String) -nameAndMail str - | all isSpace nameOrEmail = mempty - | null erest = (mempty, Flag $ trim nameOrEmail) - | otherwise = (Flag $ trim nameOrEmail, Flag mail) - where - (nameOrEmail,erest) = break (== '<') str - (mail,_) = break (== '>') (tail erest) - -trim :: String -> String -trim = removeLeadingSpace . reverse . removeLeadingSpace . reverse - where - removeLeadingSpace = dropWhile isSpace - --- split string at given character, and remove whitespace -splitString :: Char -> String -> [String] -splitString sep str = go str where - go s = if null s' then [] else tok : go rest where - s' = dropWhile (\c -> c == sep || isSpace c) s - (tok,rest) = break (==sep) s' - -nubSet :: (Ord a) => [a] -> [a] -nubSet = Set.toList . 
Set.fromList - -{- -test db testProjectRoot = do - putStrLn "Guessed package name" - (guessPackageName >=> print) testProjectRoot - putStrLn "Guessed name and email" - guessAuthorNameMail >>= print - - mods <- scanForModules testProjectRoot - - putStrLn "Guessed modules" - mapM_ print mods - putStrLn "Needed build programs" - print (neededBuildPrograms mods) - - putStrLn "List of known categories" - print $ knownCategories db --} diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Init/Licenses.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Init/Licenses.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Init/Licenses.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Init/Licenses.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,3065 +0,0 @@ -module Distribution.Client.Init.Licenses - ( License - , bsd2 - , bsd3 - , gplv2 - , gplv3 - , lgpl21 - , lgpl3 - , agplv3 - , apache20 - , mit - , mpl20 - , isc - ) where - -type License = String - -bsd2 :: String -> String -> License -bsd2 authors year = unlines - [ "Copyright (c) " ++ year ++ ", " ++ authors - , "All rights reserved." - , "" - , "Redistribution and use in source and binary forms, with or without" - , "modification, are permitted provided that the following conditions are" - , "met:" - , "" - , "1. Redistributions of source code must retain the above copyright" - , " notice, this list of conditions and the following disclaimer." - , "" - , "2. Redistributions in binary form must reproduce the above copyright" - , " notice, this list of conditions and the following disclaimer in the" - , " documentation and/or other materials provided with the" - , " distribution." - , "" - , "THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS" - , "\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT" - , "LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR" - , "A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT" - , "OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL," - , "SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT" - , "LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE," - , "DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY" - , "THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT" - , "(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE" - , "OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE." - ] - -bsd3 :: String -> String -> License -bsd3 authors year = unlines - [ "Copyright (c) " ++ year ++ ", " ++ authors - , "" - , "All rights reserved." - , "" - , "Redistribution and use in source and binary forms, with or without" - , "modification, are permitted provided that the following conditions are met:" - , "" - , " * Redistributions of source code must retain the above copyright" - , " notice, this list of conditions and the following disclaimer." - , "" - , " * Redistributions in binary form must reproduce the above" - , " copyright notice, this list of conditions and the following" - , " disclaimer in the documentation and/or other materials provided" - , " with the distribution." - , "" - , " * Neither the name of " ++ authors ++ " nor the names of other" - , " contributors may be used to endorse or promote products derived" - , " from this software without specific prior written permission." - , "" - , "THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS" - , "\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT" - , "LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR" - , "A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT" - , "OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL," - , "SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT" - , "LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE," - , "DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY" - , "THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT" - , "(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE" - , "OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE." - ] - -gplv2 :: License -gplv2 = unlines - [ " GNU GENERAL PUBLIC LICENSE" - , " Version 2, June 1991" - , "" - , " Copyright (C) 1989, 1991 Free Software Foundation, Inc.," - , " 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA" - , " Everyone is permitted to copy and distribute verbatim copies" - , " of this license document, but changing it is not allowed." - , "" - , " Preamble" - , "" - , " The licenses for most software are designed to take away your" - , "freedom to share and change it. By contrast, the GNU General Public" - , "License is intended to guarantee your freedom to share and change free" - , "software--to make sure the software is free for all its users. This" - , "General Public License applies to most of the Free Software" - , "Foundation's software and to any other program whose authors commit to" - , "using it. (Some other Free Software Foundation software is covered by" - , "the GNU Lesser General Public License instead.) You can apply it to" - , "your programs, too." - , "" - , " When we speak of free software, we are referring to freedom, not" - , "price. 
Our General Public Licenses are designed to make sure that you" - , "have the freedom to distribute copies of free software (and charge for" - , "this service if you wish), that you receive source code or can get it" - , "if you want it, that you can change the software or use pieces of it" - , "in new free programs; and that you know you can do these things." - , "" - , " To protect your rights, we need to make restrictions that forbid" - , "anyone to deny you these rights or to ask you to surrender the rights." - , "These restrictions translate to certain responsibilities for you if you" - , "distribute copies of the software, or if you modify it." - , "" - , " For example, if you distribute copies of such a program, whether" - , "gratis or for a fee, you must give the recipients all the rights that" - , "you have. You must make sure that they, too, receive or can get the" - , "source code. And you must show them these terms so they know their" - , "rights." - , "" - , " We protect your rights with two steps: (1) copyright the software, and" - , "(2) offer you this license which gives you legal permission to copy," - , "distribute and/or modify the software." - , "" - , " Also, for each author's protection and ours, we want to make certain" - , "that everyone understands that there is no warranty for this free" - , "software. If the software is modified by someone else and passed on, we" - , "want its recipients to know that what they have is not the original, so" - , "that any problems introduced by others will not reflect on the original" - , "authors' reputations." - , "" - , " Finally, any free program is threatened constantly by software" - , "patents. We wish to avoid the danger that redistributors of a free" - , "program will individually obtain patent licenses, in effect making the" - , "program proprietary. To prevent this, we have made it clear that any" - , "patent must be licensed for everyone's free use or not licensed at all." 
- , "" - , " The precise terms and conditions for copying, distribution and" - , "modification follow." - , "" - , " GNU GENERAL PUBLIC LICENSE" - , " TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION" - , "" - , " 0. This License applies to any program or other work which contains" - , "a notice placed by the copyright holder saying it may be distributed" - , "under the terms of this General Public License. The \"Program\", below," - , "refers to any such program or work, and a \"work based on the Program\"" - , "means either the Program or any derivative work under copyright law:" - , "that is to say, a work containing the Program or a portion of it," - , "either verbatim or with modifications and/or translated into another" - , "language. (Hereinafter, translation is included without limitation in" - , "the term \"modification\".) Each licensee is addressed as \"you\"." - , "" - , "Activities other than copying, distribution and modification are not" - , "covered by this License; they are outside its scope. The act of" - , "running the Program is not restricted, and the output from the Program" - , "is covered only if its contents constitute a work based on the" - , "Program (independent of having been made by running the Program)." - , "Whether that is true depends on what the Program does." - , "" - , " 1. You may copy and distribute verbatim copies of the Program's" - , "source code as you receive it, in any medium, provided that you" - , "conspicuously and appropriately publish on each copy an appropriate" - , "copyright notice and disclaimer of warranty; keep intact all the" - , "notices that refer to this License and to the absence of any warranty;" - , "and give any other recipients of the Program a copy of this License" - , "along with the Program." - , "" - , "You may charge a fee for the physical act of transferring a copy, and" - , "you may at your option offer warranty protection in exchange for a fee." - , "" - , " 2. 
You may modify your copy or copies of the Program or any portion" - , "of it, thus forming a work based on the Program, and copy and" - , "distribute such modifications or work under the terms of Section 1" - , "above, provided that you also meet all of these conditions:" - , "" - , " a) You must cause the modified files to carry prominent notices" - , " stating that you changed the files and the date of any change." - , "" - , " b) You must cause any work that you distribute or publish, that in" - , " whole or in part contains or is derived from the Program or any" - , " part thereof, to be licensed as a whole at no charge to all third" - , " parties under the terms of this License." - , "" - , " c) If the modified program normally reads commands interactively" - , " when run, you must cause it, when started running for such" - , " interactive use in the most ordinary way, to print or display an" - , " announcement including an appropriate copyright notice and a" - , " notice that there is no warranty (or else, saying that you provide" - , " a warranty) and that users may redistribute the program under" - , " these conditions, and telling the user how to view a copy of this" - , " License. (Exception: if the Program itself is interactive but" - , " does not normally print such an announcement, your work based on" - , " the Program is not required to print an announcement.)" - , "" - , "These requirements apply to the modified work as a whole. If" - , "identifiable sections of that work are not derived from the Program," - , "and can be reasonably considered independent and separate works in" - , "themselves, then this License, and its terms, do not apply to those" - , "sections when you distribute them as separate works. 
But when you" - , "distribute the same sections as part of a whole which is a work based" - , "on the Program, the distribution of the whole must be on the terms of" - , "this License, whose permissions for other licensees extend to the" - , "entire whole, and thus to each and every part regardless of who wrote it." - , "" - , "Thus, it is not the intent of this section to claim rights or contest" - , "your rights to work written entirely by you; rather, the intent is to" - , "exercise the right to control the distribution of derivative or" - , "collective works based on the Program." - , "" - , "In addition, mere aggregation of another work not based on the Program" - , "with the Program (or with a work based on the Program) on a volume of" - , "a storage or distribution medium does not bring the other work under" - , "the scope of this License." - , "" - , " 3. You may copy and distribute the Program (or a work based on it," - , "under Section 2) in object code or executable form under the terms of" - , "Sections 1 and 2 above provided that you also do one of the following:" - , "" - , " a) Accompany it with the complete corresponding machine-readable" - , " source code, which must be distributed under the terms of Sections" - , " 1 and 2 above on a medium customarily used for software interchange; or," - , "" - , " b) Accompany it with a written offer, valid for at least three" - , " years, to give any third party, for a charge no more than your" - , " cost of physically performing source distribution, a complete" - , " machine-readable copy of the corresponding source code, to be" - , " distributed under the terms of Sections 1 and 2 above on a medium" - , " customarily used for software interchange; or," - , "" - , " c) Accompany it with the information you received as to the offer" - , " to distribute corresponding source code. 
(This alternative is" - , " allowed only for noncommercial distribution and only if you" - , " received the program in object code or executable form with such" - , " an offer, in accord with Subsection b above.)" - , "" - , "The source code for a work means the preferred form of the work for" - , "making modifications to it. For an executable work, complete source" - , "code means all the source code for all modules it contains, plus any" - , "associated interface definition files, plus the scripts used to" - , "control compilation and installation of the executable. However, as a" - , "special exception, the source code distributed need not include" - , "anything that is normally distributed (in either source or binary" - , "form) with the major components (compiler, kernel, and so on) of the" - , "operating system on which the executable runs, unless that component" - , "itself accompanies the executable." - , "" - , "If distribution of executable or object code is made by offering" - , "access to copy from a designated place, then offering equivalent" - , "access to copy the source code from the same place counts as" - , "distribution of the source code, even though third parties are not" - , "compelled to copy the source along with the object code." - , "" - , " 4. You may not copy, modify, sublicense, or distribute the Program" - , "except as expressly provided under this License. Any attempt" - , "otherwise to copy, modify, sublicense or distribute the Program is" - , "void, and will automatically terminate your rights under this License." - , "However, parties who have received copies, or rights, from you under" - , "this License will not have their licenses terminated so long as such" - , "parties remain in full compliance." - , "" - , " 5. You are not required to accept this License, since you have not" - , "signed it. However, nothing else grants you permission to modify or" - , "distribute the Program or its derivative works. 
These actions are" - , "prohibited by law if you do not accept this License. Therefore, by" - , "modifying or distributing the Program (or any work based on the" - , "Program), you indicate your acceptance of this License to do so, and" - , "all its terms and conditions for copying, distributing or modifying" - , "the Program or works based on it." - , "" - , " 6. Each time you redistribute the Program (or any work based on the" - , "Program), the recipient automatically receives a license from the" - , "original licensor to copy, distribute or modify the Program subject to" - , "these terms and conditions. You may not impose any further" - , "restrictions on the recipients' exercise of the rights granted herein." - , "You are not responsible for enforcing compliance by third parties to" - , "this License." - , "" - , " 7. If, as a consequence of a court judgment or allegation of patent" - , "infringement or for any other reason (not limited to patent issues)," - , "conditions are imposed on you (whether by court order, agreement or" - , "otherwise) that contradict the conditions of this License, they do not" - , "excuse you from the conditions of this License. If you cannot" - , "distribute so as to satisfy simultaneously your obligations under this" - , "License and any other pertinent obligations, then as a consequence you" - , "may not distribute the Program at all. For example, if a patent" - , "license would not permit royalty-free redistribution of the Program by" - , "all those who receive copies directly or indirectly through you, then" - , "the only way you could satisfy both it and this License would be to" - , "refrain entirely from distribution of the Program." - , "" - , "If any portion of this section is held invalid or unenforceable under" - , "any particular circumstance, the balance of the section is intended to" - , "apply and the section as a whole is intended to apply in other" - , "circumstances." 
- , "" - , "It is not the purpose of this section to induce you to infringe any" - , "patents or other property right claims or to contest validity of any" - , "such claims; this section has the sole purpose of protecting the" - , "integrity of the free software distribution system, which is" - , "implemented by public license practices. Many people have made" - , "generous contributions to the wide range of software distributed" - , "through that system in reliance on consistent application of that" - , "system; it is up to the author/donor to decide if he or she is willing" - , "to distribute software through any other system and a licensee cannot" - , "impose that choice." - , "" - , "This section is intended to make thoroughly clear what is believed to" - , "be a consequence of the rest of this License." - , "" - , " 8. If the distribution and/or use of the Program is restricted in" - , "certain countries either by patents or by copyrighted interfaces, the" - , "original copyright holder who places the Program under this License" - , "may add an explicit geographical distribution limitation excluding" - , "those countries, so that distribution is permitted only in or among" - , "countries not thus excluded. In such case, this License incorporates" - , "the limitation as if written in the body of this License." - , "" - , " 9. The Free Software Foundation may publish revised and/or new versions" - , "of the General Public License from time to time. Such new versions will" - , "be similar in spirit to the present version, but may differ in detail to" - , "address new problems or concerns." - , "" - , "Each version is given a distinguishing version number. If the Program" - , "specifies a version number of this License which applies to it and \"any" - , "later version\", you have the option of following the terms and conditions" - , "either of that version or of any later version published by the Free" - , "Software Foundation. 
If the Program does not specify a version number of" - , "this License, you may choose any version ever published by the Free Software" - , "Foundation." - , "" - , " 10. If you wish to incorporate parts of the Program into other free" - , "programs whose distribution conditions are different, write to the author" - , "to ask for permission. For software which is copyrighted by the Free" - , "Software Foundation, write to the Free Software Foundation; we sometimes" - , "make exceptions for this. Our decision will be guided by the two goals" - , "of preserving the free status of all derivatives of our free software and" - , "of promoting the sharing and reuse of software generally." - , "" - , " NO WARRANTY" - , "" - , " 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY" - , "FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN" - , "OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES" - , "PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED" - , "OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF" - , "MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS" - , "TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE" - , "PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING," - , "REPAIR OR CORRECTION." - , "" - , " 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING" - , "WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR" - , "REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES," - , "INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING" - , "OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED" - , "TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY" - , "YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER" - , "PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE" - , "POSSIBILITY OF SUCH DAMAGES." - , "" - , " END OF TERMS AND CONDITIONS" - , "" - , " How to Apply These Terms to Your New Programs" - , "" - , " If you develop a new program, and you want it to be of the greatest" - , "possible use to the public, the best way to achieve this is to make it" - , "free software which everyone can redistribute and change under these terms." - , "" - , " To do so, attach the following notices to the program. It is safest" - , "to attach them to the start of each source file to most effectively" - , "convey the exclusion of warranty; and each file should have at least" - , "the \"copyright\" line and a pointer to where the full notice is found." - , "" - , " " - , " Copyright (C) " - , "" - , " This program is free software; you can redistribute it and/or modify" - , " it under the terms of the GNU General Public License as published by" - , " the Free Software Foundation; either version 2 of the License, or" - , " (at your option) any later version." - , "" - , " This program is distributed in the hope that it will be useful," - , " but WITHOUT ANY WARRANTY; without even the implied warranty of" - , " MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the" - , " GNU General Public License for more details." 
- , "" - , " You should have received a copy of the GNU General Public License along" - , " with this program; if not, write to the Free Software Foundation, Inc.," - , " 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA." - , "" - , "Also add information on how to contact you by electronic and paper mail." - , "" - , "If the program is interactive, make it output a short notice like this" - , "when it starts in an interactive mode:" - , "" - , " Gnomovision version 69, Copyright (C) year name of author" - , " Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'." - , " This is free software, and you are welcome to redistribute it" - , " under certain conditions; type `show c' for details." - , "" - , "The hypothetical commands `show w' and `show c' should show the appropriate" - , "parts of the General Public License. Of course, the commands you use may" - , "be called something other than `show w' and `show c'; they could even be" - , "mouse-clicks or menu items--whatever suits your program." - , "" - , "You should also get your employer (if you work as a programmer) or your" - , "school, if any, to sign a \"copyright disclaimer\" for the program, if" - , "necessary. Here is a sample; alter the names:" - , "" - , " Yoyodyne, Inc., hereby disclaims all copyright interest in the program" - , " `Gnomovision' (which makes passes at compilers) written by James Hacker." - , "" - , " , 1 April 1989" - , " Ty Coon, President of Vice" - , "" - , "This General Public License does not permit incorporating your program into" - , "proprietary programs. If your program is a subroutine library, you may" - , "consider it more useful to permit linking proprietary applications with the" - , "library. If this is what you want to do, use the GNU Lesser General" - , "Public License instead of this License." 
- ] - -gplv3 :: License -gplv3 = unlines - [ " GNU GENERAL PUBLIC LICENSE" - , " Version 3, 29 June 2007" - , "" - , " Copyright (C) 2007 Free Software Foundation, Inc. " - , " Everyone is permitted to copy and distribute verbatim copies" - , " of this license document, but changing it is not allowed." - , "" - , " Preamble" - , "" - , " The GNU General Public License is a free, copyleft license for" - , "software and other kinds of works." - , "" - , " The licenses for most software and other practical works are designed" - , "to take away your freedom to share and change the works. By contrast," - , "the GNU General Public License is intended to guarantee your freedom to" - , "share and change all versions of a program--to make sure it remains free" - , "software for all its users. We, the Free Software Foundation, use the" - , "GNU General Public License for most of our software; it applies also to" - , "any other work released this way by its authors. You can apply it to" - , "your programs, too." - , "" - , " When we speak of free software, we are referring to freedom, not" - , "price. Our General Public Licenses are designed to make sure that you" - , "have the freedom to distribute copies of free software (and charge for" - , "them if you wish), that you receive source code or can get it if you" - , "want it, that you can change the software or use pieces of it in new" - , "free programs, and that you know you can do these things." - , "" - , " To protect your rights, we need to prevent others from denying you" - , "these rights or asking you to surrender the rights. Therefore, you have" - , "certain responsibilities if you distribute copies of the software, or if" - , "you modify it: responsibilities to respect the freedom of others." - , "" - , " For example, if you distribute copies of such a program, whether" - , "gratis or for a fee, you must pass on to the recipients the same" - , "freedoms that you received. 
You must make sure that they, too, receive" - , "or can get the source code. And you must show them these terms so they" - , "know their rights." - , "" - , " Developers that use the GNU GPL protect your rights with two steps:" - , "(1) assert copyright on the software, and (2) offer you this License" - , "giving you legal permission to copy, distribute and/or modify it." - , "" - , " For the developers' and authors' protection, the GPL clearly explains" - , "that there is no warranty for this free software. For both users' and" - , "authors' sake, the GPL requires that modified versions be marked as" - , "changed, so that their problems will not be attributed erroneously to" - , "authors of previous versions." - , "" - , " Some devices are designed to deny users access to install or run" - , "modified versions of the software inside them, although the manufacturer" - , "can do so. This is fundamentally incompatible with the aim of" - , "protecting users' freedom to change the software. The systematic" - , "pattern of such abuse occurs in the area of products for individuals to" - , "use, which is precisely where it is most unacceptable. Therefore, we" - , "have designed this version of the GPL to prohibit the practice for those" - , "products. If such problems arise substantially in other domains, we" - , "stand ready to extend this provision to those domains in future versions" - , "of the GPL, as needed to protect the freedom of users." - , "" - , " Finally, every program is threatened constantly by software patents." - , "States should not allow patents to restrict development and use of" - , "software on general-purpose computers, but in those that do, we wish to" - , "avoid the special danger that patents applied to a free program could" - , "make it effectively proprietary. To prevent this, the GPL assures that" - , "patents cannot be used to render the program non-free." 
- , "" - , " The precise terms and conditions for copying, distribution and" - , "modification follow." - , "" - , " TERMS AND CONDITIONS" - , "" - , " 0. Definitions." - , "" - , " \"This License\" refers to version 3 of the GNU General Public License." - , "" - , " \"Copyright\" also means copyright-like laws that apply to other kinds of" - , "works, such as semiconductor masks." - , "" - , " \"The Program\" refers to any copyrightable work licensed under this" - , "License. Each licensee is addressed as \"you\". \"Licensees\" and" - , "\"recipients\" may be individuals or organizations." - , "" - , " To \"modify\" a work means to copy from or adapt all or part of the work" - , "in a fashion requiring copyright permission, other than the making of an" - , "exact copy. The resulting work is called a \"modified version\" of the" - , "earlier work or a work \"based on\" the earlier work." - , "" - , " A \"covered work\" means either the unmodified Program or a work based" - , "on the Program." - , "" - , " To \"propagate\" a work means to do anything with it that, without" - , "permission, would make you directly or secondarily liable for" - , "infringement under applicable copyright law, except executing it on a" - , "computer or modifying a private copy. Propagation includes copying," - , "distribution (with or without modification), making available to the" - , "public, and in some countries other activities as well." - , "" - , " To \"convey\" a work means any kind of propagation that enables other" - , "parties to make or receive copies. Mere interaction with a user through" - , "a computer network, with no transfer of a copy, is not conveying." 
- , "" - , " An interactive user interface displays \"Appropriate Legal Notices\"" - , "to the extent that it includes a convenient and prominently visible" - , "feature that (1) displays an appropriate copyright notice, and (2)" - , "tells the user that there is no warranty for the work (except to the" - , "extent that warranties are provided), that licensees may convey the" - , "work under this License, and how to view a copy of this License. If" - , "the interface presents a list of user commands or options, such as a" - , "menu, a prominent item in the list meets this criterion." - , "" - , " 1. Source Code." - , "" - , " The \"source code\" for a work means the preferred form of the work" - , "for making modifications to it. \"Object code\" means any non-source" - , "form of a work." - , "" - , " A \"Standard Interface\" means an interface that either is an official" - , "standard defined by a recognized standards body, or, in the case of" - , "interfaces specified for a particular programming language, one that" - , "is widely used among developers working in that language." - , "" - , " The \"System Libraries\" of an executable work include anything, other" - , "than the work as a whole, that (a) is included in the normal form of" - , "packaging a Major Component, but which is not part of that Major" - , "Component, and (b) serves only to enable use of the work with that" - , "Major Component, or to implement a Standard Interface for which an" - , "implementation is available to the public in source code form. A" - , "\"Major Component\", in this context, means a major essential component" - , "(kernel, window system, and so on) of the specific operating system" - , "(if any) on which the executable work runs, or a compiler used to" - , "produce the work, or an object code interpreter used to run it." 
- , "" - , " The \"Corresponding Source\" for a work in object code form means all" - , "the source code needed to generate, install, and (for an executable" - , "work) run the object code and to modify the work, including scripts to" - , "control those activities. However, it does not include the work's" - , "System Libraries, or general-purpose tools or generally available free" - , "programs which are used unmodified in performing those activities but" - , "which are not part of the work. For example, Corresponding Source" - , "includes interface definition files associated with source files for" - , "the work, and the source code for shared libraries and dynamically" - , "linked subprograms that the work is specifically designed to require," - , "such as by intimate data communication or control flow between those" - , "subprograms and other parts of the work." - , "" - , " The Corresponding Source need not include anything that users" - , "can regenerate automatically from other parts of the Corresponding" - , "Source." - , "" - , " The Corresponding Source for a work in source code form is that" - , "same work." - , "" - , " 2. Basic Permissions." - , "" - , " All rights granted under this License are granted for the term of" - , "copyright on the Program, and are irrevocable provided the stated" - , "conditions are met. This License explicitly affirms your unlimited" - , "permission to run the unmodified Program. The output from running a" - , "covered work is covered by this License only if the output, given its" - , "content, constitutes a covered work. This License acknowledges your" - , "rights of fair use or other equivalent, as provided by copyright law." - , "" - , " You may make, run and propagate covered works that you do not" - , "convey, without conditions so long as your license otherwise remains" - , "in force. 
You may convey covered works to others for the sole purpose" - , "of having them make modifications exclusively for you, or provide you" - , "with facilities for running those works, provided that you comply with" - , "the terms of this License in conveying all material for which you do" - , "not control copyright. Those thus making or running the covered works" - , "for you must do so exclusively on your behalf, under your direction" - , "and control, on terms that prohibit them from making any copies of" - , "your copyrighted material outside their relationship with you." - , "" - , " Conveying under any other circumstances is permitted solely under" - , "the conditions stated below. Sublicensing is not allowed; section 10" - , "makes it unnecessary." - , "" - , " 3. Protecting Users' Legal Rights From Anti-Circumvention Law." - , "" - , " No covered work shall be deemed part of an effective technological" - , "measure under any applicable law fulfilling obligations under article" - , "11 of the WIPO copyright treaty adopted on 20 December 1996, or" - , "similar laws prohibiting or restricting circumvention of such" - , "measures." - , "" - , " When you convey a covered work, you waive any legal power to forbid" - , "circumvention of technological measures to the extent such circumvention" - , "is effected by exercising rights under this License with respect to" - , "the covered work, and you disclaim any intention to limit operation or" - , "modification of the work as a means of enforcing, against the work's" - , "users, your or third parties' legal rights to forbid circumvention of" - , "technological measures." - , "" - , " 4. Conveying Verbatim Copies." 
- , "" - , " You may convey verbatim copies of the Program's source code as you" - , "receive it, in any medium, provided that you conspicuously and" - , "appropriately publish on each copy an appropriate copyright notice;" - , "keep intact all notices stating that this License and any" - , "non-permissive terms added in accord with section 7 apply to the code;" - , "keep intact all notices of the absence of any warranty; and give all" - , "recipients a copy of this License along with the Program." - , "" - , " You may charge any price or no price for each copy that you convey," - , "and you may offer support or warranty protection for a fee." - , "" - , " 5. Conveying Modified Source Versions." - , "" - , " You may convey a work based on the Program, or the modifications to" - , "produce it from the Program, in the form of source code under the" - , "terms of section 4, provided that you also meet all of these conditions:" - , "" - , " a) The work must carry prominent notices stating that you modified" - , " it, and giving a relevant date." - , "" - , " b) The work must carry prominent notices stating that it is" - , " released under this License and any conditions added under section" - , " 7. This requirement modifies the requirement in section 4 to" - , " \"keep intact all notices\"." - , "" - , " c) You must license the entire work, as a whole, under this" - , " License to anyone who comes into possession of a copy. This" - , " License will therefore apply, along with any applicable section 7" - , " additional terms, to the whole of the work, and all its parts," - , " regardless of how they are packaged. This License gives no" - , " permission to license the work in any other way, but it does not" - , " invalidate such permission if you have separately received it." 
- , "" - , " d) If the work has interactive user interfaces, each must display" - , " Appropriate Legal Notices; however, if the Program has interactive" - , " interfaces that do not display Appropriate Legal Notices, your" - , " work need not make them do so." - , "" - , " A compilation of a covered work with other separate and independent" - , "works, which are not by their nature extensions of the covered work," - , "and which are not combined with it such as to form a larger program," - , "in or on a volume of a storage or distribution medium, is called an" - , "\"aggregate\" if the compilation and its resulting copyright are not" - , "used to limit the access or legal rights of the compilation's users" - , "beyond what the individual works permit. Inclusion of a covered work" - , "in an aggregate does not cause this License to apply to the other" - , "parts of the aggregate." - , "" - , " 6. Conveying Non-Source Forms." - , "" - , " You may convey a covered work in object code form under the terms" - , "of sections 4 and 5, provided that you also convey the" - , "machine-readable Corresponding Source under the terms of this License," - , "in one of these ways:" - , "" - , " a) Convey the object code in, or embodied in, a physical product" - , " (including a physical distribution medium), accompanied by the" - , " Corresponding Source fixed on a durable physical medium" - , " customarily used for software interchange." 
- , "" - , " b) Convey the object code in, or embodied in, a physical product" - , " (including a physical distribution medium), accompanied by a" - , " written offer, valid for at least three years and valid for as" - , " long as you offer spare parts or customer support for that product" - , " model, to give anyone who possesses the object code either (1) a" - , " copy of the Corresponding Source for all the software in the" - , " product that is covered by this License, on a durable physical" - , " medium customarily used for software interchange, for a price no" - , " more than your reasonable cost of physically performing this" - , " conveying of source, or (2) access to copy the" - , " Corresponding Source from a network server at no charge." - , "" - , " c) Convey individual copies of the object code with a copy of the" - , " written offer to provide the Corresponding Source. This" - , " alternative is allowed only occasionally and noncommercially, and" - , " only if you received the object code with such an offer, in accord" - , " with subsection 6b." - , "" - , " d) Convey the object code by offering access from a designated" - , " place (gratis or for a charge), and offer equivalent access to the" - , " Corresponding Source in the same way through the same place at no" - , " further charge. You need not require recipients to copy the" - , " Corresponding Source along with the object code. If the place to" - , " copy the object code is a network server, the Corresponding Source" - , " may be on a different server (operated by you or a third party)" - , " that supports equivalent copying facilities, provided you maintain" - , " clear directions next to the object code saying where to find the" - , " Corresponding Source. Regardless of what server hosts the" - , " Corresponding Source, you remain obligated to ensure that it is" - , " available for as long as needed to satisfy these requirements." 
- , "" - , " e) Convey the object code using peer-to-peer transmission, provided" - , " you inform other peers where the object code and Corresponding" - , " Source of the work are being offered to the general public at no" - , " charge under subsection 6d." - , "" - , " A separable portion of the object code, whose source code is excluded" - , "from the Corresponding Source as a System Library, need not be" - , "included in conveying the object code work." - , "" - , " A \"User Product\" is either (1) a \"consumer product\", which means any" - , "tangible personal property which is normally used for personal, family," - , "or household purposes, or (2) anything designed or sold for incorporation" - , "into a dwelling. In determining whether a product is a consumer product," - , "doubtful cases shall be resolved in favor of coverage. For a particular" - , "product received by a particular user, \"normally used\" refers to a" - , "typical or common use of that class of product, regardless of the status" - , "of the particular user or of the way in which the particular user" - , "actually uses, or expects or is expected to use, the product. A product" - , "is a consumer product regardless of whether the product has substantial" - , "commercial, industrial or non-consumer uses, unless such uses represent" - , "the only significant mode of use of the product." - , "" - , " \"Installation Information\" for a User Product means any methods," - , "procedures, authorization keys, or other information required to install" - , "and execute modified versions of a covered work in that User Product from" - , "a modified version of its Corresponding Source. The information must" - , "suffice to ensure that the continued functioning of the modified object" - , "code is in no case prevented or interfered with solely because" - , "modification has been made." 
- , "" - , " If you convey an object code work under this section in, or with, or" - , "specifically for use in, a User Product, and the conveying occurs as" - , "part of a transaction in which the right of possession and use of the" - , "User Product is transferred to the recipient in perpetuity or for a" - , "fixed term (regardless of how the transaction is characterized), the" - , "Corresponding Source conveyed under this section must be accompanied" - , "by the Installation Information. But this requirement does not apply" - , "if neither you nor any third party retains the ability to install" - , "modified object code on the User Product (for example, the work has" - , "been installed in ROM)." - , "" - , " The requirement to provide Installation Information does not include a" - , "requirement to continue to provide support service, warranty, or updates" - , "for a work that has been modified or installed by the recipient, or for" - , "the User Product in which it has been modified or installed. Access to a" - , "network may be denied when the modification itself materially and" - , "adversely affects the operation of the network or violates the rules and" - , "protocols for communication across the network." - , "" - , " Corresponding Source conveyed, and Installation Information provided," - , "in accord with this section must be in a format that is publicly" - , "documented (and with an implementation available to the public in" - , "source code form), and must require no special password or key for" - , "unpacking, reading or copying." - , "" - , " 7. Additional Terms." - , "" - , " \"Additional permissions\" are terms that supplement the terms of this" - , "License by making exceptions from one or more of its conditions." - , "Additional permissions that are applicable to the entire Program shall" - , "be treated as though they were included in this License, to the extent" - , "that they are valid under applicable law. 
If additional permissions" - , "apply only to part of the Program, that part may be used separately" - , "under those permissions, but the entire Program remains governed by" - , "this License without regard to the additional permissions." - , "" - , " When you convey a copy of a covered work, you may at your option" - , "remove any additional permissions from that copy, or from any part of" - , "it. (Additional permissions may be written to require their own" - , "removal in certain cases when you modify the work.) You may place" - , "additional permissions on material, added by you to a covered work," - , "for which you have or can give appropriate copyright permission." - , "" - , " Notwithstanding any other provision of this License, for material you" - , "add to a covered work, you may (if authorized by the copyright holders of" - , "that material) supplement the terms of this License with terms:" - , "" - , " a) Disclaiming warranty or limiting liability differently from the" - , " terms of sections 15 and 16 of this License; or" - , "" - , " b) Requiring preservation of specified reasonable legal notices or" - , " author attributions in that material or in the Appropriate Legal" - , " Notices displayed by works containing it; or" - , "" - , " c) Prohibiting misrepresentation of the origin of that material, or" - , " requiring that modified versions of such material be marked in" - , " reasonable ways as different from the original version; or" - , "" - , " d) Limiting the use for publicity purposes of names of licensors or" - , " authors of the material; or" - , "" - , " e) Declining to grant rights under trademark law for use of some" - , " trade names, trademarks, or service marks; or" - , "" - , " f) Requiring indemnification of licensors and authors of that" - , " material by anyone who conveys the material (or modified versions of" - , " it) with contractual assumptions of liability to the recipient, for" - , " any liability that these contractual 
assumptions directly impose on" - , " those licensors and authors." - , "" - , " All other non-permissive additional terms are considered \"further" - , "restrictions\" within the meaning of section 10. If the Program as you" - , "received it, or any part of it, contains a notice stating that it is" - , "governed by this License along with a term that is a further" - , "restriction, you may remove that term. If a license document contains" - , "a further restriction but permits relicensing or conveying under this" - , "License, you may add to a covered work material governed by the terms" - , "of that license document, provided that the further restriction does" - , "not survive such relicensing or conveying." - , "" - , " If you add terms to a covered work in accord with this section, you" - , "must place, in the relevant source files, a statement of the" - , "additional terms that apply to those files, or a notice indicating" - , "where to find the applicable terms." - , "" - , " Additional terms, permissive or non-permissive, may be stated in the" - , "form of a separately written license, or stated as exceptions;" - , "the above requirements apply either way." - , "" - , " 8. Termination." - , "" - , " You may not propagate or modify a covered work except as expressly" - , "provided under this License. Any attempt otherwise to propagate or" - , "modify it is void, and will automatically terminate your rights under" - , "this License (including any patent licenses granted under the third" - , "paragraph of section 11)." - , "" - , " However, if you cease all violation of this License, then your" - , "license from a particular copyright holder is reinstated (a)" - , "provisionally, unless and until the copyright holder explicitly and" - , "finally terminates your license, and (b) permanently, if the copyright" - , "holder fails to notify you of the violation by some reasonable means" - , "prior to 60 days after the cessation." 
- , "" - , " Moreover, your license from a particular copyright holder is" - , "reinstated permanently if the copyright holder notifies you of the" - , "violation by some reasonable means, this is the first time you have" - , "received notice of violation of this License (for any work) from that" - , "copyright holder, and you cure the violation prior to 30 days after" - , "your receipt of the notice." - , "" - , " Termination of your rights under this section does not terminate the" - , "licenses of parties who have received copies or rights from you under" - , "this License. If your rights have been terminated and not permanently" - , "reinstated, you do not qualify to receive new licenses for the same" - , "material under section 10." - , "" - , " 9. Acceptance Not Required for Having Copies." - , "" - , " You are not required to accept this License in order to receive or" - , "run a copy of the Program. Ancillary propagation of a covered work" - , "occurring solely as a consequence of using peer-to-peer transmission" - , "to receive a copy likewise does not require acceptance. However," - , "nothing other than this License grants you permission to propagate or" - , "modify any covered work. These actions infringe copyright if you do" - , "not accept this License. Therefore, by modifying or propagating a" - , "covered work, you indicate your acceptance of this License to do so." - , "" - , " 10. Automatic Licensing of Downstream Recipients." - , "" - , " Each time you convey a covered work, the recipient automatically" - , "receives a license from the original licensors, to run, modify and" - , "propagate that work, subject to this License. You are not responsible" - , "for enforcing compliance by third parties with this License." - , "" - , " An \"entity transaction\" is a transaction transferring control of an" - , "organization, or substantially all assets of one, or subdividing an" - , "organization, or merging organizations. 
If propagation of a covered" - , "work results from an entity transaction, each party to that" - , "transaction who receives a copy of the work also receives whatever" - , "licenses to the work the party's predecessor in interest had or could" - , "give under the previous paragraph, plus a right to possession of the" - , "Corresponding Source of the work from the predecessor in interest, if" - , "the predecessor has it or can get it with reasonable efforts." - , "" - , " You may not impose any further restrictions on the exercise of the" - , "rights granted or affirmed under this License. For example, you may" - , "not impose a license fee, royalty, or other charge for exercise of" - , "rights granted under this License, and you may not initiate litigation" - , "(including a cross-claim or counterclaim in a lawsuit) alleging that" - , "any patent claim is infringed by making, using, selling, offering for" - , "sale, or importing the Program or any portion of it." - , "" - , " 11. Patents." - , "" - , " A \"contributor\" is a copyright holder who authorizes use under this" - , "License of the Program or a work on which the Program is based. The" - , "work thus licensed is called the contributor's \"contributor version\"." - , "" - , " A contributor's \"essential patent claims\" are all patent claims" - , "owned or controlled by the contributor, whether already acquired or" - , "hereafter acquired, that would be infringed by some manner, permitted" - , "by this License, of making, using, or selling its contributor version," - , "but do not include claims that would be infringed only as a" - , "consequence of further modification of the contributor version. For" - , "purposes of this definition, \"control\" includes the right to grant" - , "patent sublicenses in a manner consistent with the requirements of" - , "this License." 
- , "" - , " Each contributor grants you a non-exclusive, worldwide, royalty-free" - , "patent license under the contributor's essential patent claims, to" - , "make, use, sell, offer for sale, import and otherwise run, modify and" - , "propagate the contents of its contributor version." - , "" - , " In the following three paragraphs, a \"patent license\" is any express" - , "agreement or commitment, however denominated, not to enforce a patent" - , "(such as an express permission to practice a patent or covenant not to" - , "sue for patent infringement). To \"grant\" such a patent license to a" - , "party means to make such an agreement or commitment not to enforce a" - , "patent against the party." - , "" - , " If you convey a covered work, knowingly relying on a patent license," - , "and the Corresponding Source of the work is not available for anyone" - , "to copy, free of charge and under the terms of this License, through a" - , "publicly available network server or other readily accessible means," - , "then you must either (1) cause the Corresponding Source to be so" - , "available, or (2) arrange to deprive yourself of the benefit of the" - , "patent license for this particular work, or (3) arrange, in a manner" - , "consistent with the requirements of this License, to extend the patent" - , "license to downstream recipients. \"Knowingly relying\" means you have" - , "actual knowledge that, but for the patent license, your conveying the" - , "covered work in a country, or your recipient's use of the covered work" - , "in a country, would infringe one or more identifiable patents in that" - , "country that you have reason to believe are valid." 
- , "" - , " If, pursuant to or in connection with a single transaction or" - , "arrangement, you convey, or propagate by procuring conveyance of, a" - , "covered work, and grant a patent license to some of the parties" - , "receiving the covered work authorizing them to use, propagate, modify" - , "or convey a specific copy of the covered work, then the patent license" - , "you grant is automatically extended to all recipients of the covered" - , "work and works based on it." - , "" - , " A patent license is \"discriminatory\" if it does not include within" - , "the scope of its coverage, prohibits the exercise of, or is" - , "conditioned on the non-exercise of one or more of the rights that are" - , "specifically granted under this License. You may not convey a covered" - , "work if you are a party to an arrangement with a third party that is" - , "in the business of distributing software, under which you make payment" - , "to the third party based on the extent of your activity of conveying" - , "the work, and under which the third party grants, to any of the" - , "parties who would receive the covered work from you, a discriminatory" - , "patent license (a) in connection with copies of the covered work" - , "conveyed by you (or copies made from those copies), or (b) primarily" - , "for and in connection with specific products or compilations that" - , "contain the covered work, unless you entered into that arrangement," - , "or that patent license was granted, prior to 28 March 2007." - , "" - , " Nothing in this License shall be construed as excluding or limiting" - , "any implied license or other defenses to infringement that may" - , "otherwise be available to you under applicable patent law." - , "" - , " 12. No Surrender of Others' Freedom." - , "" - , " If conditions are imposed on you (whether by court order, agreement or" - , "otherwise) that contradict the conditions of this License, they do not" - , "excuse you from the conditions of this License. 
If you cannot convey a" - , "covered work so as to satisfy simultaneously your obligations under this" - , "License and any other pertinent obligations, then as a consequence you may" - , "not convey it at all. For example, if you agree to terms that obligate you" - , "to collect a royalty for further conveying from those to whom you convey" - , "the Program, the only way you could satisfy both those terms and this" - , "License would be to refrain entirely from conveying the Program." - , "" - , " 13. Use with the GNU Affero General Public License." - , "" - , " Notwithstanding any other provision of this License, you have" - , "permission to link or combine any covered work with a work licensed" - , "under version 3 of the GNU Affero General Public License into a single" - , "combined work, and to convey the resulting work. The terms of this" - , "License will continue to apply to the part which is the covered work," - , "but the special requirements of the GNU Affero General Public License," - , "section 13, concerning interaction through a network will apply to the" - , "combination as such." - , "" - , " 14. Revised Versions of this License." - , "" - , " The Free Software Foundation may publish revised and/or new versions of" - , "the GNU General Public License from time to time. Such new versions will" - , "be similar in spirit to the present version, but may differ in detail to" - , "address new problems or concerns." - , "" - , " Each version is given a distinguishing version number. If the" - , "Program specifies that a certain numbered version of the GNU General" - , "Public License \"or any later version\" applies to it, you have the" - , "option of following the terms and conditions either of that numbered" - , "version or of any later version published by the Free Software" - , "Foundation. 
If the Program does not specify a version number of the" - , "GNU General Public License, you may choose any version ever published" - , "by the Free Software Foundation." - , "" - , " If the Program specifies that a proxy can decide which future" - , "versions of the GNU General Public License can be used, that proxy's" - , "public statement of acceptance of a version permanently authorizes you" - , "to choose that version for the Program." - , "" - , " Later license versions may give you additional or different" - , "permissions. However, no additional obligations are imposed on any" - , "author or copyright holder as a result of your choosing to follow a" - , "later version." - , "" - , " 15. Disclaimer of Warranty." - , "" - , " THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY" - , "APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT" - , "HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY" - , "OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO," - , "THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR" - , "PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM" - , "IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF" - , "ALL NECESSARY SERVICING, REPAIR OR CORRECTION." - , "" - , " 16. Limitation of Liability." 
- , "" - , " IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING" - , "WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS" - , "THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY" - , "GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE" - , "USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF" - , "DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD" - , "PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS)," - , "EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF" - , "SUCH DAMAGES." - , "" - , " 17. Interpretation of Sections 15 and 16." - , "" - , " If the disclaimer of warranty and limitation of liability provided" - , "above cannot be given local legal effect according to their terms," - , "reviewing courts shall apply local law that most closely approximates" - , "an absolute waiver of all civil liability in connection with the" - , "Program, unless a warranty or assumption of liability accompanies a" - , "copy of the Program in return for a fee." - , "" - , " END OF TERMS AND CONDITIONS" - , "" - , " How to Apply These Terms to Your New Programs" - , "" - , " If you develop a new program, and you want it to be of the greatest" - , "possible use to the public, the best way to achieve this is to make it" - , "free software which everyone can redistribute and change under these terms." - , "" - , " To do so, attach the following notices to the program. It is safest" - , "to attach them to the start of each source file to most effectively" - , "state the exclusion of warranty; and each file should have at least" - , "the \"copyright\" line and a pointer to where the full notice is found." 
- , "" - , " " - , " Copyright (C) " - , "" - , " This program is free software: you can redistribute it and/or modify" - , " it under the terms of the GNU General Public License as published by" - , " the Free Software Foundation, either version 3 of the License, or" - , " (at your option) any later version." - , "" - , " This program is distributed in the hope that it will be useful," - , " but WITHOUT ANY WARRANTY; without even the implied warranty of" - , " MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the" - , " GNU General Public License for more details." - , "" - , " You should have received a copy of the GNU General Public License" - , " along with this program. If not, see ." - , "" - , "Also add information on how to contact you by electronic and paper mail." - , "" - , " If the program does terminal interaction, make it output a short" - , "notice like this when it starts in an interactive mode:" - , "" - , " Copyright (C) " - , " This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'." - , " This is free software, and you are welcome to redistribute it" - , " under certain conditions; type `show c' for details." - , "" - , "The hypothetical commands `show w' and `show c' should show the appropriate" - , "parts of the General Public License. Of course, your program's commands" - , "might be different; for a GUI interface, you would use an \"about box\"." - , "" - , " You should also get your employer (if you work as a programmer) or school," - , "if any, to sign a \"copyright disclaimer\" for the program, if necessary." - , "For more information on this, and how to apply and follow the GNU GPL, see" - , "." - , "" - , " The GNU General Public License does not permit incorporating your program" - , "into proprietary programs. If your program is a subroutine library, you" - , "may consider it more useful to permit linking proprietary applications with" - , "the library. 
If this is what you want to do, use the GNU Lesser General" - , "Public License instead of this License. But first, please read" - , "." - ] - -agplv3 :: License -agplv3 = unlines - [ " GNU AFFERO GENERAL PUBLIC LICENSE" - , " Version 3, 19 November 2007" - , "" - , " Copyright (C) 2007 Free Software Foundation, Inc. " - , " Everyone is permitted to copy and distribute verbatim copies" - , " of this license document, but changing it is not allowed." - , "" - , " Preamble" - , "" - , " The GNU Affero General Public License is a free, copyleft license for" - , "software and other kinds of works, specifically designed to ensure" - , "cooperation with the community in the case of network server software." - , "" - , " The licenses for most software and other practical works are designed" - , "to take away your freedom to share and change the works. By contrast," - , "our General Public Licenses are intended to guarantee your freedom to" - , "share and change all versions of a program--to make sure it remains free" - , "software for all its users." - , "" - , " When we speak of free software, we are referring to freedom, not" - , "price. Our General Public Licenses are designed to make sure that you" - , "have the freedom to distribute copies of free software (and charge for" - , "them if you wish), that you receive source code or can get it if you" - , "want it, that you can change the software or use pieces of it in new" - , "free programs, and that you know you can do these things." - , "" - , " Developers that use our General Public Licenses protect your rights" - , "with two steps: (1) assert copyright on the software, and (2) offer" - , "you this License which gives you legal permission to copy, distribute" - , "and/or modify the software." 
- , "" - , " A secondary benefit of defending all users' freedom is that" - , "improvements made in alternate versions of the program, if they" - , "receive widespread use, become available for other developers to" - , "incorporate. Many developers of free software are heartened and" - , "encouraged by the resulting cooperation. However, in the case of" - , "software used on network servers, this result may fail to come about." - , "The GNU General Public License permits making a modified version and" - , "letting the public access it on a server without ever releasing its" - , "source code to the public." - , "" - , " The GNU Affero General Public License is designed specifically to" - , "ensure that, in such cases, the modified source code becomes available" - , "to the community. It requires the operator of a network server to" - , "provide the source code of the modified version running there to the" - , "users of that server. Therefore, public use of a modified version, on" - , "a publicly accessible server, gives the public access to the source" - , "code of the modified version." - , "" - , " An older license, called the Affero General Public License and" - , "published by Affero, was designed to accomplish similar goals. This is" - , "a different license, not a version of the Affero GPL, but Affero has" - , "released a new version of the Affero GPL which permits relicensing under" - , "this license." - , "" - , " The precise terms and conditions for copying, distribution and" - , "modification follow." - , "" - , " TERMS AND CONDITIONS" - , "" - , " 0. Definitions." - , "" - , " \"This License\" refers to version 3 of the GNU Affero General Public License." - , "" - , " \"Copyright\" also means copyright-like laws that apply to other kinds of" - , "works, such as semiconductor masks." - , "" - , " \"The Program\" refers to any copyrightable work licensed under this" - , "License. Each licensee is addressed as \"you\". 
\"Licensees\" and" - , "\"recipients\" may be individuals or organizations." - , "" - , " To \"modify\" a work means to copy from or adapt all or part of the work" - , "in a fashion requiring copyright permission, other than the making of an" - , "exact copy. The resulting work is called a \"modified version\" of the" - , "earlier work or a work \"based on\" the earlier work." - , "" - , " A \"covered work\" means either the unmodified Program or a work based" - , "on the Program." - , "" - , " To \"propagate\" a work means to do anything with it that, without" - , "permission, would make you directly or secondarily liable for" - , "infringement under applicable copyright law, except executing it on a" - , "computer or modifying a private copy. Propagation includes copying," - , "distribution (with or without modification), making available to the" - , "public, and in some countries other activities as well." - , "" - , " To \"convey\" a work means any kind of propagation that enables other" - , "parties to make or receive copies. Mere interaction with a user through" - , "a computer network, with no transfer of a copy, is not conveying." - , "" - , " An interactive user interface displays \"Appropriate Legal Notices\"" - , "to the extent that it includes a convenient and prominently visible" - , "feature that (1) displays an appropriate copyright notice, and (2)" - , "tells the user that there is no warranty for the work (except to the" - , "extent that warranties are provided), that licensees may convey the" - , "work under this License, and how to view a copy of this License. If" - , "the interface presents a list of user commands or options, such as a" - , "menu, a prominent item in the list meets this criterion." - , "" - , " 1. Source Code." - , "" - , " The \"source code\" for a work means the preferred form of the work" - , "for making modifications to it. \"Object code\" means any non-source" - , "form of a work." 
- , "" - , " A \"Standard Interface\" means an interface that either is an official" - , "standard defined by a recognized standards body, or, in the case of" - , "interfaces specified for a particular programming language, one that" - , "is widely used among developers working in that language." - , "" - , " The \"System Libraries\" of an executable work include anything, other" - , "than the work as a whole, that (a) is included in the normal form of" - , "packaging a Major Component, but which is not part of that Major" - , "Component, and (b) serves only to enable use of the work with that" - , "Major Component, or to implement a Standard Interface for which an" - , "implementation is available to the public in source code form. A" - , "\"Major Component\", in this context, means a major essential component" - , "(kernel, window system, and so on) of the specific operating system" - , "(if any) on which the executable work runs, or a compiler used to" - , "produce the work, or an object code interpreter used to run it." - , "" - , " The \"Corresponding Source\" for a work in object code form means all" - , "the source code needed to generate, install, and (for an executable" - , "work) run the object code and to modify the work, including scripts to" - , "control those activities. However, it does not include the work's" - , "System Libraries, or general-purpose tools or generally available free" - , "programs which are used unmodified in performing those activities but" - , "which are not part of the work. For example, Corresponding Source" - , "includes interface definition files associated with source files for" - , "the work, and the source code for shared libraries and dynamically" - , "linked subprograms that the work is specifically designed to require," - , "such as by intimate data communication or control flow between those" - , "subprograms and other parts of the work." 
- , "" - , " The Corresponding Source need not include anything that users" - , "can regenerate automatically from other parts of the Corresponding" - , "Source." - , "" - , " The Corresponding Source for a work in source code form is that" - , "same work." - , "" - , " 2. Basic Permissions." - , "" - , " All rights granted under this License are granted for the term of" - , "copyright on the Program, and are irrevocable provided the stated" - , "conditions are met. This License explicitly affirms your unlimited" - , "permission to run the unmodified Program. The output from running a" - , "covered work is covered by this License only if the output, given its" - , "content, constitutes a covered work. This License acknowledges your" - , "rights of fair use or other equivalent, as provided by copyright law." - , "" - , " You may make, run and propagate covered works that you do not" - , "convey, without conditions so long as your license otherwise remains" - , "in force. You may convey covered works to others for the sole purpose" - , "of having them make modifications exclusively for you, or provide you" - , "with facilities for running those works, provided that you comply with" - , "the terms of this License in conveying all material for which you do" - , "not control copyright. Those thus making or running the covered works" - , "for you must do so exclusively on your behalf, under your direction" - , "and control, on terms that prohibit them from making any copies of" - , "your copyrighted material outside their relationship with you." - , "" - , " Conveying under any other circumstances is permitted solely under" - , "the conditions stated below. Sublicensing is not allowed; section 10" - , "makes it unnecessary." - , "" - , " 3. Protecting Users' Legal Rights From Anti-Circumvention Law." 
- , "" - , " No covered work shall be deemed part of an effective technological" - , "measure under any applicable law fulfilling obligations under article" - , "11 of the WIPO copyright treaty adopted on 20 December 1996, or" - , "similar laws prohibiting or restricting circumvention of such" - , "measures." - , "" - , " When you convey a covered work, you waive any legal power to forbid" - , "circumvention of technological measures to the extent such circumvention" - , "is effected by exercising rights under this License with respect to" - , "the covered work, and you disclaim any intention to limit operation or" - , "modification of the work as a means of enforcing, against the work's" - , "users, your or third parties' legal rights to forbid circumvention of" - , "technological measures." - , "" - , " 4. Conveying Verbatim Copies." - , "" - , " You may convey verbatim copies of the Program's source code as you" - , "receive it, in any medium, provided that you conspicuously and" - , "appropriately publish on each copy an appropriate copyright notice;" - , "keep intact all notices stating that this License and any" - , "non-permissive terms added in accord with section 7 apply to the code;" - , "keep intact all notices of the absence of any warranty; and give all" - , "recipients a copy of this License along with the Program." - , "" - , " You may charge any price or no price for each copy that you convey," - , "and you may offer support or warranty protection for a fee." - , "" - , " 5. Conveying Modified Source Versions." - , "" - , " You may convey a work based on the Program, or the modifications to" - , "produce it from the Program, in the form of source code under the" - , "terms of section 4, provided that you also meet all of these conditions:" - , "" - , " a) The work must carry prominent notices stating that you modified" - , " it, and giving a relevant date." 
- , "" - , " b) The work must carry prominent notices stating that it is" - , " released under this License and any conditions added under section" - , " 7. This requirement modifies the requirement in section 4 to" - , " \"keep intact all notices\"." - , "" - , " c) You must license the entire work, as a whole, under this" - , " License to anyone who comes into possession of a copy. This" - , " License will therefore apply, along with any applicable section 7" - , " additional terms, to the whole of the work, and all its parts," - , " regardless of how they are packaged. This License gives no" - , " permission to license the work in any other way, but it does not" - , " invalidate such permission if you have separately received it." - , "" - , " d) If the work has interactive user interfaces, each must display" - , " Appropriate Legal Notices; however, if the Program has interactive" - , " interfaces that do not display Appropriate Legal Notices, your" - , " work need not make them do so." - , "" - , " A compilation of a covered work with other separate and independent" - , "works, which are not by their nature extensions of the covered work," - , "and which are not combined with it such as to form a larger program," - , "in or on a volume of a storage or distribution medium, is called an" - , "\"aggregate\" if the compilation and its resulting copyright are not" - , "used to limit the access or legal rights of the compilation's users" - , "beyond what the individual works permit. Inclusion of a covered work" - , "in an aggregate does not cause this License to apply to the other" - , "parts of the aggregate." - , "" - , " 6. Conveying Non-Source Forms." 
- , "" - , " You may convey a covered work in object code form under the terms" - , "of sections 4 and 5, provided that you also convey the" - , "machine-readable Corresponding Source under the terms of this License," - , "in one of these ways:" - , "" - , " a) Convey the object code in, or embodied in, a physical product" - , " (including a physical distribution medium), accompanied by the" - , " Corresponding Source fixed on a durable physical medium" - , " customarily used for software interchange." - , "" - , " b) Convey the object code in, or embodied in, a physical product" - , " (including a physical distribution medium), accompanied by a" - , " written offer, valid for at least three years and valid for as" - , " long as you offer spare parts or customer support for that product" - , " model, to give anyone who possesses the object code either (1) a" - , " copy of the Corresponding Source for all the software in the" - , " product that is covered by this License, on a durable physical" - , " medium customarily used for software interchange, for a price no" - , " more than your reasonable cost of physically performing this" - , " conveying of source, or (2) access to copy the" - , " Corresponding Source from a network server at no charge." - , "" - , " c) Convey individual copies of the object code with a copy of the" - , " written offer to provide the Corresponding Source. This" - , " alternative is allowed only occasionally and noncommercially, and" - , " only if you received the object code with such an offer, in accord" - , " with subsection 6b." - , "" - , " d) Convey the object code by offering access from a designated" - , " place (gratis or for a charge), and offer equivalent access to the" - , " Corresponding Source in the same way through the same place at no" - , " further charge. You need not require recipients to copy the" - , " Corresponding Source along with the object code. 
If the place to" - , " copy the object code is a network server, the Corresponding Source" - , " may be on a different server (operated by you or a third party)" - , " that supports equivalent copying facilities, provided you maintain" - , " clear directions next to the object code saying where to find the" - , " Corresponding Source. Regardless of what server hosts the" - , " Corresponding Source, you remain obligated to ensure that it is" - , " available for as long as needed to satisfy these requirements." - , "" - , " e) Convey the object code using peer-to-peer transmission, provided" - , " you inform other peers where the object code and Corresponding" - , " Source of the work are being offered to the general public at no" - , " charge under subsection 6d." - , "" - , " A separable portion of the object code, whose source code is excluded" - , "from the Corresponding Source as a System Library, need not be" - , "included in conveying the object code work." - , "" - , " A \"User Product\" is either (1) a \"consumer product\", which means any" - , "tangible personal property which is normally used for personal, family," - , "or household purposes, or (2) anything designed or sold for incorporation" - , "into a dwelling. In determining whether a product is a consumer product," - , "doubtful cases shall be resolved in favor of coverage. For a particular" - , "product received by a particular user, \"normally used\" refers to a" - , "typical or common use of that class of product, regardless of the status" - , "of the particular user or of the way in which the particular user" - , "actually uses, or expects or is expected to use, the product. A product" - , "is a consumer product regardless of whether the product has substantial" - , "commercial, industrial or non-consumer uses, unless such uses represent" - , "the only significant mode of use of the product." 
- , "" - , " \"Installation Information\" for a User Product means any methods," - , "procedures, authorization keys, or other information required to install" - , "and execute modified versions of a covered work in that User Product from" - , "a modified version of its Corresponding Source. The information must" - , "suffice to ensure that the continued functioning of the modified object" - , "code is in no case prevented or interfered with solely because" - , "modification has been made." - , "" - , " If you convey an object code work under this section in, or with, or" - , "specifically for use in, a User Product, and the conveying occurs as" - , "part of a transaction in which the right of possession and use of the" - , "User Product is transferred to the recipient in perpetuity or for a" - , "fixed term (regardless of how the transaction is characterized), the" - , "Corresponding Source conveyed under this section must be accompanied" - , "by the Installation Information. But this requirement does not apply" - , "if neither you nor any third party retains the ability to install" - , "modified object code on the User Product (for example, the work has" - , "been installed in ROM)." - , "" - , " The requirement to provide Installation Information does not include a" - , "requirement to continue to provide support service, warranty, or updates" - , "for a work that has been modified or installed by the recipient, or for" - , "the User Product in which it has been modified or installed. Access to a" - , "network may be denied when the modification itself materially and" - , "adversely affects the operation of the network or violates the rules and" - , "protocols for communication across the network." 
- , "" - , " Corresponding Source conveyed, and Installation Information provided," - , "in accord with this section must be in a format that is publicly" - , "documented (and with an implementation available to the public in" - , "source code form), and must require no special password or key for" - , "unpacking, reading or copying." - , "" - , " 7. Additional Terms." - , "" - , " \"Additional permissions\" are terms that supplement the terms of this" - , "License by making exceptions from one or more of its conditions." - , "Additional permissions that are applicable to the entire Program shall" - , "be treated as though they were included in this License, to the extent" - , "that they are valid under applicable law. If additional permissions" - , "apply only to part of the Program, that part may be used separately" - , "under those permissions, but the entire Program remains governed by" - , "this License without regard to the additional permissions." - , "" - , " When you convey a copy of a covered work, you may at your option" - , "remove any additional permissions from that copy, or from any part of" - , "it. (Additional permissions may be written to require their own" - , "removal in certain cases when you modify the work.) You may place" - , "additional permissions on material, added by you to a covered work," - , "for which you have or can give appropriate copyright permission." 
- , "" - , " Notwithstanding any other provision of this License, for material you" - , "add to a covered work, you may (if authorized by the copyright holders of" - , "that material) supplement the terms of this License with terms:" - , "" - , " a) Disclaiming warranty or limiting liability differently from the" - , " terms of sections 15 and 16 of this License; or" - , "" - , " b) Requiring preservation of specified reasonable legal notices or" - , " author attributions in that material or in the Appropriate Legal" - , " Notices displayed by works containing it; or" - , "" - , " c) Prohibiting misrepresentation of the origin of that material, or" - , " requiring that modified versions of such material be marked in" - , " reasonable ways as different from the original version; or" - , "" - , " d) Limiting the use for publicity purposes of names of licensors or" - , " authors of the material; or" - , "" - , " e) Declining to grant rights under trademark law for use of some" - , " trade names, trademarks, or service marks; or" - , "" - , " f) Requiring indemnification of licensors and authors of that" - , " material by anyone who conveys the material (or modified versions of" - , " it) with contractual assumptions of liability to the recipient, for" - , " any liability that these contractual assumptions directly impose on" - , " those licensors and authors." - , "" - , " All other non-permissive additional terms are considered \"further" - , "restrictions\" within the meaning of section 10. If the Program as you" - , "received it, or any part of it, contains a notice stating that it is" - , "governed by this License along with a term that is a further" - , "restriction, you may remove that term. 
If a license document contains" - , "a further restriction but permits relicensing or conveying under this" - , "License, you may add to a covered work material governed by the terms" - , "of that license document, provided that the further restriction does" - , "not survive such relicensing or conveying." - , "" - , " If you add terms to a covered work in accord with this section, you" - , "must place, in the relevant source files, a statement of the" - , "additional terms that apply to those files, or a notice indicating" - , "where to find the applicable terms." - , "" - , " Additional terms, permissive or non-permissive, may be stated in the" - , "form of a separately written license, or stated as exceptions;" - , "the above requirements apply either way." - , "" - , " 8. Termination." - , "" - , " You may not propagate or modify a covered work except as expressly" - , "provided under this License. Any attempt otherwise to propagate or" - , "modify it is void, and will automatically terminate your rights under" - , "this License (including any patent licenses granted under the third" - , "paragraph of section 11)." - , "" - , " However, if you cease all violation of this License, then your" - , "license from a particular copyright holder is reinstated (a)" - , "provisionally, unless and until the copyright holder explicitly and" - , "finally terminates your license, and (b) permanently, if the copyright" - , "holder fails to notify you of the violation by some reasonable means" - , "prior to 60 days after the cessation." - , "" - , " Moreover, your license from a particular copyright holder is" - , "reinstated permanently if the copyright holder notifies you of the" - , "violation by some reasonable means, this is the first time you have" - , "received notice of violation of this License (for any work) from that" - , "copyright holder, and you cure the violation prior to 30 days after" - , "your receipt of the notice." 
- , "" - , " Termination of your rights under this section does not terminate the" - , "licenses of parties who have received copies or rights from you under" - , "this License. If your rights have been terminated and not permanently" - , "reinstated, you do not qualify to receive new licenses for the same" - , "material under section 10." - , "" - , " 9. Acceptance Not Required for Having Copies." - , "" - , " You are not required to accept this License in order to receive or" - , "run a copy of the Program. Ancillary propagation of a covered work" - , "occurring solely as a consequence of using peer-to-peer transmission" - , "to receive a copy likewise does not require acceptance. However," - , "nothing other than this License grants you permission to propagate or" - , "modify any covered work. These actions infringe copyright if you do" - , "not accept this License. Therefore, by modifying or propagating a" - , "covered work, you indicate your acceptance of this License to do so." - , "" - , " 10. Automatic Licensing of Downstream Recipients." - , "" - , " Each time you convey a covered work, the recipient automatically" - , "receives a license from the original licensors, to run, modify and" - , "propagate that work, subject to this License. You are not responsible" - , "for enforcing compliance by third parties with this License." - , "" - , " An \"entity transaction\" is a transaction transferring control of an" - , "organization, or substantially all assets of one, or subdividing an" - , "organization, or merging organizations. 
If propagation of a covered" - , "work results from an entity transaction, each party to that" - , "transaction who receives a copy of the work also receives whatever" - , "licenses to the work the party's predecessor in interest had or could" - , "give under the previous paragraph, plus a right to possession of the" - , "Corresponding Source of the work from the predecessor in interest, if" - , "the predecessor has it or can get it with reasonable efforts." - , "" - , " You may not impose any further restrictions on the exercise of the" - , "rights granted or affirmed under this License. For example, you may" - , "not impose a license fee, royalty, or other charge for exercise of" - , "rights granted under this License, and you may not initiate litigation" - , "(including a cross-claim or counterclaim in a lawsuit) alleging that" - , "any patent claim is infringed by making, using, selling, offering for" - , "sale, or importing the Program or any portion of it." - , "" - , " 11. Patents." - , "" - , " A \"contributor\" is a copyright holder who authorizes use under this" - , "License of the Program or a work on which the Program is based. The" - , "work thus licensed is called the contributor's \"contributor version\"." - , "" - , " A contributor's \"essential patent claims\" are all patent claims" - , "owned or controlled by the contributor, whether already acquired or" - , "hereafter acquired, that would be infringed by some manner, permitted" - , "by this License, of making, using, or selling its contributor version," - , "but do not include claims that would be infringed only as a" - , "consequence of further modification of the contributor version. For" - , "purposes of this definition, \"control\" includes the right to grant" - , "patent sublicenses in a manner consistent with the requirements of" - , "this License." 
- , "" - , " Each contributor grants you a non-exclusive, worldwide, royalty-free" - , "patent license under the contributor's essential patent claims, to" - , "make, use, sell, offer for sale, import and otherwise run, modify and" - , "propagate the contents of its contributor version." - , "" - , " In the following three paragraphs, a \"patent license\" is any express" - , "agreement or commitment, however denominated, not to enforce a patent" - , "(such as an express permission to practice a patent or covenant not to" - , "sue for patent infringement). To \"grant\" such a patent license to a" - , "party means to make such an agreement or commitment not to enforce a" - , "patent against the party." - , "" - , " If you convey a covered work, knowingly relying on a patent license," - , "and the Corresponding Source of the work is not available for anyone" - , "to copy, free of charge and under the terms of this License, through a" - , "publicly available network server or other readily accessible means," - , "then you must either (1) cause the Corresponding Source to be so" - , "available, or (2) arrange to deprive yourself of the benefit of the" - , "patent license for this particular work, or (3) arrange, in a manner" - , "consistent with the requirements of this License, to extend the patent" - , "license to downstream recipients. \"Knowingly relying\" means you have" - , "actual knowledge that, but for the patent license, your conveying the" - , "covered work in a country, or your recipient's use of the covered work" - , "in a country, would infringe one or more identifiable patents in that" - , "country that you have reason to believe are valid." 
- , "" - , " If, pursuant to or in connection with a single transaction or" - , "arrangement, you convey, or propagate by procuring conveyance of, a" - , "covered work, and grant a patent license to some of the parties" - , "receiving the covered work authorizing them to use, propagate, modify" - , "or convey a specific copy of the covered work, then the patent license" - , "you grant is automatically extended to all recipients of the covered" - , "work and works based on it." - , "" - , " A patent license is \"discriminatory\" if it does not include within" - , "the scope of its coverage, prohibits the exercise of, or is" - , "conditioned on the non-exercise of one or more of the rights that are" - , "specifically granted under this License. You may not convey a covered" - , "work if you are a party to an arrangement with a third party that is" - , "in the business of distributing software, under which you make payment" - , "to the third party based on the extent of your activity of conveying" - , "the work, and under which the third party grants, to any of the" - , "parties who would receive the covered work from you, a discriminatory" - , "patent license (a) in connection with copies of the covered work" - , "conveyed by you (or copies made from those copies), or (b) primarily" - , "for and in connection with specific products or compilations that" - , "contain the covered work, unless you entered into that arrangement," - , "or that patent license was granted, prior to 28 March 2007." - , "" - , " Nothing in this License shall be construed as excluding or limiting" - , "any implied license or other defenses to infringement that may" - , "otherwise be available to you under applicable patent law." - , "" - , " 12. No Surrender of Others' Freedom." - , "" - , " If conditions are imposed on you (whether by court order, agreement or" - , "otherwise) that contradict the conditions of this License, they do not" - , "excuse you from the conditions of this License. 
If you cannot convey a" - , "covered work so as to satisfy simultaneously your obligations under this" - , "License and any other pertinent obligations, then as a consequence you may" - , "not convey it at all. For example, if you agree to terms that obligate you" - , "to collect a royalty for further conveying from those to whom you convey" - , "the Program, the only way you could satisfy both those terms and this" - , "License would be to refrain entirely from conveying the Program." - , "" - , " 13. Remote Network Interaction; Use with the GNU General Public License." - , "" - , " Notwithstanding any other provision of this License, if you modify the" - , "Program, your modified version must prominently offer all users" - , "interacting with it remotely through a computer network (if your version" - , "supports such interaction) an opportunity to receive the Corresponding" - , "Source of your version by providing access to the Corresponding Source" - , "from a network server at no charge, through some standard or customary" - , "means of facilitating copying of software. This Corresponding Source" - , "shall include the Corresponding Source for any work covered by version 3" - , "of the GNU General Public License that is incorporated pursuant to the" - , "following paragraph." - , "" - , " Notwithstanding any other provision of this License, you have" - , "permission to link or combine any covered work with a work licensed" - , "under version 3 of the GNU General Public License into a single" - , "combined work, and to convey the resulting work. The terms of this" - , "License will continue to apply to the part which is the covered work," - , "but the work with which it is combined will remain governed by version" - , "3 of the GNU General Public License." - , "" - , " 14. Revised Versions of this License." - , "" - , " The Free Software Foundation may publish revised and/or new versions of" - , "the GNU Affero General Public License from time to time. 
Such new versions" - , "will be similar in spirit to the present version, but may differ in detail to" - , "address new problems or concerns." - , "" - , " Each version is given a distinguishing version number. If the" - , "Program specifies that a certain numbered version of the GNU Affero General" - , "Public License \"or any later version\" applies to it, you have the" - , "option of following the terms and conditions either of that numbered" - , "version or of any later version published by the Free Software" - , "Foundation. If the Program does not specify a version number of the" - , "GNU Affero General Public License, you may choose any version ever published" - , "by the Free Software Foundation." - , "" - , " If the Program specifies that a proxy can decide which future" - , "versions of the GNU Affero General Public License can be used, that proxy's" - , "public statement of acceptance of a version permanently authorizes you" - , "to choose that version for the Program." - , "" - , " Later license versions may give you additional or different" - , "permissions. However, no additional obligations are imposed on any" - , "author or copyright holder as a result of your choosing to follow a" - , "later version." - , "" - , " 15. Disclaimer of Warranty." - , "" - , " THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY" - , "APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT" - , "HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY" - , "OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO," - , "THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR" - , "PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM" - , "IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF" - , "ALL NECESSARY SERVICING, REPAIR OR CORRECTION." - , "" - , " 16. Limitation of Liability." 
- , "" - , " IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING" - , "WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS" - , "THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY" - , "GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE" - , "USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF" - , "DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD" - , "PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS)," - , "EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF" - , "SUCH DAMAGES." - , "" - , " 17. Interpretation of Sections 15 and 16." - , "" - , " If the disclaimer of warranty and limitation of liability provided" - , "above cannot be given local legal effect according to their terms," - , "reviewing courts shall apply local law that most closely approximates" - , "an absolute waiver of all civil liability in connection with the" - , "Program, unless a warranty or assumption of liability accompanies a" - , "copy of the Program in return for a fee." - , "" - , " END OF TERMS AND CONDITIONS" - , "" - , " How to Apply These Terms to Your New Programs" - , "" - , " If you develop a new program, and you want it to be of the greatest" - , "possible use to the public, the best way to achieve this is to make it" - , "free software which everyone can redistribute and change under these terms." - , "" - , " To do so, attach the following notices to the program. It is safest" - , "to attach them to the start of each source file to most effectively" - , "state the exclusion of warranty; and each file should have at least" - , "the \"copyright\" line and a pointer to where the full notice is found." 
- , "" - , " " - , " Copyright (C) " - , "" - , " This program is free software: you can redistribute it and/or modify" - , " it under the terms of the GNU Affero General Public License as published by" - , " the Free Software Foundation, either version 3 of the License, or" - , " (at your option) any later version." - , "" - , " This program is distributed in the hope that it will be useful," - , " but WITHOUT ANY WARRANTY; without even the implied warranty of" - , " MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the" - , " GNU Affero General Public License for more details." - , "" - , " You should have received a copy of the GNU Affero General Public License" - , " along with this program. If not, see ." - , "" - , "Also add information on how to contact you by electronic and paper mail." - , "" - , " If your software can interact with users remotely through a computer" - , "network, you should also make sure that it provides a way for users to" - , "get its source. For example, if your program is a web application, its" - , "interface could display a \"Source\" link that leads users to an archive" - , "of the code. There are many ways you could offer source, and different" - , "solutions will be better for different programs; see section 13 for the" - , "specific requirements." - , "" - , " You should also get your employer (if you work as a programmer) or school," - , "if any, to sign a \"copyright disclaimer\" for the program, if necessary." - , "For more information on this, and how to apply and follow the GNU AGPL, see" - , "." - ] - -lgpl21 :: License -lgpl21 = unlines - [ " GNU LESSER GENERAL PUBLIC LICENSE" - , " Version 2.1, February 1999" - , "" - , " Copyright (C) 1991, 1999 Free Software Foundation, Inc." - , " 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA" - , " Everyone is permitted to copy and distribute verbatim copies" - , " of this license document, but changing it is not allowed." 
- , "" - , "[This is the first released version of the Lesser GPL. It also counts" - , " as the successor of the GNU Library Public License, version 2, hence" - , " the version number 2.1.]" - , "" - , " Preamble" - , "" - , " The licenses for most software are designed to take away your" - , "freedom to share and change it. By contrast, the GNU General Public" - , "Licenses are intended to guarantee your freedom to share and change" - , "free software--to make sure the software is free for all its users." - , "" - , " This license, the Lesser General Public License, applies to some" - , "specially designated software packages--typically libraries--of the" - , "Free Software Foundation and other authors who decide to use it. You" - , "can use it too, but we suggest you first think carefully about whether" - , "this license or the ordinary General Public License is the better" - , "strategy to use in any particular case, based on the explanations below." - , "" - , " When we speak of free software, we are referring to freedom of use," - , "not price. Our General Public Licenses are designed to make sure that" - , "you have the freedom to distribute copies of free software (and charge" - , "for this service if you wish); that you receive source code or can get" - , "it if you want it; that you can change the software and use pieces of" - , "it in new free programs; and that you are informed that you can do" - , "these things." - , "" - , " To protect your rights, we need to make restrictions that forbid" - , "distributors to deny you these rights or to ask you to surrender these" - , "rights. These restrictions translate to certain responsibilities for" - , "you if you distribute copies of the library or if you modify it." - , "" - , " For example, if you distribute copies of the library, whether gratis" - , "or for a fee, you must give the recipients all the rights that we gave" - , "you. You must make sure that they, too, receive or can get the source" - , "code. 
If you link other code with the library, you must provide" - , "complete object files to the recipients, so that they can relink them" - , "with the library after making changes to the library and recompiling" - , "it. And you must show them these terms so they know their rights." - , "" - , " We protect your rights with a two-step method: (1) we copyright the" - , "library, and (2) we offer you this license, which gives you legal" - , "permission to copy, distribute and/or modify the library." - , "" - , " To protect each distributor, we want to make it very clear that" - , "there is no warranty for the free library. Also, if the library is" - , "modified by someone else and passed on, the recipients should know" - , "that what they have is not the original version, so that the original" - , "author's reputation will not be affected by problems that might be" - , "introduced by others." - , "" - , " Finally, software patents pose a constant threat to the existence of" - , "any free program. We wish to make sure that a company cannot" - , "effectively restrict the users of a free program by obtaining a" - , "restrictive license from a patent holder. Therefore, we insist that" - , "any patent license obtained for a version of the library must be" - , "consistent with the full freedom of use specified in this license." - , "" - , " Most GNU software, including some libraries, is covered by the" - , "ordinary GNU General Public License. This license, the GNU Lesser" - , "General Public License, applies to certain designated libraries, and" - , "is quite different from the ordinary General Public License. We use" - , "this license for certain libraries in order to permit linking those" - , "libraries into non-free programs." - , "" - , " When a program is linked with a library, whether statically or using" - , "a shared library, the combination of the two is legally speaking a" - , "combined work, a derivative of the original library. 
The ordinary" - , "General Public License therefore permits such linking only if the" - , "entire combination fits its criteria of freedom. The Lesser General" - , "Public License permits more lax criteria for linking other code with" - , "the library." - , "" - , " We call this license the \"Lesser\" General Public License because it" - , "does Less to protect the user's freedom than the ordinary General" - , "Public License. It also provides other free software developers Less" - , "of an advantage over competing non-free programs. These disadvantages" - , "are the reason we use the ordinary General Public License for many" - , "libraries. However, the Lesser license provides advantages in certain" - , "special circumstances." - , "" - , " For example, on rare occasions, there may be a special need to" - , "encourage the widest possible use of a certain library, so that it becomes" - , "a de-facto standard. To achieve this, non-free programs must be" - , "allowed to use the library. A more frequent case is that a free" - , "library does the same job as widely used non-free libraries. In this" - , "case, there is little to gain by limiting the free library to free" - , "software only, so we use the Lesser General Public License." - , "" - , " In other cases, permission to use a particular library in non-free" - , "programs enables a greater number of people to use a large body of" - , "free software. For example, permission to use the GNU C Library in" - , "non-free programs enables many more people to use the whole GNU" - , "operating system, as well as its variant, the GNU/Linux operating" - , "system." - , "" - , " Although the Lesser General Public License is Less protective of the" - , "users' freedom, it does ensure that the user of a program that is" - , "linked with the Library has the freedom and the wherewithal to run" - , "that program using a modified version of the Library." 
- , "" - , " The precise terms and conditions for copying, distribution and" - , "modification follow. Pay close attention to the difference between a" - , "\"work based on the library\" and a \"work that uses the library\". The" - , "former contains code derived from the library, whereas the latter must" - , "be combined with the library in order to run." - , "" - , " GNU LESSER GENERAL PUBLIC LICENSE" - , " TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION" - , "" - , " 0. This License Agreement applies to any software library or other" - , "program which contains a notice placed by the copyright holder or" - , "other authorized party saying it may be distributed under the terms of" - , "this Lesser General Public License (also called \"this License\")." - , "Each licensee is addressed as \"you\"." - , "" - , " A \"library\" means a collection of software functions and/or data" - , "prepared so as to be conveniently linked with application programs" - , "(which use some of those functions and data) to form executables." - , "" - , " The \"Library\", below, refers to any such software library or work" - , "which has been distributed under these terms. A \"work based on the" - , "Library\" means either the Library or any derivative work under" - , "copyright law: that is to say, a work containing the Library or a" - , "portion of it, either verbatim or with modifications and/or translated" - , "straightforwardly into another language. (Hereinafter, translation is" - , "included without limitation in the term \"modification\".)" - , "" - , " \"Source code\" for a work means the preferred form of the work for" - , "making modifications to it. For a library, complete source code means" - , "all the source code for all modules it contains, plus any associated" - , "interface definition files, plus the scripts used to control compilation" - , "and installation of the library." 
- , "" - , " Activities other than copying, distribution and modification are not" - , "covered by this License; they are outside its scope. The act of" - , "running a program using the Library is not restricted, and output from" - , "such a program is covered only if its contents constitute a work based" - , "on the Library (independent of the use of the Library in a tool for" - , "writing it). Whether that is true depends on what the Library does" - , "and what the program that uses the Library does." - , "" - , " 1. You may copy and distribute verbatim copies of the Library's" - , "complete source code as you receive it, in any medium, provided that" - , "you conspicuously and appropriately publish on each copy an" - , "appropriate copyright notice and disclaimer of warranty; keep intact" - , "all the notices that refer to this License and to the absence of any" - , "warranty; and distribute a copy of this License along with the" - , "Library." - , "" - , " You may charge a fee for the physical act of transferring a copy," - , "and you may at your option offer warranty protection in exchange for a" - , "fee." - , "" - , " 2. You may modify your copy or copies of the Library or any portion" - , "of it, thus forming a work based on the Library, and copy and" - , "distribute such modifications or work under the terms of Section 1" - , "above, provided that you also meet all of these conditions:" - , "" - , " a) The modified work must itself be a software library." - , "" - , " b) You must cause the files modified to carry prominent notices" - , " stating that you changed the files and the date of any change." - , "" - , " c) You must cause the whole of the work to be licensed at no" - , " charge to all third parties under the terms of this License." 
- , "" - , " d) If a facility in the modified Library refers to a function or a" - , " table of data to be supplied by an application program that uses" - , " the facility, other than as an argument passed when the facility" - , " is invoked, then you must make a good faith effort to ensure that," - , " in the event an application does not supply such function or" - , " table, the facility still operates, and performs whatever part of" - , " its purpose remains meaningful." - , "" - , " (For example, a function in a library to compute square roots has" - , " a purpose that is entirely well-defined independent of the" - , " application. Therefore, Subsection 2d requires that any" - , " application-supplied function or table used by this function must" - , " be optional: if the application does not supply it, the square" - , " root function must still compute square roots.)" - , "" - , "These requirements apply to the modified work as a whole. If" - , "identifiable sections of that work are not derived from the Library," - , "and can be reasonably considered independent and separate works in" - , "themselves, then this License, and its terms, do not apply to those" - , "sections when you distribute them as separate works. But when you" - , "distribute the same sections as part of a whole which is a work based" - , "on the Library, the distribution of the whole must be on the terms of" - , "this License, whose permissions for other licensees extend to the" - , "entire whole, and thus to each and every part regardless of who wrote" - , "it." - , "" - , "Thus, it is not the intent of this section to claim rights or contest" - , "your rights to work written entirely by you; rather, the intent is to" - , "exercise the right to control the distribution of derivative or" - , "collective works based on the Library." 
- , "" - , "In addition, mere aggregation of another work not based on the Library" - , "with the Library (or with a work based on the Library) on a volume of" - , "a storage or distribution medium does not bring the other work under" - , "the scope of this License." - , "" - , " 3. You may opt to apply the terms of the ordinary GNU General Public" - , "License instead of this License to a given copy of the Library. To do" - , "this, you must alter all the notices that refer to this License, so" - , "that they refer to the ordinary GNU General Public License, version 2," - , "instead of to this License. (If a newer version than version 2 of the" - , "ordinary GNU General Public License has appeared, then you can specify" - , "that version instead if you wish.) Do not make any other change in" - , "these notices." - , "" - , " Once this change is made in a given copy, it is irreversible for" - , "that copy, so the ordinary GNU General Public License applies to all" - , "subsequent copies and derivative works made from that copy." - , "" - , " This option is useful when you wish to copy part of the code of" - , "the Library into a program that is not a library." - , "" - , " 4. You may copy and distribute the Library (or a portion or" - , "derivative of it, under Section 2) in object code or executable form" - , "under the terms of Sections 1 and 2 above provided that you accompany" - , "it with the complete corresponding machine-readable source code, which" - , "must be distributed under the terms of Sections 1 and 2 above on a" - , "medium customarily used for software interchange." - , "" - , " If distribution of object code is made by offering access to copy" - , "from a designated place, then offering equivalent access to copy the" - , "source code from the same place satisfies the requirement to" - , "distribute the source code, even though third parties are not" - , "compelled to copy the source along with the object code." - , "" - , " 5. 
A program that contains no derivative of any portion of the" - , "Library, but is designed to work with the Library by being compiled or" - , "linked with it, is called a \"work that uses the Library\". Such a" - , "work, in isolation, is not a derivative work of the Library, and" - , "therefore falls outside the scope of this License." - , "" - , " However, linking a \"work that uses the Library\" with the Library" - , "creates an executable that is a derivative of the Library (because it" - , "contains portions of the Library), rather than a \"work that uses the" - , "library\". The executable is therefore covered by this License." - , "Section 6 states terms for distribution of such executables." - , "" - , " When a \"work that uses the Library\" uses material from a header file" - , "that is part of the Library, the object code for the work may be a" - , "derivative work of the Library even though the source code is not." - , "Whether this is true is especially significant if the work can be" - , "linked without the Library, or if the work is itself a library. The" - , "threshold for this to be true is not precisely defined by law." - , "" - , " If such an object file uses only numerical parameters, data" - , "structure layouts and accessors, and small macros and small inline" - , "functions (ten lines or less in length), then the use of the object" - , "file is unrestricted, regardless of whether it is legally a derivative" - , "work. (Executables containing this object code plus portions of the" - , "Library will still fall under Section 6.)" - , "" - , " Otherwise, if the work is a derivative of the Library, you may" - , "distribute the object code for the work under the terms of Section 6." - , "Any executables containing that work also fall under Section 6," - , "whether or not they are linked directly with the Library itself." - , "" - , " 6. 
As an exception to the Sections above, you may also combine or" - , "link a \"work that uses the Library\" with the Library to produce a" - , "work containing portions of the Library, and distribute that work" - , "under terms of your choice, provided that the terms permit" - , "modification of the work for the customer's own use and reverse" - , "engineering for debugging such modifications." - , "" - , " You must give prominent notice with each copy of the work that the" - , "Library is used in it and that the Library and its use are covered by" - , "this License. You must supply a copy of this License. If the work" - , "during execution displays copyright notices, you must include the" - , "copyright notice for the Library among them, as well as a reference" - , "directing the user to the copy of this License. Also, you must do one" - , "of these things:" - , "" - , " a) Accompany the work with the complete corresponding" - , " machine-readable source code for the Library including whatever" - , " changes were used in the work (which must be distributed under" - , " Sections 1 and 2 above); and, if the work is an executable linked" - , " with the Library, with the complete machine-readable \"work that" - , " uses the Library\", as object code and/or source code, so that the" - , " user can modify the Library and then relink to produce a modified" - , " executable containing the modified Library. (It is understood" - , " that the user who changes the contents of definitions files in the" - , " Library will not necessarily be able to recompile the application" - , " to use the modified definitions.)" - , "" - , " b) Use a suitable shared library mechanism for linking with the" - , " Library. 
A suitable mechanism is one that (1) uses at run time a" - , " copy of the library already present on the user's computer system," - , " rather than copying library functions into the executable, and (2)" - , " will operate properly with a modified version of the library, if" - , " the user installs one, as long as the modified version is" - , " interface-compatible with the version that the work was made with." - , "" - , " c) Accompany the work with a written offer, valid for at" - , " least three years, to give the same user the materials" - , " specified in Subsection 6a, above, for a charge no more" - , " than the cost of performing this distribution." - , "" - , " d) If distribution of the work is made by offering access to copy" - , " from a designated place, offer equivalent access to copy the above" - , " specified materials from the same place." - , "" - , " e) Verify that the user has already received a copy of these" - , " materials or that you have already sent this user a copy." - , "" - , " For an executable, the required form of the \"work that uses the" - , "Library\" must include any data and utility programs needed for" - , "reproducing the executable from it. However, as a special exception," - , "the materials to be distributed need not include anything that is" - , "normally distributed (in either source or binary form) with the major" - , "components (compiler, kernel, and so on) of the operating system on" - , "which the executable runs, unless that component itself accompanies" - , "the executable." - , "" - , " It may happen that this requirement contradicts the license" - , "restrictions of other proprietary libraries that do not normally" - , "accompany the operating system. Such a contradiction means you cannot" - , "use both them and the Library together in an executable that you" - , "distribute." - , "" - , " 7. 
You may place library facilities that are a work based on the" - , "Library side-by-side in a single library together with other library" - , "facilities not covered by this License, and distribute such a combined" - , "library, provided that the separate distribution of the work based on" - , "the Library and of the other library facilities is otherwise" - , "permitted, and provided that you do these two things:" - , "" - , " a) Accompany the combined library with a copy of the same work" - , " based on the Library, uncombined with any other library" - , " facilities. This must be distributed under the terms of the" - , " Sections above." - , "" - , " b) Give prominent notice with the combined library of the fact" - , " that part of it is a work based on the Library, and explaining" - , " where to find the accompanying uncombined form of the same work." - , "" - , " 8. You may not copy, modify, sublicense, link with, or distribute" - , "the Library except as expressly provided under this License. Any" - , "attempt otherwise to copy, modify, sublicense, link with, or" - , "distribute the Library is void, and will automatically terminate your" - , "rights under this License. However, parties who have received copies," - , "or rights, from you under this License will not have their licenses" - , "terminated so long as such parties remain in full compliance." - , "" - , " 9. You are not required to accept this License, since you have not" - , "signed it. However, nothing else grants you permission to modify or" - , "distribute the Library or its derivative works. These actions are" - , "prohibited by law if you do not accept this License. Therefore, by" - , "modifying or distributing the Library (or any work based on the" - , "Library), you indicate your acceptance of this License to do so, and" - , "all its terms and conditions for copying, distributing or modifying" - , "the Library or works based on it." - , "" - , " 10. 
Each time you redistribute the Library (or any work based on the" - , "Library), the recipient automatically receives a license from the" - , "original licensor to copy, distribute, link with or modify the Library" - , "subject to these terms and conditions. You may not impose any further" - , "restrictions on the recipients' exercise of the rights granted herein." - , "You are not responsible for enforcing compliance by third parties with" - , "this License." - , "" - , " 11. If, as a consequence of a court judgment or allegation of patent" - , "infringement or for any other reason (not limited to patent issues)," - , "conditions are imposed on you (whether by court order, agreement or" - , "otherwise) that contradict the conditions of this License, they do not" - , "excuse you from the conditions of this License. If you cannot" - , "distribute so as to satisfy simultaneously your obligations under this" - , "License and any other pertinent obligations, then as a consequence you" - , "may not distribute the Library at all. For example, if a patent" - , "license would not permit royalty-free redistribution of the Library by" - , "all those who receive copies directly or indirectly through you, then" - , "the only way you could satisfy both it and this License would be to" - , "refrain entirely from distribution of the Library." - , "" - , "If any portion of this section is held invalid or unenforceable under any" - , "particular circumstance, the balance of the section is intended to apply," - , "and the section as a whole is intended to apply in other circumstances." - , "" - , "It is not the purpose of this section to induce you to infringe any" - , "patents or other property right claims or to contest validity of any" - , "such claims; this section has the sole purpose of protecting the" - , "integrity of the free software distribution system which is" - , "implemented by public license practices. 
Many people have made" - , "generous contributions to the wide range of software distributed" - , "through that system in reliance on consistent application of that" - , "system; it is up to the author/donor to decide if he or she is willing" - , "to distribute software through any other system and a licensee cannot" - , "impose that choice." - , "" - , "This section is intended to make thoroughly clear what is believed to" - , "be a consequence of the rest of this License." - , "" - , " 12. If the distribution and/or use of the Library is restricted in" - , "certain countries either by patents or by copyrighted interfaces, the" - , "original copyright holder who places the Library under this License may add" - , "an explicit geographical distribution limitation excluding those countries," - , "so that distribution is permitted only in or among countries not thus" - , "excluded. In such case, this License incorporates the limitation as if" - , "written in the body of this License." - , "" - , " 13. The Free Software Foundation may publish revised and/or new" - , "versions of the Lesser General Public License from time to time." - , "Such new versions will be similar in spirit to the present version," - , "but may differ in detail to address new problems or concerns." - , "" - , "Each version is given a distinguishing version number. If the Library" - , "specifies a version number of this License which applies to it and" - , "\"any later version\", you have the option of following the terms and" - , "conditions either of that version or of any later version published by" - , "the Free Software Foundation. If the Library does not specify a" - , "license version number, you may choose any version ever published by" - , "the Free Software Foundation." - , "" - , " 14. If you wish to incorporate parts of the Library into other free" - , "programs whose distribution conditions are incompatible with these," - , "write to the author to ask for permission. 
For software which is" - , "copyrighted by the Free Software Foundation, write to the Free" - , "Software Foundation; we sometimes make exceptions for this. Our" - , "decision will be guided by the two goals of preserving the free status" - , "of all derivatives of our free software and of promoting the sharing" - , "and reuse of software generally." - , "" - , " NO WARRANTY" - , "" - , " 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO" - , "WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW." - , "EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR" - , "OTHER PARTIES PROVIDE THE LIBRARY \"AS IS\" WITHOUT WARRANTY OF ANY" - , "KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE" - , "IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR" - , "PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE" - , "LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME" - , "THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION." - , "" - , " 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN" - , "WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY" - , "AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU" - , "FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR" - , "CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE" - , "LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING" - , "RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A" - , "FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF" - , "SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH" - , "DAMAGES." 
- , "" - , " END OF TERMS AND CONDITIONS" - , "" - , " How to Apply These Terms to Your New Libraries" - , "" - , " If you develop a new library, and you want it to be of the greatest" - , "possible use to the public, we recommend making it free software that" - , "everyone can redistribute and change. You can do so by permitting" - , "redistribution under these terms (or, alternatively, under the terms of the" - , "ordinary General Public License)." - , "" - , " To apply these terms, attach the following notices to the library. It is" - , "safest to attach them to the start of each source file to most effectively" - , "convey the exclusion of warranty; and each file should have at least the" - , "\"copyright\" line and a pointer to where the full notice is found." - , "" - , " " - , " Copyright (C) " - , "" - , " This library is free software; you can redistribute it and/or" - , " modify it under the terms of the GNU Lesser General Public" - , " License as published by the Free Software Foundation; either" - , " version 2.1 of the License, or (at your option) any later version." - , "" - , " This library is distributed in the hope that it will be useful," - , " but WITHOUT ANY WARRANTY; without even the implied warranty of" - , " MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU" - , " Lesser General Public License for more details." - , "" - , " You should have received a copy of the GNU Lesser General Public" - , " License along with this library; if not, write to the Free Software" - , " Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA" - , "" - , "Also add information on how to contact you by electronic and paper mail." - , "" - , "You should also get your employer (if you work as a programmer) or your" - , "school, if any, to sign a \"copyright disclaimer\" for the library, if" - , "necessary. 
Here is a sample; alter the names:" - , "" - , " Yoyodyne, Inc., hereby disclaims all copyright interest in the" - , " library `Frob' (a library for tweaking knobs) written by James Random Hacker." - , "" - , " , 1 April 1990" - , " Ty Coon, President of Vice" - , "" - , "That's all there is to it!" - ] - -lgpl3 :: License -lgpl3 = unlines - [ " GNU LESSER GENERAL PUBLIC LICENSE" - , " Version 3, 29 June 2007" - , "" - , " Copyright (C) 2007 Free Software Foundation, Inc. " - , " Everyone is permitted to copy and distribute verbatim copies" - , " of this license document, but changing it is not allowed." - , "" - , "" - , " This version of the GNU Lesser General Public License incorporates" - , "the terms and conditions of version 3 of the GNU General Public" - , "License, supplemented by the additional permissions listed below." - , "" - , " 0. Additional Definitions." - , "" - , " As used herein, \"this License\" refers to version 3 of the GNU Lesser" - , "General Public License, and the \"GNU GPL\" refers to version 3 of the GNU" - , "General Public License." - , "" - , " \"The Library\" refers to a covered work governed by this License," - , "other than an Application or a Combined Work as defined below." - , "" - , " An \"Application\" is any work that makes use of an interface provided" - , "by the Library, but which is not otherwise based on the Library." - , "Defining a subclass of a class defined by the Library is deemed a mode" - , "of using an interface provided by the Library." - , "" - , " A \"Combined Work\" is a work produced by combining or linking an" - , "Application with the Library. The particular version of the Library" - , "with which the Combined Work was made is also called the \"Linked" - , "Version\"." 
- , "" - , " The \"Minimal Corresponding Source\" for a Combined Work means the" - , "Corresponding Source for the Combined Work, excluding any source code" - , "for portions of the Combined Work that, considered in isolation, are" - , "based on the Application, and not on the Linked Version." - , "" - , " The \"Corresponding Application Code\" for a Combined Work means the" - , "object code and/or source code for the Application, including any data" - , "and utility programs needed for reproducing the Combined Work from the" - , "Application, but excluding the System Libraries of the Combined Work." - , "" - , " 1. Exception to Section 3 of the GNU GPL." - , "" - , " You may convey a covered work under sections 3 and 4 of this License" - , "without being bound by section 3 of the GNU GPL." - , "" - , " 2. Conveying Modified Versions." - , "" - , " If you modify a copy of the Library, and, in your modifications, a" - , "facility refers to a function or data to be supplied by an Application" - , "that uses the facility (other than as an argument passed when the" - , "facility is invoked), then you may convey a copy of the modified" - , "version:" - , "" - , " a) under this License, provided that you make a good faith effort to" - , " ensure that, in the event an Application does not supply the" - , " function or data, the facility still operates, and performs" - , " whatever part of its purpose remains meaningful, or" - , "" - , " b) under the GNU GPL, with none of the additional permissions of" - , " this License applicable to that copy." - , "" - , " 3. Object Code Incorporating Material from Library Header Files." - , "" - , " The object code form of an Application may incorporate material from" - , "a header file that is part of the Library. 
You may convey such object" - , "code under terms of your choice, provided that, if the incorporated" - , "material is not limited to numerical parameters, data structure" - , "layouts and accessors, or small macros, inline functions and templates" - , "(ten or fewer lines in length), you do both of the following:" - , "" - , " a) Give prominent notice with each copy of the object code that the" - , " Library is used in it and that the Library and its use are" - , " covered by this License." - , "" - , " b) Accompany the object code with a copy of the GNU GPL and this license" - , " document." - , "" - , " 4. Combined Works." - , "" - , " You may convey a Combined Work under terms of your choice that," - , "taken together, effectively do not restrict modification of the" - , "portions of the Library contained in the Combined Work and reverse" - , "engineering for debugging such modifications, if you also do each of" - , "the following:" - , "" - , " a) Give prominent notice with each copy of the Combined Work that" - , " the Library is used in it and that the Library and its use are" - , " covered by this License." - , "" - , " b) Accompany the Combined Work with a copy of the GNU GPL and this license" - , " document." - , "" - , " c) For a Combined Work that displays copyright notices during" - , " execution, include the copyright notice for the Library among" - , " these notices, as well as a reference directing the user to the" - , " copies of the GNU GPL and this license document." - , "" - , " d) Do one of the following:" - , "" - , " 0) Convey the Minimal Corresponding Source under the terms of this" - , " License, and the Corresponding Application Code in a form" - , " suitable for, and under terms that permit, the user to" - , " recombine or relink the Application with a modified version of" - , " the Linked Version to produce a modified Combined Work, in the" - , " manner specified by section 6 of the GNU GPL for conveying" - , " Corresponding Source." 
- , "" - , " 1) Use a suitable shared library mechanism for linking with the" - , " Library. A suitable mechanism is one that (a) uses at run time" - , " a copy of the Library already present on the user's computer" - , " system, and (b) will operate properly with a modified version" - , " of the Library that is interface-compatible with the Linked" - , " Version." - , "" - , " e) Provide Installation Information, but only if you would otherwise" - , " be required to provide such information under section 6 of the" - , " GNU GPL, and only to the extent that such information is" - , " necessary to install and execute a modified version of the" - , " Combined Work produced by recombining or relinking the" - , " Application with a modified version of the Linked Version. (If" - , " you use option 4d0, the Installation Information must accompany" - , " the Minimal Corresponding Source and Corresponding Application" - , " Code. If you use option 4d1, you must provide the Installation" - , " Information in the manner specified by section 6 of the GNU GPL" - , " for conveying Corresponding Source.)" - , "" - , " 5. Combined Libraries." - , "" - , " You may place library facilities that are a work based on the" - , "Library side by side in a single library together with other library" - , "facilities that are not Applications and are not covered by this" - , "License, and convey such a combined library under terms of your" - , "choice, if you do both of the following:" - , "" - , " a) Accompany the combined library with a copy of the same work based" - , " on the Library, uncombined with any other library facilities," - , " conveyed under the terms of this License." - , "" - , " b) Give prominent notice with the combined library that part of it" - , " is a work based on the Library, and explaining where to find the" - , " accompanying uncombined form of the same work." - , "" - , " 6. Revised Versions of the GNU Lesser General Public License." 
- , "" - , " The Free Software Foundation may publish revised and/or new versions" - , "of the GNU Lesser General Public License from time to time. Such new" - , "versions will be similar in spirit to the present version, but may" - , "differ in detail to address new problems or concerns." - , "" - , " Each version is given a distinguishing version number. If the" - , "Library as you received it specifies that a certain numbered version" - , "of the GNU Lesser General Public License \"or any later version\"" - , "applies to it, you have the option of following the terms and" - , "conditions either of that published version or of any later version" - , "published by the Free Software Foundation. If the Library as you" - , "received it does not specify a version number of the GNU Lesser" - , "General Public License, you may choose any version of the GNU Lesser" - , "General Public License ever published by the Free Software Foundation." - , "" - , " If the Library as you received it specifies that a proxy can decide" - , "whether future versions of the GNU Lesser General Public License shall" - , "apply, that proxy's public statement of acceptance of any version is" - , "permanent authorization for you to choose that version for the" - , "Library." - ] - -apache20 :: License -apache20 = unlines - [ "" - , " Apache License" - , " Version 2.0, January 2004" - , " http://www.apache.org/licenses/" - , "" - , " TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION" - , "" - , " 1. Definitions." - , "" - , " \"License\" shall mean the terms and conditions for use, reproduction," - , " and distribution as defined by Sections 1 through 9 of this document." - , "" - , " \"Licensor\" shall mean the copyright owner or entity authorized by" - , " the copyright owner that is granting the License." 
- , "" - , " \"Legal Entity\" shall mean the union of the acting entity and all" - , " other entities that control, are controlled by, or are under common" - , " control with that entity. For the purposes of this definition," - , " \"control\" means (i) the power, direct or indirect, to cause the" - , " direction or management of such entity, whether by contract or" - , " otherwise, or (ii) ownership of fifty percent (50%) or more of the" - , " outstanding shares, or (iii) beneficial ownership of such entity." - , "" - , " \"You\" (or \"Your\") shall mean an individual or Legal Entity" - , " exercising permissions granted by this License." - , "" - , " \"Source\" form shall mean the preferred form for making modifications," - , " including but not limited to software source code, documentation" - , " source, and configuration files." - , "" - , " \"Object\" form shall mean any form resulting from mechanical" - , " transformation or translation of a Source form, including but" - , " not limited to compiled object code, generated documentation," - , " and conversions to other media types." - , "" - , " \"Work\" shall mean the work of authorship, whether in Source or" - , " Object form, made available under the License, as indicated by a" - , " copyright notice that is included in or attached to the work" - , " (an example is provided in the Appendix below)." - , "" - , " \"Derivative Works\" shall mean any work, whether in Source or Object" - , " form, that is based on (or derived from) the Work and for which the" - , " editorial revisions, annotations, elaborations, or other modifications" - , " represent, as a whole, an original work of authorship. For the purposes" - , " of this License, Derivative Works shall not include works that remain" - , " separable from, or merely link (or bind by name) to the interfaces of," - , " the Work and Derivative Works thereof." 
- , "" - , " \"Contribution\" shall mean any work of authorship, including" - , " the original version of the Work and any modifications or additions" - , " to that Work or Derivative Works thereof, that is intentionally" - , " submitted to Licensor for inclusion in the Work by the copyright owner" - , " or by an individual or Legal Entity authorized to submit on behalf of" - , " the copyright owner. For the purposes of this definition, \"submitted\"" - , " means any form of electronic, verbal, or written communication sent" - , " to the Licensor or its representatives, including but not limited to" - , " communication on electronic mailing lists, source code control systems," - , " and issue tracking systems that are managed by, or on behalf of, the" - , " Licensor for the purpose of discussing and improving the Work, but" - , " excluding communication that is conspicuously marked or otherwise" - , " designated in writing by the copyright owner as \"Not a Contribution.\"" - , "" - , " \"Contributor\" shall mean Licensor and any individual or Legal Entity" - , " on behalf of whom a Contribution has been received by Licensor and" - , " subsequently incorporated within the Work." - , "" - , " 2. Grant of Copyright License. Subject to the terms and conditions of" - , " this License, each Contributor hereby grants to You a perpetual," - , " worldwide, non-exclusive, no-charge, royalty-free, irrevocable" - , " copyright license to reproduce, prepare Derivative Works of," - , " publicly display, publicly perform, sublicense, and distribute the" - , " Work and such Derivative Works in Source or Object form." - , "" - , " 3. Grant of Patent License. 
Subject to the terms and conditions of" - , " this License, each Contributor hereby grants to You a perpetual," - , " worldwide, non-exclusive, no-charge, royalty-free, irrevocable" - , " (except as stated in this section) patent license to make, have made," - , " use, offer to sell, sell, import, and otherwise transfer the Work," - , " where such license applies only to those patent claims licensable" - , " by such Contributor that are necessarily infringed by their" - , " Contribution(s) alone or by combination of their Contribution(s)" - , " with the Work to which such Contribution(s) was submitted. If You" - , " institute patent litigation against any entity (including a" - , " cross-claim or counterclaim in a lawsuit) alleging that the Work" - , " or a Contribution incorporated within the Work constitutes direct" - , " or contributory patent infringement, then any patent licenses" - , " granted to You under this License for that Work shall terminate" - , " as of the date such litigation is filed." - , "" - , " 4. Redistribution. 
You may reproduce and distribute copies of the" - , " Work or Derivative Works thereof in any medium, with or without" - , " modifications, and in Source or Object form, provided that You" - , " meet the following conditions:" - , "" - , " (a) You must give any other recipients of the Work or" - , " Derivative Works a copy of this License; and" - , "" - , " (b) You must cause any modified files to carry prominent notices" - , " stating that You changed the files; and" - , "" - , " (c) You must retain, in the Source form of any Derivative Works" - , " that You distribute, all copyright, patent, trademark, and" - , " attribution notices from the Source form of the Work," - , " excluding those notices that do not pertain to any part of" - , " the Derivative Works; and" - , "" - , " (d) If the Work includes a \"NOTICE\" text file as part of its" - , " distribution, then any Derivative Works that You distribute must" - , " include a readable copy of the attribution notices contained" - , " within such NOTICE file, excluding those notices that do not" - , " pertain to any part of the Derivative Works, in at least one" - , " of the following places: within a NOTICE text file distributed" - , " as part of the Derivative Works; within the Source form or" - , " documentation, if provided along with the Derivative Works; or," - , " within a display generated by the Derivative Works, if and" - , " wherever such third-party notices normally appear. The contents" - , " of the NOTICE file are for informational purposes only and" - , " do not modify the License. You may add Your own attribution" - , " notices within Derivative Works that You distribute, alongside" - , " or as an addendum to the NOTICE text from the Work, provided" - , " that such additional attribution notices cannot be construed" - , " as modifying the License." 
- , "" - , " You may add Your own copyright statement to Your modifications and" - , " may provide additional or different license terms and conditions" - , " for use, reproduction, or distribution of Your modifications, or" - , " for any such Derivative Works as a whole, provided Your use," - , " reproduction, and distribution of the Work otherwise complies with" - , " the conditions stated in this License." - , "" - , " 5. Submission of Contributions. Unless You explicitly state otherwise," - , " any Contribution intentionally submitted for inclusion in the Work" - , " by You to the Licensor shall be under the terms and conditions of" - , " this License, without any additional terms or conditions." - , " Notwithstanding the above, nothing herein shall supersede or modify" - , " the terms of any separate license agreement you may have executed" - , " with Licensor regarding such Contributions." - , "" - , " 6. Trademarks. This License does not grant permission to use the trade" - , " names, trademarks, service marks, or product names of the Licensor," - , " except as required for reasonable and customary use in describing the" - , " origin of the Work and reproducing the content of the NOTICE file." - , "" - , " 7. Disclaimer of Warranty. Unless required by applicable law or" - , " agreed to in writing, Licensor provides the Work (and each" - , " Contributor provides its Contributions) on an \"AS IS\" BASIS," - , " WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or" - , " implied, including, without limitation, any warranties or conditions" - , " of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A" - , " PARTICULAR PURPOSE. You are solely responsible for determining the" - , " appropriateness of using or redistributing the Work and assume any" - , " risks associated with Your exercise of permissions under this License." - , "" - , " 8. Limitation of Liability. 
In no event and under no legal theory," - , " whether in tort (including negligence), contract, or otherwise," - , " unless required by applicable law (such as deliberate and grossly" - , " negligent acts) or agreed to in writing, shall any Contributor be" - , " liable to You for damages, including any direct, indirect, special," - , " incidental, or consequential damages of any character arising as a" - , " result of this License or out of the use or inability to use the" - , " Work (including but not limited to damages for loss of goodwill," - , " work stoppage, computer failure or malfunction, or any and all" - , " other commercial damages or losses), even if such Contributor" - , " has been advised of the possibility of such damages." - , "" - , " 9. Accepting Warranty or Additional Liability. While redistributing" - , " the Work or Derivative Works thereof, You may choose to offer," - , " and charge a fee for, acceptance of support, warranty, indemnity," - , " or other liability obligations and/or rights consistent with this" - , " License. However, in accepting such obligations, You may act only" - , " on Your own behalf and on Your sole responsibility, not on behalf" - , " of any other Contributor, and only if You agree to indemnify," - , " defend, and hold each Contributor harmless for any liability" - , " incurred by, or claims asserted against, such Contributor by reason" - , " of your accepting any such warranty or additional liability." - , "" - , " END OF TERMS AND CONDITIONS" - , "" - , " APPENDIX: How to apply the Apache License to your work." - , "" - , " To apply the Apache License to your work, attach the following" - , " boilerplate notice, with the fields enclosed by brackets \"[]\"" - , " replaced with your own identifying information. (Don't include" - , " the brackets!) The text should be enclosed in the appropriate" - , " comment syntax for the file format. 
We also recommend that a" - , " file or class name and description of purpose be included on the" - , " same \"printed page\" as the copyright notice for easier" - , " identification within third-party archives." - , "" - , " Copyright [yyyy] [name of copyright owner]" - , "" - , " Licensed under the Apache License, Version 2.0 (the \"License\");" - , " you may not use this file except in compliance with the License." - , " You may obtain a copy of the License at" - , "" - , " http://www.apache.org/licenses/LICENSE-2.0" - , "" - , " Unless required by applicable law or agreed to in writing, software" - , " distributed under the License is distributed on an \"AS IS\" BASIS," - , " WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied." - , " See the License for the specific language governing permissions and" - , " limitations under the License." - ] - -mit :: String -> String -> License -mit authors year = unlines - [ "Copyright (c) " ++ year ++ " " ++ authors - , "" - , "Permission is hereby granted, free of charge, to any person obtaining" - , "a copy of this software and associated documentation files (the" - , "\"Software\"), to deal in the Software without restriction, including" - , "without limitation the rights to use, copy, modify, merge, publish," - , "distribute, sublicense, and/or sell copies of the Software, and to" - , "permit persons to whom the Software is furnished to do so, subject to" - , "the following conditions:" - , "" - , "The above copyright notice and this permission notice shall be included" - , "in all copies or substantial portions of the Software." - , "" - , "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND," - , "EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF" - , "MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT." 
- , "IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY" - , "CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT," - , "TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE" - , "SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE." - ] - -mpl20 :: License -mpl20 = unlines - [ "Mozilla Public License Version 2.0" - , "==================================" - , "" - , "1. Definitions" - , "--------------" - , "" - , "1.1. \"Contributor\"" - , " means each individual or legal entity that creates, contributes to" - , " the creation of, or owns Covered Software." - , "" - , "1.2. \"Contributor Version\"" - , " means the combination of the Contributions of others (if any) used" - , " by a Contributor and that particular Contributor's Contribution." - , "" - , "1.3. \"Contribution\"" - , " means Covered Software of a particular Contributor." - , "" - , "1.4. \"Covered Software\"" - , " means Source Code Form to which the initial Contributor has attached" - , " the notice in Exhibit A, the Executable Form of such Source Code" - , " Form, and Modifications of such Source Code Form, in each case" - , " including portions thereof." - , "" - , "1.5. \"Incompatible With Secondary Licenses\"" - , " means" - , "" - , " (a) that the initial Contributor has attached the notice described" - , " in Exhibit B to the Covered Software; or" - , "" - , " (b) that the Covered Software was made available under the terms of" - , " version 1.1 or earlier of the License, but not also under the" - , " terms of a Secondary License." - , "" - , "1.6. \"Executable Form\"" - , " means any form of the work other than Source Code Form." - , "" - , "1.7. \"Larger Work\"" - , " means a work that combines Covered Software with other material, in" - , " a separate file or files, that is not Covered Software." - , "" - , "1.8. \"License\"" - , " means this document." - , "" - , "1.9. 
\"Licensable\"" - , " means having the right to grant, to the maximum extent possible," - , " whether at the time of the initial grant or subsequently, any and" - , " all of the rights conveyed by this License." - , "" - , "1.10. \"Modifications\"" - , " means any of the following:" - , "" - , " (a) any file in Source Code Form that results from an addition to," - , " deletion from, or modification of the contents of Covered" - , " Software; or" - , "" - , " (b) any new file in Source Code Form that contains any Covered" - , " Software." - , "" - , "1.11. \"Patent Claims\" of a Contributor" - , " means any patent claim(s), including without limitation, method," - , " process, and apparatus claims, in any patent Licensable by such" - , " Contributor that would be infringed, but for the grant of the" - , " License, by the making, using, selling, offering for sale, having" - , " made, import, or transfer of either its Contributions or its" - , " Contributor Version." - , "" - , "1.12. \"Secondary License\"" - , " means either the GNU General Public License, Version 2.0, the GNU" - , " Lesser General Public License, Version 2.1, the GNU Affero General" - , " Public License, Version 3.0, or any later versions of those" - , " licenses." - , "" - , "1.13. \"Source Code Form\"" - , " means the form of the work preferred for making modifications." - , "" - , "1.14. \"You\" (or \"Your\")" - , " means an individual or a legal entity exercising rights under this" - , " License. For legal entities, \"You\" includes any entity that" - , " controls, is controlled by, or is under common control with You. For" - , " purposes of this definition, \"control\" means (a) the power, direct" - , " or indirect, to cause the direction or management of such entity," - , " whether by contract or otherwise, or (b) ownership of more than" - , " fifty percent (50%) of the outstanding shares or beneficial" - , " ownership of such entity." - , "" - , "2. 
License Grants and Conditions" - , "--------------------------------" - , "" - , "2.1. Grants" - , "" - , "Each Contributor hereby grants You a world-wide, royalty-free," - , "non-exclusive license:" - , "" - , "(a) under intellectual property rights (other than patent or trademark)" - , " Licensable by such Contributor to use, reproduce, make available," - , " modify, display, perform, distribute, and otherwise exploit its" - , " Contributions, either on an unmodified basis, with Modifications, or" - , " as part of a Larger Work; and" - , "" - , "(b) under Patent Claims of such Contributor to make, use, sell, offer" - , " for sale, have made, import, and otherwise transfer either its" - , " Contributions or its Contributor Version." - , "" - , "2.2. Effective Date" - , "" - , "The licenses granted in Section 2.1 with respect to any Contribution" - , "become effective for each Contribution on the date the Contributor first" - , "distributes such Contribution." - , "" - , "2.3. Limitations on Grant Scope" - , "" - , "The licenses granted in this Section 2 are the only rights granted under" - , "this License. No additional rights or licenses will be implied from the" - , "distribution or licensing of Covered Software under this License." - , "Notwithstanding Section 2.1(b) above, no patent license is granted by a" - , "Contributor:" - , "" - , "(a) for any code that a Contributor has removed from Covered Software;" - , " or" - , "" - , "(b) for infringements caused by: (i) Your and any other third party's" - , " modifications of Covered Software, or (ii) the combination of its" - , " Contributions with other software (except as part of its Contributor" - , " Version); or" - , "" - , "(c) under Patent Claims infringed by Covered Software in the absence of" - , " its Contributions." 
- , "" - , "This License does not grant any rights in the trademarks, service marks," - , "or logos of any Contributor (except as may be necessary to comply with" - , "the notice requirements in Section 3.4)." - , "" - , "2.4. Subsequent Licenses" - , "" - , "No Contributor makes additional grants as a result of Your choice to" - , "distribute the Covered Software under a subsequent version of this" - , "License (see Section 10.2) or under the terms of a Secondary License (if" - , "permitted under the terms of Section 3.3)." - , "" - , "2.5. Representation" - , "" - , "Each Contributor represents that the Contributor believes its" - , "Contributions are its original creation(s) or it has sufficient rights" - , "to grant the rights to its Contributions conveyed by this License." - , "" - , "2.6. Fair Use" - , "" - , "This License is not intended to limit any rights You have under" - , "applicable copyright doctrines of fair use, fair dealing, or other" - , "equivalents." - , "" - , "2.7. Conditions" - , "" - , "Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted" - , "in Section 2.1." - , "" - , "3. Responsibilities" - , "-------------------" - , "" - , "3.1. Distribution of Source Form" - , "" - , "All distribution of Covered Software in Source Code Form, including any" - , "Modifications that You create or to which You contribute, must be under" - , "the terms of this License. You must inform recipients that the Source" - , "Code Form of the Covered Software is governed by the terms of this" - , "License, and how they can obtain a copy of this License. You may not" - , "attempt to alter or restrict the recipients' rights in the Source Code" - , "Form." - , "" - , "3.2. 
Distribution of Executable Form" - , "" - , "If You distribute Covered Software in Executable Form then:" - , "" - , "(a) such Covered Software must also be made available in Source Code" - , " Form, as described in Section 3.1, and You must inform recipients of" - , " the Executable Form how they can obtain a copy of such Source Code" - , " Form by reasonable means in a timely manner, at a charge no more" - , " than the cost of distribution to the recipient; and" - , "" - , "(b) You may distribute such Executable Form under the terms of this" - , " License, or sublicense it under different terms, provided that the" - , " license for the Executable Form does not attempt to limit or alter" - , " the recipients' rights in the Source Code Form under this License." - , "" - , "3.3. Distribution of a Larger Work" - , "" - , "You may create and distribute a Larger Work under terms of Your choice," - , "provided that You also comply with the requirements of this License for" - , "the Covered Software. If the Larger Work is a combination of Covered" - , "Software with a work governed by one or more Secondary Licenses, and the" - , "Covered Software is not Incompatible With Secondary Licenses, this" - , "License permits You to additionally distribute such Covered Software" - , "under the terms of such Secondary License(s), so that the recipient of" - , "the Larger Work may, at their option, further distribute the Covered" - , "Software under the terms of either this License or such Secondary" - , "License(s)." - , "" - , "3.4. Notices" - , "" - , "You may not remove or alter the substance of any license notices" - , "(including copyright notices, patent notices, disclaimers of warranty," - , "or limitations of liability) contained within the Source Code Form of" - , "the Covered Software, except that You may alter any license notices to" - , "the extent required to remedy known factual inaccuracies." - , "" - , "3.5. 
Application of Additional Terms" - , "" - , "You may choose to offer, and to charge a fee for, warranty, support," - , "indemnity or liability obligations to one or more recipients of Covered" - , "Software. However, You may do so only on Your own behalf, and not on" - , "behalf of any Contributor. You must make it absolutely clear that any" - , "such warranty, support, indemnity, or liability obligation is offered by" - , "You alone, and You hereby agree to indemnify every Contributor for any" - , "liability incurred by such Contributor as a result of warranty, support," - , "indemnity or liability terms You offer. You may include additional" - , "disclaimers of warranty and limitations of liability specific to any" - , "jurisdiction." - , "" - , "4. Inability to Comply Due to Statute or Regulation" - , "---------------------------------------------------" - , "" - , "If it is impossible for You to comply with any of the terms of this" - , "License with respect to some or all of the Covered Software due to" - , "statute, judicial order, or regulation then You must: (a) comply with" - , "the terms of this License to the maximum extent possible; and (b)" - , "describe the limitations and the code they affect. Such description must" - , "be placed in a text file included with all distributions of the Covered" - , "Software under this License. Except to the extent prohibited by statute" - , "or regulation, such description must be sufficiently detailed for a" - , "recipient of ordinary skill to be able to understand it." - , "" - , "5. Termination" - , "--------------" - , "" - , "5.1. The rights granted under this License will terminate automatically" - , "if You fail to comply with any of its terms. 
However, if You become" - , "compliant, then the rights granted under this License from a particular" - , "Contributor are reinstated (a) provisionally, unless and until such" - , "Contributor explicitly and finally terminates Your grants, and (b) on an" - , "ongoing basis, if such Contributor fails to notify You of the" - , "non-compliance by some reasonable means prior to 60 days after You have" - , "come back into compliance. Moreover, Your grants from a particular" - , "Contributor are reinstated on an ongoing basis if such Contributor" - , "notifies You of the non-compliance by some reasonable means, this is the" - , "first time You have received notice of non-compliance with this License" - , "from such Contributor, and You become compliant prior to 30 days after" - , "Your receipt of the notice." - , "" - , "5.2. If You initiate litigation against any entity by asserting a patent" - , "infringement claim (excluding declaratory judgment actions," - , "counter-claims, and cross-claims) alleging that a Contributor Version" - , "directly or indirectly infringes any patent, then the rights granted to" - , "You by any and all Contributors for the Covered Software under Section" - , "2.1 of this License shall terminate." - , "" - , "5.3. In the event of termination under Sections 5.1 or 5.2 above, all" - , "end user license agreements (excluding distributors and resellers) which" - , "have been validly granted by You or Your distributors under this License" - , "prior to termination shall survive termination." - , "" - , "************************************************************************" - , "* *" - , "* 6. 
Disclaimer of Warranty *" - , "* ------------------------- *" - , "* *" - , "* Covered Software is provided under this License on an \"as is\" *" - , "* basis, without warranty of any kind, either expressed, implied, or *" - , "* statutory, including, without limitation, warranties that the *" - , "* Covered Software is free of defects, merchantable, fit for a *" - , "* particular purpose or non-infringing. The entire risk as to the *" - , "* quality and performance of the Covered Software is with You. *" - , "* Should any Covered Software prove defective in any respect, You *" - , "* (not any Contributor) assume the cost of any necessary servicing, *" - , "* repair, or correction. This disclaimer of warranty constitutes an *" - , "* essential part of this License. No use of any Covered Software is *" - , "* authorized under this License except under this disclaimer. *" - , "* *" - , "************************************************************************" - , "" - , "************************************************************************" - , "* *" - , "* 7. Limitation of Liability *" - , "* -------------------------- *" - , "* *" - , "* Under no circumstances and under no legal theory, whether tort *" - , "* (including negligence), contract, or otherwise, shall any *" - , "* Contributor, or anyone who distributes Covered Software as *" - , "* permitted above, be liable to You for any direct, indirect, *" - , "* special, incidental, or consequential damages of any character *" - , "* including, without limitation, damages for lost profits, loss of *" - , "* goodwill, work stoppage, computer failure or malfunction, or any *" - , "* and all other commercial damages or losses, even if such party *" - , "* shall have been informed of the possibility of such damages. 
This *" - , "* limitation of liability shall not apply to liability for death or *" - , "* personal injury resulting from such party's negligence to the *" - , "* extent applicable law prohibits such limitation. Some *" - , "* jurisdictions do not allow the exclusion or limitation of *" - , "* incidental or consequential damages, so this exclusion and *" - , "* limitation may not apply to You. *" - , "* *" - , "************************************************************************" - , "" - , "8. Litigation" - , "-------------" - , "" - , "Any litigation relating to this License may be brought only in the" - , "courts of a jurisdiction where the defendant maintains its principal" - , "place of business and such litigation shall be governed by laws of that" - , "jurisdiction, without reference to its conflict-of-law provisions." - , "Nothing in this Section shall prevent a party's ability to bring" - , "cross-claims or counter-claims." - , "" - , "9. Miscellaneous" - , "----------------" - , "" - , "This License represents the complete agreement concerning the subject" - , "matter hereof. If any provision of this License is held to be" - , "unenforceable, such provision shall be reformed only to the extent" - , "necessary to make it enforceable. Any law or regulation which provides" - , "that the language of a contract shall be construed against the drafter" - , "shall not be used to construe this License against a Contributor." - , "" - , "10. Versions of the License" - , "---------------------------" - , "" - , "10.1. New Versions" - , "" - , "Mozilla Foundation is the license steward. Except as provided in Section" - , "10.3, no one other than the license steward has the right to modify or" - , "publish new versions of this License. Each version will be given a" - , "distinguishing version number." - , "" - , "10.2. 
Effect of New Versions" - , "" - , "You may distribute the Covered Software under the terms of the version" - , "of the License under which You originally received the Covered Software," - , "or under the terms of any subsequent version published by the license" - , "steward." - , "" - , "10.3. Modified Versions" - , "" - , "If you create software not governed by this License, and you want to" - , "create a new license for such software, you may create and use a" - , "modified version of this License if you rename the license and remove" - , "any references to the name of the license steward (except to note that" - , "such modified license differs from this License)." - , "" - , "10.4. Distributing Source Code Form that is Incompatible With Secondary" - , "Licenses" - , "" - , "If You choose to distribute Source Code Form that is Incompatible With" - , "Secondary Licenses under the terms of this version of the License, the" - , "notice described in Exhibit B of this License must be attached." - , "" - , "Exhibit A - Source Code Form License Notice" - , "-------------------------------------------" - , "" - , " This Source Code Form is subject to the terms of the Mozilla Public" - , " License, v. 2.0. If a copy of the MPL was not distributed with this" - , " file, You can obtain one at http://mozilla.org/MPL/2.0/." - , "" - , "If it is not possible or desirable to put the notice in a particular" - , "file, then You may include the notice in a location (such as a LICENSE" - , "file in a relevant directory) where a recipient would be likely to look" - , "for such a notice." - , "" - , "You may add additional accurate notices of copyright ownership." - , "" - , "Exhibit B - \"Incompatible With Secondary Licenses\" Notice" - , "---------------------------------------------------------" - , "" - , " This Source Code Form is \"Incompatible With Secondary Licenses\", as" - , " defined by the Mozilla Public License, v. 2.0." 
- ] - -isc :: String -> String -> License -isc authors year = unlines - [ "Copyright (c) " ++ year ++ " " ++ authors - , "" - , "Permission to use, copy, modify, and/or distribute this software for any purpose" - , "with or without fee is hereby granted, provided that the above copyright notice" - , "and this permission notice appear in all copies." - , "" - , "THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH" - , "REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND" - , "FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT," - , "INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS" - , "OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER" - , "TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF" - , "THIS SOFTWARE." - ] diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Init/Types.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Init/Types.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Init/Types.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Init/Types.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,170 +0,0 @@ -{-# LANGUAGE CPP #-} ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.Init.Types --- Copyright : (c) Brent Yorgey, Benedikt Huber 2009 --- License : BSD-like --- --- Maintainer : cabal-devel@haskell.org --- Stability : provisional --- Portability : portable --- --- Some types used by the 'cabal init' command. --- ------------------------------------------------------------------------------ -module Distribution.Client.Init.Types where - -import Distribution.Simple.Setup - ( Flag(..) 
) - -import Distribution.Version -import Distribution.Verbosity -import qualified Distribution.Package as P -import Distribution.License -import Distribution.ModuleName -import Language.Haskell.Extension ( Language(..), Extension ) - -import qualified Text.PrettyPrint as Disp -import qualified Distribution.Compat.ReadP as Parse -import Distribution.Text - -#if !MIN_VERSION_base(4,8,0) -import Data.Monoid -#endif - --- | InitFlags is really just a simple type to represent certain --- portions of a .cabal file. Rather than have a flag for EVERY --- possible field, we just have one for each field that the user is --- likely to want and/or that we are likely to be able to --- intelligently guess. -data InitFlags = - InitFlags { nonInteractive :: Flag Bool - , quiet :: Flag Bool - , packageDir :: Flag FilePath - , noComments :: Flag Bool - , minimal :: Flag Bool - - , packageName :: Flag P.PackageName - , version :: Flag Version - , cabalVersion :: Flag VersionRange - , license :: Flag License - , author :: Flag String - , email :: Flag String - , homepage :: Flag String - - , synopsis :: Flag String - , category :: Flag (Either String Category) - , extraSrc :: Maybe [String] - - , packageType :: Flag PackageType - , mainIs :: Flag FilePath - , language :: Flag Language - - , exposedModules :: Maybe [ModuleName] - , otherModules :: Maybe [ModuleName] - , otherExts :: Maybe [Extension] - - , dependencies :: Maybe [P.Dependency] - , sourceDirs :: Maybe [String] - , buildTools :: Maybe [String] - - , initVerbosity :: Flag Verbosity - , overwrite :: Flag Bool - } - deriving (Show) - - -- the Monoid instance for Flag has later values override earlier - -- ones, which is why we want Maybe [foo] for collecting foo values, - -- not Flag [foo]. - -data PackageType = Library | Executable - deriving (Show, Read, Eq) - -instance Text PackageType where - disp = Disp.text . show - parse = Parse.choice $ map (fmap read . Parse.string . 
show) [Library, Executable] - -instance Monoid InitFlags where - mempty = InitFlags - { nonInteractive = mempty - , quiet = mempty - , packageDir = mempty - , noComments = mempty - , minimal = mempty - , packageName = mempty - , version = mempty - , cabalVersion = mempty - , license = mempty - , author = mempty - , email = mempty - , homepage = mempty - , synopsis = mempty - , category = mempty - , extraSrc = mempty - , packageType = mempty - , mainIs = mempty - , language = mempty - , exposedModules = mempty - , otherModules = mempty - , otherExts = mempty - , dependencies = mempty - , sourceDirs = mempty - , buildTools = mempty - , initVerbosity = mempty - , overwrite = mempty - } - mappend a b = InitFlags - { nonInteractive = combine nonInteractive - , quiet = combine quiet - , packageDir = combine packageDir - , noComments = combine noComments - , minimal = combine minimal - , packageName = combine packageName - , version = combine version - , cabalVersion = combine cabalVersion - , license = combine license - , author = combine author - , email = combine email - , homepage = combine homepage - , synopsis = combine synopsis - , category = combine category - , extraSrc = combine extraSrc - , packageType = combine packageType - , mainIs = combine mainIs - , language = combine language - , exposedModules = combine exposedModules - , otherModules = combine otherModules - , otherExts = combine otherExts - , dependencies = combine dependencies - , sourceDirs = combine sourceDirs - , buildTools = combine buildTools - , initVerbosity = combine initVerbosity - , overwrite = combine overwrite - } - where combine field = field a `mappend` field b - --- | Some common package categories. 
-data Category - = Codec - | Concurrency - | Control - | Data - | Database - | Development - | Distribution - | Game - | Graphics - | Language - | Math - | Network - | Sound - | System - | Testing - | Text - | Web - deriving (Read, Show, Eq, Ord, Bounded, Enum) - -instance Text Category where - disp = Disp.text . show - parse = Parse.choice $ map (fmap read . Parse.string . show) [Codec .. ] - diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Init.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Init.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Init.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Init.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,864 +0,0 @@ -{-# LANGUAGE CPP #-} ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.Init --- Copyright : (c) Brent Yorgey 2009 --- License : BSD-like --- --- Maintainer : cabal-devel@haskell.org --- Stability : provisional --- Portability : portable --- --- Implementation of the 'cabal init' command, which creates an initial .cabal --- file for a project. --- ------------------------------------------------------------------------------ - -module Distribution.Client.Init ( - - -- * Commands - initCabal - - ) where - -import System.IO - ( hSetBuffering, stdout, BufferMode(..) 
) -import System.Directory - ( getCurrentDirectory, doesDirectoryExist, doesFileExist, copyFile - , getDirectoryContents, createDirectoryIfMissing ) -import System.FilePath - ( (), (<.>), takeBaseName ) -import Data.Time - ( getCurrentTime, utcToLocalTime, toGregorian, localDay, getCurrentTimeZone ) - -import Data.Char - ( toUpper ) -import Data.List - ( intercalate, nub, groupBy, (\\) ) -import Data.Maybe - ( fromMaybe, isJust, catMaybes, listToMaybe ) -import Data.Function - ( on ) -import qualified Data.Map as M -#if !MIN_VERSION_base(4,8,0) -import Data.Traversable - ( traverse ) -import Control.Applicative - ( (<$>) ) -#endif -import Control.Monad - ( when, unless, (>=>), join, forM_ ) -import Control.Arrow - ( (&&&), (***) ) - -import Text.PrettyPrint hiding (mode, cat) - -import Data.Version - ( Version(..) ) -import Distribution.Version - ( orLaterVersion, earlierVersion, intersectVersionRanges, VersionRange ) -import Distribution.Verbosity - ( Verbosity ) -import Distribution.ModuleName - ( ModuleName, fromString ) -- And for the Text instance -import Distribution.InstalledPackageInfo - ( InstalledPackageInfo, sourcePackageId, exposed ) -import qualified Distribution.Package as P -import Language.Haskell.Extension ( Language(..) ) - -import Distribution.Client.Init.Types - ( InitFlags(..), PackageType(..), Category(..) 
) -import Distribution.Client.Init.Licenses - ( bsd2, bsd3, gplv2, gplv3, lgpl21, lgpl3, agplv3, apache20, mit, mpl20, isc ) -import Distribution.Client.Init.Heuristics - ( guessPackageName, guessAuthorNameMail, guessMainFileCandidates, - SourceFileEntry(..), - scanForModules, neededBuildPrograms ) - -import Distribution.License - ( License(..), knownLicenses ) - -import Distribution.ReadE - ( runReadE, readP_to_E ) -import Distribution.Simple.Setup - ( Flag(..), flagToMaybe ) -import Distribution.Simple.Configure - ( getInstalledPackages ) -import Distribution.Simple.Compiler - ( PackageDBStack, Compiler ) -import Distribution.Simple.Program - ( ProgramConfiguration ) -import Distribution.Simple.PackageIndex - ( InstalledPackageIndex, moduleNameIndex ) -import Distribution.Text - ( display, Text(..) ) - -initCabal :: Verbosity - -> PackageDBStack - -> Compiler - -> ProgramConfiguration - -> InitFlags - -> IO () -initCabal verbosity packageDBs comp conf initFlags = do - - installedPkgIndex <- getInstalledPackages verbosity comp packageDBs conf - - hSetBuffering stdout NoBuffering - - initFlags' <- extendFlags installedPkgIndex initFlags - - writeLicense initFlags' - writeSetupFile initFlags' - createSourceDirectories initFlags' - success <- writeCabalFile initFlags' - - when success $ generateWarnings initFlags' - ---------------------------------------------------------------------------- --- Flag acquisition ----------------------------------------------------- ---------------------------------------------------------------------------- - --- | Fill in more details by guessing, discovering, or prompting the --- user. 
-extendFlags :: InstalledPackageIndex -> InitFlags -> IO InitFlags -extendFlags pkgIx = - getPackageName - >=> getVersion - >=> getLicense - >=> getAuthorInfo - >=> getHomepage - >=> getSynopsis - >=> getCategory - >=> getExtraSourceFiles - >=> getLibOrExec - >=> getLanguage - >=> getGenComments - >=> getSrcDir - >=> getModulesBuildToolsAndDeps pkgIx - --- | Combine two actions which may return a value, preferring the first. That --- is, run the second action only if the first doesn't return a value. -infixr 1 ?>> -(?>>) :: IO (Maybe a) -> IO (Maybe a) -> IO (Maybe a) -f ?>> g = do - ma <- f - if isJust ma - then return ma - else g - --- | Witness the isomorphism between Maybe and Flag. -maybeToFlag :: Maybe a -> Flag a -maybeToFlag = maybe NoFlag Flag - --- | Get the package name: use the package directory (supplied, or the current --- directory by default) as a guess. -getPackageName :: InitFlags -> IO InitFlags -getPackageName flags = do - guess <- traverse guessPackageName (flagToMaybe $ packageDir flags) - ?>> Just `fmap` (getCurrentDirectory >>= guessPackageName) - - pkgName' <- return (flagToMaybe $ packageName flags) - ?>> maybePrompt flags (prompt "Package name" guess) - ?>> return guess - - return $ flags { packageName = maybeToFlag pkgName' } - --- | Package version: use 0.1.0.0 as a last resort, but try prompting the user --- if possible. -getVersion :: InitFlags -> IO InitFlags -getVersion flags = do - let v = Just $ Version [0,1,0,0] [] - v' <- return (flagToMaybe $ version flags) - ?>> maybePrompt flags (prompt "Package version" v) - ?>> return v - return $ flags { version = maybeToFlag v' } - --- | Choose a license. -getLicense :: InitFlags -> IO InitFlags -getLicense flags = do - lic <- return (flagToMaybe $ license flags) - ?>> fmap (fmap (either UnknownLicense id) . 
join) - (maybePrompt flags - (promptListOptional "Please choose a license" listedLicenses)) - return $ flags { license = maybeToFlag lic } - where - listedLicenses = - knownLicenses \\ [GPL Nothing, LGPL Nothing, AGPL Nothing - , Apache Nothing, OtherLicense] - --- | The author's name and email. Prompt, or try to guess from an existing --- darcs repo. -getAuthorInfo :: InitFlags -> IO InitFlags -getAuthorInfo flags = do - (authorName, authorEmail) <- - (flagToMaybe *** flagToMaybe) `fmap` guessAuthorNameMail - authorName' <- return (flagToMaybe $ author flags) - ?>> maybePrompt flags (promptStr "Author name" authorName) - ?>> return authorName - - authorEmail' <- return (flagToMaybe $ email flags) - ?>> maybePrompt flags (promptStr "Maintainer email" authorEmail) - ?>> return authorEmail - - return $ flags { author = maybeToFlag authorName' - , email = maybeToFlag authorEmail' - } - --- | Prompt for a homepage URL. -getHomepage :: InitFlags -> IO InitFlags -getHomepage flags = do - hp <- queryHomepage - hp' <- return (flagToMaybe $ homepage flags) - ?>> maybePrompt flags (promptStr "Project homepage URL" hp) - ?>> return hp - - return $ flags { homepage = maybeToFlag hp' } - --- | Right now this does nothing, but it could be changed to do some --- intelligent guessing. -queryHomepage :: IO (Maybe String) -queryHomepage = return Nothing -- get default remote darcs repo? - --- | Prompt for a project synopsis. -getSynopsis :: InitFlags -> IO InitFlags -getSynopsis flags = do - syn <- return (flagToMaybe $ synopsis flags) - ?>> maybePrompt flags (promptStr "Project synopsis" Nothing) - - return $ flags { synopsis = maybeToFlag syn } - --- | Prompt for a package category. --- Note that it should be possible to do some smarter guessing here too, i.e. --- look at the name of the top level source directory. 
-getCategory :: InitFlags -> IO InitFlags -getCategory flags = do - cat <- return (flagToMaybe $ category flags) - ?>> fmap join (maybePrompt flags - (promptListOptional "Project category" [Codec ..])) - return $ flags { category = maybeToFlag cat } - --- | Try to guess extra source files (don't prompt the user). -getExtraSourceFiles :: InitFlags -> IO InitFlags -getExtraSourceFiles flags = do - extraSrcFiles <- return (extraSrc flags) - ?>> Just `fmap` guessExtraSourceFiles flags - - return $ flags { extraSrc = extraSrcFiles } - --- | Try to guess things to include in the extra-source-files field. --- For now, we just look for things in the root directory named --- 'readme', 'changes', or 'changelog', with any sort of --- capitalization and any extension. -guessExtraSourceFiles :: InitFlags -> IO [FilePath] -guessExtraSourceFiles flags = do - dir <- - maybe getCurrentDirectory return . flagToMaybe $ packageDir flags - files <- getDirectoryContents dir - return $ filter isExtra files - - where - isExtra = (`elem` ["README", "CHANGES", "CHANGELOG"]) - . map toUpper - . takeBaseName - --- | Ask whether the project builds a library or executable. -getLibOrExec :: InitFlags -> IO InitFlags -getLibOrExec flags = do - isLib <- return (flagToMaybe $ packageType flags) - ?>> maybePrompt flags (either (const Library) id `fmap` - promptList "What does the package build" - [Library, Executable] - Nothing display False) - ?>> return (Just Library) - mainFile <- if isLib /= Just Executable then return Nothing else - getMainFile flags - - return $ flags { packageType = maybeToFlag isLib - , mainIs = maybeToFlag mainFile - } - --- | Try to guess the main file of the executable, and prompt the user to choose --- one of them. Top-level modules including the word 'Main' in the file name --- will be candidates, and shorter filenames will be preferred. 
-getMainFile :: InitFlags -> IO (Maybe FilePath) -getMainFile flags = - return (flagToMaybe $ mainIs flags) - ?>> do - candidates <- guessMainFileCandidates flags - let showCandidate = either (++" (does not yet exist)") id - defaultFile = listToMaybe candidates - maybePrompt flags (either id (either id id) `fmap` - promptList "What is the main module of the executable" - candidates - defaultFile showCandidate True) - ?>> return (fmap (either id id) defaultFile) - --- | Ask for the base language of the package. -getLanguage :: InitFlags -> IO InitFlags -getLanguage flags = do - lang <- return (flagToMaybe $ language flags) - ?>> maybePrompt flags - (either UnknownLanguage id `fmap` - promptList "What base language is the package written in" - [Haskell2010, Haskell98] - (Just Haskell2010) display True) - ?>> return (Just Haskell2010) - - return $ flags { language = maybeToFlag lang } - --- | Ask whether to generate explanatory comments. -getGenComments :: InitFlags -> IO InitFlags -getGenComments flags = do - genComments <- return (not <$> flagToMaybe (noComments flags)) - ?>> maybePrompt flags (promptYesNo promptMsg (Just False)) - ?>> return (Just False) - return $ flags { noComments = maybeToFlag (fmap not genComments) } - where - promptMsg = "Include documentation on what each field means (y/n)" - --- | Ask for the source root directory. -getSrcDir :: InitFlags -> IO InitFlags -getSrcDir flags = do - srcDirs <- return (sourceDirs flags) - ?>> fmap (:[]) `fmap` guessSourceDir flags - ?>> fmap (fmap ((:[]) . either id id) . join) (maybePrompt - flags - (promptListOptional' "Source directory" ["src"] id)) - - return $ flags { sourceDirs = srcDirs } - --- | Try to guess source directory. Could try harder; for the --- moment just looks to see whether there is a directory called 'src'. -guessSourceDir :: InitFlags -> IO (Maybe String) -guessSourceDir flags = do - dir <- - maybe getCurrentDirectory return . 
flagToMaybe $ packageDir flags - srcIsDir <- doesDirectoryExist (dir "src") - return $ if srcIsDir - then Just "src" - else Nothing - --- | Get the list of exposed modules and extra tools needed to build them. -getModulesBuildToolsAndDeps :: InstalledPackageIndex -> InitFlags -> IO InitFlags -getModulesBuildToolsAndDeps pkgIx flags = do - dir <- maybe getCurrentDirectory return . flagToMaybe $ packageDir flags - - -- XXX really should use guessed source roots. - sourceFiles <- scanForModules dir - - Just mods <- return (exposedModules flags) - ?>> (return . Just . map moduleName $ sourceFiles) - - tools <- return (buildTools flags) - ?>> (return . Just . neededBuildPrograms $ sourceFiles) - - deps <- return (dependencies flags) - ?>> Just <$> importsToDeps flags - (fromString "Prelude" : -- to ensure we get base as a dep - ( nub -- only need to consider each imported package once - . filter (`notElem` mods) -- don't consider modules from - -- this package itself - . concatMap imports - $ sourceFiles - ) - ) - pkgIx - - exts <- return (otherExts flags) - ?>> (return . Just . nub . concatMap extensions $ sourceFiles) - - return $ flags { exposedModules = Just mods - , buildTools = tools - , dependencies = deps - , otherExts = exts - } - -importsToDeps :: InitFlags -> [ModuleName] -> InstalledPackageIndex -> IO [P.Dependency] -importsToDeps flags mods pkgIx = do - - let modMap :: M.Map ModuleName [InstalledPackageInfo] - modMap = M.map (filter exposed) $ moduleNameIndex pkgIx - - modDeps :: [(ModuleName, Maybe [InstalledPackageInfo])] - modDeps = map (id &&& flip M.lookup modMap) mods - - message flags "\nGuessing dependencies..." - nub . catMaybes <$> mapM (chooseDep flags) modDeps - --- Given a module and a list of installed packages providing it, --- choose a dependency (i.e. package + version range) to use for that --- module. 
-chooseDep :: InitFlags -> (ModuleName, Maybe [InstalledPackageInfo]) - -> IO (Maybe P.Dependency) - -chooseDep flags (m, Nothing) - = message flags ("\nWarning: no package found providing " ++ display m ++ ".") - >> return Nothing - -chooseDep flags (m, Just []) - = message flags ("\nWarning: no package found providing " ++ display m ++ ".") - >> return Nothing - - -- We found some packages: group them by name. -chooseDep flags (m, Just ps) - = case pkgGroups of - -- if there's only one group, i.e. multiple versions of a single package, - -- we make it into a dependency, choosing the latest-ish version (see toDep). - [grp] -> Just <$> toDep grp - -- otherwise, we refuse to choose between different packages and make the user - -- do it. - grps -> do message flags ("\nWarning: multiple packages found providing " - ++ display m - ++ ": " ++ intercalate ", " (map (display . P.pkgName . head) grps)) - message flags "You will need to pick one and manually add it to the Build-depends: field." - return Nothing - where - pkgGroups = groupBy ((==) `on` P.pkgName) (map sourcePackageId ps) - - -- Given a list of available versions of the same package, pick a dependency. - toDep :: [P.PackageIdentifier] -> IO P.Dependency - - -- If only one version, easy. We change e.g. 0.4.2 into 0.4.* - toDep [pid] = return $ P.Dependency (P.pkgName pid) (pvpize . P.pkgVersion $ pid) - - -- Otherwise, choose the latest version and issue a warning. - toDep pids = do - message flags ("\nWarning: multiple versions of " ++ display (P.pkgName . head $ pids) ++ " provide " ++ display m ++ ", choosing the latest.") - return $ P.Dependency (P.pkgName . head $ pids) - (pvpize . maximum . 
map P.pkgVersion $ pids) - - pvpize :: Version -> VersionRange - pvpize v = orLaterVersion v' - `intersectVersionRanges` - earlierVersion (incVersion 1 v') - where v' = (v { versionBranch = take 2 (versionBranch v) }) - -incVersion :: Int -> Version -> Version -incVersion n (Version vlist tags) = Version (incVersion' n vlist) tags - where - incVersion' 0 [] = [1] - incVersion' 0 (v:_) = [v+1] - incVersion' m [] = replicate m 0 ++ [1] - incVersion' m (v:vs) = v : incVersion' (m-1) vs - ---------------------------------------------------------------------------- --- Prompting/user interaction ------------------------------------------- ---------------------------------------------------------------------------- - --- | Run a prompt or not based on the nonInteractive flag of the --- InitFlags structure. -maybePrompt :: InitFlags -> IO t -> IO (Maybe t) -maybePrompt flags p = - case nonInteractive flags of - Flag True -> return Nothing - _ -> Just `fmap` p - --- | Create a prompt with optional default value that returns a --- String. -promptStr :: String -> Maybe String -> IO String -promptStr = promptDefault' Just id - --- | Create a yes/no prompt with optional default value. --- -promptYesNo :: String -> Maybe Bool -> IO Bool -promptYesNo = - promptDefault' recogniseYesNo showYesNo - where - recogniseYesNo s | s == "y" || s == "Y" = Just True - | s == "n" || s == "N" = Just False - | otherwise = Nothing - showYesNo True = "y" - showYesNo False = "n" - --- | Create a prompt with optional default value that returns a value --- of some Text instance. -prompt :: Text t => String -> Maybe t -> IO t -prompt = promptDefault' - (either (const Nothing) Just . runReadE (readP_to_E id parse)) - display - --- | Create a prompt with an optional default value. 
-promptDefault' :: (String -> Maybe t) -- ^ parser - -> (t -> String) -- ^ pretty-printer - -> String -- ^ prompt message - -> Maybe t -- ^ optional default value - -> IO t -promptDefault' parser pretty pr def = do - putStr $ mkDefPrompt pr (pretty `fmap` def) - inp <- getLine - case (inp, def) of - ("", Just d) -> return d - _ -> case parser inp of - Just t -> return t - Nothing -> do putStrLn $ "Couldn't parse " ++ inp ++ ", please try again!" - promptDefault' parser pretty pr def - --- | Create a prompt from a prompt string and a String representation --- of an optional default value. -mkDefPrompt :: String -> Maybe String -> String -mkDefPrompt pr def = pr ++ "?" ++ defStr def - where defStr Nothing = " " - defStr (Just s) = " [default: " ++ s ++ "] " - -promptListOptional :: (Text t, Eq t) - => String -- ^ prompt - -> [t] -- ^ choices - -> IO (Maybe (Either String t)) -promptListOptional pr choices = promptListOptional' pr choices display - -promptListOptional' :: Eq t - => String -- ^ prompt - -> [t] -- ^ choices - -> (t -> String) -- ^ show an item - -> IO (Maybe (Either String t)) -promptListOptional' pr choices displayItem = - fmap rearrange - $ promptList pr (Nothing : map Just choices) (Just Nothing) - (maybe "(none)" displayItem) True - where - rearrange = either (Just . Left) (fmap Right) - --- | Create a prompt from a list of items. -promptList :: Eq t - => String -- ^ prompt - -> [t] -- ^ choices - -> Maybe t -- ^ optional default value - -> (t -> String) -- ^ show an item - -> Bool -- ^ whether to allow an 'other' option - -> IO (Either String t) -promptList pr choices def displayItem other = do - putStrLn $ pr ++ ":" - let options1 = map (\c -> (Just c == def, displayItem c)) choices - options2 = zip ([1..]::[Int]) - (options1 ++ [(False, "Other (specify)") | other]) - mapM_ (putStrLn . 
\(n,(i,s)) -> showOption n i ++ s) options2 - promptList' displayItem (length options2) choices def other - where showOption n i | n < 10 = " " ++ star i ++ " " ++ rest - | otherwise = " " ++ star i ++ rest - where rest = show n ++ ") " - star True = "*" - star False = " " - -promptList' :: (t -> String) -> Int -> [t] -> Maybe t -> Bool -> IO (Either String t) -promptList' displayItem numChoices choices def other = do - putStr $ mkDefPrompt "Your choice" (displayItem `fmap` def) - inp <- getLine - case (inp, def) of - ("", Just d) -> return $ Right d - _ -> case readMaybe inp of - Nothing -> invalidChoice inp - Just n -> getChoice n - where invalidChoice inp = do putStrLn $ inp ++ " is not a valid choice." - promptList' displayItem numChoices choices def other - getChoice n | n < 1 || n > numChoices = invalidChoice (show n) - | n < numChoices || - (n == numChoices && not other) - = return . Right $ choices !! (n-1) - | otherwise = Left `fmap` promptStr "Please specify" Nothing - -readMaybe :: (Read a) => String -> Maybe a -readMaybe s = case reads s of - [(a,"")] -> Just a - _ -> Nothing - ---------------------------------------------------------------------------- --- File generation ------------------------------------------------------ ---------------------------------------------------------------------------- - -writeLicense :: InitFlags -> IO () -writeLicense flags = do - message flags "\nGenerating LICENSE..." - year <- show <$> getYear - let authors = fromMaybe "???" . flagToMaybe . 
author $ flags - let licenseFile = - case license flags of - Flag BSD2 - -> Just $ bsd2 authors year - - Flag BSD3 - -> Just $ bsd3 authors year - - Flag (GPL (Just (Version {versionBranch = [2]}))) - -> Just gplv2 - - Flag (GPL (Just (Version {versionBranch = [3]}))) - -> Just gplv3 - - Flag (LGPL (Just (Version {versionBranch = [2, 1]}))) - -> Just lgpl21 - - Flag (LGPL (Just (Version {versionBranch = [3]}))) - -> Just lgpl3 - - Flag (AGPL (Just (Version {versionBranch = [3]}))) - -> Just agplv3 - - Flag (Apache (Just (Version {versionBranch = [2, 0]}))) - -> Just apache20 - - Flag MIT - -> Just $ mit authors year - - Flag (MPL (Version {versionBranch = [2, 0]})) - -> Just mpl20 - - Flag ISC - -> Just $ isc authors year - - _ -> Nothing - - case licenseFile of - Just licenseText -> writeFileSafe flags "LICENSE" licenseText - Nothing -> message flags "Warning: unknown license type, you must put a copy in LICENSE yourself." - -getYear :: IO Integer -getYear = do - u <- getCurrentTime - z <- getCurrentTimeZone - let l = utcToLocalTime z u - (y, _, _) = toGregorian $ localDay l - return y - -writeSetupFile :: InitFlags -> IO () -writeSetupFile flags = do - message flags "Generating Setup.hs..." - writeFileSafe flags "Setup.hs" setupFile - where - setupFile = unlines - [ "import Distribution.Simple" - , "main = defaultMain" - ] - -writeCabalFile :: InitFlags -> IO Bool -writeCabalFile flags@(InitFlags{packageName = NoFlag}) = do - message flags "Error: no package name provided." - return False -writeCabalFile flags@(InitFlags{packageName = Flag p}) = do - let cabalFileName = display p ++ ".cabal" - message flags $ "Generating " ++ cabalFileName ++ "..." - writeFileSafe flags cabalFileName (generateCabalFile cabalFileName flags) - return True - --- | Write a file \"safely\", backing up any existing version (unless --- the overwrite flag is set). 
-writeFileSafe :: InitFlags -> FilePath -> String -> IO () -writeFileSafe flags fileName content = do - moveExistingFile flags fileName - writeFile fileName content - --- | Create source directories, if they were given. -createSourceDirectories :: InitFlags -> IO () -createSourceDirectories flags = case sourceDirs flags of - Just dirs -> forM_ dirs (createDirectoryIfMissing True) - Nothing -> return () - --- | Move an existing file, if there is one, and the overwrite flag is --- not set. -moveExistingFile :: InitFlags -> FilePath -> IO () -moveExistingFile flags fileName = - unless (overwrite flags == Flag True) $ do - e <- doesFileExist fileName - when e $ do - newName <- findNewName fileName - message flags $ "Warning: " ++ fileName ++ " already exists, backing up old version in " ++ newName - copyFile fileName newName - -findNewName :: FilePath -> IO FilePath -findNewName oldName = findNewName' 0 - where - findNewName' :: Integer -> IO FilePath - findNewName' n = do - let newName = oldName <.> ("save" ++ show n) - e <- doesFileExist newName - if e then findNewName' (n+1) else return newName - --- | Generate a .cabal file from an InitFlags structure. NOTE: this --- is rather ad-hoc! What we would REALLY like is to have a --- standard low-level AST type representing .cabal files, which --- preserves things like comments, and to write an *inverse* --- parser/pretty-printer pair between .cabal files and this AST. --- Then instead of this ad-hoc code we could just map an InitFlags --- structure onto a low-level AST structure and use the existing --- pretty-printing code to generate the file. -generateCabalFile :: String -> InitFlags -> String -generateCabalFile fileName c = - renderStyle style { lineLength = 79, ribbonsPerLine = 1.1 } $ - (if minimal c /= Flag True - then showComment (Just $ "Initial " ++ fileName ++ " generated by cabal " - ++ "init. 
For further documentation, see " - ++ "http://haskell.org/cabal/users-guide/") - $$ text "" - else empty) - $$ - vcat [ field "name" (packageName c) - (Just "The name of the package.") - True - - , field "version" (version c) - (Just $ "The package version. See the Haskell package versioning policy (PVP) for standards guiding when and how versions should be incremented.\nhttp://www.haskell.org/haskellwiki/Package_versioning_policy\n" - ++ "PVP summary: +-+------- breaking API changes\n" - ++ " | | +----- non-breaking API additions\n" - ++ " | | | +--- code changes with no API change") - True - - , fieldS "synopsis" (synopsis c) - (Just "A short (one-line) description of the package.") - True - - , fieldS "description" NoFlag - (Just "A longer description of the package.") - True - - , fieldS "homepage" (homepage c) - (Just "URL for the project homepage or repository.") - False - - , fieldS "bug-reports" NoFlag - (Just "A URL where users can report bugs.") - False - - , field "license" (license c) - (Just "The license under which the package is released.") - True - - , fieldS "license-file" (Flag "LICENSE") - (Just "The file containing the license text.") - True - - , fieldS "author" (author c) - (Just "The package author(s).") - True - - , fieldS "maintainer" (email c) - (Just "An email address to which users can send suggestions, bug reports, and patches.") - True - - , fieldS "copyright" NoFlag - (Just "A copyright notice.") - True - - , fieldS "category" (either id display `fmap` category c) - Nothing - True - - , fieldS "build-type" (Flag "Simple") - Nothing - True - - , fieldS "extra-source-files" (listFieldS (extraSrc c)) - (Just "Extra files to be distributed with the package, such as examples or a README.") - True - - , field "cabal-version" (Flag $ orLaterVersion (Version [1,10] [])) - (Just "Constraint on the version of Cabal needed to build this package.") - False - - , case packageType c of - Flag Executable -> - text "\nexecutable" <+> - text (maybe "" 
display . flagToMaybe $ packageName c) $$ - nest 2 (vcat - [ fieldS "main-is" (mainIs c) (Just ".hs or .lhs file containing the Main module.") True - - , generateBuildInfo Executable c - ]) - Flag Library -> text "\nlibrary" $$ nest 2 (vcat - [ fieldS "exposed-modules" (listField (exposedModules c)) - (Just "Modules exported by the library.") - True - - , generateBuildInfo Library c - ]) - _ -> empty - ] - where - generateBuildInfo :: PackageType -> InitFlags -> Doc - generateBuildInfo pkgtype c' = vcat - [ fieldS "other-modules" (listField (otherModules c')) - (Just $ case pkgtype of - Library -> "Modules included in this library but not exported." - Executable -> "Modules included in this executable, other than Main.") - True - - , fieldS "other-extensions" (listField (otherExts c')) - (Just "LANGUAGE extensions used by modules in this package.") - True - - , fieldS "build-depends" (listField (dependencies c')) - (Just "Other library packages from which modules are imported.") - True - - , fieldS "hs-source-dirs" (listFieldS (sourceDirs c')) - (Just "Directories containing source files.") - True - - , fieldS "build-tools" (listFieldS (buildTools c')) - (Just "Extra tools (e.g. alex, hsc2hs, ...) needed to build the source.") - False - - , field "default-language" (language c') - (Just "Base language which the package is written in.") - True - ] - - listField :: Text s => Maybe [s] -> Flag String - listField = listFieldS . fmap (map display) - - listFieldS :: Maybe [String] -> Flag String - listFieldS = Flag . maybe "" (intercalate ", ") - - field :: Text t => String -> Flag t -> Maybe String -> Bool -> Doc - field s f = fieldS s (fmap display f) - - fieldS :: String -- ^ Name of the field - -> Flag String -- ^ Field contents - -> Maybe String -- ^ Comment to explain the field - -> Bool -- ^ Should the field be included (commented out) even if blank? 
- -> Doc - fieldS _ NoFlag _ inc | not inc || (minimal c == Flag True) = empty - fieldS _ (Flag "") _ inc | not inc || (minimal c == Flag True) = empty - fieldS s f com _ = case (isJust com, noComments c, minimal c) of - (_, _, Flag True) -> id - (_, Flag True, _) -> id - (True, _, _) -> (showComment com $$) . ($$ text "") - (False, _, _) -> ($$ text "") - $ - comment f <> text s <> colon - <> text (replicate (20 - length s) ' ') - <> text (fromMaybe "" . flagToMaybe $ f) - comment NoFlag = text "-- " - comment (Flag "") = text "-- " - comment _ = text "" - - showComment :: Maybe String -> Doc - showComment (Just t) = vcat - . map (text . ("-- "++)) . lines - . renderStyle style { - lineLength = 76, - ribbonsPerLine = 1.05 - } - . vcat - . map (fcat . map text . breakLine) - . lines - $ t - showComment Nothing = text "" - - breakLine [] = [] - breakLine cs = case break (==' ') cs of (w,cs') -> w : breakLine' cs' - breakLine' [] = [] - breakLine' cs = case span (==' ') cs of (w,cs') -> w : breakLine cs' - --- | Generate warnings for missing fields etc. -generateWarnings :: InitFlags -> IO () -generateWarnings flags = do - message flags "" - when (synopsis flags `elem` [NoFlag, Flag ""]) - (message flags "Warning: no synopsis given. You should edit the .cabal file and add one.") - - message flags "You may want to edit the .cabal file and add a Description field." - --- | Possibly generate a message to stdout, taking into account the --- --quiet flag. 
-message :: InitFlags -> String -> IO () -message (InitFlags{quiet = Flag True}) _ = return () -message _ s = putStrLn s diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Install.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Install.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Install.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Install.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,1565 +0,0 @@ -{-# LANGUAGE CPP #-} - ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.Install --- Copyright : (c) 2005 David Himmelstrup --- 2007 Bjorn Bringert --- 2007-2010 Duncan Coutts --- License : BSD-like --- --- Maintainer : cabal-devel@haskell.org --- Stability : provisional --- Portability : portable --- --- High level interface to package installation. ------------------------------------------------------------------------------ -module Distribution.Client.Install ( - -- * High-level interface - install, - - -- * Lower-level interface that allows to manipulate the install plan - makeInstallContext, - makeInstallPlan, - processInstallPlan, - InstallArgs, - InstallContext, - - -- * Prune certain packages from the install plan - pruneInstallPlan - ) where - -import Data.List - ( isPrefixOf, unfoldr, nub, sort, (\\) ) -import qualified Data.Map as Map -import qualified Data.Set as S -import Data.Maybe - ( isJust, fromMaybe, mapMaybe, maybeToList ) -import Control.Exception as Exception - ( Exception(toException), bracket, catches - , Handler(Handler), handleJust, IOException, SomeException ) -#ifndef mingw32_HOST_OS -import Control.Exception as Exception - ( Exception(fromException) ) -#endif -import System.Exit - ( ExitCode(..) 
) -import Distribution.Compat.Exception - ( catchIO, catchExit ) -#if !MIN_VERSION_base(4,8,0) -import Control.Applicative - ( (<$>) ) -#endif -import Control.Monad - ( forM_, when, unless ) -import System.Directory - ( getTemporaryDirectory, doesDirectoryExist, doesFileExist, - createDirectoryIfMissing, removeFile, renameDirectory ) -import System.FilePath - ( (), (<.>), equalFilePath, takeDirectory ) -import System.IO - ( openFile, IOMode(AppendMode), hClose ) -import System.IO.Error - ( isDoesNotExistError, ioeGetFileName ) - -import Distribution.Client.Targets -import Distribution.Client.Configure - ( chooseCabalVersion ) -import Distribution.Client.Dependency -import Distribution.Client.Dependency.Types - ( Solver(..) ) -import Distribution.Client.FetchUtils -import qualified Distribution.Client.Haddock as Haddock (regenerateHaddockIndex) -import Distribution.Client.IndexUtils as IndexUtils - ( getSourcePackages, getInstalledPackages ) -import qualified Distribution.Client.InstallPlan as InstallPlan -import Distribution.Client.InstallPlan (InstallPlan) -import Distribution.Client.Setup - ( GlobalFlags(..) - , ConfigFlags(..), configureCommand, filterConfigureFlags - , ConfigExFlags(..), InstallFlags(..) ) -import Distribution.Client.Config - ( defaultCabalDir, defaultUserInstall ) -import Distribution.Client.Sandbox.Timestamp - ( withUpdateTimestamps ) -import Distribution.Client.Sandbox.Types - ( SandboxPackageInfo(..), UseSandbox(..), isUseSandbox - , whenUsingSandbox ) -import Distribution.Client.Tar (extractTarGzFile) -import Distribution.Client.Types as Source -import Distribution.Client.BuildReports.Types - ( ReportLevel(..) 
) -import Distribution.Client.SetupWrapper - ( setupWrapper, SetupScriptOptions(..), defaultSetupScriptOptions ) -import qualified Distribution.Client.BuildReports.Anonymous as BuildReports -import qualified Distribution.Client.BuildReports.Storage as BuildReports - ( storeAnonymous, storeLocal, fromInstallPlan, fromPlanningFailure ) -import qualified Distribution.Client.InstallSymlink as InstallSymlink - ( symlinkBinaries ) -import qualified Distribution.Client.PackageIndex as SourcePackageIndex -import qualified Distribution.Client.Win32SelfUpgrade as Win32SelfUpgrade -import qualified Distribution.Client.World as World -import qualified Distribution.InstalledPackageInfo as Installed -import Distribution.Client.Compat.ExecutablePath -import Distribution.Client.JobControl - -import Distribution.Utils.NubList -import Distribution.Simple.Compiler - ( CompilerId(..), Compiler(compilerId), compilerFlavor - , CompilerInfo(..), compilerInfo, PackageDB(..), PackageDBStack ) -import Distribution.Simple.Program (ProgramConfiguration, - defaultProgramConfiguration) -import qualified Distribution.Simple.InstallDirs as InstallDirs -import qualified Distribution.Simple.PackageIndex as PackageIndex -import Distribution.Simple.PackageIndex (InstalledPackageIndex) -import Distribution.Simple.Setup - ( haddockCommand, HaddockFlags(..) - , buildCommand, BuildFlags(..), emptyBuildFlags - , toFlag, fromFlag, fromFlagOrDefault, flagToMaybe, defaultDistPref ) -import qualified Distribution.Simple.Setup as Cabal - ( Flag(..) 
- , copyCommand, CopyFlags(..), emptyCopyFlags - , registerCommand, RegisterFlags(..), emptyRegisterFlags - , testCommand, TestFlags(..), emptyTestFlags ) -import Distribution.Simple.Utils - ( createDirectoryIfMissingVerbose, rawSystemExit, comparing - , writeFileAtomic, withTempFile , withUTF8FileContents ) -import Distribution.Simple.InstallDirs as InstallDirs - ( PathTemplate, fromPathTemplate, toPathTemplate, substPathTemplate - , initialPathTemplateEnv, installDirsTemplateEnv ) -import Distribution.Package - ( PackageIdentifier(..), PackageId, packageName, packageVersion - , Package(..), PackageFixedDeps(..), PackageKey - , Dependency(..), thisPackageVersion, InstalledPackageId, installedPackageId ) -import qualified Distribution.PackageDescription as PackageDescription -import Distribution.PackageDescription - ( PackageDescription, GenericPackageDescription(..), Flag(..) - , FlagName(..), FlagAssignment ) -import Distribution.PackageDescription.Configuration - ( finalizePackageDescription ) -import Distribution.ParseUtils - ( showPWarning ) -import Distribution.Version - ( Version, VersionRange, foldVersionRange ) -import Distribution.Simple.Utils as Utils - ( notice, info, warn, debug, debugNoWrap, die - , intercalate, withTempDirectory ) -import Distribution.Client.Utils - ( determineNumJobs, inDir, mergeBy, MergeResult(..) 
- , tryCanonicalizePath ) -import Distribution.System - ( Platform, OS(Windows), buildOS ) -import Distribution.Text - ( display ) -import Distribution.Verbosity as Verbosity - ( Verbosity, showForCabal, normal, verbose ) -import Distribution.Simple.BuildPaths ( exeExtension ) - ---TODO: --- * assign flags to packages individually --- * complain about flags that do not apply to any package given as target --- so flags do not apply to dependencies, only listed, can use flag --- constraints for dependencies --- * only record applicable flags in world file --- * allow flag constraints --- * allow installed constraints --- * allow flag and installed preferences --- * change world file to use cabal section syntax --- * allow persistent configure flags for each package individually - --- ------------------------------------------------------------ --- * Top level user actions --- ------------------------------------------------------------ - --- | Installs the packages needed to satisfy a list of dependencies. --- -install - :: Verbosity - -> PackageDBStack - -> [Repo] - -> Compiler - -> Platform - -> ProgramConfiguration - -> UseSandbox - -> Maybe SandboxPackageInfo - -> GlobalFlags - -> ConfigFlags - -> ConfigExFlags - -> InstallFlags - -> HaddockFlags - -> [UserTarget] - -> IO () -install verbosity packageDBs repos comp platform conf useSandbox mSandboxPkgInfo - globalFlags configFlags configExFlags installFlags haddockFlags - userTargets0 = do - - installContext <- makeInstallContext verbosity args (Just userTargets0) - planResult <- foldProgress logMsg (return . Left) (return . 
Right) =<< - makeInstallPlan verbosity args installContext - - case planResult of - Left message -> do - reportPlanningFailure verbosity args installContext message - die' message - Right installPlan -> - processInstallPlan verbosity args installContext installPlan - where - args :: InstallArgs - args = (packageDBs, repos, comp, platform, conf, useSandbox, mSandboxPkgInfo, - globalFlags, configFlags, configExFlags, installFlags, - haddockFlags) - - die' message = die (message ++ if isUseSandbox useSandbox - then installFailedInSandbox else []) - -- TODO: use a better error message, remove duplication. - installFailedInSandbox = - "\nNote: when using a sandbox, all packages are required to have " - ++ "consistent dependencies. " - ++ "Try reinstalling/unregistering the offending packages or " - ++ "recreating the sandbox." - logMsg message rest = debugNoWrap verbosity message >> rest - --- TODO: Make InstallContext a proper data type with documented fields. --- | Common context for makeInstallPlan and processInstallPlan. -type InstallContext = ( InstalledPackageIndex, SourcePackageDb - , [UserTarget], [PackageSpecifier SourcePackage] ) - --- TODO: Make InstallArgs a proper data type with documented fields or just get --- rid of it completely. --- | Initial arguments given to 'install' or 'makeInstallContext'. -type InstallArgs = ( PackageDBStack - , [Repo] - , Compiler - , Platform - , ProgramConfiguration - , UseSandbox - , Maybe SandboxPackageInfo - , GlobalFlags - , ConfigFlags - , ConfigExFlags - , InstallFlags - , HaddockFlags ) - --- | Make an install context given install arguments. 
-makeInstallContext :: Verbosity -> InstallArgs -> Maybe [UserTarget] - -> IO InstallContext -makeInstallContext verbosity - (packageDBs, repos, comp, _, conf,_,_, - globalFlags, _, _, _, _) mUserTargets = do - - installedPkgIndex <- getInstalledPackages verbosity comp packageDBs conf - sourcePkgDb <- getSourcePackages verbosity repos - - (userTargets, pkgSpecifiers) <- case mUserTargets of - Nothing -> - -- We want to distinguish between the case where the user has given an - -- empty list of targets on the command-line and the case where we - -- specifically want to have an empty list of targets. - return ([], []) - Just userTargets0 -> do - -- For install, if no target is given it means we use the current - -- directory as the single target. - let userTargets | null userTargets0 = [UserTargetLocalDir "."] - | otherwise = userTargets0 - - pkgSpecifiers <- resolveUserTargets verbosity - (fromFlag $ globalWorldFile globalFlags) - (packageIndex sourcePkgDb) - userTargets - return (userTargets, pkgSpecifiers) - - return (installedPkgIndex, sourcePkgDb, userTargets, pkgSpecifiers) - --- | Make an install plan given install context and install arguments. -makeInstallPlan :: Verbosity -> InstallArgs -> InstallContext - -> IO (Progress String String InstallPlan) -makeInstallPlan verbosity - (_, _, comp, platform, _, _, mSandboxPkgInfo, - _, configFlags, configExFlags, installFlags, - _) - (installedPkgIndex, sourcePkgDb, - _, pkgSpecifiers) = do - - solver <- chooseSolver verbosity (fromFlag (configSolver configExFlags)) - (compilerInfo comp) - notice verbosity "Resolving dependencies..." - return $ planPackages comp platform mSandboxPkgInfo solver - configFlags configExFlags installFlags - installedPkgIndex sourcePkgDb pkgSpecifiers - --- | Given an install plan, perform the actual installations. 
-processInstallPlan :: Verbosity -> InstallArgs -> InstallContext - -> InstallPlan - -> IO () -processInstallPlan verbosity - args@(_,_, comp, _, _, _, _, _, _, _, installFlags, _) - (installedPkgIndex, sourcePkgDb, - userTargets, pkgSpecifiers) installPlan = do - checkPrintPlan verbosity comp installedPkgIndex installPlan sourcePkgDb - installFlags pkgSpecifiers - - unless (dryRun || nothingToInstall) $ do - installPlan' <- performInstallations verbosity - args installedPkgIndex installPlan - postInstallActions verbosity args userTargets installPlan' - where - dryRun = fromFlag (installDryRun installFlags) - nothingToInstall = null (InstallPlan.ready installPlan) - --- ------------------------------------------------------------ --- * Installation planning --- ------------------------------------------------------------ - -planPackages :: Compiler - -> Platform - -> Maybe SandboxPackageInfo - -> Solver - -> ConfigFlags - -> ConfigExFlags - -> InstallFlags - -> InstalledPackageIndex - -> SourcePackageDb - -> [PackageSpecifier SourcePackage] - -> Progress String String InstallPlan -planPackages comp platform mSandboxPkgInfo solver - configFlags configExFlags installFlags - installedPkgIndex sourcePkgDb pkgSpecifiers = - - resolveDependencies - platform (compilerInfo comp) - solver - resolverParams - - >>= if onlyDeps then pruneInstallPlan pkgSpecifiers else return - - where - resolverParams = - - setMaxBackjumps (if maxBackjumps < 0 then Nothing - else Just maxBackjumps) - - . setIndependentGoals independentGoals - - . setReorderGoals reorderGoals - - . setAvoidReinstalls avoidReinstalls - - . setShadowPkgs shadowPkgs - - . setStrongFlags strongFlags - - . setPreferenceDefault (if upgradeDeps then PreferAllLatest - else PreferLatestForSelected) - - . removeUpperBounds allowNewer - - . addPreferences - -- preferences from the config file or command line - [ PackageVersionPreference name ver - | Dependency name ver <- configPreferences configExFlags ] - - . 
addConstraints - -- version constraints from the config file or command line - (map userToPackageConstraint (configExConstraints configExFlags)) - - . addConstraints - --FIXME: this just applies all flags to all targets which - -- is silly. We should check if the flags are appropriate - [ PackageConstraintFlags (pkgSpecifierTarget pkgSpecifier) flags - | let flags = configConfigurationsFlags configFlags - , not (null flags) - , pkgSpecifier <- pkgSpecifiers ] - - . addConstraints - [ PackageConstraintStanzas (pkgSpecifierTarget pkgSpecifier) stanzas - | pkgSpecifier <- pkgSpecifiers ] - - . maybe id applySandboxInstallPolicy mSandboxPkgInfo - - . (if reinstall then reinstallTargets else id) - - $ standardInstallPolicy - installedPkgIndex sourcePkgDb pkgSpecifiers - - stanzas = concat - [ if testsEnabled then [TestStanzas] else [] - , if benchmarksEnabled then [BenchStanzas] else [] - ] - testsEnabled = fromFlagOrDefault False $ configTests configFlags - benchmarksEnabled = fromFlagOrDefault False $ configBenchmarks configFlags - - reinstall = fromFlag (installReinstall installFlags) - reorderGoals = fromFlag (installReorderGoals installFlags) - independentGoals = fromFlag (installIndependentGoals installFlags) - avoidReinstalls = fromFlag (installAvoidReinstalls installFlags) - shadowPkgs = fromFlag (installShadowPkgs installFlags) - strongFlags = fromFlag (installStrongFlags installFlags) - maxBackjumps = fromFlag (installMaxBackjumps installFlags) - upgradeDeps = fromFlag (installUpgradeDeps installFlags) - onlyDeps = fromFlag (installOnlyDeps installFlags) - allowNewer = fromFlag (configAllowNewer configExFlags) - --- | Remove the provided targets from the install plan. 
-pruneInstallPlan :: Package pkg => [PackageSpecifier pkg] -> InstallPlan - -> Progress String String InstallPlan -pruneInstallPlan pkgSpecifiers = - -- TODO: this is a general feature and should be moved to D.C.Dependency - -- Also, the InstallPlan.remove should return info more precise to the - -- problem, rather than the very general PlanProblem type. - either (Fail . explain) Done - . InstallPlan.remove (\pkg -> packageName pkg `elem` targetnames) - where - explain :: [InstallPlan.PlanProblem] -> String - explain problems = - "Cannot select only the dependencies (as requested by the " - ++ "'--only-dependencies' flag), " - ++ (case pkgids of - [pkgid] -> "the package " ++ display pkgid ++ " is " - _ -> "the packages " - ++ intercalate ", " (map display pkgids) ++ " are ") - ++ "required by a dependency of one of the other targets." - where - pkgids = - nub [ depid - | InstallPlan.PackageMissingDeps _ depids <- problems - , depid <- depids - , packageName depid `elem` targetnames ] - - targetnames = map pkgSpecifierTarget pkgSpecifiers - --- ------------------------------------------------------------ --- * Informational messages --- ------------------------------------------------------------ - --- | Perform post-solver checks of the install plan and print it if --- either requested or needed. -checkPrintPlan :: Verbosity - -> Compiler - -> InstalledPackageIndex - -> InstallPlan - -> SourcePackageDb - -> InstallFlags - -> [PackageSpecifier SourcePackage] - -> IO () -checkPrintPlan verbosity comp installed installPlan sourcePkgDb - installFlags pkgSpecifiers = do - - -- User targets that are already installed. - let preExistingTargets = - [ p | let tgts = map pkgSpecifierTarget pkgSpecifiers, - InstallPlan.PreExisting p <- InstallPlan.toList installPlan, - packageName p `elem` tgts ] - - -- If there's nothing to install, we print the already existing - -- target packages as an explanation. 
- when nothingToInstall $ - notice verbosity $ unlines $ - "All the requested packages are already installed:" - : map (display . packageId) preExistingTargets - ++ ["Use --reinstall if you want to reinstall anyway."] - - let lPlan = linearizeInstallPlan comp installed installPlan - -- Are any packages classified as reinstalls? - let reinstalledPkgs = concatMap (extractReinstalls . snd) lPlan - -- Packages that are already broken. - let oldBrokenPkgs = - map Installed.installedPackageId - . PackageIndex.reverseDependencyClosure installed - . map (Installed.installedPackageId . fst) - . PackageIndex.brokenPackages - $ installed - let excluded = reinstalledPkgs ++ oldBrokenPkgs - -- Packages that are reverse dependencies of replaced packages are very - -- likely to be broken. We exclude packages that are already broken. - let newBrokenPkgs = - filter (\ p -> not (Installed.installedPackageId p `elem` excluded)) - (PackageIndex.reverseDependencyClosure installed reinstalledPkgs) - let containsReinstalls = not (null reinstalledPkgs) - let breaksPkgs = not (null newBrokenPkgs) - - let adaptedVerbosity - | containsReinstalls && not overrideReinstall = verbosity `max` verbose - | otherwise = verbosity - - -- We print the install plan if we are in a dry-run or if we are confronted - -- with a dangerous install plan. - when (dryRun || containsReinstalls && not overrideReinstall) $ - printPlan (dryRun || breaksPkgs && not overrideReinstall) - adaptedVerbosity lPlan sourcePkgDb - - -- If the install plan is dangerous, we print various warning messages. In - -- particular, if we can see that packages are likely to be broken, we even - -- bail out (unless installation has been forced with --force-reinstalls). - when containsReinstalls $ do - if breaksPkgs - then do - (if dryRun || overrideReinstall then warn verbosity else die) $ unlines $ - "The following packages are likely to be broken by the reinstalls:" - : map (display . 
Installed.sourcePackageId) newBrokenPkgs - ++ if overrideReinstall - then if dryRun then [] else - ["Continuing even though the plan contains dangerous reinstalls."] - else - ["Use --force-reinstalls if you want to install anyway."] - else unless dryRun $ warn verbosity - "Note that reinstalls are always dangerous. Continuing anyway..." - - where - nothingToInstall = null (InstallPlan.ready installPlan) - - dryRun = fromFlag (installDryRun installFlags) - overrideReinstall = fromFlag (installOverrideReinstall installFlags) - -linearizeInstallPlan :: Compiler - -> InstalledPackageIndex - -> InstallPlan - -> [(ReadyPackage, PackageStatus)] -linearizeInstallPlan comp installedPkgIndex plan = - unfoldr next plan - where - next plan' = case InstallPlan.ready plan' of - [] -> Nothing - (pkg:_) -> Just ((pkg, status), plan'') - where - pkgid = installedPackageId pkg - status = packageStatus comp installedPkgIndex pkg - plan'' = InstallPlan.completed pkgid - (BuildOk DocsNotTried TestsNotTried - (Just $ Installed.emptyInstalledPackageInfo - { Installed.sourcePackageId = packageId pkg - , Installed.installedPackageId = pkgid })) - (InstallPlan.processing [pkg] plan') - --FIXME: This is a bit of a hack, - -- pretending that each package is installed - -- It's doubly a hack because the installed package ID - -- didn't get updated... - -data PackageStatus = NewPackage - | NewVersion [Version] - | Reinstall [InstalledPackageId] [PackageChange] - -type PackageChange = MergeResult PackageIdentifier PackageIdentifier - -extractReinstalls :: PackageStatus -> [InstalledPackageId] -extractReinstalls (Reinstall ipids _) = ipids -extractReinstalls _ = [] - -packageStatus :: Compiler -> InstalledPackageIndex -> ReadyPackage -> PackageStatus -packageStatus _comp installedPkgIndex cpkg = - case PackageIndex.lookupPackageName installedPkgIndex - (packageName cpkg) of - [] -> NewPackage - ps -> case filter ((== packageId cpkg) - . 
Installed.sourcePackageId) (concatMap snd ps) of - [] -> NewVersion (map fst ps) - pkgs@(pkg:_) -> Reinstall (map Installed.installedPackageId pkgs) - (changes pkg cpkg) - - where - - changes :: Installed.InstalledPackageInfo - -> ReadyPackage - -> [MergeResult PackageIdentifier PackageIdentifier] - changes pkg pkg' = - filter changed - $ mergeBy (comparing packageName) - -- get dependencies of installed package (convert to source pkg ids via - -- index) - (nub . sort . concatMap - (maybeToList . fmap Installed.sourcePackageId . - PackageIndex.lookupInstalledPackageId installedPkgIndex) . - Installed.depends $ pkg) - -- get dependencies of configured package - (nub . sort . depends $ pkg') - - changed (InBoth pkgid pkgid') = pkgid /= pkgid' - changed _ = True - -printPlan :: Bool -- is dry run - -> Verbosity - -> [(ReadyPackage, PackageStatus)] - -> SourcePackageDb - -> IO () -printPlan dryRun verbosity plan sourcePkgDb = case plan of - [] -> return () - pkgs - | verbosity >= Verbosity.verbose -> putStr $ unlines $ - ("In order, the following " ++ wouldWill ++ " be installed:") - : map showPkgAndReason pkgs - | otherwise -> notice verbosity $ unlines $ - ("In order, the following " ++ wouldWill - ++ " be installed (use -v for more details):") - : map showPkg pkgs - where - wouldWill | dryRun = "would" - | otherwise = "will" - - showPkg (pkg, _) = display (packageId pkg) ++ - showLatest (pkg) - - showPkgAndReason (pkg', pr) = display (packageId pkg') ++ - showLatest pkg' ++ - showFlagAssignment (nonDefaultFlags pkg') ++ - showStanzas (stanzas pkg') ++ - showDep pkg' ++ - case pr of - NewPackage -> " (new package)" - NewVersion _ -> " (new version)" - Reinstall _ cs -> " (reinstall)" ++ case cs of - [] -> "" - diff -> " (changes: " ++ intercalate ", " (map change diff) ++ ")" - - showLatest :: ReadyPackage -> String - showLatest pkg = case mLatestVersion of - Just latestVersion -> - if packageVersion pkg < latestVersion - then (" (latest: " ++ display latestVersion 
++ ")") - else "" - Nothing -> "" - where - mLatestVersion :: Maybe Version - mLatestVersion = case SourcePackageIndex.lookupPackageName - (packageIndex sourcePkgDb) - (packageName pkg) of - [] -> Nothing - x -> Just $ packageVersion $ last x - - toFlagAssignment :: [Flag] -> FlagAssignment - toFlagAssignment = map (\ f -> (flagName f, flagDefault f)) - - nonDefaultFlags :: ReadyPackage -> FlagAssignment - nonDefaultFlags (ReadyPackage spkg fa _ _) = - let defaultAssignment = - toFlagAssignment - (genPackageFlags (Source.packageDescription spkg)) - in fa \\ defaultAssignment - - stanzas :: ReadyPackage -> [OptionalStanza] - stanzas (ReadyPackage _ _ sts _) = sts - - showStanzas :: [OptionalStanza] -> String - showStanzas = concatMap ((' ' :) . showStanza) - showStanza TestStanzas = "*test" - showStanza BenchStanzas = "*bench" - - -- FIXME: this should be a proper function in a proper place - showFlagAssignment :: FlagAssignment -> String - showFlagAssignment = concatMap ((' ' :) . showFlagValue) - showFlagValue (f, True) = '+' : showFlagName f - showFlagValue (f, False) = '-' : showFlagName f - showFlagName (FlagName f) = f - - change (OnlyInLeft pkgid) = display pkgid ++ " removed" - change (InBoth pkgid pkgid') = display pkgid ++ " -> " - ++ display (packageVersion pkgid') - change (OnlyInRight pkgid') = display pkgid' ++ " added" - - showDep pkg | Just rdeps <- Map.lookup (packageId pkg) revDeps - = " (via: " ++ unwords (map display rdeps) ++ ")" - | otherwise = "" - - revDepGraphEdges = [ (rpid,packageId pkg) | (pkg,_) <- plan, rpid <- depends pkg ] - - revDeps = Map.fromListWith (++) (map (fmap (:[])) revDepGraphEdges) - --- ------------------------------------------------------------ --- * Post installation stuff --- ------------------------------------------------------------ - --- | Report a solver failure. This works slightly differently to --- 'postInstallActions', as (by definition) we don't have an install plan. 
-reportPlanningFailure :: Verbosity -> InstallArgs -> InstallContext -> String -> IO () -reportPlanningFailure verbosity - (_, _, comp, platform, _, _, _ - ,_, configFlags, _, installFlags, _) - (_, sourcePkgDb, _, pkgSpecifiers) - message = do - - when reportFailure $ do - - -- Only create reports for explicitly named packages - let pkgids = - filter (SourcePackageIndex.elemByPackageId (packageIndex sourcePkgDb)) $ - mapMaybe theSpecifiedPackage pkgSpecifiers - - buildReports = BuildReports.fromPlanningFailure platform (compilerId comp) - pkgids (configConfigurationsFlags configFlags) - - when (not (null buildReports)) $ - info verbosity $ - "Solver failure will be reported for " - ++ intercalate "," (map display pkgids) - - -- Save reports - BuildReports.storeLocal (compilerInfo comp) - (fromNubList $ installSummaryFile installFlags) buildReports platform - - -- Save solver log - case logFile of - Nothing -> return () - Just template -> forM_ pkgids $ \pkgid -> - let env = initialPathTemplateEnv pkgid dummyPackageKey - (compilerInfo comp) platform - path = fromPathTemplate $ substPathTemplate env template - in writeFile path message - - where - reportFailure = fromFlag (installReportPlanningFailure installFlags) - logFile = flagToMaybe (installLogFile installFlags) - - -- A PackageKey is calculated from the transitive closure of - -- dependencies, but when the solver fails we don't have that. - -- So we fail. - dummyPackageKey = error "reportPlanningFailure: package key not available" - --- | If a 'PackageSpecifier' refers to a single package, return Just that package. 
-theSpecifiedPackage :: Package pkg => PackageSpecifier pkg -> Maybe PackageId -theSpecifiedPackage pkgSpec = - case pkgSpec of - NamedPackage name [PackageConstraintVersion name' version] - | name == name' -> PackageIdentifier name <$> trivialRange version - NamedPackage _ _ -> Nothing - SpecificSourcePackage pkg -> Just $ packageId pkg - where - -- | If a range includes only a single version, return Just that version. - trivialRange :: VersionRange -> Maybe Version - trivialRange = foldVersionRange - Nothing - Just -- "== v" - (\_ -> Nothing) - (\_ -> Nothing) - (\_ _ -> Nothing) - (\_ _ -> Nothing) - --- | Various stuff we do after successful or unsuccessfully installing a bunch --- of packages. This includes: --- --- * build reporting, local and remote --- * symlinking binaries --- * updating indexes --- * updating world file --- * error reporting --- -postInstallActions :: Verbosity - -> InstallArgs - -> [UserTarget] - -> InstallPlan - -> IO () -postInstallActions verbosity - (packageDBs, _, comp, platform, conf, useSandbox, mSandboxPkgInfo - ,globalFlags, configFlags, _, installFlags, _) - targets installPlan = do - - unless oneShot $ - World.insert verbosity worldFile - --FIXME: does not handle flags - [ World.WorldPkgInfo dep [] - | UserTargetNamed dep <- targets ] - - let buildReports = BuildReports.fromInstallPlan installPlan - BuildReports.storeLocal (compilerInfo comp) (fromNubList $ installSummaryFile installFlags) buildReports - (InstallPlan.planPlatform installPlan) - when (reportingLevel >= AnonymousReports) $ - BuildReports.storeAnonymous buildReports - when (reportingLevel == DetailedReports) $ - storeDetailedBuildReports verbosity logsDir buildReports - - regenerateHaddockIndex verbosity packageDBs comp platform conf useSandbox - configFlags installFlags installPlan - - symlinkBinaries verbosity comp configFlags installFlags installPlan - - printBuildFailures installPlan - - updateSandboxTimestampsFile useSandbox mSandboxPkgInfo - comp platform 
installPlan - - where - reportingLevel = fromFlag (installBuildReports installFlags) - logsDir = fromFlag (globalLogsDir globalFlags) - oneShot = fromFlag (installOneShot installFlags) - worldFile = fromFlag $ globalWorldFile globalFlags - -storeDetailedBuildReports :: Verbosity -> FilePath - -> [(BuildReports.BuildReport, Maybe Repo)] -> IO () -storeDetailedBuildReports verbosity logsDir reports = sequence_ - [ do dotCabal <- defaultCabalDir - let logFileName = display (BuildReports.package report) <.> "log" - logFile = logsDir logFileName - reportsDir = dotCabal "reports" remoteRepoName remoteRepo - reportFile = reportsDir logFileName - - handleMissingLogFile $ do - buildLog <- readFile logFile - createDirectoryIfMissing True reportsDir -- FIXME - writeFile reportFile (show (BuildReports.show report, buildLog)) - - | (report, Just Repo { repoKind = Left remoteRepo }) <- reports - , isLikelyToHaveLogFile (BuildReports.installOutcome report) ] - - where - isLikelyToHaveLogFile BuildReports.ConfigureFailed {} = True - isLikelyToHaveLogFile BuildReports.BuildFailed {} = True - isLikelyToHaveLogFile BuildReports.InstallFailed {} = True - isLikelyToHaveLogFile BuildReports.InstallOk {} = True - isLikelyToHaveLogFile _ = False - - handleMissingLogFile = Exception.handleJust missingFile $ \ioe -> - warn verbosity $ "Missing log file for build report: " - ++ fromMaybe "" (ioeGetFileName ioe) - - missingFile ioe - | isDoesNotExistError ioe = Just ioe - missingFile _ = Nothing - - -regenerateHaddockIndex :: Verbosity - -> [PackageDB] - -> Compiler - -> Platform - -> ProgramConfiguration - -> UseSandbox - -> ConfigFlags - -> InstallFlags - -> InstallPlan - -> IO () -regenerateHaddockIndex verbosity packageDBs comp platform conf useSandbox - configFlags installFlags installPlan - | haddockIndexFileIsRequested && shouldRegenerateHaddockIndex = do - - defaultDirs <- InstallDirs.defaultInstallDirs - (compilerFlavor comp) - (fromFlag (configUserInstall configFlags)) - True - let 
indexFileTemplate = fromFlag (installHaddockIndex installFlags) - indexFile = substHaddockIndexFileName defaultDirs indexFileTemplate - - notice verbosity $ - "Updating documentation index " ++ indexFile - - --TODO: might be nice if the install plan gave us the new InstalledPackageInfo - installedPkgIndex <- getInstalledPackages verbosity comp packageDBs conf - Haddock.regenerateHaddockIndex verbosity installedPkgIndex conf indexFile - - | otherwise = return () - where - haddockIndexFileIsRequested = - fromFlag (installDocumentation installFlags) - && isJust (flagToMaybe (installHaddockIndex installFlags)) - - -- We want to regenerate the index if some new documentation was actually - -- installed. Since the index can be only per-user or per-sandbox (see - -- #1337), we don't do it for global installs or special cases where we're - -- installing into a specific db. - shouldRegenerateHaddockIndex = (isUseSandbox useSandbox || normalUserInstall) - && someDocsWereInstalled installPlan - where - someDocsWereInstalled = any installedDocs . InstallPlan.toList - normalUserInstall = (UserPackageDB `elem` packageDBs) - && all (not . isSpecificPackageDB) packageDBs - - installedDocs (InstallPlan.Installed _ (BuildOk DocsOk _ _)) = True - installedDocs _ = False - isSpecificPackageDB (SpecificPackageDB _) = True - isSpecificPackageDB _ = False - - substHaddockIndexFileName defaultDirs = fromPathTemplate - . 
substPathTemplate env - where - env = env0 ++ installDirsTemplateEnv absoluteDirs - env0 = InstallDirs.compilerTemplateEnv (compilerInfo comp) - ++ InstallDirs.platformTemplateEnv platform - ++ InstallDirs.abiTemplateEnv (compilerInfo comp) platform - absoluteDirs = InstallDirs.substituteInstallDirTemplates - env0 templateDirs - templateDirs = InstallDirs.combineInstallDirs fromFlagOrDefault - defaultDirs (configInstallDirs configFlags) - - -symlinkBinaries :: Verbosity - -> Compiler - -> ConfigFlags - -> InstallFlags - -> InstallPlan -> IO () -symlinkBinaries verbosity comp configFlags installFlags plan = do - failed <- InstallSymlink.symlinkBinaries comp configFlags installFlags plan - case failed of - [] -> return () - [(_, exe, path)] -> - warn verbosity $ - "could not create a symlink in " ++ bindir ++ " for " - ++ exe ++ " because the file exists there already but is not " - ++ "managed by cabal. You can create a symlink for this executable " - ++ "manually if you wish. The executable file has been installed at " - ++ path - exes -> - warn verbosity $ - "could not create symlinks in " ++ bindir ++ " for " - ++ intercalate ", " [ exe | (_, exe, _) <- exes ] - ++ " because the files exist there already and are not " - ++ "managed by cabal. You can create symlinks for these executables " - ++ "manually if you wish. The executable files have been installed at " - ++ intercalate ", " [ path | (_, _, path) <- exes ] - where - bindir = fromFlag (installSymlinkBinDir installFlags) - - -printBuildFailures :: InstallPlan -> IO () -printBuildFailures plan = - case [ (pkg, reason) - | InstallPlan.Failed pkg reason <- InstallPlan.toList plan ] of - [] -> return () - failed -> die . unlines - $ "Error: some packages failed to install:" - : [ display (packageId pkg) ++ printFailureReason reason - | (pkg, reason) <- failed ] - where - printFailureReason reason = case reason of - DependentFailed pkgid -> " depends on " ++ display pkgid - ++ " which failed to install." 
- DownloadFailed e -> " failed while downloading the package." - ++ showException e - UnpackFailed e -> " failed while unpacking the package." - ++ showException e - ConfigureFailed e -> " failed during the configure step." - ++ showException e - BuildFailed e -> " failed during the building phase." - ++ showException e - TestsFailed e -> " failed during the tests phase." - ++ showException e - InstallFailed e -> " failed during the final install step." - ++ showException e - - -- This will never happen, but we include it for completeness - PlanningFailed -> " failed during the planning phase." - - showException e = " The exception was:\n " ++ show e ++ maybeOOM e -#ifdef mingw32_HOST_OS - maybeOOM _ = "" -#else - maybeOOM e = maybe "" onExitFailure (fromException e) - onExitFailure (ExitFailure n) - | n == 9 || n == -9 = - "\nThis may be due to an out-of-memory condition." - onExitFailure _ = "" -#endif - - --- | If we're working inside a sandbox and some add-source deps were installed, --- update the timestamps of those deps. -updateSandboxTimestampsFile :: UseSandbox -> Maybe SandboxPackageInfo - -> Compiler -> Platform -> InstallPlan - -> IO () -updateSandboxTimestampsFile (UseSandbox sandboxDir) - (Just (SandboxPackageInfo _ _ _ allAddSourceDeps)) - comp platform installPlan = - withUpdateTimestamps sandboxDir (compilerId comp) platform $ \_ -> do - let allInstalled = [ pkg | InstallPlan.Installed pkg _ - <- InstallPlan.toList installPlan ] - allSrcPkgs = [ pkg | ReadyPackage pkg _ _ _ <- allInstalled ] - allPaths = [ pth | LocalUnpackedPackage pth - <- map packageSource allSrcPkgs] - allPathsCanonical <- mapM tryCanonicalizePath allPaths - return $! 
filter (`S.member` allAddSourceDeps) allPathsCanonical - -updateSandboxTimestampsFile _ _ _ _ _ = return () - --- ------------------------------------------------------------ --- * Actually do the installations --- ------------------------------------------------------------ - -data InstallMisc = InstallMisc { - rootCmd :: Maybe FilePath, - libVersion :: Maybe Version - } - --- | If logging is enabled, contains location of the log file and the verbosity --- level for logging. -type UseLogFile = Maybe (PackageIdentifier -> PackageKey -> FilePath, Verbosity) - -performInstallations :: Verbosity - -> InstallArgs - -> InstalledPackageIndex - -> InstallPlan - -> IO InstallPlan -performInstallations verbosity - (packageDBs, _, comp, _, conf, useSandbox, _, - globalFlags, configFlags, configExFlags, installFlags, haddockFlags) - installedPkgIndex installPlan = do - - -- With 'install -j' it can be a bit hard to tell whether a sandbox is used. - whenUsingSandbox useSandbox $ \sandboxDir -> - when parallelInstall $ - notice verbosity $ "Notice: installing into a sandbox located at " - ++ sandboxDir - - jobControl <- if parallelInstall then newParallelJobControl - else newSerialJobControl - buildLimit <- newJobLimit numJobs - fetchLimit <- newJobLimit (min numJobs numFetchJobs) - installLock <- newLock -- serialise installation - cacheLock <- newLock -- serialise access to setup exe cache - - - executeInstallPlan verbosity comp jobControl useLogFile installPlan $ \rpkg -> - -- Calculate the package key (ToDo: Is this right for source install) - let pkg_key = readyPackageKey comp rpkg in - installReadyPackage platform cinfo configFlags - rpkg $ \configFlags' src pkg pkgoverride -> - fetchSourcePackage verbosity fetchLimit src $ \src' -> - installLocalPackage verbosity buildLimit - (packageId pkg) src' distPref $ \mpath -> - installUnpackedPackage verbosity buildLimit installLock numJobs pkg_key - (setupScriptOptions installedPkgIndex cacheLock) - miscOptions configFlags' 
installFlags haddockFlags - cinfo platform pkg pkgoverride mpath useLogFile - - where - platform = InstallPlan.planPlatform installPlan - cinfo = InstallPlan.planCompiler installPlan - - numJobs = determineNumJobs (installNumJobs installFlags) - numFetchJobs = 2 - parallelInstall = numJobs >= 2 - distPref = fromFlagOrDefault (useDistPref defaultSetupScriptOptions) - (configDistPref configFlags) - - setupScriptOptions index lock = SetupScriptOptions { - useCabalVersion = chooseCabalVersion configExFlags - (libVersion miscOptions), - useCompiler = Just comp, - usePlatform = Just platform, - -- Hack: we typically want to allow the UserPackageDB for finding the - -- Cabal lib when compiling any Setup.hs even if we're doing a global - -- install. However we also allow looking in a specific package db. - usePackageDB = if UserPackageDB `elem` packageDBs - then packageDBs - else let (db@GlobalPackageDB:dbs) = packageDBs - in db : UserPackageDB : dbs, - --TODO: use Ord instance: - -- insert UserPackageDB packageDBs - usePackageIndex = if UserPackageDB `elem` packageDBs - then Just index - else Nothing, - useProgramConfig = conf, - useDistPref = distPref, - useLoggingHandle = Nothing, - useWorkingDir = Nothing, - forceExternalSetupMethod = parallelInstall, - useWin32CleanHack = False, - setupCacheLock = Just lock - } - reportingLevel = fromFlag (installBuildReports installFlags) - logsDir = fromFlag (globalLogsDir globalFlags) - - -- Should the build output be written to a log file instead of stdout? - useLogFile :: UseLogFile - useLogFile = fmap ((\f -> (f, loggingVerbosity)) . substLogFileName) - logFileTemplate - where - installLogFile' = flagToMaybe $ installLogFile installFlags - defaultTemplate = toPathTemplate $ logsDir "$pkgid" <.> "log" - - -- If the user has specified --remote-build-reporting=detailed, use the - -- default log file location. If the --build-log option is set, use the - -- provided location. 
Otherwise don't use logging, unless building in - -- parallel (in which case the default location is used). - logFileTemplate :: Maybe PathTemplate - logFileTemplate - | useDefaultTemplate = Just defaultTemplate - | otherwise = installLogFile' - - -- If the user has specified --remote-build-reporting=detailed or - -- --build-log, use more verbose logging. - loggingVerbosity :: Verbosity - loggingVerbosity | overrideVerbosity = max Verbosity.verbose verbosity - | otherwise = verbosity - - useDefaultTemplate :: Bool - useDefaultTemplate - | reportingLevel == DetailedReports = True - | isJust installLogFile' = False - | parallelInstall = True - | otherwise = False - - overrideVerbosity :: Bool - overrideVerbosity - | reportingLevel == DetailedReports = True - | isJust installLogFile' = True - | parallelInstall = False - | otherwise = False - - substLogFileName :: PathTemplate -> PackageIdentifier -> PackageKey -> FilePath - substLogFileName template pkg pkg_key = fromPathTemplate - . substPathTemplate env - $ template - where env = initialPathTemplateEnv (packageId pkg) pkg_key - (compilerInfo comp) platform - - miscOptions = InstallMisc { - rootCmd = if fromFlag (configUserInstall configFlags) - || (isUseSandbox useSandbox) - then Nothing -- ignore --root-cmd if --user - -- or working inside a sandbox. 
- else flagToMaybe (installRootCmd installFlags), - libVersion = flagToMaybe (configCabalVersion configExFlags) - } - - -executeInstallPlan :: Verbosity - -> Compiler - -> JobControl IO (PackageId, PackageKey, BuildResult) - -> UseLogFile - -> InstallPlan - -> (ReadyPackage -> IO BuildResult) - -> IO InstallPlan -executeInstallPlan verbosity comp jobCtl useLogFile plan0 installPkg = - tryNewTasks 0 plan0 - where - tryNewTasks taskCount plan = do - case InstallPlan.ready plan of - [] | taskCount == 0 -> return plan - | otherwise -> waitForTasks taskCount plan - pkgs -> do - sequence_ - [ do info verbosity $ "Ready to install " ++ display pkgid - spawnJob jobCtl $ do - buildResult <- installPkg pkg - return (packageId pkg, pkg_key, buildResult) - | pkg <- pkgs - , let pkgid = packageId pkg - pkg_key = readyPackageKey comp pkg ] - - let taskCount' = taskCount + length pkgs - plan' = InstallPlan.processing pkgs plan - waitForTasks taskCount' plan' - - waitForTasks taskCount plan = do - info verbosity $ "Waiting for install task to finish..." - (pkgid, pkg_key, buildResult) <- collectJob jobCtl - printBuildResult pkgid pkg_key buildResult - let taskCount' = taskCount-1 - plan' = updatePlan pkgid buildResult plan - tryNewTasks taskCount' plan' - - updatePlan :: PackageIdentifier -> BuildResult -> InstallPlan -> InstallPlan - updatePlan pkgid (Right buildSuccess) = - InstallPlan.completed (Source.fakeInstalledPackageId pkgid) buildSuccess - - updatePlan pkgid (Left buildFailure) = - InstallPlan.failed (Source.fakeInstalledPackageId pkgid) buildFailure depsFailure - where - depsFailure = DependentFailed pkgid - -- So this first pkgid failed for whatever reason (buildFailure). - -- All the other packages that depended on this pkgid, which we - -- now cannot build, we mark as failing due to 'DependentFailed' - -- which kind of means it was not their fault. - - -- Print build log if something went wrong, and 'Installed $PKGID' - -- otherwise. 
- printBuildResult :: PackageId -> PackageKey -> BuildResult -> IO () - printBuildResult pkgid pkg_key buildResult = case buildResult of - (Right _) -> notice verbosity $ "Installed " ++ display pkgid - (Left _) -> do - notice verbosity $ "Failed to install " ++ display pkgid - when (verbosity >= normal) $ - case useLogFile of - Nothing -> return () - Just (mkLogFileName, _) -> do - let logName = mkLogFileName pkgid pkg_key - putStr $ "Build log ( " ++ logName ++ " ):\n" - printFile logName - - printFile :: FilePath -> IO () - printFile path = readFile path >>= putStr - --- | Call an installer for an 'SourcePackage' but override the configure --- flags with the ones given by the 'ReadyPackage'. In particular the --- 'ReadyPackage' specifies an exact 'FlagAssignment' and exactly --- versioned package dependencies. So we ignore any previous partial flag --- assignment or dependency constraints and use the new ones. --- --- NB: when updating this function, don't forget to also update --- 'configurePackage' in D.C.Configure. -installReadyPackage :: Platform -> CompilerInfo - -> ConfigFlags - -> ReadyPackage - -> (ConfigFlags -> PackageLocation (Maybe FilePath) - -> PackageDescription - -> PackageDescriptionOverride -> a) - -> a -installReadyPackage platform cinfo configFlags - (ReadyPackage (SourcePackage _ gpkg source pkgoverride) - flags stanzas deps) - installPkg = installPkg configFlags { - configConfigurationsFlags = flags, - -- We generate the legacy constraints as well as the new style precise deps. - -- In the end only one set gets passed to Setup.hs configure, depending on - -- the Cabal version we are talking to. - configConstraints = [ thisPackageVersion (packageId deppkg) - | deppkg <- deps ], - configDependencies = [ (packageName (Installed.sourcePackageId deppkg), - Installed.installedPackageId deppkg) - | deppkg <- deps ], - -- Use '--exact-configuration' if supported. 
- configExactConfiguration = toFlag True, - configBenchmarks = toFlag False, - configTests = toFlag (TestStanzas `elem` stanzas) - } source pkg pkgoverride - where - pkg = case finalizePackageDescription flags - (const True) - platform cinfo [] (enableStanzas stanzas gpkg) of - Left _ -> error "finalizePackageDescription ReadyPackage failed" - Right (desc, _) -> desc - -fetchSourcePackage - :: Verbosity - -> JobLimit - -> PackageLocation (Maybe FilePath) - -> (PackageLocation FilePath -> IO BuildResult) - -> IO BuildResult -fetchSourcePackage verbosity fetchLimit src installPkg = do - fetched <- checkFetched src - case fetched of - Just src' -> installPkg src' - Nothing -> onFailure DownloadFailed $ do - loc <- withJobLimit fetchLimit $ - fetchPackage verbosity src - installPkg loc - - -installLocalPackage - :: Verbosity - -> JobLimit - -> PackageIdentifier -> PackageLocation FilePath -> FilePath - -> (Maybe FilePath -> IO BuildResult) - -> IO BuildResult -installLocalPackage verbosity jobLimit pkgid location distPref installPkg = - - case location of - - LocalUnpackedPackage dir -> - installPkg (Just dir) - - LocalTarballPackage tarballPath -> - installLocalTarballPackage verbosity jobLimit - pkgid tarballPath distPref installPkg - - RemoteTarballPackage _ tarballPath -> - installLocalTarballPackage verbosity jobLimit - pkgid tarballPath distPref installPkg - - RepoTarballPackage _ _ tarballPath -> - installLocalTarballPackage verbosity jobLimit - pkgid tarballPath distPref installPkg - - -installLocalTarballPackage - :: Verbosity - -> JobLimit - -> PackageIdentifier -> FilePath -> FilePath - -> (Maybe FilePath -> IO BuildResult) - -> IO BuildResult -installLocalTarballPackage verbosity jobLimit pkgid - tarballPath distPref installPkg = do - tmp <- getTemporaryDirectory - withTempDirectory verbosity tmp "cabal-tmp" $ \tmpDirPath -> - onFailure UnpackFailed $ do - let relUnpackedPath = display pkgid - absUnpackedPath = tmpDirPath relUnpackedPath - descFilePath = 
absUnpackedPath - display (packageName pkgid) <.> "cabal" - withJobLimit jobLimit $ do - info verbosity $ "Extracting " ++ tarballPath - ++ " to " ++ tmpDirPath ++ "..." - extractTarGzFile tmpDirPath relUnpackedPath tarballPath - exists <- doesFileExist descFilePath - when (not exists) $ - die $ "Package .cabal file not found: " ++ show descFilePath - maybeRenameDistDir absUnpackedPath - - installPkg (Just absUnpackedPath) - - where - -- 'cabal sdist' puts pre-generated files in the 'dist' - -- directory. This fails when a nonstandard build directory name - -- is used (as is the case with sandboxes), so we need to rename - -- the 'dist' dir here. - -- - -- TODO: 'cabal get happy && cd sandbox && cabal install ../happy' still - -- fails even with this workaround. We probably can live with that. - maybeRenameDistDir :: FilePath -> IO () - maybeRenameDistDir absUnpackedPath = do - let distDirPath = absUnpackedPath defaultDistPref - distDirPathTmp = absUnpackedPath (defaultDistPref ++ "-tmp") - distDirPathNew = absUnpackedPath distPref - distDirExists <- doesDirectoryExist distDirPath - when (distDirExists - && (not $ distDirPath `equalFilePath` distDirPathNew)) $ do - -- NB: we need to handle the case when 'distDirPathNew' is a - -- subdirectory of 'distDirPath' (e.g. the former is - -- 'dist/dist-sandbox-3688fbc2' and the latter is 'dist'). - debug verbosity $ "Renaming '" ++ distDirPath ++ "' to '" - ++ distDirPathTmp ++ "'." - renameDirectory distDirPath distDirPathTmp - when (distDirPath `isPrefixOf` distDirPathNew) $ - createDirectoryIfMissingVerbose verbosity False distDirPath - debug verbosity $ "Renaming '" ++ distDirPathTmp ++ "' to '" - ++ distDirPathNew ++ "'." 
- renameDirectory distDirPathTmp distDirPathNew - -installUnpackedPackage - :: Verbosity - -> JobLimit - -> Lock - -> Int - -> PackageKey - -> SetupScriptOptions - -> InstallMisc - -> ConfigFlags - -> InstallFlags - -> HaddockFlags - -> CompilerInfo - -> Platform - -> PackageDescription - -> PackageDescriptionOverride - -> Maybe FilePath -- ^ Directory to change to before starting the installation. - -> UseLogFile -- ^ File to log output to (if any) - -> IO BuildResult -installUnpackedPackage verbosity buildLimit installLock numJobs pkg_key - scriptOptions miscOptions - configFlags installFlags haddockFlags - cinfo platform pkg pkgoverride workingDir useLogFile = do - - -- Override the .cabal file if necessary - case pkgoverride of - Nothing -> return () - Just pkgtxt -> do - let descFilePath = fromMaybe "." workingDir - display (packageName pkgid) <.> "cabal" - info verbosity $ - "Updating " ++ display (packageName pkgid) <.> "cabal" - ++ " with the latest revision from the index." - writeFileAtomic descFilePath pkgtxt - - -- Make sure that we pass --libsubdir etc to 'setup configure' (necessary if - -- the setup script was compiled against an old version of the Cabal lib). - configFlags' <- addDefaultInstallDirs configFlags - -- Filter out flags not supported by the old versions of the Cabal lib. - let configureFlags :: Version -> ConfigFlags - configureFlags = filterConfigureFlags configFlags' { - configVerbosity = toFlag verbosity' - } - - -- Path to the optional log file. - mLogPath <- maybeLogPath - - -- Configure phase - onFailure ConfigureFailed $ withJobLimit buildLimit $ do - when (numJobs > 1) $ notice verbosity $ - "Configuring " ++ display pkgid ++ "..." - setup configureCommand configureFlags mLogPath - - -- Build phase - onFailure BuildFailed $ do - when (numJobs > 1) $ notice verbosity $ - "Building " ++ display pkgid ++ "..." 
- setup buildCommand' buildFlags mLogPath - - -- Doc generation phase - docsResult <- if shouldHaddock - then (do setup haddockCommand haddockFlags' mLogPath - return DocsOk) - `catchIO` (\_ -> return DocsFailed) - `catchExit` (\_ -> return DocsFailed) - else return DocsNotTried - - -- Tests phase - onFailure TestsFailed $ do - when (testsEnabled && PackageDescription.hasTests pkg) $ - setup Cabal.testCommand testFlags mLogPath - - let testsResult | testsEnabled = TestsOk - | otherwise = TestsNotTried - - -- Install phase - onFailure InstallFailed $ criticalSection installLock $ do - -- Capture installed package configuration file - maybePkgConf <- maybeGenPkgConf mLogPath - - -- Actual installation - withWin32SelfUpgrade verbosity pkg_key configFlags cinfo platform pkg $ do - case rootCmd miscOptions of - (Just cmd) -> reexec cmd - Nothing -> do - setup Cabal.copyCommand copyFlags mLogPath - when shouldRegister $ do - setup Cabal.registerCommand registerFlags mLogPath - return (Right (BuildOk docsResult testsResult maybePkgConf)) - - where - pkgid = packageId pkg - buildCommand' = buildCommand defaultProgramConfiguration - buildFlags _ = emptyBuildFlags { - buildDistPref = configDistPref configFlags, - buildVerbosity = toFlag verbosity' - } - shouldHaddock = fromFlag (installDocumentation installFlags) - haddockFlags' _ = haddockFlags { - haddockVerbosity = toFlag verbosity', - haddockDistPref = configDistPref configFlags - } - testsEnabled = fromFlag (configTests configFlags) - && fromFlagOrDefault False (installRunTests installFlags) - testFlags _ = Cabal.emptyTestFlags { - Cabal.testDistPref = configDistPref configFlags - } - copyFlags _ = Cabal.emptyCopyFlags { - Cabal.copyDistPref = configDistPref configFlags, - Cabal.copyDest = toFlag InstallDirs.NoCopyDest, - Cabal.copyVerbosity = toFlag verbosity' - } - shouldRegister = PackageDescription.hasLibs pkg - registerFlags _ = Cabal.emptyRegisterFlags { - Cabal.regDistPref = configDistPref configFlags, - 
Cabal.regVerbosity = toFlag verbosity' - } - verbosity' = maybe verbosity snd useLogFile - tempTemplate name = name ++ "-" ++ display pkgid - - addDefaultInstallDirs :: ConfigFlags -> IO ConfigFlags - addDefaultInstallDirs configFlags' = do - defInstallDirs <- InstallDirs.defaultInstallDirs flavor userInstall False - return $ configFlags' { - configInstallDirs = fmap Cabal.Flag . - InstallDirs.substituteInstallDirTemplates env $ - InstallDirs.combineInstallDirs fromFlagOrDefault - defInstallDirs (configInstallDirs configFlags) - } - where - CompilerId flavor _ = compilerInfoId cinfo - env = initialPathTemplateEnv pkgid pkg_key cinfo platform - userInstall = fromFlagOrDefault defaultUserInstall - (configUserInstall configFlags') - - maybeGenPkgConf :: Maybe FilePath - -> IO (Maybe Installed.InstalledPackageInfo) - maybeGenPkgConf mLogPath = - if shouldRegister then do - tmp <- getTemporaryDirectory - withTempFile tmp (tempTemplate "pkgConf") $ \pkgConfFile handle -> do - hClose handle - let registerFlags' version = (registerFlags version) { - Cabal.regGenPkgConf = toFlag (Just pkgConfFile) - } - setup Cabal.registerCommand registerFlags' mLogPath - withUTF8FileContents pkgConfFile $ \pkgConfText -> - case Installed.parseInstalledPackageInfo pkgConfText of - Installed.ParseFailed perror -> pkgConfParseFailed perror - Installed.ParseOk warns pkgConf -> do - unless (null warns) $ - warn verbosity $ unlines (map (showPWarning pkgConfFile) warns) - return (Just pkgConf) - else return Nothing - - pkgConfParseFailed :: Installed.PError -> IO a - pkgConfParseFailed perror = - die $ "Couldn't parse the output of 'setup register --gen-pkg-config':" - ++ show perror - - maybeLogPath :: IO (Maybe FilePath) - maybeLogPath = - case useLogFile of - Nothing -> return Nothing - Just (mkLogFileName, _) -> do - let logFileName = mkLogFileName (packageId pkg) pkg_key - logDir = takeDirectory logFileName - unless (null logDir) $ createDirectoryIfMissing True logDir - logFileExists <- 
doesFileExist logFileName - when logFileExists $ removeFile logFileName - return (Just logFileName) - - setup cmd flags mLogPath = - Exception.bracket - (maybe (return Nothing) - (\path -> Just `fmap` openFile path AppendMode) mLogPath) - (maybe (return ()) hClose) - (\logFileHandle -> - setupWrapper verbosity - scriptOptions { useLoggingHandle = logFileHandle - , useWorkingDir = workingDir } - (Just pkg) - cmd flags []) - - reexec cmd = do - -- look for our own executable file and re-exec ourselves using a helper - -- program like sudo to elevate privileges: - self <- getExecutablePath - weExist <- doesFileExist self - if weExist - then inDir workingDir $ - rawSystemExit verbosity cmd - [self, "install", "--only" - ,"--verbose=" ++ showForCabal verbosity] - else die $ "Unable to find cabal executable at: " ++ self - - --- helper -onFailure :: (SomeException -> BuildFailure) -> IO BuildResult -> IO BuildResult -onFailure result action = - action `catches` - [ Handler $ \ioe -> handler (ioe :: IOException) - , Handler $ \exit -> handler (exit :: ExitCode) - ] - where - handler :: Exception e => e -> IO BuildResult - handler = return . Left . result . 
toException - - --- ------------------------------------------------------------ --- * Weird windows hacks --- ------------------------------------------------------------ - -withWin32SelfUpgrade :: Verbosity - -> PackageKey - -> ConfigFlags - -> CompilerInfo - -> Platform - -> PackageDescription - -> IO a -> IO a -withWin32SelfUpgrade _ _ _ _ _ _ action | buildOS /= Windows = action -withWin32SelfUpgrade verbosity pkg_key configFlags cinfo platform pkg action = do - - defaultDirs <- InstallDirs.defaultInstallDirs - compFlavor - (fromFlag (configUserInstall configFlags)) - (PackageDescription.hasLibs pkg) - - Win32SelfUpgrade.possibleSelfUpgrade verbosity - (exeInstallPaths defaultDirs) action - - where - pkgid = packageId pkg - (CompilerId compFlavor _) = compilerInfoId cinfo - - exeInstallPaths defaultDirs = - [ InstallDirs.bindir absoluteDirs exeName <.> exeExtension - | exe <- PackageDescription.executables pkg - , PackageDescription.buildable (PackageDescription.buildInfo exe) - , let exeName = prefix ++ PackageDescription.exeName exe ++ suffix - prefix = substTemplate prefixTemplate - suffix = substTemplate suffixTemplate ] - where - fromFlagTemplate = fromFlagOrDefault (InstallDirs.toPathTemplate "") - prefixTemplate = fromFlagTemplate (configProgPrefix configFlags) - suffixTemplate = fromFlagTemplate (configProgSuffix configFlags) - templateDirs = InstallDirs.combineInstallDirs fromFlagOrDefault - defaultDirs (configInstallDirs configFlags) - absoluteDirs = InstallDirs.absoluteInstallDirs - pkgid pkg_key - cinfo InstallDirs.NoCopyDest - platform templateDirs - substTemplate = InstallDirs.fromPathTemplate - . 
InstallDirs.substPathTemplate env - where env = InstallDirs.initialPathTemplateEnv pkgid pkg_key cinfo platform diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/InstallPlan.hs cabal-install-1.22-1.22.9.0/Distribution/Client/InstallPlan.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/InstallPlan.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/InstallPlan.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,627 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.InstallPlan --- Copyright : (c) Duncan Coutts 2008 --- License : BSD-like --- --- Maintainer : duncan@community.haskell.org --- Stability : provisional --- Portability : portable --- --- Package installation plan --- ------------------------------------------------------------------------------ -module Distribution.Client.InstallPlan ( - InstallPlan, - ConfiguredPackage(..), - PlanPackage(..), - - -- * Operations on 'InstallPlan's - new, - toList, - ready, - processing, - completed, - failed, - remove, - showPlanIndex, - showInstallPlan, - - -- ** Query functions - planPlatform, - planCompiler, - - -- * Checking validity of plans - valid, - closed, - consistent, - acyclic, - configuredPackageValid, - - -- ** Details on invalid plans - PlanProblem(..), - showPlanProblem, - PackageProblem(..), - showPackageProblem, - problems, - configuredPackageProblems - ) where - -import Distribution.Client.Types - ( SourcePackage(packageDescription), ConfiguredPackage(..) - , ReadyPackage(..), readyPackageToConfiguredPackage - , InstalledPackage, BuildFailure, BuildSuccess(..), enableStanzas - , InstalledPackage(..), fakeInstalledPackageId ) -import Distribution.Package - ( PackageIdentifier(..), PackageName(..), Package(..), packageName - , PackageFixedDeps(..), Dependency(..), InstalledPackageId - , PackageInstalled(..) 
) -import Distribution.Version - ( Version, withinRange ) -import Distribution.PackageDescription - ( GenericPackageDescription(genPackageFlags) - , Flag(flagName), FlagName(..) ) -import Distribution.Client.PackageUtils - ( externalBuildDepends ) -import Distribution.PackageDescription.Configuration - ( finalizePackageDescription ) -import Distribution.Simple.PackageIndex - ( PackageIndex, FakeMap ) -import qualified Distribution.Simple.PackageIndex as PackageIndex -import Distribution.Text - ( display ) -import Distribution.System - ( Platform ) -import Distribution.Compiler - ( CompilerInfo(..) ) -import Distribution.Client.Utils - ( duplicates, duplicatesBy, mergeBy, MergeResult(..) ) -import Distribution.Simple.Utils - ( comparing, intercalate ) -import qualified Distribution.InstalledPackageInfo as Installed - -import Data.List - ( sort, sortBy ) -import Data.Maybe - ( fromMaybe, maybeToList ) -import qualified Data.Graph as Graph -import Data.Graph (Graph) -import Control.Exception - ( assert ) -import Data.Maybe (catMaybes) -import qualified Data.Map as Map - -type PlanIndex = PackageIndex PlanPackage - --- When cabal tries to install a number of packages, including all their --- dependencies it has a non-trivial problem to solve. --- --- The Problem: --- --- In general we start with a set of installed packages and a set of source --- packages. --- --- Installed packages have fixed dependencies. They have already been built and --- we know exactly what packages they were built against, including their exact --- versions. --- --- Source package have somewhat flexible dependencies. They are specified as --- version ranges, though really they're predicates. To make matters worse they --- have conditional flexible dependencies. Configuration flags can affect which --- packages are required and can place additional constraints on their --- versions. --- --- These two sets of package can and usually do overlap. 
There can be installed --- packages that are also available as source packages which means they could --- be re-installed if required, though there will also be packages which are --- not available as source and cannot be re-installed. Very often there will be --- extra versions available than are installed. Sometimes we may like to prefer --- installed packages over source ones or perhaps always prefer the latest --- available version whether installed or not. --- --- The goal is to calculate an installation plan that is closed, acyclic and --- consistent and where every configured package is valid. --- --- An installation plan is a set of packages that are going to be used --- together. It will consist of a mixture of installed packages and source --- packages along with their exact version dependencies. An installation plan --- is closed if for every package in the set, all of its dependencies are --- also in the set. It is consistent if for every package in the set, all --- dependencies which target that package have the same version. - --- Note that plans do not necessarily compose. You might have a valid plan for --- package A and a valid plan for package B. That does not mean the composition --- is simultaneously valid for A and B. In particular you're most likely to --- have problems with inconsistent dependencies. --- On the other hand it is true that every closed sub plan is valid. - -data PlanPackage = PreExisting InstalledPackage - | Configured ConfiguredPackage - | Processing ReadyPackage - | Installed ReadyPackage BuildSuccess - | Failed ConfiguredPackage BuildFailure - -- ^ NB: packages in the Failed state can be *either* Ready - -- or Configured. 
- -instance Package PlanPackage where - packageId (PreExisting pkg) = packageId pkg - packageId (Configured pkg) = packageId pkg - packageId (Processing pkg) = packageId pkg - packageId (Installed pkg _) = packageId pkg - packageId (Failed pkg _) = packageId pkg - -instance PackageFixedDeps PlanPackage where - depends (PreExisting pkg) = depends pkg - depends (Configured pkg) = depends pkg - depends (Processing pkg) = depends pkg - depends (Installed pkg _) = depends pkg - depends (Failed pkg _) = depends pkg - -instance PackageInstalled PlanPackage where - installedPackageId (PreExisting pkg) = installedPackageId pkg - installedPackageId (Configured pkg) = installedPackageId pkg - installedPackageId (Processing pkg) = installedPackageId pkg - -- NB: defer to the actual installed package info in this case - installedPackageId (Installed _ (BuildOk _ _ (Just ipkg))) = installedPackageId ipkg - installedPackageId (Installed pkg _) = installedPackageId pkg - installedPackageId (Failed pkg _) = installedPackageId pkg - - installedDepends (PreExisting pkg) = installedDepends pkg - installedDepends (Configured pkg) = installedDepends pkg - installedDepends (Processing pkg) = installedDepends pkg - installedDepends (Installed _ (BuildOk _ _ (Just ipkg))) = installedDepends ipkg - installedDepends (Installed pkg _) = installedDepends pkg - installedDepends (Failed pkg _) = installedDepends pkg - -data InstallPlan = InstallPlan { - planIndex :: PlanIndex, - planFakeMap :: FakeMap, - planGraph :: Graph, - planGraphRev :: Graph, - planPkgOf :: Graph.Vertex -> PlanPackage, - planVertexOf :: InstalledPackageId -> Graph.Vertex, - planPlatform :: Platform, - planCompiler :: CompilerInfo - } - -invariant :: InstallPlan -> Bool -invariant plan = - valid (planPlatform plan) (planCompiler plan) (planFakeMap plan) (planIndex plan) - -internalError :: String -> a -internalError msg = error $ "InstallPlan: internal error: " ++ msg - -showPlanIndex :: PlanIndex -> String -showPlanIndex 
index = - intercalate "\n" (map showPlanPackage (PackageIndex.allPackages index)) - where showPlanPackage p = - showPlanPackageTag p ++ " " - ++ display (packageId p) ++ " (" - ++ display (installedPackageId p) ++ ")" - -showInstallPlan :: InstallPlan -> String -showInstallPlan plan = - showPlanIndex (planIndex plan) ++ "\n" ++ - "fake map:\n " ++ intercalate "\n " (map showKV (Map.toList (planFakeMap plan))) - where showKV (k,v) = display k ++ " -> " ++ display v - -showPlanPackageTag :: PlanPackage -> String -showPlanPackageTag (PreExisting _) = "PreExisting" -showPlanPackageTag (Configured _) = "Configured" -showPlanPackageTag (Processing _) = "Processing" -showPlanPackageTag (Installed _ _) = "Installed" -showPlanPackageTag (Failed _ _) = "Failed" - --- | Build an installation plan from a valid set of resolved packages. --- -new :: Platform -> CompilerInfo -> PlanIndex - -> Either [PlanProblem] InstallPlan -new platform cinfo index = - -- NB: Need to pre-initialize the fake-map with pre-existing - -- packages - let isPreExisting (PreExisting _) = True - isPreExisting _ = False - fakeMap = Map.fromList - . map (\p -> (fakeInstalledPackageId (packageId p), installedPackageId p)) - . filter isPreExisting - $ PackageIndex.allPackages index in - case problems platform cinfo fakeMap index of - [] -> Right InstallPlan { - planIndex = index, - planFakeMap = fakeMap, - planGraph = graph, - planGraphRev = Graph.transposeG graph, - planPkgOf = vertexToPkgId, - planVertexOf = fromMaybe noSuchPkgId . pkgIdToVertex, - planPlatform = platform, - planCompiler = cinfo - } - where (graph, vertexToPkgId, pkgIdToVertex) = - PackageIndex.dependencyGraph index - -- NB: doesn't need to know planFakeMap because the - -- fakemap is empty at this point. - noSuchPkgId = internalError "package is not in the graph" - probs -> Left probs - -toList :: InstallPlan -> [PlanPackage] -toList = PackageIndex.allPackages . planIndex - --- | Remove packages from the install plan. 
This will result in an --- error if there are remaining packages that depend on any matching --- package. This is primarily useful for obtaining an install plan for --- the dependencies of a package or set of packages without actually --- installing the package itself, as when doing development. --- -remove :: (PlanPackage -> Bool) - -> InstallPlan - -> Either [PlanProblem] InstallPlan -remove shouldRemove plan = - new (planPlatform plan) (planCompiler plan) newIndex - where - newIndex = PackageIndex.fromList $ - filter (not . shouldRemove) (toList plan) - --- | The packages that are ready to be installed. That is they are in the --- configured state and have all their dependencies installed already. --- The plan is complete if the result is @[]@. --- -ready :: InstallPlan -> [ReadyPackage] -ready plan = assert check readyPackages - where - check = if null readyPackages && null processingPackages - then null configuredPackages - else True - configuredPackages = [ pkg | Configured pkg <- toList plan ] - processingPackages = [ pkg | Processing pkg <- toList plan] - - readyPackages :: [ReadyPackage] - readyPackages = - [ ReadyPackage srcPkg flags stanzas deps - | pkg@(ConfiguredPackage srcPkg flags stanzas _) <- configuredPackages - -- select only the package that have all of their deps installed: - , deps <- maybeToList (hasAllInstalledDeps pkg) - ] - - hasAllInstalledDeps :: ConfiguredPackage -> Maybe [Installed.InstalledPackageInfo] - hasAllInstalledDeps = mapM isInstalledDep . 
installedDepends - - isInstalledDep :: InstalledPackageId -> Maybe Installed.InstalledPackageInfo - isInstalledDep pkgid = - -- NB: Need to check if the ID has been updated in planFakeMap, in which case we - -- might be dealing with an old pointer - case PackageIndex.fakeLookupInstalledPackageId (planFakeMap plan) (planIndex plan) pkgid of - Just (Configured _) -> Nothing - Just (Processing _) -> Nothing - Just (Failed _ _) -> internalError depOnFailed - Just (PreExisting (InstalledPackage instPkg _)) -> Just instPkg - Just (Installed _ (BuildOk _ _ (Just instPkg))) -> Just instPkg - Just (Installed _ (BuildOk _ _ Nothing)) -> internalError depOnNonLib - Nothing -> internalError incomplete - incomplete = "install plan is not closed" - depOnFailed = "configured package depends on failed package" - depOnNonLib = "configured package depends on a non-library package" - --- | Marks packages in the graph as currently processing (e.g. building). --- --- * The package must exist in the graph and be in the configured state. --- -processing :: [ReadyPackage] -> InstallPlan -> InstallPlan -processing pkgs plan = assert (invariant plan') plan' - where - plan' = plan { - planIndex = PackageIndex.merge (planIndex plan) processingPkgs - } - processingPkgs = PackageIndex.fromList [Processing pkg | pkg <- pkgs] - --- | Marks a package in the graph as completed. Also saves the build result for --- the completed package in the plan. --- --- * The package must exist in the graph and be in the processing state. --- * The package must have had no uninstalled dependent packages. --- -completed :: InstalledPackageId - -> BuildSuccess - -> InstallPlan -> InstallPlan -completed pkgid buildResult plan = assert (invariant plan') plan' - where - plan' = plan { - -- NB: installation can change the IPID, so better - -- record it in the fake mapping... - planFakeMap = insert_fake_mapping buildResult - $ planFakeMap plan, - planIndex = PackageIndex.insert installed - . 
PackageIndex.deleteInstalledPackageId pkgid - $ planIndex plan - } - -- ...but be sure to use the *old* IPID for the lookup for the - -- preexisting record - installed = Installed (lookupProcessingPackage plan pkgid) buildResult - insert_fake_mapping (BuildOk _ _ (Just ipi)) = Map.insert pkgid (installedPackageId ipi) - insert_fake_mapping _ = id - --- | Marks a package in the graph as having failed. It also marks all the --- packages that depended on it as having failed. --- --- * The package must exist in the graph and be in the processing --- state. --- -failed :: InstalledPackageId -- ^ The id of the package that failed to install - -> BuildFailure -- ^ The build result to use for the failed package - -> BuildFailure -- ^ The build result to use for its dependencies - -> InstallPlan - -> InstallPlan -failed pkgid buildResult buildResult' plan = assert (invariant plan') plan' - where - -- NB: failures don't update IPIDs - plan' = plan { - planIndex = PackageIndex.merge (planIndex plan) failures - } - pkg = lookupProcessingPackage plan pkgid - failures = PackageIndex.fromList - $ Failed (readyPackageToConfiguredPackage pkg) buildResult - : [ Failed pkg' buildResult' - | Just pkg' <- map checkConfiguredPackage - $ packagesThatDependOn plan pkgid ] - --- | Lookup the reachable packages in the reverse dependency graph. --- -packagesThatDependOn :: InstallPlan - -> InstalledPackageId -> [PlanPackage] -packagesThatDependOn plan pkgid = map (planPkgOf plan) - . tail - . Graph.reachable (planGraphRev plan) - . planVertexOf plan - $ Map.findWithDefault pkgid pkgid (planFakeMap plan) - --- | Lookup a package that we expect to be in the processing state. 
--- -lookupProcessingPackage :: InstallPlan - -> InstalledPackageId -> ReadyPackage -lookupProcessingPackage plan pkgid = - -- NB: processing packages are guaranteed to not indirect through - -- planFakeMap - case PackageIndex.lookupInstalledPackageId (planIndex plan) pkgid of - Just (Processing pkg) -> pkg - _ -> internalError $ "not in processing state or no such pkg " ++ display pkgid - --- | Check a package that we expect to be in the configured or failed state. --- -checkConfiguredPackage :: PlanPackage -> Maybe ConfiguredPackage -checkConfiguredPackage (Configured pkg) = Just pkg -checkConfiguredPackage (Failed _ _) = Nothing -checkConfiguredPackage pkg = - internalError $ "not configured or no such pkg " ++ display (packageId pkg) - --- ------------------------------------------------------------ --- * Checking validity of plans --- ------------------------------------------------------------ - --- | A valid installation plan is a set of packages that is 'acyclic', --- 'closed' and 'consistent'. Also, every 'ConfiguredPackage' in the --- plan has to have a valid configuration (see 'configuredPackageValid'). --- --- * if the result is @False@ use 'problems' to get a detailed list. 
--- -valid :: Platform -> CompilerInfo -> FakeMap -> PlanIndex -> Bool -valid platform cinfo fakeMap index = null (problems platform cinfo fakeMap index) - -data PlanProblem = - PackageInvalid ConfiguredPackage [PackageProblem] - | PackageMissingDeps PlanPackage [PackageIdentifier] - | PackageCycle [PlanPackage] - | PackageInconsistency PackageName [(PackageIdentifier, Version)] - | PackageStateInvalid PlanPackage PlanPackage - -showPlanProblem :: PlanProblem -> String -showPlanProblem (PackageInvalid pkg packageProblems) = - "Package " ++ display (packageId pkg) - ++ " has an invalid configuration, in particular:\n" - ++ unlines [ " " ++ showPackageProblem problem - | problem <- packageProblems ] - -showPlanProblem (PackageMissingDeps pkg missingDeps) = - "Package " ++ display (packageId pkg) - ++ " depends on the following packages which are missing from the plan: " - ++ intercalate ", " (map display missingDeps) - -showPlanProblem (PackageCycle cycleGroup) = - "The following packages are involved in a dependency cycle " - ++ intercalate ", " (map (display.packageId) cycleGroup) - -showPlanProblem (PackageInconsistency name inconsistencies) = - "Package " ++ display name - ++ " is required by several packages," - ++ " but they require inconsistent versions:\n" - ++ unlines [ " package " ++ display pkg ++ " requires " - ++ display (PackageIdentifier name ver) - | (pkg, ver) <- inconsistencies ] - -showPlanProblem (PackageStateInvalid pkg pkg') = - "Package " ++ display (packageId pkg) - ++ " is in the " ++ showPlanState pkg - ++ " state but it depends on package " ++ display (packageId pkg') - ++ " which is in the " ++ showPlanState pkg' - ++ " state" - where - showPlanState (PreExisting _) = "pre-existing" - showPlanState (Configured _) = "configured" - showPlanState (Processing _) = "processing" - showPlanState (Installed _ _) = "installed" - showPlanState (Failed _ _) = "failed" - --- | For an invalid plan, produce a detailed list of problems as human readable 
--- error messages. This is mainly intended for debugging purposes. --- Use 'showPlanProblem' for a human readable explanation. --- -problems :: Platform -> CompilerInfo -> FakeMap - -> PlanIndex -> [PlanProblem] -problems platform cinfo fakeMap index = - [ PackageInvalid pkg packageProblems - | Configured pkg <- PackageIndex.allPackages index - , let packageProblems = configuredPackageProblems platform cinfo pkg - , not (null packageProblems) ] - - ++ [ PackageMissingDeps pkg (catMaybes (map (fmap packageId . PackageIndex.fakeLookupInstalledPackageId fakeMap index) missingDeps)) - | (pkg, missingDeps) <- PackageIndex.brokenPackages' fakeMap index ] - - ++ [ PackageCycle cycleGroup - | cycleGroup <- PackageIndex.dependencyCycles' fakeMap index ] - - ++ [ PackageInconsistency name inconsistencies - | (name, inconsistencies) <- PackageIndex.dependencyInconsistencies' fakeMap index ] - - ++ [ PackageStateInvalid pkg pkg' - | pkg <- PackageIndex.allPackages index - , Just pkg' <- map (PackageIndex.fakeLookupInstalledPackageId fakeMap index) (installedDepends pkg) - , not (stateDependencyRelation pkg pkg') ] - --- | The graph of packages (nodes) and dependencies (edges) must be acyclic. --- --- * if the result is @False@ use 'PackageIndex.dependencyCycles' to find out --- which packages are involved in dependency cycles. --- -acyclic :: PlanIndex -> Bool -acyclic = null . PackageIndex.dependencyCycles - --- | An installation plan is closed if for every package in the set, all of --- its dependencies are also in the set. That is, the set is closed under the --- dependency relation. --- --- * if the result is @False@ use 'PackageIndex.brokenPackages' to find out --- which packages depend on packages not in the index. --- -closed :: PlanIndex -> Bool -closed = null . PackageIndex.brokenPackages - --- | An installation plan is consistent if all dependencies that target a --- single package name, target the same version. --- --- This is slightly subtle. 
It is not the same as requiring that there be at --- most one version of any package in the set. It only requires that of --- packages which have more than one other package depending on them. We could --- actually make the condition even more precise and say that different --- versions are OK so long as they are not both in the transitive closure of --- any other package (or equivalently that their inverse closures do not --- intersect). The point is we do not want to have any packages depending --- directly or indirectly on two different versions of the same package. The --- current definition is just a safe approximation of that. --- --- * if the result is @False@ use 'PackageIndex.dependencyInconsistencies' to --- find out which packages are. --- -consistent :: PlanIndex -> Bool -consistent = null . PackageIndex.dependencyInconsistencies - --- | The states of packages have that depend on each other must respect --- this relation. That is for very case where package @a@ depends on --- package @b@ we require that @dependencyStatesOk a b = True@. 
--- -stateDependencyRelation :: PlanPackage -> PlanPackage -> Bool -stateDependencyRelation (PreExisting _) (PreExisting _) = True - -stateDependencyRelation (Configured _) (PreExisting _) = True -stateDependencyRelation (Configured _) (Configured _) = True -stateDependencyRelation (Configured _) (Processing _) = True -stateDependencyRelation (Configured _) (Installed _ _) = True - -stateDependencyRelation (Processing _) (PreExisting _) = True -stateDependencyRelation (Processing _) (Installed _ _) = True - -stateDependencyRelation (Installed _ _) (PreExisting _) = True -stateDependencyRelation (Installed _ _) (Installed _ _) = True - -stateDependencyRelation (Failed _ _) (PreExisting _) = True --- failed can depends on configured because a package can depend on --- several other packages and if one of the deps fail then we fail --- but we still depend on the other ones that did not fail: -stateDependencyRelation (Failed _ _) (Configured _) = True -stateDependencyRelation (Failed _ _) (Processing _) = True -stateDependencyRelation (Failed _ _) (Installed _ _) = True -stateDependencyRelation (Failed _ _) (Failed _ _) = True - -stateDependencyRelation _ _ = False - --- | A 'ConfiguredPackage' is valid if the flag assignment is total and if --- in the configuration given by the flag assignment, all the package --- dependencies are satisfied by the specified packages. 
--- -configuredPackageValid :: Platform -> CompilerInfo -> ConfiguredPackage -> Bool -configuredPackageValid platform cinfo pkg = - null (configuredPackageProblems platform cinfo pkg) - -data PackageProblem = DuplicateFlag FlagName - | MissingFlag FlagName - | ExtraFlag FlagName - | DuplicateDeps [PackageIdentifier] - | MissingDep Dependency - | ExtraDep PackageIdentifier - | InvalidDep Dependency PackageIdentifier - -showPackageProblem :: PackageProblem -> String -showPackageProblem (DuplicateFlag (FlagName flag)) = - "duplicate flag in the flag assignment: " ++ flag - -showPackageProblem (MissingFlag (FlagName flag)) = - "missing an assignment for the flag: " ++ flag - -showPackageProblem (ExtraFlag (FlagName flag)) = - "extra flag given that is not used by the package: " ++ flag - -showPackageProblem (DuplicateDeps pkgids) = - "duplicate packages specified as selected dependencies: " - ++ intercalate ", " (map display pkgids) - -showPackageProblem (MissingDep dep) = - "the package has a dependency " ++ display dep - ++ " but no package has been selected to satisfy it." - -showPackageProblem (ExtraDep pkgid) = - "the package configuration specifies " ++ display pkgid - ++ " but (with the given flag assignment) the package does not actually" - ++ " depend on any version of that package." - -showPackageProblem (InvalidDep dep pkgid) = - "the package depends on " ++ display dep - ++ " but the configuration specifies " ++ display pkgid - ++ " which does not satisfy the dependency." 
- -configuredPackageProblems :: Platform -> CompilerInfo - -> ConfiguredPackage -> [PackageProblem] -configuredPackageProblems platform cinfo - (ConfiguredPackage pkg specifiedFlags stanzas specifiedDeps) = - [ DuplicateFlag flag | ((flag,_):_) <- duplicates specifiedFlags ] - ++ [ MissingFlag flag | OnlyInLeft flag <- mergedFlags ] - ++ [ ExtraFlag flag | OnlyInRight flag <- mergedFlags ] - ++ [ DuplicateDeps pkgs - | pkgs <- duplicatesBy (comparing packageName) specifiedDeps ] - ++ [ MissingDep dep | OnlyInLeft dep <- mergedDeps ] - ++ [ ExtraDep pkgid | OnlyInRight pkgid <- mergedDeps ] - ++ [ InvalidDep dep pkgid | InBoth dep pkgid <- mergedDeps - , not (packageSatisfiesDependency pkgid dep) ] - where - mergedFlags = mergeBy compare - (sort $ map flagName (genPackageFlags (packageDescription pkg))) - (sort $ map fst specifiedFlags) - - mergedDeps = mergeBy - (\dep pkgid -> dependencyName dep `compare` packageName pkgid) - (sortBy (comparing dependencyName) requiredDeps) - (sortBy (comparing packageName) specifiedDeps) - - packageSatisfiesDependency - (PackageIdentifier name version) - (Dependency name' versionRange) = assert (name == name') $ - version `withinRange` versionRange - - dependencyName (Dependency name _) = name - - requiredDeps :: [Dependency] - requiredDeps = - --TODO: use something lower level than finalizePackageDescription - case finalizePackageDescription specifiedFlags - (const True) - platform cinfo - [] - (enableStanzas stanzas $ packageDescription pkg) of - Right (resolvedPkg, _) -> externalBuildDepends resolvedPkg - Left _ -> error "configuredPackageInvalidDeps internal error" diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/InstallSymlink.hs cabal-install-1.22-1.22.9.0/Distribution/Client/InstallSymlink.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/InstallSymlink.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/InstallSymlink.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,245 +0,0 
@@ -{-# LANGUAGE CPP #-} ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.InstallSymlink --- Copyright : (c) Duncan Coutts 2008 --- License : BSD-like --- --- Maintainer : cabal-devel@haskell.org --- Stability : provisional --- Portability : portable --- --- Managing installing binaries with symlinks. ------------------------------------------------------------------------------ -module Distribution.Client.InstallSymlink ( - symlinkBinaries, - symlinkBinary, - ) where - -#if mingw32_HOST_OS - -import Distribution.Package (PackageIdentifier) -import Distribution.Client.InstallPlan (InstallPlan) -import Distribution.Client.Setup (InstallFlags) -import Distribution.Simple.Setup (ConfigFlags) -import Distribution.Simple.Compiler - -symlinkBinaries :: Compiler - -> ConfigFlags - -> InstallFlags - -> InstallPlan - -> IO [(PackageIdentifier, String, FilePath)] -symlinkBinaries _ _ _ _ = return [] - -symlinkBinary :: FilePath -> FilePath -> String -> String -> IO Bool -symlinkBinary _ _ _ _ = fail "Symlinking feature not available on Windows" - -#else - -import Distribution.Client.Types - ( SourcePackage(..), ReadyPackage(..), enableStanzas ) -import Distribution.Client.Setup - ( InstallFlags(installSymlinkBinDir) ) -import qualified Distribution.Client.InstallPlan as InstallPlan -import Distribution.Client.InstallPlan (InstallPlan) - -import Distribution.Package - ( PackageIdentifier, Package(packageId), mkPackageKey, PackageKey ) -import Distribution.Compiler - ( CompilerId(..) 
) -import qualified Distribution.PackageDescription as PackageDescription -import Distribution.PackageDescription - ( PackageDescription ) -import Distribution.PackageDescription.Configuration - ( finalizePackageDescription ) -import Distribution.Simple.Setup - ( ConfigFlags(..), fromFlag, fromFlagOrDefault, flagToMaybe ) -import qualified Distribution.Simple.InstallDirs as InstallDirs -import qualified Distribution.InstalledPackageInfo as Installed -import Distribution.Simple.Compiler - ( Compiler, CompilerInfo(..), packageKeySupported ) - -import System.Posix.Files - ( getSymbolicLinkStatus, isSymbolicLink, createSymbolicLink - , removeLink ) -import System.Directory - ( canonicalizePath ) -import System.FilePath - ( (), splitPath, joinPath, isAbsolute ) - -import Prelude hiding (ioError) -import System.IO.Error - ( isDoesNotExistError, ioError ) -import Distribution.Compat.Exception ( catchIO ) -import Control.Exception - ( assert ) -import Data.Maybe - ( catMaybes ) - --- | We would like by default to install binaries into some location that is on --- the user's PATH. For per-user installations on Unix systems that basically --- means the @~/bin/@ directory. On the majority of platforms the @~/bin/@ --- directory will be on the user's PATH. However some people are a bit nervous --- about letting a package manager install programs into @~/bin/@. --- --- A compromise solution is that instead of installing binaries directly into --- @~/bin/@, we could install them in a private location under @~/.cabal/bin@ --- and then create symlinks in @~/bin/@. We can be careful when setting up the --- symlinks that we do not overwrite any binary that the user installed. We can --- check if it was a symlink we made because it would point to the private dir --- where we install our binaries. 
This means we can install normally without --- worrying and in a later phase set up symlinks, and if that fails then we --- report it to the user, but even in this case the package is still in an OK --- installed state. --- --- This is an optional feature that users can choose to use or not. It is --- controlled from the config file. Of course it only works on POSIX systems --- with symlinks so is not available to Windows users. --- -symlinkBinaries :: Compiler - -> ConfigFlags - -> InstallFlags - -> InstallPlan - -> IO [(PackageIdentifier, String, FilePath)] -symlinkBinaries comp configFlags installFlags plan = - case flagToMaybe (installSymlinkBinDir installFlags) of - Nothing -> return [] - Just symlinkBinDir - | null exes -> return [] - | otherwise -> do - publicBinDir <- canonicalizePath symlinkBinDir --- TODO: do we want to do this here? : --- createDirectoryIfMissing True publicBinDir - fmap catMaybes $ sequence - [ do privateBinDir <- pkgBinDir pkg pkg_key - ok <- symlinkBinary - publicBinDir privateBinDir - publicExeName privateExeName - if ok - then return Nothing - else return (Just (pkgid, publicExeName, - privateBinDir privateExeName)) - | (ReadyPackage _ _flags _ deps, pkg, exe) <- exes - , let pkgid = packageId pkg - pkg_key = mkPackageKey (packageKeySupported comp) pkgid - (map Installed.packageKey deps) [] - publicExeName = PackageDescription.exeName exe - privateExeName = prefix ++ publicExeName ++ suffix - prefix = substTemplate pkgid pkg_key prefixTemplate - suffix = substTemplate pkgid pkg_key suffixTemplate ] - where - exes = - [ (cpkg, pkg, exe) - | InstallPlan.Installed cpkg _ <- InstallPlan.toList plan - , let pkg = pkgDescription cpkg - , exe <- PackageDescription.executables pkg - , PackageDescription.buildable (PackageDescription.buildInfo exe) ] - - pkgDescription :: ReadyPackage -> PackageDescription - pkgDescription (ReadyPackage (SourcePackage _ pkg _ _) flags stanzas _) = - case finalizePackageDescription flags - (const True) - 
platform cinfo [] (enableStanzas stanzas pkg) of - Left _ -> error "finalizePackageDescription ReadyPackage failed" - Right (desc, _) -> desc - - -- This is sadly rather complicated. We're kind of re-doing part of the - -- configuration for the package. :-( - pkgBinDir :: PackageDescription -> PackageKey -> IO FilePath - pkgBinDir pkg pkg_key = do - defaultDirs <- InstallDirs.defaultInstallDirs - compilerFlavor - (fromFlag (configUserInstall configFlags)) - (PackageDescription.hasLibs pkg) - let templateDirs = InstallDirs.combineInstallDirs fromFlagOrDefault - defaultDirs (configInstallDirs configFlags) - absoluteDirs = InstallDirs.absoluteInstallDirs - (packageId pkg) pkg_key - cinfo InstallDirs.NoCopyDest - platform templateDirs - canonicalizePath (InstallDirs.bindir absoluteDirs) - - substTemplate pkgid pkg_key = InstallDirs.fromPathTemplate - . InstallDirs.substPathTemplate env - where env = InstallDirs.initialPathTemplateEnv pkgid pkg_key - cinfo platform - - fromFlagTemplate = fromFlagOrDefault (InstallDirs.toPathTemplate "") - prefixTemplate = fromFlagTemplate (configProgPrefix configFlags) - suffixTemplate = fromFlagTemplate (configProgSuffix configFlags) - platform = InstallPlan.planPlatform plan - cinfo = InstallPlan.planCompiler plan - (CompilerId compilerFlavor _) = compilerInfoId cinfo - -symlinkBinary :: FilePath -- ^ The canonical path of the public bin dir - -- eg @/home/user/bin@ - -> FilePath -- ^ The canonical path of the private bin dir - -- eg @/home/user/.cabal/bin@ - -> String -- ^ The name of the executable to go in the public - -- bin dir, eg @foo@ - -> String -- ^ The name of the executable to in the private bin - -- dir, eg @foo-1.0@ - -> IO Bool -- ^ If creating the symlink was successful. @False@ - -- if there was another file there already that we - -- did not own. Other errors like permission errors - -- just propagate as exceptions. 
-symlinkBinary publicBindir privateBindir publicName privateName = do - ok <- targetOkToOverwrite (publicBindir publicName) - (privateBindir privateName) - case ok of - NotOurFile -> return False - NotExists -> mkLink >> return True - OkToOverwrite -> rmLink >> mkLink >> return True - where - relativeBindir = makeRelative publicBindir privateBindir - mkLink = createSymbolicLink (relativeBindir privateName) - (publicBindir publicName) - rmLink = removeLink (publicBindir publicName) - --- | Check a file path of a symlink that we would like to create to see if it --- is OK. For it to be OK to overwrite it must either not already exist yet or --- be a symlink to our target (in which case we can assume ownership). --- -targetOkToOverwrite :: FilePath -- ^ The file path of the symlink to the private - -- binary that we would like to create - -> FilePath -- ^ The canonical path of the private binary. - -- Use 'canonicalizePath' to make this. - -> IO SymlinkStatus -targetOkToOverwrite symlink target = handleNotExist $ do - status <- getSymbolicLinkStatus symlink - if not (isSymbolicLink status) - then return NotOurFile - else do target' <- canonicalizePath symlink - -- This relies on canonicalizePath handling symlinks - if target == target' - then return OkToOverwrite - else return NotOurFile - - where - handleNotExist action = catchIO action $ \ioexception -> - -- If the target doesn't exist then there's no problem overwriting it! - if isDoesNotExistError ioexception - then return NotExists - else ioError ioexception - -data SymlinkStatus - = NotExists -- ^ The file doesn't exist so we can make a symlink. - | OkToOverwrite -- ^ A symlink already exists, though it is ours. We'll - -- have to delete it first before we make a new symlink. - | NotOurFile -- ^ A file already exists and it is not one of our existing - -- symlinks (either because it is not a symlink or because - -- it points somewhere other than our managed space). 
- deriving Show - --- | Take two canonical paths and produce a relative path to get from the first --- to the second, even if it means adding @..@ path components. --- -makeRelative :: FilePath -> FilePath -> FilePath -makeRelative a b = assert (isAbsolute a && isAbsolute b) $ - let as = splitPath a - bs = splitPath b - commonLen = length $ takeWhile id $ zipWith (==) as bs - in joinPath $ [ ".." | _ <- drop commonLen as ] - ++ drop commonLen bs - -#endif diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/JobControl.hs cabal-install-1.22-1.22.9.0/Distribution/Client/JobControl.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/JobControl.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/JobControl.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,89 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.JobControl --- Copyright : (c) Duncan Coutts 2012 --- License : BSD-like --- --- Maintainer : cabal-devel@haskell.org --- Stability : provisional --- Portability : portable --- --- A job control concurrency abstraction ------------------------------------------------------------------------------ -module Distribution.Client.JobControl ( - JobControl, - newSerialJobControl, - newParallelJobControl, - spawnJob, - collectJob, - - JobLimit, - newJobLimit, - withJobLimit, - - Lock, - newLock, - criticalSection - ) where - -import Control.Monad -import Control.Concurrent hiding (QSem, newQSem, waitQSem, signalQSem) -import Control.Exception (SomeException, bracket_, mask, throw, try) -import Distribution.Client.Compat.Semaphore - -data JobControl m a = JobControl { - spawnJob :: m a -> m (), - collectJob :: m a - } - - -newSerialJobControl :: IO (JobControl IO a) -newSerialJobControl = do - queue <- newChan - return JobControl { - spawnJob = spawn queue, - collectJob = collect queue - } - where - spawn :: Chan (IO a) -> IO a -> IO () - spawn = writeChan - 
- collect :: Chan (IO a) -> IO a - collect = join . readChan - -newParallelJobControl :: IO (JobControl IO a) -newParallelJobControl = do - resultVar <- newEmptyMVar - return JobControl { - spawnJob = spawn resultVar, - collectJob = collect resultVar - } - where - spawn :: MVar (Either SomeException a) -> IO a -> IO () - spawn resultVar job = - mask $ \restore -> - forkIO (do res <- try (restore job) - putMVar resultVar res) - >> return () - - collect :: MVar (Either SomeException a) -> IO a - collect resultVar = - takeMVar resultVar >>= either throw return - -data JobLimit = JobLimit QSem - -newJobLimit :: Int -> IO JobLimit -newJobLimit n = - fmap JobLimit (newQSem n) - -withJobLimit :: JobLimit -> IO a -> IO a -withJobLimit (JobLimit sem) = - bracket_ (waitQSem sem) (signalQSem sem) - -newtype Lock = Lock (MVar ()) - -newLock :: IO Lock -newLock = fmap Lock $ newMVar () - -criticalSection :: Lock -> IO a -> IO a -criticalSection (Lock lck) act = bracket_ (takeMVar lck) (putMVar lck ()) act diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/List.hs cabal-install-1.22-1.22.9.0/Distribution/Client/List.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/List.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/List.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,589 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.List --- Copyright : (c) David Himmelstrup 2005 --- Duncan Coutts 2008-2011 --- License : BSD-like --- --- Maintainer : cabal-devel@haskell.org --- --- Search for and print information about packages ------------------------------------------------------------------------------ -module Distribution.Client.List ( - list, info - ) where - -import Distribution.Package - ( PackageName(..), Package(..), packageName, packageVersion - , Dependency(..), simplifyDependency ) -import Distribution.ModuleName (ModuleName) -import 
Distribution.License (License) -import qualified Distribution.InstalledPackageInfo as Installed -import qualified Distribution.PackageDescription as Source -import Distribution.PackageDescription - ( Flag(..), FlagName(..) ) -import Distribution.PackageDescription.Configuration - ( flattenPackageDescription ) - -import Distribution.Simple.Compiler - ( Compiler, PackageDBStack ) -import Distribution.Simple.Program (ProgramConfiguration) -import Distribution.Simple.Utils - ( equating, comparing, die, notice ) -import Distribution.Simple.Setup (fromFlag) -import Distribution.Simple.PackageIndex (InstalledPackageIndex) -import qualified Distribution.Simple.PackageIndex as InstalledPackageIndex -import qualified Distribution.Client.PackageIndex as PackageIndex -import Distribution.Version - ( Version(..), VersionRange, withinRange, anyVersion - , intersectVersionRanges, simplifyVersionRange ) -import Distribution.Verbosity (Verbosity) -import Distribution.Text - ( Text(disp), display ) - -import Distribution.Client.Types - ( SourcePackage(..), Repo, SourcePackageDb(..) ) -import Distribution.Client.Dependency.Types - ( PackageConstraint(..), ExtDependency(..) ) -import Distribution.Client.Targets - ( UserTarget, resolveUserTargets, PackageSpecifier(..) ) -import Distribution.Client.Setup - ( GlobalFlags(..), ListFlags(..), InfoFlags(..) ) -import Distribution.Client.Utils - ( mergeBy, MergeResult(..) 
) -import Distribution.Client.IndexUtils as IndexUtils - ( getSourcePackages, getInstalledPackages ) -import Distribution.Client.FetchUtils - ( isFetched ) - -import Data.List - ( sortBy, groupBy, sort, nub, intersperse, maximumBy, partition ) -import Data.Maybe - ( listToMaybe, fromJust, fromMaybe, isJust ) -import qualified Data.Map as Map -import Data.Tree as Tree -import Control.Monad - ( MonadPlus(mplus), join ) -import Control.Exception - ( assert ) -import Text.PrettyPrint as Disp -import System.Directory - ( doesDirectoryExist ) - - --- | Return a list of packages matching given search strings. -getPkgList :: Verbosity - -> PackageDBStack - -> [Repo] - -> Compiler - -> ProgramConfiguration - -> ListFlags - -> [String] - -> IO [PackageDisplayInfo] -getPkgList verbosity packageDBs repos comp conf listFlags pats = do - installedPkgIndex <- getInstalledPackages verbosity comp packageDBs conf - sourcePkgDb <- getSourcePackages verbosity repos - let sourcePkgIndex = packageIndex sourcePkgDb - prefs name = fromMaybe anyVersion - (Map.lookup name (packagePreferences sourcePkgDb)) - - pkgsInfo :: - [(PackageName, [Installed.InstalledPackageInfo], [SourcePackage])] - pkgsInfo - -- gather info for all packages - | null pats = mergePackages - (InstalledPackageIndex.allPackages installedPkgIndex) - ( PackageIndex.allPackages sourcePkgIndex) - - -- gather info for packages matching search term - | otherwise = pkgsInfoMatching - - pkgsInfoMatching :: - [(PackageName, [Installed.InstalledPackageInfo], [SourcePackage])] - pkgsInfoMatching = - let matchingInstalled = matchingPackages - InstalledPackageIndex.searchByNameSubstring - installedPkgIndex - matchingSource = matchingPackages - (\ idx n -> - concatMap snd - (PackageIndex.searchByNameSubstring idx n)) - sourcePkgIndex - in mergePackages matchingInstalled matchingSource - - matches :: [PackageDisplayInfo] - matches = [ mergePackageInfo pref - installedPkgs sourcePkgs selectedPkg False - | (pkgname, installedPkgs, 
sourcePkgs) <- pkgsInfo - , not onlyInstalled || not (null installedPkgs) - , let pref = prefs pkgname - selectedPkg = latestWithPref pref sourcePkgs ] - return matches - where - onlyInstalled = fromFlag (listInstalled listFlags) - matchingPackages search index = - [ pkg - | pat <- pats - , pkg <- search index pat ] - - --- | Show information about packages. -list :: Verbosity - -> PackageDBStack - -> [Repo] - -> Compiler - -> ProgramConfiguration - -> ListFlags - -> [String] - -> IO () -list verbosity packageDBs repos comp conf listFlags pats = do - matches <- getPkgList verbosity packageDBs repos comp conf listFlags pats - - if simpleOutput - then putStr $ unlines - [ display (pkgName pkg) ++ " " ++ display version - | pkg <- matches - , version <- if onlyInstalled - then installedVersions pkg - else nub . sort $ installedVersions pkg - ++ sourceVersions pkg ] - -- Note: this only works because for 'list', one cannot currently - -- specify any version constraints, so listing all installed - -- and source ones works. - else - if null matches - then notice verbosity "No matches found." - else putStr $ unlines (map showPackageSummaryInfo matches) - where - onlyInstalled = fromFlag (listInstalled listFlags) - simpleOutput = fromFlag (listSimpleOutput listFlags) - -info :: Verbosity - -> PackageDBStack - -> [Repo] - -> Compiler - -> ProgramConfiguration - -> GlobalFlags - -> InfoFlags - -> [UserTarget] - -> IO () -info verbosity _ _ _ _ _ _ [] = - notice verbosity "No packages requested. Nothing to do." 
- -info verbosity packageDBs repos comp conf - globalFlags _listFlags userTargets = do - - installedPkgIndex <- getInstalledPackages verbosity comp packageDBs conf - sourcePkgDb <- getSourcePackages verbosity repos - let sourcePkgIndex = packageIndex sourcePkgDb - prefs name = fromMaybe anyVersion - (Map.lookup name (packagePreferences sourcePkgDb)) - - -- Users may specify names of packages that are only installed, not - -- just available source packages, so we must resolve targets using - -- the combination of installed and source packages. - let sourcePkgs' = PackageIndex.fromList - $ map packageId - (InstalledPackageIndex.allPackages installedPkgIndex) - ++ map packageId - (PackageIndex.allPackages sourcePkgIndex) - pkgSpecifiers <- resolveUserTargets verbosity - (fromFlag $ globalWorldFile globalFlags) - sourcePkgs' userTargets - - pkgsinfo <- sequence - [ do pkginfo <- either die return $ - gatherPkgInfo prefs - installedPkgIndex sourcePkgIndex - pkgSpecifier - updateFileSystemPackageDetails pkginfo - | pkgSpecifier <- pkgSpecifiers ] - - putStr $ unlines (map showPackageDetailedInfo pkgsinfo) - - where - gatherPkgInfo :: (PackageName -> VersionRange) -> - InstalledPackageIndex -> - PackageIndex.PackageIndex SourcePackage -> - PackageSpecifier SourcePackage -> - Either String PackageDisplayInfo - gatherPkgInfo prefs installedPkgIndex sourcePkgIndex - (NamedPackage name constraints) - | null (selectedInstalledPkgs) && null (selectedSourcePkgs) - = Left $ "There is no available version of " ++ display name - ++ " that satisfies " - ++ display (simplifyVersionRange verConstraint) - - | otherwise - = Right $ mergePackageInfo pref installedPkgs - sourcePkgs selectedSourcePkg' - showPkgVersion - where - (pref, installedPkgs, sourcePkgs) = - sourcePkgsInfo prefs name installedPkgIndex sourcePkgIndex - - selectedInstalledPkgs = InstalledPackageIndex.lookupDependency - installedPkgIndex - (Dependency name verConstraint) - selectedSourcePkgs = 
PackageIndex.lookupDependency sourcePkgIndex - (Dependency name verConstraint) - selectedSourcePkg' = latestWithPref pref selectedSourcePkgs - - -- display a specific package version if the user - -- supplied a non-trivial version constraint - showPkgVersion = not (null verConstraints) - verConstraint = foldr intersectVersionRanges anyVersion verConstraints - verConstraints = [ vr | PackageConstraintVersion _ vr <- constraints ] - - gatherPkgInfo prefs installedPkgIndex sourcePkgIndex - (SpecificSourcePackage pkg) = - Right $ mergePackageInfo pref installedPkgs sourcePkgs - selectedPkg True - where - name = packageName pkg - selectedPkg = Just pkg - (pref, installedPkgs, sourcePkgs) = - sourcePkgsInfo prefs name installedPkgIndex sourcePkgIndex - -sourcePkgsInfo :: - (PackageName -> VersionRange) - -> PackageName - -> InstalledPackageIndex - -> PackageIndex.PackageIndex SourcePackage - -> (VersionRange, [Installed.InstalledPackageInfo], [SourcePackage]) -sourcePkgsInfo prefs name installedPkgIndex sourcePkgIndex = - (pref, installedPkgs, sourcePkgs) - where - pref = prefs name - installedPkgs = concatMap snd (InstalledPackageIndex.lookupPackageName - installedPkgIndex name) - sourcePkgs = PackageIndex.lookupPackageName sourcePkgIndex name - - --- | The info that we can display for each package. It is information per --- package name and covers all installed and available versions. 
--- -data PackageDisplayInfo = PackageDisplayInfo { - pkgName :: PackageName, - selectedVersion :: Maybe Version, - selectedSourcePkg :: Maybe SourcePackage, - installedVersions :: [Version], - sourceVersions :: [Version], - preferredVersions :: VersionRange, - homepage :: String, - bugReports :: String, - sourceRepo :: String, - synopsis :: String, - description :: String, - category :: String, - license :: License, - author :: String, - maintainer :: String, - dependencies :: [ExtDependency], - flags :: [Flag], - hasLib :: Bool, - hasExe :: Bool, - executables :: [String], - modules :: [ModuleName], - haddockHtml :: FilePath, - haveTarball :: Bool - } - -showPackageSummaryInfo :: PackageDisplayInfo -> String -showPackageSummaryInfo pkginfo = - renderStyle (style {lineLength = 80, ribbonsPerLine = 1}) $ - char '*' <+> disp (pkgName pkginfo) - $+$ - (nest 4 $ vcat [ - maybeShow (synopsis pkginfo) "Synopsis:" reflowParagraphs - , text "Default available version:" <+> - case selectedSourcePkg pkginfo of - Nothing -> text "[ Not available from any configured repository ]" - Just pkg -> disp (packageVersion pkg) - , text "Installed versions:" <+> - case installedVersions pkginfo of - [] | hasLib pkginfo -> text "[ Not installed ]" - | otherwise -> text "[ Unknown ]" - versions -> dispTopVersions 4 - (preferredVersions pkginfo) versions - , maybeShow (homepage pkginfo) "Homepage:" text - , text "License: " <+> text (display (license pkginfo)) - ]) - $+$ text "" - where - maybeShow [] _ _ = empty - maybeShow l s f = text s <+> (f l) - -showPackageDetailedInfo :: PackageDisplayInfo -> String -showPackageDetailedInfo pkginfo = - renderStyle (style {lineLength = 80, ribbonsPerLine = 1}) $ - char '*' <+> disp (pkgName pkginfo) - <> maybe empty (\v -> char '-' <> disp v) (selectedVersion pkginfo) - <+> text (replicate (16 - length (display (pkgName pkginfo))) ' ') - <> parens pkgkind - $+$ - (nest 4 $ vcat [ - entry "Synopsis" synopsis hideIfNull reflowParagraphs - , entry 
"Versions available" sourceVersions - (altText null "[ Not available from server ]") - (dispTopVersions 9 (preferredVersions pkginfo)) - , entry "Versions installed" installedVersions - (altText null (if hasLib pkginfo then "[ Not installed ]" - else "[ Unknown ]")) - (dispTopVersions 4 (preferredVersions pkginfo)) - , entry "Homepage" homepage orNotSpecified text - , entry "Bug reports" bugReports orNotSpecified text - , entry "Description" description hideIfNull reflowParagraphs - , entry "Category" category hideIfNull text - , entry "License" license alwaysShow disp - , entry "Author" author hideIfNull reflowLines - , entry "Maintainer" maintainer hideIfNull reflowLines - , entry "Source repo" sourceRepo orNotSpecified text - , entry "Executables" executables hideIfNull (commaSep text) - , entry "Flags" flags hideIfNull (commaSep dispFlag) - , entry "Dependencies" dependencies hideIfNull (commaSep disp) - , entry "Documentation" haddockHtml showIfInstalled text - , entry "Cached" haveTarball alwaysShow dispYesNo - , if not (hasLib pkginfo) then empty else - text "Modules:" $+$ nest 4 (vcat (map disp . sort . modules $ pkginfo)) - ]) - $+$ text "" - where - entry fname field cond format = case cond (field pkginfo) of - Nothing -> label <+> format (field pkginfo) - Just Nothing -> empty - Just (Just other) -> label <+> text other - where - label = text fname <> char ':' <> padding - padding = text (replicate (13 - length fname ) ' ') - - normal = Nothing - hide = Just Nothing - replace msg = Just (Just msg) - - alwaysShow = const normal - hideIfNull v = if null v then hide else normal - showIfInstalled v - | not isInstalled = hide - | null v = replace "[ Not installed ]" - | otherwise = normal - altText nul msg v = if nul v then replace msg else normal - orNotSpecified = altText null "[ Not specified ]" - - commaSep f = Disp.fsep . Disp.punctuate (Disp.char ',') . 
map f - dispFlag f = case flagName f of FlagName n -> text n - dispYesNo True = text "Yes" - dispYesNo False = text "No" - - isInstalled = not (null (installedVersions pkginfo)) - hasExes = length (executables pkginfo) >= 2 - --TODO: exclude non-buildable exes - pkgkind | hasLib pkginfo && hasExes = text "programs and library" - | hasLib pkginfo && hasExe pkginfo = text "program and library" - | hasLib pkginfo = text "library" - | hasExes = text "programs" - | hasExe pkginfo = text "program" - | otherwise = empty - - -reflowParagraphs :: String -> Doc -reflowParagraphs = - vcat - . intersperse (text "") -- re-insert blank lines - . map (fsep . map text . concatMap words) -- reflow paragraphs - . filter (/= [""]) - . groupBy (\x y -> "" `notElem` [x,y]) -- break on blank lines - . lines - -reflowLines :: String -> Doc -reflowLines = vcat . map text . lines - --- | We get the 'PackageDisplayInfo' by combining the info for the installed --- and available versions of a package. --- --- * We're building info about a various versions of a single named package so --- the input package info records are all supposed to refer to the same --- package name. 
--- -mergePackageInfo :: VersionRange - -> [Installed.InstalledPackageInfo] - -> [SourcePackage] - -> Maybe SourcePackage - -> Bool - -> PackageDisplayInfo -mergePackageInfo versionPref installedPkgs sourcePkgs selectedPkg showVer = - assert (length installedPkgs + length sourcePkgs > 0) $ - PackageDisplayInfo { - pkgName = combine packageName source - packageName installed, - selectedVersion = if showVer then fmap packageVersion selectedPkg - else Nothing, - selectedSourcePkg = sourceSelected, - installedVersions = map packageVersion installedPkgs, - sourceVersions = map packageVersion sourcePkgs, - preferredVersions = versionPref, - - license = combine Source.license source - Installed.license installed, - maintainer = combine Source.maintainer source - Installed.maintainer installed, - author = combine Source.author source - Installed.author installed, - homepage = combine Source.homepage source - Installed.homepage installed, - bugReports = maybe "" Source.bugReports source, - sourceRepo = fromMaybe "" . join - . fmap (uncons Nothing Source.repoLocation - . sortBy (comparing Source.repoKind) - . Source.sourceRepos) - $ source, - --TODO: installed package info is missing synopsis - synopsis = maybe "" Source.synopsis source, - description = combine Source.description source - Installed.description installed, - category = combine Source.category source - Installed.category installed, - flags = maybe [] Source.genPackageFlags sourceGeneric, - hasLib = isJust installed - || fromMaybe False - (fmap (isJust . Source.condLibrary) sourceGeneric), - hasExe = fromMaybe False - (fmap (not . null . Source.condExecutables) sourceGeneric), - executables = map fst (maybe [] Source.condExecutables sourceGeneric), - modules = combine (map Installed.exposedName . Installed.exposedModules) - installed - (maybe [] getListOfExposedModules . Source.library) - source, - dependencies = - combine (map (SourceDependency . simplifyDependency) - . 
Source.buildDepends) source - (map InstalledDependency . Installed.depends) installed, - haddockHtml = fromMaybe "" . join - . fmap (listToMaybe . Installed.haddockHTMLs) - $ installed, - haveTarball = False - } - where - combine f x g y = fromJust (fmap f x `mplus` fmap g y) - installed :: Maybe Installed.InstalledPackageInfo - installed = latestWithPref versionPref installedPkgs - - getListOfExposedModules lib = Source.exposedModules lib - ++ map Source.moduleReexportName - (Source.reexportedModules lib) - - sourceSelected - | isJust selectedPkg = selectedPkg - | otherwise = latestWithPref versionPref sourcePkgs - sourceGeneric = fmap packageDescription sourceSelected - source = fmap flattenPackageDescription sourceGeneric - - uncons :: b -> (a -> b) -> [a] -> b - uncons z _ [] = z - uncons _ f (x:_) = f x - - --- | Not all the info is pure. We have to check if the docs really are --- installed, because the registered package info lies. Similarly we have to --- check if the tarball has indeed been fetched. --- -updateFileSystemPackageDetails :: PackageDisplayInfo -> IO PackageDisplayInfo -updateFileSystemPackageDetails pkginfo = do - fetched <- maybe (return False) (isFetched . packageSource) - (selectedSourcePkg pkginfo) - docsExist <- doesDirectoryExist (haddockHtml pkginfo) - return pkginfo { - haveTarball = fetched, - haddockHtml = if docsExist then haddockHtml pkginfo else "" - } - -latestWithPref :: Package pkg => VersionRange -> [pkg] -> Maybe pkg -latestWithPref _ [] = Nothing -latestWithPref pref pkgs = Just (maximumBy (comparing prefThenVersion) pkgs) - where - prefThenVersion pkg = let ver = packageVersion pkg - in (withinRange ver pref, ver) - - --- | Rearrange installed and source packages into groups referring to the --- same package by name. In the result pairs, the lists are guaranteed to not --- both be empty. 
--- -mergePackages :: [Installed.InstalledPackageInfo] - -> [SourcePackage] - -> [( PackageName - , [Installed.InstalledPackageInfo] - , [SourcePackage] )] -mergePackages installedPkgs sourcePkgs = - map collect - $ mergeBy (\i a -> fst i `compare` fst a) - (groupOn packageName installedPkgs) - (groupOn packageName sourcePkgs) - where - collect (OnlyInLeft (name,is) ) = (name, is, []) - collect ( InBoth (_,is) (name,as)) = (name, is, as) - collect (OnlyInRight (name,as)) = (name, [], as) - -groupOn :: Ord key => (a -> key) -> [a] -> [(key,[a])] -groupOn key = map (\xs -> (key (head xs), xs)) - . groupBy (equating key) - . sortBy (comparing key) - -dispTopVersions :: Int -> VersionRange -> [Version] -> Doc -dispTopVersions n pref vs = - (Disp.fsep . Disp.punctuate (Disp.char ',') - . map (\ver -> if ispref ver then disp ver else parens (disp ver)) - . sort . take n . interestingVersions ispref - $ vs) - <+> trailingMessage - - where - ispref ver = withinRange ver pref - extra = length vs - n - trailingMessage - | extra <= 0 = Disp.empty - | otherwise = Disp.parens $ Disp.text "and" - <+> Disp.int (length vs - n) - <+> if extra == 1 then Disp.text "other" - else Disp.text "others" - --- | Reorder a bunch of versions to put the most interesting / significant --- versions first. A preferred version range is taken into account. --- --- This may be used in a user interface to select a small number of versions --- to present to the user, e.g. --- --- > let selectVersions = sort . take 5 . interestingVersions pref --- -interestingVersions :: (Version -> Bool) -> [Version] -> [Version] -interestingVersions pref = - map ((\ns -> Version ns []) . fst) . filter snd - . concat . Tree.levels - . swizzleTree - . reorderTree (\(Node (v,_) _) -> pref (Version v [])) - . reverseTree - . mkTree - . 
map versionBranch - - where - swizzleTree = unfoldTree (spine []) - where - spine ts' (Node x []) = (x, ts') - spine ts' (Node x (t:ts)) = spine (Node x ts:ts') t - - reorderTree _ (Node x []) = Node x [] - reorderTree p (Node x ts) = Node x (ts' ++ ts'') - where - (ts',ts'') = partition p (map (reorderTree p) ts) - - reverseTree (Node x cs) = Node x (reverse (map reverseTree cs)) - - mkTree xs = unfoldTree step (False, [], xs) - where - step (node,ns,vs) = - ( (reverse ns, node) - , [ (any null vs', n:ns, filter (not . null) vs') - | (n, vs') <- groups vs ] - ) - groups = map (\g -> (head (head g), map tail g)) - . groupBy (equating head) diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/PackageIndex.hs cabal-install-1.22-1.22.9.0/Distribution/Client/PackageIndex.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/PackageIndex.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/PackageIndex.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,490 +0,0 @@ -{-# LANGUAGE CPP #-} -{-# LANGUAGE DeriveFunctor #-} ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.PackageIndex --- Copyright : (c) David Himmelstrup 2005, --- Bjorn Bringert 2007, --- Duncan Coutts 2008 --- --- Maintainer : cabal-devel@haskell.org --- Portability : portable --- --- An index of packages. 
--- -module Distribution.Client.PackageIndex ( - -- * Package index data type - PackageIndex, - - -- * Creating an index - fromList, - - -- * Updates - merge, - insert, - deletePackageName, - deletePackageId, - deleteDependency, - - -- * Queries - - -- ** Precise lookups - elemByPackageId, - elemByPackageName, - lookupPackageName, - lookupPackageId, - lookupDependency, - - -- ** Case-insensitive searches - searchByName, - SearchResult(..), - searchByNameSubstring, - - -- ** Bulk queries - allPackages, - allPackagesByName, - - -- ** Special queries - brokenPackages, - dependencyClosure, - reverseDependencyClosure, - topologicalOrder, - reverseTopologicalOrder, - dependencyInconsistencies, - dependencyCycles, - dependencyGraph, - ) where - -import Prelude hiding (lookup) -import Control.Exception (assert) -import qualified Data.Map as Map -import Data.Map (Map) -import qualified Data.Tree as Tree -import qualified Data.Graph as Graph -import qualified Data.Array as Array -import Data.Array ((!)) -import Data.List (groupBy, sortBy, nub, isInfixOf) -#if !MIN_VERSION_base(4,8,0) -import Data.Monoid (Monoid(..)) -#endif -import Data.Maybe (isJust, isNothing, fromMaybe, catMaybes) - -import Distribution.Package - ( PackageName(..), PackageIdentifier(..) - , Package(..), packageName, packageVersion - , Dependency(Dependency), PackageFixedDeps(..) ) -import Distribution.Version - ( Version, withinRange ) -import Distribution.Simple.Utils (lowercase, equating, comparing) - - --- | The collection of information about packages from one or more 'PackageDB's. --- --- It can be searched efficiently by package name and version. --- -newtype PackageIndex pkg = PackageIndex - -- This index package names to all the package records matching that package - -- name case-sensitively. It includes all versions. - -- - -- This allows us to find all versions satisfying a dependency. - -- Most queries are a map lookup followed by a linear scan of the bucket. 
- -- - (Map PackageName [pkg]) - - deriving (Show, Read, Functor) - -instance Package pkg => Monoid (PackageIndex pkg) where - mempty = PackageIndex Map.empty - mappend = merge - --save one mappend with empty in the common case: - mconcat [] = mempty - mconcat xs = foldr1 mappend xs - -invariant :: Package pkg => PackageIndex pkg -> Bool -invariant (PackageIndex m) = all (uncurry goodBucket) (Map.toList m) - where - goodBucket _ [] = False - goodBucket name (pkg0:pkgs0) = check (packageId pkg0) pkgs0 - where - check pkgid [] = packageName pkgid == name - check pkgid (pkg':pkgs) = packageName pkgid == name - && pkgid < pkgid' - && check pkgid' pkgs - where pkgid' = packageId pkg' - --- --- * Internal helpers --- - -mkPackageIndex :: Package pkg => Map PackageName [pkg] -> PackageIndex pkg -mkPackageIndex index = assert (invariant (PackageIndex index)) - (PackageIndex index) - -internalError :: String -> a -internalError name = error ("PackageIndex." ++ name ++ ": internal error") - --- | Lookup a name in the index to get all packages that match that name --- case-sensitively. --- -lookup :: Package pkg => PackageIndex pkg -> PackageName -> [pkg] -lookup (PackageIndex m) name = fromMaybe [] $ Map.lookup name m - --- --- * Construction --- - --- | Build an index out of a bunch of packages. --- --- If there are duplicates, later ones mask earlier ones. --- -fromList :: Package pkg => [pkg] -> PackageIndex pkg -fromList pkgs = mkPackageIndex - . Map.map fixBucket - . Map.fromListWith (++) - $ [ (packageName pkg, [pkg]) - | pkg <- pkgs ] - where - fixBucket = -- out of groups of duplicates, later ones mask earlier ones - -- but Map.fromListWith (++) constructs groups in reverse order - map head - -- Eq instance for PackageIdentifier is wrong, so use Ord: - . groupBy (\a b -> EQ == comparing packageId a b) - -- relies on sortBy being a stable sort so we - -- can pick consistently among duplicates - . 
sortBy (comparing packageId) - --- --- * Updates --- - --- | Merge two indexes. --- --- Packages from the second mask packages of the same exact name --- (case-sensitively) from the first. --- -merge :: Package pkg => PackageIndex pkg -> PackageIndex pkg -> PackageIndex pkg -merge i1@(PackageIndex m1) i2@(PackageIndex m2) = - assert (invariant i1 && invariant i2) $ - mkPackageIndex (Map.unionWith mergeBuckets m1 m2) - --- | Elements in the second list mask those in the first. -mergeBuckets :: Package pkg => [pkg] -> [pkg] -> [pkg] -mergeBuckets [] ys = ys -mergeBuckets xs [] = xs -mergeBuckets xs@(x:xs') ys@(y:ys') = - case packageId x `compare` packageId y of - GT -> y : mergeBuckets xs ys' - EQ -> y : mergeBuckets xs' ys' - LT -> x : mergeBuckets xs' ys - --- | Inserts a single package into the index. --- --- This is equivalent to (but slightly quicker than) using 'mappend' or --- 'merge' with a singleton index. --- -insert :: Package pkg => pkg -> PackageIndex pkg -> PackageIndex pkg -insert pkg (PackageIndex index) = mkPackageIndex $ - Map.insertWith (\_ -> insertNoDup) (packageName pkg) [pkg] index - where - pkgid = packageId pkg - insertNoDup [] = [pkg] - insertNoDup pkgs@(pkg':pkgs') = case compare pkgid (packageId pkg') of - LT -> pkg : pkgs - EQ -> pkg : pkgs' - GT -> pkg' : insertNoDup pkgs' - --- | Internal delete helper. --- -delete :: Package pkg => PackageName -> (pkg -> Bool) -> PackageIndex pkg -> PackageIndex pkg -delete name p (PackageIndex index) = mkPackageIndex $ - Map.update filterBucket name index - where - filterBucket = deleteEmptyBucket - . filter (not . p) - deleteEmptyBucket [] = Nothing - deleteEmptyBucket remaining = Just remaining - --- | Removes a single package from the index. --- -deletePackageId :: Package pkg => PackageIdentifier -> PackageIndex pkg -> PackageIndex pkg -deletePackageId pkgid = - delete (packageName pkgid) (\pkg -> packageId pkg == pkgid) - --- | Removes all packages with this (case-sensitive) name from the index. 
--- -deletePackageName :: Package pkg => PackageName -> PackageIndex pkg -> PackageIndex pkg -deletePackageName name = - delete name (\pkg -> packageName pkg == name) - --- | Removes all packages satisfying this dependency from the index. --- -deleteDependency :: Package pkg => Dependency -> PackageIndex pkg -> PackageIndex pkg -deleteDependency (Dependency name verstionRange) = - delete name (\pkg -> packageVersion pkg `withinRange` verstionRange) - --- --- * Bulk queries --- - --- | Get all the packages from the index. --- -allPackages :: Package pkg => PackageIndex pkg -> [pkg] -allPackages (PackageIndex m) = concat (Map.elems m) - --- | Get all the packages from the index. --- --- They are grouped by package name, case-sensitively. --- -allPackagesByName :: Package pkg => PackageIndex pkg -> [[pkg]] -allPackagesByName (PackageIndex m) = Map.elems m - --- --- * Lookups --- - -elemByPackageId :: Package pkg => PackageIndex pkg -> PackageIdentifier -> Bool -elemByPackageId index = isJust . lookupPackageId index - -elemByPackageName :: Package pkg => PackageIndex pkg -> PackageName -> Bool -elemByPackageName index = not . null . lookupPackageName index - - --- | Does a lookup by package id (name & version). --- --- Since multiple package DBs mask each other case-sensitively by package name, --- then we get back at most one package. --- -lookupPackageId :: Package pkg => PackageIndex pkg -> PackageIdentifier -> Maybe pkg -lookupPackageId index pkgid = - case [ pkg | pkg <- lookup index (packageName pkgid) - , packageId pkg == pkgid ] of - [] -> Nothing - [pkg] -> Just pkg - _ -> internalError "lookupPackageIdentifier" - --- | Does a case-sensitive search by package name. --- -lookupPackageName :: Package pkg => PackageIndex pkg -> PackageName -> [pkg] -lookupPackageName index name = - [ pkg | pkg <- lookup index name - , packageName pkg == name ] - --- | Does a case-sensitive search by package name and a range of versions. 
--- --- We get back any number of versions of the specified package name, all --- satisfying the version range constraint. --- -lookupDependency :: Package pkg => PackageIndex pkg -> Dependency -> [pkg] -lookupDependency index (Dependency name versionRange) = - [ pkg | pkg <- lookup index name - , packageName pkg == name - , packageVersion pkg `withinRange` versionRange ] - --- --- * Case insensitive name lookups --- - --- | Does a case-insensitive search by package name. --- --- If there is only one package that compares case-insensitively to this name --- then the search is unambiguous and we get back all versions of that package. --- If several match case-insensitively but one matches exactly then it is also --- unambiguous. --- --- If however several match case-insensitively and none match exactly then we --- have an ambiguous result, and we get back all the versions of all the --- packages. The list of ambiguous results is split by exact package name. So --- it is a non-empty list of non-empty lists. --- -searchByName :: Package pkg => PackageIndex pkg - -> String -> [(PackageName, [pkg])] -searchByName (PackageIndex m) name = - [ pkgs - | pkgs@(PackageName name',_) <- Map.toList m - , lowercase name' == lname ] - where - lname = lowercase name - -data SearchResult a = None | Unambiguous a | Ambiguous [a] - --- | Does a case-insensitive substring search by package name. --- --- That is, all packages that contain the given string in their name. --- -searchByNameSubstring :: Package pkg => PackageIndex pkg - -> String -> [(PackageName, [pkg])] -searchByNameSubstring (PackageIndex m) searchterm = - [ pkgs - | pkgs@(PackageName name, _) <- Map.toList m - , lsearchterm `isInfixOf` lowercase name ] - where - lsearchterm = lowercase searchterm - --- --- * Special queries --- - --- | All packages that have dependencies that are not in the index. --- --- Returns such packages along with the dependencies that they're missing. 
--- -brokenPackages :: PackageFixedDeps pkg - => PackageIndex pkg - -> [(pkg, [PackageIdentifier])] -brokenPackages index = - [ (pkg, missing) - | pkg <- allPackages index - , let missing = [ pkg' | pkg' <- depends pkg - , isNothing (lookupPackageId index pkg') ] - , not (null missing) ] - --- | Tries to take the transitive closure of the package dependencies. --- --- If the transitive closure is complete then it returns that subset of the --- index. Otherwise it returns the broken packages as in 'brokenPackages'. --- --- * Note that if the result is @Right []@ it is because at least one of --- the original given 'PackageIdentifier's do not occur in the index. --- -dependencyClosure :: PackageFixedDeps pkg - => PackageIndex pkg - -> [PackageIdentifier] - -> Either (PackageIndex pkg) - [(pkg, [PackageIdentifier])] -dependencyClosure index pkgids0 = case closure mempty [] pkgids0 of - (completed, []) -> Left completed - (completed, _) -> Right (brokenPackages completed) - where - closure completed failed [] = (completed, failed) - closure completed failed (pkgid:pkgids) = case lookupPackageId index pkgid of - Nothing -> closure completed (pkgid:failed) pkgids - Just pkg -> case lookupPackageId completed (packageId pkg) of - Just _ -> closure completed failed pkgids - Nothing -> closure completed' failed pkgids' - where completed' = insert pkg completed - pkgids' = depends pkg ++ pkgids - --- | Takes the transitive closure of the packages reverse dependencies. --- --- * The given 'PackageIdentifier's must be in the index. --- -reverseDependencyClosure :: PackageFixedDeps pkg - => PackageIndex pkg - -> [PackageIdentifier] - -> [pkg] -reverseDependencyClosure index = - map vertexToPkg - . concatMap Tree.flatten - . Graph.dfs reverseDepGraph - . map (fromMaybe noSuchPkgId . 
pkgIdToVertex) - - where - (depGraph, vertexToPkg, pkgIdToVertex) = dependencyGraph index - reverseDepGraph = Graph.transposeG depGraph - noSuchPkgId = error "reverseDependencyClosure: package is not in the graph" - -topologicalOrder :: PackageFixedDeps pkg => PackageIndex pkg -> [pkg] -topologicalOrder index = map toPkgId - . Graph.topSort - $ graph - where (graph, toPkgId, _) = dependencyGraph index - -reverseTopologicalOrder :: PackageFixedDeps pkg => PackageIndex pkg -> [pkg] -reverseTopologicalOrder index = map toPkgId - . Graph.topSort - . Graph.transposeG - $ graph - where (graph, toPkgId, _) = dependencyGraph index - --- | Given a package index where we assume we want to use all the packages --- (use 'dependencyClosure' if you need to get such a index subset) find out --- if the dependencies within it use consistent versions of each package. --- Return all cases where multiple packages depend on different versions of --- some other package. --- --- Each element in the result is a package name along with the packages that --- depend on it and the versions they require. These are guaranteed to be --- distinct. --- -dependencyInconsistencies :: PackageFixedDeps pkg - => PackageIndex pkg - -> [(PackageName, [(PackageIdentifier, Version)])] -dependencyInconsistencies index = - [ (name, inconsistencies) - | (name, uses) <- Map.toList inverseIndex - , let inconsistencies = duplicatesBy uses - versions = map snd inconsistencies - , reallyIsInconsistent name (nub versions) ] - - where inverseIndex = Map.fromListWith (++) - [ (packageName dep, [(packageId pkg, packageVersion dep)]) - | pkg <- allPackages index - , dep <- depends pkg ] - - duplicatesBy = (\groups -> if length groups == 1 - then [] - else concat groups) - . groupBy (equating snd) - . 
sortBy (comparing snd) - - reallyIsInconsistent :: PackageName -> [Version] -> Bool - reallyIsInconsistent _ [] = False - reallyIsInconsistent name [v1, v2] = - case (mpkg1, mpkg2) of - (Just pkg1, Just pkg2) -> pkgid1 `notElem` depends pkg2 - && pkgid2 `notElem` depends pkg1 - _ -> True - where - pkgid1 = PackageIdentifier name v1 - pkgid2 = PackageIdentifier name v2 - mpkg1 = lookupPackageId index pkgid1 - mpkg2 = lookupPackageId index pkgid2 - - reallyIsInconsistent _ _ = True - --- | Find if there are any cycles in the dependency graph. If there are no --- cycles the result is @[]@. --- --- This actually computes the strongly connected components. So it gives us a --- list of groups of packages where within each group they all depend on each --- other, directly or indirectly. --- -dependencyCycles :: PackageFixedDeps pkg - => PackageIndex pkg - -> [[pkg]] -dependencyCycles index = - [ vs | Graph.CyclicSCC vs <- Graph.stronglyConnComp adjacencyList ] - where - adjacencyList = [ (pkg, packageId pkg, depends pkg) - | pkg <- allPackages index ] - --- | Builds a graph of the package dependencies. --- --- Dependencies on other packages that are not in the index are discarded. --- You can check if there are any such dependencies with 'brokenPackages'. --- -dependencyGraph :: PackageFixedDeps pkg - => PackageIndex pkg - -> (Graph.Graph, - Graph.Vertex -> pkg, - PackageIdentifier -> Maybe Graph.Vertex) -dependencyGraph index = (graph, vertexToPkg, pkgIdToVertex) - where - graph = Array.listArray bounds $ - map (catMaybes . map pkgIdToVertex . depends) pkgs - vertexToPkg vertex = pkgTable ! vertex - pkgIdToVertex = binarySearch 0 topBound - - pkgTable = Array.listArray bounds pkgs - pkgIdTable = Array.listArray bounds (map packageId pkgs) - pkgs = sortBy (comparing packageId) (allPackages index) - topBound = length pkgs - 1 - bounds = (0, topBound) - - binarySearch a b key - | a > b = Nothing - | otherwise = case compare key (pkgIdTable ! 
mid) of - LT -> binarySearch a (mid-1) key - EQ -> Just mid - GT -> binarySearch (mid+1) b key - where mid = (a + b) `div` 2 diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/PackageUtils.hs cabal-install-1.22-1.22.9.0/Distribution/Client/PackageUtils.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/PackageUtils.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/PackageUtils.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,34 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.PackageUtils --- Copyright : (c) Duncan Coutts 2010 --- License : BSD-like --- --- Maintainer : cabal-devel@gmail.com --- Stability : provisional --- Portability : portable --- --- Various package description utils that should be in the Cabal lib ------------------------------------------------------------------------------ -module Distribution.Client.PackageUtils ( - externalBuildDepends, - ) where - -import Distribution.Package - ( packageVersion, packageName, Dependency(..) ) -import Distribution.PackageDescription - ( PackageDescription(..) ) -import Distribution.Version - ( withinRange ) - --- | The list of dependencies that refer to external packages --- rather than internal package components. --- -externalBuildDepends :: PackageDescription -> [Dependency] -externalBuildDepends pkg = filter (not . internal) (buildDepends pkg) - where - -- True if this dependency is an internal one (depends on a library - -- defined in the same package). 
- internal (Dependency depName versionRange) = - depName == packageName pkg && - packageVersion pkg `withinRange` versionRange diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/ParseUtils.hs cabal-install-1.22-1.22.9.0/Distribution/Client/ParseUtils.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/ParseUtils.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/ParseUtils.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,62 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.ParseUtils --- Maintainer : cabal-devel@haskell.org --- Portability : portable --- --- Parsing utilities. ------------------------------------------------------------------------------ - -module Distribution.Client.ParseUtils ( parseFields, ppFields, ppSection ) - where - -import Distribution.ParseUtils - ( FieldDescr(..), ParseResult(..), warning, lineNo ) -import qualified Distribution.ParseUtils as ParseUtils - ( Field(..) ) - -import Control.Monad ( foldM ) -import Text.PrettyPrint ( (<>), (<+>), ($+$) ) -import qualified Data.Map as Map -import qualified Text.PrettyPrint as Disp - ( Doc, text, colon, vcat, empty, isEmpty, nest ) - ---FIXME: replace this with something better -parseFields :: [FieldDescr a] -> a -> [ParseUtils.Field] -> ParseResult a -parseFields fields = foldM setField - where - fieldMap = Map.fromList - [ (name, f) | f@(FieldDescr name _ _) <- fields ] - setField accum (ParseUtils.F line name value) = - case Map.lookup name fieldMap of - Just (FieldDescr _ _ set) -> set line value accum - Nothing -> do - warning $ "Unrecognized field " ++ name ++ " on line " ++ show line - return accum - setField accum f = do - warning $ "Unrecognized stanza on line " ++ show (lineNo f) - return accum - --- | This is a customised version of the functions from Distribution.ParseUtils --- that also optionally print default values for empty fields as comments. 
--- -ppFields :: [FieldDescr a] -> (Maybe a) -> a -> Disp.Doc -ppFields fields def cur = Disp.vcat [ ppField name (fmap getter def) (getter cur) - | FieldDescr name getter _ <- fields] - -ppField :: String -> (Maybe Disp.Doc) -> Disp.Doc -> Disp.Doc -ppField name mdef cur - | Disp.isEmpty cur = maybe Disp.empty - (\def -> Disp.text "--" <+> Disp.text name - <> Disp.colon <+> def) mdef - | otherwise = Disp.text name <> Disp.colon <+> cur - -ppSection :: String -> String -> [FieldDescr a] -> (Maybe a) -> a -> Disp.Doc -ppSection name arg fields def cur - | Disp.isEmpty fieldsDoc = Disp.empty - | otherwise = Disp.text name <+> argDoc - $+$ (Disp.nest 2 fieldsDoc) - where - fieldsDoc = ppFields fields def cur - argDoc | arg == "" = Disp.empty - | otherwise = Disp.text arg diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Run.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Run.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Run.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Run.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,95 +0,0 @@ -{-# LANGUAGE CPP #-} ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.Run --- Maintainer : cabal-devel@haskell.org --- Portability : portable --- --- Implementation of the 'run' command. 
------------------------------------------------------------------------------ - -module Distribution.Client.Run ( run, splitRunArgs ) - where - -import Distribution.Client.Utils (tryCanonicalizePath) - -import Distribution.PackageDescription (Executable (..), - PackageDescription (..)) -import Distribution.Simple.Compiler (compilerFlavor, CompilerFlavor(..)) -import Distribution.Simple.Build.PathsModule (pkgPathEnvVar) -import Distribution.Simple.BuildPaths (exeExtension) -import Distribution.Simple.LocalBuildInfo (ComponentName (..), - LocalBuildInfo (..), - getComponentLocalBuildInfo, - depLibraryPaths) -import Distribution.Simple.Utils (die, notice, rawSystemExitWithEnv, - addLibraryPath) -import Distribution.System (Platform (..)) -import Distribution.Verbosity (Verbosity) - -import qualified Distribution.Simple.GHCJS as GHCJS - -#if !MIN_VERSION_base(4,8,0) -import Data.Functor ((<$>)) -#endif -import Data.List (find) -import System.Directory (getCurrentDirectory) -import Distribution.Compat.Environment (getEnvironment) -import System.FilePath ((<.>), ()) - - --- | Return the executable to run and any extra arguments that should be --- forwarded to it. -splitRunArgs :: LocalBuildInfo -> [String] -> IO (Executable, [String]) -splitRunArgs lbi args = - case exes of - [] -> die "Couldn't find any executables." - [exe] -> case args of - [] -> return (exe, []) - (x:xs) | x == exeName exe -> return (exe, xs) - | otherwise -> return (exe, args) - _ -> case args of - [] -> die $ "This package contains multiple executables. " - ++ "You must pass the executable name as the first argument " - ++ "to 'cabal run'." - (x:xs) -> case find (\exe -> exeName exe == x) exes of - Nothing -> die $ "No executable named '" ++ x ++ "'." - Just exe -> return (exe, xs) - where - pkg_descr = localPkgDescr lbi - exes = executables pkg_descr - - --- | Run a given executable. 
-run :: Verbosity -> LocalBuildInfo -> Executable -> [String] -> IO () -run verbosity lbi exe exeArgs = do - curDir <- getCurrentDirectory - let buildPref = buildDir lbi - pkg_descr = localPkgDescr lbi - dataDirEnvVar = (pkgPathEnvVar pkg_descr "datadir", - curDir dataDir pkg_descr) - - (path, runArgs) <- - case compilerFlavor (compiler lbi) of - GHCJS -> do - let (script, cmd, cmdArgs) = - GHCJS.runCmd (withPrograms lbi) - (buildPref exeName exe exeName exe) - script' <- tryCanonicalizePath script - return (cmd, cmdArgs ++ [script']) - _ -> do - p <- tryCanonicalizePath $ - buildPref exeName exe (exeName exe <.> exeExtension) - return (p, []) - - env <- (dataDirEnvVar:) <$> getEnvironment - -- Add (DY)LD_LIBRARY_PATH if needed - env' <- if withDynExe lbi - then do let (Platform _ os) = hostPlatform lbi - clbi = getComponentLocalBuildInfo lbi - (CExeName (exeName exe)) - paths <- depLibraryPaths True False lbi clbi - return (addLibraryPath os paths env) - else return env - notice verbosity $ "Running " ++ exeName exe ++ "..." - rawSystemExitWithEnv verbosity path (runArgs++exeArgs) env' diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Sandbox/Index.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Sandbox/Index.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Sandbox/Index.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Sandbox/Index.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,242 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.Sandbox.Index --- Maintainer : cabal-devel@haskell.org --- Portability : portable --- --- Querying and modifying local build tree references in the package index. 
------------------------------------------------------------------------------ - -module Distribution.Client.Sandbox.Index ( - createEmpty, - addBuildTreeRefs, - removeBuildTreeRefs, - ListIgnoredBuildTreeRefs(..), RefTypesToList(..), - listBuildTreeRefs, - validateIndexPath, - - defaultIndexFileName - ) where - -import qualified Distribution.Client.Tar as Tar -import Distribution.Client.IndexUtils ( BuildTreeRefType(..) - , refTypeFromTypeCode - , typeCodeFromRefType - , updatePackageIndexCacheFile - , getSourcePackagesStrict ) -import Distribution.Client.PackageIndex ( allPackages ) -import Distribution.Client.Types ( Repo(..), LocalRepo(..) - , SourcePackageDb(..) - , SourcePackage(..), PackageLocation(..) ) -import Distribution.Client.Utils ( byteStringToFilePath, filePathToByteString - , makeAbsoluteToCwd, tryCanonicalizePath - , canonicalizePathNoThrow - , tryFindAddSourcePackageDesc ) - -import Distribution.Simple.Utils ( die, debug ) -import Distribution.Verbosity ( Verbosity ) - -import qualified Data.ByteString.Lazy as BS -import Control.Exception ( evaluate ) -import Control.Monad ( liftM, unless ) -import Data.List ( (\\), intersect, nub ) -import Data.Maybe ( catMaybes ) -import System.Directory ( createDirectoryIfMissing, - doesDirectoryExist, doesFileExist, - renameFile ) -import System.FilePath ( (), (<.>), takeDirectory, takeExtension - , replaceExtension ) -import System.IO ( IOMode(..), SeekMode(..) - , hSeek, withBinaryFile ) - --- | A reference to a local build tree. -data BuildTreeRef = BuildTreeRef { - buildTreeRefType :: !BuildTreeRefType, - buildTreePath :: !FilePath - } - -defaultIndexFileName :: FilePath -defaultIndexFileName = "00-index.tar" - --- | Given a path, ensure that it refers to a local build tree. 
-buildTreeRefFromPath :: BuildTreeRefType -> FilePath -> IO (Maybe BuildTreeRef) -buildTreeRefFromPath refType dir = do - dirExists <- doesDirectoryExist dir - unless dirExists $ - die $ "directory '" ++ dir ++ "' does not exist" - _ <- tryFindAddSourcePackageDesc dir "Error adding source reference." - return . Just $ BuildTreeRef refType dir - --- | Given a tar archive entry, try to parse it as a local build tree reference. -readBuildTreeRef :: Tar.Entry -> Maybe BuildTreeRef -readBuildTreeRef entry = case Tar.entryContent entry of - (Tar.OtherEntryType typeCode bs size) - | (Tar.isBuildTreeRefTypeCode typeCode) - && (size == BS.length bs) -> Just $! BuildTreeRef - (refTypeFromTypeCode typeCode) - (byteStringToFilePath bs) - | otherwise -> Nothing - _ -> Nothing - --- | Given a sequence of tar archive entries, extract all references to local --- build trees. -readBuildTreeRefs :: Tar.Entries -> [BuildTreeRef] -readBuildTreeRefs = - catMaybes - . Tar.foldrEntries (\e r -> readBuildTreeRef e : r) - [] error - --- | Given a path to a tar archive, extract all references to local build trees. -readBuildTreeRefsFromFile :: FilePath -> IO [BuildTreeRef] -readBuildTreeRefsFromFile = liftM (readBuildTreeRefs . Tar.read) . BS.readFile - --- | Given a local build tree ref, serialise it to a tar archive entry. -writeBuildTreeRef :: BuildTreeRef -> Tar.Entry -writeBuildTreeRef (BuildTreeRef refType path) = Tar.simpleEntry tarPath content - where - bs = filePathToByteString path - -- Provide a filename for tools that treat custom entries as ordinary files. - tarPath' = "local-build-tree-reference" - -- fromRight can't fail because the path is shorter than 255 characters. - tarPath = fromRight $ Tar.toTarPath True tarPath' - content = Tar.OtherEntryType (typeCodeFromRefType refType) bs (BS.length bs) - - -- TODO: Move this to D.C.Utils? 
- fromRight (Left err) = error err - fromRight (Right a) = a - --- | Check that the provided path is either an existing directory, or a tar --- archive in an existing directory. -validateIndexPath :: FilePath -> IO FilePath -validateIndexPath path' = do - path <- makeAbsoluteToCwd path' - if (== ".tar") . takeExtension $ path - then return path - else do dirExists <- doesDirectoryExist path - unless dirExists $ - die $ "directory does not exist: '" ++ path ++ "'" - return $ path defaultIndexFileName - --- | Create an empty index file. -createEmpty :: Verbosity -> FilePath -> IO () -createEmpty verbosity path = do - indexExists <- doesFileExist path - if indexExists - then debug verbosity $ "Package index already exists: " ++ path - else do - debug verbosity $ "Creating the index file '" ++ path ++ "'" - createDirectoryIfMissing True (takeDirectory path) - -- Equivalent to 'tar cvf empty.tar --files-from /dev/null'. - let zeros = BS.replicate (512*20) 0 - BS.writeFile path zeros - --- | Add given local build tree references to the index. -addBuildTreeRefs :: Verbosity -> FilePath -> [FilePath] -> BuildTreeRefType - -> IO () -addBuildTreeRefs _ _ [] _ = - error "Distribution.Client.Sandbox.Index.addBuildTreeRefs: unexpected" -addBuildTreeRefs verbosity path l' refType = do - checkIndexExists path - l <- liftM nub . mapM tryCanonicalizePath $ l' - treesInIndex <- fmap (map buildTreePath) (readBuildTreeRefsFromFile path) - -- Add only those paths that aren't already in the index. - treesToAdd <- mapM (buildTreeRefFromPath refType) (l \\ treesInIndex) - let entries = map writeBuildTreeRef (catMaybes treesToAdd) - unless (null entries) $ do - offset <- - fmap (Tar.foldrEntries (\e acc -> Tar.entrySizeInBytes e + acc) 0 error - . 
Tar.read) $ BS.readFile path - _ <- evaluate offset - debug verbosity $ "Writing at offset: " ++ show offset - withBinaryFile path ReadWriteMode $ \h -> do - hSeek h AbsoluteSeek (fromIntegral offset) - BS.hPut h (Tar.write entries) - debug verbosity $ "Successfully appended to '" ++ path ++ "'" - updatePackageIndexCacheFile verbosity path - (path `replaceExtension` "cache") - --- | Remove given local build tree references from the index. -removeBuildTreeRefs :: Verbosity -> FilePath -> [FilePath] -> IO [FilePath] -removeBuildTreeRefs _ _ [] = - error "Distribution.Client.Sandbox.Index.removeBuildTreeRefs: unexpected" -removeBuildTreeRefs verbosity path l' = do - checkIndexExists path - l <- mapM canonicalizePathNoThrow l' - let tmpFile = path <.> "tmp" - -- Performance note: on my system, it takes 'index --remove-source' - -- approx. 3,5s to filter a 65M file. Real-life indices are expected to be - -- much smaller. - BS.writeFile tmpFile . Tar.writeEntries . Tar.filterEntries (p l) . Tar.read - =<< BS.readFile path - renameFile tmpFile path - debug verbosity $ "Successfully renamed '" ++ tmpFile - ++ "' to '" ++ path ++ "'" - updatePackageIndexCacheFile verbosity path (path `replaceExtension` "cache") - -- FIXME: return only the refs that vere actually removed. - return l - where - p l entry = case readBuildTreeRef entry of - Nothing -> True - -- FIXME: removing snapshot deps is done with `delete-source - -- .cabal-sandbox/snapshots/$SNAPSHOT_NAME`. Perhaps we also want to - -- support removing snapshots by providing the original path. - (Just (BuildTreeRef _ pth)) -> pth `notElem` l - --- | A build tree ref can become ignored if the user later adds a build tree ref --- with the same package ID. We display ignored build tree refs when the user --- runs 'cabal sandbox list-sources', but do not look at their timestamps in --- 'reinstallAddSourceDeps'. 
-data ListIgnoredBuildTreeRefs = ListIgnored | DontListIgnored - --- | Which types of build tree refs should be listed? -data RefTypesToList = OnlySnapshots | OnlyLinks | LinksAndSnapshots - --- | List the local build trees that are referred to from the index. -listBuildTreeRefs :: Verbosity -> ListIgnoredBuildTreeRefs -> RefTypesToList - -> FilePath - -> IO [FilePath] -listBuildTreeRefs verbosity listIgnored refTypesToList path = do - checkIndexExists path - buildTreeRefs <- - case listIgnored of - DontListIgnored -> do - paths <- listWithoutIgnored - case refTypesToList of - LinksAndSnapshots -> return paths - _ -> do - allPathsFiltered <- fmap (map buildTreePath . filter predicate) - listWithIgnored - _ <- evaluate (length allPathsFiltered) - return (paths `intersect` allPathsFiltered) - - ListIgnored -> fmap (map buildTreePath . filter predicate) listWithIgnored - - _ <- evaluate (length buildTreeRefs) - return buildTreeRefs - - where - predicate :: BuildTreeRef -> Bool - predicate = case refTypesToList of - OnlySnapshots -> (==) SnapshotRef . buildTreeRefType - OnlyLinks -> (==) LinkRef . buildTreeRefType - LinksAndSnapshots -> const True - - listWithIgnored :: IO [BuildTreeRef] - listWithIgnored = readBuildTreeRefsFromFile $ path - - listWithoutIgnored :: IO [FilePath] - listWithoutIgnored = do - let repo = Repo { repoKind = Right LocalRepo - , repoLocalDir = takeDirectory path } - pkgIndex <- fmap packageIndex - . getSourcePackagesStrict verbosity $ [repo] - return [ pkgPath | (LocalUnpackedPackage pkgPath) <- - map packageSource . allPackages $ pkgIndex ] - - --- | Check that the package index file exists and exit with error if it does not. 
-checkIndexExists :: FilePath -> IO () -checkIndexExists path = do - indexExists <- doesFileExist path - unless indexExists $ - die $ "index does not exist: '" ++ path ++ "'" diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Sandbox/PackageEnvironment.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Sandbox/PackageEnvironment.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Sandbox/PackageEnvironment.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Sandbox/PackageEnvironment.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,573 +0,0 @@ -{-# LANGUAGE CPP #-} ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.Sandbox.PackageEnvironment --- Maintainer : cabal-devel@haskell.org --- Portability : portable --- --- Utilities for working with the package environment file. Patterned after --- Distribution.Client.Config. ------------------------------------------------------------------------------ - -module Distribution.Client.Sandbox.PackageEnvironment ( - PackageEnvironment(..) - , IncludeComments(..) - , PackageEnvironmentType(..) - , classifyPackageEnvironment - , createPackageEnvironmentFile - , tryLoadSandboxPackageEnvironmentFile - , readPackageEnvironmentFile - , showPackageEnvironment - , showPackageEnvironmentWithComments - , setPackageDB - , sandboxPackageDBPath - , loadUserConfig - - , basePackageEnvironment - , initialPackageEnvironment - , commentPackageEnvironment - , sandboxPackageEnvironmentFile - , userPackageEnvironmentFile - ) where - -import Distribution.Client.Config ( SavedConfig(..), commentSavedConfig - , loadConfig, configFieldDescriptions - , haddockFlagsFields - , installDirsFields, withProgramsFields - , withProgramOptionsFields - , defaultCompiler ) -import Distribution.Client.ParseUtils ( parseFields, ppFields, ppSection ) -import Distribution.Client.Setup ( GlobalFlags(..), ConfigExFlags(..) - , InstallFlags(..) 
- , defaultSandboxLocation ) -import Distribution.Utils.NubList ( toNubList ) -import Distribution.Simple.Compiler ( Compiler, PackageDB(..) - , compilerFlavor, showCompilerIdWithAbi ) -import Distribution.Simple.InstallDirs ( InstallDirs(..), PathTemplate - , defaultInstallDirs, combineInstallDirs - , fromPathTemplate, toPathTemplate ) -import Distribution.Simple.Setup ( Flag(..) - , ConfigFlags(..), HaddockFlags(..) - , fromFlagOrDefault, toFlag, flagToMaybe ) -import Distribution.Simple.Utils ( die, info, notice, warn ) -import Distribution.ParseUtils ( FieldDescr(..), ParseResult(..) - , commaListField, commaNewLineListField - , liftField, lineNo, locatedErrorMsg - , parseFilePathQ, readFields - , showPWarning, simpleField - , syntaxError, warning ) -import Distribution.System ( Platform ) -import Distribution.Verbosity ( Verbosity, normal ) -import Control.Monad ( foldM, liftM2, when, unless ) -import Data.List ( partition ) -import Data.Maybe ( isJust ) -#if !MIN_VERSION_base(4,8,0) -import Data.Monoid ( Monoid(..) ) -#endif -import Distribution.Compat.Exception ( catchIO ) -import System.Directory ( doesDirectoryExist, doesFileExist - , renameFile ) -import System.FilePath ( (<.>), (), takeDirectory ) -import System.IO.Error ( isDoesNotExistError ) -import Text.PrettyPrint ( ($+$) ) - -import qualified Text.PrettyPrint as Disp -import qualified Distribution.Compat.ReadP as Parse -import qualified Distribution.ParseUtils as ParseUtils ( Field(..) ) -import qualified Distribution.Text as Text - - --- --- * Configuration saved in the package environment file --- - --- TODO: would be nice to remove duplication between --- D.C.Sandbox.PackageEnvironment and D.C.Config. -data PackageEnvironment = PackageEnvironment { - -- The 'inherit' feature is not used ATM, but could be useful in the future - -- for constructing nested sandboxes (see discussion in #1196). 
- pkgEnvInherit :: Flag FilePath, - pkgEnvSavedConfig :: SavedConfig -} - -instance Monoid PackageEnvironment where - mempty = PackageEnvironment { - pkgEnvInherit = mempty, - pkgEnvSavedConfig = mempty - } - - mappend a b = PackageEnvironment { - pkgEnvInherit = combine pkgEnvInherit, - pkgEnvSavedConfig = combine pkgEnvSavedConfig - } - where - combine f = f a `mappend` f b - --- | The automatically-created package environment file that should not be --- touched by the user. -sandboxPackageEnvironmentFile :: FilePath -sandboxPackageEnvironmentFile = "cabal.sandbox.config" - --- | Optional package environment file that can be used to customize the default --- settings. Created by the user. -userPackageEnvironmentFile :: FilePath -userPackageEnvironmentFile = "cabal.config" - --- | Type of the current package environment. -data PackageEnvironmentType = - SandboxPackageEnvironment -- ^ './cabal.sandbox.config' - | UserPackageEnvironment -- ^ './cabal.config' - | AmbientPackageEnvironment -- ^ '~/.cabal/config' - --- | Is there a 'cabal.sandbox.config' or 'cabal.config' in this --- directory? -classifyPackageEnvironment :: FilePath -> Flag FilePath -> Flag Bool - -> IO PackageEnvironmentType -classifyPackageEnvironment pkgEnvDir sandboxConfigFileFlag ignoreSandboxFlag = - do isSandbox <- liftM2 (||) (return forceSandboxConfig) - (configExists sandboxPackageEnvironmentFile) - isUser <- configExists userPackageEnvironmentFile - return (classify isSandbox isUser) - where - configExists fname = doesFileExist (pkgEnvDir fname) - ignoreSandbox = fromFlagOrDefault False ignoreSandboxFlag - forceSandboxConfig = isJust . flagToMaybe $ sandboxConfigFileFlag - - classify :: Bool -> Bool -> PackageEnvironmentType - classify True _ - | not ignoreSandbox = SandboxPackageEnvironment - classify _ True = UserPackageEnvironment - classify _ False = AmbientPackageEnvironment - --- | Defaults common to 'initialPackageEnvironment' and --- 'commentPackageEnvironment'. 
-commonPackageEnvironmentConfig :: FilePath -> SavedConfig -commonPackageEnvironmentConfig sandboxDir = - mempty { - savedConfigureFlags = mempty { - -- TODO: Currently, we follow cabal-dev and set 'user-install: False' in - -- the config file. In the future we may want to distinguish between - -- global, sandbox and user install types. - configUserInstall = toFlag False, - configInstallDirs = installDirs - }, - savedUserInstallDirs = installDirs, - savedGlobalInstallDirs = installDirs, - savedGlobalFlags = mempty { - globalLogsDir = toFlag $ sandboxDir "logs", - -- Is this right? cabal-dev uses the global world file. - globalWorldFile = toFlag $ sandboxDir "world" - } - } - where - installDirs = sandboxInstallDirs sandboxDir - --- | 'commonPackageEnvironmentConfig' wrapped inside a 'PackageEnvironment'. -commonPackageEnvironment :: FilePath -> PackageEnvironment -commonPackageEnvironment sandboxDir = mempty { - pkgEnvSavedConfig = commonPackageEnvironmentConfig sandboxDir - } - --- | Given a path to a sandbox, return the corresponding InstallDirs record. -sandboxInstallDirs :: FilePath -> InstallDirs (Flag PathTemplate) -sandboxInstallDirs sandboxDir = mempty { - prefix = toFlag (toPathTemplate sandboxDir) - } - --- | These are the absolute basic defaults, the fields that must be --- initialised. When we load the package environment from the file we layer the --- loaded values over these ones. -basePackageEnvironment :: PackageEnvironment -basePackageEnvironment = - mempty { - pkgEnvSavedConfig = mempty { - savedConfigureFlags = mempty { - configHcFlavor = toFlag defaultCompiler, - configVerbosity = toFlag normal - } - } - } - --- | Initial configuration that we write out to the package environment file if --- it does not exist. When the package environment gets loaded this --- configuration gets layered on top of 'basePackageEnvironment'. 
-initialPackageEnvironment :: FilePath -> Compiler -> Platform - -> IO PackageEnvironment -initialPackageEnvironment sandboxDir compiler platform = do - defInstallDirs <- defaultInstallDirs (compilerFlavor compiler) - {- userInstall= -} False {- _hasLibs= -} False - let initialConfig = commonPackageEnvironmentConfig sandboxDir - installDirs = combineInstallDirs (\d f -> Flag $ fromFlagOrDefault d f) - defInstallDirs (savedUserInstallDirs initialConfig) - return $ mempty { - pkgEnvSavedConfig = initialConfig { - savedUserInstallDirs = installDirs, - savedGlobalInstallDirs = installDirs, - savedGlobalFlags = (savedGlobalFlags initialConfig) { - globalLocalRepos = toNubList [sandboxDir "packages"] - }, - savedConfigureFlags = setPackageDB sandboxDir compiler platform - (savedConfigureFlags initialConfig), - savedInstallFlags = (savedInstallFlags initialConfig) { - installSummaryFile = toNubList [toPathTemplate (sandboxDir - "logs" "build.log")] - } - } - } - --- | Return the path to the sandbox package database. -sandboxPackageDBPath :: FilePath -> Compiler -> Platform -> String -sandboxPackageDBPath sandboxDir compiler platform = - sandboxDir - (Text.display platform ++ "-" - ++ showCompilerIdWithAbi compiler - ++ "-packages.conf.d") --- The path in sandboxPackageDBPath should be kept in sync with the --- path in the bootstrap.sh which is used to bootstrap cabal-install --- into a sandbox. - --- | Use the package DB location specific for this compiler. -setPackageDB :: FilePath -> Compiler -> Platform -> ConfigFlags -> ConfigFlags -setPackageDB sandboxDir compiler platform configFlags = - configFlags { - configPackageDBs = [Just (SpecificPackageDB $ sandboxPackageDBPath - sandboxDir - compiler - platform)] - } - --- | Almost the same as 'savedConf `mappend` pkgEnv', but some settings are --- overridden instead of mappend'ed. 
-overrideSandboxSettings :: PackageEnvironment -> PackageEnvironment -> - PackageEnvironment -overrideSandboxSettings pkgEnv0 pkgEnv = - pkgEnv { - pkgEnvSavedConfig = mappendedConf { - savedConfigureFlags = (savedConfigureFlags mappendedConf) { - configPackageDBs = configPackageDBs pkgEnvConfigureFlags - } - , savedInstallFlags = (savedInstallFlags mappendedConf) { - installSummaryFile = installSummaryFile pkgEnvInstallFlags - } - }, - pkgEnvInherit = pkgEnvInherit pkgEnv0 - } - where - pkgEnvConf = pkgEnvSavedConfig pkgEnv - mappendedConf = (pkgEnvSavedConfig pkgEnv0) `mappend` pkgEnvConf - pkgEnvConfigureFlags = savedConfigureFlags pkgEnvConf - pkgEnvInstallFlags = savedInstallFlags pkgEnvConf - --- | Default values that get used if no value is given. Used here to include in --- comments when we write out the initial package environment. -commentPackageEnvironment :: FilePath -> IO PackageEnvironment -commentPackageEnvironment sandboxDir = do - commentConf <- commentSavedConfig - let baseConf = commonPackageEnvironmentConfig sandboxDir - return $ mempty { - pkgEnvSavedConfig = commentConf `mappend` baseConf - } - --- | If this package environment inherits from some other package environment, --- return that package environment; otherwise return mempty. -inheritedPackageEnvironment :: Verbosity -> PackageEnvironment - -> IO PackageEnvironment -inheritedPackageEnvironment verbosity pkgEnv = do - case (pkgEnvInherit pkgEnv) of - NoFlag -> return mempty - confPathFlag@(Flag _) -> do - conf <- loadConfig verbosity confPathFlag NoFlag - return $ mempty { pkgEnvSavedConfig = conf } - --- | Load the user package environment if it exists (the optional "cabal.config" --- file). 
-userPackageEnvironment :: Verbosity -> FilePath -> IO PackageEnvironment -userPackageEnvironment verbosity pkgEnvDir = do - let path = pkgEnvDir userPackageEnvironmentFile - minp <- readPackageEnvironmentFile mempty path - case minp of - Nothing -> return mempty - Just (ParseOk warns parseResult) -> do - when (not $ null warns) $ warn verbosity $ - unlines (map (showPWarning path) warns) - return parseResult - Just (ParseFailed err) -> do - let (line, msg) = locatedErrorMsg err - warn verbosity $ "Error parsing user package environment file " ++ path - ++ maybe "" (\n -> ":" ++ show n) line ++ ":\n" ++ msg - return mempty - --- | Same as @userPackageEnvironmentFile@, but returns a SavedConfig. -loadUserConfig :: Verbosity -> FilePath -> IO SavedConfig -loadUserConfig verbosity pkgEnvDir = fmap pkgEnvSavedConfig - $ userPackageEnvironment verbosity pkgEnvDir - --- | Common error handling code used by 'tryLoadSandboxPackageEnvironment' and --- 'updatePackageEnvironment'. -handleParseResult :: Verbosity -> FilePath - -> Maybe (ParseResult PackageEnvironment) - -> IO PackageEnvironment -handleParseResult verbosity path minp = - case minp of - Nothing -> die $ - "The package environment file '" ++ path ++ "' doesn't exist" - Just (ParseOk warns parseResult) -> do - when (not $ null warns) $ warn verbosity $ - unlines (map (showPWarning path) warns) - return parseResult - Just (ParseFailed err) -> do - let (line, msg) = locatedErrorMsg err - die $ "Error parsing package environment file " ++ path - ++ maybe "" (\n -> ":" ++ show n) line ++ ":\n" ++ msg - --- | Try to load the given package environment file, exiting with error if it --- doesn't exist. Also returns the path to the sandbox directory. The path --- parameter should refer to an existing file. 
-tryLoadSandboxPackageEnvironmentFile :: Verbosity -> FilePath -> (Flag FilePath) - -> IO (FilePath, PackageEnvironment) -tryLoadSandboxPackageEnvironmentFile verbosity pkgEnvFile configFileFlag = do - let pkgEnvDir = takeDirectory pkgEnvFile - minp <- readPackageEnvironmentFile mempty pkgEnvFile - pkgEnv <- handleParseResult verbosity pkgEnvFile minp - - -- Get the saved sandbox directory. - -- TODO: Use substPathTemplate with - -- compilerTemplateEnv ++ platformTemplateEnv ++ abiTemplateEnv. - let sandboxDir = fromFlagOrDefault defaultSandboxLocation - . fmap fromPathTemplate . prefix . savedUserInstallDirs - . pkgEnvSavedConfig $ pkgEnv - - -- Do some sanity checks - dirExists <- doesDirectoryExist sandboxDir - -- TODO: Also check for an initialised package DB? - unless dirExists $ - die ("No sandbox exists at " ++ sandboxDir) - info verbosity $ "Using a sandbox located at " ++ sandboxDir - - let base = basePackageEnvironment - let common = commonPackageEnvironment sandboxDir - user <- userPackageEnvironment verbosity pkgEnvDir - inherited <- inheritedPackageEnvironment verbosity user - - -- Layer the package environment settings over settings from ~/.cabal/config. - cabalConfig <- fmap unsetSymlinkBinDir $ - loadConfig verbosity configFileFlag NoFlag - return (sandboxDir, - updateInstallDirs $ - (base `mappend` (toPkgEnv cabalConfig) `mappend` - common `mappend` inherited `mappend` user) - `overrideSandboxSettings` pkgEnv) - where - toPkgEnv config = mempty { pkgEnvSavedConfig = config } - - updateInstallDirs pkgEnv = - let config = pkgEnvSavedConfig pkgEnv - configureFlags = savedConfigureFlags config - installDirs = savedUserInstallDirs config - in pkgEnv { - pkgEnvSavedConfig = config { - savedConfigureFlags = configureFlags { - configInstallDirs = installDirs - } - } - } - - -- We don't want to inherit the value of 'symlink-bindir' from - -- '~/.cabal/config'. See #1514. 
- unsetSymlinkBinDir config = - let installFlags = savedInstallFlags config - in config { - savedInstallFlags = installFlags { - installSymlinkBinDir = NoFlag - } - } - --- | Should the generated package environment file include comments? -data IncludeComments = IncludeComments | NoComments - --- | Create a new package environment file, replacing the existing one if it --- exists. Note that the path parameters should point to existing directories. -createPackageEnvironmentFile :: Verbosity -> FilePath -> FilePath - -> IncludeComments - -> Compiler - -> Platform - -> IO () -createPackageEnvironmentFile verbosity sandboxDir pkgEnvFile incComments - compiler platform = do - notice verbosity $ "Writing a default package environment file to " - ++ pkgEnvFile - - commentPkgEnv <- commentPackageEnvironment sandboxDir - initialPkgEnv <- initialPackageEnvironment sandboxDir compiler platform - writePackageEnvironmentFile pkgEnvFile incComments commentPkgEnv initialPkgEnv - --- | Descriptions of all fields in the package environment file. -pkgEnvFieldDescrs :: [FieldDescr PackageEnvironment] -pkgEnvFieldDescrs = [ - simpleField "inherit" - (fromFlagOrDefault Disp.empty . fmap Disp.text) (optional parseFilePathQ) - pkgEnvInherit (\v pkgEnv -> pkgEnv { pkgEnvInherit = v }) - - -- FIXME: Should we make these fields part of ~/.cabal/config ? - , commaNewLineListField "constraints" - Text.disp Text.parse - (configExConstraints . savedConfigureExFlags . pkgEnvSavedConfig) - (\v pkgEnv -> updateConfigureExFlags pkgEnv - (\flags -> flags { configExConstraints = v })) - - , commaListField "preferences" - Text.disp Text.parse - (configPreferences . savedConfigureExFlags . pkgEnvSavedConfig) - (\v pkgEnv -> updateConfigureExFlags pkgEnv - (\flags -> flags { configPreferences = v })) - ] - ++ map toPkgEnv configFieldDescriptions' - where - optional = Parse.option mempty . 
fmap toFlag - - configFieldDescriptions' :: [FieldDescr SavedConfig] - configFieldDescriptions' = filter - (\(FieldDescr name _ _) -> name /= "preference" && name /= "constraint") - configFieldDescriptions - - toPkgEnv :: FieldDescr SavedConfig -> FieldDescr PackageEnvironment - toPkgEnv fieldDescr = - liftField pkgEnvSavedConfig - (\savedConfig pkgEnv -> pkgEnv { pkgEnvSavedConfig = savedConfig}) - fieldDescr - - updateConfigureExFlags :: PackageEnvironment - -> (ConfigExFlags -> ConfigExFlags) - -> PackageEnvironment - updateConfigureExFlags pkgEnv f = pkgEnv { - pkgEnvSavedConfig = (pkgEnvSavedConfig pkgEnv) { - savedConfigureExFlags = f . savedConfigureExFlags . pkgEnvSavedConfig - $ pkgEnv - } - } - --- | Read the package environment file. -readPackageEnvironmentFile :: PackageEnvironment -> FilePath - -> IO (Maybe (ParseResult PackageEnvironment)) -readPackageEnvironmentFile initial file = - handleNotExists $ - fmap (Just . parsePackageEnvironment initial) (readFile file) - where - handleNotExists action = catchIO action $ \ioe -> - if isDoesNotExistError ioe - then return Nothing - else ioError ioe - --- | Parse the package environment file. 
-parsePackageEnvironment :: PackageEnvironment -> String - -> ParseResult PackageEnvironment -parsePackageEnvironment initial str = do - fields <- readFields str - let (knownSections, others) = partition isKnownSection fields - pkgEnv <- parse others - let config = pkgEnvSavedConfig pkgEnv - installDirs0 = savedUserInstallDirs config - (haddockFlags, installDirs, paths, args) <- - foldM parseSections - (savedHaddockFlags config, installDirs0, [], []) - knownSections - return pkgEnv { - pkgEnvSavedConfig = config { - savedConfigureFlags = (savedConfigureFlags config) { - configProgramPaths = paths, - configProgramArgs = args - }, - savedHaddockFlags = haddockFlags, - savedUserInstallDirs = installDirs, - savedGlobalInstallDirs = installDirs - } - } - - where - isKnownSection :: ParseUtils.Field -> Bool - isKnownSection (ParseUtils.Section _ "haddock" _ _) = True - isKnownSection (ParseUtils.Section _ "install-dirs" _ _) = True - isKnownSection (ParseUtils.Section _ "program-locations" _ _) = True - isKnownSection (ParseUtils.Section _ "program-default-options" _ _) = True - isKnownSection _ = False - - parse :: [ParseUtils.Field] -> ParseResult PackageEnvironment - parse = parseFields pkgEnvFieldDescrs initial - - parseSections :: SectionsAccum -> ParseUtils.Field - -> ParseResult SectionsAccum - parseSections accum@(h,d,p,a) - (ParseUtils.Section _ "haddock" name fs) - | name == "" = do h' <- parseFields haddockFlagsFields h fs - return (h', d, p, a) - | otherwise = do - warning "The 'haddock' section should be unnamed" - return accum - parseSections (h,d,p,a) - (ParseUtils.Section line "install-dirs" name fs) - | name == "" = do d' <- parseFields installDirsFields d fs - return (h, d',p,a) - | otherwise = - syntaxError line $ - "Named 'install-dirs' section: '" ++ name - ++ "'. Note that named 'install-dirs' sections are not allowed in the '" - ++ userPackageEnvironmentFile ++ "' file." 
- parseSections accum@(h, d,p,a) - (ParseUtils.Section _ "program-locations" name fs) - | name == "" = do p' <- parseFields withProgramsFields p fs - return (h, d, p', a) - | otherwise = do - warning "The 'program-locations' section should be unnamed" - return accum - parseSections accum@(h, d, p, a) - (ParseUtils.Section _ "program-default-options" name fs) - | name == "" = do a' <- parseFields withProgramOptionsFields a fs - return (h, d, p, a') - | otherwise = do - warning "The 'program-default-options' section should be unnamed" - return accum - parseSections accum f = do - warning $ "Unrecognized stanza on line " ++ show (lineNo f) - return accum - --- | Accumulator type for 'parseSections'. -type SectionsAccum = (HaddockFlags, InstallDirs (Flag PathTemplate) - , [(String, FilePath)], [(String, [String])]) - --- | Write out the package environment file. -writePackageEnvironmentFile :: FilePath -> IncludeComments - -> PackageEnvironment -> PackageEnvironment - -> IO () -writePackageEnvironmentFile path incComments comments pkgEnv = do - let tmpPath = (path <.> "tmp") - writeFile tmpPath $ explanation ++ pkgEnvStr ++ "\n" - renameFile tmpPath path - where - pkgEnvStr = case incComments of - IncludeComments -> showPackageEnvironmentWithComments - (Just comments) pkgEnv - NoComments -> showPackageEnvironment pkgEnv - explanation = unlines - ["-- This is a Cabal package environment file." - ,"-- THIS FILE IS AUTO-GENERATED. DO NOT EDIT DIRECTLY." - ,"-- Please create a 'cabal.config' file in the same directory" - ,"-- if you want to change the default settings for this sandbox." - ,"","" - ] - --- | Pretty-print the package environment. -showPackageEnvironment :: PackageEnvironment -> String -showPackageEnvironment pkgEnv = showPackageEnvironmentWithComments Nothing pkgEnv - --- | Pretty-print the package environment with default values for empty fields --- commented out (just like the default ~/.cabal/config). 
-showPackageEnvironmentWithComments :: (Maybe PackageEnvironment) - -> PackageEnvironment - -> String -showPackageEnvironmentWithComments mdefPkgEnv pkgEnv = Disp.render $ - ppFields pkgEnvFieldDescrs mdefPkgEnv pkgEnv - $+$ Disp.text "" - $+$ ppSection "install-dirs" "" installDirsFields - (fmap installDirsSection mdefPkgEnv) (installDirsSection pkgEnv) - where - installDirsSection = savedUserInstallDirs . pkgEnvSavedConfig diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Sandbox/Timestamp.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Sandbox/Timestamp.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Sandbox/Timestamp.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Sandbox/Timestamp.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,292 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.Sandbox.Timestamp --- Maintainer : cabal-devel@haskell.org --- Portability : portable --- --- Timestamp file handling (for add-source dependencies). 
------------------------------------------------------------------------------ - -module Distribution.Client.Sandbox.Timestamp ( - AddSourceTimestamp, - withAddTimestamps, - withRemoveTimestamps, - withUpdateTimestamps, - maybeAddCompilerTimestampRecord, - listModifiedDeps, - ) where - -import Control.Exception (IOException) -import Control.Monad (filterM, forM, when) -import Data.Char (isSpace) -import Data.List (partition) -import System.Directory (renameFile) -import System.FilePath ((<.>), ()) -import qualified Data.Map as M - -import Distribution.Compiler (CompilerId) -import Distribution.Package (packageName) -import Distribution.PackageDescription.Configuration (flattenPackageDescription) -import Distribution.PackageDescription.Parse (readPackageDescription) -import Distribution.Simple.Setup (Flag (..), - SDistFlags (..), - defaultSDistFlags, - sdistCommand) -import Distribution.Simple.Utils (debug, die, warn) -import Distribution.System (Platform) -import Distribution.Text (display) -import Distribution.Verbosity (Verbosity, lessVerbose, - normal) -import Distribution.Version (Version (..), - orLaterVersion) - -import Distribution.Client.Sandbox.Index - (ListIgnoredBuildTreeRefs (ListIgnored), RefTypesToList(OnlyLinks) - ,listBuildTreeRefs) -import Distribution.Client.SetupWrapper (SetupScriptOptions (..), - defaultSetupScriptOptions, - setupWrapper) -import Distribution.Client.Utils - (inDir, removeExistingFile, tryCanonicalizePath, tryFindAddSourcePackageDesc) - -import Distribution.Compat.Exception (catchIO) -import Distribution.Client.Compat.Time (EpochTime, getCurTime, - getModTime) - - --- | Timestamp of an add-source dependency. -type AddSourceTimestamp = (FilePath, EpochTime) --- | Timestamp file record - a string identifying the compiler & platform plus a --- list of add-source timestamps. 
-type TimestampFileRecord = (String, [AddSourceTimestamp]) - -timestampRecordKey :: CompilerId -> Platform -> String -timestampRecordKey compId platform = display platform ++ "-" ++ display compId - --- | The 'add-source-timestamps' file keeps the timestamps of all add-source --- dependencies. It is initially populated by 'sandbox add-source' and kept --- current by 'reinstallAddSourceDeps' and 'configure -w'. The user can install --- add-source deps manually with 'cabal install' after having edited them, so we --- can err on the side of caution sometimes. --- FIXME: We should keep this info in the index file, together with build tree --- refs. -timestampFileName :: FilePath -timestampFileName = "add-source-timestamps" - --- | Read the timestamp file. Exits with error if the timestamp file is --- corrupted. Returns an empty list if the file doesn't exist. -readTimestampFile :: FilePath -> IO [TimestampFileRecord] -readTimestampFile timestampFile = do - timestampString <- readFile timestampFile `catchIO` \_ -> return "[]" - case reads timestampString of - [(timestamps, s)] | all isSpace s -> return timestamps - _ -> - die $ "The timestamps file is corrupted. " - ++ "Please delete & recreate the sandbox." - --- | Write the timestamp file, atomically. -writeTimestampFile :: FilePath -> [TimestampFileRecord] -> IO () -writeTimestampFile timestampFile timestamps = do - writeFile timestampTmpFile (show timestamps) - renameFile timestampTmpFile timestampFile - where - timestampTmpFile = timestampFile <.> "tmp" - --- | Read, process and write the timestamp file in one go. 
-withTimestampFile :: FilePath - -> ([TimestampFileRecord] -> IO [TimestampFileRecord]) - -> IO () -withTimestampFile sandboxDir process = do - let timestampFile = sandboxDir timestampFileName - timestampRecords <- readTimestampFile timestampFile >>= process - writeTimestampFile timestampFile timestampRecords - --- | Given a list of 'AddSourceTimestamp's, a list of paths to add-source deps --- we've added and an initial timestamp, add an 'AddSourceTimestamp' to the list --- for each path. If a timestamp for a given path already exists in the list, --- update it. -addTimestamps :: EpochTime -> [AddSourceTimestamp] -> [FilePath] - -> [AddSourceTimestamp] -addTimestamps initial timestamps newPaths = - [ (p, initial) | p <- newPaths ] ++ oldTimestamps - where - (oldTimestamps, _toBeUpdated) = - partition (\(path, _) -> path `notElem` newPaths) timestamps - --- | Given a list of 'AddSourceTimestamp's, a list of paths to add-source deps --- we've reinstalled and a new timestamp value, update the timestamp value for --- the deps in the list. If there are new paths in the list, ignore them. -updateTimestamps :: [AddSourceTimestamp] -> [FilePath] -> EpochTime - -> [AddSourceTimestamp] -updateTimestamps timestamps pathsToUpdate newTimestamp = - foldr updateTimestamp [] timestamps - where - updateTimestamp t@(path, _oldTimestamp) rest - | path `elem` pathsToUpdate = (path, newTimestamp) : rest - | otherwise = t : rest - --- | Given a list of 'TimestampFileRecord's and a list of paths to add-source --- deps we've removed, remove those deps from the list. -removeTimestamps :: [AddSourceTimestamp] -> [FilePath] -> [AddSourceTimestamp] -removeTimestamps l pathsToRemove = foldr removeTimestamp [] l - where - removeTimestamp t@(path, _oldTimestamp) rest = - if path `elem` pathsToRemove - then rest - else t : rest - --- | If a timestamp record for this compiler doesn't exist, add a new one. 
-maybeAddCompilerTimestampRecord :: Verbosity -> FilePath -> FilePath - -> CompilerId -> Platform - -> IO () -maybeAddCompilerTimestampRecord verbosity sandboxDir indexFile - compId platform = do - let key = timestampRecordKey compId platform - withTimestampFile sandboxDir $ \timestampRecords -> do - case lookup key timestampRecords of - Just _ -> return timestampRecords - Nothing -> do - buildTreeRefs <- listBuildTreeRefs verbosity ListIgnored OnlyLinks - indexFile - now <- getCurTime - let timestamps = map (\p -> (p, now)) buildTreeRefs - return $ (key, timestamps):timestampRecords - --- | Given an IO action that returns a list of build tree refs, add those --- build tree refs to the timestamps file (for all compilers). -withAddTimestamps :: FilePath -> IO [FilePath] -> IO () -withAddTimestamps sandboxDir act = do - let initialTimestamp = 0 - withActionOnAllTimestamps (addTimestamps initialTimestamp) sandboxDir act - --- | Given an IO action that returns a list of build tree refs, remove those --- build tree refs from the timestamps file (for all compilers). -withRemoveTimestamps :: FilePath -> IO [FilePath] -> IO () -withRemoveTimestamps = withActionOnAllTimestamps removeTimestamps - --- | Given an IO action that returns a list of build tree refs, update the --- timestamps of the returned build tree refs to the current time (only for the --- given compiler & platform). -withUpdateTimestamps :: FilePath -> CompilerId -> Platform - ->([AddSourceTimestamp] -> IO [FilePath]) - -> IO () -withUpdateTimestamps = - withActionOnCompilerTimestamps updateTimestamps - --- | Helper for implementing 'withAddTimestamps' and --- 'withRemoveTimestamps'. Runs a given action on the list of --- 'AddSourceTimestamp's for all compilers, applies 'f' to the result and then --- updates the timestamp file. The IO action is run only once. 
-withActionOnAllTimestamps :: ([AddSourceTimestamp] -> [FilePath] - -> [AddSourceTimestamp]) - -> FilePath - -> IO [FilePath] - -> IO () -withActionOnAllTimestamps f sandboxDir act = - withTimestampFile sandboxDir $ \timestampRecords -> do - paths <- act - return [(key, f timestamps paths) | (key, timestamps) <- timestampRecords] - --- | Helper for implementing 'withUpdateTimestamps'. Runs a given action on the --- list of 'AddSourceTimestamp's for this compiler, applies 'f' to the result --- and then updates the timestamp file record. The IO action is run only once. -withActionOnCompilerTimestamps :: ([AddSourceTimestamp] - -> [FilePath] -> EpochTime - -> [AddSourceTimestamp]) - -> FilePath - -> CompilerId - -> Platform - -> ([AddSourceTimestamp] -> IO [FilePath]) - -> IO () -withActionOnCompilerTimestamps f sandboxDir compId platform act = do - let needle = timestampRecordKey compId platform - withTimestampFile sandboxDir $ \timestampRecords -> do - timestampRecords' <- forM timestampRecords $ \r@(key, timestamps) -> - if key == needle - then do paths <- act timestamps - now <- getCurTime - return (key, f timestamps paths now) - else return r - return timestampRecords' - --- | List all source files of a given add-source dependency. Exits with error if --- something is wrong (e.g. there is no .cabal file in the given directory). --- FIXME: This function is not thread-safe because of 'inDir'. -allPackageSourceFiles :: Verbosity -> FilePath -> IO [FilePath] -allPackageSourceFiles verbosity packageDir = inDir (Just packageDir) $ do - pkg <- do - let err = "Error reading source files of add-source dependency." 
- desc <- tryFindAddSourcePackageDesc packageDir err - flattenPackageDescription `fmap` readPackageDescription verbosity desc - let file = "cabal-sdist-list-sources" - flags = defaultSDistFlags { - sDistVerbosity = Flag $ if verbosity == normal - then lessVerbose verbosity else verbosity, - sDistListSources = Flag file - } - setupOpts = defaultSetupScriptOptions { - -- 'sdist --list-sources' was introduced in Cabal 1.18. - useCabalVersion = orLaterVersion $ Version [1,18,0] [] - } - - doListSources :: IO [FilePath] - doListSources = do - setupWrapper verbosity setupOpts (Just pkg) sdistCommand (const flags) [] - srcs <- fmap lines . readFile $ file - mapM tryCanonicalizePath srcs - - onFailedListSources :: IOException -> IO () - onFailedListSources e = do - warn verbosity $ - "Could not list sources of the add-source dependency '" - ++ display (packageName pkg) ++ "'. Skipping the timestamp check." - debug verbosity $ - "Exception was: " ++ show e - - -- Run setup sdist --list-sources=TMPFILE - ret <- doListSources `catchIO` (\e -> onFailedListSources e >> return []) - removeExistingFile file - return ret - --- | Has this dependency been modified since we have last looked at it? -isDepModified :: Verbosity -> EpochTime -> AddSourceTimestamp -> IO Bool -isDepModified verbosity now (packageDir, timestamp) = do - debug verbosity ("Checking whether the dependency is modified: " ++ packageDir) - depSources <- allPackageSourceFiles verbosity packageDir - go depSources - - where - go [] = return False - go (dep:rest) = do - -- FIXME: What if the clock jumps backwards at any point? For now we only - -- print a warning. - modTime <- getModTime dep - when (modTime > now) $ - warn verbosity $ "File '" ++ dep - ++ "' has a modification time that is in the future." - if modTime >= timestamp - then do - debug verbosity ("Dependency has a modified source file: " ++ dep) - return True - else go rest - --- | List all modified dependencies. 
-listModifiedDeps :: Verbosity -> FilePath -> CompilerId -> Platform - -> M.Map FilePath a - -- ^ The set of all installed add-source deps. - -> IO [FilePath] -listModifiedDeps verbosity sandboxDir compId platform installedDepsMap = do - timestampRecords <- readTimestampFile (sandboxDir timestampFileName) - let needle = timestampRecordKey compId platform - timestamps <- maybe noTimestampRecord return - (lookup needle timestampRecords) - now <- getCurTime - fmap (map fst) . filterM (isDepModified verbosity now) - . filter (\ts -> fst ts `M.member` installedDepsMap) - $ timestamps - - where - noTimestampRecord = die $ "Сouldn't find a timestamp record for the given " - ++ "compiler/platform pair. " - ++ "Please report this on the Cabal bug tracker: " - ++ "https://github.com/haskell/cabal/issues/new ." diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Sandbox/Types.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Sandbox/Types.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Sandbox/Types.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Sandbox/Types.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,64 +0,0 @@ -{-# LANGUAGE CPP #-} ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.Sandbox.Types --- Maintainer : cabal-devel@haskell.org --- Portability : portable --- --- Helpers for writing code that works both inside and outside a sandbox. ------------------------------------------------------------------------------ - -module Distribution.Client.Sandbox.Types ( - UseSandbox(..), isUseSandbox, whenUsingSandbox, - SandboxPackageInfo(..) - ) where - -import qualified Distribution.Simple.PackageIndex as InstalledPackageIndex -import Distribution.Client.Types (SourcePackage) - -#if !MIN_VERSION_base(4,8,0) -import Data.Monoid -#endif -import qualified Data.Set as S - --- | Are we using a sandbox? 
-data UseSandbox = UseSandbox FilePath | NoSandbox - -instance Monoid UseSandbox where - mempty = NoSandbox - - NoSandbox `mappend` s = s - u0@(UseSandbox _) `mappend` NoSandbox = u0 - (UseSandbox _) `mappend` u1@(UseSandbox _) = u1 - --- | Convert a @UseSandbox@ value to a boolean. Useful in conjunction with --- @when@. -isUseSandbox :: UseSandbox -> Bool -isUseSandbox (UseSandbox _) = True -isUseSandbox NoSandbox = False - --- | Execute an action only if we're in a sandbox, feeding to it the path to the --- sandbox directory. -whenUsingSandbox :: UseSandbox -> (FilePath -> IO ()) -> IO () -whenUsingSandbox NoSandbox _ = return () -whenUsingSandbox (UseSandbox sandboxDir) act = act sandboxDir - --- | Data about the packages installed in the sandbox that is passed from --- 'reinstallAddSourceDeps' to the solver. -data SandboxPackageInfo = SandboxPackageInfo { - modifiedAddSourceDependencies :: ![SourcePackage], - -- ^ Modified add-source deps that we want to reinstall. These are guaranteed - -- to be already installed in the sandbox. - - otherAddSourceDependencies :: ![SourcePackage], - -- ^ Remaining add-source deps. Some of these may be not installed in the - -- sandbox. - - otherInstalledSandboxPackages :: !InstalledPackageIndex.InstalledPackageIndex, - -- ^ All packages installed in the sandbox. Intersection with - -- 'modifiedAddSourceDependencies' and/or 'otherAddSourceDependencies' can be - -- non-empty. - - allAddSourceDependencies :: !(S.Set FilePath) - -- ^ A set of paths to all add-source dependencies, for convenience. 
- } diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Sandbox.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Sandbox.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Sandbox.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Sandbox.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,766 +0,0 @@ -{-# LANGUAGE CPP #-} ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.Sandbox --- Maintainer : cabal-devel@haskell.org --- Portability : portable --- --- UI for the sandboxing functionality. ------------------------------------------------------------------------------ - -module Distribution.Client.Sandbox ( - sandboxInit, - sandboxDelete, - sandboxAddSource, - sandboxAddSourceSnapshot, - sandboxDeleteSource, - sandboxListSources, - sandboxHcPkg, - dumpPackageEnvironment, - withSandboxBinDirOnSearchPath, - - getSandboxConfigFilePath, - loadConfigOrSandboxConfig, - initPackageDBIfNeeded, - maybeWithSandboxDirOnSearchPath, - - WereDepsReinstalled(..), - reinstallAddSourceDeps, - maybeReinstallAddSourceDeps, - - SandboxPackageInfo(..), - maybeWithSandboxPackageInfo, - - tryGetIndexFilePath, - sandboxBuildDir, - getInstalledPackagesInSandbox, - updateSandboxConfigFileFlag, - - -- FIXME: move somewhere else - configPackageDB', configCompilerAux' - ) where - -import Distribution.Client.Setup - ( SandboxFlags(..), ConfigFlags(..), ConfigExFlags(..), InstallFlags(..) - , GlobalFlags(..), defaultConfigExFlags, defaultInstallFlags - , defaultSandboxLocation, globalRepos ) -import Distribution.Client.Sandbox.Timestamp ( listModifiedDeps - , maybeAddCompilerTimestampRecord - , withAddTimestamps - , withRemoveTimestamps ) -import Distribution.Client.Config ( SavedConfig(..), loadConfig ) -import Distribution.Client.Dependency ( foldProgress ) -import Distribution.Client.IndexUtils ( BuildTreeRefType(..) 
) -import Distribution.Client.Install ( InstallArgs, - makeInstallContext, - makeInstallPlan, - processInstallPlan ) -import Distribution.Utils.NubList ( fromNubList ) - -import Distribution.Client.Sandbox.PackageEnvironment - ( PackageEnvironment(..), IncludeComments(..), PackageEnvironmentType(..) - , createPackageEnvironmentFile, classifyPackageEnvironment - , tryLoadSandboxPackageEnvironmentFile, loadUserConfig - , commentPackageEnvironment, showPackageEnvironmentWithComments - , sandboxPackageEnvironmentFile, userPackageEnvironmentFile ) -import Distribution.Client.Sandbox.Types ( SandboxPackageInfo(..) - , UseSandbox(..) ) -import Distribution.Client.Types ( PackageLocation(..) - , SourcePackage(..) ) -import Distribution.Client.Utils ( inDir, tryCanonicalizePath - , tryFindAddSourcePackageDesc ) -import Distribution.PackageDescription.Configuration - ( flattenPackageDescription ) -import Distribution.PackageDescription.Parse ( readPackageDescription ) -import Distribution.Simple.Compiler ( Compiler(..), PackageDB(..) - , PackageDBStack ) -import Distribution.Simple.Configure ( configCompilerAuxEx - , interpretPackageDbFlags - , getPackageDBContents ) -import Distribution.Simple.PreProcess ( knownSuffixHandlers ) -import Distribution.Simple.Program ( ProgramConfiguration ) -import Distribution.Simple.Setup ( Flag(..), HaddockFlags(..) - , fromFlagOrDefault ) -import Distribution.Simple.SrcDist ( prepareTree ) -import Distribution.Simple.Utils ( die, debug, notice, info, warn - , debugNoWrap, defaultPackageDesc - , intercalate, topHandlerWith - , createDirectoryIfMissingVerbose ) -import Distribution.Package ( Package(..) 
) -import Distribution.System ( Platform ) -import Distribution.Text ( display ) -import Distribution.Verbosity ( Verbosity, lessVerbose ) -import Distribution.Client.Compat.Environment ( lookupEnv, setEnv ) -import Distribution.Client.Compat.FilePerms ( setFileHidden ) -import qualified Distribution.Client.Sandbox.Index as Index -import Distribution.Simple.PackageIndex ( InstalledPackageIndex ) -import qualified Distribution.Simple.PackageIndex as InstalledPackageIndex -import qualified Distribution.Simple.Register as Register -import qualified Data.Map as M -import qualified Data.Set as S -import Control.Exception ( assert, bracket_ ) -import Control.Monad ( forM, liftM2, unless, when ) -import Data.Bits ( shiftL, shiftR, xor ) -import Data.Char ( ord ) -import Data.IORef ( newIORef, writeIORef, readIORef ) -import Data.List ( delete, foldl' ) -import Data.Maybe ( fromJust, fromMaybe ) -#if !MIN_VERSION_base(4,8,0) -import Data.Monoid ( mempty, mappend ) -#endif -import Data.Word ( Word32 ) -import Numeric ( showHex ) -import System.Directory ( createDirectory - , doesDirectoryExist - , doesFileExist - , getCurrentDirectory - , removeDirectoryRecursive - , removeFile - , renameDirectory ) -import System.FilePath ( (), getSearchPath - , searchPathSeparator - , takeDirectory ) - - --- --- * Constants --- - --- | The name of the sandbox subdirectory where we keep snapshots of add-source --- dependencies. -snapshotDirectoryName :: FilePath -snapshotDirectoryName = "snapshots" - --- | Non-standard build dir that is used for building add-source deps instead of --- "dist". Fixes surprising behaviour in some cases (see issue #1281). 
-sandboxBuildDir :: FilePath -> FilePath -sandboxBuildDir sandboxDir = "dist/dist-sandbox-" ++ showHex sandboxDirHash "" - where - sandboxDirHash = jenkins sandboxDir - - -- See http://en.wikipedia.org/wiki/Jenkins_hash_function - jenkins :: String -> Word32 - jenkins str = loop_finish $ foldl' loop 0 str - where - loop :: Word32 -> Char -> Word32 - loop hash key_i' = hash''' - where - key_i = toEnum . ord $ key_i' - hash' = hash + key_i - hash'' = hash' + (shiftL hash' 10) - hash''' = hash'' `xor` (shiftR hash'' 6) - - loop_finish :: Word32 -> Word32 - loop_finish hash = hash''' - where - hash' = hash + (shiftL hash 3) - hash'' = hash' `xor` (shiftR hash' 11) - hash''' = hash'' + (shiftL hash'' 15) - --- --- * Basic sandbox functions. --- - --- | If @--sandbox-config-file@ wasn't given on the command-line, set it to the --- value of the @CABAL_SANDBOX_CONFIG@ environment variable, or else to --- 'NoFlag'. -updateSandboxConfigFileFlag :: GlobalFlags -> IO GlobalFlags -updateSandboxConfigFileFlag globalFlags = - case globalSandboxConfigFile globalFlags of - Flag _ -> return globalFlags - NoFlag -> do - f' <- fmap (fromMaybe NoFlag . fmap Flag) . lookupEnv - $ "CABAL_SANDBOX_CONFIG" - return globalFlags { globalSandboxConfigFile = f' } - --- | Return the path to the sandbox config file - either the default or the one --- specified with @--sandbox-config-file@. -getSandboxConfigFilePath :: GlobalFlags -> IO FilePath -getSandboxConfigFilePath globalFlags = do - let sandboxConfigFileFlag = globalSandboxConfigFile globalFlags - case sandboxConfigFileFlag of - NoFlag -> do pkgEnvDir <- getCurrentDirectory - return (pkgEnvDir sandboxPackageEnvironmentFile) - Flag path -> return path - --- | Load the @cabal.sandbox.config@ file (and possibly the optional --- @cabal.config@). In addition to a @PackageEnvironment@, also return a --- canonical path to the sandbox. Exit with error if the sandbox directory or --- the package environment file do not exist. 
-tryLoadSandboxConfig :: Verbosity -> GlobalFlags - -> IO (FilePath, PackageEnvironment) -tryLoadSandboxConfig verbosity globalFlags = do - path <- getSandboxConfigFilePath globalFlags - tryLoadSandboxPackageEnvironmentFile verbosity path - (globalConfigFile globalFlags) - --- | Return the name of the package index file for this package environment. -tryGetIndexFilePath :: SavedConfig -> IO FilePath -tryGetIndexFilePath config = tryGetIndexFilePath' (savedGlobalFlags config) - --- | The same as 'tryGetIndexFilePath', but takes 'GlobalFlags' instead of --- 'SavedConfig'. -tryGetIndexFilePath' :: GlobalFlags -> IO FilePath -tryGetIndexFilePath' globalFlags = do - let paths = fromNubList $ globalLocalRepos globalFlags - case paths of - [] -> die $ "Distribution.Client.Sandbox.tryGetIndexFilePath: " ++ - "no local repos found. " ++ checkConfiguration - _ -> return $ (last paths) Index.defaultIndexFileName - where - checkConfiguration = "Please check your configuration ('" - ++ userPackageEnvironmentFile ++ "')." - --- | Try to extract a 'PackageDB' from 'ConfigFlags'. Gives a better error --- message than just pattern-matching. -getSandboxPackageDB :: ConfigFlags -> IO PackageDB -getSandboxPackageDB configFlags = do - case configPackageDBs configFlags of - [Just sandboxDB@(SpecificPackageDB _)] -> return sandboxDB - -- TODO: should we allow multiple package DBs (e.g. with 'inherit')? - - [] -> - die $ "Sandbox package DB is not specified. " ++ sandboxConfigCorrupt - [_] -> - die $ "Unexpected contents of the 'package-db' field. " - ++ sandboxConfigCorrupt - _ -> - die $ "Too many package DBs provided. " ++ sandboxConfigCorrupt - - where - sandboxConfigCorrupt = "Your 'cabal.sandbox.config' is probably corrupt." - - --- | Which packages are installed in the sandbox package DB? 
-getInstalledPackagesInSandbox :: Verbosity -> ConfigFlags - -> Compiler -> ProgramConfiguration - -> IO InstalledPackageIndex -getInstalledPackagesInSandbox verbosity configFlags comp conf = do - sandboxDB <- getSandboxPackageDB configFlags - getPackageDBContents verbosity comp sandboxDB conf - --- | Temporarily add $SANDBOX_DIR/bin to $PATH. -withSandboxBinDirOnSearchPath :: FilePath -> IO a -> IO a -withSandboxBinDirOnSearchPath sandboxDir = bracket_ addBinDir rmBinDir - where - -- TODO: Instead of modifying the global process state, it'd be better to - -- set the environment individually for each subprocess invocation. This - -- will have to wait until the Shell monad is implemented; without it the - -- required changes are too intrusive. - addBinDir :: IO () - addBinDir = do - mbOldPath <- lookupEnv "PATH" - let newPath = maybe sandboxBin ((++) sandboxBin . (:) searchPathSeparator) - mbOldPath - setEnv "PATH" newPath - - rmBinDir :: IO () - rmBinDir = do - oldPath <- getSearchPath - let newPath = intercalate [searchPathSeparator] - (delete sandboxBin oldPath) - setEnv "PATH" newPath - - sandboxBin = sandboxDir "bin" - --- | Initialise a package DB for this compiler if it doesn't exist. -initPackageDBIfNeeded :: Verbosity -> ConfigFlags - -> Compiler -> ProgramConfiguration - -> IO () -initPackageDBIfNeeded verbosity configFlags comp conf = do - SpecificPackageDB dbPath <- getSandboxPackageDB configFlags - packageDBExists <- doesDirectoryExist dbPath - unless packageDBExists $ - Register.initPackageDB verbosity comp conf dbPath - when packageDBExists $ - debug verbosity $ "The package database already exists: " ++ dbPath - --- | Entry point for the 'cabal sandbox dump-pkgenv' command. -dumpPackageEnvironment :: Verbosity -> SandboxFlags -> GlobalFlags -> IO () -dumpPackageEnvironment verbosity _sandboxFlags globalFlags = do - (sandboxDir, pkgEnv) <- tryLoadSandboxConfig verbosity globalFlags - commentPkgEnv <- commentPackageEnvironment sandboxDir - putStrLn . 
showPackageEnvironmentWithComments (Just commentPkgEnv) $ pkgEnv - --- | Entry point for the 'cabal sandbox init' command. -sandboxInit :: Verbosity -> SandboxFlags -> GlobalFlags -> IO () -sandboxInit verbosity sandboxFlags globalFlags = do - -- Warn if there's a 'cabal-dev' sandbox. - isCabalDevSandbox <- liftM2 (&&) (doesDirectoryExist "cabal-dev") - (doesFileExist $ "cabal-dev" "cabal.config") - when isCabalDevSandbox $ - warn verbosity $ - "You are apparently using a legacy (cabal-dev) sandbox. " - ++ "Legacy sandboxes may interact badly with native Cabal sandboxes. " - ++ "You may want to delete the 'cabal-dev' directory to prevent issues." - - -- Create the sandbox directory. - let sandboxDir' = fromFlagOrDefault defaultSandboxLocation - (sandboxLocation sandboxFlags) - createDirectoryIfMissingVerbose verbosity True sandboxDir' - sandboxDir <- tryCanonicalizePath sandboxDir' - setFileHidden sandboxDir - - -- Determine which compiler to use (using the value from ~/.cabal/config). - userConfig <- loadConfig verbosity (globalConfigFile globalFlags) NoFlag - (comp, platform, conf) <- configCompilerAuxEx (savedConfigureFlags userConfig) - - -- Create the package environment file. - pkgEnvFile <- getSandboxConfigFilePath globalFlags - createPackageEnvironmentFile verbosity sandboxDir pkgEnvFile - NoComments comp platform - (_sandboxDir, pkgEnv) <- tryLoadSandboxConfig verbosity globalFlags - let config = pkgEnvSavedConfig pkgEnv - configFlags = savedConfigureFlags config - - -- Create the index file if it doesn't exist. - indexFile <- tryGetIndexFilePath config - indexFileExists <- doesFileExist indexFile - if indexFileExists - then notice verbosity $ "Using an existing sandbox located at " ++ sandboxDir - else notice verbosity $ "Creating a new sandbox at " ++ sandboxDir - Index.createEmpty verbosity indexFile - - -- Create the package DB for the default compiler. 
- initPackageDBIfNeeded verbosity configFlags comp conf - maybeAddCompilerTimestampRecord verbosity sandboxDir indexFile - (compilerId comp) platform - --- | Entry point for the 'cabal sandbox delete' command. -sandboxDelete :: Verbosity -> SandboxFlags -> GlobalFlags -> IO () -sandboxDelete verbosity _sandboxFlags globalFlags = do - (useSandbox, _) <- loadConfigOrSandboxConfig - verbosity - globalFlags { globalRequireSandbox = Flag False } - mempty - case useSandbox of - NoSandbox -> warn verbosity "Not in a sandbox." - UseSandbox sandboxDir -> do - curDir <- getCurrentDirectory - pkgEnvFile <- getSandboxConfigFilePath globalFlags - - -- Remove the @cabal.sandbox.config@ file, unless it's in a non-standard - -- location. - let isNonDefaultConfigLocation = - pkgEnvFile /= (curDir sandboxPackageEnvironmentFile) - - if isNonDefaultConfigLocation - then warn verbosity $ "Sandbox config file is in non-default location: '" - ++ pkgEnvFile ++ "'.\n Please delete manually." - else removeFile pkgEnvFile - - -- Remove the sandbox directory, unless we're using a shared sandbox. - let isNonDefaultSandboxLocation = - sandboxDir /= (curDir defaultSandboxLocation) - - when isNonDefaultSandboxLocation $ - die $ "Non-default sandbox location used: '" ++ sandboxDir - ++ "'.\nAssuming a shared sandbox. Please delete '" - ++ sandboxDir ++ "' manually." - - notice verbosity $ "Deleting the sandbox located at " ++ sandboxDir - removeDirectoryRecursive sandboxDir - --- Common implementation of 'sandboxAddSource' and 'sandboxAddSourceSnapshot'. -doAddSource :: Verbosity -> [FilePath] -> FilePath -> PackageEnvironment - -> BuildTreeRefType - -> IO () -doAddSource verbosity buildTreeRefs sandboxDir pkgEnv refType = do - let savedConfig = pkgEnvSavedConfig pkgEnv - indexFile <- tryGetIndexFilePath savedConfig - - -- If we're running 'sandbox add-source' for the first time for this compiler, - -- we need to create an initial timestamp record. - (comp, platform, _) <- configCompilerAuxEx . 
savedConfigureFlags $ savedConfig - maybeAddCompilerTimestampRecord verbosity sandboxDir indexFile - (compilerId comp) platform - - withAddTimestamps sandboxDir $ do - -- FIXME: path canonicalisation is done in addBuildTreeRefs, but we do it - -- twice because of the timestamps file. - buildTreeRefs' <- mapM tryCanonicalizePath buildTreeRefs - Index.addBuildTreeRefs verbosity indexFile buildTreeRefs' refType - return buildTreeRefs' - --- | Entry point for the 'cabal sandbox add-source' command. -sandboxAddSource :: Verbosity -> [FilePath] -> SandboxFlags -> GlobalFlags - -> IO () -sandboxAddSource verbosity buildTreeRefs sandboxFlags globalFlags = do - (sandboxDir, pkgEnv) <- tryLoadSandboxConfig verbosity globalFlags - - if fromFlagOrDefault False (sandboxSnapshot sandboxFlags) - then sandboxAddSourceSnapshot verbosity buildTreeRefs sandboxDir pkgEnv - else doAddSource verbosity buildTreeRefs sandboxDir pkgEnv LinkRef - --- | Entry point for the 'cabal sandbox add-source --snapshot' command. -sandboxAddSourceSnapshot :: Verbosity -> [FilePath] -> FilePath - -> PackageEnvironment - -> IO () -sandboxAddSourceSnapshot verbosity buildTreeRefs sandboxDir pkgEnv = do - let snapshotDir = sandboxDir snapshotDirectoryName - - -- Use 'D.S.SrcDist.prepareTree' to copy each package's files to our private - -- location. - createDirectoryIfMissingVerbose verbosity True snapshotDir - - -- Collect the package descriptions first, so that if some path does not refer - -- to a cabal package, we fail immediately. - pkgs <- forM buildTreeRefs $ \buildTreeRef -> - inDir (Just buildTreeRef) $ - return . flattenPackageDescription - =<< readPackageDescription verbosity - =<< defaultPackageDesc verbosity - - -- Copy the package sources to "snapshots/$PKGNAME-$VERSION-tmp". If - -- 'prepareTree' throws an error at any point, the old snapshots will still be - -- in consistent state. 
- tmpDirs <- forM (zip buildTreeRefs pkgs) $ \(buildTreeRef, pkg) -> - inDir (Just buildTreeRef) $ do - let targetDir = snapshotDir (display . packageId $ pkg) - targetTmpDir = targetDir ++ "-tmp" - dirExists <- doesDirectoryExist targetTmpDir - when dirExists $ - removeDirectoryRecursive targetDir - createDirectory targetTmpDir - prepareTree verbosity pkg Nothing targetTmpDir knownSuffixHandlers - return (targetTmpDir, targetDir) - - -- Now rename the "snapshots/$PKGNAME-$VERSION-tmp" dirs to - -- "snapshots/$PKGNAME-$VERSION". - snapshots <- forM tmpDirs $ \(targetTmpDir, targetDir) -> do - dirExists <- doesDirectoryExist targetDir - when dirExists $ - removeDirectoryRecursive targetDir - renameDirectory targetTmpDir targetDir - return targetDir - - -- Once the packages are copied, just 'add-source' them as usual. - doAddSource verbosity snapshots sandboxDir pkgEnv SnapshotRef - --- | Entry point for the 'cabal sandbox delete-source' command. -sandboxDeleteSource :: Verbosity -> [FilePath] -> SandboxFlags -> GlobalFlags - -> IO () -sandboxDeleteSource verbosity buildTreeRefs _sandboxFlags globalFlags = do - (sandboxDir, pkgEnv) <- tryLoadSandboxConfig verbosity globalFlags - indexFile <- tryGetIndexFilePath (pkgEnvSavedConfig pkgEnv) - - withRemoveTimestamps sandboxDir $ do - Index.removeBuildTreeRefs verbosity indexFile buildTreeRefs - - notice verbosity $ "Note: 'sandbox delete-source' only unregisters the " ++ - "source dependency, but does not remove the package " ++ - "from the sandbox package DB.\n\n" ++ - "Use 'sandbox hc-pkg -- unregister' to do that." - --- | Entry point for the 'cabal sandbox list-sources' command. 
-sandboxListSources :: Verbosity -> SandboxFlags -> GlobalFlags - -> IO () -sandboxListSources verbosity _sandboxFlags globalFlags = do - (sandboxDir, pkgEnv) <- tryLoadSandboxConfig verbosity globalFlags - indexFile <- tryGetIndexFilePath (pkgEnvSavedConfig pkgEnv) - - refs <- Index.listBuildTreeRefs verbosity - Index.ListIgnored Index.LinksAndSnapshots indexFile - when (null refs) $ - notice verbosity $ "Index file '" ++ indexFile - ++ "' has no references to local build trees." - when (not . null $ refs) $ do - notice verbosity $ "Source dependencies registered " - ++ "in the current sandbox ('" ++ sandboxDir ++ "'):\n\n" - mapM_ putStrLn refs - notice verbosity $ "\nTo unregister source dependencies, " - ++ "use the 'sandbox delete-source' command." - --- | Entry point for the 'cabal sandbox hc-pkg' command. Invokes the @hc-pkg@ --- tool with provided arguments, restricted to the sandbox. -sandboxHcPkg :: Verbosity -> SandboxFlags -> GlobalFlags -> [String] -> IO () -sandboxHcPkg verbosity _sandboxFlags globalFlags extraArgs = do - (_sandboxDir, pkgEnv) <- tryLoadSandboxConfig verbosity globalFlags - let configFlags = savedConfigureFlags . pkgEnvSavedConfig $ pkgEnv - dbStack = configPackageDB' configFlags - (comp, _platform, conf) <- configCompilerAux' configFlags - - Register.invokeHcPkg verbosity comp conf dbStack extraArgs - --- | Check which type of package environment we're in and return a --- correctly-initialised @SavedConfig@ and a @UseSandbox@ value that indicates --- whether we're working in a sandbox. -loadConfigOrSandboxConfig :: Verbosity - -> GlobalFlags -- ^ For @--config-file@ and - -- @--sandbox-config-file@. - -> Flag Bool -- ^ Ignored if we're in a sandbox. 
- -> IO (UseSandbox, SavedConfig) -loadConfigOrSandboxConfig verbosity globalFlags userInstallFlag = do - let configFileFlag = globalConfigFile globalFlags - sandboxConfigFileFlag = globalSandboxConfigFile globalFlags - ignoreSandboxFlag = globalIgnoreSandbox globalFlags - - pkgEnvDir <- getPkgEnvDir sandboxConfigFileFlag - pkgEnvType <- classifyPackageEnvironment pkgEnvDir sandboxConfigFileFlag - ignoreSandboxFlag - case pkgEnvType of - -- A @cabal.sandbox.config@ file (and possibly @cabal.config@) is present. - SandboxPackageEnvironment -> do - (sandboxDir, pkgEnv) <- tryLoadSandboxConfig verbosity globalFlags - -- ^ Prints an error message and exits on error. - let config = pkgEnvSavedConfig pkgEnv - return (UseSandbox sandboxDir, config) - - -- Only @cabal.config@ is present. - UserPackageEnvironment -> do - config <- loadConfig verbosity configFileFlag userInstallFlag - userConfig <- loadUserConfig verbosity pkgEnvDir - let config' = config `mappend` userConfig - dieIfSandboxRequired config' - return (NoSandbox, config') - - -- Neither @cabal.sandbox.config@ nor @cabal.config@ are present. - AmbientPackageEnvironment -> do - config <- loadConfig verbosity configFileFlag userInstallFlag - dieIfSandboxRequired config - return (NoSandbox, config) - - where - -- Return the path to the package environment directory - either the - -- current directory or the one that @--sandbox-config-file@ resides in. - getPkgEnvDir :: (Flag FilePath) -> IO FilePath - getPkgEnvDir sandboxConfigFileFlag = do - case sandboxConfigFileFlag of - NoFlag -> getCurrentDirectory - Flag path -> tryCanonicalizePath . takeDirectory $ path - - -- Die if @--require-sandbox@ was specified and we're not inside a sandbox. - dieIfSandboxRequired :: SavedConfig -> IO () - dieIfSandboxRequired config = checkFlag flag - where - flag = (globalRequireSandbox . 
savedGlobalFlags $ config) - `mappend` (globalRequireSandbox globalFlags) - checkFlag (Flag True) = - die $ "'require-sandbox' is set to True, but no sandbox is present. " - ++ "Use '--no-require-sandbox' if you want to override " - ++ "'require-sandbox' temporarily." - checkFlag (Flag False) = return () - checkFlag (NoFlag) = return () - --- | If we're in a sandbox, call @withSandboxBinDirOnSearchPath@, otherwise do --- nothing. -maybeWithSandboxDirOnSearchPath :: UseSandbox -> IO a -> IO a -maybeWithSandboxDirOnSearchPath NoSandbox act = act -maybeWithSandboxDirOnSearchPath (UseSandbox sandboxDir) act = - withSandboxBinDirOnSearchPath sandboxDir $ act - --- | Had reinstallAddSourceDeps actually reinstalled any dependencies? -data WereDepsReinstalled = ReinstalledSomeDeps | NoDepsReinstalled - --- | Reinstall those add-source dependencies that have been modified since --- we've last installed them. Assumes that we're working inside a sandbox. -reinstallAddSourceDeps :: Verbosity - -> ConfigFlags -> ConfigExFlags - -> InstallFlags -> GlobalFlags - -> FilePath - -> IO WereDepsReinstalled -reinstallAddSourceDeps verbosity configFlags' configExFlags - installFlags globalFlags sandboxDir = topHandler' $ do - let sandboxDistPref = sandboxBuildDir sandboxDir - configFlags = configFlags' - { configDistPref = Flag sandboxDistPref } - haddockFlags = mempty - { haddockDistPref = Flag sandboxDistPref } - (comp, platform, conf) <- configCompilerAux' configFlags - retVal <- newIORef NoDepsReinstalled - - withSandboxPackageInfo verbosity configFlags globalFlags - comp platform conf sandboxDir $ \sandboxPkgInfo -> - unless (null $ modifiedAddSourceDependencies sandboxPkgInfo) $ do - - let args :: InstallArgs - args = ((configPackageDB' configFlags) - ,(globalRepos globalFlags) - ,comp, platform, conf - ,UseSandbox sandboxDir, Just sandboxPkgInfo - ,globalFlags, configFlags, configExFlags, installFlags - ,haddockFlags) - - -- This can actually be replaced by a call to 'install', 
but we use a - -- lower-level API because of layer separation reasons. Additionally, we - -- might want to use some lower-level features this in the future. - withSandboxBinDirOnSearchPath sandboxDir $ do - installContext <- makeInstallContext verbosity args Nothing - installPlan <- foldProgress logMsg die' return =<< - makeInstallPlan verbosity args installContext - - processInstallPlan verbosity args installContext installPlan - writeIORef retVal ReinstalledSomeDeps - - readIORef retVal - - where - die' message = die (message ++ installFailedInSandbox) - -- TODO: use a better error message, remove duplication. - installFailedInSandbox = - "Note: when using a sandbox, all packages are required to have " - ++ "consistent dependencies. " - ++ "Try reinstalling/unregistering the offending packages or " - ++ "recreating the sandbox." - logMsg message rest = debugNoWrap verbosity message >> rest - - topHandler' = topHandlerWith $ \_ -> do - warn verbosity "Couldn't reinstall some add-source dependencies." - -- Here we can't know whether any deps have been reinstalled, so we have - -- to be conservative. - return ReinstalledSomeDeps - --- | Produce a 'SandboxPackageInfo' and feed it to the given action. Note that --- we don't update the timestamp file here - this is done in --- 'postInstallActions'. -withSandboxPackageInfo :: Verbosity -> ConfigFlags -> GlobalFlags - -> Compiler -> Platform -> ProgramConfiguration - -> FilePath - -> (SandboxPackageInfo -> IO ()) - -> IO () -withSandboxPackageInfo verbosity configFlags globalFlags - comp platform conf sandboxDir cont = do - -- List all add-source deps. - indexFile <- tryGetIndexFilePath' globalFlags - buildTreeRefs <- Index.listBuildTreeRefs verbosity - Index.DontListIgnored Index.OnlyLinks indexFile - let allAddSourceDepsSet = S.fromList buildTreeRefs - - -- List all packages installed in the sandbox. 
- installedPkgIndex <- getInstalledPackagesInSandbox verbosity - configFlags comp conf - let err = "Error reading sandbox package information." - -- Get the package descriptions for all add-source deps. - depsCabalFiles <- mapM (flip tryFindAddSourcePackageDesc err) buildTreeRefs - depsPkgDescs <- mapM (readPackageDescription verbosity) depsCabalFiles - let depsMap = M.fromList (zip buildTreeRefs depsPkgDescs) - isInstalled pkgid = not . null - . InstalledPackageIndex.lookupSourcePackageId installedPkgIndex $ pkgid - installedDepsMap = M.filter (isInstalled . packageId) depsMap - - -- Get the package ids of modified (and installed) add-source deps. - modifiedAddSourceDeps <- listModifiedDeps verbosity sandboxDir - (compilerId comp) platform installedDepsMap - -- 'fromJust' here is safe because 'modifiedAddSourceDeps' are guaranteed to - -- be a subset of the keys of 'depsMap'. - let modifiedDeps = [ (modDepPath, fromJust $ M.lookup modDepPath depsMap) - | modDepPath <- modifiedAddSourceDeps ] - modifiedDepsMap = M.fromList modifiedDeps - - assert (all (`S.member` allAddSourceDepsSet) modifiedAddSourceDeps) (return ()) - if (null modifiedDeps) - then info verbosity $ "Found no modified add-source deps." - else notice verbosity $ "Some add-source dependencies have been modified. " - ++ "They will be reinstalled..." - - -- Get the package ids of the remaining add-source deps (some are possibly not - -- installed). - let otherDeps = M.assocs (depsMap `M.difference` modifiedDepsMap) - - -- Finally, assemble a 'SandboxPackageInfo'. - cont $ SandboxPackageInfo (map toSourcePackage modifiedDeps) - (map toSourcePackage otherDeps) installedPkgIndex allAddSourceDepsSet - - where - toSourcePackage (path, pkgDesc) = SourcePackage - (packageId pkgDesc) pkgDesc (LocalUnpackedPackage path) Nothing - --- | Same as 'withSandboxPackageInfo' if we're inside a sandbox and a no-op --- otherwise. 
-maybeWithSandboxPackageInfo :: Verbosity -> ConfigFlags -> GlobalFlags - -> Compiler -> Platform -> ProgramConfiguration - -> UseSandbox - -> (Maybe SandboxPackageInfo -> IO ()) - -> IO () -maybeWithSandboxPackageInfo verbosity configFlags globalFlags - comp platform conf useSandbox cont = - case useSandbox of - NoSandbox -> cont Nothing - UseSandbox sandboxDir -> withSandboxPackageInfo verbosity - configFlags globalFlags - comp platform conf sandboxDir - (\spi -> cont (Just spi)) - --- | Check if a sandbox is present and call @reinstallAddSourceDeps@ in that --- case. -maybeReinstallAddSourceDeps :: Verbosity - -> Flag (Maybe Int) -- ^ The '-j' flag - -> ConfigFlags -- ^ Saved configure flags - -- (from dist/setup-config) - -> GlobalFlags - -> IO (UseSandbox, SavedConfig - ,WereDepsReinstalled) -maybeReinstallAddSourceDeps verbosity numJobsFlag configFlags' globalFlags' = do - (useSandbox, config) <- loadConfigOrSandboxConfig verbosity globalFlags' - (configUserInstall configFlags') - case useSandbox of - NoSandbox -> return (NoSandbox, config, NoDepsReinstalled) - UseSandbox sandboxDir -> do - -- Reinstall the modified add-source deps. - let configFlags = savedConfigureFlags config - `mappendSomeSavedFlags` - configFlags' - configExFlags = defaultConfigExFlags - `mappend` savedConfigureExFlags config - installFlags' = defaultInstallFlags - `mappend` savedInstallFlags config - installFlags = installFlags' { - installNumJobs = installNumJobs installFlags' - `mappend` numJobsFlag - } - globalFlags = savedGlobalFlags config - -- This makes it possible to override things like 'remote-repo-cache' - -- from the command line. These options are hidden, and are only - -- useful for debugging, so this should be fine. 
- `mappend` globalFlags' - depsReinstalled <- reinstallAddSourceDeps verbosity - configFlags configExFlags installFlags globalFlags - sandboxDir - return (UseSandbox sandboxDir, config, depsReinstalled) - - where - - -- NOTE: we can't simply do @sandboxConfigFlags `mappend` savedFlags@ - -- because we don't want to auto-enable things like 'library-profiling' for - -- all add-source dependencies even if the user has passed - -- '--enable-library-profiling' to 'cabal configure'. These options are - -- supposed to be set in 'cabal.config'. - mappendSomeSavedFlags :: ConfigFlags -> ConfigFlags -> ConfigFlags - mappendSomeSavedFlags sandboxConfigFlags savedFlags = - sandboxConfigFlags { - configHcFlavor = configHcFlavor sandboxConfigFlags - `mappend` configHcFlavor savedFlags, - configHcPath = configHcPath sandboxConfigFlags - `mappend` configHcPath savedFlags, - configHcPkg = configHcPkg sandboxConfigFlags - `mappend` configHcPkg savedFlags, - configProgramPaths = configProgramPaths sandboxConfigFlags - `mappend` configProgramPaths savedFlags, - configProgramArgs = configProgramArgs sandboxConfigFlags - `mappend` configProgramArgs savedFlags, - -- NOTE: Unconditionally choosing the value from - -- 'dist/setup-config'. Sandbox package DB location may have been - -- changed by 'configure -w'. - configPackageDBs = configPackageDBs savedFlags - -- FIXME: Is this compatible with the 'inherit' feature? 
- } - --- --- Utils (transitionary) --- --- FIXME: configPackageDB' and configCompilerAux' don't really belong in this --- module --- - -configPackageDB' :: ConfigFlags -> PackageDBStack -configPackageDB' cfg = - interpretPackageDbFlags userInstall (configPackageDBs cfg) - where - userInstall = fromFlagOrDefault True (configUserInstall cfg) - -configCompilerAux' :: ConfigFlags - -> IO (Compiler, Platform, ProgramConfiguration) -configCompilerAux' configFlags = - configCompilerAuxEx configFlags - --FIXME: make configCompilerAux use a sensible verbosity - { configVerbosity = fmap lessVerbose (configVerbosity configFlags) } diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Setup.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Setup.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Setup.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Setup.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,2155 +0,0 @@ -{-# LANGUAGE CPP #-} ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.Setup --- Copyright : (c) David Himmelstrup 2005 --- License : BSD-like --- --- Maintainer : lemmih@gmail.com --- Stability : provisional --- Portability : portable --- --- ------------------------------------------------------------------------------ -module Distribution.Client.Setup - ( globalCommand, GlobalFlags(..), defaultGlobalFlags, globalRepos - , configureCommand, ConfigFlags(..), filterConfigureFlags - , configureExCommand, ConfigExFlags(..), defaultConfigExFlags - , configureExOptions - , buildCommand, BuildFlags(..), BuildExFlags(..), SkipAddSourceDepsCheck(..) - , replCommand, testCommand, benchmarkCommand - , installCommand, InstallFlags(..), installOptions, defaultInstallFlags - , listCommand, ListFlags(..) - , updateCommand - , upgradeCommand - , infoCommand, InfoFlags(..) - , fetchCommand, FetchFlags(..) - , freezeCommand, FreezeFlags(..) 
- , getCommand, unpackCommand, GetFlags(..) - , checkCommand - , formatCommand - , uploadCommand, UploadFlags(..) - , reportCommand, ReportFlags(..) - , runCommand - , initCommand, IT.InitFlags(..) - , sdistCommand, SDistFlags(..), SDistExFlags(..), ArchiveFormat(..) - , win32SelfUpgradeCommand, Win32SelfUpgradeFlags(..) - , sandboxCommand, defaultSandboxLocation, SandboxFlags(..) - , execCommand, ExecFlags(..) - , userConfigCommand, UserConfigFlags(..) - - , parsePackageArgs - --TODO: stop exporting these: - , showRepo - , parseRepo - ) where - -import Distribution.Client.Types - ( Username(..), Password(..), Repo(..), RemoteRepo(..), LocalRepo(..) ) -import Distribution.Client.BuildReports.Types - ( ReportLevel(..) ) -import Distribution.Client.Dependency.Types - ( AllowNewer(..), PreSolver(..) ) -import qualified Distribution.Client.Init.Types as IT - ( InitFlags(..), PackageType(..) ) -import Distribution.Client.Targets - ( UserConstraint, readUserConstraint ) -import Distribution.Utils.NubList - ( NubList, toNubList, fromNubList) - -import Distribution.Simple.Compiler (PackageDB) -import Distribution.Simple.Program - ( defaultProgramConfiguration ) -import Distribution.Simple.Command hiding (boolOpt, boolOpt') -import qualified Distribution.Simple.Command as Command -import qualified Distribution.Simple.Setup as Cabal -import Distribution.Simple.Setup - ( ConfigFlags(..), BuildFlags(..), ReplFlags - , TestFlags(..), BenchmarkFlags(..) - , SDistFlags(..), HaddockFlags(..) - , readPackageDbList, showPackageDbList - , Flag(..), toFlag, fromFlag, flagToMaybe, flagToList - , optionVerbosity, boolOpt, boolOpt', trueArg, falseArg, optionNumJobs ) -import Distribution.Simple.InstallDirs - ( PathTemplate, InstallDirs(sysconfdir) - , toPathTemplate, fromPathTemplate ) -import Distribution.Version - ( Version(Version), anyVersion, thisVersion ) -import Distribution.Package - ( PackageIdentifier, packageName, packageVersion, Dependency(..) 
) -import Distribution.PackageDescription - ( RepoKind(..) ) -import Distribution.Text - ( Text(..), display ) -import Distribution.ReadE - ( ReadE(..), readP_to_E, succeedReadE ) -import qualified Distribution.Compat.ReadP as Parse - ( ReadP, readP_to_S, readS_to_P, char, munch1, pfail, sepBy1, (+++) ) -import Distribution.Verbosity - ( Verbosity, normal ) -import Distribution.Simple.Utils - ( wrapText, wrapLine ) - -import Data.Char - ( isSpace, isAlphaNum ) -import Data.List - ( intercalate, delete, deleteFirstsBy ) -import Data.Maybe - ( listToMaybe, maybeToList, fromMaybe ) -#if !MIN_VERSION_base(4,8,0) -import Data.Monoid - ( Monoid(..) ) -#endif -import Control.Monad - ( liftM ) -import System.FilePath - ( () ) -import Network.URI - ( parseAbsoluteURI, uriToString ) - --- ------------------------------------------------------------ --- * Global flags --- ------------------------------------------------------------ - --- | Flags that apply at the top level, not to any sub-command. -data GlobalFlags = GlobalFlags { - globalVersion :: Flag Bool, - globalNumericVersion :: Flag Bool, - globalConfigFile :: Flag FilePath, - globalSandboxConfigFile :: Flag FilePath, - globalRemoteRepos :: NubList RemoteRepo, -- ^ Available Hackage servers. 
- globalCacheDir :: Flag FilePath, - globalLocalRepos :: NubList FilePath, - globalLogsDir :: Flag FilePath, - globalWorldFile :: Flag FilePath, - globalRequireSandbox :: Flag Bool, - globalIgnoreSandbox :: Flag Bool - } - -defaultGlobalFlags :: GlobalFlags -defaultGlobalFlags = GlobalFlags { - globalVersion = Flag False, - globalNumericVersion = Flag False, - globalConfigFile = mempty, - globalSandboxConfigFile = mempty, - globalRemoteRepos = mempty, - globalCacheDir = mempty, - globalLocalRepos = mempty, - globalLogsDir = mempty, - globalWorldFile = mempty, - globalRequireSandbox = Flag False, - globalIgnoreSandbox = Flag False - } - -globalCommand :: [Command action] -> CommandUI GlobalFlags -globalCommand commands = CommandUI { - commandName = "", - commandSynopsis = - "Command line interface to the Haskell Cabal infrastructure.", - commandUsage = \pname -> - "See http://www.haskell.org/cabal/ for more information.\n" - ++ "\n" - ++ "Usage: " ++ pname ++ " [GLOBAL FLAGS] [COMMAND [FLAGS]]\n", - commandDescription = Just $ \pname -> - let - commands' = commands ++ [commandAddAction helpCommandUI undefined] - cmdDescs = getNormalCommandDescriptions commands' - -- if new commands are added, we want them to appear even if they - -- are not included in the custom listing below. Thus, we calculate - -- the `otherCmds` list and append it under the `other` category. - -- Alternatively, a new testcase could be added that ensures that - -- the set of commands listed here is equal to the set of commands - -- that are actually available. 
- otherCmds = deleteFirstsBy (==) (map fst cmdDescs) - [ "help" - , "update" - , "install" - , "fetch" - , "list" - , "info" - , "user-config" - , "get" - , "init" - , "configure" - , "build" - , "clean" - , "run" - , "repl" - , "test" - , "bench" - , "check" - , "sdist" - , "upload" - , "report" - , "freeze" - , "haddock" - , "hscolour" - , "copy" - , "register" - , "sandbox" - , "exec" - ] - maxlen = maximum $ [length name | (name, _) <- cmdDescs] - align str = str ++ replicate (maxlen - length str) ' ' - startGroup n = " ["++n++"]" - par = "" - addCmd n = case lookup n cmdDescs of - Nothing -> "" - Just d -> " " ++ align n ++ " " ++ d - addCmdCustom n d = case lookup n cmdDescs of -- make sure that the - -- command still exists. - Nothing -> "" - Just _ -> " " ++ align n ++ " " ++ d - in - "Commands:\n" - ++ unlines ( - [ startGroup "global" - , addCmd "update" - , addCmd "install" - , par - , addCmd "help" - , addCmd "info" - , addCmd "list" - , addCmd "fetch" - , addCmd "user-config" - , par - , startGroup "package" - , addCmd "get" - , addCmd "init" - , par - , addCmd "configure" - , addCmd "build" - , addCmd "clean" - , par - , addCmd "run" - , addCmd "repl" - , addCmd "test" - , addCmd "bench" - , par - , addCmd "check" - , addCmd "sdist" - , addCmd "upload" - , addCmd "report" - , par - , addCmd "freeze" - , addCmd "haddock" - , addCmd "hscolour" - , addCmd "copy" - , addCmd "register" - , par - , startGroup "sandbox" - , addCmd "sandbox" - , addCmd "exec" - , addCmdCustom "repl" "Open interpreter with access to sandbox packages." 
- ] ++ if null otherCmds then [] else par - :startGroup "other" - :[addCmd n | n <- otherCmds]) - ++ "\n" - ++ "For more information about a command use:\n" - ++ " " ++ pname ++ " COMMAND --help\n" - ++ "or " ++ pname ++ " help COMMAND\n" - ++ "\n" - ++ "To install Cabal packages from hackage use:\n" - ++ " " ++ pname ++ " install foo [--dry-run]\n" - ++ "\n" - ++ "Occasionally you need to update the list of available packages:\n" - ++ " " ++ pname ++ " update\n", - commandNotes = Nothing, - commandDefaultFlags = mempty, - commandOptions = \showOrParseArgs -> - (case showOrParseArgs of ShowArgs -> take 6; ParseArgs -> id) - [option ['V'] ["version"] - "Print version information" - globalVersion (\v flags -> flags { globalVersion = v }) - trueArg - - ,option [] ["numeric-version"] - "Print just the version number" - globalNumericVersion (\v flags -> flags { globalNumericVersion = v }) - trueArg - - ,option [] ["config-file"] - "Set an alternate location for the config file" - globalConfigFile (\v flags -> flags { globalConfigFile = v }) - (reqArgFlag "FILE") - - ,option [] ["sandbox-config-file"] - "Set an alternate location for the sandbox config file (default: './cabal.sandbox.config')" - globalConfigFile (\v flags -> flags { globalSandboxConfigFile = v }) - (reqArgFlag "FILE") - - ,option [] ["require-sandbox"] - "requiring the presence of a sandbox for sandbox-aware commands" - globalRequireSandbox (\v flags -> flags { globalRequireSandbox = v }) - (boolOpt' ([], ["require-sandbox"]) ([], ["no-require-sandbox"])) - - ,option [] ["ignore-sandbox"] - "Ignore any existing sandbox" - globalIgnoreSandbox (\v flags -> flags { globalIgnoreSandbox = v }) - trueArg - - ,option [] ["remote-repo"] - "The name and url for a remote repository" - globalRemoteRepos (\v flags -> flags { globalRemoteRepos = v }) - (reqArg' "NAME:URL" (toNubList . maybeToList . readRepo) (map showRepo . 
fromNubList)) - - ,option [] ["remote-repo-cache"] - "The location where downloads from all remote repos are cached" - globalCacheDir (\v flags -> flags { globalCacheDir = v }) - (reqArgFlag "DIR") - - ,option [] ["local-repo"] - "The location of a local repository" - globalLocalRepos (\v flags -> flags { globalLocalRepos = v }) - (reqArg' "DIR" (\x -> toNubList [x]) fromNubList) - - ,option [] ["logs-dir"] - "The location to put log files" - globalLogsDir (\v flags -> flags { globalLogsDir = v }) - (reqArgFlag "DIR") - - ,option [] ["world-file"] - "The location of the world file" - globalWorldFile (\v flags -> flags { globalWorldFile = v }) - (reqArgFlag "FILE") - ] - } - -instance Monoid GlobalFlags where - mempty = GlobalFlags { - globalVersion = mempty, - globalNumericVersion = mempty, - globalConfigFile = mempty, - globalSandboxConfigFile = mempty, - globalRemoteRepos = mempty, - globalCacheDir = mempty, - globalLocalRepos = mempty, - globalLogsDir = mempty, - globalWorldFile = mempty, - globalRequireSandbox = mempty, - globalIgnoreSandbox = mempty - } - mappend a b = GlobalFlags { - globalVersion = combine globalVersion, - globalNumericVersion = combine globalNumericVersion, - globalConfigFile = combine globalConfigFile, - globalSandboxConfigFile = combine globalConfigFile, - globalRemoteRepos = combine globalRemoteRepos, - globalCacheDir = combine globalCacheDir, - globalLocalRepos = combine globalLocalRepos, - globalLogsDir = combine globalLogsDir, - globalWorldFile = combine globalWorldFile, - globalRequireSandbox = combine globalRequireSandbox, - globalIgnoreSandbox = combine globalIgnoreSandbox - } - where combine field = field a `mappend` field b - -globalRepos :: GlobalFlags -> [Repo] -globalRepos globalFlags = remoteRepos ++ localRepos - where - remoteRepos = - [ Repo (Left remote) cacheDir - | remote <- fromNubList $ globalRemoteRepos globalFlags - , let cacheDir = fromFlag (globalCacheDir globalFlags) - remoteRepoName remote ] - localRepos = - [ 
Repo (Right LocalRepo) local - | local <- fromNubList $ globalLocalRepos globalFlags ] - --- ------------------------------------------------------------ --- * Config flags --- ------------------------------------------------------------ - -configureCommand :: CommandUI ConfigFlags -configureCommand = (Cabal.configureCommand defaultProgramConfiguration) { - commandDefaultFlags = mempty - } - -configureOptions :: ShowOrParseArgs -> [OptionField ConfigFlags] -configureOptions = commandOptions configureCommand - -filterConfigureFlags :: ConfigFlags -> Version -> ConfigFlags -filterConfigureFlags flags cabalLibVersion - | cabalLibVersion >= Version [1,22,0] [] = flags_latest - -- ^ NB: we expect the latest version to be the most common case. - | cabalLibVersion < Version [1,3,10] [] = flags_1_3_10 - | cabalLibVersion < Version [1,10,0] [] = flags_1_10_0 - | cabalLibVersion < Version [1,14,0] [] = flags_1_14_0 - | cabalLibVersion < Version [1,18,0] [] = flags_1_18_0 - | cabalLibVersion < Version [1,19,1] [] = flags_1_19_0 - | cabalLibVersion < Version [1,19,2] [] = flags_1_19_1 - | cabalLibVersion < Version [1,21,1] [] = flags_1_20_0 - | cabalLibVersion < Version [1,22,0] [] = flags_1_21_0 - | otherwise = flags_latest - where - -- Cabal >= 1.19.1 uses '--dependency' and does not need '--constraint'. - flags_latest = flags { configConstraints = [] } - - -- Cabal < 1.22 doesn't know about '--disable-debug-info'. - flags_1_21_0 = flags_latest { configDebugInfo = NoFlag } - - -- Cabal < 1.21.1 doesn't know about 'disable-relocatable' - -- Cabal < 1.21.1 doesn't know about 'enable-profiling' - flags_1_20_0 = - flags_1_21_0 { configRelocatable = NoFlag - , configProfExe = configProfExe flags - , configProfLib = configProfLib flags - , configCoverage = NoFlag - , configLibCoverage = configCoverage flags - -- HACK: See #2409. 
- , configProgramPaths = - ("cabalConfProf", "/TRUE") `delete` configProgramPaths flags - } - -- Cabal < 1.19.2 doesn't know about '--exact-configuration' and - -- '--enable-library-stripping'. - flags_1_19_1 = flags_1_20_0 { configExactConfiguration = NoFlag - , configStripLibs = NoFlag } - -- Cabal < 1.19.1 uses '--constraint' instead of '--dependency'. - flags_1_19_0 = flags_1_19_1 { configDependencies = [] - , configConstraints = configConstraints flags } - -- Cabal < 1.18.0 doesn't know about --extra-prog-path and --sysconfdir. - flags_1_18_0 = flags_1_19_0 { configProgramPathExtra = toNubList [] - , configInstallDirs = configInstallDirs_1_18_0} - configInstallDirs_1_18_0 = (configInstallDirs flags) { sysconfdir = NoFlag } - -- Cabal < 1.14.0 doesn't know about '--disable-benchmarks'. - flags_1_14_0 = flags_1_18_0 { configBenchmarks = NoFlag } - -- Cabal < 1.10.0 doesn't know about '--disable-tests'. - flags_1_10_0 = flags_1_14_0 { configTests = NoFlag } - -- Cabal < 1.3.10 does not grok the '--constraints' flag. 
- flags_1_3_10 = flags_1_10_0 { configConstraints = [] } - --- ------------------------------------------------------------ --- * Config extra flags --- ------------------------------------------------------------ - --- | cabal configure takes some extra flags beyond runghc Setup configure --- -data ConfigExFlags = ConfigExFlags { - configCabalVersion :: Flag Version, - configExConstraints:: [UserConstraint], - configPreferences :: [Dependency], - configSolver :: Flag PreSolver, - configAllowNewer :: Flag AllowNewer - } - -defaultConfigExFlags :: ConfigExFlags -defaultConfigExFlags = mempty { configSolver = Flag defaultSolver - , configAllowNewer = Flag AllowNewerNone } - -configureExCommand :: CommandUI (ConfigFlags, ConfigExFlags) -configureExCommand = configureCommand { - commandDefaultFlags = (mempty, defaultConfigExFlags), - commandOptions = \showOrParseArgs -> - liftOptions fst setFst - (filter ((`notElem` ["constraint", "dependency", "exact-configuration"]) - . optionName) $ configureOptions showOrParseArgs) - ++ liftOptions snd setSnd (configureExOptions showOrParseArgs) - } - where - setFst a (_,b) = (a,b) - setSnd b (a,_) = (a,b) - -configureExOptions :: ShowOrParseArgs -> [OptionField ConfigExFlags] -configureExOptions _showOrParseArgs = - [ option [] ["cabal-lib-version"] - ("Select which version of the Cabal lib to use to build packages " - ++ "(useful for testing).") - configCabalVersion (\v flags -> flags { configCabalVersion = v }) - (reqArg "VERSION" (readP_to_E ("Cannot parse cabal lib version: "++) - (fmap toFlag parse)) - (map display . 
flagToList)) - , option [] ["constraint"] - "Specify constraints on a package (version, installed/source, flags)" - configExConstraints (\v flags -> flags { configExConstraints = v }) - (reqArg "CONSTRAINT" - (fmap (\x -> [x]) (ReadE readUserConstraint)) - (map display)) - - , option [] ["preference"] - "Specify preferences (soft constraints) on the version of a package" - configPreferences (\v flags -> flags { configPreferences = v }) - (reqArg "CONSTRAINT" - (readP_to_E (const "dependency expected") - (fmap (\x -> [x]) parse)) - (map display)) - - , optionSolver configSolver (\v flags -> flags { configSolver = v }) - - , option [] ["allow-newer"] - ("Ignore upper bounds in all dependencies or " ++ allowNewerArgument) - configAllowNewer (\v flags -> flags { configAllowNewer = v}) - (optArg allowNewerArgument - (fmap Flag allowNewerParser) (Flag AllowNewerAll) - allowNewerPrinter) - - ] - where allowNewerArgument = "DEPS" - -instance Monoid ConfigExFlags where - mempty = ConfigExFlags { - configCabalVersion = mempty, - configExConstraints= mempty, - configPreferences = mempty, - configSolver = mempty, - configAllowNewer = mempty - } - mappend a b = ConfigExFlags { - configCabalVersion = combine configCabalVersion, - configExConstraints= combine configExConstraints, - configPreferences = combine configPreferences, - configSolver = combine configSolver, - configAllowNewer = combine configAllowNewer - } - where combine field = field a `mappend` field b - --- ------------------------------------------------------------ --- * Build flags --- ------------------------------------------------------------ - -data SkipAddSourceDepsCheck = - SkipAddSourceDepsCheck | DontSkipAddSourceDepsCheck - deriving Eq - -data BuildExFlags = BuildExFlags { - buildOnly :: Flag SkipAddSourceDepsCheck -} - -buildExOptions :: ShowOrParseArgs -> [OptionField BuildExFlags] -buildExOptions _showOrParseArgs = - option [] ["only"] - "Don't reinstall add-source dependencies (sandbox-only)" - 
buildOnly (\v flags -> flags { buildOnly = v }) - (noArg (Flag SkipAddSourceDepsCheck)) - - : [] - -buildCommand :: CommandUI (BuildFlags, BuildExFlags) -buildCommand = parent { - commandDefaultFlags = (commandDefaultFlags parent, mempty), - commandOptions = - \showOrParseArgs -> liftOptions fst setFst - (commandOptions parent showOrParseArgs) - ++ - liftOptions snd setSnd (buildExOptions showOrParseArgs) - } - where - setFst a (_,b) = (a,b) - setSnd b (a,_) = (a,b) - - parent = Cabal.buildCommand defaultProgramConfiguration - -instance Monoid BuildExFlags where - mempty = BuildExFlags { - buildOnly = mempty - } - mappend a b = BuildExFlags { - buildOnly = combine buildOnly - } - where combine field = field a `mappend` field b - --- ------------------------------------------------------------ --- * Repl command --- ------------------------------------------------------------ - -replCommand :: CommandUI (ReplFlags, BuildExFlags) -replCommand = parent { - commandDefaultFlags = (commandDefaultFlags parent, mempty), - commandOptions = - \showOrParseArgs -> liftOptions fst setFst - (commandOptions parent showOrParseArgs) - ++ - liftOptions snd setSnd (buildExOptions showOrParseArgs) - } - where - setFst a (_,b) = (a,b) - setSnd b (a,_) = (a,b) - - parent = Cabal.replCommand defaultProgramConfiguration - --- ------------------------------------------------------------ --- * Test command --- ------------------------------------------------------------ - -testCommand :: CommandUI (TestFlags, BuildFlags, BuildExFlags) -testCommand = parent { - commandDefaultFlags = (commandDefaultFlags parent, - Cabal.defaultBuildFlags, mempty), - commandOptions = - \showOrParseArgs -> liftOptions get1 set1 - (commandOptions parent showOrParseArgs) - ++ - liftOptions get2 set2 - (Cabal.buildOptions progConf showOrParseArgs) - ++ - liftOptions get3 set3 (buildExOptions showOrParseArgs) - } - where - get1 (a,_,_) = a; set1 a (_,b,c) = (a,b,c) - get2 (_,b,_) = b; set2 b (a,_,c) = (a,b,c) - 
get3 (_,_,c) = c; set3 c (a,b,_) = (a,b,c) - - parent = Cabal.testCommand - progConf = defaultProgramConfiguration - --- ------------------------------------------------------------ --- * Bench command --- ------------------------------------------------------------ - -benchmarkCommand :: CommandUI (BenchmarkFlags, BuildFlags, BuildExFlags) -benchmarkCommand = parent { - commandDefaultFlags = (commandDefaultFlags parent, - Cabal.defaultBuildFlags, mempty), - commandOptions = - \showOrParseArgs -> liftOptions get1 set1 - (commandOptions parent showOrParseArgs) - ++ - liftOptions get2 set2 - (Cabal.buildOptions progConf showOrParseArgs) - ++ - liftOptions get3 set3 (buildExOptions showOrParseArgs) - } - where - get1 (a,_,_) = a; set1 a (_,b,c) = (a,b,c) - get2 (_,b,_) = b; set2 b (a,_,c) = (a,b,c) - get3 (_,_,c) = c; set3 c (a,b,_) = (a,b,c) - - parent = Cabal.benchmarkCommand - progConf = defaultProgramConfiguration - --- ------------------------------------------------------------ --- * Fetch command --- ------------------------------------------------------------ - -data FetchFlags = FetchFlags { --- fetchOutput :: Flag FilePath, - fetchDeps :: Flag Bool, - fetchDryRun :: Flag Bool, - fetchSolver :: Flag PreSolver, - fetchMaxBackjumps :: Flag Int, - fetchReorderGoals :: Flag Bool, - fetchIndependentGoals :: Flag Bool, - fetchShadowPkgs :: Flag Bool, - fetchStrongFlags :: Flag Bool, - fetchVerbosity :: Flag Verbosity - } - -defaultFetchFlags :: FetchFlags -defaultFetchFlags = FetchFlags { --- fetchOutput = mempty, - fetchDeps = toFlag True, - fetchDryRun = toFlag False, - fetchSolver = Flag defaultSolver, - fetchMaxBackjumps = Flag defaultMaxBackjumps, - fetchReorderGoals = Flag False, - fetchIndependentGoals = Flag False, - fetchShadowPkgs = Flag False, - fetchStrongFlags = Flag False, - fetchVerbosity = toFlag normal - } - -fetchCommand :: CommandUI FetchFlags -fetchCommand = CommandUI { - commandName = "fetch", - commandSynopsis = "Downloads packages for later 
installation.", - commandUsage = usageAlternatives "fetch" [ "[FLAGS] PACKAGES" - ], - commandDescription = Just $ \_ -> - "Note that it currently is not possible to fetch the dependencies for a\n" - ++ "package in the current directory.\n", - commandNotes = Nothing, - commandDefaultFlags = defaultFetchFlags, - commandOptions = \ showOrParseArgs -> [ - optionVerbosity fetchVerbosity (\v flags -> flags { fetchVerbosity = v }) - --- , option "o" ["output"] --- "Put the package(s) somewhere specific rather than the usual cache." --- fetchOutput (\v flags -> flags { fetchOutput = v }) --- (reqArgFlag "PATH") - - , option [] ["dependencies", "deps"] - "Resolve and fetch dependencies (default)" - fetchDeps (\v flags -> flags { fetchDeps = v }) - trueArg - - , option [] ["no-dependencies", "no-deps"] - "Ignore dependencies" - fetchDeps (\v flags -> flags { fetchDeps = v }) - falseArg - - , option [] ["dry-run"] - "Do not install anything, only print what would be installed." - fetchDryRun (\v flags -> flags { fetchDryRun = v }) - trueArg - - ] ++ - - optionSolver fetchSolver (\v flags -> flags { fetchSolver = v }) : - optionSolverFlags showOrParseArgs - fetchMaxBackjumps (\v flags -> flags { fetchMaxBackjumps = v }) - fetchReorderGoals (\v flags -> flags { fetchReorderGoals = v }) - fetchIndependentGoals (\v flags -> flags { fetchIndependentGoals = v }) - fetchShadowPkgs (\v flags -> flags { fetchShadowPkgs = v }) - fetchStrongFlags (\v flags -> flags { fetchStrongFlags = v }) - - } - --- ------------------------------------------------------------ --- * Freeze command --- ------------------------------------------------------------ - -data FreezeFlags = FreezeFlags { - freezeDryRun :: Flag Bool, - freezeTests :: Flag Bool, - freezeBenchmarks :: Flag Bool, - freezeSolver :: Flag PreSolver, - freezeMaxBackjumps :: Flag Int, - freezeReorderGoals :: Flag Bool, - freezeIndependentGoals :: Flag Bool, - freezeShadowPkgs :: Flag Bool, - freezeStrongFlags :: Flag Bool, - 
freezeVerbosity :: Flag Verbosity - } - -defaultFreezeFlags :: FreezeFlags -defaultFreezeFlags = FreezeFlags { - freezeDryRun = toFlag False, - freezeTests = toFlag False, - freezeBenchmarks = toFlag False, - freezeSolver = Flag defaultSolver, - freezeMaxBackjumps = Flag defaultMaxBackjumps, - freezeReorderGoals = Flag False, - freezeIndependentGoals = Flag False, - freezeShadowPkgs = Flag False, - freezeStrongFlags = Flag False, - freezeVerbosity = toFlag normal - } - -freezeCommand :: CommandUI FreezeFlags -freezeCommand = CommandUI { - commandName = "freeze", - commandSynopsis = "Freeze dependencies.", - commandDescription = Just $ \_ -> wrapText $ - "Calculates a valid set of dependencies and their exact versions. " - ++ "If successful, saves the result to the file `cabal.config`.\n" - ++ "\n" - ++ "The package versions specified in `cabal.config` will be used for " - ++ "any future installs.\n" - ++ "\n" - ++ "An existing `cabal.config` is ignored and overwritten.\n", - commandNotes = Nothing, - commandUsage = usageAlternatives "freeze" ["" - ,"PACKAGES" - ], - commandDefaultFlags = defaultFreezeFlags, - commandOptions = \ showOrParseArgs -> [ - optionVerbosity freezeVerbosity (\v flags -> flags { freezeVerbosity = v }) - - , option [] ["dry-run"] - "Do not freeze anything, only print what would be frozen" - freezeDryRun (\v flags -> flags { freezeDryRun = v }) - trueArg - - , option [] ["tests"] - "freezing of the dependencies of any tests suites in the package description file." - freezeTests (\v flags -> flags { freezeTests = v }) - (boolOpt [] []) - - , option [] ["benchmarks"] - "freezing of the dependencies of any benchmarks suites in the package description file." 
- freezeBenchmarks (\v flags -> flags { freezeBenchmarks = v }) - (boolOpt [] []) - - ] ++ - - optionSolver freezeSolver (\v flags -> flags { freezeSolver = v }) : - optionSolverFlags showOrParseArgs - freezeMaxBackjumps (\v flags -> flags { freezeMaxBackjumps = v }) - freezeReorderGoals (\v flags -> flags { freezeReorderGoals = v }) - freezeIndependentGoals (\v flags -> flags { freezeIndependentGoals = v }) - freezeShadowPkgs (\v flags -> flags { freezeShadowPkgs = v }) - freezeStrongFlags (\v flags -> flags { freezeStrongFlags = v }) - - } - --- ------------------------------------------------------------ --- * Other commands --- ------------------------------------------------------------ - -updateCommand :: CommandUI (Flag Verbosity) -updateCommand = CommandUI { - commandName = "update", - commandSynopsis = "Updates list of known packages.", - commandDescription = Just $ \_ -> - "For all known remote repositories, download the package list.\n", - commandNotes = Just $ \_ -> - relevantConfigValuesText ["remote-repo" - ,"remote-repo-cache" - ,"local-repo"], - commandUsage = usageFlags "update", - commandDefaultFlags = toFlag normal, - commandOptions = \_ -> [optionVerbosity id const] - } - -upgradeCommand :: CommandUI (ConfigFlags, ConfigExFlags, InstallFlags, HaddockFlags) -upgradeCommand = configureCommand { - commandName = "upgrade", - commandSynopsis = "(command disabled, use install instead)", - commandDescription = Nothing, - commandUsage = usageFlagsOrPackages "upgrade", - commandDefaultFlags = (mempty, mempty, mempty, mempty), - commandOptions = commandOptions installCommand - } - -{- -cleanCommand :: CommandUI () -cleanCommand = makeCommand name shortDesc longDesc emptyFlags options - where - name = "clean" - shortDesc = "Removes downloaded files" - longDesc = Nothing - emptyFlags = () - options _ = [] --} - -checkCommand :: CommandUI (Flag Verbosity) -checkCommand = CommandUI { - commandName = "check", - commandSynopsis = "Check the package for common 
mistakes.", - commandDescription = Just $ \_ -> wrapText $ - "Expects a .cabal package file in the current directory.\n" - ++ "\n" - ++ "The checks correspond to the requirements to packages on Hackage. " - ++ "If no errors and warnings are reported, Hackage will accept this " - ++ "package.\n", - commandNotes = Nothing, - commandUsage = \pname -> "Usage: " ++ pname ++ " check\n", - commandDefaultFlags = toFlag normal, - commandOptions = \_ -> [] - } - -formatCommand :: CommandUI (Flag Verbosity) -formatCommand = CommandUI { - commandName = "format", - commandSynopsis = "Reformat the .cabal file using the standard style.", - commandDescription = Nothing, - commandNotes = Nothing, - commandUsage = usageAlternatives "format" ["[FILE]"], - commandDefaultFlags = toFlag normal, - commandOptions = \_ -> [] - } - -runCommand :: CommandUI (BuildFlags, BuildExFlags) -runCommand = CommandUI { - commandName = "run", - commandSynopsis = "Builds and runs an executable.", - commandDescription = Just $ \_ -> wrapText $ - "Builds and then runs the specified executable. 
If no executable is " - ++ "specified, but the package contains just one executable, that one " - ++ "is built and executed.\n", - commandNotes = Nothing, - commandUsage = usageAlternatives "run" - ["[FLAGS] [EXECUTABLE] [-- EXECUTABLE_FLAGS]"], - commandDefaultFlags = mempty, - commandOptions = - \showOrParseArgs -> liftOptions fst setFst - (commandOptions parent showOrParseArgs) - ++ - liftOptions snd setSnd - (buildExOptions showOrParseArgs) - } - where - setFst a (_,b) = (a,b) - setSnd b (a,_) = (a,b) - - parent = Cabal.buildCommand defaultProgramConfiguration - --- ------------------------------------------------------------ --- * Report flags --- ------------------------------------------------------------ - -data ReportFlags = ReportFlags { - reportUsername :: Flag Username, - reportPassword :: Flag Password, - reportVerbosity :: Flag Verbosity - } - -defaultReportFlags :: ReportFlags -defaultReportFlags = ReportFlags { - reportUsername = mempty, - reportPassword = mempty, - reportVerbosity = toFlag normal - } - -reportCommand :: CommandUI ReportFlags -reportCommand = CommandUI { - commandName = "report", - commandSynopsis = "Upload build reports to a remote server.", - commandDescription = Nothing, - commandNotes = Just $ \_ -> - "You can store your Hackage login in the ~/.cabal/config file\n", - commandUsage = usageAlternatives "report" ["[FLAGS]"], - commandDefaultFlags = defaultReportFlags, - commandOptions = \_ -> - [optionVerbosity reportVerbosity (\v flags -> flags { reportVerbosity = v }) - - ,option ['u'] ["username"] - "Hackage username." - reportUsername (\v flags -> flags { reportUsername = v }) - (reqArg' "USERNAME" (toFlag . Username) - (flagToList . fmap unUsername)) - - ,option ['p'] ["password"] - "Hackage password." - reportPassword (\v flags -> flags { reportPassword = v }) - (reqArg' "PASSWORD" (toFlag . Password) - (flagToList . 
fmap unPassword)) - ] - } - -instance Monoid ReportFlags where - mempty = ReportFlags { - reportUsername = mempty, - reportPassword = mempty, - reportVerbosity = mempty - } - mappend a b = ReportFlags { - reportUsername = combine reportUsername, - reportPassword = combine reportPassword, - reportVerbosity = combine reportVerbosity - } - where combine field = field a `mappend` field b - --- ------------------------------------------------------------ --- * Get flags --- ------------------------------------------------------------ - -data GetFlags = GetFlags { - getDestDir :: Flag FilePath, - getPristine :: Flag Bool, - getSourceRepository :: Flag (Maybe RepoKind), - getVerbosity :: Flag Verbosity - } - -defaultGetFlags :: GetFlags -defaultGetFlags = GetFlags { - getDestDir = mempty, - getPristine = mempty, - getSourceRepository = mempty, - getVerbosity = toFlag normal - } - -getCommand :: CommandUI GetFlags -getCommand = CommandUI { - commandName = "get", - commandSynopsis = "Download/Extract a package's source code (repository).", - commandDescription = Just $ \_ -> wrapText $ - "Creates a local copy of a package's source code. By default it gets " - ++ "the source\ntarball and unpacks it in a local subdirectory. " - ++ "Alternatively, with -s it will\nget the code from the source " - ++ "repository specified by the package.\n", - commandNotes = Nothing, - commandUsage = usagePackages "get", - commandDefaultFlags = defaultGetFlags, - commandOptions = \_ -> [ - optionVerbosity getVerbosity (\v flags -> flags { getVerbosity = v }) - - ,option "d" ["destdir"] - "Where to place the package source, defaults to the current directory." - getDestDir (\v flags -> flags { getDestDir = v }) - (reqArgFlag "PATH") - - ,option "s" ["source-repository"] - "Copy the package's source repository (ie git clone, darcs get, etc as appropriate)." 
- getSourceRepository (\v flags -> flags { getSourceRepository = v }) - (optArg "[head|this|...]" (readP_to_E (const "invalid source-repository") - (fmap (toFlag . Just) parse)) - (Flag Nothing) - (map (fmap show) . flagToList)) - - , option [] ["pristine"] - ("Unpack the original pristine tarball, rather than updating the " - ++ ".cabal file with the latest revision from the package archive.") - getPristine (\v flags -> flags { getPristine = v }) - trueArg - ] - } - --- 'cabal unpack' is a deprecated alias for 'cabal get'. -unpackCommand :: CommandUI GetFlags -unpackCommand = getCommand { - commandName = "unpack", - commandUsage = usagePackages "unpack" - } - -instance Monoid GetFlags where - mempty = GetFlags { - getDestDir = mempty, - getPristine = mempty, - getSourceRepository = mempty, - getVerbosity = mempty - } - mappend a b = GetFlags { - getDestDir = combine getDestDir, - getPristine = combine getPristine, - getSourceRepository = combine getSourceRepository, - getVerbosity = combine getVerbosity - } - where combine field = field a `mappend` field b - --- ------------------------------------------------------------ --- * List flags --- ------------------------------------------------------------ - -data ListFlags = ListFlags { - listInstalled :: Flag Bool, - listSimpleOutput :: Flag Bool, - listVerbosity :: Flag Verbosity, - listPackageDBs :: [Maybe PackageDB] - } - -defaultListFlags :: ListFlags -defaultListFlags = ListFlags { - listInstalled = Flag False, - listSimpleOutput = Flag False, - listVerbosity = toFlag normal, - listPackageDBs = [] - } - -listCommand :: CommandUI ListFlags -listCommand = CommandUI { - commandName = "list", - commandSynopsis = "List packages matching a search string.", - commandDescription = Just $ \_ -> wrapText $ - "List all packages, or all packages matching one of the search" - ++ " strings.\n" - ++ "\n" - ++ "If there is a sandbox in the current directory and " - ++ "config:ignore-sandbox is False, use the sandbox package 
database. " - ++ "Otherwise, use the package database specified with --package-db. " - ++ "If not specified, use the user package database.\n", - commandNotes = Nothing, - commandUsage = usageAlternatives "list" [ "[FLAGS]" - , "[FLAGS] STRINGS"], - commandDefaultFlags = defaultListFlags, - commandOptions = \_ -> [ - optionVerbosity listVerbosity (\v flags -> flags { listVerbosity = v }) - - , option [] ["installed"] - "Only print installed packages" - listInstalled (\v flags -> flags { listInstalled = v }) - trueArg - - , option [] ["simple-output"] - "Print in a easy-to-parse format" - listSimpleOutput (\v flags -> flags { listSimpleOutput = v }) - trueArg - - , option "" ["package-db"] - "Use a given package database. May be a specific file, 'global', 'user' or 'clear'." - listPackageDBs (\v flags -> flags { listPackageDBs = v }) - (reqArg' "DB" readPackageDbList showPackageDbList) - - ] - } - -instance Monoid ListFlags where - mempty = ListFlags { - listInstalled = mempty, - listSimpleOutput = mempty, - listVerbosity = mempty, - listPackageDBs = mempty - } - mappend a b = ListFlags { - listInstalled = combine listInstalled, - listSimpleOutput = combine listSimpleOutput, - listVerbosity = combine listVerbosity, - listPackageDBs = combine listPackageDBs - } - where combine field = field a `mappend` field b - --- ------------------------------------------------------------ --- * Info flags --- ------------------------------------------------------------ - -data InfoFlags = InfoFlags { - infoVerbosity :: Flag Verbosity, - infoPackageDBs :: [Maybe PackageDB] - } - -defaultInfoFlags :: InfoFlags -defaultInfoFlags = InfoFlags { - infoVerbosity = toFlag normal, - infoPackageDBs = [] - } - -infoCommand :: CommandUI InfoFlags -infoCommand = CommandUI { - commandName = "info", - commandSynopsis = "Display detailed information about a particular package.", - commandDescription = Just $ \_ -> wrapText $ - "If there is a sandbox in the current directory and " - ++ 
"config:ignore-sandbox is False, use the sandbox package database. " - ++ "Otherwise, use the package database specified with --package-db. " - ++ "If not specified, use the user package database.\n", - commandNotes = Nothing, - commandUsage = usageAlternatives "info" ["[FLAGS] PACKAGES"], - commandDefaultFlags = defaultInfoFlags, - commandOptions = \_ -> [ - optionVerbosity infoVerbosity (\v flags -> flags { infoVerbosity = v }) - - , option "" ["package-db"] - "Use a given package database. May be a specific file, 'global', 'user' or 'clear'." - infoPackageDBs (\v flags -> flags { infoPackageDBs = v }) - (reqArg' "DB" readPackageDbList showPackageDbList) - - ] - } - -instance Monoid InfoFlags where - mempty = InfoFlags { - infoVerbosity = mempty, - infoPackageDBs = mempty - } - mappend a b = InfoFlags { - infoVerbosity = combine infoVerbosity, - infoPackageDBs = combine infoPackageDBs - } - where combine field = field a `mappend` field b - --- ------------------------------------------------------------ --- * Install flags --- ------------------------------------------------------------ - --- | Install takes the same flags as configure along with a few extras. 
--- -data InstallFlags = InstallFlags { - installDocumentation :: Flag Bool, - installHaddockIndex :: Flag PathTemplate, - installDryRun :: Flag Bool, - installMaxBackjumps :: Flag Int, - installReorderGoals :: Flag Bool, - installIndependentGoals :: Flag Bool, - installShadowPkgs :: Flag Bool, - installStrongFlags :: Flag Bool, - installReinstall :: Flag Bool, - installAvoidReinstalls :: Flag Bool, - installOverrideReinstall :: Flag Bool, - installUpgradeDeps :: Flag Bool, - installOnly :: Flag Bool, - installOnlyDeps :: Flag Bool, - installRootCmd :: Flag String, - installSummaryFile :: NubList PathTemplate, - installLogFile :: Flag PathTemplate, - installBuildReports :: Flag ReportLevel, - installReportPlanningFailure :: Flag Bool, - installSymlinkBinDir :: Flag FilePath, - installOneShot :: Flag Bool, - installNumJobs :: Flag (Maybe Int), - installRunTests :: Flag Bool - } - -defaultInstallFlags :: InstallFlags -defaultInstallFlags = InstallFlags { - installDocumentation = Flag False, - installHaddockIndex = Flag docIndexFile, - installDryRun = Flag False, - installMaxBackjumps = Flag defaultMaxBackjumps, - installReorderGoals = Flag False, - installIndependentGoals= Flag False, - installShadowPkgs = Flag False, - installStrongFlags = Flag False, - installReinstall = Flag False, - installAvoidReinstalls = Flag False, - installOverrideReinstall = Flag False, - installUpgradeDeps = Flag False, - installOnly = Flag False, - installOnlyDeps = Flag False, - installRootCmd = mempty, - installSummaryFile = mempty, - installLogFile = mempty, - installBuildReports = Flag NoReports, - installReportPlanningFailure = Flag False, - installSymlinkBinDir = mempty, - installOneShot = Flag False, - installNumJobs = mempty, - installRunTests = mempty - } - where - docIndexFile = toPathTemplate ("$datadir" "doc" - "$arch-$os-$compiler" "index.html") - -allowNewerParser :: ReadE AllowNewer -allowNewerParser = ReadE $ \s -> - case s of - "" -> Right AllowNewerNone - "False" -> 
Right AllowNewerNone - "True" -> Right AllowNewerAll - _ -> - case readPToMaybe pkgsParser s of - Just pkgs -> Right . AllowNewerSome $ pkgs - Nothing -> Left ("Cannot parse the list of packages: " ++ s) - where - pkgsParser = Parse.sepBy1 parse (Parse.char ',') - -allowNewerPrinter :: Flag AllowNewer -> [Maybe String] -allowNewerPrinter (Flag AllowNewerNone) = [Just "False"] -allowNewerPrinter (Flag AllowNewerAll) = [Just "True"] -allowNewerPrinter (Flag (AllowNewerSome pkgs)) = - [Just . intercalate "," . map display $ pkgs] -allowNewerPrinter NoFlag = [] - - -defaultMaxBackjumps :: Int -defaultMaxBackjumps = 2000 - -defaultSolver :: PreSolver -defaultSolver = Choose - -allSolvers :: String -allSolvers = intercalate ", " (map display ([minBound .. maxBound] :: [PreSolver])) - -installCommand :: CommandUI (ConfigFlags, ConfigExFlags, InstallFlags, HaddockFlags) -installCommand = CommandUI { - commandName = "install", - commandSynopsis = "Install packages.", - commandUsage = usageAlternatives "install" [ "[FLAGS]" - , "[FLAGS] PACKAGES" - ], - commandDescription = Just $ \_ -> wrapText $ - "Installs one or more packages. By default, the installed package" - ++ " will be registered in the user's package database or, if a sandbox" - ++ " is present in the current directory, inside the sandbox.\n" - ++ "\n" - ++ "If PACKAGES are specified, downloads and installs those packages." - ++ " Otherwise, install the package in the current directory (and/or its" - ++ " dependencies) (there must be exactly one .cabal file in the current" - ++ " directory).\n" - ++ "\n" - ++ "When using a sandbox, the flags for `install` only affect the" - ++ " current command and have no effect on future commands. (To achieve" - ++ " that, `configure` must be used.)\n" - ++ " In contrast, without a sandbox, the flags to `install` are saved and" - ++ " affect future commands such as `build` and `repl`. 
See the help for" - ++ " `configure` for a list of commands being affected.\n", - commandNotes = Just $ \pname -> - ( case commandNotes configureCommand of - Just desc -> desc pname ++ "\n" - Nothing -> "" ) - ++ "Examples:\n" - ++ " " ++ pname ++ " install " - ++ " Package in the current directory\n" - ++ " " ++ pname ++ " install foo " - ++ " Package from the hackage server\n" - ++ " " ++ pname ++ " install foo-1.0 " - ++ " Specific version of a package\n" - ++ " " ++ pname ++ " install 'foo < 2' " - ++ " Constrained package version\n", - commandDefaultFlags = (mempty, mempty, mempty, mempty), - commandOptions = \showOrParseArgs -> - liftOptions get1 set1 - (filter ((`notElem` ["constraint", "dependency" - , "exact-configuration"]) - . optionName) $ - configureOptions showOrParseArgs) - ++ liftOptions get2 set2 (configureExOptions showOrParseArgs) - ++ liftOptions get3 set3 (installOptions showOrParseArgs) - ++ liftOptions get4 set4 (haddockOptions showOrParseArgs) - } - where - get1 (a,_,_,_) = a; set1 a (_,b,c,d) = (a,b,c,d) - get2 (_,b,_,_) = b; set2 b (a,_,c,d) = (a,b,c,d) - get3 (_,_,c,_) = c; set3 c (a,b,_,d) = (a,b,c,d) - get4 (_,_,_,d) = d; set4 d (a,b,c,_) = (a,b,c,d) - -haddockOptions :: ShowOrParseArgs -> [OptionField HaddockFlags] -haddockOptions showOrParseArgs - = [ opt { optionName = "haddock-" ++ name, - optionDescr = [ fmapOptFlags (\(_, lflags) -> ([], map ("haddock-" ++) lflags)) descr - | descr <- optionDescr opt] } - | opt <- commandOptions Cabal.haddockCommand showOrParseArgs - , let name = optionName opt - , name `elem` ["hoogle", "html", "html-location" - ,"executables", "tests", "benchmarks", "all", "internal", "css" - ,"hyperlink-source", "hscolour-css" - ,"contents-location"] - ] - where - fmapOptFlags :: (OptFlags -> OptFlags) -> OptDescr a -> OptDescr a - fmapOptFlags modify (ReqArg d f p r w) = ReqArg d (modify f) p r w - fmapOptFlags modify (OptArg d f p r i w) = OptArg d (modify f) p r i w - fmapOptFlags modify (ChoiceOpt xs) = 
ChoiceOpt [(d, modify f, i, w) | (d, f, i, w) <- xs] - fmapOptFlags modify (BoolOpt d f1 f2 r w) = BoolOpt d (modify f1) (modify f2) r w - -installOptions :: ShowOrParseArgs -> [OptionField InstallFlags] -installOptions showOrParseArgs = - [ option "" ["documentation"] - "building of documentation" - installDocumentation (\v flags -> flags { installDocumentation = v }) - (boolOpt [] []) - - , option [] ["doc-index-file"] - "A central index of haddock API documentation (template cannot use $pkgid)" - installHaddockIndex (\v flags -> flags { installHaddockIndex = v }) - (reqArg' "TEMPLATE" (toFlag.toPathTemplate) - (flagToList . fmap fromPathTemplate)) - - , option [] ["dry-run"] - "Do not install anything, only print what would be installed." - installDryRun (\v flags -> flags { installDryRun = v }) - trueArg - ] ++ - - optionSolverFlags showOrParseArgs - installMaxBackjumps (\v flags -> flags { installMaxBackjumps = v }) - installReorderGoals (\v flags -> flags { installReorderGoals = v }) - installIndependentGoals (\v flags -> flags { installIndependentGoals = v }) - installShadowPkgs (\v flags -> flags { installShadowPkgs = v }) - installStrongFlags (\v flags -> flags { installStrongFlags = v }) ++ - - [ option [] ["reinstall"] - "Install even if it means installing the same version again." - installReinstall (\v flags -> flags { installReinstall = v }) - (yesNoOpt showOrParseArgs) - - , option [] ["avoid-reinstalls"] - "Do not select versions that would destructively overwrite installed packages." - installAvoidReinstalls (\v flags -> flags { installAvoidReinstalls = v }) - (yesNoOpt showOrParseArgs) - - , option [] ["force-reinstalls"] - "Reinstall packages even if they will most likely break other installed packages." 
- installOverrideReinstall (\v flags -> flags { installOverrideReinstall = v }) - (yesNoOpt showOrParseArgs) - - , option [] ["upgrade-dependencies"] - "Pick the latest version for all dependencies, rather than trying to pick an installed version." - installUpgradeDeps (\v flags -> flags { installUpgradeDeps = v }) - (yesNoOpt showOrParseArgs) - - , option [] ["only-dependencies"] - "Install only the dependencies necessary to build the given packages" - installOnlyDeps (\v flags -> flags { installOnlyDeps = v }) - (yesNoOpt showOrParseArgs) - - , option [] ["dependencies-only"] - "A synonym for --only-dependencies" - installOnlyDeps (\v flags -> flags { installOnlyDeps = v }) - (yesNoOpt showOrParseArgs) - - , option [] ["root-cmd"] - "Command used to gain root privileges, when installing with --global." - installRootCmd (\v flags -> flags { installRootCmd = v }) - (reqArg' "COMMAND" toFlag flagToList) - - , option [] ["symlink-bindir"] - "Add symlinks to installed executables into this directory." - installSymlinkBinDir (\v flags -> flags { installSymlinkBinDir = v }) - (reqArgFlag "DIR") - - , option [] ["build-summary"] - "Save build summaries to file (name template can use $pkgid, $compiler, $os, $arch)" - installSummaryFile (\v flags -> flags { installSummaryFile = v }) - (reqArg' "TEMPLATE" (\x -> toNubList [toPathTemplate x]) (map fromPathTemplate . fromNubList)) - - , option [] ["build-log"] - "Log all builds to file (name template can use $pkgid, $compiler, $os, $arch)" - installLogFile (\v flags -> flags { installLogFile = v }) - (reqArg' "TEMPLATE" (toFlag.toPathTemplate) - (flagToList . fmap fromPathTemplate)) - - , option [] ["remote-build-reporting"] - "Generate build reports to send to a remote server (none, anonymous or detailed)." - installBuildReports (\v flags -> flags { installBuildReports = v }) - (reqArg "LEVEL" (readP_to_E (const $ "report level must be 'none', " - ++ "'anonymous' or 'detailed'") - (toFlag `fmap` parse)) - (flagToList . 
fmap display)) - - , option [] ["report-planning-failure"] - "Generate build reports when the dependency solver fails. This is used by the Hackage build bot." - installReportPlanningFailure (\v flags -> flags { installReportPlanningFailure = v }) - trueArg - - , option [] ["one-shot"] - "Do not record the packages in the world file." - installOneShot (\v flags -> flags { installOneShot = v }) - (yesNoOpt showOrParseArgs) - - , option [] ["run-tests"] - "Run package test suites during installation." - installRunTests (\v flags -> flags { installRunTests = v }) - trueArg - - , optionNumJobs - installNumJobs (\v flags -> flags { installNumJobs = v }) - - ] ++ case showOrParseArgs of -- TODO: remove when "cabal install" - -- avoids - ParseArgs -> - [ option [] ["only"] - "Only installs the package in the current directory." - installOnly (\v flags -> flags { installOnly = v }) - trueArg ] - _ -> [] - - -instance Monoid InstallFlags where - mempty = InstallFlags { - installDocumentation = mempty, - installHaddockIndex = mempty, - installDryRun = mempty, - installReinstall = mempty, - installAvoidReinstalls = mempty, - installOverrideReinstall = mempty, - installMaxBackjumps = mempty, - installUpgradeDeps = mempty, - installReorderGoals = mempty, - installIndependentGoals= mempty, - installShadowPkgs = mempty, - installStrongFlags = mempty, - installOnly = mempty, - installOnlyDeps = mempty, - installRootCmd = mempty, - installSummaryFile = mempty, - installLogFile = mempty, - installBuildReports = mempty, - installReportPlanningFailure = mempty, - installSymlinkBinDir = mempty, - installOneShot = mempty, - installNumJobs = mempty, - installRunTests = mempty - } - mappend a b = InstallFlags { - installDocumentation = combine installDocumentation, - installHaddockIndex = combine installHaddockIndex, - installDryRun = combine installDryRun, - installReinstall = combine installReinstall, - installAvoidReinstalls = combine installAvoidReinstalls, - installOverrideReinstall = 
combine installOverrideReinstall, - installMaxBackjumps = combine installMaxBackjumps, - installUpgradeDeps = combine installUpgradeDeps, - installReorderGoals = combine installReorderGoals, - installIndependentGoals= combine installIndependentGoals, - installShadowPkgs = combine installShadowPkgs, - installStrongFlags = combine installStrongFlags, - installOnly = combine installOnly, - installOnlyDeps = combine installOnlyDeps, - installRootCmd = combine installRootCmd, - installSummaryFile = combine installSummaryFile, - installLogFile = combine installLogFile, - installBuildReports = combine installBuildReports, - installReportPlanningFailure = combine installReportPlanningFailure, - installSymlinkBinDir = combine installSymlinkBinDir, - installOneShot = combine installOneShot, - installNumJobs = combine installNumJobs, - installRunTests = combine installRunTests - } - where combine field = field a `mappend` field b - --- ------------------------------------------------------------ --- * Upload flags --- ------------------------------------------------------------ - -data UploadFlags = UploadFlags { - uploadCheck :: Flag Bool, - uploadUsername :: Flag Username, - uploadPassword :: Flag Password, - uploadVerbosity :: Flag Verbosity - } - -defaultUploadFlags :: UploadFlags -defaultUploadFlags = UploadFlags { - uploadCheck = toFlag False, - uploadUsername = mempty, - uploadPassword = mempty, - uploadVerbosity = toFlag normal - } - -uploadCommand :: CommandUI UploadFlags -uploadCommand = CommandUI { - commandName = "upload", - commandSynopsis = "Uploads source packages to Hackage.", - commandDescription = Nothing, - commandNotes = Just $ \_ -> - "You can store your Hackage login in the ~/.cabal/config file\n" - ++ relevantConfigValuesText ["username", "password"], - commandUsage = \pname -> - "Usage: " ++ pname ++ " upload [FLAGS] TARFILES\n", - commandDefaultFlags = defaultUploadFlags, - commandOptions = \_ -> - [optionVerbosity uploadVerbosity (\v flags -> flags { 
uploadVerbosity = v }) - - ,option ['c'] ["check"] - "Do not upload, just do QA checks." - uploadCheck (\v flags -> flags { uploadCheck = v }) - trueArg - - ,option ['u'] ["username"] - "Hackage username." - uploadUsername (\v flags -> flags { uploadUsername = v }) - (reqArg' "USERNAME" (toFlag . Username) - (flagToList . fmap unUsername)) - - ,option ['p'] ["password"] - "Hackage password." - uploadPassword (\v flags -> flags { uploadPassword = v }) - (reqArg' "PASSWORD" (toFlag . Password) - (flagToList . fmap unPassword)) - ] - } - -instance Monoid UploadFlags where - mempty = UploadFlags { - uploadCheck = mempty, - uploadUsername = mempty, - uploadPassword = mempty, - uploadVerbosity = mempty - } - mappend a b = UploadFlags { - uploadCheck = combine uploadCheck, - uploadUsername = combine uploadUsername, - uploadPassword = combine uploadPassword, - uploadVerbosity = combine uploadVerbosity - } - where combine field = field a `mappend` field b - --- ------------------------------------------------------------ --- * Init flags --- ------------------------------------------------------------ - -emptyInitFlags :: IT.InitFlags -emptyInitFlags = mempty - -defaultInitFlags :: IT.InitFlags -defaultInitFlags = emptyInitFlags { IT.initVerbosity = toFlag normal } - -initCommand :: CommandUI IT.InitFlags -initCommand = CommandUI { - commandName = "init", - commandSynopsis = "Create a new .cabal package file (interactively).", - commandDescription = Just $ \_ -> wrapText $ - "Cabalise a project by creating a .cabal, Setup.hs, and " - ++ "optionally a LICENSE file.\n" - ++ "\n" - ++ "Calling init with no arguments (recommended) uses an " - ++ "interactive mode, which will try to guess as much as " - ++ "possible and prompt you for the rest. Command-line " - ++ "arguments are provided for scripting purposes. 
" - ++ "If you don't want interactive mode, be sure to pass " - ++ "the -n flag.\n", - commandNotes = Nothing, - commandUsage = \pname -> - "Usage: " ++ pname ++ " init [FLAGS]\n", - commandDefaultFlags = defaultInitFlags, - commandOptions = \_ -> - [ option ['n'] ["non-interactive"] - "Non-interactive mode." - IT.nonInteractive (\v flags -> flags { IT.nonInteractive = v }) - trueArg - - , option ['q'] ["quiet"] - "Do not generate log messages to stdout." - IT.quiet (\v flags -> flags { IT.quiet = v }) - trueArg - - , option [] ["no-comments"] - "Do not generate explanatory comments in the .cabal file." - IT.noComments (\v flags -> flags { IT.noComments = v }) - trueArg - - , option ['m'] ["minimal"] - "Generate a minimal .cabal file, that is, do not include extra empty fields. Also implies --no-comments." - IT.minimal (\v flags -> flags { IT.minimal = v }) - trueArg - - , option [] ["overwrite"] - "Overwrite any existing .cabal, LICENSE, or Setup.hs files without warning." - IT.overwrite (\v flags -> flags { IT.overwrite = v }) - trueArg - - , option [] ["package-dir"] - "Root directory of the package (default = current directory)." - IT.packageDir (\v flags -> flags { IT.packageDir = v }) - (reqArgFlag "DIRECTORY") - - , option ['p'] ["package-name"] - "Name of the Cabal package to create." - IT.packageName (\v flags -> flags { IT.packageName = v }) - (reqArg "PACKAGE" (readP_to_E ("Cannot parse package name: "++) - (toFlag `fmap` parse)) - (flagToList . fmap display)) - - , option [] ["version"] - "Initial version of the package." - IT.version (\v flags -> flags { IT.version = v }) - (reqArg "VERSION" (readP_to_E ("Cannot parse package version: "++) - (toFlag `fmap` parse)) - (flagToList . fmap display)) - - , option [] ["cabal-version"] - "Required version of the Cabal library." - IT.cabalVersion (\v flags -> flags { IT.cabalVersion = v }) - (reqArg "VERSION_RANGE" (readP_to_E ("Cannot parse Cabal version range: "++) - (toFlag `fmap` parse)) - (flagToList . 
fmap display)) - - , option ['l'] ["license"] - "Project license." - IT.license (\v flags -> flags { IT.license = v }) - (reqArg "LICENSE" (readP_to_E ("Cannot parse license: "++) - (toFlag `fmap` parse)) - (flagToList . fmap display)) - - , option ['a'] ["author"] - "Name of the project's author." - IT.author (\v flags -> flags { IT.author = v }) - (reqArgFlag "NAME") - - , option ['e'] ["email"] - "Email address of the maintainer." - IT.email (\v flags -> flags { IT.email = v }) - (reqArgFlag "EMAIL") - - , option ['u'] ["homepage"] - "Project homepage and/or repository." - IT.homepage (\v flags -> flags { IT.homepage = v }) - (reqArgFlag "URL") - - , option ['s'] ["synopsis"] - "Short project synopsis." - IT.synopsis (\v flags -> flags { IT.synopsis = v }) - (reqArgFlag "TEXT") - - , option ['c'] ["category"] - "Project category." - IT.category (\v flags -> flags { IT.category = v }) - (reqArg' "CATEGORY" (\s -> toFlag $ maybe (Left s) Right (readMaybe s)) - (flagToList . fmap (either id show))) - - , option ['x'] ["extra-source-file"] - "Extra source file to be distributed with tarball." - IT.extraSrc (\v flags -> flags { IT.extraSrc = v }) - (reqArg' "FILE" (Just . (:[])) - (fromMaybe [])) - - , option [] ["is-library"] - "Build a library." - IT.packageType (\v flags -> flags { IT.packageType = v }) - (noArg (Flag IT.Library)) - - , option [] ["is-executable"] - "Build an executable." - IT.packageType - (\v flags -> flags { IT.packageType = v }) - (noArg (Flag IT.Executable)) - - , option [] ["main-is"] - "Specify the main module." - IT.mainIs - (\v flags -> flags { IT.mainIs = v }) - (reqArgFlag "FILE") - - , option [] ["language"] - "Specify the default language." - IT.language - (\v flags -> flags { IT.language = v }) - (reqArg "LANGUAGE" (readP_to_E ("Cannot parse language: "++) - (toFlag `fmap` parse)) - (flagToList . fmap display)) - - , option ['o'] ["expose-module"] - "Export a module from the package." 
- IT.exposedModules - (\v flags -> flags { IT.exposedModules = v }) - (reqArg "MODULE" (readP_to_E ("Cannot parse module name: "++) - ((Just . (:[])) `fmap` parse)) - (maybe [] (fmap display))) - - , option [] ["extension"] - "Use a LANGUAGE extension (in the other-extensions field)." - IT.otherExts - (\v flags -> flags { IT.otherExts = v }) - (reqArg "EXTENSION" (readP_to_E ("Cannot parse extension: "++) - ((Just . (:[])) `fmap` parse)) - (maybe [] (fmap display))) - - , option ['d'] ["dependency"] - "Package dependency." - IT.dependencies (\v flags -> flags { IT.dependencies = v }) - (reqArg "PACKAGE" (readP_to_E ("Cannot parse dependency: "++) - ((Just . (:[])) `fmap` parse)) - (maybe [] (fmap display))) - - , option [] ["source-dir"] - "Directory containing package source." - IT.sourceDirs (\v flags -> flags { IT.sourceDirs = v }) - (reqArg' "DIR" (Just . (:[])) - (fromMaybe [])) - - , option [] ["build-tool"] - "Required external build tool." - IT.buildTools (\v flags -> flags { IT.buildTools = v }) - (reqArg' "TOOL" (Just . (:[])) - (fromMaybe [])) - - , optionVerbosity IT.initVerbosity (\v flags -> flags { IT.initVerbosity = v }) - ] - } - where readMaybe s = case reads s of - [(x,"")] -> Just x - _ -> Nothing - --- ------------------------------------------------------------ --- * SDist flags --- ------------------------------------------------------------ - --- | Extra flags to @sdist@ beyond runghc Setup sdist --- -data SDistExFlags = SDistExFlags { - sDistFormat :: Flag ArchiveFormat - } - deriving Show - -data ArchiveFormat = TargzFormat | ZipFormat -- | ... 
- deriving (Show, Eq) - -defaultSDistExFlags :: SDistExFlags -defaultSDistExFlags = SDistExFlags { - sDistFormat = Flag TargzFormat - } - -sdistCommand :: CommandUI (SDistFlags, SDistExFlags) -sdistCommand = Cabal.sdistCommand { - commandDefaultFlags = (commandDefaultFlags Cabal.sdistCommand, defaultSDistExFlags), - commandOptions = \showOrParseArgs -> - liftOptions fst setFst (commandOptions Cabal.sdistCommand showOrParseArgs) - ++ liftOptions snd setSnd sdistExOptions - } - where - setFst a (_,b) = (a,b) - setSnd b (a,_) = (a,b) - - sdistExOptions = - [option [] ["archive-format"] "archive-format" - sDistFormat (\v flags -> flags { sDistFormat = v }) - (choiceOpt - [ (Flag TargzFormat, ([], ["targz"]), - "Produce a '.tar.gz' format archive (default and required for uploading to hackage)") - , (Flag ZipFormat, ([], ["zip"]), - "Produce a '.zip' format archive") - ]) - ] - -instance Monoid SDistExFlags where - mempty = SDistExFlags { - sDistFormat = mempty - } - mappend a b = SDistExFlags { - sDistFormat = combine sDistFormat - } - where - combine field = field a `mappend` field b - --- ------------------------------------------------------------ --- * Win32SelfUpgrade flags --- ------------------------------------------------------------ - -data Win32SelfUpgradeFlags = Win32SelfUpgradeFlags { - win32SelfUpgradeVerbosity :: Flag Verbosity -} - -defaultWin32SelfUpgradeFlags :: Win32SelfUpgradeFlags -defaultWin32SelfUpgradeFlags = Win32SelfUpgradeFlags { - win32SelfUpgradeVerbosity = toFlag normal -} - -win32SelfUpgradeCommand :: CommandUI Win32SelfUpgradeFlags -win32SelfUpgradeCommand = CommandUI { - commandName = "win32selfupgrade", - commandSynopsis = "Self-upgrade the executable on Windows", - commandDescription = Nothing, - commandNotes = Nothing, - commandUsage = \pname -> - "Usage: " ++ pname ++ " win32selfupgrade PID PATH\n", - commandDefaultFlags = defaultWin32SelfUpgradeFlags, - commandOptions = \_ -> - [optionVerbosity win32SelfUpgradeVerbosity - (\v flags 
-> flags { win32SelfUpgradeVerbosity = v}) - ] -} - -instance Monoid Win32SelfUpgradeFlags where - mempty = Win32SelfUpgradeFlags { - win32SelfUpgradeVerbosity = mempty - } - mappend a b = Win32SelfUpgradeFlags { - win32SelfUpgradeVerbosity = combine win32SelfUpgradeVerbosity - } - where combine field = field a `mappend` field b - --- ------------------------------------------------------------ --- * Sandbox-related flags --- ------------------------------------------------------------ - -data SandboxFlags = SandboxFlags { - sandboxVerbosity :: Flag Verbosity, - sandboxSnapshot :: Flag Bool, -- FIXME: this should be an 'add-source'-only - -- flag. - sandboxLocation :: Flag FilePath -} - -defaultSandboxLocation :: FilePath -defaultSandboxLocation = ".cabal-sandbox" - -defaultSandboxFlags :: SandboxFlags -defaultSandboxFlags = SandboxFlags { - sandboxVerbosity = toFlag normal, - sandboxSnapshot = toFlag False, - sandboxLocation = toFlag defaultSandboxLocation - } - -sandboxCommand :: CommandUI SandboxFlags -sandboxCommand = CommandUI { - commandName = "sandbox", - commandSynopsis = "Create/modify/delete a sandbox.", - commandDescription = Just $ \pname -> concat - [ paragraph $ "Sandboxes are isolated package databases that can be used" - ++ " to prevent dependency conflicts that arise when many different" - ++ " packages are installed in the same database (i.e. the user's" - ++ " database in the home directory)." - , paragraph $ "A sandbox in the current directory (created by" - ++ " `sandbox init`) will be used instead of the user's database for" - ++ " commands such as `install` and `build`. Note that (a directly" - ++ " invoked) GHC will not automatically be aware of sandboxes;" - ++ " only if called via appropriate " ++ pname - ++ " commands, e.g. `repl`, `build`, `exec`." 
- , paragraph $ "Currently, " ++ pname ++ " will not search for a sandbox" - ++ " in folders above the current one, so cabal will not see the sandbox" - ++ " if you are in a subfolder of a sandboxes." - , paragraph "Subcommands:" - , headLine "init:" - , indentParagraph $ "Initialize a sandbox in the current directory." - ++ " An existing package database will not be modified, but settings" - ++ " (such as the location of the database) can be modified this way." - , headLine "delete:" - , indentParagraph $ "Remove the sandbox; deleting all the packages" - ++ " installed inside." - , headLine "add-source:" - , indentParagraph $ "Make one or more local package available in the" - ++ " sandbox. PATHS may be relative or absolute." - ++ " Typical usecase is when you need" - ++ " to make a (temporary) modification to a dependency: You download" - ++ " the package into a different directory, make the modification," - ++ " and add that directory to the sandbox with `add-source`." - , indentParagraph $ "Unless given `--snapshot`, any add-source'd" - ++ " dependency that was modified since the last build will be" - ++ " re-installed automatically." - , headLine "delete-source:" - , indentParagraph $ "Remove an add-source dependency; however, this will" - ++ " not delete the package(s) that have been installed in the sandbox" - ++ " from this dependency. You can either unregister the package(s) via" - ++ " `" ++ pname ++ " sandbox hc-pkg unregister` or re-create the" - ++ " sandbox (`sandbox delete; sandbox init`)." - , headLine "list-sources:" - , indentParagraph $ "List the directories of local packages made" - ++ " available via `" ++ pname ++ " add-source`." - , headLine "hc-pkg:" - , indentParagraph $ "Similar to `ghc-pkg`, but for the sandbox package" - ++ " database. Can be used to list specific/all packages that are" - ++ " installed in the sandbox. For subcommands, see the help for" - ++ " ghc-pkg. Affected by the compiler version specified by `configure`." 
- ], - commandNotes = Just $ \_ -> - relevantConfigValuesText ["require-sandbox" - ,"ignore-sandbox"], - commandUsage = usageAlternatives "sandbox" - [ "init [FLAGS]" - , "delete [FLAGS]" - , "add-source [FLAGS] PATHS" - , "delete-source [FLAGS] PATHS" - , "list-sources [FLAGS]" - , "hc-pkg [FLAGS] [--] COMMAND [--] [ARGS]" - ], - - commandDefaultFlags = defaultSandboxFlags, - commandOptions = \_ -> - [ optionVerbosity sandboxVerbosity - (\v flags -> flags { sandboxVerbosity = v }) - - , option [] ["snapshot"] - "Take a snapshot instead of creating a link (only applies to 'add-source')" - sandboxSnapshot (\v flags -> flags { sandboxSnapshot = v }) - trueArg - - , option [] ["sandbox"] - "Sandbox location (default: './.cabal-sandbox')." - sandboxLocation (\v flags -> flags { sandboxLocation = v }) - (reqArgFlag "DIR") - ] - } - -instance Monoid SandboxFlags where - mempty = SandboxFlags { - sandboxVerbosity = mempty, - sandboxSnapshot = mempty, - sandboxLocation = mempty - } - mappend a b = SandboxFlags { - sandboxVerbosity = combine sandboxVerbosity, - sandboxSnapshot = combine sandboxSnapshot, - sandboxLocation = combine sandboxLocation - } - where combine field = field a `mappend` field b - --- ------------------------------------------------------------ --- * Exec Flags --- ------------------------------------------------------------ - -data ExecFlags = ExecFlags { - execVerbosity :: Flag Verbosity -} - -defaultExecFlags :: ExecFlags -defaultExecFlags = ExecFlags { - execVerbosity = toFlag normal - } - -execCommand :: CommandUI ExecFlags -execCommand = CommandUI { - commandName = "exec", - commandSynopsis = "Give a command access to the sandbox package repository.", - commandDescription = Just $ \pname -> wrapText $ - -- TODO: this is too GHC-focused for my liking.. - "A directly invoked GHC will not automatically be aware of any" - ++ " sandboxes: the GHC_PACKAGE_PATH environment variable controls what" - ++ " GHC uses. 
`" ++ pname ++ " exec` can be used to modify this variable:" - ++ " COMMAND will be executed in a modified environment and thereby uses" - ++ " the sandbox package database.\n" - ++ "\n" - ++ "If there is no sandbox, behaves as identity (executing COMMAND).\n" - ++ "\n" - ++ "Note that other " ++ pname ++ " commands change the environment" - ++ " variable appropriately already, so there is no need to wrap those" - ++ " in `" ++ pname ++ " exec`. But with `" ++ pname ++ " exec`, the user" - ++ " has more control and can, for example, execute custom scripts which" - ++ " indirectly execute GHC.\n" - ++ "\n" - ++ "See `" ++ pname ++ " sandbox`.", - commandNotes = Just $ \pname -> - "Examples:\n" - ++ " Install the executable package pandoc into a sandbox and run it:\n" - ++ " " ++ pname ++ " sandbox init\n" - ++ " " ++ pname ++ " install pandoc\n" - ++ " " ++ pname ++ " exec pandoc foo.md\n\n" - ++ " Install the executable package hlint into the user package database\n" - ++ " and run it:\n" - ++ " " ++ pname ++ " install --user hlint\n" - ++ " " ++ pname ++ " exec hlint Foo.hs\n\n" - ++ " Execute runghc on Foo.hs with runghc configured to use the\n" - ++ " sandbox package database (if a sandbox is being used):\n" - ++ " " ++ pname ++ " exec runghc Foo.hs\n", - commandUsage = \pname -> - "Usage: " ++ pname ++ " exec [FLAGS] [--] COMMAND [--] [ARGS]\n", - - commandDefaultFlags = defaultExecFlags, - commandOptions = \_ -> - [ optionVerbosity execVerbosity - (\v flags -> flags { execVerbosity = v }) - ] - } - -instance Monoid ExecFlags where - mempty = ExecFlags { - execVerbosity = mempty - } - mappend a b = ExecFlags { - execVerbosity = combine execVerbosity - } - where combine field = field a `mappend` field b - --- ------------------------------------------------------------ --- * UserConfig flags --- ------------------------------------------------------------ - -data UserConfigFlags = UserConfigFlags { - userConfigVerbosity :: Flag Verbosity -} - -instance Monoid 
UserConfigFlags where - mempty = UserConfigFlags { - userConfigVerbosity = toFlag normal - } - mappend a b = UserConfigFlags { - userConfigVerbosity = combine userConfigVerbosity - } - where combine field = field a `mappend` field b - -userConfigCommand :: CommandUI UserConfigFlags -userConfigCommand = CommandUI { - commandName = "user-config", - commandSynopsis = "Display and update the user's global cabal configuration.", - commandDescription = Just $ \_ -> wrapText $ - "When upgrading cabal, the set of configuration keys and their default" - ++ " values may change. This command provides means to merge the existing" - ++ " config in ~/.cabal/config" - ++ " (i.e. all bindings that are actually defined and not commented out)" - ++ " and the default config of the new version.\n" - ++ "\n" - ++ "diff: Shows a pseudo-diff of the user's ~/.cabal/config file and" - ++ " the default configuration that would be created by cabal if the" - ++ " config file did not exist.\n" - ++ "update: Applies the pseudo-diff to the configuration that would be" - ++ " created by default, and write the result back to ~/.cabal/config.", - - commandNotes = Nothing, - commandUsage = usageAlternatives "user-config" ["diff", "update"], - commandDefaultFlags = mempty, - commandOptions = \ _ -> [ - optionVerbosity userConfigVerbosity (\v flags -> flags { userConfigVerbosity = v }) - ] - } - --- ------------------------------------------------------------ --- * GetOpt Utils --- ------------------------------------------------------------ - -reqArgFlag :: ArgPlaceHolder -> - MkOptDescr (b -> Flag String) (Flag String -> b -> b) b -reqArgFlag ad = reqArg ad (succeedReadE Flag) flagToList - -liftOptions :: (b -> a) -> (a -> b -> b) - -> [OptionField a] -> [OptionField b] -liftOptions get set = map (liftOption get set) - -yesNoOpt :: ShowOrParseArgs -> MkOptDescr (b -> Flag Bool) (Flag Bool -> b -> b) b -yesNoOpt ShowArgs sf lf = trueArg sf lf -yesNoOpt _ sf lf = Command.boolOpt' flagToMaybe Flag (sf, 
lf) ([], map ("no-" ++) lf) sf lf - -optionSolver :: (flags -> Flag PreSolver) - -> (Flag PreSolver -> flags -> flags) - -> OptionField flags -optionSolver get set = - option [] ["solver"] - ("Select dependency solver to use (default: " ++ display defaultSolver ++ "). Choices: " ++ allSolvers ++ ", where 'choose' chooses between 'topdown' and 'modular' based on compiler version.") - get set - (reqArg "SOLVER" (readP_to_E (const $ "solver must be one of: " ++ allSolvers) - (toFlag `fmap` parse)) - (flagToList . fmap display)) - -optionSolverFlags :: ShowOrParseArgs - -> (flags -> Flag Int ) -> (Flag Int -> flags -> flags) - -> (flags -> Flag Bool ) -> (Flag Bool -> flags -> flags) - -> (flags -> Flag Bool ) -> (Flag Bool -> flags -> flags) - -> (flags -> Flag Bool ) -> (Flag Bool -> flags -> flags) - -> (flags -> Flag Bool ) -> (Flag Bool -> flags -> flags) - -> [OptionField flags] -optionSolverFlags showOrParseArgs getmbj setmbj getrg setrg _getig _setig getsip setsip getstrfl setstrfl = - [ option [] ["max-backjumps"] - ("Maximum number of backjumps allowed while solving (default: " ++ show defaultMaxBackjumps ++ "). Use a negative number to enable unlimited backtracking. Use 0 to disable backtracking completely.") - getmbj setmbj - (reqArg "NUM" (readP_to_E ("Cannot parse number: "++) - (fmap toFlag (Parse.readS_to_P reads))) - (map show . flagToList)) - , option [] ["reorder-goals"] - "Try to reorder goals according to certain heuristics. Slows things down on average, but may make backtracking faster for some packages." - getrg setrg - (yesNoOpt showOrParseArgs) - -- TODO: Disabled for now because it does not work as advertised (yet). -{- - , option [] ["independent-goals"] - "Treat several goals on the command line as independent. If several goals depend on the same package, different versions can be chosen." 
- getig setig - (yesNoOpt showOrParseArgs) --} - , option [] ["shadow-installed-packages"] - "If multiple package instances of the same version are installed, treat all but one as shadowed." - getsip setsip - (yesNoOpt showOrParseArgs) - , option [] ["strong-flags"] - "Do not defer flag choices (this used to be the default in cabal-install <= 1.20)." - getstrfl setstrfl - (yesNoOpt showOrParseArgs) - ] - -usageFlagsOrPackages :: String -> String -> String -usageFlagsOrPackages name pname = - "Usage: " ++ pname ++ " " ++ name ++ " [FLAGS]\n" - ++ " or: " ++ pname ++ " " ++ name ++ " [PACKAGES]\n" - -usagePackages :: String -> String -> String -usagePackages name pname = - "Usage: " ++ pname ++ " " ++ name ++ " [PACKAGES]\n" - -usageFlags :: String -> String -> String -usageFlags name pname = - "Usage: " ++ pname ++ " " ++ name ++ " [FLAGS]\n" - ---TODO: do we want to allow per-package flags? -parsePackageArgs :: [String] -> Either String [Dependency] -parsePackageArgs = parsePkgArgs [] - where - parsePkgArgs ds [] = Right (reverse ds) - parsePkgArgs ds (arg:args) = - case readPToMaybe parseDependencyOrPackageId arg of - Just dep -> parsePkgArgs (dep:ds) args - Nothing -> Left $ - show arg ++ " is not valid syntax for a package name or" - ++ " package dependency." 
- -readPToMaybe :: Parse.ReadP a a -> String -> Maybe a -readPToMaybe p str = listToMaybe [ r | (r,s) <- Parse.readP_to_S p str - , all isSpace s ] - -parseDependencyOrPackageId :: Parse.ReadP r Dependency -parseDependencyOrPackageId = parse Parse.+++ liftM pkgidToDependency parse - where - pkgidToDependency :: PackageIdentifier -> Dependency - pkgidToDependency p = case packageVersion p of - Version [] _ -> Dependency (packageName p) anyVersion - version -> Dependency (packageName p) (thisVersion version) - -showRepo :: RemoteRepo -> String -showRepo repo = remoteRepoName repo ++ ":" - ++ uriToString id (remoteRepoURI repo) [] - -readRepo :: String -> Maybe RemoteRepo -readRepo = readPToMaybe parseRepo - -parseRepo :: Parse.ReadP r RemoteRepo -parseRepo = do - name <- Parse.munch1 (\c -> isAlphaNum c || c `elem` "_-.") - _ <- Parse.char ':' - uriStr <- Parse.munch1 (\c -> isAlphaNum c || c `elem` "+-=._/*()@'$:;&!?~") - uri <- maybe Parse.pfail return (parseAbsoluteURI uriStr) - return $ RemoteRepo { - remoteRepoName = name, - remoteRepoURI = uri - } - --- ------------------------------------------------------------ --- * Helpers for Documentation --- ------------------------------------------------------------ - -headLine :: String -> String -headLine = unlines - . map unwords - . wrapLine 79 - . words - -paragraph :: String -> String -paragraph = (++"\n") - . unlines - . map unwords - . wrapLine 79 - . words - -indentParagraph :: String -> String -indentParagraph = unlines - . map ((" "++).unwords) - . wrapLine 77 - . 
words - -relevantConfigValuesText :: [String] -> String -relevantConfigValuesText vs = - "Relevant global configuration keys:\n" - ++ concat [" " ++ v ++ "\n" |v <- vs] diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/SetupWrapper.hs cabal-install-1.22-1.22.9.0/Distribution/Client/SetupWrapper.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/SetupWrapper.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/SetupWrapper.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,579 +0,0 @@ -{-# LANGUAGE CPP #-} ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.SetupWrapper --- Copyright : (c) The University of Glasgow 2006, --- Duncan Coutts 2008 --- --- Maintainer : cabal-devel@haskell.org --- Stability : alpha --- Portability : portable --- --- An interface to building and installing Cabal packages. --- If the @Built-Type@ field is specified as something other than --- 'Custom', and the current version of Cabal is acceptable, this performs --- setup actions directly. Otherwise it builds the setup script and --- runs it with the given arguments. - -module Distribution.Client.SetupWrapper ( - setupWrapper, - SetupScriptOptions(..), - defaultSetupScriptOptions, - ) where - -import qualified Distribution.Make as Make -import qualified Distribution.Simple as Simple -import Distribution.Version - ( Version(..), VersionRange, anyVersion - , intersectVersionRanges, orLaterVersion - , withinRange ) -import Distribution.InstalledPackageInfo (installedPackageId) -import Distribution.Package - ( InstalledPackageId(..), PackageIdentifier(..), - PackageName(..), Package(..), packageName - , packageVersion, Dependency(..) 
) -import Distribution.PackageDescription - ( GenericPackageDescription(packageDescription) - , PackageDescription(..), specVersion - , BuildType(..), knownBuildTypes, defaultRenaming ) -import Distribution.PackageDescription.Parse - ( readPackageDescription ) -import Distribution.Simple.Configure - ( configCompilerEx ) -import Distribution.Compiler - ( buildCompilerId, CompilerFlavor(GHC, GHCJS) ) -import Distribution.Simple.Compiler - ( Compiler(compilerId), compilerFlavor, PackageDB(..), PackageDBStack ) -import Distribution.Simple.PreProcess - ( runSimplePreProcessor, ppUnlit ) -import Distribution.Simple.Program - ( ProgramConfiguration, emptyProgramConfiguration - , getProgramSearchPath, getDbProgramOutput, runDbProgram, ghcProgram - , ghcjsProgram ) -import Distribution.Simple.Program.Find - ( programSearchPathAsPATHVar ) -import Distribution.Simple.Program.Run - ( getEffectiveEnvironment ) -import qualified Distribution.Simple.Program.Strip as Strip -import Distribution.Simple.BuildPaths - ( defaultDistPref, exeExtension ) -import Distribution.Simple.Command - ( CommandUI(..), commandShowOptions ) -import Distribution.Simple.Program.GHC - ( GhcMode(..), GhcOptions(..), renderGhcOptions ) -import qualified Distribution.Simple.PackageIndex as PackageIndex -import Distribution.Simple.PackageIndex (InstalledPackageIndex) -import Distribution.Client.Config - ( defaultCabalDir ) -import Distribution.Client.IndexUtils - ( getInstalledPackages ) -import Distribution.Client.JobControl - ( Lock, criticalSection ) -import Distribution.Simple.Setup - ( Flag(..) 
) -import Distribution.Simple.Utils - ( die, debug, info, cabalVersion, tryFindPackageDesc, comparing - , createDirectoryIfMissingVerbose, installExecutableFile - , copyFileVerbose, rewriteFile, intercalate ) -import Distribution.Client.Utils - ( inDir, tryCanonicalizePath - , existsAndIsMoreRecentThan, moreRecentFile -#if mingw32_HOST_OS - , canonicalizePathNoThrow -#endif - ) -import Distribution.System ( Platform(..), buildPlatform ) -import Distribution.Text - ( display ) -import Distribution.Utils.NubList - ( toNubListR ) -import Distribution.Verbosity - ( Verbosity ) -import Distribution.Compat.Exception - ( catchIO ) - -import System.Directory ( doesFileExist ) -import System.FilePath ( (), (<.>) ) -import System.IO ( Handle, hPutStr ) -import System.Exit ( ExitCode(..), exitWith ) -import System.Process ( runProcess, waitForProcess ) -#if !MIN_VERSION_base(4,8,0) -import Control.Applicative ( (<$>), (<*>) ) -#endif -import Control.Monad ( when, unless ) -import Data.List ( foldl1' ) -import Data.Maybe ( fromMaybe, isJust ) -#if !MIN_VERSION_base(4,8,0) -import Data.Monoid ( mempty ) -#endif -import Data.Char ( isSpace ) - -#ifdef mingw32_HOST_OS -import Distribution.Simple.Utils - ( withTempDirectory ) - -import Control.Exception ( bracket ) -import System.FilePath ( equalFilePath, takeDirectory ) -import System.Directory ( doesDirectoryExist ) -import qualified System.Win32 as Win32 -#endif - -data SetupScriptOptions = SetupScriptOptions { - useCabalVersion :: VersionRange, - useCompiler :: Maybe Compiler, - usePlatform :: Maybe Platform, - usePackageDB :: PackageDBStack, - usePackageIndex :: Maybe InstalledPackageIndex, - useProgramConfig :: ProgramConfiguration, - useDistPref :: FilePath, - useLoggingHandle :: Maybe Handle, - useWorkingDir :: Maybe FilePath, - forceExternalSetupMethod :: Bool, - - -- Used only by 'cabal clean' on Windows. 
- -- - -- Note: win32 clean hack - ------------------------- - -- On Windows, running './dist/setup/setup clean' doesn't work because the - -- setup script will try to delete itself (which causes it to fail horribly, - -- unlike on Linux). So we have to move the setup exe out of the way first - -- and then delete it manually. This applies only to the external setup - -- method. - useWin32CleanHack :: Bool, - - -- Used only when calling setupWrapper from parallel code to serialise - -- access to the setup cache; should be Nothing otherwise. - -- - -- Note: setup exe cache - ------------------------ - -- When we are installing in parallel, we always use the external setup - -- method. Since compiling the setup script each time adds noticeable - -- overhead, we use a shared setup script cache - -- ('~/.cabal/setup-exe-cache'). For each (compiler, platform, Cabal - -- version) combination the cache holds a compiled setup script - -- executable. This only affects the Simple build type; for the Custom, - -- Configure and Make build types we always compile the setup script anew. 
- setupCacheLock :: Maybe Lock - } - -defaultSetupScriptOptions :: SetupScriptOptions -defaultSetupScriptOptions = SetupScriptOptions { - useCabalVersion = anyVersion, - useCompiler = Nothing, - usePlatform = Nothing, - usePackageDB = [GlobalPackageDB, UserPackageDB], - usePackageIndex = Nothing, - useProgramConfig = emptyProgramConfiguration, - useDistPref = defaultDistPref, - useLoggingHandle = Nothing, - useWorkingDir = Nothing, - useWin32CleanHack = False, - forceExternalSetupMethod = False, - setupCacheLock = Nothing - } - -setupWrapper :: Verbosity - -> SetupScriptOptions - -> Maybe PackageDescription - -> CommandUI flags - -> (Version -> flags) - -> [String] - -> IO () -setupWrapper verbosity options mpkg cmd flags extraArgs = do - pkg <- maybe getPkg return mpkg - let setupMethod = determineSetupMethod options' buildType' - options' = options { - useCabalVersion = intersectVersionRanges - (useCabalVersion options) - (orLaterVersion (specVersion pkg)) - } - buildType' = fromMaybe Custom (buildType pkg) - mkArgs cabalLibVersion = commandName cmd - : commandShowOptions cmd (flags cabalLibVersion) - ++ extraArgs - checkBuildType buildType' - setupMethod verbosity options' (packageId pkg) buildType' mkArgs - where - getPkg = tryFindPackageDesc (fromMaybe "." (useWorkingDir options)) - >>= readPackageDescription verbosity - >>= return . packageDescription - - checkBuildType (UnknownBuildType name) = - die $ "The build-type '" ++ name ++ "' is not known. Use one of: " - ++ intercalate ", " (map display knownBuildTypes) ++ "." - checkBuildType _ = return () - --- | Decide if we're going to be able to do a direct internal call to the --- entry point in the Cabal library or if we're going to have to compile --- and execute an external Setup.hs script. 
--- -determineSetupMethod :: SetupScriptOptions -> BuildType -> SetupMethod -determineSetupMethod options buildType' - | forceExternalSetupMethod options = externalSetupMethod - | isJust (useLoggingHandle options) - || buildType' == Custom = externalSetupMethod - | cabalVersion `withinRange` - useCabalVersion options = internalSetupMethod - | otherwise = externalSetupMethod - -type SetupMethod = Verbosity - -> SetupScriptOptions - -> PackageIdentifier - -> BuildType - -> (Version -> [String]) -> IO () - --- ------------------------------------------------------------ --- * Internal SetupMethod --- ------------------------------------------------------------ - -internalSetupMethod :: SetupMethod -internalSetupMethod verbosity options _ bt mkargs = do - let args = mkargs cabalVersion - debug verbosity $ "Using internal setup method with build-type " ++ show bt - ++ " and args:\n " ++ show args - inDir (useWorkingDir options) $ - buildTypeAction bt args - -buildTypeAction :: BuildType -> ([String] -> IO ()) -buildTypeAction Simple = Simple.defaultMainArgs -buildTypeAction Configure = Simple.defaultMainWithHooksArgs - Simple.autoconfUserHooks -buildTypeAction Make = Make.defaultMainArgs -buildTypeAction Custom = error "buildTypeAction Custom" -buildTypeAction (UnknownBuildType _) = error "buildTypeAction UnknownBuildType" - --- ------------------------------------------------------------ --- * External SetupMethod --- ------------------------------------------------------------ - -externalSetupMethod :: SetupMethod -externalSetupMethod verbosity options pkg bt mkargs = do - debug verbosity $ "Using external setup method with build-type " ++ show bt - createDirectoryIfMissingVerbose verbosity True setupDir - (cabalLibVersion, mCabalLibInstalledPkgId, options') <- cabalLibVersionToUse - debug verbosity $ "Using Cabal library version " ++ display cabalLibVersion - path <- if useCachedSetupExecutable - then getCachedSetupExecutable options' - cabalLibVersion 
mCabalLibInstalledPkgId - else compileSetupExecutable options' - cabalLibVersion mCabalLibInstalledPkgId False - invokeSetupScript options' path (mkargs cabalLibVersion) - - where - workingDir = case fromMaybe "" (useWorkingDir options) of - [] -> "." - dir -> dir - setupDir = workingDir useDistPref options "setup" - setupVersionFile = setupDir "setup" <.> "version" - setupHs = setupDir "setup" <.> "hs" - setupProgFile = setupDir "setup" <.> exeExtension - platform = fromMaybe buildPlatform (usePlatform options) - - useCachedSetupExecutable = (bt == Simple || bt == Configure || bt == Make) - - maybeGetInstalledPackages :: SetupScriptOptions -> Compiler - -> ProgramConfiguration -> IO InstalledPackageIndex - maybeGetInstalledPackages options' comp conf = - case usePackageIndex options' of - Just index -> return index - Nothing -> getInstalledPackages verbosity - comp (usePackageDB options') conf - - cabalLibVersionToUse :: IO (Version, (Maybe InstalledPackageId) - ,SetupScriptOptions) - cabalLibVersionToUse = do - savedVer <- savedVersion - case savedVer of - Just version | version `withinRange` useCabalVersion options - -> do updateSetupScript version bt - -- Does the previously compiled setup executable still exist and - -- is it up-to date? - useExisting <- canUseExistingSetup version - if useExisting - then return (version, Nothing, options) - else installedVersion - _ -> installedVersion - where - -- This check duplicates the checks in 'getCachedSetupExecutable' / - -- 'compileSetupExecutable'. Unfortunately, we have to perform it twice - -- because the selected Cabal version may change as a result of this - -- check. 
- canUseExistingSetup :: Version -> IO Bool - canUseExistingSetup version = - if useCachedSetupExecutable - then do - (_, cachedSetupProgFile) <- cachedSetupDirAndProg options version - doesFileExist cachedSetupProgFile - else - (&&) <$> setupProgFile `existsAndIsMoreRecentThan` setupHs - <*> setupProgFile `existsAndIsMoreRecentThan` setupVersionFile - - installedVersion :: IO (Version, Maybe InstalledPackageId - ,SetupScriptOptions) - installedVersion = do - (comp, conf, options') <- configureCompiler options - (version, mipkgid, options'') <- installedCabalVersion options' comp conf - updateSetupScript version bt - writeFile setupVersionFile (show version ++ "\n") - return (version, mipkgid, options'') - - savedVersion :: IO (Maybe Version) - savedVersion = do - versionString <- readFile setupVersionFile `catchIO` \_ -> return "" - case reads versionString of - [(version,s)] | all isSpace s -> return (Just version) - _ -> return Nothing - - -- | Update a Setup.hs script, creating it if necessary. - updateSetupScript :: Version -> BuildType -> IO () - updateSetupScript _ Custom = do - useHs <- doesFileExist customSetupHs - useLhs <- doesFileExist customSetupLhs - unless (useHs || useLhs) $ die - "Using 'build-type: Custom' but there is no Setup.hs or Setup.lhs script." 
- let src = (if useHs then customSetupHs else customSetupLhs) - srcNewer <- src `moreRecentFile` setupHs - when srcNewer $ if useHs - then copyFileVerbose verbosity src setupHs - else runSimplePreProcessor ppUnlit src setupHs verbosity - where - customSetupHs = workingDir "Setup.hs" - customSetupLhs = workingDir "Setup.lhs" - - updateSetupScript cabalLibVersion _ = - rewriteFile setupHs (buildTypeScript cabalLibVersion) - - buildTypeScript :: Version -> String - buildTypeScript cabalLibVersion = case bt of - Simple -> "import Distribution.Simple; main = defaultMain\n" - Configure -> "import Distribution.Simple; main = defaultMainWithHooks " - ++ if cabalLibVersion >= Version [1,3,10] [] - then "autoconfUserHooks\n" - else "defaultUserHooks\n" - Make -> "import Distribution.Make; main = defaultMain\n" - Custom -> error "buildTypeScript Custom" - UnknownBuildType _ -> error "buildTypeScript UnknownBuildType" - - installedCabalVersion :: SetupScriptOptions -> Compiler -> ProgramConfiguration - -> IO (Version, Maybe InstalledPackageId - ,SetupScriptOptions) - installedCabalVersion options' _ _ | packageName pkg == PackageName "Cabal" = - return (packageVersion pkg, Nothing, options') - installedCabalVersion options' compiler conf = do - index <- maybeGetInstalledPackages options' compiler conf - let cabalDep = Dependency (PackageName "Cabal") (useCabalVersion options') - options'' = options' { usePackageIndex = Just index } - case PackageIndex.lookupDependency index cabalDep of - [] -> die $ "The package '" ++ display (packageName pkg) - ++ "' requires Cabal library version " - ++ display (useCabalVersion options) - ++ " but no suitable version is installed." - pkgs -> let ipkginfo = head . snd . bestVersion fst $ pkgs - in return (packageVersion ipkginfo - ,Just . installedPackageId $ ipkginfo, options'') - - bestVersion :: (a -> Version) -> [a] -> a - bestVersion f = firstMaximumBy (comparing (preference . 
f)) - where - -- Like maximumBy, but picks the first maximum element instead of the - -- last. In general, we expect the preferred version to go first in the - -- list. For the default case, this has the effect of choosing the version - -- installed in the user package DB instead of the global one. See #1463. - -- - -- Note: firstMaximumBy could be written as just - -- `maximumBy cmp . reverse`, but the problem is that the behaviour of - -- maximumBy is not fully specified in the case when there is not a single - -- greatest element. - firstMaximumBy :: (a -> a -> Ordering) -> [a] -> a - firstMaximumBy _ [] = - error "Distribution.Client.firstMaximumBy: empty list" - firstMaximumBy cmp xs = foldl1' maxBy xs - where - maxBy x y = case cmp x y of { GT -> x; EQ -> x; LT -> y; } - - preference version = (sameVersion, sameMajorVersion - ,stableVersion, latestVersion) - where - sameVersion = version == cabalVersion - sameMajorVersion = majorVersion version == majorVersion cabalVersion - majorVersion = take 2 . versionBranch - stableVersion = case versionBranch version of - (_:x:_) -> even x - _ -> False - latestVersion = version - - configureCompiler :: SetupScriptOptions - -> IO (Compiler, ProgramConfiguration, SetupScriptOptions) - configureCompiler options' = do - (comp, conf) <- case useCompiler options' of - Just comp -> return (comp, useProgramConfig options') - Nothing -> do (comp, _, conf) <- - configCompilerEx (Just GHC) Nothing Nothing - (useProgramConfig options') verbosity - return (comp, conf) - -- Whenever we need to call configureCompiler, we also need to access the - -- package index, so let's cache it in SetupScriptOptions. - index <- maybeGetInstalledPackages options' comp conf - return (comp, conf, options' { useCompiler = Just comp, - usePackageIndex = Just index, - useProgramConfig = conf }) - - -- | Path to the setup exe cache directory and path to the cached setup - -- executable. 
- cachedSetupDirAndProg :: SetupScriptOptions -> Version - -> IO (FilePath, FilePath) - cachedSetupDirAndProg options' cabalLibVersion = do - cabalDir <- defaultCabalDir - let setupCacheDir = cabalDir "setup-exe-cache" - cachedSetupProgFile = setupCacheDir - ("setup-" ++ buildTypeString ++ "-" - ++ cabalVersionString ++ "-" - ++ platformString ++ "-" - ++ compilerVersionString) - <.> exeExtension - return (setupCacheDir, cachedSetupProgFile) - where - buildTypeString = show bt - cabalVersionString = "Cabal-" ++ (display cabalLibVersion) - compilerVersionString = display $ - fromMaybe buildCompilerId - (fmap compilerId . useCompiler $ options') - platformString = display platform - - -- | Look up the setup executable in the cache; update the cache if the setup - -- executable is not found. - getCachedSetupExecutable :: SetupScriptOptions - -> Version -> Maybe InstalledPackageId - -> IO FilePath - getCachedSetupExecutable options' cabalLibVersion - maybeCabalLibInstalledPkgId = do - (setupCacheDir, cachedSetupProgFile) <- - cachedSetupDirAndProg options' cabalLibVersion - cachedSetupExists <- doesFileExist cachedSetupProgFile - if cachedSetupExists - then debug verbosity $ - "Found cached setup executable: " ++ cachedSetupProgFile - else criticalSection' $ do - -- The cache may have been populated while we were waiting. - cachedSetupExists' <- doesFileExist cachedSetupProgFile - if cachedSetupExists' - then debug verbosity $ - "Found cached setup executable: " ++ cachedSetupProgFile - else do - debug verbosity $ "Setup executable not found in the cache." 
- src <- compileSetupExecutable options' - cabalLibVersion maybeCabalLibInstalledPkgId True - createDirectoryIfMissingVerbose verbosity True setupCacheDir - installExecutableFile verbosity src cachedSetupProgFile - -- Do not strip if we're using GHCJS, since the result may be a script - when (maybe True ((/=GHCJS).compilerFlavor) $ useCompiler options') $ - Strip.stripExe verbosity platform (useProgramConfig options') - cachedSetupProgFile - return cachedSetupProgFile - where - criticalSection' = fromMaybe id - (fmap criticalSection $ setupCacheLock options') - - -- | If the Setup.hs is out of date wrt the executable then recompile it. - -- Currently this is GHC/GHCJS only. It should really be generalised. - -- - compileSetupExecutable :: SetupScriptOptions - -> Version -> Maybe InstalledPackageId -> Bool - -> IO FilePath - compileSetupExecutable options' cabalLibVersion maybeCabalLibInstalledPkgId - forceCompile = do - setupHsNewer <- setupHs `moreRecentFile` setupProgFile - cabalVersionNewer <- setupVersionFile `moreRecentFile` setupProgFile - let outOfDate = setupHsNewer || cabalVersionNewer - when (outOfDate || forceCompile) $ do - debug verbosity "Setup executable needs to be updated, compiling..." 
- (compiler, conf, options'') <- configureCompiler options' - let cabalPkgid = PackageIdentifier (PackageName "Cabal") cabalLibVersion - (program, extraOpts) - = case compilerFlavor compiler of - GHCJS -> (ghcjsProgram, ["-build-runner"]) - _ -> (ghcProgram, ["-threaded"]) - ghcOptions = mempty { - ghcOptVerbosity = Flag verbosity - , ghcOptMode = Flag GhcModeMake - , ghcOptInputFiles = toNubListR [setupHs] - , ghcOptOutputFile = Flag setupProgFile - , ghcOptObjDir = Flag setupDir - , ghcOptHiDir = Flag setupDir - , ghcOptSourcePathClear = Flag True - , ghcOptSourcePath = toNubListR [workingDir] - , ghcOptPackageDBs = usePackageDB options'' - , ghcOptPackages = toNubListR $ - maybe [] (\ipkgid -> [(ipkgid, cabalPkgid, defaultRenaming)]) - maybeCabalLibInstalledPkgId - , ghcOptExtra = toNubListR extraOpts - } - let ghcCmdLine = renderGhcOptions compiler ghcOptions - case useLoggingHandle options of - Nothing -> runDbProgram verbosity program conf ghcCmdLine - - -- If build logging is enabled, redirect compiler output to the log file. - (Just logHandle) -> do output <- getDbProgramOutput verbosity program - conf ghcCmdLine - hPutStr logHandle output - return setupProgFile - - invokeSetupScript :: SetupScriptOptions -> FilePath -> [String] -> IO () - invokeSetupScript options' path args = do - info verbosity $ unwords (path : args) - case useLoggingHandle options' of - Nothing -> return () - Just logHandle -> info verbosity $ "Redirecting build log to " - ++ show logHandle - - -- Since useWorkingDir can change the relative path, the path argument must - -- be turned into an absolute path. On some systems, runProcess will take - -- path as relative to the new working directory instead of the current - -- working directory. - path' <- tryCanonicalizePath path - - -- See 'Note: win32 clean hack' above. 
-#if mingw32_HOST_OS - -- setupProgFile may not exist if we're using a cached program - setupProgFile' <- canonicalizePathNoThrow setupProgFile - let win32CleanHackNeeded = (useWin32CleanHack options') - -- Skip when a cached setup script is used. - && setupProgFile' `equalFilePath` path' - if win32CleanHackNeeded then doWin32CleanHack path' else doInvoke path' -#else - doInvoke path' -#endif - - where - doInvoke path' = do - searchpath <- programSearchPathAsPATHVar - (getProgramSearchPath (useProgramConfig options')) - env <- getEffectiveEnvironment [("PATH", Just searchpath)] - - process <- runProcess path' args - (useWorkingDir options') env Nothing - (useLoggingHandle options') (useLoggingHandle options') - exitCode <- waitForProcess process - unless (exitCode == ExitSuccess) $ exitWith exitCode - -#if mingw32_HOST_OS - doWin32CleanHack path' = do - info verbosity $ "Using the Win32 clean hack." - -- Recursively removes the temp dir on exit. - withTempDirectory verbosity workingDir "cabal-tmp" $ \tmpDir -> - bracket (moveOutOfTheWay tmpDir path') - (maybeRestore path') - doInvoke - - moveOutOfTheWay tmpDir path' = do - let newPath = tmpDir "setup" <.> exeExtension - Win32.moveFile path' newPath - return newPath - - maybeRestore oldPath path' = do - let oldPathDir = takeDirectory oldPath - oldPathDirExists <- doesDirectoryExist oldPathDir - -- 'setup clean' didn't complete, 'dist/setup' still exists. - when oldPathDirExists $ - Win32.moveFile path' oldPath -#endif diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/SrcDist.hs cabal-install-1.22-1.22.9.0/Distribution/Client/SrcDist.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/SrcDist.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/SrcDist.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,139 +0,0 @@ --- Implements the \"@.\/cabal sdist@\" command, which creates a source --- distribution for this package. 
That is, packs up the source code --- into a tarball, making use of the corresponding Cabal module. -module Distribution.Client.SrcDist ( - sdist - ) where - - -import Distribution.Client.SetupWrapper - ( SetupScriptOptions(..), defaultSetupScriptOptions, setupWrapper ) -import Distribution.Client.Tar (createTarGzFile) - -import Distribution.Package - ( Package(..) ) -import Distribution.PackageDescription - ( PackageDescription ) -import Distribution.PackageDescription.Configuration - ( flattenPackageDescription ) -import Distribution.PackageDescription.Parse - ( readPackageDescription ) -import Distribution.Simple.Utils - ( createDirectoryIfMissingVerbose, defaultPackageDesc - , die, notice, withTempDirectory ) -import Distribution.Client.Setup - ( SDistFlags(..), SDistExFlags(..), ArchiveFormat(..) ) -import Distribution.Simple.Setup - ( Flag(..), sdistCommand, flagToList, fromFlag, fromFlagOrDefault ) -import Distribution.Simple.BuildPaths ( srcPref) -import Distribution.Simple.Program (requireProgram, simpleProgram, programPath) -import Distribution.Simple.Program.Db (emptyProgramDb) -import Distribution.Text ( display ) -import Distribution.Verbosity (Verbosity) -import Distribution.Version (Version(..), orLaterVersion) - -import System.FilePath ((), (<.>)) -import Control.Monad (when, unless) -import System.Directory (doesFileExist, removeFile, canonicalizePath) -import System.Process (runProcess, waitForProcess) -import System.Exit (ExitCode(..)) - --- |Create a source distribution. -sdist :: SDistFlags -> SDistExFlags -> IO () -sdist flags exflags = do - pkg <- return . flattenPackageDescription - =<< readPackageDescription verbosity - =<< defaultPackageDesc verbosity - let withDir = if not needMakeArchive then (\f -> f tmpTargetDir) - else withTempDirectory verbosity tmpTargetDir "sdist." - -- 'withTempDir' fails if we don't create 'tmpTargetDir'... 
- when needMakeArchive $ - createDirectoryIfMissingVerbose verbosity True tmpTargetDir - withDir $ \tmpDir -> do - let outDir = if isOutDirectory then tmpDir else tmpDir tarBallName pkg - flags' = (if not needMakeArchive then flags - else flags { sDistDirectory = Flag outDir }) - unless isListSources $ - createDirectoryIfMissingVerbose verbosity True outDir - - -- Run 'setup sdist --output-directory=tmpDir' (or - -- '--list-source'/'--output-directory=someOtherDir') in case we were passed - -- those options. - setupWrapper verbosity setupOpts (Just pkg) sdistCommand (const flags') [] - - -- Unless we were given --list-sources or --output-directory ourselves, - -- create an archive. - when needMakeArchive $ - createArchive verbosity pkg tmpDir distPref - - when isOutDirectory $ - notice verbosity $ "Source directory created: " ++ tmpTargetDir - - when isListSources $ - notice verbosity $ "List of package sources written to file '" - ++ (fromFlag . sDistListSources $ flags) ++ "'" - - where - flagEnabled f = not . null . flagToList . f $ flags - - isListSources = flagEnabled sDistListSources - isOutDirectory = flagEnabled sDistDirectory - needMakeArchive = not (isListSources || isOutDirectory) - verbosity = fromFlag (sDistVerbosity flags) - distPref = fromFlag (sDistDistPref flags) - tmpTargetDir = fromFlagOrDefault (srcPref distPref) (sDistDirectory flags) - setupOpts = defaultSetupScriptOptions { - -- The '--output-directory' sdist flag was introduced in Cabal 1.12, and - -- '--list-sources' in 1.17. - useCabalVersion = if isListSources - then orLaterVersion $ Version [1,17,0] [] - else orLaterVersion $ Version [1,12,0] [] - } - format = fromFlag (sDistFormat exflags) - createArchive = case format of - TargzFormat -> createTarGzArchive - ZipFormat -> createZipArchive - -tarBallName :: PackageDescription -> String -tarBallName = display . packageId - --- | Create a tar.gz archive from a tree of source files. 
-createTarGzArchive :: Verbosity -> PackageDescription -> FilePath -> FilePath - -> IO () -createTarGzArchive verbosity pkg tmpDir targetPref = do - createTarGzFile tarBallFilePath tmpDir (tarBallName pkg) - notice verbosity $ "Source tarball created: " ++ tarBallFilePath - where - tarBallFilePath = targetPref tarBallName pkg <.> "tar.gz" - --- | Create a zip archive from a tree of source files. -createZipArchive :: Verbosity -> PackageDescription -> FilePath -> FilePath - -> IO () -createZipArchive verbosity pkg tmpDir targetPref = do - let dir = tarBallName pkg - zipfile = targetPref dir <.> "zip" - (zipProg, _) <- requireProgram verbosity zipProgram emptyProgramDb - - -- zip has an annoying habit of updating the target rather than creating - -- it from scratch. While that might sound like an optimisation, it doesn't - -- remove files already in the archive that are no longer present in the - -- uncompressed tree. - alreadyExists <- doesFileExist zipfile - when alreadyExists $ removeFile zipfile - - -- We call zip with a different CWD, so have to make the path - -- absolute. Can't just use 'canonicalizePath zipfile' since this function - -- requires its argument to refer to an existing file. - zipfileAbs <- fmap ( dir <.> "zip") . 
canonicalizePath $ targetPref - - --TODO: use runProgramInvocation, but has to be able to set CWD - hnd <- runProcess (programPath zipProg) ["-q", "-r", zipfileAbs, dir] - (Just tmpDir) - Nothing Nothing Nothing Nothing - exitCode <- waitForProcess hnd - unless (exitCode == ExitSuccess) $ - die $ "Generating the zip file failed " - ++ "(zip returned exit code " ++ show exitCode ++ ")" - notice verbosity $ "Source zip archive created: " ++ zipfile - where - zipProgram = simpleProgram "zip" diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Targets.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Targets.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Targets.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Targets.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,774 +0,0 @@ -{-# LANGUAGE CPP #-} ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.Targets --- Copyright : (c) Duncan Coutts 2011 --- License : BSD-like --- --- Maintainer : duncan@community.haskell.org --- --- Handling for user-specified targets ------------------------------------------------------------------------------ -module Distribution.Client.Targets ( - -- * User targets - UserTarget(..), - readUserTargets, - - -- * Package specifiers - PackageSpecifier(..), - pkgSpecifierTarget, - pkgSpecifierConstraints, - - -- * Resolving user targets to package specifiers - resolveUserTargets, - - -- ** Detailed interface - UserTargetProblem(..), - readUserTarget, - reportUserTargetProblems, - expandUserTarget, - - PackageTarget(..), - fetchPackageTarget, - readPackageTarget, - - PackageTargetProblem(..), - reportPackageTargetProblems, - - disambiguatePackageTargets, - disambiguatePackageName, - - -- * User constraints - UserConstraint(..), - readUserConstraint, - userToPackageConstraint - - ) where - -import Distribution.Package - ( Package(..), PackageName(..) 
- , PackageIdentifier(..), packageName, packageVersion - , Dependency(Dependency) ) -import Distribution.Client.Types - ( SourcePackage(..), PackageLocation(..), OptionalStanza(..) ) -import Distribution.Client.Dependency.Types - ( PackageConstraint(..) ) - -import qualified Distribution.Client.World as World -import Distribution.Client.PackageIndex (PackageIndex) -import qualified Distribution.Client.PackageIndex as PackageIndex -import qualified Distribution.Client.Tar as Tar -import Distribution.Client.FetchUtils -import Distribution.Client.Utils ( tryFindPackageDesc ) - -import Distribution.PackageDescription - ( GenericPackageDescription, FlagName(..), FlagAssignment ) -import Distribution.PackageDescription.Parse - ( readPackageDescription, parsePackageDescription, ParseResult(..) ) -import Distribution.Version - ( Version(Version), thisVersion, anyVersion, isAnyVersion - , VersionRange ) -import Distribution.Text - ( Text(..), display ) -import Distribution.Verbosity (Verbosity) -import Distribution.Simple.Utils - ( die, warn, intercalate, fromUTF8, lowercase ) - -import Data.List - ( find, nub ) -import Data.Maybe - ( listToMaybe ) -import Data.Either - ( partitionEithers ) -#if !MIN_VERSION_base(4,8,0) -import Data.Monoid - ( Monoid(..) 
) -#endif -import qualified Data.Map as Map -import qualified Data.ByteString.Lazy as BS -import qualified Data.ByteString.Lazy.Char8 as BS.Char8 -import qualified Distribution.Client.GZipUtils as GZipUtils -import Control.Monad (liftM) -import qualified Distribution.Compat.ReadP as Parse -import Distribution.Compat.ReadP - ( (+++), (<++) ) -import qualified Text.PrettyPrint as Disp -import Text.PrettyPrint - ( (<>), (<+>) ) -import Data.Char - ( isSpace, isAlphaNum ) -import System.FilePath - ( takeExtension, dropExtension, takeDirectory, splitPath ) -import System.Directory - ( doesFileExist, doesDirectoryExist ) -import Network.URI - ( URI(..), URIAuth(..), parseAbsoluteURI ) - --- ------------------------------------------------------------ --- * User targets --- ------------------------------------------------------------ - --- | Various ways that a user may specify a package or package collection. --- -data UserTarget = - - -- | A partially specified package, identified by name and possibly with - -- an exact version or a version constraint. - -- - -- > cabal install foo - -- > cabal install foo-1.0 - -- > cabal install 'foo < 2' - -- - UserTargetNamed Dependency - - -- | A special virtual package that refers to the collection of packages - -- recorded in the world file that the user specifically installed. - -- - -- > cabal install world - -- - | UserTargetWorld - - -- | A specific package that is unpacked in a local directory, often the - -- current directory. - -- - -- > cabal install . - -- > cabal install ../lib/other - -- - -- * Note: in future, if multiple @.cabal@ files are allowed in a single - -- directory then this will refer to the collection of packages. - -- - | UserTargetLocalDir FilePath - - -- | A specific local unpacked package, identified by its @.cabal@ file. 
- -- - -- > cabal install foo.cabal - -- > cabal install ../lib/other/bar.cabal - -- - | UserTargetLocalCabalFile FilePath - - -- | A specific package that is available as a local tarball file - -- - -- > cabal install dist/foo-1.0.tar.gz - -- > cabal install ../build/baz-1.0.tar.gz - -- - | UserTargetLocalTarball FilePath - - -- | A specific package that is available as a remote tarball file - -- - -- > cabal install http://code.haskell.org/~user/foo/foo-0.9.tar.gz - -- - | UserTargetRemoteTarball URI - deriving (Show,Eq) - - --- ------------------------------------------------------------ --- * Package specifier --- ------------------------------------------------------------ - --- | A fully or partially resolved reference to a package. --- -data PackageSpecifier pkg = - - -- | A partially specified reference to a package (either source or - -- installed). It is specified by package name and optionally some - -- additional constraints. Use a dependency resolver to pick a specific - -- package satisfying these constraints. - -- - NamedPackage PackageName [PackageConstraint] - - -- | A fully specified source package. 
- -- - | SpecificSourcePackage pkg - deriving Show - -pkgSpecifierTarget :: Package pkg => PackageSpecifier pkg -> PackageName -pkgSpecifierTarget (NamedPackage name _) = name -pkgSpecifierTarget (SpecificSourcePackage pkg) = packageName pkg - -pkgSpecifierConstraints :: Package pkg - => PackageSpecifier pkg -> [PackageConstraint] -pkgSpecifierConstraints (NamedPackage _ constraints) = constraints -pkgSpecifierConstraints (SpecificSourcePackage pkg) = - [PackageConstraintVersion (packageName pkg) - (thisVersion (packageVersion pkg))] - - --- ------------------------------------------------------------ --- * Parsing and checking user targets --- ------------------------------------------------------------ - -readUserTargets :: Verbosity -> [String] -> IO [UserTarget] -readUserTargets _verbosity targetStrs = do - (problems, targets) <- liftM partitionEithers - (mapM readUserTarget targetStrs) - reportUserTargetProblems problems - return targets - - -data UserTargetProblem - = UserTargetUnexpectedFile String - | UserTargetNonexistantFile String - | UserTargetUnexpectedUriScheme String - | UserTargetUnrecognisedUri String - | UserTargetUnrecognised String - | UserTargetBadWorldPkg - deriving Show - -readUserTarget :: String -> IO (Either UserTargetProblem UserTarget) -readUserTarget targetstr = - case testNamedTargets targetstr of - Just (Dependency (PackageName "world") verrange) - | verrange == anyVersion -> return (Right UserTargetWorld) - | otherwise -> return (Left UserTargetBadWorldPkg) - Just dep -> return (Right (UserTargetNamed dep)) - Nothing -> do - fileTarget <- testFileTargets targetstr - case fileTarget of - Just target -> return target - Nothing -> - case testUriTargets targetstr of - Just target -> return target - Nothing -> return (Left (UserTargetUnrecognised targetstr)) - where - testNamedTargets = readPToMaybe parseDependencyOrPackageId - - testFileTargets filename = do - isDir <- doesDirectoryExist filename - isFile <- doesFileExist filename - 
parentDirExists <- case takeDirectory filename of - [] -> return False - dir -> doesDirectoryExist dir - let result - | isDir - = Just (Right (UserTargetLocalDir filename)) - - | isFile && extensionIsTarGz filename - = Just (Right (UserTargetLocalTarball filename)) - - | isFile && takeExtension filename == ".cabal" - = Just (Right (UserTargetLocalCabalFile filename)) - - | isFile - = Just (Left (UserTargetUnexpectedFile filename)) - - | parentDirExists - = Just (Left (UserTargetNonexistantFile filename)) - - | otherwise - = Nothing - return result - - testUriTargets str = - case parseAbsoluteURI str of - Just uri@URI { - uriScheme = scheme, - uriAuthority = Just URIAuth { uriRegName = host } - } - | scheme /= "http:" -> - Just (Left (UserTargetUnexpectedUriScheme targetstr)) - - | null host -> - Just (Left (UserTargetUnrecognisedUri targetstr)) - - | otherwise -> - Just (Right (UserTargetRemoteTarball uri)) - _ -> Nothing - - extensionIsTarGz f = takeExtension f == ".gz" - && takeExtension (dropExtension f) == ".tar" - - parseDependencyOrPackageId :: Parse.ReadP r Dependency - parseDependencyOrPackageId = parse - +++ liftM pkgidToDependency parse - where - pkgidToDependency :: PackageIdentifier -> Dependency - pkgidToDependency p = case packageVersion p of - Version [] _ -> Dependency (packageName p) anyVersion - version -> Dependency (packageName p) (thisVersion version) - -readPToMaybe :: Parse.ReadP a a -> String -> Maybe a -readPToMaybe p str = listToMaybe [ r | (r,s) <- Parse.readP_to_S p str - , all isSpace s ] - - -reportUserTargetProblems :: [UserTargetProblem] -> IO () -reportUserTargetProblems problems = do - case [ target | UserTargetUnrecognised target <- problems ] of - [] -> return () - target -> die - $ unlines - [ "Unrecognised target '" ++ name ++ "'." - | name <- target ] - ++ "Targets can be:\n" - ++ " - package names, e.g. 
'pkgname', 'pkgname-1.0.1', 'pkgname < 2.0'\n" - ++ " - the special 'world' target\n" - ++ " - cabal files 'pkgname.cabal' or package directories 'pkgname/'\n" - ++ " - package tarballs 'pkgname.tar.gz' or 'http://example.com/pkgname.tar.gz'" - - case [ () | UserTargetBadWorldPkg <- problems ] of - [] -> return () - _ -> die "The special 'world' target does not take any version." - - case [ target | UserTargetNonexistantFile target <- problems ] of - [] -> return () - target -> die - $ unlines - [ "The file does not exist '" ++ name ++ "'." - | name <- target ] - - case [ target | UserTargetUnexpectedFile target <- problems ] of - [] -> return () - target -> die - $ unlines - [ "Unrecognised file target '" ++ name ++ "'." - | name <- target ] - ++ "File targets can be either package tarballs 'pkgname.tar.gz' " - ++ "or cabal files 'pkgname.cabal'." - - case [ target | UserTargetUnexpectedUriScheme target <- problems ] of - [] -> return () - target -> die - $ unlines - [ "URL target not supported '" ++ name ++ "'." - | name <- target ] - ++ "Only 'http://' URLs are supported." - - case [ target | UserTargetUnrecognisedUri target <- problems ] of - [] -> return () - target -> die - $ unlines - [ "Unrecognise URL target '" ++ name ++ "'." - | name <- target ] - - --- ------------------------------------------------------------ --- * Resolving user targets to package specifiers --- ------------------------------------------------------------ - --- | Given a bunch of user-specified targets, try to resolve what it is they --- refer to. They can either be specific packages (local dirs, tarballs etc) --- or they can be named packages (with or without version info). 
--- -resolveUserTargets :: Package pkg - => Verbosity - -> FilePath - -> PackageIndex pkg - -> [UserTarget] - -> IO [PackageSpecifier SourcePackage] -resolveUserTargets verbosity worldFile available userTargets = do - - -- given the user targets, get a list of fully or partially resolved - -- package references - packageTargets <- mapM (readPackageTarget verbosity) - =<< mapM (fetchPackageTarget verbosity) . concat - =<< mapM (expandUserTarget worldFile) userTargets - - -- users are allowed to give package names case-insensitively, so we must - -- disambiguate named package references - let (problems, packageSpecifiers) = - disambiguatePackageTargets available availableExtra packageTargets - - -- use any extra specific available packages to help us disambiguate - availableExtra = [ packageName pkg - | PackageTargetLocation pkg <- packageTargets ] - - reportPackageTargetProblems verbosity problems - - return packageSpecifiers - - --- ------------------------------------------------------------ --- * Package targets --- ------------------------------------------------------------ - --- | An intermediate between a 'UserTarget' and a resolved 'PackageSpecifier'. --- Unlike a 'UserTarget', a 'PackageTarget' refers only to a single package. --- -data PackageTarget pkg = - PackageTargetNamed PackageName [PackageConstraint] UserTarget - - -- | A package identified by name, but case insensitively, so it needs - -- to be resolved to the right case-sensitive name. - | PackageTargetNamedFuzzy PackageName [PackageConstraint] UserTarget - | PackageTargetLocation pkg - deriving Show - - --- ------------------------------------------------------------ --- * Converting user targets to package targets --- ------------------------------------------------------------ - --- | Given a user-specified target, expand it to a bunch of package targets --- (each of which refers to only one package). 
--- -expandUserTarget :: FilePath - -> UserTarget - -> IO [PackageTarget (PackageLocation ())] -expandUserTarget worldFile userTarget = case userTarget of - - UserTargetNamed (Dependency name vrange) -> - let constraints = [ PackageConstraintVersion name vrange - | not (isAnyVersion vrange) ] - in return [PackageTargetNamedFuzzy name constraints userTarget] - - UserTargetWorld -> do - worldPkgs <- World.getContents worldFile - --TODO: should we warn if there are no world targets? - return [ PackageTargetNamed name constraints userTarget - | World.WorldPkgInfo (Dependency name vrange) flags <- worldPkgs - , let constraints = [ PackageConstraintVersion name vrange - | not (isAnyVersion vrange) ] - ++ [ PackageConstraintFlags name flags - | not (null flags) ] ] - - UserTargetLocalDir dir -> - return [ PackageTargetLocation (LocalUnpackedPackage dir) ] - - UserTargetLocalCabalFile file -> do - let dir = takeDirectory file - _ <- tryFindPackageDesc dir (localPackageError dir) -- just as a check - return [ PackageTargetLocation (LocalUnpackedPackage dir) ] - - UserTargetLocalTarball tarballFile -> - return [ PackageTargetLocation (LocalTarballPackage tarballFile) ] - - UserTargetRemoteTarball tarballURL -> - return [ PackageTargetLocation (RemoteTarballPackage tarballURL ()) ] - -localPackageError :: FilePath -> String -localPackageError dir = - "Error reading local package.\nCouldn't find .cabal file in: " ++ dir - --- ------------------------------------------------------------ --- * Fetching and reading package targets --- ------------------------------------------------------------ - - --- | Fetch any remote targets so that they can be read. 
--- -fetchPackageTarget :: Verbosity - -> PackageTarget (PackageLocation ()) - -> IO (PackageTarget (PackageLocation FilePath)) -fetchPackageTarget verbosity target = case target of - PackageTargetNamed n cs ut -> return (PackageTargetNamed n cs ut) - PackageTargetNamedFuzzy n cs ut -> return (PackageTargetNamedFuzzy n cs ut) - PackageTargetLocation location -> do - location' <- fetchPackage verbosity (fmap (const Nothing) location) - return (PackageTargetLocation location') - - --- | Given a package target that has been fetched, read the .cabal file. --- --- This only affects targets given by location, named targets are unaffected. --- -readPackageTarget :: Verbosity - -> PackageTarget (PackageLocation FilePath) - -> IO (PackageTarget SourcePackage) -readPackageTarget verbosity target = case target of - - PackageTargetNamed pkgname constraints userTarget -> - return (PackageTargetNamed pkgname constraints userTarget) - - PackageTargetNamedFuzzy pkgname constraints userTarget -> - return (PackageTargetNamedFuzzy pkgname constraints userTarget) - - PackageTargetLocation location -> case location of - - LocalUnpackedPackage dir -> do - pkg <- tryFindPackageDesc dir (localPackageError dir) >>= - readPackageDescription verbosity - return $ PackageTargetLocation $ - SourcePackage { - packageInfoId = packageId pkg, - packageDescription = pkg, - packageSource = fmap Just location, - packageDescrOverride = Nothing - } - - LocalTarballPackage tarballFile -> - readTarballPackageTarget location tarballFile tarballFile - - RemoteTarballPackage tarballURL tarballFile -> - readTarballPackageTarget location tarballFile (show tarballURL) - - RepoTarballPackage _repo _pkgid _ -> - error "TODO: readPackageTarget RepoTarballPackage" - -- For repo tarballs this info should be obtained from the index. 
- - where - readTarballPackageTarget location tarballFile tarballOriginalLoc = do - (filename, content) <- extractTarballPackageCabalFile - tarballFile tarballOriginalLoc - case parsePackageDescription' content of - Nothing -> die $ "Could not parse the cabal file " - ++ filename ++ " in " ++ tarballFile - Just pkg -> - return $ PackageTargetLocation $ - SourcePackage { - packageInfoId = packageId pkg, - packageDescription = pkg, - packageSource = fmap Just location, - packageDescrOverride = Nothing - } - - extractTarballPackageCabalFile :: FilePath -> String - -> IO (FilePath, BS.ByteString) - extractTarballPackageCabalFile tarballFile tarballOriginalLoc = - either (die . formatErr) return - . check - . Tar.entriesIndex - . Tar.filterEntries isCabalFile - . Tar.read - . GZipUtils.maybeDecompress - =<< BS.readFile tarballFile - where - formatErr msg = "Error reading " ++ tarballOriginalLoc ++ ": " ++ msg - - check (Left e) = Left e - check (Right m) = case Map.elems m of - [] -> Left noCabalFile - [file] -> case Tar.entryContent file of - Tar.NormalFile content _ -> Right (Tar.entryPath file, content) - _ -> Left noCabalFile - _files -> Left multipleCabalFiles - where - noCabalFile = "No cabal file found" - multipleCabalFiles = "Multiple cabal files found" - - isCabalFile e = case splitPath (Tar.entryPath e) of - [ _dir, file] -> takeExtension file == ".cabal" - [".", _dir, file] -> takeExtension file == ".cabal" - _ -> False - - parsePackageDescription' :: BS.ByteString -> Maybe GenericPackageDescription - parsePackageDescription' content = - case parsePackageDescription . fromUTF8 . 
BS.Char8.unpack $ content of - ParseOk _ pkg -> Just pkg - _ -> Nothing - - --- ------------------------------------------------------------ --- * Checking package targets --- ------------------------------------------------------------ - -data PackageTargetProblem - = PackageNameUnknown PackageName UserTarget - | PackageNameAmbigious PackageName [PackageName] UserTarget - deriving Show - - --- | Users are allowed to give package names case-insensitively, so we must --- disambiguate named package references. --- -disambiguatePackageTargets :: Package pkg' - => PackageIndex pkg' - -> [PackageName] - -> [PackageTarget pkg] - -> ( [PackageTargetProblem] - , [PackageSpecifier pkg] ) -disambiguatePackageTargets availablePkgIndex availableExtra targets = - partitionEithers (map disambiguatePackageTarget targets) - where - disambiguatePackageTarget packageTarget = case packageTarget of - PackageTargetLocation pkg -> Right (SpecificSourcePackage pkg) - - PackageTargetNamed pkgname constraints userTarget - | null (PackageIndex.lookupPackageName availablePkgIndex pkgname) - -> Left (PackageNameUnknown pkgname userTarget) - | otherwise -> Right (NamedPackage pkgname constraints) - - PackageTargetNamedFuzzy pkgname constraints userTarget -> - case disambiguatePackageName packageNameEnv pkgname of - None -> Left (PackageNameUnknown - pkgname userTarget) - Ambiguous pkgnames -> Left (PackageNameAmbigious - pkgname pkgnames userTarget) - Unambiguous pkgname' -> Right (NamedPackage pkgname' constraints') - where - constraints' = map (renamePackageConstraint pkgname') constraints - - -- use any extra specific available packages to help us disambiguate - packageNameEnv :: PackageNameEnv - packageNameEnv = mappend (indexPackageNameEnv availablePkgIndex) - (extraPackageNameEnv availableExtra) - - --- | Report problems to the user. That is, if there are any problems --- then raise an exception. 
-reportPackageTargetProblems :: Verbosity - -> [PackageTargetProblem] -> IO () -reportPackageTargetProblems verbosity problems = do - case [ pkg | PackageNameUnknown pkg originalTarget <- problems - , not (isUserTagetWorld originalTarget) ] of - [] -> return () - pkgs -> die $ unlines - [ "There is no package named '" ++ display name ++ "'. " - | name <- pkgs ] - ++ "You may need to run 'cabal update' to get the latest " - ++ "list of available packages." - - case [ (pkg, matches) | PackageNameAmbigious pkg matches _ <- problems ] of - [] -> return () - ambiguities -> die $ unlines - [ "The package name '" ++ display name - ++ "' is ambigious. It could be: " - ++ intercalate ", " (map display matches) - | (name, matches) <- ambiguities ] - - case [ pkg | PackageNameUnknown pkg UserTargetWorld <- problems ] of - [] -> return () - pkgs -> warn verbosity $ - "The following 'world' packages will be ignored because " - ++ "they refer to packages that cannot be found: " - ++ intercalate ", " (map display pkgs) ++ "\n" - ++ "You can suppress this warning by correcting the world file." - where - isUserTagetWorld UserTargetWorld = True; isUserTagetWorld _ = False - - --- ------------------------------------------------------------ --- * Disambiguating package names --- ------------------------------------------------------------ - -data MaybeAmbigious a = None | Unambiguous a | Ambiguous [a] - --- | Given a package name and a list of matching names, figure out which one it --- might be referring to. If there is an exact case-sensitive match then that's --- ok. If it matches just one package case-insensitively then that's also ok. --- The only problem is if it matches multiple packages case-insensitively, in --- that case it is ambigious. 
--- -disambiguatePackageName :: PackageNameEnv - -> PackageName - -> MaybeAmbigious PackageName -disambiguatePackageName (PackageNameEnv pkgNameLookup) name = - case nub (pkgNameLookup name) of - [] -> None - [name'] -> Unambiguous name' - names -> case find (name==) names of - Just name' -> Unambiguous name' - Nothing -> Ambiguous names - - -newtype PackageNameEnv = PackageNameEnv (PackageName -> [PackageName]) - -instance Monoid PackageNameEnv where - mempty = PackageNameEnv (const []) - mappend (PackageNameEnv lookupA) (PackageNameEnv lookupB) = - PackageNameEnv (\name -> lookupA name ++ lookupB name) - -indexPackageNameEnv :: Package pkg => PackageIndex pkg -> PackageNameEnv -indexPackageNameEnv pkgIndex = PackageNameEnv pkgNameLookup - where - pkgNameLookup (PackageName name) = - map fst (PackageIndex.searchByName pkgIndex name) - -extraPackageNameEnv :: [PackageName] -> PackageNameEnv -extraPackageNameEnv names = PackageNameEnv pkgNameLookup - where - pkgNameLookup (PackageName name) = - [ PackageName name' - | let lname = lowercase name - , PackageName name' <- names - , lowercase name' == lname ] - - --- ------------------------------------------------------------ --- * Package constraints --- ------------------------------------------------------------ - -data UserConstraint = - UserConstraintVersion PackageName VersionRange - | UserConstraintInstalled PackageName - | UserConstraintSource PackageName - | UserConstraintFlags PackageName FlagAssignment - | UserConstraintStanzas PackageName [OptionalStanza] - deriving (Show,Eq) - - -userToPackageConstraint :: UserConstraint -> PackageConstraint --- At the moment, the types happen to be directly equivalent -userToPackageConstraint uc = case uc of - UserConstraintVersion name ver -> PackageConstraintVersion name ver - UserConstraintInstalled name -> PackageConstraintInstalled name - UserConstraintSource name -> PackageConstraintSource name - UserConstraintFlags name flags -> PackageConstraintFlags name flags - 
UserConstraintStanzas name stanzas -> PackageConstraintStanzas name stanzas - -renamePackageConstraint :: PackageName -> PackageConstraint -> PackageConstraint -renamePackageConstraint name pc = case pc of - PackageConstraintVersion _ ver -> PackageConstraintVersion name ver - PackageConstraintInstalled _ -> PackageConstraintInstalled name - PackageConstraintSource _ -> PackageConstraintSource name - PackageConstraintFlags _ flags -> PackageConstraintFlags name flags - PackageConstraintStanzas _ stanzas -> PackageConstraintStanzas name stanzas - -readUserConstraint :: String -> Either String UserConstraint -readUserConstraint str = - case readPToMaybe parse str of - Nothing -> Left msgCannotParse - Just c -> Right c - where - msgCannotParse = - "expected a package name followed by a constraint, which is " - ++ "either a version range, 'installed', 'source' or flags" - ---FIXME: use Text instance for FlagName and FlagAssignment -instance Text UserConstraint where - disp (UserConstraintVersion pkgname verrange) = disp pkgname - <+> disp verrange - disp (UserConstraintInstalled pkgname) = disp pkgname - <+> Disp.text "installed" - disp (UserConstraintSource pkgname) = disp pkgname - <+> Disp.text "source" - disp (UserConstraintFlags pkgname flags) = disp pkgname - <+> dispFlagAssignment flags - where - dispFlagAssignment = Disp.hsep . map dispFlagValue - dispFlagValue (f, True) = Disp.char '+' <> dispFlagName f - dispFlagValue (f, False) = Disp.char '-' <> dispFlagName f - dispFlagName (FlagName f) = Disp.text f - - disp (UserConstraintStanzas pkgname stanzas) = disp pkgname - <+> dispStanzas stanzas - where - dispStanzas = Disp.hsep . map dispStanza - dispStanza TestStanzas = Disp.text "test" - dispStanza BenchStanzas = Disp.text "bench" - - parse = parse >>= parseConstraint - where - spaces = Parse.satisfy isSpace >> Parse.skipSpaces - - parseConstraint pkgname = - ((parse >>= return . 
UserConstraintVersion pkgname) - +++ (do spaces - _ <- Parse.string "installed" - return (UserConstraintInstalled pkgname)) - +++ (do spaces - _ <- Parse.string "source" - return (UserConstraintSource pkgname)) - +++ (do spaces - _ <- Parse.string "test" - return (UserConstraintStanzas pkgname [TestStanzas])) - +++ (do spaces - _ <- Parse.string "bench" - return (UserConstraintStanzas pkgname [BenchStanzas]))) - <++ (parseFlagAssignment >>= (return . UserConstraintFlags pkgname)) - - parseFlagAssignment = Parse.many1 (spaces >> parseFlagValue) - parseFlagValue = - (do Parse.optional (Parse.char '+') - f <- parseFlagName - return (f, True)) - +++ (do _ <- Parse.char '-' - f <- parseFlagName - return (f, False)) - parseFlagName = liftM FlagName ident - - ident :: Parse.ReadP r String - ident = Parse.munch1 identChar >>= \s -> check s >> return s - where - identChar c = isAlphaNum c || c == '_' || c == '-' - check ('-':_) = Parse.pfail - check _ = return () diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Tar.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Tar.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Tar.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Tar.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,951 +0,0 @@ -{-# LANGUAGE DeriveFunctor #-} -{-# OPTIONS_GHC -fno-warn-unused-imports #-} ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.Tar --- Copyright : (c) 2007 Bjorn Bringert, --- 2008 Andrea Vezzosi, --- 2008-2009 Duncan Coutts --- License : BSD3 --- --- Maintainer : duncan@community.haskell.org --- Portability : portable --- --- Reading, writing and manipulating \"@.tar@\" archive files. 
--- ------------------------------------------------------------------------------ -module Distribution.Client.Tar ( - -- * High level \"all in one\" operations - createTarGzFile, - extractTarGzFile, - - -- * Converting between internal and external representation - read, - write, - writeEntries, - - -- * Packing and unpacking files to\/from internal representation - pack, - unpack, - - -- * Tar entry and associated types - Entry(..), - entryPath, - EntryContent(..), - Ownership(..), - FileSize, - Permissions, - EpochTime, - DevMajor, - DevMinor, - TypeCode, - Format(..), - buildTreeRefTypeCode, - buildTreeSnapshotTypeCode, - isBuildTreeRefTypeCode, - entrySizeInBlocks, - entrySizeInBytes, - - -- * Constructing simple entry values - simpleEntry, - fileEntry, - directoryEntry, - - -- * TarPath type - TarPath, - toTarPath, - fromTarPath, - - -- ** Sequences of tar entries - Entries(..), - foldrEntries, - foldlEntries, - unfoldrEntries, - mapEntries, - filterEntries, - entriesIndex, - - ) where - -import Data.Char (ord) -import Data.Int (Int64) -import Data.Bits (Bits, shiftL, testBit) -import Data.List (foldl') -import Numeric (readOct, showOct) -import Control.Applicative (Applicative(..)) -import Control.Monad (MonadPlus(mplus), when, ap, liftM) -import qualified Data.Map as Map -import qualified Data.ByteString.Lazy as BS -import qualified Data.ByteString.Lazy.Char8 as BS.Char8 -import Data.ByteString.Lazy (ByteString) -import qualified Codec.Compression.GZip as GZip -import qualified Distribution.Client.GZipUtils as GZipUtils - -import System.FilePath - ( () ) -import qualified System.FilePath as FilePath.Native -import qualified System.FilePath.Windows as FilePath.Windows -import qualified System.FilePath.Posix as FilePath.Posix -import System.Directory - ( getDirectoryContents, doesDirectoryExist - , getPermissions, createDirectoryIfMissing, copyFile ) -import qualified System.Directory as Permissions - ( Permissions(executable) ) -import 
Distribution.Client.Compat.FilePerms - ( setFileExecutable ) -import System.Posix.Types - ( FileMode ) -import Distribution.Client.Compat.Time - ( EpochTime, getModTime ) -import System.IO - ( IOMode(ReadMode), openBinaryFile, hFileSize ) -import System.IO.Unsafe (unsafeInterleaveIO) - -import Prelude hiding (read) - - --- --- * High level operations --- - -createTarGzFile :: FilePath -- ^ Full Tarball path - -> FilePath -- ^ Base directory - -> FilePath -- ^ Directory to archive, relative to base dir - -> IO () -createTarGzFile tar base dir = - BS.writeFile tar . GZip.compress . write =<< pack base [dir] - -extractTarGzFile :: FilePath -- ^ Destination directory - -> FilePath -- ^ Expected subdir (to check for tarbombs) - -> FilePath -- ^ Tarball - -> IO () -extractTarGzFile dir expected tar = - unpack dir . checkTarbomb expected . read - . GZipUtils.maybeDecompress =<< BS.readFile tar - --- --- * Entry type --- - -type FileSize = Int64 -type DevMajor = Int -type DevMinor = Int -type TypeCode = Char -type Permissions = FileMode - --- | Tar archive entry. --- -data Entry = Entry { - - -- | The path of the file or directory within the archive. This is in a - -- tar-specific form. Use 'entryPath' to get a native 'FilePath'. - entryTarPath :: !TarPath, - - -- | The real content of the entry. For 'NormalFile' this includes the - -- file data. An entry usually contains a 'NormalFile' or a 'Directory'. - entryContent :: !EntryContent, - - -- | File permissions (Unix style file mode). - entryPermissions :: !Permissions, - - -- | The user and group to which this file belongs. - entryOwnership :: !Ownership, - - -- | The time the file was last modified. - entryTime :: !EpochTime, - - -- | The tar format the archive is using. - entryFormat :: !Format - } - --- | Type code for the local build tree reference entry type. We don't use the --- symbolic link entry type because it allows only 100 ASCII characters for the --- path. 
-buildTreeRefTypeCode :: TypeCode -buildTreeRefTypeCode = 'C' - --- | Type code for the local build tree snapshot entry type. -buildTreeSnapshotTypeCode :: TypeCode -buildTreeSnapshotTypeCode = 'S' - --- | Is this a type code for a build tree reference? -isBuildTreeRefTypeCode :: TypeCode -> Bool -isBuildTreeRefTypeCode typeCode - | (typeCode == buildTreeRefTypeCode - || typeCode == buildTreeSnapshotTypeCode) = True - | otherwise = False - --- | Native 'FilePath' of the file or directory within the archive. --- -entryPath :: Entry -> FilePath -entryPath = fromTarPath . entryTarPath - --- | Return the size of an entry in bytes. -entrySizeInBytes :: Entry -> FileSize -entrySizeInBytes = (*512) . fromIntegral . entrySizeInBlocks - --- | Return the number of blocks in an entry. -entrySizeInBlocks :: Entry -> Int -entrySizeInBlocks entry = 1 + case entryContent entry of - NormalFile _ size -> bytesToBlocks size - OtherEntryType _ _ size -> bytesToBlocks size - _ -> 0 - where - bytesToBlocks s = 1 + ((fromIntegral s - 1) `div` 512) - --- | The content of a tar archive entry, which depends on the type of entry. --- --- Portable archives should contain only 'NormalFile' and 'Directory'. --- -data EntryContent = NormalFile ByteString !FileSize - | Directory - | SymbolicLink !LinkTarget - | HardLink !LinkTarget - | CharacterDevice !DevMajor !DevMinor - | BlockDevice !DevMajor !DevMinor - | NamedPipe - | OtherEntryType !TypeCode ByteString !FileSize - -data Ownership = Ownership { - -- | The owner user name. Should be set to @\"\"@ if unknown. - ownerName :: String, - - -- | The owner group name. Should be set to @\"\"@ if unknown. - groupName :: String, - - -- | Numeric owner user id. Should be set to @0@ if unknown. - ownerId :: !Int, - - -- | Numeric owner group id. Should be set to @0@ if unknown. - groupId :: !Int - } - --- | There have been a number of extensions to the tar file format over the --- years. 
They all share the basic entry fields and put more meta-data in --- different extended headers. --- -data Format = - - -- | This is the classic Unix V7 tar format. It does not support owner and - -- group names, just numeric Ids. It also does not support device numbers. - V7Format - - -- | The \"USTAR\" format is an extension of the classic V7 format. It was - -- later standardised by POSIX. It has some restrictions but is the most - -- portable format. - -- - | UstarFormat - - -- | The GNU tar implementation also extends the classic V7 format, though - -- in a slightly different way from the USTAR format. In general for new - -- archives the standard USTAR/POSIX should be used. - -- - | GnuFormat - deriving Eq - --- | @rw-r--r--@ for normal files -ordinaryFilePermissions :: Permissions -ordinaryFilePermissions = 0o0644 - --- | @rwxr-xr-x@ for executable files -executableFilePermissions :: Permissions -executableFilePermissions = 0o0755 - --- | @rwxr-xr-x@ for directories -directoryPermissions :: Permissions -directoryPermissions = 0o0755 - -isExecutable :: Permissions -> Bool -isExecutable p = testBit p 0 || testBit p 6 -- user or other executable - --- | An 'Entry' with all default values except for the file name and type. It --- uses the portable USTAR/POSIX format (see 'UstarHeader'). --- --- You can use this as a basis and override specific fields, eg: --- --- > (emptyEntry name HardLink) { linkTarget = target } --- -simpleEntry :: TarPath -> EntryContent -> Entry -simpleEntry tarpath content = Entry { - entryTarPath = tarpath, - entryContent = content, - entryPermissions = case content of - Directory -> directoryPermissions - _ -> ordinaryFilePermissions, - entryOwnership = Ownership "" "" 0 0, - entryTime = 0, - entryFormat = UstarFormat - } - --- | A tar 'Entry' for a file. --- --- Entry fields such as file permissions and ownership have default values. --- --- You can use this as a basis and override specific fields. 
For example if you --- need an executable file you could use: --- --- > (fileEntry name content) { fileMode = executableFileMode } --- -fileEntry :: TarPath -> ByteString -> Entry -fileEntry name fileContent = - simpleEntry name (NormalFile fileContent (BS.length fileContent)) - --- | A tar 'Entry' for a directory. --- --- Entry fields such as file permissions and ownership have default values. --- -directoryEntry :: TarPath -> Entry -directoryEntry name = simpleEntry name Directory - --- --- * Tar paths --- - --- | The classic tar format allowed just 100 characters for the file name. The --- USTAR format extended this with an extra 155 characters, however it uses a --- complex method of splitting the name between the two sections. --- --- Instead of just putting any overflow into the extended area, it uses the --- extended area as a prefix. The aggravating insane bit however is that the --- prefix (if any) must only contain a directory prefix. That is the split --- between the two areas must be on a directory separator boundary. So there is --- no simple calculation to work out if a file name is too long. Instead we --- have to try to find a valid split that makes the name fit in the two areas. --- --- The rationale presumably was to make it a bit more compatible with old tar --- programs that only understand the classic format. A classic tar would be --- able to extract the file name and possibly some dir prefix, but not the --- full dir prefix. So the files would end up in the wrong place, but that's --- probably better than ending up with the wrong names too. --- --- So it's understandable but rather annoying. --- --- * Tar paths use POSIX format (ie @\'/\'@ directory separators), irrespective --- of the local path conventions. --- --- * The directory separator between the prefix and name is /not/ stored. --- -data TarPath = TarPath FilePath -- path name, 100 characters max. - FilePath -- path prefix, 155 characters max. 
- deriving (Eq, Ord) - --- | Convert a 'TarPath' to a native 'FilePath'. --- --- The native 'FilePath' will use the native directory separator but it is not --- otherwise checked for validity or sanity. In particular: --- --- * The tar path may be invalid as a native path, eg the filename @\"nul\"@ is --- not valid on Windows. --- --- * The tar path may be an absolute path or may contain @\"..\"@ components. --- For security reasons this should not usually be allowed, but it is your --- responsibility to check for these conditions (eg using 'checkSecurity'). --- -fromTarPath :: TarPath -> FilePath -fromTarPath (TarPath name prefix) = adjustDirectory $ - FilePath.Native.joinPath $ FilePath.Posix.splitDirectories prefix - ++ FilePath.Posix.splitDirectories name - where - adjustDirectory | FilePath.Posix.hasTrailingPathSeparator name - = FilePath.Native.addTrailingPathSeparator - | otherwise = id - --- | Convert a native 'FilePath' to a 'TarPath'. --- --- The conversion may fail if the 'FilePath' is too long. See 'TarPath' for a --- description of the problem with splitting long 'FilePath's. --- -toTarPath :: Bool -- ^ Is the path for a directory? This is needed because for - -- directories a 'TarPath' must always use a trailing @\/@. - -> FilePath -> Either String TarPath -toTarPath isDir = splitLongPath - . addTrailingSep - . FilePath.Posix.joinPath - . FilePath.Native.splitDirectories - where - addTrailingSep | isDir = FilePath.Posix.addTrailingPathSeparator - | otherwise = id - --- | Take a sanitized path, split on directory separators and try to pack it --- into the 155 + 100 tar file name format. --- --- The strategy is this: take the name-directory components in reverse order --- and try to fit as many components into the 100 long name area as possible. --- If all the remaining components fit in the 155 name area then we win. 
--- -splitLongPath :: FilePath -> Either String TarPath -splitLongPath path = - case packName nameMax (reverse (FilePath.Posix.splitPath path)) of - Left err -> Left err - Right (name, []) -> Right (TarPath name "") - Right (name, first:rest) -> case packName prefixMax remainder of - Left err -> Left err - Right (_ , _ : _) -> Left "File name too long (cannot split)" - Right (prefix, []) -> Right (TarPath name prefix) - where - -- drop the '/' between the name and prefix: - remainder = init first : rest - - where - nameMax, prefixMax :: Int - nameMax = 100 - prefixMax = 155 - - packName _ [] = Left "File name empty" - packName maxLen (c:cs) - | n > maxLen = Left "File name too long" - | otherwise = Right (packName' maxLen n [c] cs) - where n = length c - - packName' maxLen n ok (c:cs) - | n' <= maxLen = packName' maxLen n' (c:ok) cs - where n' = n + length c - packName' _ _ ok cs = (FilePath.Posix.joinPath ok, cs) - --- | The tar format allows just 100 ASCII characters for the 'SymbolicLink' and --- 'HardLink' entry types. --- -newtype LinkTarget = LinkTarget FilePath - deriving (Eq, Ord) - --- | Convert a tar 'LinkTarget' to a native 'FilePath'. --- -fromLinkTarget :: LinkTarget -> FilePath -fromLinkTarget (LinkTarget path) = adjustDirectory $ - FilePath.Native.joinPath $ FilePath.Posix.splitDirectories path - where - adjustDirectory | FilePath.Posix.hasTrailingPathSeparator path - = FilePath.Native.addTrailingPathSeparator - | otherwise = id - --- --- * Entries type --- - --- | A tar archive is a sequence of entries. 
-data Entries = Next Entry Entries - | Done - | Fail String - -unfoldrEntries :: (a -> Either String (Maybe (Entry, a))) -> a -> Entries -unfoldrEntries f = unfold - where - unfold x = case f x of - Left err -> Fail err - Right Nothing -> Done - Right (Just (e, x')) -> Next e (unfold x') - -foldrEntries :: (Entry -> a -> a) -> a -> (String -> a) -> Entries -> a -foldrEntries next done fail' = fold - where - fold (Next e es) = next e (fold es) - fold Done = done - fold (Fail err) = fail' err - -foldlEntries :: (a -> Entry -> a) -> a -> Entries -> Either String a -foldlEntries f = fold - where - fold a (Next e es) = (fold $! f a e) es - fold a Done = Right a - fold _ (Fail err) = Left err - -mapEntries :: (Entry -> Entry) -> Entries -> Entries -mapEntries f = foldrEntries (Next . f) Done Fail - -filterEntries :: (Entry -> Bool) -> Entries -> Entries -filterEntries p = - foldrEntries - (\entry rest -> if p entry - then Next entry rest - else rest) - Done Fail - -checkEntries :: (Entry -> Maybe String) -> Entries -> Entries -checkEntries checkEntry = - foldrEntries - (\entry rest -> case checkEntry entry of - Nothing -> Next entry rest - Just err -> Fail err) - Done Fail - -entriesIndex :: Entries -> Either String (Map.Map TarPath Entry) -entriesIndex = foldlEntries (\m e -> Map.insert (entryTarPath e) e m) Map.empty - --- --- * Checking --- - --- | This function checks a sequence of tar entries for file name security --- problems. It checks that: --- --- * file paths are not absolute --- --- * file paths do not contain any path components that are \"@..@\" --- --- * file names are valid --- --- These checks are from the perspective of the current OS. That means we check --- for \"@C:\blah@\" files on Windows and \"\/blah\" files on Unix. For archive --- entry types 'HardLink' and 'SymbolicLink' the same checks are done for the --- link target. A failure in any entry terminates the sequence of entries with --- an error. 
--- -checkSecurity :: Entries -> Entries -checkSecurity = checkEntries checkEntrySecurity - -checkTarbomb :: FilePath -> Entries -> Entries -checkTarbomb expectedTopDir = checkEntries (checkEntryTarbomb expectedTopDir) - -checkEntrySecurity :: Entry -> Maybe String -checkEntrySecurity entry = case entryContent entry of - HardLink link -> check (entryPath entry) - `mplus` check (fromLinkTarget link) - SymbolicLink link -> check (entryPath entry) - `mplus` check (fromLinkTarget link) - _ -> check (entryPath entry) - - where - check name - | not (FilePath.Native.isRelative name) - = Just $ "Absolute file name in tar archive: " ++ show name - - | not (FilePath.Native.isValid name) - = Just $ "Invalid file name in tar archive: " ++ show name - - | ".." `elem` FilePath.Native.splitDirectories name - = Just $ "Invalid file name in tar archive: " ++ show name - - | otherwise = Nothing - -checkEntryTarbomb :: FilePath -> Entry -> Maybe String -checkEntryTarbomb _ entry | nonFilesystemEntry = Nothing - where - -- Ignore some special entries we will not unpack anyway - nonFilesystemEntry = - case entryContent entry of - OtherEntryType 'g' _ _ -> True --PAX global header - OtherEntryType 'x' _ _ -> True --PAX individual header - _ -> False - -checkEntryTarbomb expectedTopDir entry = - case FilePath.Native.splitDirectories (entryPath entry) of - (topDir:_) | topDir == expectedTopDir -> Nothing - s -> Just $ "File in tar archive is not in the expected directory. " - ++ "Expected: " ++ show expectedTopDir - ++ " but got the following hierarchy: " - ++ show s - - --- --- * Reading --- - -read :: ByteString -> Entries -read = unfoldrEntries getEntry - -getEntry :: ByteString -> Either String (Maybe (Entry, ByteString)) -getEntry bs - | BS.length header < 512 = Left "truncated tar archive" - - -- Tar files end with at least two blocks of all '0'. Checking this serves - -- two purposes. 
It checks the format but also forces the tail of the data - -- which is necessary to close the file if it came from a lazily read file. - | BS.head bs == 0 = case BS.splitAt 1024 bs of - (end, trailing) - | BS.length end /= 1024 -> Left "short tar trailer" - | not (BS.all (== 0) end) -> Left "bad tar trailer" - | not (BS.all (== 0) trailing) -> Left "tar file has trailing junk" - | otherwise -> Right Nothing - - | otherwise = partial $ do - - case (chksum_, format_) of - (Ok chksum, _ ) | correctChecksum header chksum -> return () - (Ok _, Ok _) -> fail "tar checksum error" - _ -> fail "data is not in tar format" - - -- These fields are partial, have to check them - format <- format_; mode <- mode_; - uid <- uid_; gid <- gid_; - size <- size_; mtime <- mtime_; - devmajor <- devmajor_; devminor <- devminor_; - - let content = BS.take size (BS.drop 512 bs) - padding = (512 - size) `mod` 512 - bs' = BS.drop (512 + size + padding) bs - - entry = Entry { - entryTarPath = TarPath name prefix, - entryContent = case typecode of - '\0' -> NormalFile content size - '0' -> NormalFile content size - '1' -> HardLink (LinkTarget linkname) - '2' -> SymbolicLink (LinkTarget linkname) - '3' -> CharacterDevice devmajor devminor - '4' -> BlockDevice devmajor devminor - '5' -> Directory - '6' -> NamedPipe - '7' -> NormalFile content size - _ -> OtherEntryType typecode content size, - entryPermissions = mode, - entryOwnership = Ownership uname gname uid gid, - entryTime = mtime, - entryFormat = format - } - - return (Just (entry, bs')) - - where - header = BS.take 512 bs - - name = getString 0 100 header - mode_ = getOct 100 8 header - uid_ = getOct 108 8 header - gid_ = getOct 116 8 header - size_ = getOct 124 12 header - mtime_ = getOct 136 12 header - chksum_ = getOct 148 8 header - typecode = getByte 156 header - linkname = getString 157 100 header - magic = getChars 257 8 header - uname = getString 265 32 header - gname = getString 297 32 header - devmajor_ = getOct 329 8 header - 
devminor_ = getOct 337 8 header - prefix = getString 345 155 header --- trailing = getBytes 500 12 header - - format_ = case magic of - "\0\0\0\0\0\0\0\0" -> return V7Format - "ustar\NUL00" -> return UstarFormat - "ustar \NUL" -> return GnuFormat - _ -> fail "tar entry not in a recognised format" - -correctChecksum :: ByteString -> Int -> Bool -correctChecksum header checksum = checksum == checksum' - where - -- sum of all 512 bytes in the header block, - -- treating each byte as an 8-bit unsigned value - checksum' = BS.Char8.foldl' (\x y -> x + ord y) 0 header' - -- treating the 8 bytes of chksum as blank characters. - header' = BS.concat [BS.take 148 header, - BS.Char8.replicate 8 ' ', - BS.drop 156 header] - --- * TAR format primitive input - -getOct :: (Integral a, Bits a) => Int64 -> Int64 -> ByteString -> Partial a -getOct off len header - | BS.head bytes == 128 = parseBinInt (BS.unpack (BS.tail bytes)) - | null octstr = return 0 - | otherwise = case readOct octstr of - [(x,[])] -> return x - _ -> fail "tar header is malformed (bad numeric encoding)" - where - bytes = getBytes off len header - octstr = BS.Char8.unpack - . BS.Char8.takeWhile (\c -> c /= '\NUL' && c /= ' ') - . BS.Char8.dropWhile (== ' ') - $ bytes - - -- Some tar programs switch into a binary format when they try to represent - -- field values that will not fit in the required width when using the text - -- octal format. In particular, the UID/GID fields can only hold up to 2^21 - -- while in the binary format can hold up to 2^32. The binary format uses - -- '\128' as the header which leaves 7 bytes. Only the last 4 are used. - parseBinInt [0, 0, 0, byte3, byte2, byte1, byte0] = - return $! shiftL (fromIntegral byte3) 24 - + shiftL (fromIntegral byte2) 16 - + shiftL (fromIntegral byte1) 8 - + shiftL (fromIntegral byte0) 0 - parseBinInt _ = fail "tar header uses non-standard number encoding" - -getBytes :: Int64 -> Int64 -> ByteString -> ByteString -getBytes off len = BS.take len . 
BS.drop off - -getByte :: Int64 -> ByteString -> Char -getByte off bs = BS.Char8.index bs off - -getChars :: Int64 -> Int64 -> ByteString -> String -getChars off len = BS.Char8.unpack . getBytes off len - -getString :: Int64 -> Int64 -> ByteString -> String -getString off len = BS.Char8.unpack . BS.Char8.takeWhile (/='\0') - . getBytes off len - -data Partial a = Error String | Ok a - deriving Functor - -partial :: Partial a -> Either String a -partial (Error msg) = Left msg -partial (Ok x) = Right x - -instance Applicative Partial where - pure = return - (<*>) = ap - -instance Monad Partial where - return = Ok - Error m >>= _ = Error m - Ok x >>= k = k x - fail = Error - --- --- * Writing --- - --- | Create the external representation of a tar archive by serialising a list --- of tar entries. --- --- * The conversion is done lazily. --- -write :: [Entry] -> ByteString -write es = BS.concat $ map putEntry es ++ [BS.replicate (512*2) 0] - --- | Same as 'write', but for 'Entries'. -writeEntries :: Entries -> ByteString -writeEntries entries = BS.concat $ foldrEntries (\e res -> putEntry e : res) - [BS.replicate (512*2) 0] error entries - -putEntry :: Entry -> ByteString -putEntry entry = case entryContent entry of - NormalFile content size -> BS.concat [ header, content, padding size ] - OtherEntryType _ content size -> BS.concat [ header, content, padding size ] - _ -> header - where - header = putHeader entry - padding size = BS.replicate paddingSize 0 - where paddingSize = fromIntegral (negate size `mod` 512) - -putHeader :: Entry -> ByteString -putHeader entry = - BS.concat [ BS.take 148 block - , BS.Char8.pack $ putOct 7 checksum - , BS.Char8.singleton ' ' - , BS.drop 156 block ] - where - -- putHeaderNoChkSum returns a String, so we convert it to the final - -- representation before calculating the checksum. - block = BS.Char8.pack . 
putHeaderNoChkSum $ entry - checksum = BS.Char8.foldl' (\x y -> x + ord y) 0 block - -putHeaderNoChkSum :: Entry -> String -putHeaderNoChkSum Entry { - entryTarPath = TarPath name prefix, - entryContent = content, - entryPermissions = permissions, - entryOwnership = ownership, - entryTime = modTime, - entryFormat = format - } = - - concat - [ putString 100 $ name - , putOct 8 $ permissions - , putOct 8 $ ownerId ownership - , putOct 8 $ groupId ownership - , putOct 12 $ contentSize - , putOct 12 $ modTime - , fill 8 $ ' ' -- dummy checksum - , putChar8 $ typeCode - , putString 100 $ linkTarget - ] ++ - case format of - V7Format -> - fill 255 '\NUL' - UstarFormat -> concat - [ putString 8 $ "ustar\NUL00" - , putString 32 $ ownerName ownership - , putString 32 $ groupName ownership - , putOct 8 $ deviceMajor - , putOct 8 $ deviceMinor - , putString 155 $ prefix - , fill 12 $ '\NUL' - ] - GnuFormat -> concat - [ putString 8 $ "ustar \NUL" - , putString 32 $ ownerName ownership - , putString 32 $ groupName ownership - , putGnuDev 8 $ deviceMajor - , putGnuDev 8 $ deviceMinor - , putString 155 $ prefix - , fill 12 $ '\NUL' - ] - where - (typeCode, contentSize, linkTarget, - deviceMajor, deviceMinor) = case content of - NormalFile _ size -> ('0' , size, [], 0, 0) - Directory -> ('5' , 0, [], 0, 0) - SymbolicLink (LinkTarget link) -> ('2' , 0, link, 0, 0) - HardLink (LinkTarget link) -> ('1' , 0, link, 0, 0) - CharacterDevice major minor -> ('3' , 0, [], major, minor) - BlockDevice major minor -> ('4' , 0, [], major, minor) - NamedPipe -> ('6' , 0, [], 0, 0) - OtherEntryType code _ size -> (code, size, [], 0, 0) - - putGnuDev w n = case content of - CharacterDevice _ _ -> putOct w n - BlockDevice _ _ -> putOct w n - _ -> replicate w '\NUL' - --- * TAR format primitive output - -type FieldWidth = Int - -putString :: FieldWidth -> String -> String -putString n s = take n s ++ fill (n - length s) '\NUL' - ---TODO: check integer widths, eg for large file sizes -putOct :: 
(Show a, Integral a) => FieldWidth -> a -> String -putOct n x = - let octStr = take (n-1) $ showOct x "" - in fill (n - length octStr - 1) '0' - ++ octStr - ++ putChar8 '\NUL' - -putChar8 :: Char -> String -putChar8 c = [c] - -fill :: FieldWidth -> Char -> String -fill n c = replicate n c - --- --- * Unpacking --- - -unpack :: FilePath -> Entries -> IO () -unpack baseDir entries = unpackEntries [] (checkSecurity entries) - >>= emulateLinks - - where - -- We're relying here on 'checkSecurity' to make sure we're not scribbling - -- files all over the place. - - unpackEntries _ (Fail err) = fail err - unpackEntries links Done = return links - unpackEntries links (Next entry es) = case entryContent entry of - NormalFile file _ -> extractFile entry path file - >> unpackEntries links es - Directory -> extractDir path - >> unpackEntries links es - HardLink link -> (unpackEntries $! saveLink path link links) es - SymbolicLink link -> (unpackEntries $! saveLink path link links) es - _ -> unpackEntries links es --ignore other file types - where - path = entryPath entry - - extractFile entry path content = do - -- Note that tar archives do not make sure each directory is created - -- before files they contain, indeed we may have to create several - -- levels of directory. 
- createDirectoryIfMissing True absDir - BS.writeFile absPath content - when (isExecutable (entryPermissions entry)) - (setFileExecutable absPath) - where - absDir = baseDir FilePath.Native.takeDirectory path - absPath = baseDir path - - extractDir path = createDirectoryIfMissing True (baseDir path) - - saveLink path link links = seq (length path) - $ seq (length link') - $ (path, link'):links - where link' = fromLinkTarget link - - emulateLinks = mapM_ $ \(relPath, relLinkTarget) -> - let absPath = baseDir relPath - absTarget = FilePath.Native.takeDirectory absPath relLinkTarget - in copyFile absTarget absPath - --- --- * Packing --- - -pack :: FilePath -- ^ Base directory - -> [FilePath] -- ^ Files and directories to pack, relative to the base dir - -> IO [Entry] -pack baseDir paths0 = preparePaths baseDir paths0 >>= packPaths baseDir - -preparePaths :: FilePath -> [FilePath] -> IO [FilePath] -preparePaths baseDir paths = - fmap concat $ interleave - [ do isDir <- doesDirectoryExist (baseDir path) - if isDir - then do entries <- getDirectoryContentsRecursive (baseDir path) - return (FilePath.Native.addTrailingPathSeparator path - : map (path ) entries) - else return [path] - | path <- paths ] - -packPaths :: FilePath -> [FilePath] -> IO [Entry] -packPaths baseDir paths = - interleave - [ do tarpath <- either fail return (toTarPath isDir relpath) - if isDir then packDirectoryEntry filepath tarpath - else packFileEntry filepath tarpath - | relpath <- paths - , let isDir = FilePath.Native.hasTrailingPathSeparator filepath - filepath = baseDir relpath ] - -interleave :: [IO a] -> IO [a] -interleave = unsafeInterleaveIO . 
go - where - go [] = return [] - go (x:xs) = do - x' <- x - xs' <- interleave xs - return (x':xs') - -packFileEntry :: FilePath -- ^ Full path to find the file on the local disk - -> TarPath -- ^ Path to use for the tar Entry in the archive - -> IO Entry -packFileEntry filepath tarpath = do - mtime <- getModTime filepath - perms <- getPermissions filepath - file <- openBinaryFile filepath ReadMode - size <- hFileSize file - content <- BS.hGetContents file - return (simpleEntry tarpath (NormalFile content (fromIntegral size))) { - entryPermissions = if Permissions.executable perms - then executableFilePermissions - else ordinaryFilePermissions, - entryTime = mtime - } - -packDirectoryEntry :: FilePath -- ^ Full path to find the file on the local disk - -> TarPath -- ^ Path to use for the tar Entry in the archive - -> IO Entry -packDirectoryEntry filepath tarpath = do - mtime <- getModTime filepath - return (directoryEntry tarpath) { - entryTime = mtime - } - -getDirectoryContentsRecursive :: FilePath -> IO [FilePath] -getDirectoryContentsRecursive dir0 = - fmap tail (recurseDirectories dir0 [""]) - -recurseDirectories :: FilePath -> [FilePath] -> IO [FilePath] -recurseDirectories _ [] = return [] -recurseDirectories base (dir:dirs) = unsafeInterleaveIO $ do - (files, dirs') <- collect [] [] =<< getDirectoryContents (base dir) - - files' <- recurseDirectories base (dirs' ++ dirs) - return (dir : files ++ files') - - where - collect files dirs' [] = return (reverse files, reverse dirs') - collect files dirs' (entry:entries) | ignore entry - = collect files dirs' entries - collect files dirs' (entry:entries) = do - let dirEntry = dir entry - dirEntry' = FilePath.Native.addTrailingPathSeparator dirEntry - isDirectory <- doesDirectoryExist (base dirEntry) - if isDirectory - then collect files (dirEntry':dirs') entries - else collect (dirEntry:files) dirs' entries - - ignore ['.'] = True - ignore ['.', '.'] = True - ignore _ = False diff -Nru 
cabal-install-1.22-1.22.6.0/Distribution/Client/Types.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Types.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Types.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Types.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,247 +0,0 @@ -{-# LANGUAGE DeriveFunctor #-} ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.Types --- Copyright : (c) David Himmelstrup 2005 --- Duncan Coutts 2011 --- License : BSD-like --- --- Maintainer : cabal-devel@haskell.org --- Stability : provisional --- Portability : portable --- --- Various common data types for the entire cabal-install system ------------------------------------------------------------------------------ -module Distribution.Client.Types where - -import Distribution.Package - ( PackageName, PackageId, Package(..), PackageFixedDeps(..) - , mkPackageKey, PackageKey, InstalledPackageId(..) - , PackageInstalled(..) ) -import Distribution.InstalledPackageInfo - ( InstalledPackageInfo, packageKey ) -import Distribution.PackageDescription - ( Benchmark(..), GenericPackageDescription(..), FlagAssignment - , TestSuite(..) ) -import Distribution.PackageDescription.Configuration - ( mapTreeData ) -import Distribution.Client.PackageIndex - ( PackageIndex ) -import Distribution.Version - ( VersionRange ) -import Distribution.Simple.Compiler - ( Compiler, packageKeySupported ) -import Distribution.Text (display) - -import Data.Map (Map) -import Network.URI (URI) -import Data.ByteString.Lazy (ByteString) -import Control.Exception - ( SomeException ) - -newtype Username = Username { unUsername :: String } -newtype Password = Password { unPassword :: String } - --- | This is the information we get from a @00-index.tar.gz@ hackage index. 
--- -data SourcePackageDb = SourcePackageDb { - packageIndex :: PackageIndex SourcePackage, - packagePreferences :: Map PackageName VersionRange -} - --- ------------------------------------------------------------ --- * Various kinds of information about packages --- ------------------------------------------------------------ - --- | TODO: This is a hack to help us transition from Cabal-1.6 to 1.8. --- What is new in 1.8 is that installed packages and dependencies between --- installed packages are now identified by an opaque InstalledPackageId --- rather than a source PackageId. --- --- We should use simply an 'InstalledPackageInfo' here but to ease the --- transition we are temporarily using this variant where we pretend that --- installed packages still specify their deps in terms of PackageIds. --- --- Crucially this means that 'InstalledPackage' can be an instance of --- 'PackageFixedDeps' where as 'InstalledPackageInfo' is no longer an instance --- of that class. This means we can make 'PackageIndex'es of InstalledPackage --- where as the InstalledPackageInfo now has its own monomorphic index type. --- -data InstalledPackage = InstalledPackage - InstalledPackageInfo - [PackageId] - -instance Package InstalledPackage where - packageId (InstalledPackage pkg _) = packageId pkg -instance PackageFixedDeps InstalledPackage where - depends (InstalledPackage _ deps) = deps -instance PackageInstalled InstalledPackage where - installedPackageId (InstalledPackage pkg _) = installedPackageId pkg - installedDepends (InstalledPackage pkg _) = installedDepends pkg - - --- | In order to reuse the implementation of PackageIndex which relies on --- 'InstalledPackageId', we need to be able to synthesize these IDs prior --- to installation. Eventually, we'll move to a representation of --- 'InstalledPackageId' which can be properly computed before compilation --- (of course, it's a bit of a misnomer since the packages are not actually --- installed yet.) 
In any case, we'll synthesize temporary installed package --- IDs to use as keys during install planning. These should never be written --- out! Additionally, they need to be guaranteed unique within the install --- plan. -fakeInstalledPackageId :: PackageId -> InstalledPackageId -fakeInstalledPackageId = InstalledPackageId . (".fake."++) . display - --- | A 'ConfiguredPackage' is a not-yet-installed package along with the --- total configuration information. The configuration information is total in --- the sense that it provides all the configuration information and so the --- final configure process will be independent of the environment. --- -data ConfiguredPackage = ConfiguredPackage - SourcePackage -- package info, including repo - FlagAssignment -- complete flag assignment for the package - [OptionalStanza] -- list of enabled optional stanzas for the package - [PackageId] -- set of exact dependencies. These must be - -- consistent with the 'buildDepends' in the - -- 'PackageDescription' that you'd get by applying - -- the flag assignment and optional stanzas. - deriving Show - -instance Package ConfiguredPackage where - packageId (ConfiguredPackage pkg _ _ _) = packageId pkg - -instance PackageFixedDeps ConfiguredPackage where - depends (ConfiguredPackage _ _ _ deps) = deps - -instance PackageInstalled ConfiguredPackage where - installedPackageId = fakeInstalledPackageId . packageId - installedDepends = map fakeInstalledPackageId . depends - --- | Like 'ConfiguredPackage', but with all dependencies guaranteed to be --- installed already, hence itself ready to be installed. -data ReadyPackage = ReadyPackage - SourcePackage -- see 'ConfiguredPackage'. - FlagAssignment -- - [OptionalStanza] -- - [InstalledPackageInfo] -- Installed dependencies. 
- deriving Show - -instance Package ReadyPackage where - packageId (ReadyPackage pkg _ _ _) = packageId pkg - -instance PackageFixedDeps ReadyPackage where - depends (ReadyPackage _ _ _ deps) = map packageId deps - -instance PackageInstalled ReadyPackage where - installedPackageId = fakeInstalledPackageId . packageId - installedDepends (ReadyPackage _ _ _ ipis) = map installedPackageId ipis - --- | Extracts a package key from ReadyPackage, a common operation needed --- to calculate build paths. -readyPackageKey :: Compiler -> ReadyPackage -> PackageKey -readyPackageKey comp (ReadyPackage pkg _ _ deps) = - mkPackageKey (packageKeySupported comp) (packageId pkg) - (map packageKey deps) [] - - --- | Sometimes we need to convert a 'ReadyPackage' back to a --- 'ConfiguredPackage'. For example, a failed 'PlanPackage' can be *either* --- Ready or Configured. -readyPackageToConfiguredPackage :: ReadyPackage -> ConfiguredPackage -readyPackageToConfiguredPackage (ReadyPackage srcpkg flags stanzas deps) = - ConfiguredPackage srcpkg flags stanzas (map packageId deps) - --- | A package description along with the location of the package sources. --- -data SourcePackage = SourcePackage { - packageInfoId :: PackageId, - packageDescription :: GenericPackageDescription, - packageSource :: PackageLocation (Maybe FilePath), - packageDescrOverride :: PackageDescriptionOverride - } - deriving Show - --- | We sometimes need to override the .cabal file in the tarball with --- the newer one from the package index. 
-type PackageDescriptionOverride = Maybe ByteString - -instance Package SourcePackage where packageId = packageInfoId - -data OptionalStanza - = TestStanzas - | BenchStanzas - deriving (Eq, Ord, Show) - -enableStanzas - :: [OptionalStanza] - -> GenericPackageDescription - -> GenericPackageDescription -enableStanzas stanzas gpkg = gpkg - { condBenchmarks = flagBenchmarks $ condBenchmarks gpkg - , condTestSuites = flagTests $ condTestSuites gpkg - } - where - enableTest t = t { testEnabled = TestStanzas `elem` stanzas } - enableBenchmark bm = bm { benchmarkEnabled = BenchStanzas `elem` stanzas } - flagBenchmarks = map (\(n, bm) -> (n, mapTreeData enableBenchmark bm)) - flagTests = map (\(n, t) -> (n, mapTreeData enableTest t)) - --- ------------------------------------------------------------ --- * Package locations and repositories --- ------------------------------------------------------------ - -data PackageLocation local = - - -- | An unpacked package in the given dir, or current dir - LocalUnpackedPackage FilePath - - -- | A package as a tarball that's available as a local tarball - | LocalTarballPackage FilePath - - -- | A package as a tarball from a remote URI - | RemoteTarballPackage URI local - - -- | A package available as a tarball from a repository. - -- - -- It may be from a local repository or from a remote repository, with a - -- locally cached copy. 
ie a package available from hackage - | RepoTarballPackage Repo PackageId local - ---TODO: --- * add support for darcs and other SCM style remote repos with a local cache --- | ScmPackage - deriving (Show, Functor) - -data LocalRepo = LocalRepo - deriving (Show,Eq) - -data RemoteRepo = RemoteRepo { - remoteRepoName :: String, - remoteRepoURI :: URI - } - deriving (Show,Eq,Ord) - -data Repo = Repo { - repoKind :: Either RemoteRepo LocalRepo, - repoLocalDir :: FilePath - } - deriving (Show,Eq) - --- ------------------------------------------------------------ --- * Build results --- ------------------------------------------------------------ - -type BuildResult = Either BuildFailure BuildSuccess -data BuildFailure = PlanningFailed - | DependentFailed PackageId - | DownloadFailed SomeException - | UnpackFailed SomeException - | ConfigureFailed SomeException - | BuildFailed SomeException - | TestsFailed SomeException - | InstallFailed SomeException -data BuildSuccess = BuildOk DocsResult TestsResult - (Maybe InstalledPackageInfo) - -data DocsResult = DocsNotTried | DocsFailed | DocsOk -data TestsResult = TestsNotTried | TestsOk diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Update.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Update.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Update.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Update.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,55 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.Update --- Copyright : (c) David Himmelstrup 2005 --- License : BSD-like --- --- Maintainer : lemmih@gmail.com --- Stability : provisional --- Portability : portable --- --- ------------------------------------------------------------------------------ -module Distribution.Client.Update - ( update - ) where - -import Distribution.Client.Types - ( Repo(..), RemoteRepo(..), LocalRepo(..) 
) -import Distribution.Client.HttpUtils - ( DownloadResult(..) ) -import Distribution.Client.FetchUtils - ( downloadIndex ) -import Distribution.Client.IndexUtils - ( updateRepoIndexCache ) - -import Distribution.Simple.Utils - ( writeFileAtomic, warn, notice ) -import Distribution.Verbosity - ( Verbosity ) - -import qualified Data.ByteString.Lazy as BS -import Distribution.Client.GZipUtils (maybeDecompress) -import System.FilePath (dropExtension) - --- | 'update' downloads the package list from all known servers -update :: Verbosity -> [Repo] -> IO () -update verbosity [] = - warn verbosity $ "No remote package servers have been specified. Usually " - ++ "you would have one specified in the config file." -update verbosity repos = do - mapM_ (updateRepo verbosity) repos - -updateRepo :: Verbosity -> Repo -> IO () -updateRepo verbosity repo = case repoKind repo of - Right LocalRepo -> return () - Left remoteRepo -> do - notice verbosity $ "Downloading the latest package list from " - ++ remoteRepoName remoteRepo - downloadResult <- downloadIndex verbosity remoteRepo (repoLocalDir repo) - case downloadResult of - FileAlreadyInCache -> return () - FileDownloaded indexPath -> do - writeFileAtomic (dropExtension indexPath) . maybeDecompress - =<< BS.readFile indexPath - updateRepoIndexCache verbosity repo diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Upload.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Upload.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Upload.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Upload.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,173 +0,0 @@ --- This is a quick hack for uploading packages to Hackage. 
--- See http://hackage.haskell.org/trac/hackage/wiki/CabalUpload - -module Distribution.Client.Upload (check, upload, report) where - -import qualified Data.ByteString.Lazy.Char8 as B (concat, length, pack, readFile, unpack) -import Data.ByteString.Lazy.Char8 (ByteString) - -import Distribution.Client.Types (Username(..), Password(..),Repo(..),RemoteRepo(..)) -import Distribution.Client.HttpUtils (isOldHackageURI, cabalBrowse) - -import Distribution.Simple.Utils (debug, notice, warn, info) -import Distribution.Verbosity (Verbosity) -import Distribution.Text (display) -import Distribution.Client.Config - -import qualified Distribution.Client.BuildReports.Anonymous as BuildReport -import qualified Distribution.Client.BuildReports.Upload as BuildReport - -import Network.Browser - ( request ) -import Network.HTTP - ( Header(..), HeaderName(..), findHeader - , Request(..), RequestMethod(..), Response(..) ) -import Network.URI (URI(uriPath), parseURI) - -import Data.Char (intToDigit) -import Numeric (showHex) -import System.IO (hFlush, stdin, stdout, hGetEcho, hSetEcho) -import Control.Exception (bracket) -import System.Random (randomRIO) -import System.FilePath ((), takeExtension, takeFileName) -import qualified System.FilePath.Posix as FilePath.Posix (combine) -import System.Directory -import Control.Monad (forM_, when) - - ---FIXME: how do we find this path for an arbitrary hackage server? --- is it always at some fixed location relative to the server root? 
-legacyUploadURI :: URI -Just legacyUploadURI = parseURI "http://hackage.haskell.org/cgi-bin/hackage-scripts/protected/upload-pkg" - -checkURI :: URI -Just checkURI = parseURI "http://hackage.haskell.org/cgi-bin/hackage-scripts/check-pkg" - - -upload :: Verbosity -> [Repo] -> Maybe Username -> Maybe Password -> [FilePath] -> IO () -upload verbosity repos mUsername mPassword paths = do - let uploadURI = if isOldHackageURI targetRepoURI - then legacyUploadURI - else targetRepoURI{uriPath = uriPath targetRepoURI `FilePath.Posix.combine` "upload"} - Username username <- maybe promptUsername return mUsername - Password password <- maybe promptPassword return mPassword - let auth = Just (username, password) - flip mapM_ paths $ \path -> do - notice verbosity $ "Uploading " ++ path ++ "... " - handlePackage verbosity uploadURI auth path - where - targetRepoURI = remoteRepoURI $ last [ remoteRepo | Left remoteRepo <- map repoKind repos ] --FIXME: better error message when no repos are given - -promptUsername :: IO Username -promptUsername = do - putStr "Hackage username: " - hFlush stdout - fmap Username getLine - -promptPassword :: IO Password -promptPassword = do - putStr "Hackage password: " - hFlush stdout - -- save/restore the terminal echoing status - passwd <- bracket (hGetEcho stdin) (hSetEcho stdin) $ \_ -> do - hSetEcho stdin False -- no echoing for entering the password - fmap Password getLine - putStrLn "" - return passwd - -report :: Verbosity -> [Repo] -> Maybe Username -> Maybe Password -> IO () -report verbosity repos mUsername mPassword = do - Username username <- maybe promptUsername return mUsername - Password password <- maybe promptPassword return mPassword - let auth = Just (username, password) - forM_ repos $ \repo -> case repoKind repo of - Left remoteRepo - -> do dotCabal <- defaultCabalDir - let srcDir = dotCabal "reports" remoteRepoName remoteRepo - -- We don't want to bomb out just because we haven't built any packages from this repo yet - 
srcExists <- doesDirectoryExist srcDir - when srcExists $ do - contents <- getDirectoryContents srcDir - forM_ (filter (\c -> takeExtension c == ".log") contents) $ \logFile -> - do inp <- readFile (srcDir logFile) - let (reportStr, buildLog) = read inp :: (String,String) - case BuildReport.parse reportStr of - Left errs -> do warn verbosity $ "Errors: " ++ errs -- FIXME - Right report' -> - do info verbosity $ "Uploading report for " ++ display (BuildReport.package report') - cabalBrowse verbosity auth $ BuildReport.uploadReports (remoteRepoURI remoteRepo) [(report', Just buildLog)] - return () - Right{} -> return () - -check :: Verbosity -> [FilePath] -> IO () -check verbosity paths = do - flip mapM_ paths $ \path -> do - notice verbosity $ "Checking " ++ path ++ "... " - handlePackage verbosity checkURI Nothing path - -handlePackage :: Verbosity -> URI -> Maybe (String, String) - -> FilePath -> IO () -handlePackage verbosity uri auth path = - do req <- mkRequest uri path - debug verbosity $ "\n" ++ show req - (_,resp) <- cabalBrowse verbosity auth $ request req - debug verbosity $ show resp - case rspCode resp of - (2,0,0) -> do notice verbosity "Ok" - (x,y,z) -> do notice verbosity $ "Error: " ++ path ++ ": " - ++ map intToDigit [x,y,z] ++ " " - ++ rspReason resp - case findHeader HdrContentType resp of - Just contenttype - | takeWhile (/= ';') contenttype == "text/plain" - -> notice verbosity $ B.unpack $ rspBody resp - _ -> debug verbosity $ B.unpack $ rspBody resp - -mkRequest :: URI -> FilePath -> IO (Request ByteString) -mkRequest uri path = - do pkg <- readBinaryFile path - boundary <- genBoundary - let body = printMultiPart (B.pack boundary) (mkFormData path pkg) - return $ Request { - rqURI = uri, - rqMethod = POST, - rqHeaders = [Header HdrContentType ("multipart/form-data; boundary="++boundary), - Header HdrContentLength (show (B.length body)), - Header HdrAccept ("text/plain")], - rqBody = body - } - -readBinaryFile :: FilePath -> IO ByteString 
-readBinaryFile = B.readFile - -genBoundary :: IO String -genBoundary = do i <- randomRIO (0x10000000000000,0xFFFFFFFFFFFFFF) :: IO Integer - return $ showHex i "" - -mkFormData :: FilePath -> ByteString -> [BodyPart] -mkFormData path pkg = - -- yes, web browsers are that stupid (re quoting) - [BodyPart [Header hdrContentDisposition $ - "form-data; name=package; filename=\""++takeFileName path++"\"", - Header HdrContentType "application/x-gzip"] - pkg] - -hdrContentDisposition :: HeaderName -hdrContentDisposition = HdrCustom "Content-disposition" - --- * Multipart, partly stolen from the cgi package. - -data BodyPart = BodyPart [Header] ByteString - -printMultiPart :: ByteString -> [BodyPart] -> ByteString -printMultiPart boundary xs = - B.concat $ map (printBodyPart boundary) xs ++ [crlf, dd, boundary, dd, crlf] - -printBodyPart :: ByteString -> BodyPart -> ByteString -printBodyPart boundary (BodyPart hs c) = B.concat $ [crlf, dd, boundary, crlf] ++ map (B.pack . show) hs ++ [crlf, c] - -crlf :: ByteString -crlf = B.pack "\r\n" - -dd :: ByteString -dd = B.pack "--" diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Utils.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Utils.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Utils.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Utils.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,237 +0,0 @@ -{-# LANGUAGE ForeignFunctionInterface, CPP #-} - -module Distribution.Client.Utils ( MergeResult(..) 
- , mergeBy, duplicates, duplicatesBy - , inDir, determineNumJobs, numberOfProcessors - , removeExistingFile - , makeAbsoluteToCwd, filePathToByteString - , byteStringToFilePath, tryCanonicalizePath - , canonicalizePathNoThrow - , moreRecentFile, existsAndIsMoreRecentThan - , tryFindAddSourcePackageDesc - , tryFindPackageDesc - , relaxEncodingErrors) - where - -import Distribution.Compat.Exception ( catchIO ) -import Distribution.Client.Compat.Time ( getModTime ) -import Distribution.Simple.Setup ( Flag(..) ) -import Distribution.Simple.Utils ( die, findPackageDesc ) -import qualified Data.ByteString.Lazy as BS -import Control.Monad - ( when ) -import Data.Bits - ( (.|.), shiftL, shiftR ) -import Data.Char - ( ord, chr ) -import Data.List - ( isPrefixOf, sortBy, groupBy ) -import Data.Word - ( Word8, Word32) -import Foreign.C.Types ( CInt(..) ) -import qualified Control.Exception as Exception - ( finally ) -import System.Directory - ( canonicalizePath, doesFileExist, getCurrentDirectory - , removeFile, setCurrentDirectory ) -import System.FilePath - ( (), isAbsolute ) -import System.IO - ( Handle -#if MIN_VERSION_base(4,4,0) - , hGetEncoding, hSetEncoding -#endif - ) -import System.IO.Unsafe ( unsafePerformIO ) - -#if MIN_VERSION_base(4,4,0) -import GHC.IO.Encoding - ( recover, TextEncoding(TextEncoding) ) -import GHC.IO.Encoding.Failure - ( recoverEncode, CodingFailureMode(TransliterateCodingFailure) ) -#endif - -#if defined(mingw32_HOST_OS) -import Prelude hiding (ioError) -import Control.Monad (liftM2, unless) -import System.Directory (doesDirectoryExist) -import System.IO.Error (ioError, mkIOError, doesNotExistErrorType) -#endif - --- | Generic merging utility. For sorted input lists this is a full outer join. --- --- * The result list never contains @(Nothing, Nothing)@. 
--- -mergeBy :: (a -> b -> Ordering) -> [a] -> [b] -> [MergeResult a b] -mergeBy cmp = merge - where - merge [] ys = [ OnlyInRight y | y <- ys] - merge xs [] = [ OnlyInLeft x | x <- xs] - merge (x:xs) (y:ys) = - case x `cmp` y of - GT -> OnlyInRight y : merge (x:xs) ys - EQ -> InBoth x y : merge xs ys - LT -> OnlyInLeft x : merge xs (y:ys) - -data MergeResult a b = OnlyInLeft a | InBoth a b | OnlyInRight b - -duplicates :: Ord a => [a] -> [[a]] -duplicates = duplicatesBy compare - -duplicatesBy :: (a -> a -> Ordering) -> [a] -> [[a]] -duplicatesBy cmp = filter moreThanOne . groupBy eq . sortBy cmp - where - eq a b = case cmp a b of - EQ -> True - _ -> False - moreThanOne (_:_:_) = True - moreThanOne _ = False - --- | Like 'removeFile', but does not throw an exception when the file does not --- exist. -removeExistingFile :: FilePath -> IO () -removeExistingFile path = do - exists <- doesFileExist path - when exists $ - removeFile path - --- | Executes the action in the specified directory. -inDir :: Maybe FilePath -> IO a -> IO a -inDir Nothing m = m -inDir (Just d) m = do - old <- getCurrentDirectory - setCurrentDirectory d - m `Exception.finally` setCurrentDirectory old - -foreign import ccall "getNumberOfProcessors" c_getNumberOfProcessors :: IO CInt - --- The number of processors is not going to change during the duration of the --- program, so unsafePerformIO is safe here. -numberOfProcessors :: Int -numberOfProcessors = fromEnum $ unsafePerformIO c_getNumberOfProcessors - --- | Determine the number of jobs to use given the value of the '-j' flag. -determineNumJobs :: Flag (Maybe Int) -> Int -determineNumJobs numJobsFlag = - case numJobsFlag of - NoFlag -> 1 - Flag Nothing -> numberOfProcessors - Flag (Just n) -> n - --- | Given a relative path, make it absolute relative to the current --- directory. Absolute paths are returned unmodified. 
-makeAbsoluteToCwd :: FilePath -> IO FilePath -makeAbsoluteToCwd path | isAbsolute path = return path - | otherwise = do cwd <- getCurrentDirectory - return $! cwd path - --- | Convert a 'FilePath' to a lazy 'ByteString'. Each 'Char' is --- encoded as a little-endian 'Word32'. -filePathToByteString :: FilePath -> BS.ByteString -filePathToByteString p = - BS.pack $ foldr conv [] codepts - where - codepts :: [Word32] - codepts = map (fromIntegral . ord) p - - conv :: Word32 -> [Word8] -> [Word8] - conv w32 rest = b0:b1:b2:b3:rest - where - b0 = fromIntegral $ w32 - b1 = fromIntegral $ w32 `shiftR` 8 - b2 = fromIntegral $ w32 `shiftR` 16 - b3 = fromIntegral $ w32 `shiftR` 24 - --- | Reverse operation to 'filePathToByteString'. -byteStringToFilePath :: BS.ByteString -> FilePath -byteStringToFilePath bs | bslen `mod` 4 /= 0 = unexpected - | otherwise = go 0 - where - unexpected = "Distribution.Client.Utils.byteStringToFilePath: unexpected" - bslen = BS.length bs - - go i | i == bslen = [] - | otherwise = (chr . fromIntegral $ w32) : go (i+4) - where - w32 :: Word32 - w32 = b0 .|. (b1 `shiftL` 8) .|. (b2 `shiftL` 16) .|. (b3 `shiftL` 24) - b0 = fromIntegral $ BS.index bs i - b1 = fromIntegral $ BS.index bs (i + 1) - b2 = fromIntegral $ BS.index bs (i + 2) - b3 = fromIntegral $ BS.index bs (i + 3) - --- | Workaround for the inconsistent behaviour of 'canonicalizePath'. It throws --- an error if the path refers to a non-existent file on *nix, but not on --- Windows. -tryCanonicalizePath :: FilePath -> IO FilePath -tryCanonicalizePath path = do - ret <- canonicalizePath path -#if defined(mingw32_HOST_OS) - exists <- liftM2 (||) (doesFileExist ret) (doesDirectoryExist ret) - unless exists $ - ioError $ mkIOError doesNotExistErrorType "canonicalizePath" - Nothing (Just ret) -#endif - return ret - --- | A non-throwing wrapper for 'canonicalizePath'. If 'canonicalizePath' throws --- an exception, returns the path argument unmodified. 
-canonicalizePathNoThrow :: FilePath -> IO FilePath -canonicalizePathNoThrow path = do - canonicalizePath path `catchIO` (\_ -> return path) - --------------------- --- Modification time - --- | Like Distribution.Simple.Utils.moreRecentFile, but uses getModTime instead --- of getModificationTime for higher precision. We can't merge the two because --- Distribution.Client.Time uses MIN_VERSION macros. -moreRecentFile :: FilePath -> FilePath -> IO Bool -moreRecentFile a b = do - exists <- doesFileExist b - if not exists - then return True - else do tb <- getModTime b - ta <- getModTime a - return (ta > tb) - --- | Like 'moreRecentFile', but also checks that the first file exists. -existsAndIsMoreRecentThan :: FilePath -> FilePath -> IO Bool -existsAndIsMoreRecentThan a b = do - exists <- doesFileExist a - if not exists - then return False - else a `moreRecentFile` b - --- | Sets the handler for encoding errors to one that transliterates invalid --- characters into one present in the encoding (i.e., \'?\'). --- This is opposed to the default behavior, which is to throw an exception on --- error. This function will ignore file handles that have a Unicode encoding --- set. It's a no-op for versions of `base` less than 4.4. -relaxEncodingErrors :: Handle -> IO () -relaxEncodingErrors handle = do -#if MIN_VERSION_base(4,4,0) - maybeEncoding <- hGetEncoding handle - case maybeEncoding of - Just (TextEncoding name decoder encoder) | not ("UTF" `isPrefixOf` name) -> - let relax x = x { recover = recoverEncode TransliterateCodingFailure } - in hSetEncoding handle (TextEncoding name decoder (fmap relax encoder)) - _ -> -#endif - return () - --- |Like 'tryFindPackageDesc', but with error specific to add-source deps. 
-tryFindAddSourcePackageDesc :: FilePath -> String -> IO FilePath -tryFindAddSourcePackageDesc depPath err = tryFindPackageDesc depPath $ - err ++ "\n" ++ "Failed to read cabal file of add-source dependency: " - ++ depPath - --- |Try to find a @.cabal@ file, in directory @depPath@. Fails if one cannot be --- found, with @err@ prefixing the error message. This function simply allows --- us to give a more descriptive error than that provided by @findPackageDesc@. -tryFindPackageDesc :: FilePath -> String -> IO FilePath -tryFindPackageDesc depPath err = do - errOrCabalFile <- findPackageDesc depPath - case errOrCabalFile of - Right file -> return file - Left _ -> die err diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/Win32SelfUpgrade.hs cabal-install-1.22-1.22.9.0/Distribution/Client/Win32SelfUpgrade.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/Win32SelfUpgrade.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Distribution/Client/Win32SelfUpgrade.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,225 +0,0 @@ -{-# LANGUAGE CPP, ForeignFunctionInterface #-} ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.Win32SelfUpgrade --- Copyright : (c) Duncan Coutts 2008 --- License : BSD-like --- --- Maintainer : cabal-devel@haskell.org --- Stability : provisional --- Portability : portable --- --- Support for self-upgrading executables on Windows platforms. ------------------------------------------------------------------------------ -module Distribution.Client.Win32SelfUpgrade ( --- * Explanation --- --- | Windows inherited a design choice from DOS that while initially innocuous --- has rather unfortunate consequences. It maintains the invariant that every --- open file has a corresponding name on disk. One positive consequence of this --- is that an executable can always find it's own executable file. 
The downside --- is that a program cannot be deleted or upgraded while it is running without --- hideous workarounds. This module implements one such hideous workaround. --- --- The basic idea is: --- --- * Move our own exe file to a new name --- * Copy a new exe file to the previous name --- * Run the new exe file, passing our own PID and new path --- * Wait for the new process to start --- * Close the new exe file --- * Exit old process --- --- Then in the new process: --- --- * Inform the old process that we've started --- * Wait for the old process to die --- * Delete the old exe file --- * Exit new process --- - - possibleSelfUpgrade, - deleteOldExeFile, - ) where - -#if mingw32_HOST_OS - -import qualified System.Win32 as Win32 -import System.Win32 (DWORD, BOOL, HANDLE, LPCTSTR) -import Foreign.Ptr (Ptr, nullPtr) -import System.Process (runProcess) -import System.Directory (canonicalizePath) -import System.FilePath (takeBaseName, replaceBaseName, equalFilePath) - -import Distribution.Verbosity as Verbosity (Verbosity, showForCabal) -import Distribution.Simple.Utils (debug, info) - -import Prelude hiding (log) - --- | If one of the given files is our own exe file then we arrange things such --- that the nested action can replace our own exe file. --- --- We require that the new process accepts a command line invocation that --- calls 'deleteOldExeFile', passing in the PID and exe file. --- -possibleSelfUpgrade :: Verbosity - -> [FilePath] - -> IO a -> IO a -possibleSelfUpgrade verbosity newPaths action = do - dstPath <- canonicalizePath =<< Win32.getModuleFileName Win32.nullHANDLE - - newPaths' <- mapM canonicalizePath newPaths - let doingSelfUpgrade = any (equalFilePath dstPath) newPaths' - - if not doingSelfUpgrade - then action - else do - info verbosity $ "cabal-install does the replace-own-exe-file dance..." 
- tmpPath <- moveOurExeOutOfTheWay verbosity - result <- action - scheduleOurDemise verbosity dstPath tmpPath - (\pid path -> ["win32selfupgrade", pid, path - ,"--verbose=" ++ Verbosity.showForCabal verbosity]) - return result - --- | The name of a Win32 Event object that we use to synchronise between the --- old and new processes. We need to synchronise to make sure that the old --- process has not yet terminated by the time the new one starts up and looks --- for the old process. Otherwise the old one might have already terminated --- and we could not wait on it terminating reliably (eg the PID might get --- re-used). --- -syncEventName :: String -syncEventName = "Local\\cabal-install-upgrade" - --- | The first part of allowing our exe file to be replaced is to move the --- existing exe file out of the way. Although we cannot delete our exe file --- while we're still running, fortunately we can rename it, at least within --- the same directory. --- -moveOurExeOutOfTheWay :: Verbosity -> IO FilePath -moveOurExeOutOfTheWay verbosity = do - ourPID <- getCurrentProcessId - dstPath <- Win32.getModuleFileName Win32.nullHANDLE - - let tmpPath = replaceBaseName dstPath (takeBaseName dstPath ++ show ourPID) - - debug verbosity $ "moving " ++ dstPath ++ " to " ++ tmpPath - Win32.moveFile dstPath tmpPath - return tmpPath - --- | Assuming we've now installed the new exe file in the right place, we --- launch it and ask it to delete our exe file when we eventually terminate. 
--- -scheduleOurDemise :: Verbosity -> FilePath -> FilePath - -> (String -> FilePath -> [String]) -> IO () -scheduleOurDemise verbosity dstPath tmpPath mkArgs = do - ourPID <- getCurrentProcessId - event <- createEvent syncEventName - - let args = mkArgs (show ourPID) tmpPath - log $ "launching child " ++ unwords (dstPath : map show args) - _ <- runProcess dstPath args Nothing Nothing Nothing Nothing Nothing - - log $ "waiting for the child to start up" - waitForSingleObject event (10*1000) -- wait at most 10 sec - log $ "child started ok" - - where - log msg = debug verbosity ("Win32Reinstall.parent: " ++ msg) - --- | Assuming we're now in the new child process, we've been asked by the old --- process to wait for it to terminate and then we can remove the old exe file --- that it renamed itself to. --- -deleteOldExeFile :: Verbosity -> Int -> FilePath -> IO () -deleteOldExeFile verbosity oldPID tmpPath = do - log $ "process started. Will delete exe file of process " - ++ show oldPID ++ " at path " ++ tmpPath - - log $ "getting handle of parent process " ++ show oldPID - oldPHANDLE <- Win32.openProcess Win32.sYNCHORNIZE False (fromIntegral oldPID) - - log $ "synchronising with parent" - event <- openEvent syncEventName - setEvent event - - log $ "waiting for parent process to terminate" - waitForSingleObject oldPHANDLE Win32.iNFINITE - log $ "parent process terminated" - - log $ "deleting parent's old .exe file" - Win32.deleteFile tmpPath - - where - log msg = debug verbosity ("Win32Reinstall.child: " ++ msg) - ------------------------- --- Win32 foreign imports --- - --- A bunch of functions sadly not provided by the Win32 package. 
- -#ifdef x86_64_HOST_ARCH -#define CALLCONV ccall -#else -#define CALLCONV stdcall -#endif - -foreign import CALLCONV unsafe "windows.h GetCurrentProcessId" - getCurrentProcessId :: IO DWORD - -foreign import CALLCONV unsafe "windows.h WaitForSingleObject" - waitForSingleObject_ :: HANDLE -> DWORD -> IO DWORD - -waitForSingleObject :: HANDLE -> DWORD -> IO () -waitForSingleObject handle timeout = - Win32.failIf_ bad "WaitForSingleObject" $ - waitForSingleObject_ handle timeout - where - bad result = not (result == 0 || result == wAIT_TIMEOUT) - wAIT_TIMEOUT = 0x00000102 - -foreign import CALLCONV unsafe "windows.h CreateEventW" - createEvent_ :: Ptr () -> BOOL -> BOOL -> LPCTSTR -> IO HANDLE - -createEvent :: String -> IO HANDLE -createEvent name = do - Win32.failIfNull "CreateEvent" $ - Win32.withTString name $ - createEvent_ nullPtr False False - -foreign import CALLCONV unsafe "windows.h OpenEventW" - openEvent_ :: DWORD -> BOOL -> LPCTSTR -> IO HANDLE - -openEvent :: String -> IO HANDLE -openEvent name = do - Win32.failIfNull "OpenEvent" $ - Win32.withTString name $ - openEvent_ eVENT_MODIFY_STATE False - where - eVENT_MODIFY_STATE :: DWORD - eVENT_MODIFY_STATE = 0x0002 - -foreign import CALLCONV unsafe "windows.h SetEvent" - setEvent_ :: HANDLE -> IO BOOL - -setEvent :: HANDLE -> IO () -setEvent handle = - Win32.failIfFalse_ "SetEvent" $ - setEvent_ handle - -#else - -import Distribution.Verbosity (Verbosity) -import Distribution.Simple.Utils (die) - -possibleSelfUpgrade :: Verbosity - -> [FilePath] - -> IO a -> IO a -possibleSelfUpgrade _ _ action = action - -deleteOldExeFile :: Verbosity -> Int -> FilePath -> IO () -deleteOldExeFile _ _ _ = die "win32selfupgrade not needed except on win32" - -#endif diff -Nru cabal-install-1.22-1.22.6.0/Distribution/Client/World.hs cabal-install-1.22-1.22.9.0/Distribution/Client/World.hs --- cabal-install-1.22-1.22.6.0/Distribution/Client/World.hs 2015-06-17 08:11:26.000000000 +0000 +++ 
cabal-install-1.22-1.22.9.0/Distribution/Client/World.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,172 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Distribution.Client.World --- Copyright : (c) Peter Robinson 2009 --- License : BSD-like --- --- Maintainer : thaldyron@gmail.com --- Stability : provisional --- Portability : portable --- --- Interface to the world-file that contains a list of explicitly --- requested packages. Meant to be imported qualified. --- --- A world file entry stores the package-name, package-version, and --- user flags. --- For example, the entry generated by --- # cabal install stm-io-hooks --flags="-debug" --- looks like this: --- # stm-io-hooks -any --flags="-debug" --- To rebuild/upgrade the packages in world (e.g. when updating the compiler) --- use --- # cabal install world --- ------------------------------------------------------------------------------ -module Distribution.Client.World ( - WorldPkgInfo(..), - insert, - delete, - getContents, - ) where - -import Distribution.Package - ( Dependency(..) ) -import Distribution.PackageDescription - ( FlagAssignment, FlagName(FlagName) ) -import Distribution.Verbosity - ( Verbosity ) -import Distribution.Simple.Utils - ( die, info, chattyTry, writeFileAtomic ) -import Distribution.Text - ( Text(..), display, simpleParse ) -import qualified Distribution.Compat.ReadP as Parse -import Distribution.Compat.Exception ( catchIO ) -import qualified Text.PrettyPrint as Disp -import Text.PrettyPrint ( (<>), (<+>) ) - - -import Data.Char as Char - -import Data.List - ( unionBy, deleteFirstsBy, nubBy ) -import System.IO.Error - ( isDoesNotExistError ) -import qualified Data.ByteString.Lazy.Char8 as B -import Prelude hiding (getContents) - - -data WorldPkgInfo = WorldPkgInfo Dependency FlagAssignment - deriving (Show,Eq) - --- | Adds packages to the world file; creates the file if it doesn't --- exist yet. 
Version constraints and flag assignments for a package are --- updated if already present. IO errors are non-fatal. -insert :: Verbosity -> FilePath -> [WorldPkgInfo] -> IO () -insert = modifyWorld $ unionBy equalUDep - --- | Removes packages from the world file. --- Note: Currently unused as there is no mechanism in Cabal (yet) to --- handle uninstalls. IO errors are non-fatal. -delete :: Verbosity -> FilePath -> [WorldPkgInfo] -> IO () -delete = modifyWorld $ flip (deleteFirstsBy equalUDep) - --- | WorldPkgInfo values are considered equal if they refer to --- the same package, i.e., we don't care about differing versions or flags. -equalUDep :: WorldPkgInfo -> WorldPkgInfo -> Bool -equalUDep (WorldPkgInfo (Dependency pkg1 _) _) - (WorldPkgInfo (Dependency pkg2 _) _) = pkg1 == pkg2 - --- | Modifies the world file by applying an update-function ('unionBy' --- for 'insert', 'deleteFirstsBy' for 'delete') to the given list of --- packages. IO errors are considered non-fatal. -modifyWorld :: ([WorldPkgInfo] -> [WorldPkgInfo] - -> [WorldPkgInfo]) - -- ^ Function that defines how - -- the list of user packages are merged with - -- existing world packages. - -> Verbosity - -> FilePath -- ^ Location of the world file - -> [WorldPkgInfo] -- ^ list of user supplied packages - -> IO () -modifyWorld _ _ _ [] = return () -modifyWorld f verbosity world pkgs = - chattyTry "Error while updating world-file. " $ do - pkgsOldWorld <- getContents world - -- Filter out packages that are not in the world file: - let pkgsNewWorld = nubBy equalUDep $ f pkgs pkgsOldWorld - -- 'Dependency' is not an Ord instance, so we need to check for - -- equivalence the awkward way: - if not (all (`elem` pkgsOldWorld) pkgsNewWorld && - all (`elem` pkgsNewWorld) pkgsOldWorld) - then do - info verbosity "Updating world file..." - writeFileAtomic world . B.pack $ unlines - [ (display pkg) | pkg <- pkgsNewWorld] - else - info verbosity "World file is already up to date." 
- - --- | Returns the content of the world file as a list -getContents :: FilePath -> IO [WorldPkgInfo] -getContents world = do - content <- safelyReadFile world - let result = map simpleParse (lines $ B.unpack content) - case sequence result of - Nothing -> die "Could not parse world file." - Just xs -> return xs - where - safelyReadFile :: FilePath -> IO B.ByteString - safelyReadFile file = B.readFile file `catchIO` handler - where - handler e | isDoesNotExistError e = return B.empty - | otherwise = ioError e - - -instance Text WorldPkgInfo where - disp (WorldPkgInfo dep flags) = disp dep <+> dispFlags flags - where - dispFlags [] = Disp.empty - dispFlags fs = Disp.text "--flags=" - <> Disp.doubleQuotes (flagAssToDoc fs) - flagAssToDoc = foldr (\(FlagName fname,val) flagAssDoc -> - (if not val then Disp.char '-' - else Disp.empty) - Disp.<> Disp.text fname - Disp.<+> flagAssDoc) - Disp.empty - parse = do - dep <- parse - Parse.skipSpaces - flagAss <- Parse.option [] parseFlagAssignment - return $ WorldPkgInfo dep flagAss - where - parseFlagAssignment :: Parse.ReadP r FlagAssignment - parseFlagAssignment = do - _ <- Parse.string "--flags" - Parse.skipSpaces - _ <- Parse.char '=' - Parse.skipSpaces - inDoubleQuotes $ Parse.many1 flag - where - inDoubleQuotes :: Parse.ReadP r a -> Parse.ReadP r a - inDoubleQuotes = Parse.between (Parse.char '"') (Parse.char '"') - - flag = do - Parse.skipSpaces - val <- negative Parse.+++ positive - name <- ident - Parse.skipSpaces - return (FlagName name,val) - negative = do - _ <- Parse.char '-' - return False - positive = return True - - ident :: Parse.ReadP r String - ident = do - -- First character must be a letter/digit to avoid flags - -- like "+-debug": - c <- Parse.satisfy Char.isAlphaNum - cs <- Parse.munch (\ch -> Char.isAlphaNum ch || ch == '_' - || ch == '-') - return (c:cs) Binary files /tmp/tmpVBg9ir/PzIulGlZus/cabal-install-1.22-1.22.6.0/HTTP-4000.2.19.tar.gz and 
/tmp/tmpVBg9ir/pYw8BlU4Qo/cabal-install-1.22-1.22.9.0/HTTP-4000.2.19.tar.gz differ diff -Nru cabal-install-1.22-1.22.6.0/Main.hs cabal-install-1.22-1.22.9.0/Main.hs --- cabal-install-1.22-1.22.6.0/Main.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/Main.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,1119 +0,0 @@ -{-# LANGUAGE CPP #-} - ------------------------------------------------------------------------------ --- | --- Module : Main --- Copyright : (c) David Himmelstrup 2005 --- License : BSD-like --- --- Maintainer : lemmih@gmail.com --- Stability : provisional --- Portability : portable --- --- Entry point to the default cabal-install front-end. ------------------------------------------------------------------------------ - -module Main (main) where - -import Distribution.Client.Setup - ( GlobalFlags(..), globalCommand, globalRepos - , ConfigFlags(..) - , ConfigExFlags(..), defaultConfigExFlags, configureExCommand - , BuildFlags(..), BuildExFlags(..), SkipAddSourceDepsCheck(..) - , buildCommand, replCommand, testCommand, benchmarkCommand - , InstallFlags(..), defaultInstallFlags - , installCommand, upgradeCommand - , FetchFlags(..), fetchCommand - , FreezeFlags(..), freezeCommand - , GetFlags(..), getCommand, unpackCommand - , checkCommand - , formatCommand - , updateCommand - , ListFlags(..), listCommand - , InfoFlags(..), infoCommand - , UploadFlags(..), uploadCommand - , ReportFlags(..), reportCommand - , runCommand - , InitFlags(initVerbosity), initCommand - , SDistFlags(..), SDistExFlags(..), sdistCommand - , Win32SelfUpgradeFlags(..), win32SelfUpgradeCommand - , SandboxFlags(..), sandboxCommand - , ExecFlags(..), execCommand - , UserConfigFlags(..), userConfigCommand - , reportCommand - ) -import Distribution.Simple.Setup - ( HaddockFlags(..), haddockCommand, defaultHaddockFlags - , HscolourFlags(..), hscolourCommand - , ReplFlags(..) 
- , CopyFlags(..), copyCommand - , RegisterFlags(..), registerCommand - , CleanFlags(..), cleanCommand - , TestFlags(..), BenchmarkFlags(..) - , Flag(..), fromFlag, fromFlagOrDefault, flagToMaybe, toFlag - , configAbsolutePaths - ) - -import Distribution.Client.SetupWrapper - ( setupWrapper, SetupScriptOptions(..), defaultSetupScriptOptions ) -import Distribution.Client.Config - ( SavedConfig(..), loadConfig, defaultConfigFile, userConfigDiff - , userConfigUpdate ) -import Distribution.Client.Targets - ( readUserTargets ) -import qualified Distribution.Client.List as List - ( list, info ) - -import Distribution.Client.Install (install) -import Distribution.Client.Configure (configure) -import Distribution.Client.Update (update) -import Distribution.Client.Exec (exec) -import Distribution.Client.Fetch (fetch) -import Distribution.Client.Freeze (freeze) -import Distribution.Client.Check as Check (check) ---import Distribution.Client.Clean (clean) -import Distribution.Client.Upload as Upload (upload, check, report) -import Distribution.Client.Run (run, splitRunArgs) -import Distribution.Client.SrcDist (sdist) -import Distribution.Client.Get (get) -import Distribution.Client.Sandbox (sandboxInit - ,sandboxAddSource - ,sandboxDelete - ,sandboxDeleteSource - ,sandboxListSources - ,sandboxHcPkg - ,dumpPackageEnvironment - - ,getSandboxConfigFilePath - ,loadConfigOrSandboxConfig - ,initPackageDBIfNeeded - ,maybeWithSandboxDirOnSearchPath - ,maybeWithSandboxPackageInfo - ,WereDepsReinstalled(..) 
- ,maybeReinstallAddSourceDeps - ,tryGetIndexFilePath - ,sandboxBuildDir - ,updateSandboxConfigFileFlag - - ,configCompilerAux' - ,configPackageDB') -import Distribution.Client.Sandbox.PackageEnvironment - (setPackageDB - ,userPackageEnvironmentFile) -import Distribution.Client.Sandbox.Timestamp (maybeAddCompilerTimestampRecord) -import Distribution.Client.Sandbox.Types (UseSandbox(..), whenUsingSandbox) -import Distribution.Client.Init (initCabal) -import qualified Distribution.Client.Win32SelfUpgrade as Win32SelfUpgrade -import Distribution.Client.Utils (determineNumJobs -#if defined(mingw32_HOST_OS) - ,relaxEncodingErrors -#endif - ,existsAndIsMoreRecentThan) - -import Distribution.PackageDescription - ( Executable(..), benchmarkName, benchmarkBuildInfo, testName - , testBuildInfo, buildable ) -import Distribution.PackageDescription.Parse - ( readPackageDescription ) -import Distribution.PackageDescription.PrettyPrint - ( writeGenericPackageDescription ) -import Distribution.Simple.Build - ( startInterpreter ) -import Distribution.Simple.Command - ( CommandParse(..), CommandUI(..), Command - , commandsRun, commandAddAction, hiddenCommand ) -import Distribution.Simple.Compiler - ( Compiler(..) 
) -import Distribution.Simple.Configure - ( checkPersistBuildConfigOutdated, configCompilerAuxEx - , ConfigStateFileError(..), localBuildInfoFile - , getPersistBuildConfig, tryGetPersistBuildConfig ) -import qualified Distribution.Simple.LocalBuildInfo as LBI -import Distribution.Simple.Program (defaultProgramConfiguration - ,configureAllKnownPrograms) -import qualified Distribution.Simple.Setup as Cabal -import Distribution.Simple.Utils - ( cabalVersion, die, notice, info, topHandler - , findPackageDesc, tryFindPackageDesc ) -import Distribution.Text - ( display ) -import Distribution.Verbosity as Verbosity - ( Verbosity, normal ) -import Distribution.Version - ( Version(..), orLaterVersion ) -import qualified Paths_cabal_install (version) - -import System.Environment (getArgs, getProgName) -import System.Exit (exitFailure) -import System.FilePath (splitExtension, takeExtension) -import System.IO ( BufferMode(LineBuffering), hSetBuffering -#ifdef mingw32_HOST_OS - , stderr -#endif - , stdout ) -import System.Directory (doesFileExist, getCurrentDirectory) -import Data.List (intercalate) -import Data.Maybe (mapMaybe) -#if !MIN_VERSION_base(4,8,0) -import Data.Monoid (Monoid(..)) -#endif -import Control.Monad (when, unless) - --- | Entry point --- -main :: IO () -main = do - -- Enable line buffering so that we can get fast feedback even when piped. - -- This is especially important for CI and build systems. - hSetBuffering stdout LineBuffering - -- The default locale encoding for Windows CLI is not UTF-8 and printing - -- Unicode characters to it will fail unless we relax the handling of encoding - -- errors when writing to stderr and stdout. 
-#ifdef mingw32_HOST_OS - relaxEncodingErrors stdout - relaxEncodingErrors stderr -#endif - getArgs >>= mainWorker - -mainWorker :: [String] -> IO () -mainWorker args = topHandler $ - case commandsRun (globalCommand commands) commands args of - CommandHelp help -> printGlobalHelp help - CommandList opts -> printOptionsList opts - CommandErrors errs -> printErrors errs - CommandReadyToGo (globalFlags, commandParse) -> - case commandParse of - _ | fromFlagOrDefault False (globalVersion globalFlags) - -> printVersion - | fromFlagOrDefault False (globalNumericVersion globalFlags) - -> printNumericVersion - CommandHelp help -> printCommandHelp help - CommandList opts -> printOptionsList opts - CommandErrors errs -> printErrors errs - CommandReadyToGo action -> do - globalFlags' <- updateSandboxConfigFileFlag globalFlags - action globalFlags' - - where - printCommandHelp help = do - pname <- getProgName - putStr (help pname) - printGlobalHelp help = do - pname <- getProgName - configFile <- defaultConfigFile - putStr (help pname) - putStr $ "\nYou can edit the cabal configuration file to set defaults:\n" - ++ " " ++ configFile ++ "\n" - exists <- doesFileExist configFile - when (not exists) $ - putStrLn $ "This file will be generated with sensible " - ++ "defaults if you run 'cabal update'." - printOptionsList = putStr . 
unlines - printErrors errs = die $ intercalate "\n" errs - printNumericVersion = putStrLn $ display Paths_cabal_install.version - printVersion = putStrLn $ "cabal-install version " - ++ display Paths_cabal_install.version - ++ "\nusing version " - ++ display cabalVersion - ++ " of the Cabal library " - - commands = - [installCommand `commandAddAction` installAction - ,updateCommand `commandAddAction` updateAction - ,listCommand `commandAddAction` listAction - ,infoCommand `commandAddAction` infoAction - ,fetchCommand `commandAddAction` fetchAction - ,freezeCommand `commandAddAction` freezeAction - ,getCommand `commandAddAction` getAction - ,hiddenCommand $ - unpackCommand `commandAddAction` unpackAction - ,checkCommand `commandAddAction` checkAction - ,sdistCommand `commandAddAction` sdistAction - ,uploadCommand `commandAddAction` uploadAction - ,reportCommand `commandAddAction` reportAction - ,runCommand `commandAddAction` runAction - ,initCommand `commandAddAction` initAction - ,configureExCommand `commandAddAction` configureAction - ,buildCommand `commandAddAction` buildAction - ,replCommand `commandAddAction` replAction - ,sandboxCommand `commandAddAction` sandboxAction - ,haddockCommand `commandAddAction` haddockAction - ,execCommand `commandAddAction` execAction - ,userConfigCommand `commandAddAction` userConfigAction - ,cleanCommand `commandAddAction` cleanAction - ,wrapperAction copyCommand - copyVerbosity copyDistPref - ,wrapperAction hscolourCommand - hscolourVerbosity hscolourDistPref - ,wrapperAction registerCommand - regVerbosity regDistPref - ,testCommand `commandAddAction` testAction - ,benchmarkCommand `commandAddAction` benchmarkAction - ,hiddenCommand $ - formatCommand `commandAddAction` formatAction - ,hiddenCommand $ - upgradeCommand `commandAddAction` upgradeAction - ,hiddenCommand $ - win32SelfUpgradeCommand`commandAddAction` win32SelfUpgradeAction - ] - -wrapperAction :: Monoid flags - => CommandUI flags - -> (flags -> Flag Verbosity) - -> 
(flags -> Flag String) - -> Command (GlobalFlags -> IO ()) -wrapperAction command verbosityFlag distPrefFlag = - commandAddAction command - { commandDefaultFlags = mempty } $ \flags extraArgs _globalFlags -> do - let verbosity = fromFlagOrDefault normal (verbosityFlag flags) - setupScriptOptions = defaultSetupScriptOptions { - useDistPref = fromFlagOrDefault - (useDistPref defaultSetupScriptOptions) - (distPrefFlag flags) - } - setupWrapper verbosity setupScriptOptions Nothing - command (const flags) extraArgs - -configureAction :: (ConfigFlags, ConfigExFlags) - -> [String] -> GlobalFlags -> IO () -configureAction (configFlags, configExFlags) extraArgs globalFlags = do - let verbosity = fromFlagOrDefault normal (configVerbosity configFlags) - - (useSandbox, config) <- loadConfigOrSandboxConfig verbosity - globalFlags (configUserInstall configFlags) - let configFlags' = savedConfigureFlags config `mappend` configFlags - configExFlags' = savedConfigureExFlags config `mappend` configExFlags - globalFlags' = savedGlobalFlags config `mappend` globalFlags - (comp, platform, conf) <- configCompilerAuxEx configFlags' - - -- If we're working inside a sandbox and the user has set the -w option, we - -- may need to create a sandbox-local package DB for this compiler and add a - -- timestamp record for this compiler to the timestamp file. - let configFlags'' = case useSandbox of - NoSandbox -> configFlags' - (UseSandbox sandboxDir) -> setPackageDB sandboxDir - comp platform configFlags' - - whenUsingSandbox useSandbox $ \sandboxDir -> do - initPackageDBIfNeeded verbosity configFlags'' comp conf - -- NOTE: We do not write the new sandbox package DB location to - -- 'cabal.sandbox.config' here because 'configure -w' must not affect - -- subsequent 'install' (for UI compatibility with non-sandboxed mode). 
- - indexFile <- tryGetIndexFilePath config - maybeAddCompilerTimestampRecord verbosity sandboxDir indexFile - (compilerId comp) platform - - maybeWithSandboxDirOnSearchPath useSandbox $ - configure verbosity - (configPackageDB' configFlags'') - (globalRepos globalFlags') - comp platform conf configFlags'' configExFlags' extraArgs - -buildAction :: (BuildFlags, BuildExFlags) -> [String] -> GlobalFlags -> IO () -buildAction (buildFlags, buildExFlags) extraArgs globalFlags = do - let distPref = fromFlagOrDefault (useDistPref defaultSetupScriptOptions) - (buildDistPref buildFlags) - verbosity = fromFlagOrDefault normal (buildVerbosity buildFlags) - noAddSource = fromFlagOrDefault DontSkipAddSourceDepsCheck - (buildOnly buildExFlags) - - -- Calls 'configureAction' to do the real work, so nothing special has to be - -- done to support sandboxes. - (useSandbox, config) <- reconfigure verbosity distPref - mempty [] globalFlags noAddSource - (buildNumJobs buildFlags) (const Nothing) - - maybeWithSandboxDirOnSearchPath useSandbox $ - build verbosity config distPref buildFlags extraArgs - - --- | Actually do the work of building the package. This is separate from --- 'buildAction' so that 'testAction' and 'benchmarkAction' do not invoke --- 'reconfigure' twice. -build :: Verbosity -> SavedConfig -> FilePath -> BuildFlags -> [String] -> IO () -build verbosity config distPref buildFlags extraArgs = - setupWrapper verbosity setupOptions Nothing - (Cabal.buildCommand progConf) mkBuildFlags extraArgs - where - progConf = defaultProgramConfiguration - setupOptions = defaultSetupScriptOptions { useDistPref = distPref } - - mkBuildFlags version = filterBuildFlags version config buildFlags' - buildFlags' = buildFlags - { buildVerbosity = toFlag verbosity - , buildDistPref = toFlag distPref - } - --- | Make sure that we don't pass new flags to setup scripts compiled against --- old versions of Cabal. 
-filterBuildFlags :: Version -> SavedConfig -> BuildFlags -> BuildFlags -filterBuildFlags version config buildFlags - | version >= Version [1,19,1] [] = buildFlags_latest - -- Cabal < 1.19.1 doesn't support 'build -j'. - | otherwise = buildFlags_pre_1_19_1 - where - buildFlags_pre_1_19_1 = buildFlags { - buildNumJobs = NoFlag - } - buildFlags_latest = buildFlags { - -- Take the 'jobs' setting '~/.cabal/config' into account. - buildNumJobs = Flag . Just . determineNumJobs $ - (numJobsConfigFlag `mappend` numJobsCmdLineFlag) - } - numJobsConfigFlag = installNumJobs . savedInstallFlags $ config - numJobsCmdLineFlag = buildNumJobs buildFlags - - -replAction :: (ReplFlags, BuildExFlags) -> [String] -> GlobalFlags -> IO () -replAction (replFlags, buildExFlags) extraArgs globalFlags = do - cwd <- getCurrentDirectory - pkgDesc <- findPackageDesc cwd - either (const onNoPkgDesc) (const onPkgDesc) pkgDesc - where - verbosity = fromFlagOrDefault normal (replVerbosity replFlags) - - -- There is a .cabal file in the current directory: start a REPL and load - -- the project's modules. - onPkgDesc = do - let distPref = fromFlagOrDefault (useDistPref defaultSetupScriptOptions) - (replDistPref replFlags) - noAddSource = case replReload replFlags of - Flag True -> SkipAddSourceDepsCheck - _ -> fromFlagOrDefault DontSkipAddSourceDepsCheck - (buildOnly buildExFlags) - progConf = defaultProgramConfiguration - setupOptions = defaultSetupScriptOptions - { useCabalVersion = orLaterVersion $ Version [1,18,0] [] - , useDistPref = distPref - } - replFlags' = replFlags - { replVerbosity = toFlag verbosity - , replDistPref = toFlag distPref - } - -- Calls 'configureAction' to do the real work, so nothing special has to - -- be done to support sandboxes. 
- (useSandbox, _config) <- reconfigure verbosity distPref - mempty [] globalFlags noAddSource NoFlag - (const Nothing) - - maybeWithSandboxDirOnSearchPath useSandbox $ - setupWrapper verbosity setupOptions Nothing - (Cabal.replCommand progConf) (const replFlags') extraArgs - - -- No .cabal file in the current directory: just start the REPL (possibly - -- using the sandbox package DB). - onNoPkgDesc = do - (_useSandbox, config) <- loadConfigOrSandboxConfig verbosity globalFlags - mempty - let configFlags = savedConfigureFlags config - (comp, _platform, programDb) <- configCompilerAux' configFlags - startInterpreter verbosity programDb comp (configPackageDB' configFlags) - --- | Re-configure the package in the current directory if needed. Deciding --- when to reconfigure and with which options is convoluted: --- --- If we are reconfiguring, we must always run @configure@ with the --- verbosity option we are given; however, that a previous configuration --- uses a different verbosity setting is not reason enough to reconfigure. --- --- The package should be configured to use the same \"dist\" prefix as --- given to the @build@ command, otherwise the build will probably --- fail. Not only does this determine the \"dist\" prefix setting if we --- need to reconfigure anyway, but an existing configuration should be --- invalidated if its \"dist\" prefix differs. --- --- If the package has never been configured (i.e., there is no --- LocalBuildInfo), we must configure first, using the default options. --- --- If the package has been configured, there will be a 'LocalBuildInfo'. --- If there no package description file, we assume that the --- 'PackageDescription' is up to date, though the configuration may need --- to be updated for other reasons (see above). If there is a package --- description file, and it has been modified since the 'LocalBuildInfo' --- was generated, then we need to reconfigure. 
--- --- The caller of this function may also have specific requirements --- regarding the flags the last configuration used. For example, --- 'testAction' requires that the package be configured with test suites --- enabled. The caller may pass the required settings to this function --- along with a function to check the validity of the saved 'ConfigFlags'; --- these required settings will be checked first upon determining that --- a previous configuration exists. -reconfigure :: Verbosity -- ^ Verbosity setting - -> FilePath -- ^ \"dist\" prefix - -> ConfigFlags -- ^ Additional config flags to set. These flags - -- will be 'mappend'ed to the last used or - -- default 'ConfigFlags' as appropriate, so - -- this value should be 'mempty' with only the - -- required flags set. The required verbosity - -- and \"dist\" prefix flags will be set - -- automatically because they are always - -- required; therefore, it is not necessary to - -- set them here. - -> [String] -- ^ Extra arguments - -> GlobalFlags -- ^ Global flags - -> SkipAddSourceDepsCheck - -- ^ Should we skip the timestamp check for modified - -- add-source dependencies? - -> Flag (Maybe Int) - -- ^ -j flag for reinstalling add-source deps. - -> (ConfigFlags -> Maybe String) - -- ^ Check that the required flags are set in - -- the last used 'ConfigFlags'. If the required - -- flags are not set, provide a message to the - -- user explaining the reason for - -- reconfiguration. Because the correct \"dist\" - -- prefix setting is always required, it is checked - -- automatically; this function need not check - -- for it. - -> IO (UseSandbox, SavedConfig) -reconfigure verbosity distPref addConfigFlags extraArgs globalFlags - skipAddSourceDepsCheck numJobsFlag checkFlags = do - eLbi <- tryGetPersistBuildConfig distPref - case eLbi of - Left err -> onNoBuildConfig err - Right lbi -> onBuildConfig lbi - - where - - -- We couldn't load the saved package config file. 
- -- - -- If we're in a sandbox: add-source deps don't have to be reinstalled - -- (since we don't know the compiler & platform). - onNoBuildConfig :: ConfigStateFileError -> IO (UseSandbox, SavedConfig) - onNoBuildConfig err = do - let msg = case err of - ConfigStateFileMissing -> "Package has never been configured." - ConfigStateFileNoParse -> "Saved package config file seems " - ++ "to be corrupt." - _ -> show err - case err of - ConfigStateFileBadVersion _ _ _ -> info verbosity msg - _ -> do - notice verbosity - $ msg ++ " Configuring with default flags." ++ configureManually - configureAction (defaultFlags, defaultConfigExFlags) - extraArgs globalFlags - loadConfigOrSandboxConfig verbosity globalFlags mempty - - -- Package has been configured, but the configuration may be out of - -- date or required flags may not be set. - -- - -- If we're in a sandbox: reinstall the modified add-source deps and - -- force reconfigure if we did. - onBuildConfig :: LBI.LocalBuildInfo -> IO (UseSandbox, SavedConfig) - onBuildConfig lbi = do - let configFlags = LBI.configFlags lbi - flags = mconcat [configFlags, addConfigFlags, distVerbFlags] - - -- Was the sandbox created after the package was already configured? We - -- may need to skip reinstallation of add-source deps and force - -- reconfigure. - let buildConfig = localBuildInfoFile distPref - sandboxConfig <- getSandboxConfigFilePath globalFlags - isSandboxConfigNewer <- - sandboxConfig `existsAndIsMoreRecentThan` buildConfig - - let skipAddSourceDepsCheck' - | isSandboxConfigNewer = SkipAddSourceDepsCheck - | otherwise = skipAddSourceDepsCheck - - when (skipAddSourceDepsCheck' == SkipAddSourceDepsCheck) $ - info verbosity "Skipping add-source deps check..." 
- - (useSandbox, config, depsReinstalled) <- - case skipAddSourceDepsCheck' of - DontSkipAddSourceDepsCheck -> - maybeReinstallAddSourceDeps verbosity numJobsFlag flags globalFlags - SkipAddSourceDepsCheck -> do - (useSandbox, config) <- loadConfigOrSandboxConfig verbosity - globalFlags (configUserInstall flags) - return (useSandbox, config, NoDepsReinstalled) - - -- Is the @cabal.config@ file newer than @dist/setup.config@? Then we need - -- to force reconfigure. Note that it's possible to use @cabal.config@ - -- even without sandboxes. - isUserPackageEnvironmentFileNewer <- - userPackageEnvironmentFile `existsAndIsMoreRecentThan` buildConfig - - -- Determine whether we need to reconfigure and which message to show to - -- the user if that is the case. - mMsg <- determineMessageToShow lbi configFlags depsReinstalled - isSandboxConfigNewer - isUserPackageEnvironmentFileNewer - case mMsg of - - -- No message for the user indicates that reconfiguration - -- is not required. - Nothing -> return (useSandbox, config) - - -- Show the message and reconfigure. - Just msg -> do - notice verbosity msg - configureAction (flags, defaultConfigExFlags) - extraArgs globalFlags - return (useSandbox, config) - - -- Determine what message, if any, to display to the user if reconfiguration - -- is required. - determineMessageToShow :: LBI.LocalBuildInfo -> ConfigFlags - -> WereDepsReinstalled -> Bool -> Bool - -> IO (Maybe String) - determineMessageToShow _ _ _ True _ = - -- The sandbox was created after the package was already configured. - return $! Just $! sandboxConfigNewerMessage - - determineMessageToShow _ _ _ False True = - -- The user package environment file was modified. - return $! Just $! 
userPackageEnvironmentFileModifiedMessage - - determineMessageToShow lbi configFlags depsReinstalled False False = do - let savedDistPref = fromFlagOrDefault - (useDistPref defaultSetupScriptOptions) - (configDistPref configFlags) - case depsReinstalled of - ReinstalledSomeDeps -> - -- Some add-source deps were reinstalled. - return $! Just $! reinstalledDepsMessage - NoDepsReinstalled -> - case checkFlags configFlags of - -- Flag required by the caller is not set. - Just msg -> return $! Just $! msg ++ configureManually - - Nothing - -- Required "dist" prefix is not set. - | savedDistPref /= distPref -> - return $! Just distPrefMessage - - -- All required flags are set, but the configuration - -- may be outdated. - | otherwise -> case LBI.pkgDescrFile lbi of - Nothing -> return Nothing - Just pdFile -> do - outdated <- checkPersistBuildConfigOutdated - distPref pdFile - return $! if outdated - then Just $! outdatedMessage pdFile - else Nothing - - defaultFlags = mappend addConfigFlags distVerbFlags - distVerbFlags = mempty - { configVerbosity = toFlag verbosity - , configDistPref = toFlag distPref - } - reconfiguringMostRecent = " Re-configuring with most recently used options." - configureManually = " If this fails, please run configure manually." - sandboxConfigNewerMessage = - "The sandbox was created after the package was already configured." - ++ reconfiguringMostRecent - ++ configureManually - userPackageEnvironmentFileModifiedMessage = - "The user package environment file ('" - ++ userPackageEnvironmentFile ++ "') was modified." - ++ reconfiguringMostRecent - ++ configureManually - distPrefMessage = - "Package previously configured with different \"dist\" prefix." - ++ reconfiguringMostRecent - ++ configureManually - outdatedMessage pdFile = - pdFile ++ " has been changed." - ++ reconfiguringMostRecent - ++ configureManually - reinstalledDepsMessage = - "Some add-source dependencies have been reinstalled." 
- ++ reconfiguringMostRecent - ++ configureManually - -installAction :: (ConfigFlags, ConfigExFlags, InstallFlags, HaddockFlags) - -> [String] -> GlobalFlags -> IO () -installAction (configFlags, _, installFlags, _) _ _globalFlags - | fromFlagOrDefault False (installOnly installFlags) - = let verbosity = fromFlagOrDefault normal (configVerbosity configFlags) - in setupWrapper verbosity defaultSetupScriptOptions Nothing - installCommand (const mempty) [] - -installAction (configFlags, configExFlags, installFlags, haddockFlags) - extraArgs globalFlags = do - let verbosity = fromFlagOrDefault normal (configVerbosity configFlags) - (useSandbox, config) <- loadConfigOrSandboxConfig verbosity - globalFlags (configUserInstall configFlags) - targets <- readUserTargets verbosity extraArgs - - -- TODO: It'd be nice if 'cabal install' picked up the '-w' flag passed to - -- 'configure' when run inside a sandbox. Right now, running - -- - -- $ cabal sandbox init && cabal configure -w /path/to/ghc - -- && cabal build && cabal install - -- - -- performs the compilation twice unless you also pass -w to 'install'. - -- However, this is the same behaviour that 'cabal install' has in the normal - -- mode of operation, so we stick to it for consistency. - - let sandboxDistPref = case useSandbox of - NoSandbox -> NoFlag - UseSandbox sandboxDir -> Flag $ sandboxBuildDir sandboxDir - configFlags' = maybeForceTests installFlags' $ - savedConfigureFlags config `mappend` configFlags - configExFlags' = defaultConfigExFlags `mappend` - savedConfigureExFlags config `mappend` configExFlags - installFlags' = defaultInstallFlags `mappend` - savedInstallFlags config `mappend` installFlags - haddockFlags' = defaultHaddockFlags `mappend` - savedHaddockFlags config `mappend` haddockFlags - globalFlags' = savedGlobalFlags config `mappend` globalFlags - (comp, platform, conf) <- configCompilerAux' configFlags' - -- TODO: Redesign ProgramDB API to prevent such problems as #2241 in the future. 
- conf' <- configureAllKnownPrograms verbosity conf - - -- If we're working inside a sandbox and the user has set the -w option, we - -- may need to create a sandbox-local package DB for this compiler and add a - -- timestamp record for this compiler to the timestamp file. - configFlags'' <- case useSandbox of - NoSandbox -> configAbsolutePaths $ configFlags' - (UseSandbox sandboxDir) -> - return $ (setPackageDB sandboxDir comp platform configFlags') { - configDistPref = sandboxDistPref - } - - whenUsingSandbox useSandbox $ \sandboxDir -> do - initPackageDBIfNeeded verbosity configFlags'' comp conf' - - indexFile <- tryGetIndexFilePath config - maybeAddCompilerTimestampRecord verbosity sandboxDir indexFile - (compilerId comp) platform - - -- FIXME: Passing 'SandboxPackageInfo' to install unconditionally here means - -- that 'cabal install some-package' inside a sandbox will sometimes reinstall - -- modified add-source deps, even if they are not among the dependencies of - -- 'some-package'. This can also prevent packages that depend on older - -- versions of add-source'd packages from building (see #1362). - maybeWithSandboxPackageInfo verbosity configFlags'' globalFlags' - comp platform conf useSandbox $ \mSandboxPkgInfo -> - maybeWithSandboxDirOnSearchPath useSandbox $ - install verbosity - (configPackageDB' configFlags'') - (globalRepos globalFlags') - comp platform conf' - useSandbox mSandboxPkgInfo - globalFlags' configFlags'' configExFlags' - installFlags' haddockFlags' - targets - - where - -- '--run-tests' implies '--enable-tests'. 
- maybeForceTests installFlags' configFlags' = - if fromFlagOrDefault False (installRunTests installFlags') - then configFlags' { configTests = toFlag True } - else configFlags' - -testAction :: (TestFlags, BuildFlags, BuildExFlags) -> [String] -> GlobalFlags - -> IO () -testAction (testFlags, buildFlags, buildExFlags) extraArgs globalFlags = do - let verbosity = fromFlagOrDefault normal (testVerbosity testFlags) - distPref = fromFlagOrDefault (useDistPref defaultSetupScriptOptions) - (testDistPref testFlags) - setupOptions = defaultSetupScriptOptions { useDistPref = distPref } - buildFlags' = buildFlags { buildVerbosity = testVerbosity testFlags - , buildDistPref = testDistPref testFlags } - addConfigFlags = mempty { configTests = toFlag True } - checkFlags flags - | fromFlagOrDefault False (configTests flags) = Nothing - | otherwise = Just "Re-configuring with test suites enabled." - noAddSource = fromFlagOrDefault DontSkipAddSourceDepsCheck - (buildOnly buildExFlags) - - -- reconfigure also checks if we're in a sandbox and reinstalls add-source - -- deps if needed. - (useSandbox, config) <- reconfigure verbosity distPref addConfigFlags [] - globalFlags noAddSource - (buildNumJobs buildFlags') checkFlags - - -- the package was just configured, so the LBI must be available - lbi <- getPersistBuildConfig distPref - let pkgDescr = LBI.localPkgDescr lbi - nameTestsOnly = - LBI.foldComponent - (const Nothing) - (const Nothing) - (\t -> - if buildable (testBuildInfo t) - then Just (testName t) - else Nothing) - (const Nothing) - tests = mapMaybe nameTestsOnly $ LBI.pkgComponents pkgDescr - extraArgs' - | null extraArgs = tests - | otherwise = extraArgs - - if null tests - then notice verbosity "Package has no buildable test suites." 
- else do - maybeWithSandboxDirOnSearchPath useSandbox $ - build verbosity config distPref buildFlags' extraArgs' - - maybeWithSandboxDirOnSearchPath useSandbox $ - setupWrapper verbosity setupOptions Nothing - Cabal.testCommand (const testFlags) extraArgs' - -benchmarkAction :: (BenchmarkFlags, BuildFlags, BuildExFlags) - -> [String] -> GlobalFlags - -> IO () -benchmarkAction (benchmarkFlags, buildFlags, buildExFlags) - extraArgs globalFlags = do - let verbosity = fromFlagOrDefault normal - (benchmarkVerbosity benchmarkFlags) - distPref = fromFlagOrDefault (useDistPref defaultSetupScriptOptions) - (benchmarkDistPref benchmarkFlags) - setupOptions = defaultSetupScriptOptions { useDistPref = distPref } - buildFlags' = buildFlags - { buildVerbosity = benchmarkVerbosity benchmarkFlags - , buildDistPref = benchmarkDistPref benchmarkFlags } - addConfigFlags = mempty { configBenchmarks = toFlag True } - checkFlags flags - | fromFlagOrDefault False (configBenchmarks flags) = Nothing - | otherwise = Just "Re-configuring with benchmarks enabled." - noAddSource = fromFlagOrDefault DontSkipAddSourceDepsCheck - (buildOnly buildExFlags) - - -- reconfigure also checks if we're in a sandbox and reinstalls add-source - -- deps if needed. - (useSandbox, config) <- reconfigure verbosity distPref addConfigFlags [] - globalFlags noAddSource (buildNumJobs buildFlags') - checkFlags - - -- the package was just configured, so the LBI must be available - lbi <- getPersistBuildConfig distPref - let pkgDescr = LBI.localPkgDescr lbi - nameBenchsOnly = - LBI.foldComponent - (const Nothing) - (const Nothing) - (const Nothing) - (\b -> - if buildable (benchmarkBuildInfo b) - then Just (benchmarkName b) - else Nothing) - benchs = mapMaybe nameBenchsOnly $ LBI.pkgComponents pkgDescr - extraArgs' - | null extraArgs = benchs - | otherwise = extraArgs - - if null benchs - then notice verbosity "Package has no buildable benchmarks." 
- else do - maybeWithSandboxDirOnSearchPath useSandbox $ - build verbosity config distPref buildFlags' extraArgs' - - maybeWithSandboxDirOnSearchPath useSandbox $ - setupWrapper verbosity setupOptions Nothing - Cabal.benchmarkCommand (const benchmarkFlags) extraArgs' - -haddockAction :: HaddockFlags -> [String] -> GlobalFlags -> IO () -haddockAction haddockFlags extraArgs globalFlags = do - let verbosity = fromFlag (haddockVerbosity haddockFlags) - (_useSandbox, config) <- loadConfigOrSandboxConfig verbosity globalFlags mempty - let haddockFlags' = defaultHaddockFlags `mappend` - savedHaddockFlags config `mappend` haddockFlags - setupScriptOptions = defaultSetupScriptOptions { - useDistPref = fromFlagOrDefault - (useDistPref defaultSetupScriptOptions) - (haddockDistPref haddockFlags') - } - setupWrapper verbosity setupScriptOptions Nothing - haddockCommand (const haddockFlags') extraArgs - -cleanAction :: CleanFlags -> [String] -> GlobalFlags -> IO () -cleanAction cleanFlags extraArgs _globalFlags = - setupWrapper verbosity setupScriptOptions Nothing - cleanCommand (const cleanFlags) extraArgs - where - verbosity = fromFlagOrDefault normal (cleanVerbosity cleanFlags) - setupScriptOptions = defaultSetupScriptOptions { - useDistPref = fromFlagOrDefault - (useDistPref defaultSetupScriptOptions) - (cleanDistPref cleanFlags), - useWin32CleanHack = True - } - -listAction :: ListFlags -> [String] -> GlobalFlags -> IO () -listAction listFlags extraArgs globalFlags = do - let verbosity = fromFlag (listVerbosity listFlags) - (_useSandbox, config) <- loadConfigOrSandboxConfig verbosity globalFlags mempty - let configFlags' = savedConfigureFlags config - configFlags = configFlags' { - configPackageDBs = configPackageDBs configFlags' - `mappend` listPackageDBs listFlags - } - globalFlags' = savedGlobalFlags config `mappend` globalFlags - (comp, _, conf) <- configCompilerAux' configFlags - List.list verbosity - (configPackageDB' configFlags) - (globalRepos globalFlags') - comp - 
conf - listFlags - extraArgs - -infoAction :: InfoFlags -> [String] -> GlobalFlags -> IO () -infoAction infoFlags extraArgs globalFlags = do - let verbosity = fromFlag (infoVerbosity infoFlags) - targets <- readUserTargets verbosity extraArgs - (_useSandbox, config) <- loadConfigOrSandboxConfig verbosity globalFlags mempty - let configFlags' = savedConfigureFlags config - configFlags = configFlags' { - configPackageDBs = configPackageDBs configFlags' - `mappend` infoPackageDBs infoFlags - } - globalFlags' = savedGlobalFlags config `mappend` globalFlags - (comp, _, conf) <- configCompilerAuxEx configFlags - List.info verbosity - (configPackageDB' configFlags) - (globalRepos globalFlags') - comp - conf - globalFlags' - infoFlags - targets - -updateAction :: Flag Verbosity -> [String] -> GlobalFlags -> IO () -updateAction verbosityFlag extraArgs globalFlags = do - unless (null extraArgs) $ - die $ "'update' doesn't take any extra arguments: " ++ unwords extraArgs - let verbosity = fromFlag verbosityFlag - (_useSandbox, config) <- loadConfigOrSandboxConfig verbosity globalFlags NoFlag - let globalFlags' = savedGlobalFlags config `mappend` globalFlags - update verbosity (globalRepos globalFlags') - -upgradeAction :: (ConfigFlags, ConfigExFlags, InstallFlags, HaddockFlags) - -> [String] -> GlobalFlags -> IO () -upgradeAction _ _ _ = die $ - "Use the 'cabal install' command instead of 'cabal upgrade'.\n" - ++ "You can install the latest version of a package using 'cabal install'. " - ++ "The 'cabal upgrade' command has been removed because people found it " - ++ "confusing and it often led to broken packages.\n" - ++ "If you want the old upgrade behaviour then use the install command " - ++ "with the --upgrade-dependencies flag (but check first with --dry-run " - ++ "to see what would happen). This will try to pick the latest versions " - ++ "of all dependencies, rather than the usual behaviour of trying to pick " - ++ "installed versions of all dependencies. 
If you do use " - ++ "--upgrade-dependencies, it is recommended that you do not upgrade core " - ++ "packages (e.g. by using appropriate --constraint= flags)." - -fetchAction :: FetchFlags -> [String] -> GlobalFlags -> IO () -fetchAction fetchFlags extraArgs globalFlags = do - let verbosity = fromFlag (fetchVerbosity fetchFlags) - targets <- readUserTargets verbosity extraArgs - config <- loadConfig verbosity (globalConfigFile globalFlags) mempty - let configFlags = savedConfigureFlags config - globalFlags' = savedGlobalFlags config `mappend` globalFlags - (comp, platform, conf) <- configCompilerAux' configFlags - fetch verbosity - (configPackageDB' configFlags) - (globalRepos globalFlags') - comp platform conf globalFlags' fetchFlags - targets - -freezeAction :: FreezeFlags -> [String] -> GlobalFlags -> IO () -freezeAction freezeFlags _extraArgs globalFlags = do - let verbosity = fromFlag (freezeVerbosity freezeFlags) - (useSandbox, config) <- loadConfigOrSandboxConfig verbosity globalFlags mempty - let configFlags = savedConfigureFlags config - globalFlags' = savedGlobalFlags config `mappend` globalFlags - (comp, platform, conf) <- configCompilerAux' configFlags - - maybeWithSandboxPackageInfo verbosity configFlags globalFlags' - comp platform conf useSandbox $ \mSandboxPkgInfo -> - maybeWithSandboxDirOnSearchPath useSandbox $ - freeze verbosity - (configPackageDB' configFlags) - (globalRepos globalFlags') - comp platform conf - mSandboxPkgInfo - globalFlags' freezeFlags - -uploadAction :: UploadFlags -> [String] -> GlobalFlags -> IO () -uploadAction uploadFlags extraArgs globalFlags = do - let verbosity = fromFlag (uploadVerbosity uploadFlags) - config <- loadConfig verbosity (globalConfigFile globalFlags) mempty - let uploadFlags' = savedUploadFlags config `mappend` uploadFlags - globalFlags' = savedGlobalFlags config `mappend` globalFlags - tarfiles = extraArgs - checkTarFiles extraArgs - if fromFlag (uploadCheck uploadFlags') - then Upload.check verbosity 
tarfiles - else upload verbosity - (globalRepos globalFlags') - (flagToMaybe $ uploadUsername uploadFlags') - (flagToMaybe $ uploadPassword uploadFlags') - tarfiles - where - checkTarFiles tarfiles - | null tarfiles - = die "the 'upload' command expects one or more .tar.gz packages." - | not (null otherFiles) - = die $ "the 'upload' command expects only .tar.gz packages: " - ++ intercalate ", " otherFiles - | otherwise = sequence_ - [ do exists <- doesFileExist tarfile - unless exists $ die $ "file not found: " ++ tarfile - | tarfile <- tarfiles ] - - where otherFiles = filter (not . isTarGzFile) tarfiles - isTarGzFile file = case splitExtension file of - (file', ".gz") -> takeExtension file' == ".tar" - _ -> False - -checkAction :: Flag Verbosity -> [String] -> GlobalFlags -> IO () -checkAction verbosityFlag extraArgs _globalFlags = do - unless (null extraArgs) $ - die $ "'check' doesn't take any extra arguments: " ++ unwords extraArgs - allOk <- Check.check (fromFlag verbosityFlag) - unless allOk exitFailure - -formatAction :: Flag Verbosity -> [String] -> GlobalFlags -> IO () -formatAction verbosityFlag extraArgs _globalFlags = do - let verbosity = fromFlag verbosityFlag - path <- case extraArgs of - [] -> do cwd <- getCurrentDirectory - tryFindPackageDesc cwd - (p:_) -> return p - pkgDesc <- readPackageDescription verbosity path - -- Uses 'writeFileAtomic' under the hood. 
- writeGenericPackageDescription path pkgDesc - -sdistAction :: (SDistFlags, SDistExFlags) -> [String] -> GlobalFlags -> IO () -sdistAction (sdistFlags, sdistExFlags) extraArgs _globalFlags = do - unless (null extraArgs) $ - die $ "'sdist' doesn't take any extra arguments: " ++ unwords extraArgs - sdist sdistFlags sdistExFlags - -reportAction :: ReportFlags -> [String] -> GlobalFlags -> IO () -reportAction reportFlags extraArgs globalFlags = do - unless (null extraArgs) $ - die $ "'report' doesn't take any extra arguments: " ++ unwords extraArgs - - let verbosity = fromFlag (reportVerbosity reportFlags) - config <- loadConfig verbosity (globalConfigFile globalFlags) mempty - let globalFlags' = savedGlobalFlags config `mappend` globalFlags - reportFlags' = savedReportFlags config `mappend` reportFlags - - Upload.report verbosity (globalRepos globalFlags') - (flagToMaybe $ reportUsername reportFlags') - (flagToMaybe $ reportPassword reportFlags') - -runAction :: (BuildFlags, BuildExFlags) -> [String] -> GlobalFlags -> IO () -runAction (buildFlags, buildExFlags) extraArgs globalFlags = do - let verbosity = fromFlagOrDefault normal (buildVerbosity buildFlags) - distPref = fromFlagOrDefault (useDistPref defaultSetupScriptOptions) - (buildDistPref buildFlags) - noAddSource = fromFlagOrDefault DontSkipAddSourceDepsCheck - (buildOnly buildExFlags) - - -- reconfigure also checks if we're in a sandbox and reinstalls add-source - -- deps if needed. 
- (useSandbox, config) <- reconfigure verbosity distPref mempty [] - globalFlags noAddSource (buildNumJobs buildFlags) - (const Nothing) - - lbi <- getPersistBuildConfig distPref - (exe, exeArgs) <- splitRunArgs lbi extraArgs - - maybeWithSandboxDirOnSearchPath useSandbox $ - build verbosity config distPref buildFlags ["exe:" ++ exeName exe] - - maybeWithSandboxDirOnSearchPath useSandbox $ - run verbosity lbi exe exeArgs - -getAction :: GetFlags -> [String] -> GlobalFlags -> IO () -getAction getFlags extraArgs globalFlags = do - let verbosity = fromFlag (getVerbosity getFlags) - targets <- readUserTargets verbosity extraArgs - config <- loadConfig verbosity (globalConfigFile globalFlags) mempty - let globalFlags' = savedGlobalFlags config `mappend` globalFlags - get verbosity - (globalRepos (savedGlobalFlags config)) - globalFlags' - getFlags - targets - -unpackAction :: GetFlags -> [String] -> GlobalFlags -> IO () -unpackAction getFlags extraArgs globalFlags = do - getAction getFlags extraArgs globalFlags - -initAction :: InitFlags -> [String] -> GlobalFlags -> IO () -initAction initFlags _extraArgs globalFlags = do - let verbosity = fromFlag (initVerbosity initFlags) - (_useSandbox, config) <- loadConfigOrSandboxConfig verbosity globalFlags mempty - let configFlags = savedConfigureFlags config - (comp, _, conf) <- configCompilerAux' configFlags - initCabal verbosity - (configPackageDB' configFlags) - comp - conf - initFlags - -sandboxAction :: SandboxFlags -> [String] -> GlobalFlags -> IO () -sandboxAction sandboxFlags extraArgs globalFlags = do - let verbosity = fromFlag (sandboxVerbosity sandboxFlags) - case extraArgs of - -- Basic sandbox commands. 
- ["init"] -> sandboxInit verbosity sandboxFlags globalFlags - ["delete"] -> sandboxDelete verbosity sandboxFlags globalFlags - ("add-source":extra) -> do - when (noExtraArgs extra) $ - die "The 'sandbox add-source' command expects at least one argument" - sandboxAddSource verbosity extra sandboxFlags globalFlags - ("delete-source":extra) -> do - when (noExtraArgs extra) $ - die "The 'sandbox delete-source' command expects \ - \at least one argument" - sandboxDeleteSource verbosity extra sandboxFlags globalFlags - ["list-sources"] -> sandboxListSources verbosity sandboxFlags globalFlags - - -- More advanced commands. - ("hc-pkg":extra) -> do - when (noExtraArgs extra) $ - die $ "The 'sandbox hc-pkg' command expects at least one argument" - sandboxHcPkg verbosity sandboxFlags globalFlags extra - ["buildopts"] -> die "Not implemented!" - - -- Hidden commands. - ["dump-pkgenv"] -> dumpPackageEnvironment verbosity sandboxFlags globalFlags - - -- Error handling. - [] -> die $ "Please specify a subcommand (see 'help sandbox')" - _ -> die $ "Unknown 'sandbox' subcommand: " ++ unwords extraArgs - - where - noExtraArgs = (<1) . length - -execAction :: ExecFlags -> [String] -> GlobalFlags -> IO () -execAction execFlags extraArgs globalFlags = do - let verbosity = fromFlag (execVerbosity execFlags) - (useSandbox, config) <- loadConfigOrSandboxConfig verbosity globalFlags - mempty - let configFlags = savedConfigureFlags config - (comp, platform, conf) <- configCompilerAux' configFlags - exec verbosity useSandbox comp platform conf extraArgs - -userConfigAction :: UserConfigFlags -> [String] -> GlobalFlags -> IO () -userConfigAction ucflags extraArgs globalFlags = do - let verbosity = fromFlag (userConfigVerbosity ucflags) - case extraArgs of - ("diff":_) -> mapM_ putStrLn =<< userConfigDiff globalFlags - ("update":_) -> userConfigUpdate verbosity globalFlags - -- Error handling. 
- [] -> die $ "Please specify a subcommand (see 'help user-config')" - _ -> die $ "Unknown 'user-config' subcommand: " ++ unwords extraArgs - - --- | See 'Distribution.Client.Install.withWin32SelfUpgrade' for details. --- -win32SelfUpgradeAction :: Win32SelfUpgradeFlags -> [String] -> GlobalFlags - -> IO () -win32SelfUpgradeAction selfUpgradeFlags (pid:path:_extraArgs) _globalFlags = do - let verbosity = fromFlag (win32SelfUpgradeVerbosity selfUpgradeFlags) - Win32SelfUpgrade.deleteOldExeFile verbosity (read pid) path -win32SelfUpgradeAction _ _ _ = return () Binary files /tmp/tmpVBg9ir/PzIulGlZus/cabal-install-1.22-1.22.6.0/mtl-2.2.1.tar.gz and /tmp/tmpVBg9ir/pYw8BlU4Qo/cabal-install-1.22-1.22.9.0/mtl-2.2.1.tar.gz differ Binary files /tmp/tmpVBg9ir/PzIulGlZus/cabal-install-1.22-1.22.6.0/network-2.6.0.2.tar.gz and /tmp/tmpVBg9ir/pYw8BlU4Qo/cabal-install-1.22-1.22.9.0/network-2.6.0.2.tar.gz differ Binary files /tmp/tmpVBg9ir/PzIulGlZus/cabal-install-1.22-1.22.6.0/network-uri-2.6.0.1.tar.gz and /tmp/tmpVBg9ir/pYw8BlU4Qo/cabal-install-1.22-1.22.9.0/network-uri-2.6.0.1.tar.gz differ Binary files /tmp/tmpVBg9ir/PzIulGlZus/cabal-install-1.22-1.22.6.0/old-locale-1.0.0.7.tar.gz and /tmp/tmpVBg9ir/pYw8BlU4Qo/cabal-install-1.22-1.22.9.0/old-locale-1.0.0.7.tar.gz differ Binary files /tmp/tmpVBg9ir/PzIulGlZus/cabal-install-1.22-1.22.6.0/old-time-1.1.0.3.tar.gz and /tmp/tmpVBg9ir/pYw8BlU4Qo/cabal-install-1.22-1.22.9.0/old-time-1.1.0.3.tar.gz differ Binary files /tmp/tmpVBg9ir/PzIulGlZus/cabal-install-1.22-1.22.6.0/parsec-3.1.7.tar.gz and /tmp/tmpVBg9ir/pYw8BlU4Qo/cabal-install-1.22-1.22.9.0/parsec-3.1.7.tar.gz differ Binary files /tmp/tmpVBg9ir/PzIulGlZus/cabal-install-1.22-1.22.6.0/random-1.1.tar.gz and /tmp/tmpVBg9ir/pYw8BlU4Qo/cabal-install-1.22-1.22.9.0/random-1.1.tar.gz differ diff -Nru cabal-install-1.22-1.22.6.0/README.md cabal-install-1.22-1.22.9.0/README.md --- cabal-install-1.22-1.22.6.0/README.md 2015-06-17 08:11:26.000000000 +0000 +++ 
cabal-install-1.22-1.22.9.0/README.md 2016-06-02 07:15:44.000000000 +0000 @@ -1,155 +1,182 @@ -The cabal-install package +The Cabal library package ========================= See the [Cabal web site] for more information. -The `cabal-install` package provides a command line tool named `cabal`. -It uses the [Cabal] library and provides a user interface to the -Cabal/[Hackage] build automation and package management system. It can -build and install both local and remote packages, including -dependencies. +If you also want the `cabal` command-line program, you need the +[cabal-install] package in addition to this library. -[Cabal web site]: http://www.haskell.org/cabal/ -[Cabal]: ../Cabal/README.md +[cabal-install]: ../cabal-install/README.md -Installing the `cabal` command-line tool -======================================== +Installing the Cabal library +============================ -The `cabal-install` package requires a number of other packages, most of -which come with a standard GHC installation. It requires the [network] -package, which is sometimes packaged separately by Linux distributions; -for example, on Debian or Ubuntu, it is located in the -"libghc6-network-dev" package. +If you already have the `cabal` program +--------------------------------------- -`cabal` requires a few other Haskell packages that are not always -installed. The exact list is specified in the [.cabal] file or in the -[bootstrap.sh] file. All these packages are available from [Hackage]. +In this case run: -Note that on some Unix systems you may need to install an additional -zlib development package using your system package manager; for example, -on Debian or Ubuntu, it is located in the "zlib1g-dev" package; on -Fedora, it is located in the "zlib-devel" package. It is required -because the Haskell zlib package uses the system zlib C library and -header files. 
+ $ cabal install -The `cabal-install` package is now part of the [Haskell Platform], so you -do not usually need to install it separately. However, if you are -starting from a minimal GHC installation, you need to install -`cabal-install` manually. Since it is an ordinary Cabal package, -`cabal-install` can be built the standard way; to facilitate this, the -process has been partially automated. It is described below. +However, if you do not have an existing version of the `cabal` program, +you first must install the Cabal library. To avoid this bootstrapping +problem, you can install the Cabal library directly as described below. -[.cabal]: cabal-install.cabal -[network]: http://hackage.haskell.org/package/network -[Haskell Platform]: http://www.haskell.org/platform/ -Quick start on Unix-like systems --------------------------------- +Installing as a user (no root or administrator access) +------------------------------------------------------ -As a convenience for users on Unix-like systems, there is a -[bootstrap.sh] script that will download and install each of -`cabal-install`'s dependencies in turn. + ghc -threaded --make Setup + ./Setup configure --user + ./Setup build + ./Setup install - $ ./bootstrap.sh +Note the use of the `--user` flag at the configure step. -It will download and install the dependencies. The script will install the -library packages (vanilla, profiling and shared) into `$HOME/.cabal/` and the -`cabal` program into `$HOME/.cabal/bin/`. If you don't want to install profiling -and shared versions of the libraries, use +Compiling 'Setup' rather than using `runghc Setup` is much faster and +works on Windows. For all packages other than Cabal itself, it is fine +to use `runghc`. - $ EXTRA_CONFIGURE_OPTS="" ./bootstrap.sh +This will install into `$HOME/.cabal/` on Unix and into +`Documents and Settings\$User\Application Data\cabal\` on Windows. +If you want to install elsewhere, use the `--prefix=` flag at the +configure step. 
-You then have the choice either to place `$HOME/.cabal/bin` on your -`$PATH` or move the `cabal` program to somewhere on your `$PATH`. Next, -you can get the latest list of packages by running: - $ cabal update +Installing as root or Administrator +----------------------------------- -This will also create a default configuration file, if it does not -already exist, at `$HOME/.cabal/config`. + ghc -threaded --make Setup + ./Setup configure + ./Setup build + sudo ./Setup install -By default, `cabal` will install programs to `$HOME/.cabal/bin`. If you -do not want to add this directory to your `$PATH`, you can change -the setting in the config file; for example, you could use the -following: +Compiling Setup rather than using `runghc Setup` is much faster and +works on Windows. For all packages other than Cabal itself, it is fine +to use `runghc`. - symlink-bindir: $HOME/bin +This will install into `/usr/local` on Unix, and on Windows it will +install into `$ProgramFiles/Haskell`. If you want to install elsewhere, +use the `--prefix=` flag at the configure step. -Quick start on Windows systems ------------------------------- +Using older versions of GHC and Cabal +====================================== -For Windows users, a precompiled program ([cabal.exe]) is provided. -Download and put it somewhere on your `%PATH%` (for example, -`C:\Program Files\Haskell\bin`.) +It is recommended that you leave any pre-existing version of Cabal +installed. In particular, it is *essential* you keep the version that +came with GHC itself, since other installed packages require it (for +instance, the "ghc" API package). -Next, you can get the latest list of packages by running: +Prior to GHC 6.4.2, however, GHC did not deal particularly well with +having multiple versions of packages installed at once. 
So if you are +using GHC 6.4.1 or older and you have an older version of Cabal +installed, you should probably remove it by running: - $ cabal update + $ ghc-pkg unregister Cabal -This will also create a default configuration file (if it does not -already exist) at -`C:\Documents and Settings\%USERNAME%\Application Data\cabal\config`. +or, if you had Cabal installed only for your user account, run: -[cabal.exe]: http://www.haskell.org/cabal/release/cabal-install-latest/ + $ ghc-pkg unregister Cabal --user -Using `cabal` -============= +The `filepath` dependency +========================= -There are two sets of commands: commands for working with a local -project build tree and those for working with packages distributed -from [Hackage]. +Cabal uses the [filepath] package, so it must be installed first. +GHC version 6.6.1 and later come with `filepath`, however, earlier +versions do not by default. If you do not already have `filepath`, +you need to install it. You can use any existing version of Cabal to do +that. If you have neither Cabal nor `filepath`, it is slightly +harder but still possible. -For the list of the full set of commands and flags for each command, -run: +Unpack Cabal and `filepath` into separate directories. For example: - $ cabal help + tar -xzf filepath-1.1.0.0.tar.gz + tar -xzf Cabal-1.6.0.0.tar.gz + # rename to make the following instructions simpler: + mv filepath-1.1.0.0/ filepath/ + mv Cabal-1.6.0.0/ Cabal/ -Commands for developers for local build trees ---------------------------------------------- + cd Cabal + ghc -i../filepath -cpp --make Setup.hs -o ../filepath/setup + cd ../filepath/ + ./setup configure --user + ./setup build + ./setup install -The commands for local project build trees are almost the same as the -`runghc Setup` command-line interface you may already be familiar with. -In particular, it has the following commands: +This installs `filepath` so that you can install Cabal with the normal +method. 
- * `cabal configure` - * `cabal build` - * `cabal haddock` - * `cabal clean` - * `cabal sdist` +[filepath]: http://hackage.haskell.org/package/filepath -The `install` command is somewhat different; it is an all-in-one -operation. If you run `cabal install` in your build tree, it will -configure, build, and install. It takes all the flags that `configure` -takes such as `--global` and `--prefix`. +More information +================ -In addition, `cabal` will download and install any dependencies that are -not already installed. It can also rebuild packages to ensure a -consistent set of dependencies. +Please see the [Cabal web site] for the [user guide] and [API +documentation]. There is additional information available on the +[development wiki]. +[user guide]: http://www.haskell.org/cabal/users-guide +[API documentation]: http://www.haskell.org/cabal/release/cabal-latest/doc/API/Cabal/Distribution-Simple.html +[development wiki]: https://github.com/haskell/cabal/wiki -Commands for released Hackage packages --------------------------------------- - $ cabal update +Bugs +==== -This command gets the latest list of packages from the [Hackage] server. -On occasion, this command must be run manually--for instance, if you -want to install a newly released package. +Please report bugs and feature requests to Cabal's [bug tracker]. - $ cabal install xmonad -This command installs one or more named packages, and all their -dependencies, from Hackage. By default, it installs the latest available -version; however, you may specify exact versions or version ranges. For -example, `cabal install alex-2.2` or `cabal install parsec < 3`. +Your help +--------- - $ cabal list xml +To help Cabal's development, it is enormously helpful to know from +Cabal's users what their most pressing problems are with Cabal and +[Hackage]. You may have a favourite Cabal bug or limitation. Look at +Cabal's [bug tracker]. Ensure that the problem is reported there and +adequately described. 
Subscribe to the issue's notifications to +discuss requirements and keep informed on progress.
cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Cabal.cabal cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Cabal.cabal --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Cabal.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Cabal.cabal 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,328 @@ +name: Cabal +version: 1.22.8.0 +copyright: 2003-2006, Isaac Jones + 2005-2011, Duncan Coutts +license: BSD3 +license-file: LICENSE +author: Isaac Jones + Duncan Coutts +maintainer: cabal-devel@haskell.org +homepage: http://www.haskell.org/cabal/ +bug-reports: https://github.com/haskell/cabal/issues +synopsis: A framework for packaging Haskell software +description: + The Haskell Common Architecture for Building Applications and + Libraries: a framework defining a common interface for authors to more + easily build their Haskell applications in a portable way. + . + The Haskell Cabal is part of a larger infrastructure for distributing, + organizing, and cataloging Haskell libraries and tools. +category: Distribution +cabal-version: >=1.10 +build-type: Simple +-- Even though we do use the default Setup.lhs it's vital to bootstrapping +-- that we build Setup.lhs using our own local Cabal source code. 
+ +extra-source-files: + README.md tests/README.md changelog + doc/developing-packages.markdown doc/index.markdown + doc/installing-packages.markdown + doc/misc.markdown + + -- Generated with 'misc/gen-extra-source-files.sh' & 'M-x sort-lines': + tests/PackageTests/BenchmarkExeV10/Foo.hs + tests/PackageTests/BenchmarkExeV10/benchmarks/bench-Foo.hs + tests/PackageTests/BenchmarkExeV10/my.cabal + tests/PackageTests/BenchmarkOptions/BenchmarkOptions.cabal + tests/PackageTests/BenchmarkOptions/test-BenchmarkOptions.hs + tests/PackageTests/BenchmarkStanza/my.cabal + tests/PackageTests/BuildDeps/GlobalBuildDepsNotAdditive1/GlobalBuildDepsNotAdditive1.cabal + tests/PackageTests/BuildDeps/GlobalBuildDepsNotAdditive1/MyLibrary.hs + tests/PackageTests/BuildDeps/GlobalBuildDepsNotAdditive2/GlobalBuildDepsNotAdditive2.cabal + tests/PackageTests/BuildDeps/GlobalBuildDepsNotAdditive2/lemon.hs + tests/PackageTests/BuildDeps/InternalLibrary0/MyLibrary.hs + tests/PackageTests/BuildDeps/InternalLibrary0/my.cabal + tests/PackageTests/BuildDeps/InternalLibrary0/programs/lemon.hs + tests/PackageTests/BuildDeps/InternalLibrary1/MyLibrary.hs + tests/PackageTests/BuildDeps/InternalLibrary1/my.cabal + tests/PackageTests/BuildDeps/InternalLibrary1/programs/lemon.hs + tests/PackageTests/BuildDeps/InternalLibrary2/MyLibrary.hs + tests/PackageTests/BuildDeps/InternalLibrary2/my.cabal + tests/PackageTests/BuildDeps/InternalLibrary2/programs/lemon.hs + tests/PackageTests/BuildDeps/InternalLibrary2/to-install/MyLibrary.hs + tests/PackageTests/BuildDeps/InternalLibrary2/to-install/my.cabal + tests/PackageTests/BuildDeps/InternalLibrary3/MyLibrary.hs + tests/PackageTests/BuildDeps/InternalLibrary3/my.cabal + tests/PackageTests/BuildDeps/InternalLibrary3/programs/lemon.hs + tests/PackageTests/BuildDeps/InternalLibrary3/to-install/MyLibrary.hs + tests/PackageTests/BuildDeps/InternalLibrary3/to-install/my.cabal + tests/PackageTests/BuildDeps/InternalLibrary4/MyLibrary.hs + 
tests/PackageTests/BuildDeps/InternalLibrary4/my.cabal + tests/PackageTests/BuildDeps/InternalLibrary4/programs/lemon.hs + tests/PackageTests/BuildDeps/InternalLibrary4/to-install/MyLibrary.hs + tests/PackageTests/BuildDeps/InternalLibrary4/to-install/my.cabal + tests/PackageTests/BuildDeps/SameDepsAllRound/MyLibrary.hs + tests/PackageTests/BuildDeps/SameDepsAllRound/SameDepsAllRound.cabal + tests/PackageTests/BuildDeps/SameDepsAllRound/lemon.hs + tests/PackageTests/BuildDeps/SameDepsAllRound/pineapple.hs + tests/PackageTests/BuildDeps/TargetSpecificDeps1/MyLibrary.hs + tests/PackageTests/BuildDeps/TargetSpecificDeps1/lemon.hs + tests/PackageTests/BuildDeps/TargetSpecificDeps1/my.cabal + tests/PackageTests/BuildDeps/TargetSpecificDeps2/MyLibrary.hs + tests/PackageTests/BuildDeps/TargetSpecificDeps2/lemon.hs + tests/PackageTests/BuildDeps/TargetSpecificDeps2/my.cabal + tests/PackageTests/BuildDeps/TargetSpecificDeps3/MyLibrary.hs + tests/PackageTests/BuildDeps/TargetSpecificDeps3/lemon.hs + tests/PackageTests/BuildDeps/TargetSpecificDeps3/my.cabal + tests/PackageTests/BuildTestSuiteDetailedV09/Dummy.hs + tests/PackageTests/BuildTestSuiteDetailedV09/my.cabal + tests/PackageTests/CMain/Bar.hs + tests/PackageTests/CMain/Setup.hs + tests/PackageTests/CMain/foo.c + tests/PackageTests/CMain/my.cabal + tests/PackageTests/DeterministicAr/Lib.hs + tests/PackageTests/DeterministicAr/my.cabal + tests/PackageTests/EmptyLib/empty/empty.cabal + tests/PackageTests/Haddock/CPP.hs + tests/PackageTests/Haddock/Literate.lhs + tests/PackageTests/Haddock/NoCPP.hs + tests/PackageTests/Haddock/Simple.hs + tests/PackageTests/Haddock/my.cabal + tests/PackageTests/OrderFlags/Foo.hs + tests/PackageTests/OrderFlags/my.cabal + tests/PackageTests/PathsModule/Executable/Main.hs + tests/PackageTests/PathsModule/Executable/my.cabal + tests/PackageTests/PathsModule/Library/my.cabal + tests/PackageTests/PreProcess/Foo.hsc + tests/PackageTests/PreProcess/Main.hs + 
tests/PackageTests/PreProcess/my.cabal + tests/PackageTests/ReexportedModules/ReexportedModules.cabal + tests/PackageTests/TemplateHaskell/dynamic/Exe.hs + tests/PackageTests/TemplateHaskell/dynamic/Lib.hs + tests/PackageTests/TemplateHaskell/dynamic/TH.hs + tests/PackageTests/TemplateHaskell/dynamic/my.cabal + tests/PackageTests/TemplateHaskell/profiling/Exe.hs + tests/PackageTests/TemplateHaskell/profiling/Lib.hs + tests/PackageTests/TemplateHaskell/profiling/TH.hs + tests/PackageTests/TemplateHaskell/profiling/my.cabal + tests/PackageTests/TemplateHaskell/vanilla/Exe.hs + tests/PackageTests/TemplateHaskell/vanilla/Lib.hs + tests/PackageTests/TemplateHaskell/vanilla/TH.hs + tests/PackageTests/TemplateHaskell/vanilla/my.cabal + tests/PackageTests/TestOptions/TestOptions.cabal + tests/PackageTests/TestOptions/test-TestOptions.hs + tests/PackageTests/TestStanza/my.cabal + tests/PackageTests/TestSuiteExeV10/Foo.hs + tests/PackageTests/TestSuiteExeV10/my.cabal + tests/PackageTests/TestSuiteExeV10/tests/test-Foo.hs + tests/Setup.hs + tests/hackage/check.sh + tests/hackage/download.sh + tests/hackage/unpack.sh + tests/misc/ghc-supported-languages.hs + +source-repository head + type: git + location: https://github.com/haskell/cabal/ + subdir: Cabal + +flag bundled-binary-generic + default: False + +library + build-depends: + base >= 4.4 && < 5, + deepseq >= 1.3 && < 1.5, + filepath >= 1 && < 1.5, + directory >= 1 && < 1.3, + process >= 1.1.0.1 && < 1.3, + time >= 1.1 && < 1.6, + containers >= 0.1 && < 0.6, + array >= 0.1 && < 0.6, + pretty >= 1 && < 1.2, + bytestring >= 0.9 + + if flag(bundled-binary-generic) + build-depends: binary >= 0.5 && < 0.7 + else + build-depends: binary >= 0.7 && < 0.9 + + -- Needed for GHC.Generics before GHC 7.6 + if impl(ghc < 7.6) + build-depends: ghc-prim >= 0.2 && < 0.3 + + if !os(windows) + build-depends: + unix >= 2.0 && < 2.8 + + ghc-options: -Wall -fno-ignore-asserts -fwarn-tabs + + exposed-modules: + Distribution.Compat.CreatePipe + 
Distribution.Compat.Environment + Distribution.Compat.Exception + Distribution.Compat.ReadP + Distribution.Compiler + Distribution.InstalledPackageInfo + Distribution.License + Distribution.Make + Distribution.ModuleName + Distribution.Package + Distribution.PackageDescription + Distribution.PackageDescription.Check + Distribution.PackageDescription.Configuration + Distribution.PackageDescription.Parse + Distribution.PackageDescription.PrettyPrint + Distribution.PackageDescription.Utils + Distribution.ParseUtils + Distribution.ReadE + Distribution.Simple + Distribution.Simple.Bench + Distribution.Simple.Build + Distribution.Simple.Build.Macros + Distribution.Simple.Build.PathsModule + Distribution.Simple.BuildPaths + Distribution.Simple.BuildTarget + Distribution.Simple.CCompiler + Distribution.Simple.Command + Distribution.Simple.Compiler + Distribution.Simple.Configure + Distribution.Simple.GHC + Distribution.Simple.GHCJS + Distribution.Simple.Haddock + Distribution.Simple.HaskellSuite + Distribution.Simple.Hpc + Distribution.Simple.Install + Distribution.Simple.InstallDirs + Distribution.Simple.JHC + Distribution.Simple.LHC + Distribution.Simple.LocalBuildInfo + Distribution.Simple.PackageIndex + Distribution.Simple.PreProcess + Distribution.Simple.PreProcess.Unlit + Distribution.Simple.Program + Distribution.Simple.Program.Ar + Distribution.Simple.Program.Builtin + Distribution.Simple.Program.Db + Distribution.Simple.Program.Find + Distribution.Simple.Program.GHC + Distribution.Simple.Program.HcPkg + Distribution.Simple.Program.Hpc + Distribution.Simple.Program.Ld + Distribution.Simple.Program.Run + Distribution.Simple.Program.Script + Distribution.Simple.Program.Strip + Distribution.Simple.Program.Types + Distribution.Simple.Register + Distribution.Simple.Setup + Distribution.Simple.SrcDist + Distribution.Simple.Test + Distribution.Simple.Test.ExeV10 + Distribution.Simple.Test.LibV09 + Distribution.Simple.Test.Log + Distribution.Simple.UHC + 
Distribution.Simple.UserHooks + Distribution.Simple.Utils + Distribution.System + Distribution.TestSuite + Distribution.Text + Distribution.Utils.NubList + Distribution.Verbosity + Distribution.Version + Language.Haskell.Extension + + other-modules: + Distribution.Compat.Binary + Distribution.Compat.CopyFile + Distribution.Compat.TempFile + Distribution.GetOpt + Distribution.Simple.GHC.Internal + Distribution.Simple.GHC.IPI641 + Distribution.Simple.GHC.IPI642 + Distribution.Simple.GHC.ImplInfo + Paths_Cabal + + if flag(bundled-binary-generic) + other-modules: + Distribution.Compat.Binary.Class + Distribution.Compat.Binary.Generic + + default-language: Haskell98 + default-extensions: CPP + +-- Small, fast running tests. +test-suite unit-tests + type: exitcode-stdio-1.0 + hs-source-dirs: tests + other-modules: + UnitTests.Distribution.Compat.CreatePipe + UnitTests.Distribution.Compat.ReadP + UnitTests.Distribution.Utils.NubList + main-is: UnitTests.hs + build-depends: + base, + test-framework, + test-framework-hunit, + test-framework-quickcheck2, + HUnit, + QuickCheck < 2.9, + Cabal + ghc-options: -Wall + default-language: Haskell98 + +-- Large, system tests that build packages. 
+test-suite package-tests + type: exitcode-stdio-1.0 + main-is: PackageTests.hs + other-modules: + PackageTests.BenchmarkExeV10.Check + PackageTests.BenchmarkOptions.Check + PackageTests.BenchmarkStanza.Check + PackageTests.BuildDeps.GlobalBuildDepsNotAdditive1.Check + PackageTests.BuildDeps.GlobalBuildDepsNotAdditive2.Check + PackageTests.BuildDeps.InternalLibrary0.Check + PackageTests.BuildDeps.InternalLibrary1.Check + PackageTests.BuildDeps.InternalLibrary2.Check + PackageTests.BuildDeps.InternalLibrary3.Check + PackageTests.BuildDeps.InternalLibrary4.Check + PackageTests.BuildDeps.SameDepsAllRound.Check + PackageTests.BuildDeps.TargetSpecificDeps1.Check + PackageTests.BuildDeps.TargetSpecificDeps2.Check + PackageTests.BuildDeps.TargetSpecificDeps3.Check + PackageTests.BuildTestSuiteDetailedV09.Check + PackageTests.CMain.Check + PackageTests.DeterministicAr.Check + PackageTests.EmptyLib.Check + PackageTests.Haddock.Check + PackageTests.OrderFlags.Check + PackageTests.PackageTester + PackageTests.PathsModule.Executable.Check + PackageTests.PathsModule.Library.Check + PackageTests.PreProcess.Check + PackageTests.ReexportedModules.Check + PackageTests.TemplateHaskell.Check + PackageTests.TestOptions.Check + PackageTests.TestStanza.Check + PackageTests.TestSuiteExeV10.Check + hs-source-dirs: tests + build-depends: + base, + containers, + test-framework, + test-framework-quickcheck2 >= 0.2.12, + test-framework-hunit, + HUnit, + QuickCheck >= 2.1.0.1 && < 2.9, + Cabal, + process, + directory, + filepath, + extensible-exceptions, + bytestring, + regex-posix, + old-time + if !os(windows) + build-depends: unix + ghc-options: -Wall + default-extensions: CPP + default-language: Haskell98 diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/changelog cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/changelog --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/changelog 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/changelog 
Allow cabal to be used inside cabal exec environments. + * hpc: support multiple "ways" (e.g. profiling and vanilla).
Invoke 'tar' with '--format ustar' if possible in 'sdist' (#1903).
Add new license-files field for additional licenses
Set dylib-install-name for dynamic libs on OSX
all the other components too) + * Added new known license MIT and versioned GPL and LGPL + * More liberal package version range syntax + * Package registration files are now UTF8 + * Support for LHC and JHC-0.7.2 + * Deprecated RecordPuns extension in favour of NamedFieldPuns + * Deprecated PatternSignatures extension in favor of ScopedTypeVariables + * New VersionRange semantic view as a sequence of intervals + * Improved package quality checks + * Minor simplification in a couple Setup.hs hooks + * Beginnings of a unit level testsuite using QuickCheck + * Various bug fixes + * Various internal cleanups + +1.6.0.2 Duncan Coutts February 2009 + * New configure-time check for C headers and libraries + * Added language extensions present in ghc-6.10 + * Added support for NamedFieldPuns extension in ghc-6.8 + * Fix in configure step for ghc-6.6 on Windows + * Fix warnings in Path_pkgname.hs module on Windows + * Fix for exotic flags in ld-options field + * Fix for using pkg-config in a package with a lib and an executable + * Fix for building haddock docs for exes that use the Paths module + * Fix for installing header files in subdirectories + * Fix for the case of building profiling libs but not ordinary libs + * Fix read-only attribute of installed files on Windows + * Ignore ghc -threaded flag when profiling in ghc-6.8 and older + +1.6.0.1 Duncan Coutts October 2008 + * Export a compat function to help alex and happy + +1.6.0.0 Duncan Coutts October 2008 + * Support for ghc-6.10 + * Source control repositories can now be specified in .cabal files + * Bug report URLs can be now specified in .cabal files + * Wildcards now allowed in data-files and extra-source-files fields + * New syntactic sugar for dependencies "build-depends: foo ==1.2.*" + * New cabal_macros.h provides macros to test versions of dependencies + * Relocatable bindists now possible on unix via env vars + * New 'exposed' field allows packages to be not exposed by default + * Install dir flags can 
cc-options are no longer propagated to dependent packages
All .cabal files are treated as UTF-8 and must be valid + * Many checks added for common mistakes + * New --package-db= option for specific package databases + * Many internal changes to support cabal-install + * Stricter parsing for version strings, eg disallows "1.05"
+ * Many internal structural improvements and code cleanups + +1.2.4.0 Duncan Coutts June 2008 + * Released with GHC 6.8.3 + * Backported several fixes and minor improvements from Cabal-1.4 + * Use a default colour scheme for sources with hscolour >=1.9 + * Support --hyperlink-source for Haddock >= 2.0 + * Fix for running in a non-writable directory + * Add OSX -framework arguments when linking executables + * Updates to the user guide + * Allow build-tools names to include + and _ + * Export autoconfUserHooks and simpleUserHooks + * Export ccLdOptionsBuildInfo for Setup.hs scripts + * Export unionBuildInfo and make BuildInfo an instance of Monoid + * Fix to allow the 'main-is' module to use a pre-processor + +1.2.3.0 Duncan Coutts Nov 2007 + * Released with GHC 6.8.2 + * Includes full list of GHC language extensions + * Fix infamous "dist/conftest.c" bug + * Fix configure --interfacedir= + * Find ld.exe on Windows correctly + * Export PreProcessor constructor and mkSimplePreProcessor + * Fix minor bug in unlit code + * Fix some markup in the haddock docs + +1.2.2.0 Duncan Coutts Nov 2007 + * Released with GHC 6.8.1 + * Support haddock-2.0 + * Support building DSOs with GHC + * Require reconfiguring if the .cabal file has changed + * Fix os(windows) configuration test + * Fix building documentation + * Fix building packages on Solaris + * Other minor bug fixes + +1.2.1 Duncan Coutts Oct 2007 + * To be included in GHC 6.8.1 + * New field "cpp-options" used when preprocessing Haskell modules + * Fixes for hsc2hs when using ghc + * C source code gets compiled with -O2 by default + * OS aliases, to allow os(windows) rather than requiring os(mingw32) + * Fix cleaning of 'stub' files + * Fix cabal-setup, command line ui that replaces "runhaskell Setup.hs" + * Build docs even when dependent packages docs are missing + * Allow the --html-dir to be specified at configure time + * Fix building with ghc-6.2 + * Other minor bug fixes and build fixes + +1.2.0 Duncan Coutts Sept 
2007 + * To be included in GHC 6.8.x + * New configurations feature + * Can make haddock docs link to hilighted sources (with hscolour) + * New flag to allow linking to haddock docs on the web + * Supports pkg-config + * New field "build-tools" for tool dependencies + * Improved c2hs support + * Preprocessor output no longer clutters source dirs + * Separate "includes" and "install-includes" fields + * Makefile command to generate makefiles for building libs with GHC + * New --docdir configure flag + * Generic --with-prog --prog-args configure flags + * Better default installation paths on Windows + * Install paths can be specified relative to each other + * License files now installed + * Initial support for NHC (incomplete) + * Consistent treatment of verbosity + * Reduced verbosity of configure step by default + * Improved helpfulness of output messages + * Help output now clearer and fits in 80 columns + * New setup register --gen-pkg-config flag for distros + * Major internal refactoring, hooks api has changed + * Dozens of bug fixes + +1.1.6.2 Duncan Coutts May 2007 + * Released with GHC 6.6.1 + * Handle windows text file encoding for .cabal files + * Fix compiling a executable for profiling that uses Template Haskell + * Other minor bug fixes and user guide clarifications + +1.1.6.1 Duncan Coutts Oct 2006 + * fix unlit code + * fix escaping in register.sh + +1.1.6 Duncan Coutts Oct 2006 + * Released with GHC 6.6 + * Added support for hoogle + * Allow profiling and normal builds of libs to be chosen indepentantly + * Default installation directories on Win32 changed + * Register haddock docs with ghc-pkg + * Get haddock to make hyperlinks to dependent package docs + * Added BangPatterns language extension + * Various bug fixes + +1.1.4 Duncan Coutts May 2006 + * Released with GHC 6.4.2 + * Better support for packages that need to install header files + * cabal-setup added, but not installed by default yet + * Implemented "setup register --inplace" + * Have 
packages exposed by default with ghc-6.2 + * It is no longer necessary to run 'configure' before 'clean' or 'sdist' + * Added support for ghc's -split-objs + * Initial support for JHC + * Ignore extension fields in .cabal files (fields begining with "x-") + * Some changes to command hooks API to improve consistency + * Hugs support improvements + * Added GeneralisedNewtypeDeriving language extension + * Added cabal-version field + * Support hidden modules with haddock + * Internal code refactoring + * More bug fixes + +1.1.3 Isaac Jones Sept 2005 + * WARNING: Interfaces not documented in the user's guide may + change in future releases. + * Move building of GHCi .o libs to the build phase rather than + register phase. (from Duncan Coutts) + * Use .tar.gz for source package extension + * Uses GHC instead of cpphs if the latter is not available + * Added experimental "command hooks" which completely override the + default behavior of a command. + * Some bugfixes + +1.1.1 Isaac Jones July 2005 + * WARNING: Interfaces not documented in the user's guide may + change in future releases. + * Handles recursive modules for GHC 6.2 and GHC 6.4. + * Added "setup test" command (Used with UserHook) + * implemented handling of _stub.{c,h,o} files + * Added support for profiling + * Changed install prefix of libraries (pref/pkgname-version + to prefix/pkgname-version/compname-version) + * Added pattern guards as a language extension + * Moved some functionality to Language.Haskell.Extension + * Register / unregister .bat files for windows + * Exposed more of the API + * Added support for the hide-all-packages flag in GHC > 6.4 + * Several bug fixes + +1.0 Isaac Jones March 11 2005 + * Released with GHC 6.4, Hugs March 2005, and nhc98 1.18 + * Some sanity checking + +0.5 Isaac Jones Wed Feb 19 2005 + * WARNING: this is a pre-release and the interfaces are still + likely to change until we reach a 1.0 release. 
+ * Hooks interfaces changed + * Added preprocessors to user hooks + * No more executable-modules or hidden-modules. Use + "other-modules" instead. + * Certain fields moved into BuildInfo, much refactoring + * extra-libs -> extra-libraries + * Added --gen-script to configure and unconfigure. + * modules-ghc (etc) now ghc-modules (etc) + * added new fields including "synopsis" + * Lots of bug fixes + * spaces can sometimes be used instead of commas + * A user manual has appeared (Thanks, ross!) + * for ghc 6.4, configures versionsed depends properly + * more features to ./setup haddock + +0.4 Isaac Jones Sun Jan 16 2005 + + * Much thanks to all the awesome fptools hackers who have been + working hard to build the Haskell Cabal! + + * Interface Changes: + + ** WARNING: this is a pre-release and the interfaces are still + likely to change until we reach a 1.0 release. + + ** Instead of Package.description, you should name your + description files .cabal. In particular, we suggest + that you name it .cabal, but this is not enforced + (yet). Multiple .cabal files in the same directory is an error, + at least for now. + + ** ./setup install --install-prefix is gone. Use ./setup copy + --copy-prefix instead. + + ** The "Modules" field is gone. Use "hidden-modules", + "exposed-modules", and "executable-modules". + + ** Build-depends is now a package-only field, and can't go into + executable stanzas. Build-depends is a package-to-package + relationship. + + ** Some new fields. Use the Source. + + * New Features + + ** Cabal is now included as a package in the CVS version of + fptools. That means it'll be released as "-package Cabal" in + future versions of the compilers, and if you are a bleeding-edge + user, you can grab it from the CVS repository with the compilers. + + ** Hugs compatibility and NHC98 compatibility should both be + improved. + + ** Hooks Interface / Autoconf compatibility: Most of the hooks + interface is hidden for now, because it's not finalized. 
I have + exposed only "defaultMainWithHooks" and "defaultUserHooks". This + allows you to use a ./configure script to preprocess + "foo.buildinfo", which gets merged with "foo.cabal". In future + releases, we'll expose UserHooks, but we're definitely going to + change the interface to those. The interface to the two functions + I've exposed should stay the same, though. + + ** ./setup haddock is a baby feature which pre-processes the + source code with hscpp and runs haddock on it. This is brand new + and hardly tested, so you get to knock it around and see what you + think. + + ** Some commands now actually implement verbosity. + + ** The preprocessors have been tested a bit more, and seem to work + OK. Please give feedback if you use these. + +0.3 Isaac Jones Sun Jan 16 2005 + * Unstable snapshot release + * From now on, stable releases are even. + +0.2 Isaac Jones + + * Adds more HUGS support and preprocessor support. diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Compat/Binary/Class.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Compat/Binary/Class.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Compat/Binary/Class.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Compat/Binary/Class.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,530 @@ +{-# LANGUAGE CPP, FlexibleContexts #-} +{-# LANGUAGE Trustworthy #-} +{-# LANGUAGE DefaultSignatures #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Compat.Binary.Class +-- Copyright : Lennart Kolmodin +-- License : BSD3-style (see LICENSE) +-- +-- Maintainer : Lennart Kolmodin +-- Stability : unstable +-- Portability : portable to Hugs and GHC. Requires the FFI and some flexible instances +-- +-- Typeclass and instances for binary serialization. 
+-- +----------------------------------------------------------------------------- + +module Distribution.Compat.Binary.Class ( + + -- * The Binary class + Binary(..) + + -- * Support for generics + , GBinary(..) + + ) where + +import Data.Word + +import Data.Binary.Put +import Data.Binary.Get + +import Control.Monad +import Foreign + +import Data.ByteString.Lazy (ByteString) +import qualified Data.ByteString.Lazy as L + +import Data.Char (chr,ord) +import Data.List (unfoldr) + +-- And needed for the instances: +import qualified Data.ByteString as B +import qualified Data.Map as Map +import qualified Data.Set as Set +import qualified Data.IntMap as IntMap +import qualified Data.IntSet as IntSet +import qualified Data.Ratio as R + +import qualified Data.Tree as T + +import Data.Array.Unboxed + +import GHC.Generics + +-- +-- This isn't available in older Hugs or older GHC +-- +#if __GLASGOW_HASKELL__ >= 606 +import qualified Data.Sequence as Seq +import qualified Data.Foldable as Fold +#endif + +------------------------------------------------------------------------ + +class GBinary f where + gput :: f t -> Put + gget :: Get (f t) + +-- | The 'Binary' class provides 'put' and 'get', methods to encode and +-- decode a Haskell value to a lazy 'ByteString'. It mirrors the 'Read' and +-- 'Show' classes for textual representation of Haskell types, and is +-- suitable for serialising Haskell values to disk, over the network. +-- +-- For decoding and generating simple external binary formats (e.g. C +-- structures), Binary may be used, but in general is not suitable +-- for complex protocols. Instead use the 'Put' and 'Get' primitives +-- directly. +-- +-- Instances of Binary should satisfy the following property: +-- +-- > decode . encode == id +-- +-- That is, the 'get' and 'put' methods should be the inverse of each +-- other. A range of instances are provided for basic Haskell types. +-- +class Binary t where + -- | Encode a value in the Put monad. 
+ put :: t -> Put + -- | Decode a value in the Get monad + get :: Get t + + default put :: (Generic t, GBinary (Rep t)) => t -> Put + put = gput . from + + default get :: (Generic t, GBinary (Rep t)) => Get t + get = to `fmap` gget + +------------------------------------------------------------------------ +-- Simple instances + +-- The () type need never be written to disk: values of singleton type +-- can be reconstructed from the type alone +instance Binary () where + put () = return () + get = return () + +-- Bools are encoded as a byte in the range 0 .. 1 +instance Binary Bool where + put = putWord8 . fromIntegral . fromEnum + get = liftM (toEnum . fromIntegral) getWord8 + +-- Values of type 'Ordering' are encoded as a byte in the range 0 .. 2 +instance Binary Ordering where + put = putWord8 . fromIntegral . fromEnum + get = liftM (toEnum . fromIntegral) getWord8 + +------------------------------------------------------------------------ +-- Words and Ints + +-- Words8s are written as bytes +instance Binary Word8 where + put = putWord8 + get = getWord8 + +-- Words16s are written as 2 bytes in big-endian (network) order +instance Binary Word16 where + put = putWord16be + get = getWord16be + +-- Words32s are written as 4 bytes in big-endian (network) order +instance Binary Word32 where + put = putWord32be + get = getWord32be + +-- Words64s are written as 8 bytes in big-endian (network) order +instance Binary Word64 where + put = putWord64be + get = getWord64be + +-- Int8s are written as a single byte. 
+instance Binary Int8 where + put i = put (fromIntegral i :: Word8) + get = liftM fromIntegral (get :: Get Word8) + +-- Int16s are written as a 2 bytes in big endian format +instance Binary Int16 where + put i = put (fromIntegral i :: Word16) + get = liftM fromIntegral (get :: Get Word16) + +-- Int32s are written as a 4 bytes in big endian format +instance Binary Int32 where + put i = put (fromIntegral i :: Word32) + get = liftM fromIntegral (get :: Get Word32) + +-- Int64s are written as a 4 bytes in big endian format +instance Binary Int64 where + put i = put (fromIntegral i :: Word64) + get = liftM fromIntegral (get :: Get Word64) + +------------------------------------------------------------------------ + +-- Words are are written as Word64s, that is, 8 bytes in big endian format +instance Binary Word where + put i = put (fromIntegral i :: Word64) + get = liftM fromIntegral (get :: Get Word64) + +-- Ints are are written as Int64s, that is, 8 bytes in big endian format +instance Binary Int where + put i = put (fromIntegral i :: Int64) + get = liftM fromIntegral (get :: Get Int64) + +------------------------------------------------------------------------ +-- +-- Portable, and pretty efficient, serialisation of Integer +-- + +-- Fixed-size type for a subset of Integer +type SmallInt = Int32 + +-- Integers are encoded in two ways: if they fit inside a SmallInt, +-- they're written as a byte tag, and that value. If the Integer value +-- is too large to fit in a SmallInt, it is written as a byte array, +-- along with a sign and length field. 
+ +instance Binary Integer where + + {-# INLINE put #-} + put n | n >= lo && n <= hi = do + putWord8 0 + put (fromIntegral n :: SmallInt) -- fast path + where + lo = fromIntegral (minBound :: SmallInt) :: Integer + hi = fromIntegral (maxBound :: SmallInt) :: Integer + + put n = do + putWord8 1 + put sign + put (unroll (abs n)) -- unroll the bytes + where + sign = fromIntegral (signum n) :: Word8 + + {-# INLINE get #-} + get = do + tag <- get :: Get Word8 + case tag of + 0 -> liftM fromIntegral (get :: Get SmallInt) + _ -> do sign <- get + bytes <- get + let v = roll bytes + return $! if sign == (1 :: Word8) then v else - v + +-- +-- Fold and unfold an Integer to and from a list of its bytes +-- +unroll :: Integer -> [Word8] +unroll = unfoldr step + where + step 0 = Nothing + step i = Just (fromIntegral i, i `shiftR` 8) + +roll :: [Word8] -> Integer +roll = foldr unstep 0 + where + unstep b a = a `shiftL` 8 .|. fromIntegral b + +{- + +-- +-- An efficient, raw serialisation for Integer (GHC only) +-- + +-- TODO This instance is not architecture portable. GMP stores numbers as +-- arrays of machine sized words, so the byte format is not portable across +-- architectures with different endianness and word size. + +import Data.ByteString.Base (toForeignPtr,unsafePackAddress, memcpy) +import GHC.Base hiding (ord, chr) +import GHC.Prim +import GHC.Ptr (Ptr(..)) +import GHC.IOBase (IO(..)) + +instance Binary Integer where + put (S# i) = putWord8 0 >> put (I# i) + put (J# s ba) = do + putWord8 1 + put (I# s) + put (BA ba) + + get = do + b <- getWord8 + case b of + 0 -> do (I# i#) <- get + return (S# i#) + _ -> do (I# s#) <- get + (BA a#) <- get + return (J# s# a#) + +instance Binary ByteArray where + + -- Pretty safe. + put (BA ba) = + let sz = sizeofByteArray# ba -- (primitive) in *bytes* + addr = byteArrayContents# ba + bs = unsafePackAddress (I# sz) addr + in put bs -- write as a ByteString. easy, yay! + + -- Pretty scary. 
Should be quick though + get = do + (fp, off, n@(I# sz)) <- liftM toForeignPtr get -- so decode a ByteString + assert (off == 0) $ return $ unsafePerformIO $ do + (MBA arr) <- newByteArray sz -- and copy it into a ByteArray# + let to = byteArrayContents# (unsafeCoerce# arr) -- urk, is this safe? + withForeignPtr fp $ \from -> memcpy (Ptr to) from (fromIntegral n) + freezeByteArray arr + +-- wrapper for ByteArray# +data ByteArray = BA {-# UNPACK #-} !ByteArray# +data MBA = MBA {-# UNPACK #-} !(MutableByteArray# RealWorld) + +newByteArray :: Int# -> IO MBA +newByteArray sz = IO $ \s -> + case newPinnedByteArray# sz s of { (# s', arr #) -> + (# s', MBA arr #) } + +freezeByteArray :: MutableByteArray# RealWorld -> IO ByteArray +freezeByteArray arr = IO $ \s -> + case unsafeFreezeByteArray# arr s of { (# s', arr' #) -> + (# s', BA arr' #) } + +-} + +instance (Binary a,Integral a) => Binary (R.Ratio a) where + put r = put (R.numerator r) >> put (R.denominator r) + get = liftM2 (R.%) get get + +------------------------------------------------------------------------ + +-- Char is serialised as UTF-8 +instance Binary Char where + put a | c <= 0x7f = put (fromIntegral c :: Word8) + | c <= 0x7ff = do put (0xc0 .|. y) + put (0x80 .|. z) + | c <= 0xffff = do put (0xe0 .|. x) + put (0x80 .|. y) + put (0x80 .|. z) + | c <= 0x10ffff = do put (0xf0 .|. w) + put (0x80 .|. x) + put (0x80 .|. y) + put (0x80 .|. z) + | otherwise = error "Not a valid Unicode code point" + where + c = ord a + z, y, x, w :: Word8 + z = fromIntegral (c .&. 0x3f) + y = fromIntegral (shiftR c 6 .&. 0x3f) + x = fromIntegral (shiftR c 12 .&. 0x3f) + w = fromIntegral (shiftR c 18 .&. 0x7) + + get = do + let getByte = liftM (fromIntegral :: Word8 -> Int) get + shiftL6 = flip shiftL 6 :: Int -> Int + w <- getByte + r <- case () of + _ | w < 0x80 -> return w + | w < 0xe0 -> do + x <- liftM (xor 0x80) getByte + return (x .|. 
shiftL6 (xor 0xc0 w)) + | w < 0xf0 -> do + x <- liftM (xor 0x80) getByte + y <- liftM (xor 0x80) getByte + return (y .|. shiftL6 (x .|. shiftL6 + (xor 0xe0 w))) + | otherwise -> do + x <- liftM (xor 0x80) getByte + y <- liftM (xor 0x80) getByte + z <- liftM (xor 0x80) getByte + return (z .|. shiftL6 (y .|. shiftL6 + (x .|. shiftL6 (xor 0xf0 w)))) + return $! chr r + +------------------------------------------------------------------------ +-- Instances for the first few tuples + +instance (Binary a, Binary b) => Binary (a,b) where + put (a,b) = put a >> put b + get = liftM2 (,) get get + +instance (Binary a, Binary b, Binary c) => Binary (a,b,c) where + put (a,b,c) = put a >> put b >> put c + get = liftM3 (,,) get get get + +instance (Binary a, Binary b, Binary c, Binary d) => Binary (a,b,c,d) where + put (a,b,c,d) = put a >> put b >> put c >> put d + get = liftM4 (,,,) get get get get + +instance (Binary a, Binary b, Binary c, Binary d, Binary e) => Binary (a,b,c,d,e) where + put (a,b,c,d,e) = put a >> put b >> put c >> put d >> put e + get = liftM5 (,,,,) get get get get get + +-- +-- and now just recurse: +-- + +instance (Binary a, Binary b, Binary c, Binary d, Binary e, Binary f) + => Binary (a,b,c,d,e,f) where + put (a,b,c,d,e,f) = put (a,(b,c,d,e,f)) + get = do (a,(b,c,d,e,f)) <- get ; return (a,b,c,d,e,f) + +instance (Binary a, Binary b, Binary c, Binary d, Binary e, Binary f, Binary g) + => Binary (a,b,c,d,e,f,g) where + put (a,b,c,d,e,f,g) = put (a,(b,c,d,e,f,g)) + get = do (a,(b,c,d,e,f,g)) <- get ; return (a,b,c,d,e,f,g) + +instance (Binary a, Binary b, Binary c, Binary d, Binary e, + Binary f, Binary g, Binary h) + => Binary (a,b,c,d,e,f,g,h) where + put (a,b,c,d,e,f,g,h) = put (a,(b,c,d,e,f,g,h)) + get = do (a,(b,c,d,e,f,g,h)) <- get ; return (a,b,c,d,e,f,g,h) + +instance (Binary a, Binary b, Binary c, Binary d, Binary e, + Binary f, Binary g, Binary h, Binary i) + => Binary (a,b,c,d,e,f,g,h,i) where + put (a,b,c,d,e,f,g,h,i) = put 
(a,(b,c,d,e,f,g,h,i)) + get = do (a,(b,c,d,e,f,g,h,i)) <- get ; return (a,b,c,d,e,f,g,h,i) + +instance (Binary a, Binary b, Binary c, Binary d, Binary e, + Binary f, Binary g, Binary h, Binary i, Binary j) + => Binary (a,b,c,d,e,f,g,h,i,j) where + put (a,b,c,d,e,f,g,h,i,j) = put (a,(b,c,d,e,f,g,h,i,j)) + get = do (a,(b,c,d,e,f,g,h,i,j)) <- get ; return (a,b,c,d,e,f,g,h,i,j) + +------------------------------------------------------------------------ +-- Container types + +instance Binary a => Binary [a] where + put l = put (length l) >> mapM_ put l + get = do n <- get :: Get Int + getMany n + +-- | 'getMany n' get 'n' elements in order, without blowing the stack. +getMany :: Binary a => Int -> Get [a] +getMany n = go [] n + where + go xs 0 = return $! reverse xs + go xs i = do x <- get + -- we must seq x to avoid stack overflows due to laziness in + -- (>>=) + x `seq` go (x:xs) (i-1) +{-# INLINE getMany #-} + +instance (Binary a) => Binary (Maybe a) where + put Nothing = putWord8 0 + put (Just x) = putWord8 1 >> put x + get = do + w <- getWord8 + case w of + 0 -> return Nothing + _ -> liftM Just get + +instance (Binary a, Binary b) => Binary (Either a b) where + put (Left a) = putWord8 0 >> put a + put (Right b) = putWord8 1 >> put b + get = do + w <- getWord8 + case w of + 0 -> liftM Left get + _ -> liftM Right get + +------------------------------------------------------------------------ +-- ByteStrings (have specially efficient instances) + +instance Binary B.ByteString where + put bs = do put (B.length bs) + putByteString bs + get = get >>= getByteString + +-- +-- Using old versions of fps, this is a type synonym, and non portable +-- +-- Requires 'flexible instances' +-- +instance Binary ByteString where + put bs = do put (fromIntegral (L.length bs) :: Int) + putLazyByteString bs + get = get >>= getLazyByteString + +------------------------------------------------------------------------ +-- Maps and Sets + +instance (Binary a) => Binary (Set.Set a) where + 
put s = put (Set.size s) >> mapM_ put (Set.toAscList s) + get = liftM Set.fromDistinctAscList get + +instance (Binary k, Binary e) => Binary (Map.Map k e) where + put m = put (Map.size m) >> mapM_ put (Map.toAscList m) + get = liftM Map.fromDistinctAscList get + +instance Binary IntSet.IntSet where + put s = put (IntSet.size s) >> mapM_ put (IntSet.toAscList s) + get = liftM IntSet.fromDistinctAscList get + +instance (Binary e) => Binary (IntMap.IntMap e) where + put m = put (IntMap.size m) >> mapM_ put (IntMap.toAscList m) + get = liftM IntMap.fromDistinctAscList get + +------------------------------------------------------------------------ +-- Queues and Sequences + +#if __GLASGOW_HASKELL__ >= 606 +-- +-- This is valid Hugs, but you need the most recent Hugs +-- + +instance (Binary e) => Binary (Seq.Seq e) where + put s = put (Seq.length s) >> Fold.mapM_ put s + get = do n <- get :: Get Int + rep Seq.empty n get + where rep xs 0 _ = return $! xs + rep xs n g = xs `seq` n `seq` do + x <- g + rep (xs Seq.|> x) (n-1) g + +#endif + +------------------------------------------------------------------------ +-- Floating point + +instance Binary Double where + put d = put (decodeFloat d) + get = liftM2 encodeFloat get get + +instance Binary Float where + put f = put (decodeFloat f) + get = liftM2 encodeFloat get get + +------------------------------------------------------------------------ +-- Trees + +instance (Binary e) => Binary (T.Tree e) where + put (T.Node r s) = put r >> put s + get = liftM2 T.Node get get + +------------------------------------------------------------------------ +-- Arrays + +instance (Binary i, Ix i, Binary e) => Binary (Array i e) where + put a = do + put (bounds a) + put (rangeSize $ bounds a) -- write the length + mapM_ put (elems a) -- now the elems. + get = do + bs <- get + n <- get -- read the length + xs <- getMany n -- now the elems. + return (listArray bs xs) + +-- +-- The IArray UArray e constraint is non portable. 
Requires flexible instances +-- +instance (Binary i, Ix i, Binary e, IArray UArray e) => Binary (UArray i e) where + put a = do + put (bounds a) + put (rangeSize $ bounds a) -- now write the length + mapM_ put (elems a) + get = do + bs <- get + n <- get + xs <- getMany n + return (listArray bs xs) diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Compat/Binary/Generic.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Compat/Binary/Generic.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Compat/Binary/Generic.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Compat/Binary/Generic.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,128 @@ +{-# LANGUAGE BangPatterns, CPP, FlexibleInstances, KindSignatures, + ScopedTypeVariables, Trustworthy, TypeOperators, TypeSynonymInstances #-} +{-# OPTIONS_GHC -fno-warn-orphans #-} + +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Compat.Binary.Generic +-- Copyright : Bryan O'Sullivan +-- License : BSD3-style (see LICENSE) +-- +-- Maintainer : Bryan O'Sullivan +-- Stability : unstable +-- Portability : Only works with GHC 7.2 and newer +-- +-- Instances for supporting GHC generics. 
+-- +----------------------------------------------------------------------------- +module Distribution.Compat.Binary.Generic + ( + ) where + +import Control.Applicative +import Distribution.Compat.Binary.Class +import Data.Binary.Get +import Data.Binary.Put +import Data.Bits +import Data.Word +import GHC.Generics + +-- Type without constructors +instance GBinary V1 where + gput _ = return () + gget = return undefined + +-- Constructor without arguments +instance GBinary U1 where + gput U1 = return () + gget = return U1 + +-- Product: constructor with parameters +instance (GBinary a, GBinary b) => GBinary (a :*: b) where + gput (x :*: y) = gput x >> gput y + gget = (:*:) <$> gget <*> gget + +-- Metadata (constructor name, etc) +instance GBinary a => GBinary (M1 i c a) where + gput = gput . unM1 + gget = M1 <$> gget + +-- Constants, additional parameters, and rank-1 recursion +instance Binary a => GBinary (K1 i a) where + gput = put . unK1 + gget = K1 <$> get + +-- Borrowed from the cereal package. + +-- The following GBinary instance for sums has support for serializing +-- types with up to 2^64-1 constructors. It will use the minimal +-- number of bytes needed to encode the constructor. For example when +-- a type has 2^8 constructors or less it will use a single byte to +-- encode the constructor. If it has 2^16 constructors or less it will +-- use two bytes, and so on till 2^64-1. 
+ +#define GUARD(WORD) (size - 1) <= fromIntegral (maxBound :: WORD) +#define PUTSUM(WORD) GUARD(WORD) = putSum (0 :: WORD) (fromIntegral size) +#define GETSUM(WORD) GUARD(WORD) = (get :: Get WORD) >>= checkGetSum (fromIntegral size) + +instance ( GSum a, GSum b + , GBinary a, GBinary b + , SumSize a, SumSize b) => GBinary (a :+: b) where + gput | PUTSUM(Word8) | PUTSUM(Word16) | PUTSUM(Word32) | PUTSUM(Word64) + | otherwise = sizeError "encode" size + where + size = unTagged (sumSize :: Tagged (a :+: b) Word64) + + gget | GETSUM(Word8) | GETSUM(Word16) | GETSUM(Word32) | GETSUM(Word64) + | otherwise = sizeError "decode" size + where + size = unTagged (sumSize :: Tagged (a :+: b) Word64) + +sizeError :: Show size => String -> size -> error +sizeError s size = + error $ "Can't " ++ s ++ " a type with " ++ show size ++ " constructors" + +------------------------------------------------------------------------ + +checkGetSum :: (Ord word, Num word, Bits word, GSum f) + => word -> word -> Get (f a) +checkGetSum size code | code < size = getSum code size + | otherwise = fail "Unknown encoding for constructor" +{-# INLINE checkGetSum #-} + +class GSum f where + getSum :: (Ord word, Num word, Bits word) => word -> word -> Get (f a) + putSum :: (Num w, Bits w, Binary w) => w -> w -> f a -> Put + +instance (GSum a, GSum b, GBinary a, GBinary b) => GSum (a :+: b) where + getSum !code !size | code < sizeL = L1 <$> getSum code sizeL + | otherwise = R1 <$> getSum (code - sizeL) sizeR + where + sizeL = size `shiftR` 1 + sizeR = size - sizeL + + putSum !code !size s = case s of + L1 x -> putSum code sizeL x + R1 x -> putSum (code + sizeL) sizeR x + where + sizeL = size `shiftR` 1 + sizeR = size - sizeL + +instance GBinary a => GSum (C1 c a) where + getSum _ _ = gget + + putSum !code _ x = put code *> gput x + +------------------------------------------------------------------------ + +class SumSize f where + sumSize :: Tagged f Word64 + +newtype Tagged (s :: * -> *) b = Tagged 
{unTagged :: b} + +instance (SumSize a, SumSize b) => SumSize (a :+: b) where + sumSize = Tagged $ unTagged (sumSize :: Tagged a Word64) + + unTagged (sumSize :: Tagged b Word64) + +instance SumSize (C1 c a) where + sumSize = Tagged 1 diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Compat/Binary.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Compat/Binary.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Compat/Binary.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Compat/Binary.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,52 @@ +{-# LANGUAGE CPP #-} + +#ifndef MIN_VERSION_binary +#define MIN_VERSION_binary(x, y, z) 0 +#endif + +module Distribution.Compat.Binary + ( decodeOrFailIO +#if __GLASGOW_HASKELL__ >= 708 || MIN_VERSION_binary(0,7,0) + , module Data.Binary +#else + , Binary(..) + , decode, encode +#endif + ) where + +import Control.Exception (ErrorCall(..), catch, evaluate) +import Data.ByteString.Lazy (ByteString) + +#if __GLASGOW_HASKELL__ < 706 +import Prelude hiding (catch) +#endif + +#if __GLASGOW_HASKELL__ >= 708 || MIN_VERSION_binary(0,7,0) + +import Data.Binary + +#else + +import Data.Binary.Get +import Data.Binary.Put + +import Distribution.Compat.Binary.Class +import Distribution.Compat.Binary.Generic () + +-- | Decode a value from a lazy ByteString, reconstructing the original structure. +-- +decode :: Binary a => ByteString -> a +decode = runGet get + +-- | Encode a value using binary serialisation to a lazy ByteString. +-- +encode :: Binary a => a -> ByteString +encode = runPut . put +{-# INLINE encode #-} + +#endif + +decodeOrFailIO :: Binary a => ByteString -> IO (Either String a) +decodeOrFailIO bs = + catch (evaluate (decode bs) >>= return . 
Right) + $ \(ErrorCall str) -> return $ Left str diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Compat/CopyFile.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Compat/CopyFile.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Compat/CopyFile.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Compat/CopyFile.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,109 @@ +{-# LANGUAGE CPP #-} +{-# OPTIONS_HADDOCK hide #-} +module Distribution.Compat.CopyFile ( + copyFile, + copyFileChanged, + filesEqual, + copyOrdinaryFile, + copyExecutableFile, + setFileOrdinary, + setFileExecutable, + setDirOrdinary, + ) where + + +import Control.Monad + ( when, unless ) +import Control.Exception + ( bracket, bracketOnError, throwIO ) +import qualified Data.ByteString.Lazy as BSL +import Distribution.Compat.Exception + ( catchIO ) +import System.IO.Error + ( ioeSetLocation ) +import System.Directory + ( doesFileExist, renameFile, removeFile ) +import Distribution.Compat.TempFile + ( openBinaryTempFile ) +import System.FilePath + ( takeDirectory ) +import System.IO + ( openBinaryFile, IOMode(ReadMode), hClose, hGetBuf, hPutBuf + , withBinaryFile ) +import Foreign + ( allocaBytes ) + +#ifndef mingw32_HOST_OS +import System.Posix.Internals (withFilePath) +import System.Posix.Types + ( FileMode ) +import System.Posix.Internals + ( c_chmod ) +import Foreign.C + ( throwErrnoPathIfMinus1_ ) +#endif /* mingw32_HOST_OS */ + +copyOrdinaryFile, copyExecutableFile :: FilePath -> FilePath -> IO () +copyOrdinaryFile src dest = copyFile src dest >> setFileOrdinary dest +copyExecutableFile src dest = copyFile src dest >> setFileExecutable dest + +setFileOrdinary, setFileExecutable, setDirOrdinary :: FilePath -> IO () +#ifndef mingw32_HOST_OS +setFileOrdinary path = setFileMode path 0o644 -- file perms -rw-r--r-- +setFileExecutable path = setFileMode path 0o755 -- file perms -rwxr-xr-x + 
+setFileMode :: FilePath -> FileMode -> IO () +setFileMode name m = + withFilePath name $ \s -> do + throwErrnoPathIfMinus1_ "setFileMode" name (c_chmod s m) +#else +setFileOrdinary _ = return () +setFileExecutable _ = return () +#endif +-- This happens to be true on Unix and currently on Windows too: +setDirOrdinary = setFileExecutable + +-- | Copies a file to a new destination. +-- Often you should use `copyFileChanged` instead. +copyFile :: FilePath -> FilePath -> IO () +copyFile fromFPath toFPath = + copy + `catchIO` (\ioe -> throwIO (ioeSetLocation ioe "copyFile")) + where copy = bracket (openBinaryFile fromFPath ReadMode) hClose $ \hFrom -> + bracketOnError openTmp cleanTmp $ \(tmpFPath, hTmp) -> + do allocaBytes bufferSize $ copyContents hFrom hTmp + hClose hTmp + renameFile tmpFPath toFPath + openTmp = openBinaryTempFile (takeDirectory toFPath) ".copyFile.tmp" + cleanTmp (tmpFPath, hTmp) = do + hClose hTmp `catchIO` \_ -> return () + removeFile tmpFPath `catchIO` \_ -> return () + bufferSize = 4096 + + copyContents hFrom hTo buffer = do + count <- hGetBuf hFrom buffer bufferSize + when (count > 0) $ do + hPutBuf hTo buffer count + copyContents hFrom hTo buffer + +-- | Like `copyFile`, but does not touch the target if source and destination +-- are already byte-identical. This is recommended as it is useful for +-- time-stamp based recompilation avoidance. +copyFileChanged :: FilePath -> FilePath -> IO () +copyFileChanged src dest = do + equal <- filesEqual src dest + unless equal $ copyFile src dest + +-- | Checks if two files are byte-identical. +-- Returns False if either of the files do not exist. +filesEqual :: FilePath -> FilePath -> IO Bool +filesEqual f1 f2 = do + ex1 <- doesFileExist f1 + ex2 <- doesFileExist f2 + if not (ex1 && ex2) then return False else do + + withBinaryFile f1 ReadMode $ \h1 -> + withBinaryFile f2 ReadMode $ \h2 -> do + c1 <- BSL.hGetContents h1 + c2 <- BSL.hGetContents h2 + return $! 
c1 == c2 diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Compat/CreatePipe.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Compat/CreatePipe.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Compat/CreatePipe.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Compat/CreatePipe.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,62 @@ +{-# LANGUAGE CPP, ForeignFunctionInterface #-} +module Distribution.Compat.CreatePipe (createPipe) where + +import System.IO (Handle, hSetEncoding, localeEncoding) + +-- The mingw32_HOST_OS CPP macro is GHC-specific +#if mingw32_HOST_OS +import Control.Exception (onException) +import Foreign.C.Error (throwErrnoIfMinus1_) +import Foreign.C.Types (CInt(..), CUInt(..)) +import Foreign.Ptr (Ptr) +import Foreign.Marshal.Array (allocaArray) +import Foreign.Storable (peek, peekElemOff) +import GHC.IO.FD (mkFD) +import GHC.IO.Device (IODeviceType(Stream)) +import GHC.IO.Handle.FD (mkHandleFromFD) +import System.IO (IOMode(ReadMode, WriteMode)) +#elif ghcjs_HOST_OS +#else +import System.Posix.IO (fdToHandle) +import qualified System.Posix.IO as Posix +#endif + +createPipe :: IO (Handle, Handle) +-- The mingw32_HOST_OS CPP macro is GHC-specific +#if mingw32_HOST_OS +createPipe = do + (readfd, writefd) <- allocaArray 2 $ \ pfds -> do + throwErrnoIfMinus1_ "_pipe" $ c__pipe pfds 2 ({- _O_BINARY -} 32768) + readfd <- peek pfds + writefd <- peekElemOff pfds 1 + return (readfd, writefd) + (do readh <- fdToHandle readfd ReadMode + writeh <- fdToHandle writefd WriteMode + hSetEncoding readh localeEncoding + hSetEncoding writeh localeEncoding + return (readh, writeh)) `onException` (close readfd >> close writefd) + where + fdToHandle :: CInt -> IOMode -> IO Handle + fdToHandle fd mode = do + (fd', deviceType) <- mkFD fd mode (Just (Stream, 0, 0)) False False + mkHandleFromFD fd' deviceType "" mode False Nothing + + close :: CInt -> IO () + close = 
throwErrnoIfMinus1_ "_close" . c__close + +foreign import ccall "io.h _pipe" c__pipe :: + Ptr CInt -> CUInt -> CInt -> IO CInt + +foreign import ccall "io.h _close" c__close :: + CInt -> IO CInt +#elif ghcjs_HOST_OS +createPipe = error "createPipe" +#else +createPipe = do + (readfd, writefd) <- Posix.createPipe + readh <- fdToHandle readfd + writeh <- fdToHandle writefd + hSetEncoding readh localeEncoding + hSetEncoding writeh localeEncoding + return (readh, writeh) +#endif diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Compat/Environment.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Compat/Environment.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Compat/Environment.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Compat/Environment.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,24 @@ +{-# LANGUAGE CPP #-} +{-# OPTIONS_HADDOCK hide #-} + +module Distribution.Compat.Environment (getEnvironment) + where + +import qualified System.Environment as System + +#ifdef mingw32_HOST_OS +import qualified Data.Char as Char (toUpper) +#endif + +getEnvironment :: IO [(String, String)] +#ifdef mingw32_HOST_OS +-- On Windows, the names of environment variables are case-insensitive, but are +-- often given in mixed-case (e.g. "PATH" is "Path"), so we have to normalise +-- them. 
+getEnvironment = fmap upcaseVars System.getEnvironment + where + upcaseVars = map upcaseVar + upcaseVar (var, val) = (map Char.toUpper var, val) +#else +getEnvironment = System.getEnvironment +#endif diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Compat/Exception.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Compat/Exception.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Compat/Exception.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Compat/Exception.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,17 @@ +module Distribution.Compat.Exception ( + catchIO, + catchExit, + tryIO, + ) where + +import System.Exit +import qualified Control.Exception as Exception + +tryIO :: IO a -> IO (Either Exception.IOException a) +tryIO = Exception.try + +catchIO :: IO a -> (Exception.IOException -> IO a) -> IO a +catchIO = Exception.catch + +catchExit :: IO a -> (ExitCode -> IO a) -> IO a +catchExit = Exception.catch diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Compat/ReadP.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Compat/ReadP.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Compat/ReadP.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Compat/ReadP.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,398 @@ +{-# LANGUAGE CPP #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Compat.ReadP +-- Copyright : (c) The University of Glasgow 2002 +-- License : BSD-style (see the file libraries/base/LICENSE) +-- +-- Maintainer : libraries@haskell.org +-- Portability : portable +-- +-- This is a library of parser combinators, originally written by Koen Claessen. 
+-- It parses all alternatives in parallel, so it never keeps hold of +-- the beginning of the input string, a common source of space leaks with +-- other parsers. The '(+++)' choice combinator is genuinely commutative; +-- it makes no difference which branch is \"shorter\". +-- +-- See also Koen's paper /Parallel Parsing Processes/ +-- (). +-- +-- This version of ReadP has been locally hacked to make it H98, by +-- Martin Sjögren +-- +-- The unit tests have been moved to UnitTest.Distribution.Compat.ReadP, by +-- Mark Lentczner +----------------------------------------------------------------------------- + +module Distribution.Compat.ReadP + ( + -- * The 'ReadP' type + ReadP, -- :: * -> *; instance Functor, Monad, MonadPlus + + -- * Primitive operations + get, -- :: ReadP Char + look, -- :: ReadP String + (+++), -- :: ReadP a -> ReadP a -> ReadP a + (<++), -- :: ReadP a -> ReadP a -> ReadP a + gather, -- :: ReadP a -> ReadP (String, a) + + -- * Other operations + pfail, -- :: ReadP a + satisfy, -- :: (Char -> Bool) -> ReadP Char + char, -- :: Char -> ReadP Char + string, -- :: String -> ReadP String + munch, -- :: (Char -> Bool) -> ReadP String + munch1, -- :: (Char -> Bool) -> ReadP String + skipSpaces, -- :: ReadP () + choice, -- :: [ReadP a] -> ReadP a + count, -- :: Int -> ReadP a -> ReadP [a] + between, -- :: ReadP open -> ReadP close -> ReadP a -> ReadP a + option, -- :: a -> ReadP a -> ReadP a + optional, -- :: ReadP a -> ReadP () + many, -- :: ReadP a -> ReadP [a] + many1, -- :: ReadP a -> ReadP [a] + skipMany, -- :: ReadP a -> ReadP () + skipMany1, -- :: ReadP a -> ReadP () + sepBy, -- :: ReadP a -> ReadP sep -> ReadP [a] + sepBy1, -- :: ReadP a -> ReadP sep -> ReadP [a] + endBy, -- :: ReadP a -> ReadP sep -> ReadP [a] + endBy1, -- :: ReadP a -> ReadP sep -> ReadP [a] + chainr, -- :: ReadP a -> ReadP (a -> a -> a) -> a -> ReadP a + chainl, -- :: ReadP a -> ReadP (a -> a -> a) -> a -> ReadP a + chainl1, -- :: ReadP a -> ReadP (a -> a -> a) -> ReadP a + 
chainr1, -- :: ReadP a -> ReadP (a -> a -> a) -> ReadP a + manyTill, -- :: ReadP a -> ReadP end -> ReadP [a] + + -- * Running a parser + ReadS, -- :: *; = String -> [(a,String)] + readP_to_S, -- :: ReadP a -> ReadS a + readS_to_P -- :: ReadS a -> ReadP a + ) + where + +import Control.Monad( MonadPlus(..), liftM, liftM2, ap ) +import Data.Char (isSpace) +#if __GLASGOW_HASKELL__ < 710 +import Control.Applicative (Applicative(..)) +#endif +import Control.Applicative (Alternative(empty, (<|>))) + +infixr 5 +++, <++ + +-- --------------------------------------------------------------------------- +-- The P type +-- is representation type -- should be kept abstract + +data P s a + = Get (s -> P s a) + | Look ([s] -> P s a) + | Fail + | Result a (P s a) + | Final [(a,[s])] -- invariant: list is non-empty! + +-- Monad, MonadPlus + +instance Functor (P s) where + fmap = liftM + +instance Applicative (P s) where + pure = return + (<*>) = ap + +instance Monad (P s) where + return x = Result x Fail + + (Get f) >>= k = Get (\c -> f c >>= k) + (Look f) >>= k = Look (\s -> f s >>= k) + Fail >>= _ = Fail + (Result x p) >>= k = k x `mplus` (p >>= k) + (Final r) >>= k = final [ys' | (x,s) <- r, ys' <- run (k x) s] + + fail _ = Fail + +instance Alternative (P s) where + empty = mzero + (<|>) = mplus + +instance MonadPlus (P s) where + mzero = Fail + + -- most common case: two gets are combined + Get f1 `mplus` Get f2 = Get (\c -> f1 c `mplus` f2 c) + + -- results are delivered as soon as possible + Result x p `mplus` q = Result x (p `mplus` q) + p `mplus` Result x q = Result x (p `mplus` q) + + -- fail disappears + Fail `mplus` p = p + p `mplus` Fail = p + + -- two finals are combined + -- final + look becomes one look and one final (=optimization) + -- final + sthg else becomes one look and one final + Final r `mplus` Final t = Final (r ++ t) + Final r `mplus` Look f = Look (\s -> Final (r ++ run (f s) s)) + Final r `mplus` p = Look (\s -> Final (r ++ run p s)) + Look f `mplus` 
Final r = Look (\s -> Final (run (f s) s ++ r)) + p `mplus` Final r = Look (\s -> Final (run p s ++ r)) + + -- two looks are combined (=optimization) + -- look + sthg else floats upwards + Look f `mplus` Look g = Look (\s -> f s `mplus` g s) + Look f `mplus` p = Look (\s -> f s `mplus` p) + p `mplus` Look f = Look (\s -> p `mplus` f s) + +-- --------------------------------------------------------------------------- +-- The ReadP type + +newtype Parser r s a = R ((a -> P s r) -> P s r) +type ReadP r a = Parser r Char a + +-- Functor, Monad, MonadPlus + +instance Functor (Parser r s) where + fmap h (R f) = R (\k -> f (k . h)) + +instance Applicative (Parser r s) where + pure = return + (<*>) = ap + +instance Monad (Parser r s) where + return x = R (\k -> k x) + fail _ = R (\_ -> Fail) + R m >>= f = R (\k -> m (\a -> let R m' = f a in m' k)) + +--instance MonadPlus (Parser r s) where +-- mzero = pfail +-- mplus = (+++) + +-- --------------------------------------------------------------------------- +-- Operations over P + +final :: [(a,[s])] -> P s a +-- Maintains invariant for Final constructor +final [] = Fail +final r = Final r + +run :: P c a -> ([c] -> [(a, [c])]) +run (Get f) (c:s) = run (f c) s +run (Look f) s = run (f s) s +run (Result x p) s = (x,s) : run p s +run (Final r) _ = r +run _ _ = [] + +-- --------------------------------------------------------------------------- +-- Operations over ReadP + +get :: ReadP r Char +-- ^ Consumes and returns the next character. +-- Fails if there is no input left. +get = R Get + +look :: ReadP r String +-- ^ Look-ahead: returns the part of the input that is left, without +-- consuming it. +look = R Look + +pfail :: ReadP r a +-- ^ Always fails. +pfail = R (\_ -> Fail) + +(+++) :: ReadP r a -> ReadP r a -> ReadP r a +-- ^ Symmetric choice. 
+R f1 +++ R f2 = R (\k -> f1 k `mplus` f2 k) + +(<++) :: ReadP a a -> ReadP r a -> ReadP r a +-- ^ Local, exclusive, left-biased choice: If left parser +-- locally produces any result at all, then right parser is +-- not used. +R f <++ q = + do s <- look + probe (f return) s 0 + where + probe (Get f') (c:s) n = probe (f' c) s (n+1 :: Int) + probe (Look f') s n = probe (f' s) s n + probe p@(Result _ _) _ n = discard n >> R (p >>=) + probe (Final r) _ _ = R (Final r >>=) + probe _ _ _ = q + + discard 0 = return () + discard n = get >> discard (n-1 :: Int) + +gather :: ReadP (String -> P Char r) a -> ReadP r (String, a) +-- ^ Transforms a parser into one that does the same, but +-- in addition returns the exact characters read. +-- IMPORTANT NOTE: 'gather' gives a runtime error if its first argument +-- is built using any occurrences of readS_to_P. +gather (R m) = + R (\k -> gath id (m (\a -> return (\s -> k (s,a))))) + where + gath l (Get f) = Get (\c -> gath (l.(c:)) (f c)) + gath _ Fail = Fail + gath l (Look f) = Look (\s -> gath l (f s)) + gath l (Result k p) = k (l []) `mplus` gath l p + gath _ (Final _) = error "do not use readS_to_P in gather!" + +-- --------------------------------------------------------------------------- +-- Derived operations + +satisfy :: (Char -> Bool) -> ReadP r Char +-- ^ Consumes and returns the next character, if it satisfies the +-- specified predicate. +satisfy p = do c <- get; if p c then return c else pfail + +char :: Char -> ReadP r Char +-- ^ Parses and returns the specified character. +char c = satisfy (c ==) + +string :: String -> ReadP r String +-- ^ Parses and returns the specified string. +string this = do s <- look; scan this s + where + scan [] _ = do return this + scan (x:xs) (y:ys) | x == y = do get >> scan xs ys + scan _ _ = do pfail + +munch :: (Char -> Bool) -> ReadP r String +-- ^ Parses the first zero or more characters satisfying the predicate. 
+munch p = + do s <- look + scan s + where + scan (c:cs) | p c = do _ <- get; s <- scan cs; return (c:s) + scan _ = do return "" + +munch1 :: (Char -> Bool) -> ReadP r String +-- ^ Parses the first one or more characters satisfying the predicate. +munch1 p = + do c <- get + if p c then do s <- munch p; return (c:s) + else pfail + +choice :: [ReadP r a] -> ReadP r a +-- ^ Combines all parsers in the specified list. +choice [] = pfail +choice [p] = p +choice (p:ps) = p +++ choice ps + +skipSpaces :: ReadP r () +-- ^ Skips all whitespace. +skipSpaces = + do s <- look + skip s + where + skip (c:s) | isSpace c = do _ <- get; skip s + skip _ = do return () + +count :: Int -> ReadP r a -> ReadP r [a] +-- ^ @ count n p @ parses @n@ occurrences of @p@ in sequence. A list of +-- results is returned. +count n p = sequence (replicate n p) + +between :: ReadP r open -> ReadP r close -> ReadP r a -> ReadP r a +-- ^ @ between open close p @ parses @open@, followed by @p@ and finally +-- @close@. Only the value of @p@ is returned. +between open close p = do _ <- open + x <- p + _ <- close + return x + +option :: a -> ReadP r a -> ReadP r a +-- ^ @option x p@ will either parse @p@ or return @x@ without consuming +-- any input. +option x p = p +++ return x + +optional :: ReadP r a -> ReadP r () +-- ^ @optional p@ optionally parses @p@ and always returns @()@. +optional p = (p >> return ()) +++ return () + +many :: ReadP r a -> ReadP r [a] +-- ^ Parses zero or more occurrences of the given parser. +many p = return [] +++ many1 p + +many1 :: ReadP r a -> ReadP r [a] +-- ^ Parses one or more occurrences of the given parser. +many1 p = liftM2 (:) p (many p) + +skipMany :: ReadP r a -> ReadP r () +-- ^ Like 'many', but discards the result. +skipMany p = many p >> return () + +skipMany1 :: ReadP r a -> ReadP r () +-- ^ Like 'many1', but discards the result. 
+skipMany1 p = p >> skipMany p + +sepBy :: ReadP r a -> ReadP r sep -> ReadP r [a] +-- ^ @sepBy p sep@ parses zero or more occurrences of @p@, separated by @sep@. +-- Returns a list of values returned by @p@. +sepBy p sep = sepBy1 p sep +++ return [] + +sepBy1 :: ReadP r a -> ReadP r sep -> ReadP r [a] +-- ^ @sepBy1 p sep@ parses one or more occurrences of @p@, separated by @sep@. +-- Returns a list of values returned by @p@. +sepBy1 p sep = liftM2 (:) p (many (sep >> p)) + +endBy :: ReadP r a -> ReadP r sep -> ReadP r [a] +-- ^ @endBy p sep@ parses zero or more occurrences of @p@, separated and ended +-- by @sep@. +endBy p sep = many (do x <- p ; _ <- sep ; return x) + +endBy1 :: ReadP r a -> ReadP r sep -> ReadP r [a] +-- ^ @endBy p sep@ parses one or more occurrences of @p@, separated and ended +-- by @sep@. +endBy1 p sep = many1 (do x <- p ; _ <- sep ; return x) + +chainr :: ReadP r a -> ReadP r (a -> a -> a) -> a -> ReadP r a +-- ^ @chainr p op x@ parses zero or more occurrences of @p@, separated by @op@. +-- Returns a value produced by a /right/ associative application of all +-- functions returned by @op@. If there are no occurrences of @p@, @x@ is +-- returned. +chainr p op x = chainr1 p op +++ return x + +chainl :: ReadP r a -> ReadP r (a -> a -> a) -> a -> ReadP r a +-- ^ @chainl p op x@ parses zero or more occurrences of @p@, separated by @op@. +-- Returns a value produced by a /left/ associative application of all +-- functions returned by @op@. If there are no occurrences of @p@, @x@ is +-- returned. +chainl p op x = chainl1 p op +++ return x + +chainr1 :: ReadP r a -> ReadP r (a -> a -> a) -> ReadP r a +-- ^ Like 'chainr', but parses one or more occurrences of @p@. +chainr1 p op = scan + where scan = p >>= rest + rest x = do f <- op + y <- scan + return (f x y) + +++ return x + +chainl1 :: ReadP r a -> ReadP r (a -> a -> a) -> ReadP r a +-- ^ Like 'chainl', but parses one or more occurrences of @p@. 
+chainl1 p op = p >>= rest + where rest x = do f <- op + y <- p + rest (f x y) + +++ return x + +manyTill :: ReadP r a -> ReadP [a] end -> ReadP r [a] +-- ^ @manyTill p end@ parses zero or more occurrences of @p@, until @end@ +-- succeeds. Returns a list of values returned by @p@. +manyTill p end = scan + where scan = (end >> return []) <++ (liftM2 (:) p scan) + +-- --------------------------------------------------------------------------- +-- Converting between ReadP and Read + +readP_to_S :: ReadP a a -> ReadS a +-- ^ Converts a parser into a Haskell ReadS-style function. +-- This is the main way in which you can \"run\" a 'ReadP' parser: +-- the expanded type is +-- @ readP_to_S :: ReadP a -> String -> [(a,String)] @ +readP_to_S (R f) = run (f return) + +readS_to_P :: ReadS a -> ReadP r a +-- ^ Converts a Haskell ReadS-style function into a parser. +-- Warning: This introduces local backtracking in the resulting +-- parser, and therefore a possible inefficiency. +readS_to_P r = + R (\k -> Look (\s -> final [bs'' | (a,s') <- r s, bs'' <- run (k a) s'])) diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Compat/TempFile.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Compat/TempFile.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Compat/TempFile.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Compat/TempFile.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,128 @@ +{-# LANGUAGE CPP #-} +{-# OPTIONS_HADDOCK hide #-} +module Distribution.Compat.TempFile ( + openTempFile, + openBinaryTempFile, + openNewBinaryFile, + createTempDirectory, + ) where + + +import System.FilePath (()) +import Foreign.C (eEXIST) + +import System.IO (Handle, openTempFile, openBinaryTempFile) +import Data.Bits ((.|.)) +import System.Posix.Internals (c_open, c_close, o_CREAT, o_EXCL, o_RDWR, + o_BINARY, o_NONBLOCK, o_NOCTTY) +import System.IO.Error (isAlreadyExistsError) +import 
System.Posix.Internals (withFilePath) +import Foreign.C (CInt) +import GHC.IO.Handle.FD (fdToHandle) +import Distribution.Compat.Exception (tryIO) +import Control.Exception (onException) +import Foreign.C (getErrno, errnoToIOError) + +import System.Posix.Internals (c_getpid) + +#if defined(mingw32_HOST_OS) || defined(ghcjs_HOST_OS) +import System.Directory ( createDirectory ) +#else +import qualified System.Posix +#endif + +-- ------------------------------------------------------------ +-- * temporary files +-- ------------------------------------------------------------ + +-- This is here for Haskell implementations that do not come with +-- System.IO.openTempFile. This includes nhc-1.20, hugs-2006.9. +-- TODO: Not sure about JHC +-- TODO: This file should probably be removed. + +-- This is a copy/paste of the openBinaryTempFile definition, but +-- if uses 666 rather than 600 for the permissions. The base library +-- needs to be changed to make this better. +openNewBinaryFile :: FilePath -> String -> IO (FilePath, Handle) +openNewBinaryFile dir template = do + pid <- c_getpid + findTempName pid + where + -- We split off the last extension, so we can use .foo.ext files + -- for temporary files (hidden on Unix OSes). Unfortunately we're + -- below file path in the hierarchy here. + (prefix,suffix) = + case break (== '.') $ reverse template of + -- First case: template contains no '.'s. Just re-reverse it. + (rev_suffix, "") -> (reverse rev_suffix, "") + -- Second case: template contains at least one '.'. Strip the + -- dot from the prefix and prepend it to the suffix (if we don't + -- do this, the unique number will get added after the '.' and + -- thus be part of the extension, which is wrong.) + (rev_suffix, '.':rest) -> (reverse rest, '.':reverse rev_suffix) + -- Otherwise, something is wrong, because (break (== '.')) should + -- always return a pair with either the empty string or a string + -- beginning with '.' as the second component. 
+ _ -> error "bug in System.IO.openTempFile" + + oflags = rw_flags .|. o_EXCL .|. o_BINARY + + findTempName x = do + fd <- withFilePath filepath $ \ f -> + c_open f oflags 0o666 + if fd < 0 + then do + errno <- getErrno + if errno == eEXIST + then findTempName (x+1) + else ioError (errnoToIOError "openNewBinaryFile" errno Nothing (Just dir)) + else do + -- TODO: We want to tell fdToHandle what the file path is, + -- as any exceptions etc will only be able to report the + -- FD currently + h <- fdToHandle fd `onException` c_close fd + return (filepath, h) + where + filename = prefix ++ show x ++ suffix + filepath = dir `combine` filename + + -- FIXME: bits copied from System.FilePath + combine a b + | null b = a + | null a = b + | last a == pathSeparator = a ++ b + | otherwise = a ++ [pathSeparator] ++ b + +-- FIXME: Should use System.FilePath library +pathSeparator :: Char +#ifdef mingw32_HOST_OS +pathSeparator = '\\' +#else +pathSeparator = '/' +#endif + +-- FIXME: Copied from GHC.Handle +std_flags, output_flags, rw_flags :: CInt +std_flags = o_NONBLOCK .|. o_NOCTTY +output_flags = std_flags .|. o_CREAT +rw_flags = output_flags .|. 
o_RDWR + +createTempDirectory :: FilePath -> String -> IO FilePath +createTempDirectory dir template = do + pid <- c_getpid + findTempName pid + where + findTempName x = do + let dirpath = dir template ++ "-" ++ show x + r <- tryIO $ mkPrivateDir dirpath + case r of + Right _ -> return dirpath + Left e | isAlreadyExistsError e -> findTempName (x+1) + | otherwise -> ioError e + +mkPrivateDir :: String -> IO () +#if defined(mingw32_HOST_OS) || defined(ghcjs_HOST_OS) +mkPrivateDir s = createDirectory s +#else +mkPrivateDir s = System.Posix.createDirectory s 0o700 +#endif diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Compiler.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Compiler.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Compiler.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Compiler.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,204 @@ +{-# LANGUAGE DeriveDataTypeable #-} +{-# LANGUAGE DeriveGeneric #-} + +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Compiler +-- Copyright : Isaac Jones 2003-2004 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This has an enumeration of the various compilers that Cabal knows about. It +-- also specifies the default compiler. Sadly you'll often see code that does +-- case analysis on this compiler flavour enumeration like: +-- +-- > case compilerFlavor comp of +-- > GHC -> GHC.getInstalledPackages verbosity packageDb progconf +-- > JHC -> JHC.getInstalledPackages verbosity packageDb progconf +-- +-- Obviously it would be better to use the proper 'Compiler' abstraction +-- because that would keep all the compiler-specific code together. 
+-- Unfortunately we cannot make this change yet without breaking the +-- 'UserHooks' api, which would break all custom @Setup.hs@ files, so for the +-- moment we just have to live with this deficiency. If you're interested, see +-- ticket #57. + +module Distribution.Compiler ( + -- * Compiler flavor + CompilerFlavor(..), + buildCompilerId, + buildCompilerFlavor, + defaultCompilerFlavor, + parseCompilerFlavorCompat, + + -- * Compiler id + CompilerId(..), + + -- * Compiler info + CompilerInfo(..), + unknownCompilerInfo, + AbiTag(..), abiTagString + ) where + +import Distribution.Compat.Binary (Binary) +import Data.Data (Data) +import Data.Typeable (Typeable) +import Data.Maybe (fromMaybe) +import Distribution.Version (Version(..)) +import GHC.Generics (Generic) + +import Language.Haskell.Extension (Language, Extension) + +import qualified System.Info (compilerName, compilerVersion) +import Distribution.Text (Text(..), display) +import qualified Distribution.Compat.ReadP as Parse +import qualified Text.PrettyPrint as Disp +import Text.PrettyPrint ((<>)) + +import qualified Data.Char as Char (toLower, isDigit, isAlphaNum) +import Control.Monad (when) + +data CompilerFlavor = GHC | GHCJS | NHC | YHC | Hugs | HBC | Helium | JHC | LHC | UHC + | HaskellSuite String -- string is the id of the actual compiler + | OtherCompiler String + deriving (Generic, Show, Read, Eq, Ord, Typeable, Data) + +instance Binary CompilerFlavor + +knownCompilerFlavors :: [CompilerFlavor] +knownCompilerFlavors = [GHC, GHCJS, NHC, YHC, Hugs, HBC, Helium, JHC, LHC, UHC] + +instance Text CompilerFlavor where + disp (OtherCompiler name) = Disp.text name + disp (HaskellSuite name) = Disp.text name + disp NHC = Disp.text "nhc98" + disp other = Disp.text (lowercase (show other)) + + parse = do + comp <- Parse.munch1 Char.isAlphaNum + when (all Char.isDigit comp) Parse.pfail + return (classifyCompilerFlavor comp) + +classifyCompilerFlavor :: String -> CompilerFlavor +classifyCompilerFlavor s = + 
fromMaybe (OtherCompiler s) $ lookup (lowercase s) compilerMap + where + compilerMap = [ (display compiler, compiler) + | compiler <- knownCompilerFlavors ] + + +--TODO: In some future release, remove 'parseCompilerFlavorCompat' and use +-- ordinary 'parse'. Also add ("nhc", NHC) to the above 'compilerMap'. + +-- | Like 'classifyCompilerFlavor' but compatible with the old ReadS parser. +-- +-- It is compatible in the sense that it accepts only the same strings, +-- eg "GHC" but not "ghc". However other strings get mapped to 'OtherCompiler'. +-- The point of this is that we do not allow extra valid values that would +-- upset older Cabal versions that had a stricter parser however we cope with +-- new values more gracefully so that we'll be able to introduce new value in +-- future without breaking things so much. +-- +parseCompilerFlavorCompat :: Parse.ReadP r CompilerFlavor +parseCompilerFlavorCompat = do + comp <- Parse.munch1 Char.isAlphaNum + when (all Char.isDigit comp) Parse.pfail + case lookup comp compilerMap of + Just compiler -> return compiler + Nothing -> return (OtherCompiler comp) + where + compilerMap = [ (show compiler, compiler) + | compiler <- knownCompilerFlavors + , compiler /= YHC ] + +buildCompilerFlavor :: CompilerFlavor +buildCompilerFlavor = classifyCompilerFlavor System.Info.compilerName + +buildCompilerVersion :: Version +buildCompilerVersion = System.Info.compilerVersion + +buildCompilerId :: CompilerId +buildCompilerId = CompilerId buildCompilerFlavor buildCompilerVersion + +-- | The default compiler flavour to pick when compiling stuff. This defaults +-- to the compiler used to build the Cabal lib. +-- +-- However if it's not a recognised compiler then it's 'Nothing' and the user +-- will have to specify which compiler they want. 
+-- +defaultCompilerFlavor :: Maybe CompilerFlavor +defaultCompilerFlavor = case buildCompilerFlavor of + OtherCompiler _ -> Nothing + _ -> Just buildCompilerFlavor + +-- ------------------------------------------------------------ +-- * Compiler Id +-- ------------------------------------------------------------ + +data CompilerId = CompilerId CompilerFlavor Version + deriving (Eq, Generic, Ord, Read, Show) + +instance Binary CompilerId + +instance Text CompilerId where + disp (CompilerId f (Version [] _)) = disp f + disp (CompilerId f v) = disp f <> Disp.char '-' <> disp v + + parse = do + flavour <- parse + version <- (Parse.char '-' >> parse) Parse.<++ return (Version [] []) + return (CompilerId flavour version) + +lowercase :: String -> String +lowercase = map Char.toLower + +-- ------------------------------------------------------------ +-- * Compiler Info +-- ------------------------------------------------------------ + +-- | Compiler information used for resolving configurations. Some fields can be +-- set to Nothing to indicate that the information is unknown. + +data CompilerInfo = CompilerInfo { + compilerInfoId :: CompilerId, + -- ^ Compiler flavour and version. + compilerInfoAbiTag :: AbiTag, + -- ^ Tag for distinguishing incompatible ABI's on the same architecture/os. + compilerInfoCompat :: Maybe [CompilerId], + -- ^ Other implementations that this compiler claims to be compatible with, if known. + compilerInfoLanguages :: Maybe [Language], + -- ^ Supported language standards, if known. + compilerInfoExtensions :: Maybe [Extension] + -- ^ Supported extensions, if known. 
+ } + deriving (Generic, Show, Read) + +instance Binary CompilerInfo + +data AbiTag + = NoAbiTag + | AbiTag String + deriving (Generic, Show, Read) + +instance Binary AbiTag + +instance Text AbiTag where + disp NoAbiTag = Disp.empty + disp (AbiTag tag) = Disp.text tag + + parse = do + tag <- Parse.munch (\c -> Char.isAlphaNum c || c == '_') + if null tag then return NoAbiTag else return (AbiTag tag) + +abiTagString :: AbiTag -> String +abiTagString NoAbiTag = "" +abiTagString (AbiTag tag) = tag + +-- | Make a CompilerInfo of which only the known information is its CompilerId, +-- its AbiTag and that it does not claim to be compatible with other +-- compiler id's. +unknownCompilerInfo :: CompilerId -> AbiTag -> CompilerInfo +unknownCompilerInfo compilerId abiTag = + CompilerInfo compilerId abiTag (Just []) Nothing Nothing diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/GetOpt.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/GetOpt.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/GetOpt.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/GetOpt.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,335 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.GetOpt +-- Copyright : (c) Sven Panne 2002-2005 +-- License : BSD-style (see the file libraries/base/LICENSE) +-- +-- Maintainer : libraries@haskell.org +-- Portability : portable +-- +-- This library provides facilities for parsing the command-line options +-- in a standalone program. It is essentially a Haskell port of the GNU +-- @getopt@ library. +-- +----------------------------------------------------------------------------- + +{- +Sven Panne Oct. 1996 (small +changes Dec. 1997) + +Two rather obscure features are missing: The Bash 2.0 non-option hack +(if you don't already know it, you probably don't want to hear about +it...) 
and the recognition of long options with a single dash +(e.g. '-help' is recognised as '--help', as long as there is no short +option 'h'). + +Other differences between GNU's getopt and this implementation: + +* To enforce a coherent description of options and arguments, there + are explanation fields in the option/argument descriptor. + +* Error messages are now more informative, but no longer POSIX + compliant... :-( + +And a final Haskell advertisement: The GNU C implementation uses well +over 1100 lines, we need only 195 here, including a 46 line example! +:-) +-} + +{-# OPTIONS_HADDOCK hide #-} +module Distribution.GetOpt ( + -- * GetOpt + getOpt, getOpt', + usageInfo, + ArgOrder(..), + OptDescr(..), + ArgDescr(..), + + -- * Example + + -- $example +) where + +import Data.List ( isPrefixOf, intercalate, find ) +import Data.Maybe ( isJust ) + +-- |What to do with options following non-options +data ArgOrder a + = RequireOrder -- ^ no option processing after first non-option + | Permute -- ^ freely intersperse options and non-options + | ReturnInOrder (String -> a) -- ^ wrap non-options into options + +{-| +Each 'OptDescr' describes a single option. + +The arguments to 'Option' are: + +* list of short option characters + +* list of long option strings (without \"--\") + +* argument descriptor + +* explanation of option for user +-} +data OptDescr a = -- description of a single options: + Option [Char] -- list of short option characters + [String] -- list of long option strings (without "--") + (ArgDescr a) -- argument descriptor + String -- explanation of option for user + +-- |Describes whether an option takes an argument or not, and if so +-- how the argument is injected into a value of type @a@. 
+data ArgDescr a + = NoArg a -- ^ no argument expected + | ReqArg (String -> a) String -- ^ option requires argument + | OptArg (Maybe String -> a) String -- ^ optional argument + +data OptKind a -- kind of cmd line arg (internal use only): + = Opt a -- an option + | UnreqOpt String -- an un-recognized option + | NonOpt String -- a non-option + | EndOfOpts -- end-of-options marker (i.e. "--") + | OptErr String -- something went wrong... + +-- | Return a string describing the usage of a command, derived from +-- the header (first argument) and the options described by the +-- second argument. +usageInfo :: String -- header + -> [OptDescr a] -- option descriptors + -> String -- nicely formatted decription of options +usageInfo header optDescr = unlines (header:table) + where (ss,ls,ds) = unzip3 [ (intercalate ", " (map (fmtShort ad) sos) + ,concatMap (fmtLong ad) (take 1 los) + ,d) + | Option sos los ad d <- optDescr ] + ssWidth = (maximum . map length) ss + lsWidth = (maximum . map length) ls + dsWidth = 30 `max` (80 - (ssWidth + lsWidth + 3)) + table = [ " " ++ padTo ssWidth so' ++ + " " ++ padTo lsWidth lo' ++ + " " ++ d' + | (so,lo,d) <- zip3 ss ls ds + , (so',lo',d') <- fmtOpt dsWidth so lo d ] + padTo n x = take n (x ++ repeat ' ') + +fmtOpt :: Int -> String -> String -> String -> [(String, String, String)] +fmtOpt descrWidth so lo descr = + case wrapText descrWidth descr of + [] -> [(so,lo,"")] + (d:ds) -> (so,lo,d) : [ ("","",d') | d' <- ds ] + +fmtShort :: ArgDescr a -> Char -> String +fmtShort (NoArg _ ) so = "-" ++ [so] +fmtShort (ReqArg _ _) so = "-" ++ [so] +fmtShort (OptArg _ _) so = "-" ++ [so] + +fmtLong :: ArgDescr a -> String -> String +fmtLong (NoArg _ ) lo = "--" ++ lo +fmtLong (ReqArg _ ad) lo = "--" ++ lo ++ "=" ++ ad +fmtLong (OptArg _ ad) lo = "--" ++ lo ++ "[=" ++ ad ++ "]" + +wrapText :: Int -> String -> [String] +wrapText width = map unwords . wrap 0 [] . 
words + where wrap :: Int -> [String] -> [String] -> [[String]] + wrap 0 [] (w:ws) + | length w + 1 > width + = wrap (length w) [w] ws + wrap col line (w:ws) + | col + length w + 1 > width + = reverse line : wrap 0 [] (w:ws) + wrap col line (w:ws) + = let col' = col + length w + 1 + in wrap col' (w:line) ws + wrap _ [] [] = [] + wrap _ line [] = [reverse line] + +{-| +Process the command-line, and return the list of values that matched +(and those that didn\'t). The arguments are: + +* The order requirements (see 'ArgOrder') + +* The option descriptions (see 'OptDescr') + +* The actual command line arguments (presumably got from + 'System.Environment.getArgs'). + +'getOpt' returns a triple consisting of the option arguments, a list +of non-options, and a list of error messages. +-} +getOpt :: ArgOrder a -- non-option handling + -> [OptDescr a] -- option descriptors + -> [String] -- the command-line arguments + -> ([a],[String],[String]) -- (options,non-options,error messages) +getOpt ordering optDescr args = (os,xs,es ++ map errUnrec us) + where (os,xs,us,es) = getOpt' ordering optDescr args + +{-| +This is almost the same as 'getOpt', but returns a quadruple +consisting of the option arguments, a list of non-options, a list of +unrecognized options, and a list of error messages. 
+-} +getOpt' :: ArgOrder a -- non-option handling + -> [OptDescr a] -- option descriptors + -> [String] -- the command-line arguments + -> ([a],[String], [String] ,[String]) -- (options,non-options,unrecognized,error messages) +getOpt' _ _ [] = ([],[],[],[]) +getOpt' ordering optDescr (arg:args) = procNextOpt opt ordering + where procNextOpt (Opt o) _ = (o:os,xs,us,es) + procNextOpt (UnreqOpt u) _ = (os,xs,u:us,es) + procNextOpt (NonOpt x) RequireOrder = ([],x:rest,[],[]) + procNextOpt (NonOpt x) Permute = (os,x:xs,us,es) + procNextOpt (NonOpt x) (ReturnInOrder f) = (f x :os, xs,us,es) + procNextOpt EndOfOpts RequireOrder = ([],rest,[],[]) + procNextOpt EndOfOpts Permute = ([],rest,[],[]) + procNextOpt EndOfOpts (ReturnInOrder f) = (map f rest,[],[],[]) + procNextOpt (OptErr e) _ = (os,xs,us,e:es) + + (opt,rest) = getNext arg args optDescr + (os,xs,us,es) = getOpt' ordering optDescr rest + +-- take a look at the next cmd line arg and decide what to do with it +getNext :: String -> [String] -> [OptDescr a] -> (OptKind a,[String]) +getNext ('-':'-':[]) rest _ = (EndOfOpts,rest) +getNext ('-':'-':xs) rest optDescr = longOpt xs rest optDescr +getNext ('-': x :xs) rest optDescr = shortOpt x xs rest optDescr +getNext a rest _ = (NonOpt a,rest) + +-- handle long option +longOpt :: String -> [String] -> [OptDescr a] -> (OptKind a,[String]) +longOpt ls rs optDescr = long ads arg rs + where (opt,arg) = break (=='=') ls + getWith p = [ o | o@(Option _ xs _ _) <- optDescr + , isJust (find (p opt) xs)] + exact = getWith (==) + options = if null exact then getWith isPrefixOf else exact + ads = [ ad | Option _ _ ad _ <- options ] + optStr = "--" ++ opt + + long (_:_:_) _ rest = (errAmbig options optStr,rest) + long [NoArg a ] [] rest = (Opt a,rest) + long [NoArg _ ] ('=':_) rest = (errNoArg optStr,rest) + long [ReqArg _ d] [] [] = (errReq d optStr,[]) + long [ReqArg f _] [] (r:rest) = (Opt (f r),rest) + long [ReqArg f _] ('=':xs) rest = (Opt (f xs),rest) + long [OptArg f _] [] 
rest = (Opt (f Nothing),rest) + long [OptArg f _] ('=':xs) rest = (Opt (f (Just xs)),rest) + long _ _ rest = (UnreqOpt ("--"++ls),rest) + +-- handle short option +shortOpt :: Char -> String -> [String] -> [OptDescr a] -> (OptKind a,[String]) +shortOpt y ys rs optDescr = short ads ys rs + where options = [ o | o@(Option ss _ _ _) <- optDescr, s <- ss, y == s ] + ads = [ ad | Option _ _ ad _ <- options ] + optStr = '-':[y] + + short (_:_:_) _ rest = (errAmbig options optStr,rest) + short (NoArg a :_) [] rest = (Opt a,rest) + short (NoArg a :_) xs rest = (Opt a,('-':xs):rest) + short (ReqArg _ d:_) [] [] = (errReq d optStr,[]) + short (ReqArg f _:_) [] (r:rest) = (Opt (f r),rest) + short (ReqArg f _:_) xs rest = (Opt (f xs),rest) + short (OptArg f _:_) [] rest = (Opt (f Nothing),rest) + short (OptArg f _:_) xs rest = (Opt (f (Just xs)),rest) + short [] [] rest = (UnreqOpt optStr,rest) + short [] xs rest = (UnreqOpt (optStr++xs),rest) + +-- miscellaneous error formatting + +errAmbig :: [OptDescr a] -> String -> OptKind a +errAmbig ods optStr = OptErr (usageInfo header ods) + where header = "option `" ++ optStr ++ "' is ambiguous; could be one of:" + +errReq :: String -> String -> OptKind a +errReq d optStr = OptErr ("option `" ++ optStr ++ "' requires an argument " ++ d ++ "\n") + +errUnrec :: String -> String +errUnrec optStr = "unrecognized option `" ++ optStr ++ "'\n" + +errNoArg :: String -> OptKind a +errNoArg optStr = OptErr ("option `" ++ optStr ++ "' doesn't allow an argument\n") + +{- +----------------------------------------------------------------------------------------- +-- and here a small and hopefully enlightening example: + +data Flag = Verbose | Version | Name String | Output String | Arg String deriving Show + +options :: [OptDescr Flag] +options = + [Option ['v'] ["verbose"] (NoArg Verbose) "verbosely list files", + Option ['V','?'] ["version","release"] (NoArg Version) "show version info", + Option ['o'] ["output"] (OptArg out "FILE") "use FILE for 
dump", + Option ['n'] ["name"] (ReqArg Name "USER") "only dump USER's files"] + +out :: Maybe String -> Flag +out Nothing = Output "stdout" +out (Just o) = Output o + +test :: ArgOrder Flag -> [String] -> String +test order cmdline = case getOpt order options cmdline of + (o,n,[] ) -> "options=" ++ show o ++ " args=" ++ show n ++ "\n" + (_,_,errs) -> concat errs ++ usageInfo header options + where header = "Usage: foobar [OPTION...] files..." + +-- example runs: +-- putStr (test RequireOrder ["foo","-v"]) +-- ==> options=[] args=["foo", "-v"] +-- putStr (test Permute ["foo","-v"]) +-- ==> options=[Verbose] args=["foo"] +-- putStr (test (ReturnInOrder Arg) ["foo","-v"]) +-- ==> options=[Arg "foo", Verbose] args=[] +-- putStr (test Permute ["foo","--","-v"]) +-- ==> options=[] args=["foo", "-v"] +-- putStr (test Permute ["-?o","--name","bar","--na=baz"]) +-- ==> options=[Version, Output "stdout", Name "bar", Name "baz"] args=[] +-- putStr (test Permute ["--ver","foo"]) +-- ==> option `--ver' is ambiguous; could be one of: +-- -v --verbose verbosely list files +-- -V, -? --version, --release show version info +-- Usage: foobar [OPTION...] files... +-- -v --verbose verbosely list files +-- -V, -? 
--version, --release show version info +-- -o[FILE] --output[=FILE] use FILE for dump +-- -n USER --name=USER only dump USER's files +----------------------------------------------------------------------------------------- +-} + +{- $example + +To hopefully illuminate the role of the different data +structures, here\'s the command-line options for a (very simple) +compiler: + +> module Opts where +> +> import Distribution.GetOpt +> import Data.Maybe ( fromMaybe ) +> +> data Flag +> = Verbose | Version +> | Input String | Output String | LibDir String +> deriving Show +> +> options :: [OptDescr Flag] +> options = +> [ Option ['v'] ["verbose"] (NoArg Verbose) "chatty output on stderr" +> , Option ['V','?'] ["version"] (NoArg Version) "show version number" +> , Option ['o'] ["output"] (OptArg outp "FILE") "output FILE" +> , Option ['c'] [] (OptArg inp "FILE") "input FILE" +> , Option ['L'] ["libdir"] (ReqArg LibDir "DIR") "library directory" +> ] +> +> inp,outp :: Maybe String -> Flag +> outp = Output . fromMaybe "stdout" +> inp = Input . fromMaybe "stdin" +> +> compilerOpts :: [String] -> IO ([Flag], [String]) +> compilerOpts argv = +> case getOpt Permute options argv of +> (o,n,[] ) -> return (o,n) +> (_,_,errs) -> ioError (userError (concat errs ++ usageInfo header options)) +> where header = "Usage: ic [OPTION...] files..." 
+ +-} diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/InstalledPackageInfo.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/InstalledPackageInfo.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/InstalledPackageInfo.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/InstalledPackageInfo.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,391 @@ +{-# LANGUAGE DeriveGeneric #-} + +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.InstalledPackageInfo +-- Copyright : (c) The University of Glasgow 2004 +-- +-- Maintainer : libraries@haskell.org +-- Portability : portable +-- +-- This is the information about an /installed/ package that +-- is communicated to the @ghc-pkg@ program in order to register +-- a package. @ghc-pkg@ now consumes this package format (as of version +-- 6.4). This is specific to GHC at the moment. +-- +-- The @.cabal@ file format is for describing a package that is not yet +-- installed. It has a lot of flexibility, like conditionals and dependency +-- ranges. As such, that format is not at all suitable for describing a package +-- that has already been built and installed. By the time we get to that stage, +-- we have resolved all conditionals and resolved dependency version +-- constraints to exact versions of dependent packages. So, this module defines +-- the 'InstalledPackageInfo' data structure that contains all the info we keep +-- about an installed package. There is a parser and pretty printer. The +-- textual format is rather simpler than the @.cabal@ format: there are no +-- sections, for example. + +-- This module is meant to be local-only to Distribution... 
+ +module Distribution.InstalledPackageInfo ( + InstalledPackageInfo_(..), InstalledPackageInfo, + OriginalModule(..), ExposedModule(..), + ParseResult(..), PError(..), PWarning, + emptyInstalledPackageInfo, + parseInstalledPackageInfo, + showInstalledPackageInfo, + showInstalledPackageInfoField, + showSimpleInstalledPackageInfoField, + fieldsInstalledPackageInfo, + ) where + +import Distribution.ParseUtils + ( FieldDescr(..), ParseResult(..), PError(..), PWarning + , simpleField, listField, parseLicenseQ + , showFields, showSingleNamedField, showSimpleSingleNamedField + , parseFieldsFlat + , parseFilePathQ, parseTokenQ, parseModuleNameQ, parsePackageNameQ + , showFilePath, showToken, boolField, parseOptVersion + , parseFreeText, showFreeText, parseOptCommaList ) +import Distribution.License ( License(..) ) +import Distribution.Package + ( PackageName(..), PackageIdentifier(..) + , PackageId, InstalledPackageId(..) + , packageName, packageVersion, PackageKey(..) ) +import qualified Distribution.Package as Package +import Distribution.ModuleName + ( ModuleName ) +import Distribution.Version + ( Version(..) 
) +import Distribution.Text + ( Text(disp, parse) ) +import Text.PrettyPrint as Disp +import qualified Distribution.Compat.ReadP as Parse + +import Distribution.Compat.Binary (Binary) +import Data.Maybe (fromMaybe) +import GHC.Generics (Generic) + +-- ----------------------------------------------------------------------------- +-- The InstalledPackageInfo type + + +data InstalledPackageInfo_ m + = InstalledPackageInfo { + -- these parts are exactly the same as PackageDescription + installedPackageId :: InstalledPackageId, + sourcePackageId :: PackageId, + packageKey :: PackageKey, + license :: License, + copyright :: String, + maintainer :: String, + author :: String, + stability :: String, + homepage :: String, + pkgUrl :: String, + synopsis :: String, + description :: String, + category :: String, + -- these parts are required by an installed package only: + exposed :: Bool, + exposedModules :: [ExposedModule], + instantiatedWith :: [(m, OriginalModule)], + hiddenModules :: [m], + trusted :: Bool, + importDirs :: [FilePath], + libraryDirs :: [FilePath], + dataDir :: FilePath, + hsLibraries :: [String], + extraLibraries :: [String], + extraGHCiLibraries:: [String], -- overrides extraLibraries for GHCi + includeDirs :: [FilePath], + includes :: [String], + depends :: [InstalledPackageId], + ccOptions :: [String], + ldOptions :: [String], + frameworkDirs :: [FilePath], + frameworks :: [String], + haddockInterfaces :: [FilePath], + haddockHTMLs :: [FilePath], + pkgRoot :: Maybe FilePath + } + deriving (Generic, Read, Show) + +instance Binary m => Binary (InstalledPackageInfo_ m) + +instance Package.Package (InstalledPackageInfo_ str) where + packageId = sourcePackageId + +instance Package.PackageInstalled (InstalledPackageInfo_ str) where + installedPackageId = installedPackageId + installedDepends = depends + +type InstalledPackageInfo = InstalledPackageInfo_ ModuleName + +emptyInstalledPackageInfo :: InstalledPackageInfo_ m +emptyInstalledPackageInfo + = 
InstalledPackageInfo { + installedPackageId = InstalledPackageId "", + sourcePackageId = PackageIdentifier (PackageName "") noVersion, + packageKey = OldPackageKey (PackageIdentifier + (PackageName "") noVersion), + license = UnspecifiedLicense, + copyright = "", + maintainer = "", + author = "", + stability = "", + homepage = "", + pkgUrl = "", + synopsis = "", + description = "", + category = "", + exposed = False, + exposedModules = [], + hiddenModules = [], + instantiatedWith = [], + trusted = False, + importDirs = [], + libraryDirs = [], + dataDir = "", + hsLibraries = [], + extraLibraries = [], + extraGHCiLibraries= [], + includeDirs = [], + includes = [], + depends = [], + ccOptions = [], + ldOptions = [], + frameworkDirs = [], + frameworks = [], + haddockInterfaces = [], + haddockHTMLs = [], + pkgRoot = Nothing + } + +noVersion :: Version +noVersion = Version [] [] + +-- ----------------------------------------------------------------------------- +-- Exposed modules + +data OriginalModule + = OriginalModule { + originalPackageId :: InstalledPackageId, + originalModuleName :: ModuleName + } + deriving (Generic, Eq, Read, Show) + +data ExposedModule + = ExposedModule { + exposedName :: ModuleName, + exposedReexport :: Maybe OriginalModule, + exposedSignature :: Maybe OriginalModule -- This field is unused for now. 
+ } + deriving (Generic, Read, Show) + +instance Text OriginalModule where + disp (OriginalModule ipi m) = + disp ipi <> Disp.char ':' <> disp m + parse = do + ipi <- parse + _ <- Parse.char ':' + m <- parse + return (OriginalModule ipi m) + +instance Text ExposedModule where + disp (ExposedModule m reexport signature) = + Disp.sep [ disp m + , case reexport of + Just m' -> Disp.sep [Disp.text "from", disp m'] + Nothing -> Disp.empty + , case signature of + Just m' -> Disp.sep [Disp.text "is", disp m'] + Nothing -> Disp.empty + ] + parse = do + m <- parseModuleNameQ + Parse.skipSpaces + reexport <- Parse.option Nothing $ do + _ <- Parse.string "from" + Parse.skipSpaces + fmap Just parse + Parse.skipSpaces + signature <- Parse.option Nothing $ do + _ <- Parse.string "is" + Parse.skipSpaces + fmap Just parse + return (ExposedModule m reexport signature) + + +instance Binary OriginalModule + +instance Binary ExposedModule + +-- To maintain backwards-compatibility, we accept both comma/non-comma +-- separated variants of this field. You SHOULD use the comma syntax if you +-- use any new functions, although actually it's unambiguous due to a quirk +-- of the fact that modules must start with capital letters. 
+ +showExposedModules :: [ExposedModule] -> Disp.Doc +showExposedModules xs + | all isExposedModule xs = fsep (map disp xs) + | otherwise = fsep (Disp.punctuate comma (map disp xs)) + where isExposedModule (ExposedModule _ Nothing Nothing) = True + isExposedModule _ = False + +parseExposedModules :: Parse.ReadP r [ExposedModule] +parseExposedModules = parseOptCommaList parse + +-- ----------------------------------------------------------------------------- +-- Parsing + +parseInstalledPackageInfo :: String -> ParseResult InstalledPackageInfo +parseInstalledPackageInfo = + parseFieldsFlat (fieldsInstalledPackageInfo ++ deprecatedFieldDescrs) + emptyInstalledPackageInfo + +parseInstantiatedWith :: Parse.ReadP r (ModuleName, OriginalModule) +parseInstantiatedWith = do k <- parse + _ <- Parse.char '=' + n <- parse + _ <- Parse.char '@' + p <- parse + return (k, OriginalModule p n) + +-- ----------------------------------------------------------------------------- +-- Pretty-printing + +showInstalledPackageInfo :: InstalledPackageInfo -> String +showInstalledPackageInfo = showFields fieldsInstalledPackageInfo + +showInstalledPackageInfoField :: String -> Maybe (InstalledPackageInfo -> String) +showInstalledPackageInfoField = showSingleNamedField fieldsInstalledPackageInfo + +showSimpleInstalledPackageInfoField :: String -> Maybe (InstalledPackageInfo -> String) +showSimpleInstalledPackageInfoField = showSimpleSingleNamedField fieldsInstalledPackageInfo + +showInstantiatedWith :: (ModuleName, OriginalModule) -> Doc +showInstantiatedWith (k, OriginalModule p m) = disp k <> text "=" <> disp m <> text "@" <> disp p + +-- ----------------------------------------------------------------------------- +-- Description of the fields, for parsing/printing + +fieldsInstalledPackageInfo :: [FieldDescr InstalledPackageInfo] +fieldsInstalledPackageInfo = basicFieldDescrs ++ installedFieldDescrs + +basicFieldDescrs :: [FieldDescr InstalledPackageInfo] +basicFieldDescrs = + [ 
simpleField "name" + disp parsePackageNameQ + packageName (\name pkg -> pkg{sourcePackageId=(sourcePackageId pkg){pkgName=name}}) + , simpleField "version" + disp parseOptVersion + packageVersion (\ver pkg -> pkg{sourcePackageId=(sourcePackageId pkg){pkgVersion=ver}}) + , simpleField "id" + disp parse + installedPackageId (\ipid pkg -> pkg{installedPackageId=ipid}) + , simpleField "key" + disp parse + packageKey (\ipid pkg -> pkg{packageKey=ipid}) + , simpleField "license" + disp parseLicenseQ + license (\l pkg -> pkg{license=l}) + , simpleField "copyright" + showFreeText parseFreeText + copyright (\val pkg -> pkg{copyright=val}) + , simpleField "maintainer" + showFreeText parseFreeText + maintainer (\val pkg -> pkg{maintainer=val}) + , simpleField "stability" + showFreeText parseFreeText + stability (\val pkg -> pkg{stability=val}) + , simpleField "homepage" + showFreeText parseFreeText + homepage (\val pkg -> pkg{homepage=val}) + , simpleField "package-url" + showFreeText parseFreeText + pkgUrl (\val pkg -> pkg{pkgUrl=val}) + , simpleField "synopsis" + showFreeText parseFreeText + synopsis (\val pkg -> pkg{synopsis=val}) + , simpleField "description" + showFreeText parseFreeText + description (\val pkg -> pkg{description=val}) + , simpleField "category" + showFreeText parseFreeText + category (\val pkg -> pkg{category=val}) + , simpleField "author" + showFreeText parseFreeText + author (\val pkg -> pkg{author=val}) + ] + +installedFieldDescrs :: [FieldDescr InstalledPackageInfo] +installedFieldDescrs = [ + boolField "exposed" + exposed (\val pkg -> pkg{exposed=val}) + , simpleField "exposed-modules" + showExposedModules parseExposedModules + exposedModules (\xs pkg -> pkg{exposedModules=xs}) + , listField "hidden-modules" + disp parseModuleNameQ + hiddenModules (\xs pkg -> pkg{hiddenModules=xs}) + , listField "instantiated-with" + showInstantiatedWith parseInstantiatedWith + instantiatedWith (\xs pkg -> pkg{instantiatedWith=xs}) + , boolField "trusted" + trusted 
(\val pkg -> pkg{trusted=val}) + , listField "import-dirs" + showFilePath parseFilePathQ + importDirs (\xs pkg -> pkg{importDirs=xs}) + , listField "library-dirs" + showFilePath parseFilePathQ + libraryDirs (\xs pkg -> pkg{libraryDirs=xs}) + , simpleField "data-dir" + showFilePath (parseFilePathQ Parse.<++ return "") + dataDir (\val pkg -> pkg{dataDir=val}) + , listField "hs-libraries" + showFilePath parseTokenQ + hsLibraries (\xs pkg -> pkg{hsLibraries=xs}) + , listField "extra-libraries" + showToken parseTokenQ + extraLibraries (\xs pkg -> pkg{extraLibraries=xs}) + , listField "extra-ghci-libraries" + showToken parseTokenQ + extraGHCiLibraries (\xs pkg -> pkg{extraGHCiLibraries=xs}) + , listField "include-dirs" + showFilePath parseFilePathQ + includeDirs (\xs pkg -> pkg{includeDirs=xs}) + , listField "includes" + showFilePath parseFilePathQ + includes (\xs pkg -> pkg{includes=xs}) + , listField "depends" + disp parse + depends (\xs pkg -> pkg{depends=xs}) + , listField "cc-options" + showToken parseTokenQ + ccOptions (\path pkg -> pkg{ccOptions=path}) + , listField "ld-options" + showToken parseTokenQ + ldOptions (\path pkg -> pkg{ldOptions=path}) + , listField "framework-dirs" + showFilePath parseFilePathQ + frameworkDirs (\xs pkg -> pkg{frameworkDirs=xs}) + , listField "frameworks" + showToken parseTokenQ + frameworks (\xs pkg -> pkg{frameworks=xs}) + , listField "haddock-interfaces" + showFilePath parseFilePathQ + haddockInterfaces (\xs pkg -> pkg{haddockInterfaces=xs}) + , listField "haddock-html" + showFilePath parseFilePathQ + haddockHTMLs (\xs pkg -> pkg{haddockHTMLs=xs}) + , simpleField "pkgroot" + (const Disp.empty) parseFilePathQ + (fromMaybe "" . 
pkgRoot) (\xs pkg -> pkg{pkgRoot=Just xs}) + ] + +deprecatedFieldDescrs :: [FieldDescr InstalledPackageInfo] +deprecatedFieldDescrs = [ + listField "hugs-options" + showToken parseTokenQ + (const []) (const id) + ] diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/License.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/License.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/License.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/License.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,177 @@ +{-# LANGUAGE DeriveDataTypeable #-} +{-# LANGUAGE DeriveGeneric #-} + +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.License +-- Description : The License data type. +-- Copyright : Isaac Jones 2003-2005 +-- Duncan Coutts 2008 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- Package descriptions contain fields for specifying the name of a software +-- license and the name of the file containing the text of that license. While +-- package authors may choose any license they like, Cabal provides an +-- enumeration of a small set of common free and open source software licenses. +-- This is done so that Hackage can recognise licenses, so that tools can detect +-- , +-- and to deter +-- . +-- +-- It is recommended that all package authors use the @license-file@ or +-- @license-files@ fields in their package descriptions. Further information +-- about these fields can be found in the +-- . +-- +-- = Additional resources +-- +-- The following websites provide information about free and open source +-- software licenses: +-- +-- * +-- +-- * +-- +-- = Disclaimer +-- +-- The descriptions of software licenses provided by this documentation are +-- intended for informational purposes only and in no way constitute legal +-- advice. 
Please read the text of the licenses and consult a lawyer for any +-- advice regarding software licensing. + +module Distribution.License ( + License(..), + knownLicenses, + ) where + +import Distribution.Version (Version(Version)) + +import Distribution.Text (Text(..), display) +import qualified Distribution.Compat.ReadP as Parse +import qualified Text.PrettyPrint as Disp +import Text.PrettyPrint ((<>)) +import Distribution.Compat.Binary (Binary) +import qualified Data.Char as Char (isAlphaNum) +import Data.Data (Data) +import Data.Typeable (Typeable) +import GHC.Generics (Generic) + +-- | Indicates the license under which a package's source code is released. +-- Versions of the licenses not listed here will be rejected by Hackage and +-- cause @cabal check@ to issue a warning. +data License = + -- TODO: * remove BSD4 + + -- | GNU General Public License, + -- or + -- . + GPL (Maybe Version) + + -- | . + | AGPL (Maybe Version) + + -- | GNU Lesser General Public License, + -- or + -- . + | LGPL (Maybe Version) + + -- | . + | BSD2 + + -- | . + | BSD3 + + -- | . + -- This license has not been approved by the OSI and is incompatible with + -- the GNU GPL. It is provided for historical reasons and should be avoided. + | BSD4 + + -- | . + | MIT + + -- | + | ISC + + -- | . + | MPL Version + + -- | . + | Apache (Maybe Version) + + -- | The author of a package disclaims any copyright to its source code and + -- dedicates it to the public domain. This is not a software license. Please + -- note that it is not possible to dedicate works to the public domain in + -- every jurisdiction, nor is a work that is in the public domain in one + -- jurisdiction necessarily in the public domain elsewhere. + | PublicDomain + + -- | Explicitly 'All Rights Reserved', eg for proprietary software. The + -- package may not be legally modified or redistributed by anyone but the + -- rightsholder. 
+ | AllRightsReserved + + -- | No license specified which legally defaults to 'All Rights Reserved'. + -- The package may not be legally modified or redistributed by anyone but + -- the rightsholder. + | UnspecifiedLicense + + -- | Any other software license. + | OtherLicense + + -- | Indicates an erroneous license name. + | UnknownLicense String + deriving (Generic, Read, Show, Eq, Typeable, Data) + +instance Binary License + +-- | The list of all currently recognised licenses. +knownLicenses :: [License] +knownLicenses = [ GPL unversioned, GPL (version [2]), GPL (version [3]) + , LGPL unversioned, LGPL (version [2, 1]), LGPL (version [3]) + , AGPL unversioned, AGPL (version [3]) + , BSD2, BSD3, MIT, ISC + , MPL (Version [2, 0] []) + , Apache unversioned, Apache (version [2, 0]) + , PublicDomain, AllRightsReserved, OtherLicense] + where + unversioned = Nothing + version v = Just (Version v []) + +instance Text License where + disp (GPL version) = Disp.text "GPL" <> dispOptVersion version + disp (LGPL version) = Disp.text "LGPL" <> dispOptVersion version + disp (AGPL version) = Disp.text "AGPL" <> dispOptVersion version + disp (MPL version) = Disp.text "MPL" <> dispVersion version + disp (Apache version) = Disp.text "Apache" <> dispOptVersion version + disp (UnknownLicense other) = Disp.text other + disp other = Disp.text (show other) + + parse = do + name <- Parse.munch1 (\c -> Char.isAlphaNum c && c /= '-') + version <- Parse.option Nothing (Parse.char '-' >> fmap Just parse) + return $! 
case (name, version :: Maybe Version) of + ("GPL", _ ) -> GPL version + ("LGPL", _ ) -> LGPL version + ("AGPL", _ ) -> AGPL version + ("BSD2", Nothing) -> BSD2 + ("BSD3", Nothing) -> BSD3 + ("BSD4", Nothing) -> BSD4 + ("ISC", Nothing) -> ISC + ("MIT", Nothing) -> MIT + ("MPL", Just version') -> MPL version' + ("Apache", _ ) -> Apache version + ("PublicDomain", Nothing) -> PublicDomain + ("AllRightsReserved", Nothing) -> AllRightsReserved + ("OtherLicense", Nothing) -> OtherLicense + _ -> UnknownLicense $ name ++ + maybe "" (('-':) . display) version + +dispOptVersion :: Maybe Version -> Disp.Doc +dispOptVersion Nothing = Disp.empty +dispOptVersion (Just v) = dispVersion v + +dispVersion :: Version -> Disp.Doc +dispVersion v = Disp.char '-' <> disp v diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Make.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Make.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Make.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Make.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,184 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Make +-- Copyright : Martin Sjögren 2004 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This is an alternative build system that delegates everything to the @make@ +-- program. All the commands just end up calling @make@ with appropriate +-- arguments. The intention was to allow preexisting packages that used +-- makefiles to be wrapped into Cabal packages. In practice essentially all +-- such packages were converted over to the \"Simple\" build system instead. +-- Consequently this module is not used much and it certainly only sees cursory +-- maintenance and no testing. Perhaps at some point we should stop pretending +-- that it works. 
+-- +-- Uses the parsed command-line from "Distribution.Simple.Setup" in order to build +-- Haskell tools using a back-end build system based on make. Obviously we +-- assume that there is a configure script, and that after the ConfigCmd has +-- been run, there is a Makefile. Further assumptions: +-- +-- [ConfigCmd] We assume the configure script accepts +-- @--with-hc@, +-- @--with-hc-pkg@, +-- @--prefix@, +-- @--bindir@, +-- @--libdir@, +-- @--libexecdir@, +-- @--datadir@. +-- +-- [BuildCmd] We assume that the default Makefile target will build everything. +-- +-- [InstallCmd] We assume there is an @install@ target. Note that we assume that +-- this does *not* register the package! +-- +-- [CopyCmd] We assume there is a @copy@ target, and a variable @$(destdir)@. +-- The @copy@ target should probably just invoke @make install@ +-- recursively (e.g. @$(MAKE) install prefix=$(destdir)\/$(prefix) +-- bindir=$(destdir)\/$(bindir)@. The reason we can\'t invoke @make +-- install@ directly here is that we don\'t know the value of @$(prefix)@. +-- +-- [SDistCmd] We assume there is a @dist@ target. +-- +-- [RegisterCmd] We assume there is a @register@ target and a variable @$(user)@. +-- +-- [UnregisterCmd] We assume there is an @unregister@ target. +-- +-- [HaddockCmd] We assume there is a @docs@ or @doc@ target. + + +-- copy : +-- $(MAKE) install prefix=$(destdir)/$(prefix) \ +-- bindir=$(destdir)/$(bindir) \ + +module Distribution.Make ( + module Distribution.Package, + License(..), Version(..), + defaultMain, defaultMainArgs, defaultMainNoRead + ) where + +-- local +import Distribution.Compat.Exception +import Distribution.Package --must not specify imports, since we're exporting moule. 
+import Distribution.Simple.Program(defaultProgramConfiguration) +import Distribution.PackageDescription +import Distribution.Simple.Setup +import Distribution.Simple.Command + +import Distribution.Simple.Utils (rawSystemExit, cabalVersion) + +import Distribution.License (License(..)) +import Distribution.Version + ( Version(..) ) +import Distribution.Text + ( display ) + +import System.Environment (getArgs, getProgName) +import Data.List (intercalate) +import System.Exit + +defaultMain :: IO () +defaultMain = getArgs >>= defaultMainArgs + +defaultMainArgs :: [String] -> IO () +defaultMainArgs = defaultMainHelper + +{-# DEPRECATED defaultMainNoRead "it ignores its PackageDescription arg" #-} +defaultMainNoRead :: PackageDescription -> IO () +defaultMainNoRead = const defaultMain + +defaultMainHelper :: [String] -> IO () +defaultMainHelper args = + case commandsRun (globalCommand commands) commands args of + CommandHelp help -> printHelp help + CommandList opts -> printOptionsList opts + CommandErrors errs -> printErrors errs + CommandReadyToGo (flags, commandParse) -> + case commandParse of + _ | fromFlag (globalVersion flags) -> printVersion + | fromFlag (globalNumericVersion flags) -> printNumericVersion + CommandHelp help -> printHelp help + CommandList opts -> printOptionsList opts + CommandErrors errs -> printErrors errs + CommandReadyToGo action -> action + + where + printHelp help = getProgName >>= putStr . help + printOptionsList = putStr . 
unlines + printErrors errs = do + putStr (intercalate "\n" errs) + exitWith (ExitFailure 1) + printNumericVersion = putStrLn $ display cabalVersion + printVersion = putStrLn $ "Cabal library version " + ++ display cabalVersion + + progs = defaultProgramConfiguration + commands = + [configureCommand progs `commandAddAction` configureAction + ,buildCommand progs `commandAddAction` buildAction + ,installCommand `commandAddAction` installAction + ,copyCommand `commandAddAction` copyAction + ,haddockCommand `commandAddAction` haddockAction + ,cleanCommand `commandAddAction` cleanAction + ,sdistCommand `commandAddAction` sdistAction + ,registerCommand `commandAddAction` registerAction + ,unregisterCommand `commandAddAction` unregisterAction + ] + +configureAction :: ConfigFlags -> [String] -> IO () +configureAction flags args = do + noExtraFlags args + let verbosity = fromFlag (configVerbosity flags) + rawSystemExit verbosity "sh" $ + "configure" + : configureArgs backwardsCompatHack flags + where backwardsCompatHack = True + +copyAction :: CopyFlags -> [String] -> IO () +copyAction flags args = do + noExtraFlags args + let destArgs = case fromFlag $ copyDest flags of + NoCopyDest -> ["install"] + CopyTo path -> ["copy", "destdir=" ++ path] + rawSystemExit (fromFlag $ copyVerbosity flags) "make" destArgs + +installAction :: InstallFlags -> [String] -> IO () +installAction flags args = do + noExtraFlags args + rawSystemExit (fromFlag $ installVerbosity flags) "make" ["install"] + rawSystemExit (fromFlag $ installVerbosity flags) "make" ["register"] + +haddockAction :: HaddockFlags -> [String] -> IO () +haddockAction flags args = do + noExtraFlags args + rawSystemExit (fromFlag $ haddockVerbosity flags) "make" ["docs"] + `catchIO` \_ -> + rawSystemExit (fromFlag $ haddockVerbosity flags) "make" ["doc"] + +buildAction :: BuildFlags -> [String] -> IO () +buildAction flags args = do + noExtraFlags args + rawSystemExit (fromFlag $ buildVerbosity flags) "make" [] + +cleanAction 
:: CleanFlags -> [String] -> IO () +cleanAction flags args = do + noExtraFlags args + rawSystemExit (fromFlag $ cleanVerbosity flags) "make" ["clean"] + +sdistAction :: SDistFlags -> [String] -> IO () +sdistAction flags args = do + noExtraFlags args + rawSystemExit (fromFlag $ sDistVerbosity flags) "make" ["dist"] + +registerAction :: RegisterFlags -> [String] -> IO () +registerAction flags args = do + noExtraFlags args + rawSystemExit (fromFlag $ regVerbosity flags) "make" ["register"] + +unregisterAction :: RegisterFlags -> [String] -> IO () +unregisterAction flags args = do + noExtraFlags args + rawSystemExit (fromFlag $ regVerbosity flags) "make" ["unregister"] diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/ModuleName.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/ModuleName.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/ModuleName.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/ModuleName.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,110 @@ +{-# LANGUAGE DeriveDataTypeable #-} +{-# LANGUAGE DeriveGeneric #-} + +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.ModuleName +-- Copyright : Duncan Coutts 2008 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- Data type for Haskell module names. + +module Distribution.ModuleName ( + ModuleName, + fromString, + components, + toFilePath, + main, + simple, + ) where + +import Distribution.Text + ( Text(..) 
) + +import Distribution.Compat.Binary (Binary) +import qualified Data.Char as Char + ( isAlphaNum, isUpper ) +import Data.Data (Data) +import Data.Typeable (Typeable) +import qualified Distribution.Compat.ReadP as Parse +import qualified Text.PrettyPrint as Disp +import Data.List + ( intercalate, intersperse ) +import GHC.Generics (Generic) +import System.FilePath + ( pathSeparator ) + +-- | A valid Haskell module name. +-- +newtype ModuleName = ModuleName [String] + deriving (Eq, Generic, Ord, Read, Show, Typeable, Data) + +instance Binary ModuleName + +instance Text ModuleName where + disp (ModuleName ms) = + Disp.hcat (intersperse (Disp.char '.') (map Disp.text ms)) + + parse = do + ms <- Parse.sepBy1 component (Parse.char '.') + return (ModuleName ms) + + where + component = do + c <- Parse.satisfy Char.isUpper + cs <- Parse.munch validModuleChar + return (c:cs) + +validModuleChar :: Char -> Bool +validModuleChar c = Char.isAlphaNum c || c == '_' || c == '\'' + +validModuleComponent :: String -> Bool +validModuleComponent [] = False +validModuleComponent (c:cs) = Char.isUpper c + && all validModuleChar cs + +{-# DEPRECATED simple "use ModuleName.fromString instead" #-} +simple :: String -> ModuleName +simple str = ModuleName [str] + +-- | Construct a 'ModuleName' from a valid module name 'String'. +-- +-- This is just a convenience function intended for valid module strings. It is +-- an error if it is used with a string that is not a valid module name. If you +-- are parsing user input then use 'Distribution.Text.simpleParse' instead. +-- +fromString :: String -> ModuleName +fromString string + | all validModuleComponent components' = ModuleName components' + | otherwise = error badName + + where + components' = split string + badName = "ModuleName.fromString: invalid module name " ++ show string + + split cs = case break (=='.') cs of + (chunk,[]) -> chunk : [] + (chunk,_:rest) -> chunk : split rest + +-- | The module name @Main@. 
+-- +main :: ModuleName +main = ModuleName ["Main"] + +-- | The individual components of a hierarchical module name. For example +-- +-- > components (fromString "A.B.C") = ["A", "B", "C"] +-- +components :: ModuleName -> [String] +components (ModuleName ms) = ms + +-- | Convert a module name to a file path, but without any file extension. +-- For example: +-- +-- > toFilePath (fromString "A.B.C") = "A/B/C" +-- +toFilePath :: ModuleName -> FilePath +toFilePath = intercalate [pathSeparator] . components diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/PackageDescription/Check.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/PackageDescription/Check.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/PackageDescription/Check.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/PackageDescription/Check.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,1590 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.PackageDescription.Check +-- Copyright : Lennart Kolmodin 2008 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This has code for checking for various problems in packages. There is one +-- set of checks that just looks at a 'PackageDescription' in isolation and +-- another set of checks that also looks at files in the package. Some of the +-- checks are basic sanity checks, others are portability standards that we'd +-- like to encourage. There is a 'PackageCheck' type that distinguishes the +-- different kinds of check so we can see which ones are appropriate to report +-- in different situations. This code gets uses when configuring a package when +-- we consider only basic problems. The higher standard is uses when when +-- preparing a source tarball and by Hackage when uploading new packages. 
The +-- reason for this is that we want to hold packages that are expected to be +-- distributed to a higher standard than packages that are only ever expected +-- to be used on the author's own environment. + +module Distribution.PackageDescription.Check ( + -- * Package Checking + PackageCheck(..), + checkPackage, + checkConfiguredPackage, + + -- ** Checking package contents + checkPackageFiles, + checkPackageContent, + CheckPackageContentOps(..), + checkPackageFileNames, + ) where + +import Data.Maybe + ( isNothing, isJust, catMaybes, maybeToList, fromMaybe ) +import Data.List (sort, group, isPrefixOf, nub, find) +import Control.Monad + ( filterM, liftM ) +import qualified System.Directory as System + ( doesFileExist, doesDirectoryExist ) +import qualified Data.Map as Map + +import Distribution.PackageDescription +import Distribution.PackageDescription.Configuration + ( flattenPackageDescription, finalizePackageDescription ) +import Distribution.Compiler + ( CompilerFlavor(..), buildCompilerFlavor, CompilerId(..) + , unknownCompilerInfo, AbiTag(..) ) +import Distribution.System + ( OS(..), Arch(..), buildPlatform ) +import Distribution.License + ( License(..), knownLicenses ) +import Distribution.Simple.CCompiler + ( filenameCDialect ) +import Distribution.Simple.Utils + ( cabalVersion, intercalate, parseFileGlob, FileGlob(..), lowercase ) + +import Distribution.Version + ( Version(..) 
+ , VersionRange(..), foldVersionRange' + , anyVersion, noVersion, thisVersion, laterVersion, earlierVersion + , orLaterVersion, orEarlierVersion + , unionVersionRanges, intersectVersionRanges + , asVersionIntervals, UpperBound(..), isNoVersion ) +import Distribution.Package + ( PackageName(PackageName), packageName, packageVersion + , Dependency(..), pkgName ) + +import Distribution.Text + ( display, disp ) +import qualified Text.PrettyPrint as Disp +import Text.PrettyPrint ((<>), (<+>)) + +import qualified Language.Haskell.Extension as Extension (deprecatedExtensions) +import Language.Haskell.Extension + ( Language(UnknownLanguage), knownLanguages + , Extension(..), KnownExtension(..) ) +import System.FilePath + ( (), takeExtension, isRelative, isAbsolute + , splitDirectories, splitPath ) +import System.FilePath.Windows as FilePath.Windows + ( isValid ) + +-- | Results of some kind of failed package check. +-- +-- There are a range of severities, from merely dubious to totally insane. +-- All of them come with a human readable explanation. In future we may augment +-- them with more machine readable explanations, for example to help an IDE +-- suggest automatic corrections. +-- +data PackageCheck = + + -- | This package description is no good. There's no way it's going to + -- build sensibly. This should give an error at configure time. + PackageBuildImpossible { explanation :: String } + + -- | A problem that is likely to affect building the package, or an + -- issue that we'd like every package author to be aware of, even if + -- the package is never distributed. + | PackageBuildWarning { explanation :: String } + + -- | An issue that might not be a problem for the package author but + -- might be annoying or detrimental when the package is distributed to + -- users. We should encourage distributed packages to be free from these + -- issues, but occasionally there are justifiable reasons so we cannot + -- ban them entirely. 
+ | PackageDistSuspicious { explanation :: String } + + -- | An issue that is OK in the author's environment but is almost + -- certain to be a portability problem for other environments. We can + -- quite legitimately refuse to publicly distribute packages with these + -- problems. + | PackageDistInexcusable { explanation :: String } + deriving (Eq) + +instance Show PackageCheck where + show notice = explanation notice + +check :: Bool -> PackageCheck -> Maybe PackageCheck +check False _ = Nothing +check True pc = Just pc + +checkSpecVersion :: PackageDescription -> [Int] -> Bool -> PackageCheck + -> Maybe PackageCheck +checkSpecVersion pkg specver cond pc + | specVersion pkg >= Version specver [] = Nothing + | otherwise = check cond pc + + +-- ------------------------------------------------------------ +-- * Standard checks +-- ------------------------------------------------------------ + +-- | Check for common mistakes and problems in package descriptions. +-- +-- This is the standard collection of checks covering all aspects except +-- for checks that require looking at files within the package. For those +-- see 'checkPackageFiles'. +-- +-- It requires the 'GenericPackageDescription' and optionally a particular +-- configuration of that package. If you pass 'Nothing' then we just check +-- a version of the generic description using 'flattenPackageDescription'. 
+-- +checkPackage :: GenericPackageDescription + -> Maybe PackageDescription + -> [PackageCheck] +checkPackage gpkg mpkg = + checkConfiguredPackage pkg + ++ checkConditionals gpkg + ++ checkPackageVersions gpkg + where + pkg = fromMaybe (flattenPackageDescription gpkg) mpkg + +--TODO: make this variant go away +-- we should always know the GenericPackageDescription +checkConfiguredPackage :: PackageDescription -> [PackageCheck] +checkConfiguredPackage pkg = + checkSanity pkg + ++ checkFields pkg + ++ checkLicense pkg + ++ checkSourceRepos pkg + ++ checkGhcOptions pkg + ++ checkCCOptions pkg + ++ checkCPPOptions pkg + ++ checkPaths pkg + ++ checkCabalVersion pkg + + +-- ------------------------------------------------------------ +-- * Basic sanity checks +-- ------------------------------------------------------------ + +-- | Check that this package description is sane. +-- +checkSanity :: PackageDescription -> [PackageCheck] +checkSanity pkg = + catMaybes [ + + check (null . (\(PackageName n) -> n) . packageName $ pkg) $ + PackageBuildImpossible "No 'name' field." + + , check (null . versionBranch . packageVersion $ pkg) $ + PackageBuildImpossible "No 'version' field." + + , check (null (executables pkg) && isNothing (library pkg)) $ + PackageBuildImpossible + "No executables and no library found. Nothing to do." + + , check (not (null duplicateNames)) $ + PackageBuildImpossible $ "Duplicate sections: " ++ commaSep duplicateNames + ++ ". The name of every executable, test suite, and benchmark section in" + ++ " the package must be unique." + ] + --TODO: check for name clashes case insensitively: windows file systems cannot + --cope. 
+ + ++ maybe [] (checkLibrary pkg) (library pkg) + ++ concatMap (checkExecutable pkg) (executables pkg) + ++ concatMap (checkTestSuite pkg) (testSuites pkg) + ++ concatMap (checkBenchmark pkg) (benchmarks pkg) + + ++ catMaybes [ + + check (specVersion pkg > cabalVersion) $ + PackageBuildImpossible $ + "This package description follows version " + ++ display (specVersion pkg) ++ " of the Cabal specification. This " + ++ "tool only supports up to version " ++ display cabalVersion ++ "." + ] + where + exeNames = map exeName $ executables pkg + testNames = map testName $ testSuites pkg + bmNames = map benchmarkName $ benchmarks pkg + duplicateNames = dups $ exeNames ++ testNames ++ bmNames + +checkLibrary :: PackageDescription -> Library -> [PackageCheck] +checkLibrary pkg lib = + catMaybes [ + + check (not (null moduleDuplicates)) $ + PackageBuildImpossible $ + "Duplicate modules in library: " + ++ commaSep (map display moduleDuplicates) + + -- check use of required-signatures/exposed-signatures sections + , checkVersion [1,21] (not (null (requiredSignatures lib))) $ + PackageDistInexcusable $ + "To use the 'required-signatures' field the package needs to specify " + ++ "at least 'cabal-version: >= 1.21'." + + , checkVersion [1,21] (not (null (exposedSignatures lib))) $ + PackageDistInexcusable $ + "To use the 'exposed-signatures' field the package needs to specify " + ++ "at least 'cabal-version: >= 1.21'." 
+ ] + + where + checkVersion :: [Int] -> Bool -> PackageCheck -> Maybe PackageCheck + checkVersion ver cond pc + | specVersion pkg >= Version ver [] = Nothing + | otherwise = check cond pc + + moduleDuplicates = dups (libModules lib ++ + map moduleReexportName (reexportedModules lib)) + +checkExecutable :: PackageDescription -> Executable -> [PackageCheck] +checkExecutable pkg exe = + catMaybes [ + + check (null (modulePath exe)) $ + PackageBuildImpossible $ + "No 'main-is' field found for executable " ++ exeName exe + + , check (not (null (modulePath exe)) + && (not $ fileExtensionSupportedLanguage $ modulePath exe)) $ + PackageBuildImpossible $ + "The 'main-is' field must specify a '.hs' or '.lhs' file " + ++ "(even if it is generated by a preprocessor), " + ++ "or it may specify a C/C++/obj-C source file." + + , checkSpecVersion pkg [1,17] + (fileExtensionSupportedLanguage (modulePath exe) + && takeExtension (modulePath exe) `notElem` [".hs", ".lhs"]) $ + PackageDistInexcusable $ + "The package uses a C/C++/obj-C source file for the 'main-is' field. " + ++ "To use this feature you must specify 'cabal-version: >= 1.18'." + + , check (not (null moduleDuplicates)) $ + PackageBuildImpossible $ + "Duplicate modules in executable '" ++ exeName exe ++ "': " + ++ commaSep (map display moduleDuplicates) + ] + where + moduleDuplicates = dups (exeModules exe) + +checkTestSuite :: PackageDescription -> TestSuite -> [PackageCheck] +checkTestSuite pkg test = + catMaybes [ + + case testInterface test of + TestSuiteUnsupported tt@(TestTypeUnknown _ _) -> Just $ + PackageBuildWarning $ + quote (display tt) ++ " is not a known type of test suite. " + ++ "The known test suite types are: " + ++ commaSep (map display knownTestTypes) + + TestSuiteUnsupported tt -> Just $ + PackageBuildWarning $ + quote (display tt) ++ " is not a supported test suite version. 
" + ++ "The known test suite types are: " + ++ commaSep (map display knownTestTypes) + _ -> Nothing + + , check (not $ null moduleDuplicates) $ + PackageBuildImpossible $ + "Duplicate modules in test suite '" ++ testName test ++ "': " + ++ commaSep (map display moduleDuplicates) + + , check mainIsWrongExt $ + PackageBuildImpossible $ + "The 'main-is' field must specify a '.hs' or '.lhs' file " + ++ "(even if it is generated by a preprocessor), " + ++ "or it may specify a C/C++/obj-C source file." + + , checkSpecVersion pkg [1,17] (mainIsNotHsExt && not mainIsWrongExt) $ + PackageDistInexcusable $ + "The package uses a C/C++/obj-C source file for the 'main-is' field. " + ++ "To use this feature you must specify 'cabal-version: >= 1.18'." + + -- Test suites might be built as (internal) libraries named after + -- the test suite and thus their names must not clash with the + -- name of the package. + , check libNameClash $ + PackageBuildImpossible $ + "The test suite " ++ testName test + ++ " has the same name as the package." + ] + where + moduleDuplicates = dups $ testModules test + + mainIsWrongExt = case testInterface test of + TestSuiteExeV10 _ f -> not $ fileExtensionSupportedLanguage f + _ -> False + + mainIsNotHsExt = case testInterface test of + TestSuiteExeV10 _ f -> takeExtension f `notElem` [".hs", ".lhs"] + _ -> False + + libNameClash = testName test `elem` [ libName + | _lib <- maybeToList (library pkg) + , let PackageName libName = + pkgName (package pkg) ] + +checkBenchmark :: PackageDescription -> Benchmark -> [PackageCheck] +checkBenchmark pkg bm = + catMaybes [ + + case benchmarkInterface bm of + BenchmarkUnsupported tt@(BenchmarkTypeUnknown _ _) -> Just $ + PackageBuildWarning $ + quote (display tt) ++ " is not a known type of benchmark. " + ++ "The known benchmark types are: " + ++ commaSep (map display knownBenchmarkTypes) + + BenchmarkUnsupported tt -> Just $ + PackageBuildWarning $ + quote (display tt) ++ " is not a supported benchmark version. 
" + ++ "The known benchmark types are: " + ++ commaSep (map display knownBenchmarkTypes) + _ -> Nothing + + , check (not $ null moduleDuplicates) $ + PackageBuildImpossible $ + "Duplicate modules in benchmark '" ++ benchmarkName bm ++ "': " + ++ commaSep (map display moduleDuplicates) + + , check mainIsWrongExt $ + PackageBuildImpossible $ + "The 'main-is' field must specify a '.hs' or '.lhs' file " + ++ "(even if it is generated by a preprocessor)." + + -- See comment for similar check on test suites. + , check libNameClash $ + PackageBuildImpossible $ + "The benchmark " ++ benchmarkName bm + ++ " has the same name as the package." + ] + where + moduleDuplicates = dups $ benchmarkModules bm + + mainIsWrongExt = case benchmarkInterface bm of + BenchmarkExeV10 _ f -> takeExtension f `notElem` [".hs", ".lhs"] + _ -> False + + libNameClash = benchmarkName bm `elem` [ libName + | _lib <- maybeToList (library pkg) + , let PackageName libName = + pkgName (package pkg) ] + +-- ------------------------------------------------------------ +-- * Additional pure checks +-- ------------------------------------------------------------ + +checkFields :: PackageDescription -> [PackageCheck] +checkFields pkg = + catMaybes [ + + check (not . FilePath.Windows.isValid . display . packageName $ pkg) $ + PackageDistInexcusable $ + "Unfortunately, the package name '" ++ display (packageName pkg) + ++ "' is one of the reserved system file names on Windows. Many tools " + ++ "need to convert package names to file names so using this name " + ++ "would cause problems." + + , check (isNothing (buildType pkg)) $ + PackageBuildWarning $ + "No 'build-type' specified. If you do not need a custom Setup.hs or " + ++ "./configure script then use 'build-type: Simple'." + + , case buildType pkg of + Just (UnknownBuildType unknown) -> Just $ + PackageBuildWarning $ + quote unknown ++ " is not a known 'build-type'. 
" + ++ "The known build types are: " + ++ commaSep (map display knownBuildTypes) + _ -> Nothing + + , check (not (null unknownCompilers)) $ + PackageBuildWarning $ + "Unknown compiler " ++ commaSep (map quote unknownCompilers) + ++ " in 'tested-with' field." + + , check (not (null unknownLanguages)) $ + PackageBuildWarning $ + "Unknown languages: " ++ commaSep unknownLanguages + + , check (not (null unknownExtensions)) $ + PackageBuildWarning $ + "Unknown extensions: " ++ commaSep unknownExtensions + + , check (not (null languagesUsedAsExtensions)) $ + PackageBuildWarning $ + "Languages listed as extensions: " + ++ commaSep languagesUsedAsExtensions + ++ ". Languages must be specified in either the 'default-language' " + ++ " or the 'other-languages' field." + + , check (not (null deprecatedExtensions)) $ + PackageDistSuspicious $ + "Deprecated extensions: " + ++ commaSep (map (quote . display . fst) deprecatedExtensions) + ++ ". " ++ unwords + [ "Instead of '" ++ display ext + ++ "' use '" ++ display replacement ++ "'." + | (ext, Just replacement) <- deprecatedExtensions ] + + , check (null (category pkg)) $ + PackageDistSuspicious "No 'category' field." + + , check (null (maintainer pkg)) $ + PackageDistSuspicious "No 'maintainer' field." + + , check (null (synopsis pkg) && null (description pkg)) $ + PackageDistInexcusable "No 'synopsis' or 'description' field." + + , check (null (description pkg) && not (null (synopsis pkg))) $ + PackageDistSuspicious "No 'description' field." + + , check (null (synopsis pkg) && not (null (description pkg))) $ + PackageDistSuspicious "No 'synopsis' field." + + --TODO: recommend the bug reports URL, author and homepage fields + --TODO: recommend not using the stability field + --TODO: recommend specifying a source repo + + , check (length (synopsis pkg) >= 80) $ + PackageDistSuspicious + "The 'synopsis' field is rather long (max 80 chars is recommended)." 
+ + -- check use of impossible constraints "tested-with: GHC== 6.10 && ==6.12" + , check (not (null testedWithImpossibleRanges)) $ + PackageDistInexcusable $ + "Invalid 'tested-with' version range: " + ++ commaSep (map display testedWithImpossibleRanges) + ++ ". To indicate that you have tested a package with multiple " + ++ "different versions of the same compiler use multiple entries, " + ++ "for example 'tested-with: GHC==6.10.4, GHC==6.12.3' and not " + ++ "'tested-with: GHC==6.10.4 && ==6.12.3'." + ] + where + unknownCompilers = [ name | (OtherCompiler name, _) <- testedWith pkg ] + unknownLanguages = [ name | bi <- allBuildInfo pkg + , UnknownLanguage name <- allLanguages bi ] + unknownExtensions = [ name | bi <- allBuildInfo pkg + , UnknownExtension name <- allExtensions bi + , name `notElem` map display knownLanguages ] + deprecatedExtensions = nub $ catMaybes + [ find ((==ext) . fst) Extension.deprecatedExtensions + | bi <- allBuildInfo pkg + , ext <- allExtensions bi ] + languagesUsedAsExtensions = + [ name | bi <- allBuildInfo pkg + , UnknownExtension name <- allExtensions bi + , name `elem` map display knownLanguages ] + + testedWithImpossibleRanges = + [ Dependency (PackageName (display compiler)) vr + | (compiler, vr) <- testedWith pkg + , isNoVersion vr ] + + +checkLicense :: PackageDescription -> [PackageCheck] +checkLicense pkg = + catMaybes [ + + check (license pkg == UnspecifiedLicense) $ + PackageDistInexcusable + "The 'license' field is missing." + + , check (license pkg == AllRightsReserved) $ + PackageDistSuspicious + "The 'license' is AllRightsReserved. Is that really what you want?" + , case license pkg of + UnknownLicense l -> Just $ + PackageBuildWarning $ + quote ("license: " ++ l) ++ " is not a recognised license. The " + ++ "known licenses are: " + ++ commaSep (map display knownLicenses) + _ -> Nothing + + , check (license pkg == BSD4) $ + PackageDistSuspicious $ + "Using 'license: BSD4' is almost always a misunderstanding. 
'BSD4' " + ++ "refers to the old 4-clause BSD license with the advertising " + ++ "clause. 'BSD3' refers the new 3-clause BSD license." + + , case unknownLicenseVersion (license pkg) of + Just knownVersions -> Just $ + PackageDistSuspicious $ + "'license: " ++ display (license pkg) ++ "' is not a known " + ++ "version of that license. The known versions are " + ++ commaSep (map display knownVersions) + ++ ". If this is not a mistake and you think it should be a known " + ++ "version then please file a ticket." + _ -> Nothing + + , check (license pkg `notElem` [ AllRightsReserved + , UnspecifiedLicense, PublicDomain] + -- AllRightsReserved and PublicDomain are not strictly + -- licenses so don't need license files. + && null (licenseFiles pkg)) $ + PackageDistSuspicious "A 'license-file' is not specified." + ] + where + unknownLicenseVersion (GPL (Just v)) + | v `notElem` knownVersions = Just knownVersions + where knownVersions = [ v' | GPL (Just v') <- knownLicenses ] + unknownLicenseVersion (LGPL (Just v)) + | v `notElem` knownVersions = Just knownVersions + where knownVersions = [ v' | LGPL (Just v') <- knownLicenses ] + unknownLicenseVersion (AGPL (Just v)) + | v `notElem` knownVersions = Just knownVersions + where knownVersions = [ v' | AGPL (Just v') <- knownLicenses ] + unknownLicenseVersion (Apache (Just v)) + | v `notElem` knownVersions = Just knownVersions + where knownVersions = [ v' | Apache (Just v') <- knownLicenses ] + unknownLicenseVersion _ = Nothing + +checkSourceRepos :: PackageDescription -> [PackageCheck] +checkSourceRepos pkg = + catMaybes $ concat [[ + + case repoKind repo of + RepoKindUnknown kind -> Just $ PackageDistInexcusable $ + quote kind ++ " is not a recognised kind of source-repository. " + ++ "The repo kind is usually 'head' or 'this'" + _ -> Nothing + + , check (isNothing (repoType repo)) $ + PackageDistInexcusable + "The source-repository 'type' is a required field." 
+ + , check (isNothing (repoLocation repo)) $ + PackageDistInexcusable + "The source-repository 'location' is a required field." + + , check (repoType repo == Just CVS && isNothing (repoModule repo)) $ + PackageDistInexcusable + "For a CVS source-repository, the 'module' is a required field." + + , check (repoKind repo == RepoThis && isNothing (repoTag repo)) $ + PackageDistInexcusable $ + "For the 'this' kind of source-repository, the 'tag' is a required " + ++ "field. It should specify the tag corresponding to this version " + ++ "or release of the package." + + , check (maybe False System.FilePath.isAbsolute (repoSubdir repo)) $ + PackageDistInexcusable + "The 'subdir' field of a source-repository must be a relative path." + ] + | repo <- sourceRepos pkg ] + +--TODO: check location looks like a URL for some repo types. + +checkGhcOptions :: PackageDescription -> [PackageCheck] +checkGhcOptions pkg = + catMaybes [ + + check has_WerrorWall $ + PackageDistSuspicious $ + "'ghc-options: -Wall -Werror' makes the package very easy to " + ++ "break with future GHC versions because new GHC versions often " + ++ "add new warnings. Use just 'ghc-options: -Wall' instead." + + , check (not has_WerrorWall && has_Werror) $ + PackageDistSuspicious $ + "'ghc-options: -Werror' makes the package easy to " + ++ "break with future GHC versions because new GHC versions often " + ++ "add new warnings." + + , checkFlags ["-fasm"] $ + PackageDistInexcusable $ + "'ghc-options: -fasm' is unnecessary and will not work on CPU " + ++ "architectures other than x86, x86-64, ppc or sparc." + + , checkFlags ["-fvia-C"] $ + PackageDistSuspicious $ + "'ghc-options: -fvia-C' is usually unnecessary. If your package " + ++ "needs -via-C for correctness rather than performance then it " + ++ "is using the FFI incorrectly and will probably not work with GHC " + ++ "6.10 or later." 
+ + , checkFlags ["-fdefer-type-errors"] $ + PackageDistInexcusable $ + "'ghc-options: -fdefer-type-errors' is fine during development but " + ++ "is not appropriate for a distributed package." + + , checkFlags ["-fhpc"] $ + PackageDistInexcusable $ + "'ghc-options: -fhpc' is not appropriate for a distributed package." + + -- -dynamic is not a debug flag + , check (any (\opt -> "-d" `isPrefixOf` opt && opt /= "-dynamic") + all_ghc_options) $ + PackageDistInexcusable $ + "'ghc-options: -d*' debug flags are not appropriate " + ++ "for a distributed package." + + , checkFlags ["-prof"] $ + PackageBuildWarning $ + "'ghc-options: -prof' is not necessary and will lead to problems " + ++ "when used on a library. Use the configure flag " + ++ "--enable-library-profiling and/or --enable-executable-profiling." + + , checkFlags ["-o"] $ + PackageBuildWarning $ + "'ghc-options: -o' is not needed. " + ++ "The output files are named automatically." + + , checkFlags ["-hide-package"] $ + PackageBuildWarning $ + "'ghc-options: -hide-package' is never needed. " + ++ "Cabal hides all packages." + + , checkFlags ["--make"] $ + PackageBuildWarning $ + "'ghc-options: --make' is never needed. Cabal uses this automatically." + + , checkFlags ["-main-is"] $ + PackageDistSuspicious $ + "'ghc-options: -main-is' is not portable." + + , checkFlags ["-O0", "-Onot"] $ + PackageDistSuspicious $ + "'ghc-options: -O0' is not needed. " + ++ "Use the --disable-optimization configure flag." + + , checkFlags [ "-O", "-O1"] $ + PackageDistInexcusable $ + "'ghc-options: -O' is not needed. " + ++ "Cabal automatically adds the '-O' flag. " + ++ "Setting it yourself interferes with the --disable-optimization flag." + + , checkFlags ["-O2"] $ + PackageDistSuspicious $ + "'ghc-options: -O2' is rarely needed. " + ++ "Check that it is giving a real benefit " + ++ "and not just imposing longer compile times on your users." 
+ + , checkFlags ["-split-objs"] $ + PackageBuildWarning $ + "'ghc-options: -split-objs' is not needed. " + ++ "Use the --enable-split-objs configure flag." + + , checkFlags ["-optl-Wl,-s", "-optl-s"] $ + PackageDistInexcusable $ + "'ghc-options: -optl-Wl,-s' is not needed and is not portable to all" + ++ " operating systems. Cabal 1.4 and later automatically strip" + ++ " executables. Cabal also has a flag --disable-executable-stripping" + ++ " which is necessary when building packages for some Linux" + ++ " distributions and using '-optl-Wl,-s' prevents that from working." + + , checkFlags ["-fglasgow-exts"] $ + PackageDistSuspicious $ + "Instead of 'ghc-options: -fglasgow-exts' it is preferable to use " + ++ "the 'extensions' field." + + , checkProfFlags ["-auto-all"] $ + PackageDistSuspicious $ + "'ghc-prof-options: -auto-all' is fine during development, but " + ++ "not recommended in a distributed package. " + + , checkProfFlags ["-fprof-auto"] $ + PackageDistSuspicious $ + "'ghc-prof-options: -fprof-auto' is fine during development, but " + ++ "not recommended in a distributed package. " + + , check ("-threaded" `elem` lib_ghc_options) $ + PackageDistSuspicious $ + "'ghc-options: -threaded' has no effect for libraries. It should " + ++ "only be used for executables." 
+ + , checkAlternatives "ghc-options" "extensions" + [ (flag, display extension) | flag <- all_ghc_options + , Just extension <- [ghcExtension flag] ] + + , checkAlternatives "ghc-options" "extensions" + [ (flag, extension) | flag@('-':'X':extension) <- all_ghc_options ] + + , checkAlternatives "ghc-options" "cpp-options" $ + [ (flag, flag) | flag@('-':'D':_) <- all_ghc_options ] + ++ [ (flag, flag) | flag@('-':'U':_) <- all_ghc_options ] + + , checkAlternatives "ghc-options" "include-dirs" + [ (flag, dir) | flag@('-':'I':dir) <- all_ghc_options ] + + , checkAlternatives "ghc-options" "extra-libraries" + [ (flag, lib) | flag@('-':'l':lib) <- all_ghc_options ] + + , checkAlternatives "ghc-options" "extra-lib-dirs" + [ (flag, dir) | flag@('-':'L':dir) <- all_ghc_options ] + ] + + where + has_WerrorWall = flip any ghc_options $ \opts -> + "-Werror" `elem` opts + && ("-Wall" `elem` opts || "-W" `elem` opts) + has_Werror = any (\opts -> "-Werror" `elem` opts) ghc_options + + (ghc_options, ghc_prof_options) = + unzip . map (\bi -> (hcOptions GHC bi, hcProfOptions GHC bi)) + $ (allBuildInfo pkg) + all_ghc_options = concat ghc_options + all_ghc_prof_options = concat ghc_prof_options + lib_ghc_options = maybe [] (hcOptions GHC . 
libBuildInfo) (library pkg) + + checkFlags,checkProfFlags :: [String] -> PackageCheck -> Maybe PackageCheck + checkFlags flags = doCheckFlags flags all_ghc_options + checkProfFlags flags = doCheckFlags flags all_ghc_prof_options + + doCheckFlags flags opts = check (any (`elem` flags) opts) + + ghcExtension ('-':'f':name) = case name of + "allow-overlapping-instances" -> enable OverlappingInstances + "no-allow-overlapping-instances" -> disable OverlappingInstances + "th" -> enable TemplateHaskell + "no-th" -> disable TemplateHaskell + "ffi" -> enable ForeignFunctionInterface + "no-ffi" -> disable ForeignFunctionInterface + "fi" -> enable ForeignFunctionInterface + "no-fi" -> disable ForeignFunctionInterface + "monomorphism-restriction" -> enable MonomorphismRestriction + "no-monomorphism-restriction" -> disable MonomorphismRestriction + "mono-pat-binds" -> enable MonoPatBinds + "no-mono-pat-binds" -> disable MonoPatBinds + "allow-undecidable-instances" -> enable UndecidableInstances + "no-allow-undecidable-instances" -> disable UndecidableInstances + "allow-incoherent-instances" -> enable IncoherentInstances + "no-allow-incoherent-instances" -> disable IncoherentInstances + "arrows" -> enable Arrows + "no-arrows" -> disable Arrows + "generics" -> enable Generics + "no-generics" -> disable Generics + "implicit-prelude" -> enable ImplicitPrelude + "no-implicit-prelude" -> disable ImplicitPrelude + "implicit-params" -> enable ImplicitParams + "no-implicit-params" -> disable ImplicitParams + "bang-patterns" -> enable BangPatterns + "no-bang-patterns" -> disable BangPatterns + "scoped-type-variables" -> enable ScopedTypeVariables + "no-scoped-type-variables" -> disable ScopedTypeVariables + "extended-default-rules" -> enable ExtendedDefaultRules + "no-extended-default-rules" -> disable ExtendedDefaultRules + _ -> Nothing + ghcExtension "-cpp" = enable CPP + ghcExtension _ = Nothing + + enable e = Just (EnableExtension e) + disable e = Just (DisableExtension e) + 
+checkCCOptions :: PackageDescription -> [PackageCheck] +checkCCOptions pkg = + catMaybes [ + + checkAlternatives "cc-options" "include-dirs" + [ (flag, dir) | flag@('-':'I':dir) <- all_ccOptions ] + + , checkAlternatives "cc-options" "extra-libraries" + [ (flag, lib) | flag@('-':'l':lib) <- all_ccOptions ] + + , checkAlternatives "cc-options" "extra-lib-dirs" + [ (flag, dir) | flag@('-':'L':dir) <- all_ccOptions ] + + , checkAlternatives "ld-options" "extra-libraries" + [ (flag, lib) | flag@('-':'l':lib) <- all_ldOptions ] + + , checkAlternatives "ld-options" "extra-lib-dirs" + [ (flag, dir) | flag@('-':'L':dir) <- all_ldOptions ] + + , checkCCFlags [ "-O", "-Os", "-O0", "-O1", "-O2", "-O3" ] $ + PackageDistSuspicious $ + "'cc-options: -O[n]' is generally not needed. When building with " + ++ " optimisations Cabal automatically adds '-O2' for C code. " + ++ "Setting it yourself interferes with the --disable-optimization " + ++ "flag." + ] + + where all_ccOptions = [ opts | bi <- allBuildInfo pkg + , opts <- ccOptions bi ] + all_ldOptions = [ opts | bi <- allBuildInfo pkg + , opts <- ldOptions bi ] + + checkCCFlags :: [String] -> PackageCheck -> Maybe PackageCheck + checkCCFlags flags = check (any (`elem` flags) all_ccOptions) + +checkCPPOptions :: PackageDescription -> [PackageCheck] +checkCPPOptions pkg = + catMaybes [ + checkAlternatives "cpp-options" "include-dirs" + [ (flag, dir) | flag@('-':'I':dir) <- all_cppOptions] + ] + where all_cppOptions = [ opts | bi <- allBuildInfo pkg + , opts <- cppOptions bi ] + +checkAlternatives :: String -> String -> [(String, String)] -> Maybe PackageCheck +checkAlternatives badField goodField flags = + check (not (null badFlags)) $ + PackageBuildWarning $ + "Instead of " ++ quote (badField ++ ": " ++ unwords badFlags) + ++ " use " ++ quote (goodField ++ ": " ++ unwords goodFlags) + + where (badFlags, goodFlags) = unzip flags + +checkPaths :: PackageDescription -> [PackageCheck] +checkPaths pkg = + [ PackageBuildWarning $ + 
quote (kind ++ ": " ++ path) + ++ " is a relative path outside of the source tree. " + ++ "This will not work when generating a tarball with 'sdist'." + | (path, kind) <- relPaths ++ absPaths + , isOutsideTree path ] + ++ + [ PackageDistInexcusable $ + quote (kind ++ ": " ++ path) ++ " is an absolute directory." + | (path, kind) <- relPaths + , isAbsolute path ] + ++ + [ PackageDistInexcusable $ + quote (kind ++ ": " ++ path) ++ " points inside the 'dist' " + ++ "directory. This is not reliable because the location of this " + ++ "directory is configurable by the user (or package manager). In " + ++ "addition the layout of the 'dist' directory is subject to change " + ++ "in future versions of Cabal." + | (path, kind) <- relPaths ++ absPaths + , isInsideDist path ] + ++ + [ PackageDistInexcusable $ + "The 'ghc-options' contains the path '" ++ path ++ "' which points " + ++ "inside the 'dist' directory. This is not reliable because the " + ++ "location of this directory is configurable by the user (or package " + ++ "manager). In addition the layout of the 'dist' directory is subject " + ++ "to change in future versions of Cabal." 
+ | bi <- allBuildInfo pkg + , (GHC, flags) <- options bi + , path <- flags + , isInsideDist path ] + where + isOutsideTree path = case splitDirectories path of + "..":_ -> True + ".":"..":_ -> True + _ -> False + isInsideDist path = case map lowercase (splitDirectories path) of + "dist" :_ -> True + ".":"dist":_ -> True + _ -> False + -- paths that must be relative + relPaths = + [ (path, "extra-src-files") | path <- extraSrcFiles pkg ] + ++ [ (path, "extra-tmp-files") | path <- extraTmpFiles pkg ] + ++ [ (path, "extra-doc-files") | path <- extraDocFiles pkg ] + ++ [ (path, "data-files") | path <- dataFiles pkg ] + ++ [ (path, "data-dir") | path <- [dataDir pkg]] + ++ concat + [ [ (path, "c-sources") | path <- cSources bi ] + ++ [ (path, "js-sources") | path <- jsSources bi ] + ++ [ (path, "install-includes") | path <- installIncludes bi ] + ++ [ (path, "hs-source-dirs") | path <- hsSourceDirs bi ] + | bi <- allBuildInfo pkg ] + -- paths that are allowed to be absolute + absPaths = concat + [ [ (path, "includes") | path <- includes bi ] + ++ [ (path, "include-dirs") | path <- includeDirs bi ] + ++ [ (path, "extra-lib-dirs") | path <- extraLibDirs bi ] + | bi <- allBuildInfo pkg ] + +--TODO: check sets of paths that would be interpreted differently between Unix +-- and windows, ie case-sensitive or insensitive. Things that might clash, or +-- conversely be distinguished. + +--TODO: use the tar path checks on all the above paths + +-- | Check that the package declares the version in the @\"cabal-version\"@ +-- field correctly. +-- +checkCabalVersion :: PackageDescription -> [PackageCheck] +checkCabalVersion pkg = + catMaybes [ + + -- check syntax of cabal-version field + check (specVersion pkg >= Version [1,10] [] + && not simpleSpecVersionRangeSyntax) $ + PackageBuildWarning $ + "Packages relying on Cabal 1.10 or later must only specify a " + ++ "version range of the form 'cabal-version: >= x.y'. Use " + ++ "'cabal-version: >= " ++ display (specVersion pkg) ++ "'." 
+ + -- check syntax of cabal-version field + , check (specVersion pkg < Version [1,9] [] + && not simpleSpecVersionRangeSyntax) $ + PackageDistSuspicious $ + "It is recommended that the 'cabal-version' field only specify a " + ++ "version range of the form '>= x.y'. Use " + ++ "'cabal-version: >= " ++ display (specVersion pkg) ++ "'. " + ++ "Tools based on Cabal 1.10 and later will ignore upper bounds." + + -- check syntax of cabal-version field + , checkVersion [1,12] simpleSpecVersionSyntax $ + PackageBuildWarning $ + "With Cabal 1.10 or earlier, the 'cabal-version' field must use " + ++ "range syntax rather than a simple version number. Use " + ++ "'cabal-version: >= " ++ display (specVersion pkg) ++ "'." + + -- check use of test suite sections + , checkVersion [1,8] (not (null $ testSuites pkg)) $ + PackageDistInexcusable $ + "The 'test-suite' section is new in Cabal 1.10. " + ++ "Unfortunately it messes up the parser in older Cabal versions " + ++ "so you must specify at least 'cabal-version: >= 1.8', but note " + ++ "that only Cabal 1.10 and later can actually run such test suites." + + -- check use of default-language field + -- note that we do not need to do an equivalent check for the + -- other-language field since that one does not change behaviour + , checkVersion [1,10] (any isJust (buildInfoField defaultLanguage)) $ + PackageBuildWarning $ + "To use the 'default-language' field the package needs to specify " + ++ "at least 'cabal-version: >= 1.10'." + + , check (specVersion pkg >= Version [1,10] [] + && (any isNothing (buildInfoField defaultLanguage))) $ + PackageBuildWarning $ + "Packages using 'cabal-version: >= 1.10' must specify the " + ++ "'default-language' field for each component (e.g. Haskell98 or " + ++ "Haskell2010). If a component uses different languages in " + ++ "different modules then list the other ones in the " + ++ "'other-languages' field." 
+ + -- check use of reexported-modules sections + , checkVersion [1,21] + (maybe False (not.null.reexportedModules) (library pkg)) $ + PackageDistInexcusable $ + "To use the 'reexported-module' field the package needs to specify " + ++ "at least 'cabal-version: >= 1.21'." + + -- check use of thinning and renaming + , checkVersion [1,21] (not (null depsUsingThinningRenamingSyntax)) $ + PackageDistInexcusable $ + "The package uses " + ++ "thinning and renaming in the 'build-depends' field: " + ++ commaSep (map display depsUsingThinningRenamingSyntax) + ++ ". To use this new syntax, the package needs to specify at least" + ++ "'cabal-version: >= 1.21'." + + -- check use of default-extensions field + -- don't need to do the equivalent check for other-extensions + , checkVersion [1,10] (any (not . null) (buildInfoField defaultExtensions)) $ + PackageBuildWarning $ + "To use the 'default-extensions' field the package needs to specify " + ++ "at least 'cabal-version: >= 1.10'." + + -- check use of extensions field + , check (specVersion pkg >= Version [1,10] [] + && (any (not . null) (buildInfoField oldExtensions))) $ + PackageBuildWarning $ + "For packages using 'cabal-version: >= 1.10' the 'extensions' " + ++ "field is deprecated. The new 'default-extensions' field lists " + ++ "extensions that are used in all modules in the component, while " + ++ "the 'other-extensions' field lists extensions that are used in " + ++ "some modules, e.g. via the {-# LANGUAGE #-} pragma." + + -- check use of "foo (>= 1.0 && < 1.4) || >=1.8 " version-range syntax + , checkVersion [1,8] (not (null versionRangeExpressions)) $ + PackageDistInexcusable $ + "The package uses full version-range expressions " + ++ "in a 'build-depends' field: " + ++ commaSep (map displayRawDependency versionRangeExpressions) + ++ ". To use this new syntax the package needs to specify at least " + ++ "'cabal-version: >= 1.8'. 
Alternatively, if broader compatibility " + ++ "is important, then convert to conjunctive normal form, and use " + ++ "multiple 'build-depends:' lines, one conjunct per line." + + -- check use of "build-depends: foo == 1.*" syntax + , checkVersion [1,6] (not (null depsUsingWildcardSyntax)) $ + PackageDistInexcusable $ + "The package uses wildcard syntax in the 'build-depends' field: " + ++ commaSep (map display depsUsingWildcardSyntax) + ++ ". To use this new syntax the package need to specify at least " + ++ "'cabal-version: >= 1.6'. Alternatively, if broader compatibility " + ++ "is important then use: " ++ commaSep + [ display (Dependency name (eliminateWildcardSyntax versionRange)) + | Dependency name versionRange <- depsUsingWildcardSyntax ] + + -- check use of "tested-with: GHC (>= 1.0 && < 1.4) || >=1.8 " syntax + , checkVersion [1,8] (not (null testedWithVersionRangeExpressions)) $ + PackageDistInexcusable $ + "The package uses full version-range expressions " + ++ "in a 'tested-with' field: " + ++ commaSep (map displayRawDependency testedWithVersionRangeExpressions) + ++ ". To use this new syntax the package needs to specify at least " + ++ "'cabal-version: >= 1.8'." + + -- check use of "tested-with: GHC == 6.12.*" syntax + , checkVersion [1,6] (not (null testedWithUsingWildcardSyntax)) $ + PackageDistInexcusable $ + "The package uses wildcard syntax in the 'tested-with' field: " + ++ commaSep (map display testedWithUsingWildcardSyntax) + ++ ". To use this new syntax the package need to specify at least " + ++ "'cabal-version: >= 1.6'. 
Alternatively, if broader compatibility " + ++ "is important then use: " ++ commaSep + [ display (Dependency name (eliminateWildcardSyntax versionRange)) + | Dependency name versionRange <- testedWithUsingWildcardSyntax ] + + -- check use of "data-files: data/*.txt" syntax + , checkVersion [1,6] (not (null dataFilesUsingGlobSyntax)) $ + PackageDistInexcusable $ + "Using wildcards like " + ++ commaSep (map quote $ take 3 dataFilesUsingGlobSyntax) + ++ " in the 'data-files' field requires 'cabal-version: >= 1.6'. " + ++ "Alternatively if you require compatibility with earlier Cabal " + ++ "versions then list all the files explicitly." + + -- check use of "extra-source-files: mk/*.in" syntax + , checkVersion [1,6] (not (null extraSrcFilesUsingGlobSyntax)) $ + PackageDistInexcusable $ + "Using wildcards like " + ++ commaSep (map quote $ take 3 extraSrcFilesUsingGlobSyntax) + ++ " in the 'extra-source-files' field requires " + ++ "'cabal-version: >= 1.6'. Alternatively if you require " + ++ "compatibility with earlier Cabal versions then list all the files " + ++ "explicitly." + + -- check use of "source-repository" section + , checkVersion [1,6] (not (null (sourceRepos pkg))) $ + PackageDistInexcusable $ + "The 'source-repository' section is new in Cabal 1.6. " + ++ "Unfortunately it messes up the parser in earlier Cabal versions " + ++ "so you need to specify 'cabal-version: >= 1.6'." + + -- check for new licenses + , checkVersion [1,4] (license pkg `notElem` compatLicenses) $ + PackageDistInexcusable $ + "Unfortunately the license " ++ quote (display (license pkg)) + ++ " messes up the parser in earlier Cabal versions so you need to " + ++ "specify 'cabal-version: >= 1.4'. Alternatively if you require " + ++ "compatibility with earlier Cabal versions then use 'OtherLicense'." 
+ + -- check for new language extensions + , checkVersion [1,2,3] (not (null mentionedExtensionsThatNeedCabal12)) $ + PackageDistInexcusable $ + "Unfortunately the language extensions " + ++ commaSep (map (quote . display) mentionedExtensionsThatNeedCabal12) + ++ " break the parser in earlier Cabal versions so you need to " + ++ "specify 'cabal-version: >= 1.2.3'. Alternatively if you require " + ++ "compatibility with earlier Cabal versions then you may be able to " + ++ "use an equivalent compiler-specific flag." + + , checkVersion [1,4] (not (null mentionedExtensionsThatNeedCabal14)) $ + PackageDistInexcusable $ + "Unfortunately the language extensions " + ++ commaSep (map (quote . display) mentionedExtensionsThatNeedCabal14) + ++ " break the parser in earlier Cabal versions so you need to " + ++ "specify 'cabal-version: >= 1.4'. Alternatively if you require " + ++ "compatibility with earlier Cabal versions then you may be able to " + ++ "use an equivalent compiler-specific flag." + ] + where + -- Perform a check on packages that use a version of the spec less than + -- the version given. This is for cases where a new Cabal version adds + -- a new feature and we want to check that it is not used prior to that + -- version. 
+ checkVersion :: [Int] -> Bool -> PackageCheck -> Maybe PackageCheck + checkVersion ver cond pc + | specVersion pkg >= Version ver [] = Nothing + | otherwise = check cond pc + + buildInfoField field = map field (allBuildInfo pkg) + dataFilesUsingGlobSyntax = filter usesGlobSyntax (dataFiles pkg) + extraSrcFilesUsingGlobSyntax = filter usesGlobSyntax (extraSrcFiles pkg) + usesGlobSyntax str = case parseFileGlob str of + Just (FileGlob _ _) -> True + _ -> False + + versionRangeExpressions = + [ dep | dep@(Dependency _ vr) <- buildDepends pkg + , usesNewVersionRangeSyntax vr ] + + testedWithVersionRangeExpressions = + [ Dependency (PackageName (display compiler)) vr + | (compiler, vr) <- testedWith pkg + , usesNewVersionRangeSyntax vr ] + + simpleSpecVersionRangeSyntax = + either (const True) + (foldVersionRange' + True + (\_ -> False) + (\_ -> False) (\_ -> False) + (\_ -> True) -- >= + (\_ -> False) + (\_ _ -> False) + (\_ _ -> False) (\_ _ -> False) + id) + (specVersionRaw pkg) + + -- is the cabal-version field a simple version number, rather than a range + simpleSpecVersionSyntax = + either (const True) (const False) (specVersionRaw pkg) + + usesNewVersionRangeSyntax :: VersionRange -> Bool + usesNewVersionRangeSyntax = + (> 2) -- uses the new syntax if depth is more than 2 + . 
foldVersionRange' + (1 :: Int) + (const 1) + (const 1) (const 1) + (const 1) (const 1) + (const (const 1)) + (+) (+) + (const 3) -- uses new ()'s syntax + + depsUsingWildcardSyntax = [ dep | dep@(Dependency _ vr) <- buildDepends pkg + , usesWildcardSyntax vr ] + + -- XXX: If the user writes build-depends: foo with (), this is + -- indistinguishable from build-depends: foo, so there won't be an + -- error even though there should be + depsUsingThinningRenamingSyntax = + [ name + | bi <- allBuildInfo pkg + , (name, rns) <- Map.toList (targetBuildRenaming bi) + , rns /= ModuleRenaming True [] ] + + testedWithUsingWildcardSyntax = + [ Dependency (PackageName (display compiler)) vr + | (compiler, vr) <- testedWith pkg + , usesWildcardSyntax vr ] + + usesWildcardSyntax :: VersionRange -> Bool + usesWildcardSyntax = + foldVersionRange' + False (const False) + (const False) (const False) + (const False) (const False) + (\_ _ -> True) -- the wildcard case + (||) (||) id + + eliminateWildcardSyntax = + foldVersionRange' + anyVersion thisVersion + laterVersion earlierVersion + orLaterVersion orEarlierVersion + (\v v' -> intersectVersionRanges (orLaterVersion v) (earlierVersion v')) + intersectVersionRanges unionVersionRanges id + + compatLicenses = [ GPL Nothing, LGPL Nothing, AGPL Nothing, BSD3, BSD4 + , PublicDomain, AllRightsReserved + , UnspecifiedLicense, OtherLicense ] + + mentionedExtensions = [ ext | bi <- allBuildInfo pkg + , ext <- allExtensions bi ] + mentionedExtensionsThatNeedCabal12 = + nub (filter (`elem` compatExtensionsExtra) mentionedExtensions) + + -- As of Cabal-1.4 we can add new extensions without worrying about + -- breaking old versions of cabal. 
+ mentionedExtensionsThatNeedCabal14 = + nub (filter (`notElem` compatExtensions) mentionedExtensions) + + -- The known extensions in Cabal-1.2.3 + compatExtensions = + map EnableExtension + [ OverlappingInstances, UndecidableInstances, IncoherentInstances + , RecursiveDo, ParallelListComp, MultiParamTypeClasses + , FunctionalDependencies, Rank2Types + , RankNTypes, PolymorphicComponents, ExistentialQuantification + , ScopedTypeVariables, ImplicitParams, FlexibleContexts + , FlexibleInstances, EmptyDataDecls, CPP, BangPatterns + , TypeSynonymInstances, TemplateHaskell, ForeignFunctionInterface + , Arrows, Generics, NamedFieldPuns, PatternGuards + , GeneralizedNewtypeDeriving, ExtensibleRecords, RestrictedTypeSynonyms + , HereDocuments] ++ + map DisableExtension + [MonomorphismRestriction, ImplicitPrelude] ++ + compatExtensionsExtra + + -- The extra known extensions in Cabal-1.2.3 vs Cabal-1.1.6 + -- (Cabal-1.1.6 came with ghc-6.6. Cabal-1.2 came with ghc-6.8) + compatExtensionsExtra = + map EnableExtension + [ KindSignatures, MagicHash, TypeFamilies, StandaloneDeriving + , UnicodeSyntax, PatternSignatures, UnliftedFFITypes, LiberalTypeSynonyms + , TypeOperators, RecordWildCards, RecordPuns, DisambiguateRecordFields + , OverloadedStrings, GADTs, RelaxedPolyRec + , ExtendedDefaultRules, UnboxedTuples, DeriveDataTypeable + , ConstrainedClassMethods + ] ++ + map DisableExtension + [MonoPatBinds] + +-- | A variation on the normal 'Text' instance, shows any ()'s in the original +-- textual syntax. We need to show these otherwise it's confusing to users when +-- we complain of their presence but do not pretty print them! +-- +displayRawVersionRange :: VersionRange -> String +displayRawVersionRange = + Disp.render + . fst + . 
foldVersionRange' -- precedence: + -- All the same as the usual pretty printer, except for the parens + ( Disp.text "-any" , 0 :: Int) + (\v -> (Disp.text "==" <> disp v , 0)) + (\v -> (Disp.char '>' <> disp v , 0)) + (\v -> (Disp.char '<' <> disp v , 0)) + (\v -> (Disp.text ">=" <> disp v , 0)) + (\v -> (Disp.text "<=" <> disp v , 0)) + (\v _ -> (Disp.text "==" <> dispWild v , 0)) + (\(r1, p1) (r2, p2) -> + (punct 2 p1 r1 <+> Disp.text "||" <+> punct 2 p2 r2 , 2)) + (\(r1, p1) (r2, p2) -> + (punct 1 p1 r1 <+> Disp.text "&&" <+> punct 1 p2 r2 , 1)) + (\(r, _ ) -> (Disp.parens r, 0)) -- parens + + where + dispWild (Version b _) = + Disp.hcat (Disp.punctuate (Disp.char '.') (map Disp.int b)) + <> Disp.text ".*" + punct p p' | p < p' = Disp.parens + | otherwise = id + +displayRawDependency :: Dependency -> String +displayRawDependency (Dependency pkg vr) = + display pkg ++ " " ++ displayRawVersionRange vr + + +-- ------------------------------------------------------------ +-- * Checks on the GenericPackageDescription +-- ------------------------------------------------------------ + +-- | Check the build-depends fields for any weirdness or bad practise. +-- +checkPackageVersions :: GenericPackageDescription -> [PackageCheck] +checkPackageVersions pkg = + catMaybes [ + + -- Check that the version of base is bounded above. + -- For example this bans "build-depends: base >= 3". + -- It should probably be "build-depends: base >= 3 && < 4" + -- which is the same as "build-depends: base == 3.*" + check (not (boundedAbove baseDependency)) $ + PackageDistInexcusable $ + "The dependency 'build-depends: base' does not specify an upper " + ++ "bound on the version number. Each major release of the 'base' " + ++ "package changes the API in various ways and most packages will " + ++ "need some changes to compile with it. The recommended practise " + ++ "is to specify an upper bound on the version of the 'base' " + ++ "package. 
This ensures your package will continue to build when a " + ++ "new major version of the 'base' package is released. If you are " + ++ "not sure what upper bound to use then use the next major " + ++ "version. For example if you have tested your package with 'base' " + ++ "version 2 and 3 then use 'build-depends: base >= 2 && < 4'." + + ] + where + -- TODO: What we really want to do is test if there exists any + -- configuration in which the base version is unbounded above. + -- However that's a bit tricky because there are many possible + -- configurations. As a cheap easy and safe approximation we will + -- pick a single "typical" configuration and check if that has an + -- open upper bound. To get a typical configuration we finalise + -- using no package index and the current platform. + finalised = finalizePackageDescription + [] (const True) buildPlatform + (unknownCompilerInfo + (CompilerId buildCompilerFlavor (Version [] [])) NoAbiTag) + [] pkg + baseDependency = case finalised of + Right (pkg', _) | not (null baseDeps) -> + foldr intersectVersionRanges anyVersion baseDeps + where + baseDeps = + [ vr | Dependency (PackageName "base") vr <- buildDepends pkg' ] + + -- Just in case finalizePackageDescription fails for any reason, + -- or if the package doesn't depend on the base package at all, + -- then we will just skip the check, since boundedAbove noVersion = True + _ -> noVersion + + boundedAbove :: VersionRange -> Bool + boundedAbove vr = case asVersionIntervals vr of + [] -> True -- this is the inconsistent version range. 
+ intervals -> case last intervals of + (_, UpperBound _ _) -> True + (_, NoUpperBound ) -> False + + +checkConditionals :: GenericPackageDescription -> [PackageCheck] +checkConditionals pkg = + catMaybes [ + + check (not $ null unknownOSs) $ + PackageDistInexcusable $ + "Unknown operating system name " + ++ commaSep (map quote unknownOSs) + + , check (not $ null unknownArches) $ + PackageDistInexcusable $ + "Unknown architecture name " + ++ commaSep (map quote unknownArches) + + , check (not $ null unknownImpls) $ + PackageDistInexcusable $ + "Unknown compiler name " + ++ commaSep (map quote unknownImpls) + ] + where + unknownOSs = [ os | OS (OtherOS os) <- conditions ] + unknownArches = [ arch | Arch (OtherArch arch) <- conditions ] + unknownImpls = [ impl | Impl (OtherCompiler impl) _ <- conditions ] + conditions = maybe [] freeVars (condLibrary pkg) + ++ concatMap (freeVars . snd) (condExecutables pkg) + freeVars (CondNode _ _ ifs) = concatMap compfv ifs + compfv (c, ct, mct) = condfv c ++ freeVars ct ++ maybe [] freeVars mct + condfv c = case c of + Var v -> [v] + Lit _ -> [] + CNot c1 -> condfv c1 + COr c1 c2 -> condfv c1 ++ condfv c2 + CAnd c1 c2 -> condfv c1 ++ condfv c2 + +-- ------------------------------------------------------------ +-- * Checks involving files in the package +-- ------------------------------------------------------------ + +-- | Sanity check things that requires IO. It looks at the files in the +-- package and expects to find the package unpacked in at the given file path. +-- +checkPackageFiles :: PackageDescription -> FilePath -> IO [PackageCheck] +checkPackageFiles pkg root = checkPackageContent checkFilesIO pkg + where + checkFilesIO = CheckPackageContentOps { + doesFileExist = System.doesFileExist . relative, + doesDirectoryExist = System.doesDirectoryExist . relative + } + relative path = root path + +-- | A record of operations needed to check the contents of packages. +-- Used by 'checkPackageContent'. 
+-- +data CheckPackageContentOps m = CheckPackageContentOps { + doesFileExist :: FilePath -> m Bool, + doesDirectoryExist :: FilePath -> m Bool + } + +-- | Sanity check things that requires looking at files in the package. +-- This is a generalised version of 'checkPackageFiles' that can work in any +-- monad for which you can provide 'CheckPackageContentOps' operations. +-- +-- The point of this extra generality is to allow doing checks in some virtual +-- file system, for example a tarball in memory. +-- +checkPackageContent :: Monad m => CheckPackageContentOps m + -> PackageDescription + -> m [PackageCheck] +checkPackageContent ops pkg = do + licenseErrors <- checkLicensesExist ops pkg + setupError <- checkSetupExists ops pkg + configureError <- checkConfigureExists ops pkg + localPathErrors <- checkLocalPathsExist ops pkg + vcsLocation <- checkMissingVcsInfo ops pkg + + return $ licenseErrors + ++ catMaybes [setupError, configureError] + ++ localPathErrors + ++ vcsLocation + +checkLicensesExist :: Monad m => CheckPackageContentOps m + -> PackageDescription + -> m [PackageCheck] +checkLicensesExist ops pkg = do + exists <- mapM (doesFileExist ops) (licenseFiles pkg) + return + [ PackageBuildWarning $ + "The '" ++ fieldname ++ "' field refers to the file " + ++ quote file ++ " which does not exist." + | (file, False) <- zip (licenseFiles pkg) exists ] + where + fieldname | length (licenseFiles pkg) == 1 = "license-file" + | otherwise = "license-files" + +checkSetupExists :: Monad m => CheckPackageContentOps m + -> PackageDescription + -> m (Maybe PackageCheck) +checkSetupExists ops _ = do + hsexists <- doesFileExist ops "Setup.hs" + lhsexists <- doesFileExist ops "Setup.lhs" + return $ check (not hsexists && not lhsexists) $ + PackageDistInexcusable $ + "The package is missing a Setup.hs or Setup.lhs script." 
+ +checkConfigureExists :: Monad m => CheckPackageContentOps m + -> PackageDescription + -> m (Maybe PackageCheck) +checkConfigureExists ops PackageDescription { buildType = Just Configure } = do + exists <- doesFileExist ops "configure" + return $ check (not exists) $ + PackageBuildWarning $ + "The 'build-type' is 'Configure' but there is no 'configure' script. " + ++ "You probably need to run 'autoreconf -i' to generate it." +checkConfigureExists _ _ = return Nothing + +checkLocalPathsExist :: Monad m => CheckPackageContentOps m + -> PackageDescription + -> m [PackageCheck] +checkLocalPathsExist ops pkg = do + let dirs = [ (dir, kind) + | bi <- allBuildInfo pkg + , (dir, kind) <- + [ (dir, "extra-lib-dirs") | dir <- extraLibDirs bi ] + ++ [ (dir, "include-dirs") | dir <- includeDirs bi ] + ++ [ (dir, "hs-source-dirs") | dir <- hsSourceDirs bi ] + , isRelative dir ] + missing <- filterM (liftM not . doesDirectoryExist ops . fst) dirs + return [ PackageBuildWarning { + explanation = quote (kind ++ ": " ++ dir) + ++ " directory does not exist." + } + | (dir, kind) <- missing ] + +checkMissingVcsInfo :: Monad m => CheckPackageContentOps m + -> PackageDescription + -> m [PackageCheck] +checkMissingVcsInfo ops pkg | null (sourceRepos pkg) = do + vcsInUse <- liftM or $ mapM (doesDirectoryExist ops) repoDirnames + if vcsInUse + then return [ PackageDistSuspicious message ] + else return [] + where + repoDirnames = [ dirname | repo <- knownRepoTypes + , dirname <- repoTypeDirname repo ] + message = "When distributing packages it is encouraged to specify source " + ++ "control information in the .cabal file using one or more " + ++ "'source-repository' sections. See the Cabal user guide for " + ++ "details." 
+ +checkMissingVcsInfo _ _ = return [] + +repoTypeDirname :: RepoType -> [FilePath] +repoTypeDirname Darcs = ["_darcs"] +repoTypeDirname Git = [".git"] +repoTypeDirname SVN = [".svn"] +repoTypeDirname CVS = ["CVS"] +repoTypeDirname Mercurial = [".hg"] +repoTypeDirname GnuArch = [".arch-params"] +repoTypeDirname Bazaar = [".bzr"] +repoTypeDirname Monotone = ["_MTN"] +repoTypeDirname _ = [] + +-- ------------------------------------------------------------ +-- * Checks involving files in the package +-- ------------------------------------------------------------ + +-- | Check the names of all files in a package for portability problems. This +-- should be done for example when creating or validating a package tarball. +-- +checkPackageFileNames :: [FilePath] -> [PackageCheck] +checkPackageFileNames files = + (take 1 . catMaybes . map checkWindowsPath $ files) + ++ (take 1 . catMaybes . map checkTarPath $ files) + -- If we get any of these checks triggering then we're likely to get + -- many, and that's probably not helpful, so return at most one. + +checkWindowsPath :: FilePath -> Maybe PackageCheck +checkWindowsPath path = + check (not $ FilePath.Windows.isValid path') $ + PackageDistInexcusable $ + "Unfortunately, the file " ++ quote path ++ " is not a valid file " + ++ "name on Windows which would cause portability problems for this " + ++ "package. Windows file names cannot contain any of the characters " + ++ "\":*?<>|\" and there are a few reserved names including \"aux\", " + ++ "\"nul\", \"con\", \"prn\", \"com1-9\", \"lpt1-9\" and \"clock$\"." + where + path' = ".\\" ++ path + -- force a relative name to catch invalid file names like "f:oo" which + -- otherwise parse as file "oo" in the current directory on the 'f' drive. + +-- | Check a file name is valid for the portable POSIX tar format. +-- +-- The POSIX tar format has a restriction on the length of file names. It is +-- unfortunately not a simple restriction like a maximum length. 
The exact +-- restriction is that either the whole path be 100 characters or less, or it +-- be possible to split the path on a directory separator such that the first +-- part is 155 characters or less and the second part 100 characters or less. +-- +checkTarPath :: FilePath -> Maybe PackageCheck +checkTarPath path + | length path > 255 = Just longPath + | otherwise = case pack nameMax (reverse (splitPath path)) of + Left err -> Just err + Right [] -> Nothing + Right (first:rest) -> case pack prefixMax remainder of + Left err -> Just err + Right [] -> Nothing + Right (_:_) -> Just noSplit + where + -- drop the '/' between the name and prefix: + remainder = init first : rest + + where + nameMax, prefixMax :: Int + nameMax = 100 + prefixMax = 155 + + pack _ [] = Left emptyName + pack maxLen (c:cs) + | n > maxLen = Left longName + | otherwise = Right (pack' maxLen n cs) + where n = length c + + pack' maxLen n (c:cs) + | n' <= maxLen = pack' maxLen n' cs + where n' = n + length c + pack' _ _ cs = cs + + longPath = PackageDistInexcusable $ + "The following file name is too long to store in a portable POSIX " + ++ "format tar archive. The maximum length is 255 ASCII characters.\n" + ++ "The file in question is:\n " ++ path + longName = PackageDistInexcusable $ + "The following file name is too long to store in a portable POSIX " + ++ "format tar archive. The maximum length for the name part (including " + ++ "extension) is 100 ASCII characters. The maximum length for any " + ++ "individual directory component is 155.\n" + ++ "The file in question is:\n " ++ path + noSplit = PackageDistInexcusable $ + "The following file name is too long to store in a portable POSIX " + ++ "format tar archive. While the total length is less than 255 ASCII " + ++ "characters, there are unfortunately further restrictions. 
It has to " + ++ "be possible to split the file path on a directory separator into " + ++ "two parts such that the first part fits in 155 characters or less " + ++ "and the second part fits in 100 characters or less. Basically you " + ++ "have to make the file name or directory names shorter, or you could " + ++ "split a long directory name into nested subdirectories with shorter " + ++ "names.\nThe file in question is:\n " ++ path + emptyName = PackageDistInexcusable $ + "Encountered a file with an empty name, something is very wrong! " + ++ "Files with an empty name cannot be stored in a tar archive or in " + ++ "standard file systems." + +-- ------------------------------------------------------------ +-- * Utils +-- ------------------------------------------------------------ + +quote :: String -> String +quote s = "'" ++ s ++ "'" + +commaSep :: [String] -> String +commaSep = intercalate ", " + +dups :: Ord a => [a] -> [a] +dups xs = [ x | (x:_:_) <- group (sort xs) ] + +fileExtensionSupportedLanguage :: FilePath -> Bool +fileExtensionSupportedLanguage path = + isHaskell || isC + where + extension = takeExtension path + isHaskell = extension `elem` [".hs", ".lhs"] + isC = isJust (filenameCDialect extension) diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/PackageDescription/Configuration.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/PackageDescription/Configuration.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/PackageDescription/Configuration.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/PackageDescription/Configuration.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,609 @@ +{-# LANGUAGE CPP #-} +-- -fno-warn-deprecations for use of Map.foldWithKey +{-# OPTIONS_GHC -fno-warn-deprecations #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.PackageDescription.Configuration +-- 
Copyright : Thomas Schilling, 2007 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This is about the cabal configurations feature. It exports +-- 'finalizePackageDescription' and 'flattenPackageDescription' which are +-- functions for converting 'GenericPackageDescription's down to +-- 'PackageDescription's. It has code for working with the tree of conditions +-- and resolving or flattening conditions. + +module Distribution.PackageDescription.Configuration ( + finalizePackageDescription, + flattenPackageDescription, + + -- Utils + parseCondition, + freeVars, + mapCondTree, + mapTreeData, + mapTreeConds, + mapTreeConstrs, + ) where + +import Distribution.Package + ( PackageName, Dependency(..) ) +import Distribution.PackageDescription + ( GenericPackageDescription(..), PackageDescription(..) + , Library(..), Executable(..), BuildInfo(..) + , Flag(..), FlagName(..), FlagAssignment + , Benchmark(..), CondTree(..), ConfVar(..), Condition(..) + , TestSuite(..) ) +import Distribution.PackageDescription.Utils + ( cabalBug, userBug ) +import Distribution.Version + ( VersionRange, anyVersion, intersectVersionRanges, withinRange ) +import Distribution.Compiler + ( CompilerId(CompilerId) ) +import Distribution.System + ( Platform(..), OS, Arch ) +import Distribution.Simple.Utils + ( currentDir, lowercase ) +import Distribution.Simple.Compiler + ( CompilerInfo(..) ) + +import Distribution.Text + ( Text(parse) ) +import Distribution.Compat.ReadP as ReadP hiding ( char ) +import Control.Arrow (first) +import qualified Distribution.Compat.ReadP as ReadP ( char ) + +import Data.Char ( isAlphaNum ) +import Data.Maybe ( catMaybes, maybeToList ) +import Data.Map ( Map, fromListWith, toList ) +import qualified Data.Map as Map +#if __GLASGOW_HASKELL__ < 710 +import Data.Monoid +#endif + +------------------------------------------------------------------------------ + +-- | Simplify the condition and return its free variables. 
+simplifyCondition :: Condition c + -> (c -> Either d Bool) -- ^ (partial) variable assignment + -> (Condition d, [d]) +simplifyCondition cond i = fv . walk $ cond + where + walk cnd = case cnd of + Var v -> either Var Lit (i v) + Lit b -> Lit b + CNot c -> case walk c of + Lit True -> Lit False + Lit False -> Lit True + c' -> CNot c' + COr c d -> case (walk c, walk d) of + (Lit False, d') -> d' + (Lit True, _) -> Lit True + (c', Lit False) -> c' + (_, Lit True) -> Lit True + (c',d') -> COr c' d' + CAnd c d -> case (walk c, walk d) of + (Lit False, _) -> Lit False + (Lit True, d') -> d' + (_, Lit False) -> Lit False + (c', Lit True) -> c' + (c',d') -> CAnd c' d' + -- gather free vars + fv c = (c, fv' c) + fv' c = case c of + Var v -> [v] + Lit _ -> [] + CNot c' -> fv' c' + COr c1 c2 -> fv' c1 ++ fv' c2 + CAnd c1 c2 -> fv' c1 ++ fv' c2 + +-- | Simplify a configuration condition using the OS and arch names. Returns +-- the names of all the flags occurring in the condition. +simplifyWithSysParams :: OS -> Arch -> CompilerInfo -> Condition ConfVar + -> (Condition FlagName, [FlagName]) +simplifyWithSysParams os arch cinfo cond = (cond', flags) + where + (cond', flags) = simplifyCondition cond interp + interp (OS os') = Right $ os' == os + interp (Arch arch') = Right $ arch' == arch + interp (Impl comp vr) + | matchImpl (compilerInfoId cinfo) = Right True + | otherwise = case compilerInfoCompat cinfo of + -- fixme: treat Nothing as unknown, rather than empty list once we + -- support partial resolution of system parameters + Nothing -> Right False + Just compat -> Right (any matchImpl compat) + where + matchImpl (CompilerId c v) = comp == c && v `withinRange` vr + interp (Flag f) = Left f + +-- TODO: Add instances and check +-- +-- prop_sC_idempotent cond a o = cond' == cond'' +-- where +-- cond' = simplifyCondition cond a o +-- cond'' = simplifyCondition cond' a o +-- +-- prop_sC_noLits cond a o = isLit res || not (hasLits res) +-- where +-- res = simplifyCondition cond 
a o +-- hasLits (Lit _) = True +-- hasLits (CNot c) = hasLits c +-- hasLits (COr l r) = hasLits l || hasLits r +-- hasLits (CAnd l r) = hasLits l || hasLits r +-- hasLits _ = False +-- + +-- | Parse a configuration condition from a string. +parseCondition :: ReadP r (Condition ConfVar) +parseCondition = condOr + where + condOr = sepBy1 condAnd (oper "||") >>= return . foldl1 COr + condAnd = sepBy1 cond (oper "&&")>>= return . foldl1 CAnd + cond = sp >> (boolLiteral +++ inparens condOr +++ notCond +++ osCond + +++ archCond +++ flagCond +++ implCond ) + inparens = between (ReadP.char '(' >> sp) (sp >> ReadP.char ')' >> sp) + notCond = ReadP.char '!' >> sp >> cond >>= return . CNot + osCond = string "os" >> sp >> inparens osIdent >>= return . Var + archCond = string "arch" >> sp >> inparens archIdent >>= return . Var + flagCond = string "flag" >> sp >> inparens flagIdent >>= return . Var + implCond = string "impl" >> sp >> inparens implIdent >>= return . Var + boolLiteral = fmap Lit parse + archIdent = fmap Arch parse + osIdent = fmap OS parse + flagIdent = fmap (Flag . FlagName . 
lowercase) (munch1 isIdentChar) + isIdentChar c = isAlphaNum c || c == '_' || c == '-' + oper s = sp >> string s >> sp + sp = skipSpaces + implIdent = do i <- parse + vr <- sp >> option anyVersion parse + return $ Impl i vr + +------------------------------------------------------------------------------ + +mapCondTree :: (a -> b) -> (c -> d) -> (Condition v -> Condition w) + -> CondTree v c a -> CondTree w d b +mapCondTree fa fc fcnd (CondNode a c ifs) = + CondNode (fa a) (fc c) (map g ifs) + where + g (cnd, t, me) = (fcnd cnd, mapCondTree fa fc fcnd t, + fmap (mapCondTree fa fc fcnd) me) + +mapTreeConstrs :: (c -> d) -> CondTree v c a -> CondTree v d a +mapTreeConstrs f = mapCondTree id f id + +mapTreeConds :: (Condition v -> Condition w) -> CondTree v c a -> CondTree w c a +mapTreeConds f = mapCondTree id id f + +mapTreeData :: (a -> b) -> CondTree v c a -> CondTree v c b +mapTreeData f = mapCondTree f id id + +-- | Result of dependency test. Isomorphic to @Maybe d@ but renamed for +-- clarity. +data DepTestRslt d = DepOk | MissingDeps d + +instance Monoid d => Monoid (DepTestRslt d) where + mempty = DepOk + mappend DepOk x = x + mappend x DepOk = x + mappend (MissingDeps d) (MissingDeps d') = MissingDeps (d `mappend` d') + + +data BT a = BTN a | BTB (BT a) (BT a) -- very simple binary tree + + +-- | Try to find a flag assignment that satisfies the constraints of all trees. +-- +-- Returns either the missing dependencies, or a tuple containing the +-- resulting data, the associated dependencies, and the chosen flag +-- assignments. +-- +-- In case of failure, the _smallest_ number of missing dependencies is +-- returned. [TODO: Could also be specified with a function argument.] +-- +-- TODO: The current algorithm is rather naive. A better approach would be to: +-- +-- * Rule out possible paths, by taking a look at the associated dependencies. 
+-- +-- * Infer the required values for the conditions of these paths, and +-- calculate the required domains for the variables used in these +-- conditions. Then picking a flag assignment would be linear (I guess). +-- +-- This would require some sort of SAT solving, though, thus it's not +-- implemented unless we really need it. +-- +resolveWithFlags :: + [(FlagName,[Bool])] + -- ^ Domain for each flag name, will be tested in order. + -> OS -- ^ OS as returned by Distribution.System.buildOS + -> Arch -- ^ Arch as returned by Distribution.System.buildArch + -> CompilerInfo -- ^ Compiler information + -> [Dependency] -- ^ Additional constraints + -> [CondTree ConfVar [Dependency] PDTagged] + -> ([Dependency] -> DepTestRslt [Dependency]) -- ^ Dependency test function. + -> Either [Dependency] (TargetSet PDTagged, FlagAssignment) + -- ^ Either the missing dependencies (error case), or a pair of + -- (set of build targets with dependencies, chosen flag assignments) +resolveWithFlags dom os arch impl constrs trees checkDeps = + case try dom [] of + Right r -> Right r + Left dbt -> Left $ findShortest dbt + where + extraConstrs = toDepMap constrs + + -- simplify trees by (partially) evaluating all conditions and converting + -- dependencies to dependency maps. + simplifiedTrees = map ( mapTreeConstrs toDepMap -- convert to maps + . mapTreeConds (fst . simplifyWithSysParams os arch impl)) + trees + + -- @try@ recursively tries all possible flag assignments in the domain and + -- either succeeds or returns a binary tree with the missing dependencies + -- encountered in each run. Since the tree is constructed lazily, we + -- avoid some computation overhead in the successful case. + try [] flags = + let targetSet = TargetSet $ flip map simplifiedTrees $ + -- apply additional constraints to all dependencies + first (`constrainBy` extraConstrs) . 
+ simplifyCondTree (env flags) + deps = overallDependencies targetSet + in case checkDeps (fromDepMap deps) of + DepOk -> Right (targetSet, flags) + MissingDeps mds -> Left (BTN mds) + + try ((n, vals):rest) flags = + tryAll $ map (\v -> try rest ((n, v):flags)) vals + + tryAll = foldr mp mz + + -- special version of `mplus' for our local purposes + mp (Left xs) (Left ys) = (Left (BTB xs ys)) + mp (Left _) m@(Right _) = m + mp m@(Right _) _ = m + + -- `mzero' + mz = Left (BTN []) + + env flags flag = (maybe (Left flag) Right . lookup flag) flags + + -- for the error case we inspect our lazy tree of missing dependencies and + -- pick the shortest list of missing dependencies + findShortest (BTN x) = x + findShortest (BTB lt rt) = + let l = findShortest lt + r = findShortest rt + in case (l,r) of + ([], xs) -> xs -- [] is too short + (xs, []) -> xs + ([x], _) -> [x] -- single elem is optimum + (_, [x]) -> [x] + (xs, ys) -> if lazyLengthCmp xs ys + then xs else ys + -- lazy variant of @\xs ys -> length xs <= length ys@ + lazyLengthCmp [] _ = True + lazyLengthCmp _ [] = False + lazyLengthCmp (_:xs) (_:ys) = lazyLengthCmp xs ys + +-- | A map of dependencies. Newtyped since the default monoid instance is not +-- appropriate. The monoid instance uses 'intersectVersionRanges'. 
+newtype DependencyMap = DependencyMap { unDependencyMap :: Map PackageName VersionRange } + deriving (Show, Read) + +instance Monoid DependencyMap where + mempty = DependencyMap Map.empty + (DependencyMap a) `mappend` (DependencyMap b) = + DependencyMap (Map.unionWith intersectVersionRanges a b) + +toDepMap :: [Dependency] -> DependencyMap +toDepMap ds = + DependencyMap $ fromListWith intersectVersionRanges [ (p,vr) | Dependency p vr <- ds ] + +fromDepMap :: DependencyMap -> [Dependency] +fromDepMap m = [ Dependency p vr | (p,vr) <- toList (unDependencyMap m) ] + +simplifyCondTree :: (Monoid a, Monoid d) => + (v -> Either v Bool) + -> CondTree v d a + -> (d, a) +simplifyCondTree env (CondNode a d ifs) = + mconcat $ (d, a) : catMaybes (map simplifyIf ifs) + where + simplifyIf (cnd, t, me) = + case simplifyCondition cnd env of + (Lit True, _) -> Just $ simplifyCondTree env t + (Lit False, _) -> fmap (simplifyCondTree env) me + _ -> error $ "Environment not defined for all free vars" + +-- | Flatten a CondTree. This will resolve the CondTree by taking all +-- possible paths into account. Note that since branches represent exclusive +-- choices this may not result in a \"sane\" result. 
+ignoreConditions :: (Monoid a, Monoid c) => CondTree v c a -> (a, c) +ignoreConditions (CondNode a c ifs) = (a, c) `mappend` mconcat (concatMap f ifs) + where f (_, t, me) = ignoreConditions t + : maybeToList (fmap ignoreConditions me) + +freeVars :: CondTree ConfVar c a -> [FlagName] +freeVars t = [ f | Flag f <- freeVars' t ] + where + freeVars' (CondNode _ _ ifs) = concatMap compfv ifs + compfv (c, ct, mct) = condfv c ++ freeVars' ct ++ maybe [] freeVars' mct + condfv c = case c of + Var v -> [v] + Lit _ -> [] + CNot c' -> condfv c' + COr c1 c2 -> condfv c1 ++ condfv c2 + CAnd c1 c2 -> condfv c1 ++ condfv c2 + + +------------------------------------------------------------------------------ + +-- | A set of targets with their package dependencies +newtype TargetSet a = TargetSet [(DependencyMap, a)] + +-- | Combine the target-specific dependencies in a TargetSet to give the +-- dependencies for the package as a whole. +overallDependencies :: TargetSet PDTagged -> DependencyMap +overallDependencies (TargetSet targets) = mconcat depss + where + (depss, _) = unzip $ filter (removeDisabledSections . snd) targets + removeDisabledSections :: PDTagged -> Bool + removeDisabledSections (Lib _) = True + removeDisabledSections (Exe _ _) = True + removeDisabledSections (Test _ t) = testEnabled t + removeDisabledSections (Bench _ b) = benchmarkEnabled b + removeDisabledSections PDNull = True + +-- Apply extra constraints to a dependency map. +-- Combines dependencies where the result will only contain keys from the left +-- (first) map. If a key also exists in the right map, both constraints will +-- be intersected. 
+constrainBy :: DependencyMap -- ^ Input map + -> DependencyMap -- ^ Extra constraints + -> DependencyMap +constrainBy left extra = + DependencyMap $ + Map.foldWithKey tightenConstraint (unDependencyMap left) + (unDependencyMap extra) + where tightenConstraint n c l = + case Map.lookup n l of + Nothing -> l + Just vr -> Map.insert n (intersectVersionRanges vr c) l + +-- | Collect up the targets in a TargetSet of tagged targets, storing the +-- dependencies as we go. +flattenTaggedTargets :: TargetSet PDTagged -> + (Maybe Library, [(String, Executable)], [(String, TestSuite)] + , [(String, Benchmark)]) +flattenTaggedTargets (TargetSet targets) = foldr untag (Nothing, [], [], []) targets + where + untag (_, Lib _) (Just _, _, _, _) = userBug "Only one library expected" + untag (deps, Lib l) (Nothing, exes, tests, bms) = + (Just l', exes, tests, bms) + where + l' = l { + libBuildInfo = (libBuildInfo l) { targetBuildDepends = fromDepMap deps } + } + untag (deps, Exe n e) (mlib, exes, tests, bms) + | any ((== n) . fst) exes = + userBug $ "There exist several exes with the same name: '" ++ n ++ "'" + | any ((== n) . fst) tests = + userBug $ "There exists a test with the same name as an exe: '" ++ n ++ "'" + | any ((== n) . fst) bms = + userBug $ "There exists a benchmark with the same name as an exe: '" ++ n ++ "'" + | otherwise = (mlib, (n, e'):exes, tests, bms) + where + e' = e { + buildInfo = (buildInfo e) { targetBuildDepends = fromDepMap deps } + } + untag (deps, Test n t) (mlib, exes, tests, bms) + | any ((== n) . fst) tests = + userBug $ "There exist several tests with the same name: '" ++ n ++ "'" + | any ((== n) . fst) exes = + userBug $ "There exists an exe with the same name as the test: '" ++ n ++ "'" + | any ((== n) . 
fst) bms = + userBug $ "There exists a benchmark with the same name as the test: '" ++ n ++ "'" + | otherwise = (mlib, exes, (n, t'):tests, bms) + where + t' = t { + testBuildInfo = (testBuildInfo t) + { targetBuildDepends = fromDepMap deps } + } + untag (deps, Bench n b) (mlib, exes, tests, bms) + | any ((== n) . fst) bms = + userBug $ "There exist several benchmarks with the same name: '" ++ n ++ "'" + | any ((== n) . fst) exes = + userBug $ "There exists an exe with the same name as the benchmark: '" ++ n ++ "'" + | any ((== n) . fst) tests = + userBug $ "There exists a test with the same name as the benchmark: '" ++ n ++ "'" + | otherwise = (mlib, exes, tests, (n, b'):bms) + where + b' = b { + benchmarkBuildInfo = (benchmarkBuildInfo b) + { targetBuildDepends = fromDepMap deps } + } + untag (_, PDNull) x = x -- actually this should not happen, but let's be liberal + + +------------------------------------------------------------------------------ +-- Convert GenericPackageDescription to PackageDescription +-- + +data PDTagged = Lib Library + | Exe String Executable + | Test String TestSuite + | Bench String Benchmark + | PDNull + deriving Show + +instance Monoid PDTagged where + mempty = PDNull + PDNull `mappend` x = x + x `mappend` PDNull = x + Lib l `mappend` Lib l' = Lib (l `mappend` l') + Exe n e `mappend` Exe n' e' | n == n' = Exe n (e `mappend` e') + Test n t `mappend` Test n' t' | n == n' = Test n (t `mappend` t') + Bench n b `mappend` Bench n' b' | n == n' = Bench n (b `mappend` b') + _ `mappend` _ = cabalBug "Cannot combine incompatible tags" + +-- | Create a package description with all configurations resolved. +-- +-- This function takes a `GenericPackageDescription` and several environment +-- parameters and tries to generate `PackageDescription` by finding a flag +-- assignment that result in satisfiable dependencies. 
+-- +-- It takes as inputs a not necessarily complete specifications of flags +-- assignments, an optional package index as well as platform parameters. If +-- some flags are not assigned explicitly, this function will try to pick an +-- assignment that causes this function to succeed. The package index is +-- optional since on some platforms we cannot determine which packages have +-- been installed before. When no package index is supplied, every dependency +-- is assumed to be satisfiable, therefore all not explicitly assigned flags +-- will get their default values. +-- +-- This function will fail if it cannot find a flag assignment that leads to +-- satisfiable dependencies. (It will not try alternative assignments for +-- explicitly specified flags.) In case of failure it will return a /minimum/ +-- number of dependencies that could not be satisfied. On success, it will +-- return the package description and the full flag assignment chosen. +-- +finalizePackageDescription :: + FlagAssignment -- ^ Explicitly specified flag assignments + -> (Dependency -> Bool) -- ^ Is a given dependency satisfiable from the set of + -- available packages? If this is unknown then use + -- True. + -> Platform -- ^ The 'Arch' and 'OS' + -> CompilerInfo -- ^ Compiler information + -> [Dependency] -- ^ Additional constraints + -> GenericPackageDescription + -> Either [Dependency] + (PackageDescription, FlagAssignment) + -- ^ Either missing dependencies or the resolved package + -- description along with the flag assignments chosen. 
+finalizePackageDescription userflags satisfyDep + (Platform arch os) impl constraints + (GenericPackageDescription pkg flags mlib0 exes0 tests0 bms0) = + case resolveFlags of + Right ((mlib, exes', tests', bms'), targetSet, flagVals) -> + Right ( pkg { library = mlib + , executables = exes' + , testSuites = tests' + , benchmarks = bms' + , buildDepends = fromDepMap (overallDependencies targetSet) + --TODO: we need to find a way to avoid pulling in deps + -- for non-buildable components. However cannot simply + -- filter at this stage, since if the package were not + -- available we would have failed already. + } + , flagVals ) + + Left missing -> Left missing + where + -- Combine lib, exes, and tests into one list of @CondTree@s with tagged data + condTrees = maybeToList (fmap (mapTreeData Lib) mlib0 ) + ++ map (\(name,tree) -> mapTreeData (Exe name) tree) exes0 + ++ map (\(name,tree) -> mapTreeData (Test name) tree) tests0 + ++ map (\(name,tree) -> mapTreeData (Bench name) tree) bms0 + + resolveFlags = + case resolveWithFlags flagChoices os arch impl constraints condTrees check of + Right (targetSet, fs) -> + let (mlib, exes, tests, bms) = flattenTaggedTargets targetSet in + Right ( (fmap libFillInDefaults mlib, + map (\(n,e) -> (exeFillInDefaults e) { exeName = n }) exes, + map (\(n,t) -> (testFillInDefaults t) { testName = n }) tests, + map (\(n,b) -> (benchFillInDefaults b) { benchmarkName = n }) bms), + targetSet, fs) + Left missing -> Left missing + + flagChoices = map (\(MkFlag n _ d manual) -> (n, d2c manual n d)) flags + d2c manual n b = case lookup n userflags of + Just val -> [val] + Nothing + | manual -> [b] + | otherwise -> [b, not b] + --flagDefaults = map (\(n,x:_) -> (n,x)) flagChoices + check ds = let missingDeps = filter (not . 
satisfyDep) ds + in if null missingDeps + then DepOk + else MissingDeps missingDeps + +{- +let tst_p = (CondNode [1::Int] [Distribution.Package.Dependency "a" AnyVersion] []) +let tst_p2 = (CondNode [1::Int] [Distribution.Package.Dependency "a" (EarlierVersion (Version [1,0] [])), Distribution.Package.Dependency "a" (LaterVersion (Version [2,0] []))] []) + +let p_index = Distribution.Simple.PackageIndex.fromList [Distribution.Package.PackageIdentifier "a" (Version [0,5] []), Distribution.Package.PackageIdentifier "a" (Version [2,5] [])] +let look = not . null . Distribution.Simple.PackageIndex.lookupDependency p_index +let looks ds = mconcat $ map (\d -> if look d then DepOk else MissingDeps [d]) ds +resolveWithFlags [] Distribution.System.Linux Distribution.System.I386 (Distribution.Compiler.GHC,Version [6,8,2] []) [tst_p] looks ===> Right ... +resolveWithFlags [] Distribution.System.Linux Distribution.System.I386 (Distribution.Compiler.GHC,Version [6,8,2] []) [tst_p2] looks ===> Left ... +-} + +-- | Flatten a generic package description by ignoring all conditions and just +-- join the field descriptors into one package description. Note, however, +-- that this may lead to inconsistent field values, since all values are +-- joined into one field, which may not be possible in the original package +-- description, due to the use of exclusive choices (if ... else ...). +-- +-- TODO: One particularly tricky case is defaulting. In the original package +-- description, e.g., the source directory might either be the default or a +-- certain, explicitly set path. Since defaults are filled in only after the +-- package has been resolved and when no explicit value has been set, the +-- default path will be missing from the package description returned by this +-- function. 
+flattenPackageDescription :: GenericPackageDescription -> PackageDescription +flattenPackageDescription (GenericPackageDescription pkg _ mlib0 exes0 tests0 bms0) = + pkg { library = mlib + , executables = reverse exes + , testSuites = reverse tests + , benchmarks = reverse bms + , buildDepends = ldeps ++ reverse edeps ++ reverse tdeps ++ reverse bdeps + } + where + (mlib, ldeps) = case mlib0 of + Just lib -> let (l,ds) = ignoreConditions lib in + (Just (libFillInDefaults l), ds) + Nothing -> (Nothing, []) + (exes, edeps) = foldr flattenExe ([],[]) exes0 + (tests, tdeps) = foldr flattenTst ([],[]) tests0 + (bms, bdeps) = foldr flattenBm ([],[]) bms0 + flattenExe (n, t) (es, ds) = + let (e, ds') = ignoreConditions t in + ( (exeFillInDefaults $ e { exeName = n }) : es, ds' ++ ds ) + flattenTst (n, t) (es, ds) = + let (e, ds') = ignoreConditions t in + ( (testFillInDefaults $ e { testName = n }) : es, ds' ++ ds ) + flattenBm (n, t) (es, ds) = + let (e, ds') = ignoreConditions t in + ( (benchFillInDefaults $ e { benchmarkName = n }) : es, ds' ++ ds ) + +-- This is in fact rather a hack. The original version just overrode the +-- default values, however, when adding conditions we had to switch to a +-- modifier-based approach. There, nothing is ever overwritten, but only +-- joined together. +-- +-- This is the cleanest way i could think of, that doesn't require +-- changing all field parsing functions to return modifiers instead. 
+libFillInDefaults :: Library -> Library +libFillInDefaults lib@(Library { libBuildInfo = bi }) = + lib { libBuildInfo = biFillInDefaults bi } + +exeFillInDefaults :: Executable -> Executable +exeFillInDefaults exe@(Executable { buildInfo = bi }) = + exe { buildInfo = biFillInDefaults bi } + +testFillInDefaults :: TestSuite -> TestSuite +testFillInDefaults tst@(TestSuite { testBuildInfo = bi }) = + tst { testBuildInfo = biFillInDefaults bi } + +benchFillInDefaults :: Benchmark -> Benchmark +benchFillInDefaults bm@(Benchmark { benchmarkBuildInfo = bi }) = + bm { benchmarkBuildInfo = biFillInDefaults bi } + +biFillInDefaults :: BuildInfo -> BuildInfo +biFillInDefaults bi = + if null (hsSourceDirs bi) + then bi { hsSourceDirs = [currentDir] } + else bi diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/PackageDescription/Parse.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/PackageDescription/Parse.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/PackageDescription/Parse.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/PackageDescription/Parse.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,1279 @@ +{-# LANGUAGE CPP #-} +{-# LANGUAGE DeriveDataTypeable #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.PackageDescription.Parse +-- Copyright : Isaac Jones 2003-2005 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This defined parsers and partial pretty printers for the @.cabal@ format. +-- Some of the complexity in this module is due to the fact that we have to be +-- backwards compatible with old @.cabal@ files, so there's code to translate +-- into the newer structure. 
+ +module Distribution.PackageDescription.Parse ( + -- * Package descriptions + readPackageDescription, + writePackageDescription, + parsePackageDescription, + showPackageDescription, + + -- ** Parsing + ParseResult(..), + FieldDescr(..), + LineNo, + + -- ** Supplementary build information + readHookedBuildInfo, + parseHookedBuildInfo, + writeHookedBuildInfo, + showHookedBuildInfo, + + pkgDescrFieldDescrs, + libFieldDescrs, + executableFieldDescrs, + binfoFieldDescrs, + sourceRepoFieldDescrs, + testSuiteFieldDescrs, + flagFieldDescrs + ) where + +import Data.Char (isSpace) +import Data.Maybe (listToMaybe, isJust) +#if __GLASGOW_HASKELL__ < 710 +import Data.Monoid ( Monoid(..) ) +#endif +import Data.List (nub, unfoldr, partition, (\\)) +import Control.Monad (liftM, foldM, when, unless, ap) +#if __GLASGOW_HASKELL__ < 710 +import Control.Applicative (Applicative(..)) +#endif +import Control.Arrow (first) +import System.Directory (doesFileExist) +import qualified Data.ByteString.Lazy.Char8 as BS.Char8 +import Data.Typeable +import Data.Data +import qualified Data.Map as Map + +import Distribution.Text + ( Text(disp, parse), display, simpleParse ) +import Distribution.Compat.ReadP + ((+++), option) +import qualified Distribution.Compat.ReadP as Parse +import Text.PrettyPrint + +import Distribution.ParseUtils hiding (parseFields) +import Distribution.PackageDescription +import Distribution.PackageDescription.Utils + ( cabalBug, userBug ) +import Distribution.Package + ( PackageIdentifier(..), Dependency(..), packageName, packageVersion ) +import Distribution.ModuleName ( ModuleName ) +import Distribution.Version + ( Version(Version), orLaterVersion + , LowerBound(..), asVersionIntervals ) +import Distribution.Verbosity (Verbosity) +import Distribution.Compiler (CompilerFlavor(..)) +import Distribution.PackageDescription.Configuration (parseCondition, freeVars) +import Distribution.Simple.Utils + ( die, dieWithLocation, warn, intercalate, lowercase, cabalVersion + , 
withFileContents, withUTF8FileContents + , writeFileAtomic, writeUTF8File ) + + +-- ----------------------------------------------------------------------------- +-- The PackageDescription type + +pkgDescrFieldDescrs :: [FieldDescr PackageDescription] +pkgDescrFieldDescrs = + [ simpleField "name" + disp parse + packageName (\name pkg -> pkg{package=(package pkg){pkgName=name}}) + , simpleField "version" + disp parse + packageVersion (\ver pkg -> pkg{package=(package pkg){pkgVersion=ver}}) + , simpleField "cabal-version" + (either disp disp) (liftM Left parse +++ liftM Right parse) + specVersionRaw (\v pkg -> pkg{specVersionRaw=v}) + , simpleField "build-type" + (maybe empty disp) (fmap Just parse) + buildType (\t pkg -> pkg{buildType=t}) + , simpleField "license" + disp parseLicenseQ + license (\l pkg -> pkg{license=l}) + -- We have both 'license-file' and 'license-files' fields. + -- Rather than declaring license-file to be deprecated, we will continue + -- to allow both. The 'license-file' will continue to only allow single + -- tokens, while 'license-files' allows multiple. On pretty-printing, we + -- will use 'license-file' if there's just one, and use 'license-files' + -- otherwise. 
+ , simpleField "license-file" + showFilePath parseFilePathQ + (\pkg -> case licenseFiles pkg of + [x] -> x + _ -> "") + (\l pkg -> pkg{licenseFiles=licenseFiles pkg ++ [l]}) + , listField "license-files" + showFilePath parseFilePathQ + (\pkg -> case licenseFiles pkg of + [_] -> [] + xs -> xs) + (\ls pkg -> pkg{licenseFiles=ls}) + , simpleField "copyright" + showFreeText parseFreeText + copyright (\val pkg -> pkg{copyright=val}) + , simpleField "maintainer" + showFreeText parseFreeText + maintainer (\val pkg -> pkg{maintainer=val}) + , simpleField "stability" + showFreeText parseFreeText + stability (\val pkg -> pkg{stability=val}) + , simpleField "homepage" + showFreeText parseFreeText + homepage (\val pkg -> pkg{homepage=val}) + , simpleField "package-url" + showFreeText parseFreeText + pkgUrl (\val pkg -> pkg{pkgUrl=val}) + , simpleField "bug-reports" + showFreeText parseFreeText + bugReports (\val pkg -> pkg{bugReports=val}) + , simpleField "synopsis" + showFreeText parseFreeText + synopsis (\val pkg -> pkg{synopsis=val}) + , simpleField "description" + showFreeText parseFreeText + description (\val pkg -> pkg{description=val}) + , simpleField "category" + showFreeText parseFreeText + category (\val pkg -> pkg{category=val}) + , simpleField "author" + showFreeText parseFreeText + author (\val pkg -> pkg{author=val}) + , listField "tested-with" + showTestedWith parseTestedWithQ + testedWith (\val pkg -> pkg{testedWith=val}) + , listFieldWithSep vcat "data-files" + showFilePath parseFilePathQ + dataFiles (\val pkg -> pkg{dataFiles=val}) + , simpleField "data-dir" + showFilePath parseFilePathQ + dataDir (\val pkg -> pkg{dataDir=val}) + , listFieldWithSep vcat "extra-source-files" + showFilePath parseFilePathQ + extraSrcFiles (\val pkg -> pkg{extraSrcFiles=val}) + , listFieldWithSep vcat "extra-tmp-files" + showFilePath parseFilePathQ + extraTmpFiles (\val pkg -> pkg{extraTmpFiles=val}) + , listFieldWithSep vcat "extra-doc-files" + showFilePath parseFilePathQ + 
extraDocFiles (\val pkg -> pkg{extraDocFiles=val}) + ] + +-- | Store any fields beginning with "x-" in the customFields field of +-- a PackageDescription. All other fields will generate a warning. +storeXFieldsPD :: UnrecFieldParser PackageDescription +storeXFieldsPD (f@('x':'-':_),val) pkg = + Just pkg{ customFieldsPD = + customFieldsPD pkg ++ [(f,val)]} +storeXFieldsPD _ _ = Nothing + +-- --------------------------------------------------------------------------- +-- The Library type + +libFieldDescrs :: [FieldDescr Library] +libFieldDescrs = + [ listFieldWithSep vcat "exposed-modules" disp parseModuleNameQ + exposedModules (\mods lib -> lib{exposedModules=mods}) + + , commaListFieldWithSep vcat "reexported-modules" disp parse + reexportedModules (\mods lib -> lib{reexportedModules=mods}) + + , listFieldWithSep vcat "required-signatures" disp parseModuleNameQ + requiredSignatures (\mods lib -> lib{requiredSignatures=mods}) + + , listFieldWithSep vcat "exposed-signatures" disp parseModuleNameQ + exposedSignatures (\mods lib -> lib{exposedSignatures=mods}) + + , boolField "exposed" + libExposed (\val lib -> lib{libExposed=val}) + ] ++ map biToLib binfoFieldDescrs + where biToLib = liftField libBuildInfo (\bi lib -> lib{libBuildInfo=bi}) + +storeXFieldsLib :: UnrecFieldParser Library +storeXFieldsLib (f@('x':'-':_), val) l@(Library { libBuildInfo = bi }) = + Just $ l {libBuildInfo = + bi{ customFieldsBI = customFieldsBI bi ++ [(f,val)]}} +storeXFieldsLib _ _ = Nothing + +-- --------------------------------------------------------------------------- +-- The Executable type + + +executableFieldDescrs :: [FieldDescr Executable] +executableFieldDescrs = + [ -- note ordering: configuration must come first, for + -- showPackageDescription. 
+ simpleField "executable" + showToken parseTokenQ + exeName (\xs exe -> exe{exeName=xs}) + , simpleField "main-is" + showFilePath parseFilePathQ + modulePath (\xs exe -> exe{modulePath=xs}) + ] + ++ map biToExe binfoFieldDescrs + where biToExe = liftField buildInfo (\bi exe -> exe{buildInfo=bi}) + +storeXFieldsExe :: UnrecFieldParser Executable +storeXFieldsExe (f@('x':'-':_), val) e@(Executable { buildInfo = bi }) = + Just $ e {buildInfo = bi{ customFieldsBI = (f,val):customFieldsBI bi}} +storeXFieldsExe _ _ = Nothing + +-- --------------------------------------------------------------------------- +-- The TestSuite type + +-- | An intermediate type just used for parsing the test-suite stanza. +-- After validation it is converted into the proper 'TestSuite' type. +data TestSuiteStanza = TestSuiteStanza { + testStanzaTestType :: Maybe TestType, + testStanzaMainIs :: Maybe FilePath, + testStanzaTestModule :: Maybe ModuleName, + testStanzaBuildInfo :: BuildInfo + } + +emptyTestStanza :: TestSuiteStanza +emptyTestStanza = TestSuiteStanza Nothing Nothing Nothing mempty + +testSuiteFieldDescrs :: [FieldDescr TestSuiteStanza] +testSuiteFieldDescrs = + [ simpleField "type" + (maybe empty disp) (fmap Just parse) + testStanzaTestType (\x suite -> suite { testStanzaTestType = x }) + , simpleField "main-is" + (maybe empty showFilePath) (fmap Just parseFilePathQ) + testStanzaMainIs (\x suite -> suite { testStanzaMainIs = x }) + , simpleField "test-module" + (maybe empty disp) (fmap Just parseModuleNameQ) + testStanzaTestModule (\x suite -> suite { testStanzaTestModule = x }) + ] + ++ map biToTest binfoFieldDescrs + where + biToTest = liftField testStanzaBuildInfo + (\bi suite -> suite { testStanzaBuildInfo = bi }) + +storeXFieldsTest :: UnrecFieldParser TestSuiteStanza +storeXFieldsTest (f@('x':'-':_), val) t@(TestSuiteStanza { testStanzaBuildInfo = bi }) = + Just $ t {testStanzaBuildInfo = bi{ customFieldsBI = (f,val):customFieldsBI bi}} +storeXFieldsTest _ _ = Nothing + 
+validateTestSuite :: LineNo -> TestSuiteStanza -> ParseResult TestSuite +validateTestSuite line stanza = + case testStanzaTestType stanza of + Nothing -> return $ + emptyTestSuite { testBuildInfo = testStanzaBuildInfo stanza } + + Just tt@(TestTypeUnknown _ _) -> + return emptyTestSuite { + testInterface = TestSuiteUnsupported tt, + testBuildInfo = testStanzaBuildInfo stanza + } + + Just tt | tt `notElem` knownTestTypes -> + return emptyTestSuite { + testInterface = TestSuiteUnsupported tt, + testBuildInfo = testStanzaBuildInfo stanza + } + + Just tt@(TestTypeExe ver) -> + case testStanzaMainIs stanza of + Nothing -> syntaxError line (missingField "main-is" tt) + Just file -> do + when (isJust (testStanzaTestModule stanza)) $ + warning (extraField "test-module" tt) + return emptyTestSuite { + testInterface = TestSuiteExeV10 ver file, + testBuildInfo = testStanzaBuildInfo stanza + } + + Just tt@(TestTypeLib ver) -> + case testStanzaTestModule stanza of + Nothing -> syntaxError line (missingField "test-module" tt) + Just module_ -> do + when (isJust (testStanzaMainIs stanza)) $ + warning (extraField "main-is" tt) + return emptyTestSuite { + testInterface = TestSuiteLibV09 ver module_, + testBuildInfo = testStanzaBuildInfo stanza + } + + where + missingField name tt = "The '" ++ name ++ "' field is required for the " + ++ display tt ++ " test suite type." + + extraField name tt = "The '" ++ name ++ "' field is not used for the '" + ++ display tt ++ "' test suite type." + + +-- --------------------------------------------------------------------------- +-- The Benchmark type + +-- | An intermediate type just used for parsing the benchmark stanza. +-- After validation it is converted into the proper 'Benchmark' type. 
+data BenchmarkStanza = BenchmarkStanza { + benchmarkStanzaBenchmarkType :: Maybe BenchmarkType, + benchmarkStanzaMainIs :: Maybe FilePath, + benchmarkStanzaBenchmarkModule :: Maybe ModuleName, + benchmarkStanzaBuildInfo :: BuildInfo + } + +emptyBenchmarkStanza :: BenchmarkStanza +emptyBenchmarkStanza = BenchmarkStanza Nothing Nothing Nothing mempty + +benchmarkFieldDescrs :: [FieldDescr BenchmarkStanza] +benchmarkFieldDescrs = + [ simpleField "type" + (maybe empty disp) (fmap Just parse) + benchmarkStanzaBenchmarkType + (\x suite -> suite { benchmarkStanzaBenchmarkType = x }) + , simpleField "main-is" + (maybe empty showFilePath) (fmap Just parseFilePathQ) + benchmarkStanzaMainIs + (\x suite -> suite { benchmarkStanzaMainIs = x }) + ] + ++ map biToBenchmark binfoFieldDescrs + where + biToBenchmark = liftField benchmarkStanzaBuildInfo + (\bi suite -> suite { benchmarkStanzaBuildInfo = bi }) + +storeXFieldsBenchmark :: UnrecFieldParser BenchmarkStanza +storeXFieldsBenchmark (f@('x':'-':_), val) + t@(BenchmarkStanza { benchmarkStanzaBuildInfo = bi }) = + Just $ t {benchmarkStanzaBuildInfo = + bi{ customFieldsBI = (f,val):customFieldsBI bi}} +storeXFieldsBenchmark _ _ = Nothing + +validateBenchmark :: LineNo -> BenchmarkStanza -> ParseResult Benchmark +validateBenchmark line stanza = + case benchmarkStanzaBenchmarkType stanza of + Nothing -> return $ + emptyBenchmark { benchmarkBuildInfo = benchmarkStanzaBuildInfo stanza } + + Just tt@(BenchmarkTypeUnknown _ _) -> + return emptyBenchmark { + benchmarkInterface = BenchmarkUnsupported tt, + benchmarkBuildInfo = benchmarkStanzaBuildInfo stanza + } + + Just tt | tt `notElem` knownBenchmarkTypes -> + return emptyBenchmark { + benchmarkInterface = BenchmarkUnsupported tt, + benchmarkBuildInfo = benchmarkStanzaBuildInfo stanza + } + + Just tt@(BenchmarkTypeExe ver) -> + case benchmarkStanzaMainIs stanza of + Nothing -> syntaxError line (missingField "main-is" tt) + Just file -> do + when (isJust 
(benchmarkStanzaBenchmarkModule stanza)) $ + warning (extraField "benchmark-module" tt) + return emptyBenchmark { + benchmarkInterface = BenchmarkExeV10 ver file, + benchmarkBuildInfo = benchmarkStanzaBuildInfo stanza + } + + where + missingField name tt = "The '" ++ name ++ "' field is required for the " + ++ display tt ++ " benchmark type." + + extraField name tt = "The '" ++ name ++ "' field is not used for the '" + ++ display tt ++ "' benchmark type." + +-- --------------------------------------------------------------------------- +-- The BuildInfo type + + +binfoFieldDescrs :: [FieldDescr BuildInfo] +binfoFieldDescrs = + [ boolField "buildable" + buildable (\val binfo -> binfo{buildable=val}) + , commaListField "build-tools" + disp parseBuildTool + buildTools (\xs binfo -> binfo{buildTools=xs}) + , commaListFieldWithSep vcat "build-depends" + disp parse + buildDependsWithRenaming + setBuildDependsWithRenaming + , spaceListField "cpp-options" + showToken parseTokenQ' + cppOptions (\val binfo -> binfo{cppOptions=val}) + , spaceListField "cc-options" + showToken parseTokenQ' + ccOptions (\val binfo -> binfo{ccOptions=val}) + , spaceListField "ld-options" + showToken parseTokenQ' + ldOptions (\val binfo -> binfo{ldOptions=val}) + , commaListField "pkgconfig-depends" + disp parsePkgconfigDependency + pkgconfigDepends (\xs binfo -> binfo{pkgconfigDepends=xs}) + , listField "frameworks" + showToken parseTokenQ + frameworks (\val binfo -> binfo{frameworks=val}) + , listFieldWithSep vcat "c-sources" + showFilePath parseFilePathQ + cSources (\paths binfo -> binfo{cSources=paths}) + , listFieldWithSep vcat "js-sources" + showFilePath parseFilePathQ + jsSources (\paths binfo -> binfo{jsSources=paths}) + , simpleField "default-language" + (maybe empty disp) (option Nothing (fmap Just parseLanguageQ)) + defaultLanguage (\lang binfo -> binfo{defaultLanguage=lang}) + , listField "other-languages" + disp parseLanguageQ + otherLanguages (\langs binfo -> 
binfo{otherLanguages=langs}) + , listField "default-extensions" + disp parseExtensionQ + defaultExtensions (\exts binfo -> binfo{defaultExtensions=exts}) + , listField "other-extensions" + disp parseExtensionQ + otherExtensions (\exts binfo -> binfo{otherExtensions=exts}) + , listField "extensions" + disp parseExtensionQ + oldExtensions (\exts binfo -> binfo{oldExtensions=exts}) + + , listFieldWithSep vcat "extra-libraries" + showToken parseTokenQ + extraLibs (\xs binfo -> binfo{extraLibs=xs}) + , listFieldWithSep vcat "extra-ghci-libraries" + showToken parseTokenQ + extraGHCiLibs (\xs binfo -> binfo{extraGHCiLibs=xs}) + , listField "extra-lib-dirs" + showFilePath parseFilePathQ + extraLibDirs (\xs binfo -> binfo{extraLibDirs=xs}) + , listFieldWithSep vcat "includes" + showFilePath parseFilePathQ + includes (\paths binfo -> binfo{includes=paths}) + , listFieldWithSep vcat "install-includes" + showFilePath parseFilePathQ + installIncludes (\paths binfo -> binfo{installIncludes=paths}) + , listField "include-dirs" + showFilePath parseFilePathQ + includeDirs (\paths binfo -> binfo{includeDirs=paths}) + , listField "hs-source-dirs" + showFilePath parseFilePathQ + hsSourceDirs (\paths binfo -> binfo{hsSourceDirs=paths}) + , listFieldWithSep vcat "other-modules" + disp parseModuleNameQ + otherModules (\val binfo -> binfo{otherModules=val}) + , optsField "ghc-prof-options" GHC + profOptions (\val binfo -> binfo{profOptions=val}) + , optsField "ghcjs-prof-options" GHCJS + profOptions (\val binfo -> binfo{profOptions=val}) + , optsField "ghc-shared-options" GHC + sharedOptions (\val binfo -> binfo{sharedOptions=val}) + , optsField "ghcjs-shared-options" GHCJS + sharedOptions (\val binfo -> binfo{sharedOptions=val}) + , optsField "ghc-options" GHC + options (\path binfo -> binfo{options=path}) + , optsField "ghcjs-options" GHCJS + options (\path binfo -> binfo{options=path}) + , optsField "jhc-options" JHC + options (\path binfo -> binfo{options=path}) + + -- NOTE: Hugs and 
NHC are not supported anymore, but these fields are kept + -- around for backwards compatibility. + , optsField "hugs-options" Hugs + options (const id) + , optsField "nhc98-options" NHC + options (const id) + ] + +storeXFieldsBI :: UnrecFieldParser BuildInfo +storeXFieldsBI (f@('x':'-':_),val) bi = Just bi{ customFieldsBI = (f,val):customFieldsBI bi } +storeXFieldsBI _ _ = Nothing + +------------------------------------------------------------------------------ + +flagFieldDescrs :: [FieldDescr Flag] +flagFieldDescrs = + [ simpleField "description" + showFreeText parseFreeText + flagDescription (\val fl -> fl{ flagDescription = val }) + , boolField "default" + flagDefault (\val fl -> fl{ flagDefault = val }) + , boolField "manual" + flagManual (\val fl -> fl{ flagManual = val }) + ] + +------------------------------------------------------------------------------ + +sourceRepoFieldDescrs :: [FieldDescr SourceRepo] +sourceRepoFieldDescrs = + [ simpleField "type" + (maybe empty disp) (fmap Just parse) + repoType (\val repo -> repo { repoType = val }) + , simpleField "location" + (maybe empty showFreeText) (fmap Just parseFreeText) + repoLocation (\val repo -> repo { repoLocation = val }) + , simpleField "module" + (maybe empty showToken) (fmap Just parseTokenQ) + repoModule (\val repo -> repo { repoModule = val }) + , simpleField "branch" + (maybe empty showToken) (fmap Just parseTokenQ) + repoBranch (\val repo -> repo { repoBranch = val }) + , simpleField "tag" + (maybe empty showToken) (fmap Just parseTokenQ) + repoTag (\val repo -> repo { repoTag = val }) + , simpleField "subdir" + (maybe empty showFilePath) (fmap Just parseFilePathQ) + repoSubdir (\val repo -> repo { repoSubdir = val }) + ] + +-- --------------------------------------------------------------- +-- Parsing + +-- | Given a parser and a filename, return the parse of the file, +-- after checking if the file exists. 
+readAndParseFile :: (FilePath -> (String -> IO a) -> IO a) + -> (String -> ParseResult a) + -> Verbosity + -> FilePath -> IO a +readAndParseFile withFileContents' parser verbosity fpath = do + exists <- doesFileExist fpath + unless exists + (die $ "Error Parsing: file \"" ++ fpath ++ "\" doesn't exist. Cannot continue.") + withFileContents' fpath $ \str -> case parser str of + ParseFailed e -> do + let (line, message) = locatedErrorMsg e + dieWithLocation fpath line message + ParseOk warnings x -> do + mapM_ (warn verbosity . showPWarning fpath) $ reverse warnings + return x + +readHookedBuildInfo :: Verbosity -> FilePath -> IO HookedBuildInfo +readHookedBuildInfo = + readAndParseFile withFileContents parseHookedBuildInfo + +-- |Parse the given package file. +readPackageDescription :: Verbosity -> FilePath -> IO GenericPackageDescription +readPackageDescription = + readAndParseFile withUTF8FileContents parsePackageDescription + +stanzas :: [Field] -> [[Field]] +stanzas [] = [] +stanzas (f:fields) = (f:this) : stanzas rest + where + (this, rest) = break isStanzaHeader fields + +isStanzaHeader :: Field -> Bool +isStanzaHeader (F _ f _) = f == "executable" +isStanzaHeader _ = False + +------------------------------------------------------------------------------ + + +mapSimpleFields :: (Field -> ParseResult Field) -> [Field] + -> ParseResult [Field] +mapSimpleFields f = mapM walk + where + walk fld@F{} = f fld + walk (IfBlock l c fs1 fs2) = do + fs1' <- mapM walk fs1 + fs2' <- mapM walk fs2 + return (IfBlock l c fs1' fs2') + walk (Section ln n l fs1) = do + fs1' <- mapM walk fs1 + return (Section ln n l fs1') + +-- prop_isMapM fs = mapSimpleFields return fs == return fs + + +-- names of fields that represents dependencies, thus consrca +constraintFieldNames :: [String] +constraintFieldNames = ["build-depends"] + +-- Possible refactoring would be to have modifiers be explicit about what +-- they add and define an accessor that specifies what the dependencies +-- are. 
This way we would completely reuse the parsing knowledge from the +-- field descriptor. +parseConstraint :: Field -> ParseResult [DependencyWithRenaming] +parseConstraint (F l n v) + | n == "build-depends" = runP l n (parseCommaList parse) v +parseConstraint f = userBug $ "Constraint was expected (got: " ++ show f ++ ")" + +{- +headerFieldNames :: [String] +headerFieldNames = filter (\n -> not (n `elem` constraintFieldNames)) + . map fieldName $ pkgDescrFieldDescrs +-} + +libFieldNames :: [String] +libFieldNames = map fieldName libFieldDescrs + ++ buildInfoNames ++ constraintFieldNames + +-- exeFieldNames :: [String] +-- exeFieldNames = map fieldName executableFieldDescrs +-- ++ buildInfoNames + +buildInfoNames :: [String] +buildInfoNames = map fieldName binfoFieldDescrs + ++ map fst deprecatedFieldsBuildInfo + +-- A minimal implementation of the StateT monad transformer to avoid depending +-- on the 'mtl' package. +newtype StT s m a = StT { runStT :: s -> m (a,s) } + +instance Functor f => Functor (StT s f) where + fmap g (StT f) = StT $ fmap (first g) . f + +instance (Monad m, Functor m) => Applicative (StT s m) where + pure = return + (<*>) = ap + +instance Monad m => Monad (StT s m) where + return a = StT (\s -> return (a,s)) + StT f >>= g = StT $ \s -> do + (a,s') <- f s + runStT (g a) s' + +get :: Monad m => StT s m s +get = StT $ \s -> return (s, s) + +modify :: Monad m => (s -> s) -> StT s m () +modify f = StT $ \s -> return ((),f s) + +lift :: Monad m => m a -> StT s m a +lift m = StT $ \s -> m >>= \a -> return (a,s) + +evalStT :: Monad m => StT s m a -> s -> m a +evalStT st s = liftM fst $ runStT st s + +-- Our monad for parsing a list/tree of fields. +-- +-- The state represents the remaining fields to be processed. +type PM a = StT [Field] ParseResult a + + + +-- return look-ahead field or nothing if we're at the end of the file +peekField :: PM (Maybe Field) +peekField = liftM listToMaybe get + +-- Unconditionally discard the first field in our state. 
Will error when it +-- reaches end of file. (Yes, that's evil.) +skipField :: PM () +skipField = modify tail + +--FIXME: this should take a ByteString, not a String. We have to be able to +-- decode UTF8 and handle the BOM. + +-- | Parses the given file into a 'GenericPackageDescription'. +-- +-- In Cabal 1.2 the syntax for package descriptions was changed to a format +-- with sections and possibly indented property descriptions. +parsePackageDescription :: String -> ParseResult GenericPackageDescription +parsePackageDescription file = do + + -- This function is quite complex because it needs to be able to parse + -- both pre-Cabal-1.2 and post-Cabal-1.2 files. Additionally, it contains + -- a lot of parser-related noise since we do not want to depend on Parsec. + -- + -- If we detect an pre-1.2 file we implicitly convert it to post-1.2 + -- style. See 'sectionizeFields' below for details about the conversion. + + fields0 <- readFields file `catchParseError` \err -> + let tabs = findIndentTabs file in + case err of + -- In case of a TabsError report them all at once. + TabsError tabLineNo -> reportTabsError + -- but only report the ones including and following + -- the one that caused the actual error + [ t | t@(lineNo',_) <- tabs + , lineNo' >= tabLineNo ] + _ -> parseFail err + + let cabalVersionNeeded = + head $ [ minVersionBound versionRange + | Just versionRange <- [ simpleParse v + | F _ "cabal-version" v <- fields0 ] ] + ++ [Version [0] []] + minVersionBound versionRange = + case asVersionIntervals versionRange of + [] -> Version [0] [] + ((LowerBound version _, _):_) -> version + + handleFutureVersionParseFailure cabalVersionNeeded $ do + + let sf = sectionizeFields fields0 -- ensure 1.2 format + + -- figure out and warn about deprecated stuff (warnings are collected + -- inside our parsing monad) + fields <- mapSimpleFields deprecField sf + + -- Our parsing monad takes the not-yet-parsed fields as its state. 
+ -- After each successful parse we remove the field from the state + -- ('skipField') and move on to the next one. + -- + -- Things are complicated a bit, because fields take a tree-like + -- structure -- they can be sections or "if"/"else" conditionals. + + flip evalStT fields $ do + + -- The header consists of all simple fields up to the first section + -- (flag, library, executable). + header_fields <- getHeader [] + + -- Parses just the header fields and stores them in a + -- 'PackageDescription'. Note that our final result is a + -- 'GenericPackageDescription'; for pragmatic reasons we just store + -- the partially filled-out 'PackageDescription' inside the + -- 'GenericPackageDescription'. + pkg <- lift $ parseFields pkgDescrFieldDescrs + storeXFieldsPD + emptyPackageDescription + header_fields + + -- 'getBody' assumes that the remaining fields only consist of + -- flags, lib and exe sections. + (repos, flags, mlib, exes, tests, bms) <- getBody + warnIfRest -- warn if getBody did not parse up to the last field. + -- warn about using old/new syntax with wrong cabal-version: + maybeWarnCabalVersion (not $ oldSyntax fields0) pkg + checkForUndefinedFlags flags mlib exes tests + return $ GenericPackageDescription + pkg { sourceRepos = repos } + flags mlib exes tests bms + + where + oldSyntax = all isSimpleField + reportTabsError tabs = + syntaxError (fst (head tabs)) $ + "Do not use tabs for indentation (use spaces instead)\n" + ++ " Tabs were used at (line,column): " ++ show tabs + + maybeWarnCabalVersion newsyntax pkg + | newsyntax && specVersion pkg < Version [1,2] [] + = lift $ warning $ + "A package using section syntax must specify at least\n" + ++ "'cabal-version: >= 1.2'." + + maybeWarnCabalVersion newsyntax pkg + | not newsyntax && specVersion pkg >= Version [1,2] [] + = lift $ warning $ + "A package using 'cabal-version: " + ++ displaySpecVersion (specVersionRaw pkg) + ++ "' must use section syntax. See the Cabal user guide for details." 
+ where + displaySpecVersion (Left version) = display version + displaySpecVersion (Right versionRange) = + case asVersionIntervals versionRange of + [] {- impossible -} -> display versionRange + ((LowerBound version _, _):_) -> display (orLaterVersion version) + + maybeWarnCabalVersion _ _ = return () + + + handleFutureVersionParseFailure cabalVersionNeeded parseBody = + (unless versionOk (warning message) >> parseBody) + `catchParseError` \parseError -> case parseError of + TabsError _ -> parseFail parseError + _ | versionOk -> parseFail parseError + | otherwise -> fail message + where versionOk = cabalVersionNeeded <= cabalVersion + message = "This package requires at least Cabal version " + ++ display cabalVersionNeeded + + -- "Sectionize" an old-style Cabal file. A sectionized file has: + -- + -- * all global fields at the beginning, followed by + -- + -- * all flag declarations, followed by + -- + -- * an optional library section, and an arbitrary number of executable + -- sections (in any order). + -- + -- The current implementation just gathers all library-specific fields + -- in a library section and wraps all executable stanzas in an executable + -- section. + sectionizeFields :: [Field] -> [Field] + sectionizeFields fs + | oldSyntax fs = + let + -- "build-depends" is a local field now. To be backwards + -- compatible, we still allow it as a global field in old-style + -- package description files and translate it to a local field by + -- adding it to every non-empty section + (hdr0, exes0) = break ((=="executable") . fName) fs + (hdr, libfs0) = partition (not . (`elem` libFieldNames) . fName) hdr0 + + (deps, libfs) = partition ((== "build-depends") . fName) + libfs0 + + exes = unfoldr toExe exes0 + toExe [] = Nothing + toExe (F l e n : r) + | e == "executable" = + let (efs, r') = break ((=="executable") . 
fName) r + in Just (Section l "executable" n (deps ++ efs), r') + toExe _ = cabalBug "unexpected input to 'toExe'" + in + hdr ++ + (if null libfs then [] + else [Section (lineNo (head libfs)) "library" "" (deps ++ libfs)]) + ++ exes + | otherwise = fs + + isSimpleField F{} = True + isSimpleField _ = False + + -- warn if there's something at the end of the file + warnIfRest :: PM () + warnIfRest = do + s <- get + case s of + [] -> return () + _ -> lift $ warning "Ignoring trailing declarations." -- add line no. + + -- all simple fields at the beginning of the file are (considered) header + -- fields + getHeader :: [Field] -> PM [Field] + getHeader acc = peekField >>= \mf -> case mf of + Just f@F{} -> skipField >> getHeader (f:acc) + _ -> return (reverse acc) + + -- + -- body ::= { repo | flag | library | executable | test }+ -- at most one lib + -- + -- The body consists of an optional sequence of declarations of flags and + -- an arbitrary number of executables and at most one library. + getBody :: PM ([SourceRepo], [Flag] + ,Maybe (CondTree ConfVar [Dependency] Library) + ,[(String, CondTree ConfVar [Dependency] Executable)] + ,[(String, CondTree ConfVar [Dependency] TestSuite)] + ,[(String, CondTree ConfVar [Dependency] Benchmark)]) + getBody = peekField >>= \mf -> case mf of + Just (Section line_no sec_type sec_label sec_fields) + | sec_type == "executable" -> do + when (null sec_label) $ lift $ syntaxError line_no + "'executable' needs one argument (the executable's name)" + exename <- lift $ runP line_no "executable" parseTokenQ sec_label + flds <- collectFields parseExeFields sec_fields + skipField + (repos, flags, lib, exes, tests, bms) <- getBody + return (repos, flags, lib, (exename, flds): exes, tests, bms) + + | sec_type == "test-suite" -> do + when (null sec_label) $ lift $ syntaxError line_no + "'test-suite' needs one argument (the test suite's name)" + testname <- lift $ runP line_no "test" parseTokenQ sec_label + flds <- collectFields 
(parseTestFields line_no) sec_fields + + -- Check that a valid test suite type has been chosen. A type + -- field may be given inside a conditional block, so we must + -- check for that before complaining that a type field has not + -- been given. The test suite must always have a valid type, so + -- we need to check both the 'then' and 'else' blocks, though + -- the blocks need not have the same type. + let checkTestType ts ct = + let ts' = mappend ts $ condTreeData ct + -- If a conditional has only a 'then' block and no + -- 'else' block, then it cannot have a valid type + -- in every branch, unless the type is specified at + -- a higher level in the tree. + checkComponent (_, _, Nothing) = False + -- If a conditional has a 'then' block and an 'else' + -- block, both must specify a test type, unless the + -- type is specified higher in the tree. + checkComponent (_, t, Just e) = + checkTestType ts' t && checkTestType ts' e + -- Does the current node specify a test type? + hasTestType = testInterface ts' + /= testInterface emptyTestSuite + components = condTreeComponents ct + -- If the current level of the tree specifies a type, + -- then we are done. If not, then one of the conditional + -- branches below the current node must specify a type. + -- Each node may have multiple immediate children; we + -- only one need one to specify a type because the + -- configure step uses 'mappend' to join together the + -- results of flag resolution. + in hasTestType || any checkComponent components + if checkTestType emptyTestSuite flds + then do + skipField + (repos, flags, lib, exes, tests, bms) <- getBody + return (repos, flags, lib, exes, (testname, flds) : tests, bms) + else lift $ syntaxError line_no $ + "Test suite \"" ++ testname + ++ "\" is missing required field \"type\" or the field " + ++ "is not present in all conditional branches. 
The " + ++ "available test types are: " + ++ intercalate ", " (map display knownTestTypes) + + | sec_type == "benchmark" -> do + when (null sec_label) $ lift $ syntaxError line_no + "'benchmark' needs one argument (the benchmark's name)" + benchname <- lift $ runP line_no "benchmark" parseTokenQ sec_label + flds <- collectFields (parseBenchmarkFields line_no) sec_fields + + -- Check that a valid benchmark type has been chosen. A type + -- field may be given inside a conditional block, so we must + -- check for that before complaining that a type field has not + -- been given. The benchmark must always have a valid type, so + -- we need to check both the 'then' and 'else' blocks, though + -- the blocks need not have the same type. + let checkBenchmarkType ts ct = + let ts' = mappend ts $ condTreeData ct + -- If a conditional has only a 'then' block and no + -- 'else' block, then it cannot have a valid type + -- in every branch, unless the type is specified at + -- a higher level in the tree. + checkComponent (_, _, Nothing) = False + -- If a conditional has a 'then' block and an 'else' + -- block, both must specify a benchmark type, unless the + -- type is specified higher in the tree. + checkComponent (_, t, Just e) = + checkBenchmarkType ts' t && checkBenchmarkType ts' e + -- Does the current node specify a benchmark type? + hasBenchmarkType = benchmarkInterface ts' + /= benchmarkInterface emptyBenchmark + components = condTreeComponents ct + -- If the current level of the tree specifies a type, + -- then we are done. If not, then one of the conditional + -- branches below the current node must specify a type. + -- Each node may have multiple immediate children; we + -- only one need one to specify a type because the + -- configure step uses 'mappend' to join together the + -- results of flag resolution. 
+ in hasBenchmarkType || any checkComponent components + if checkBenchmarkType emptyBenchmark flds + then do + skipField + (repos, flags, lib, exes, tests, bms) <- getBody + return (repos, flags, lib, exes, tests, (benchname, flds) : bms) + else lift $ syntaxError line_no $ + "Benchmark \"" ++ benchname + ++ "\" is missing required field \"type\" or the field " + ++ "is not present in all conditional branches. The " + ++ "available benchmark types are: " + ++ intercalate ", " (map display knownBenchmarkTypes) + + | sec_type == "library" -> do + unless (null sec_label) $ lift $ + syntaxError line_no "'library' expects no argument" + flds <- collectFields parseLibFields sec_fields + skipField + (repos, flags, lib, exes, tests, bms) <- getBody + when (isJust lib) $ lift $ syntaxError line_no + "There can only be one library section in a package description." + return (repos, flags, Just flds, exes, tests, bms) + + | sec_type == "flag" -> do + when (null sec_label) $ lift $ + syntaxError line_no "'flag' needs one argument (the flag's name)" + flag <- lift $ parseFields + flagFieldDescrs + warnUnrec + (MkFlag (FlagName (lowercase sec_label)) "" True False) + sec_fields + skipField + (repos, flags, lib, exes, tests, bms) <- getBody + return (repos, flag:flags, lib, exes, tests, bms) + + | sec_type == "source-repository" -> do + when (null sec_label) $ lift $ syntaxError line_no $ + "'source-repository' needs one argument, " + ++ "the repo kind which is usually 'head' or 'this'" + kind <- case simpleParse sec_label of + Just kind -> return kind + Nothing -> lift $ syntaxError line_no $ + "could not parse repo kind: " ++ sec_label + repo <- lift $ parseFields + sourceRepoFieldDescrs + warnUnrec + SourceRepo { + repoKind = kind, + repoType = Nothing, + repoLocation = Nothing, + repoModule = Nothing, + repoBranch = Nothing, + repoTag = Nothing, + repoSubdir = Nothing + } + sec_fields + skipField + (repos, flags, lib, exes, tests, bms) <- getBody + return (repo:repos, flags, 
lib, exes, tests, bms) + + | otherwise -> do + lift $ warning $ "Ignoring unknown section type: " ++ sec_type + skipField + getBody + Just f@(F {}) -> do + _ <- lift $ syntaxError (lineNo f) $ + "Plain fields are not allowed in between stanzas: " ++ show f + skipField + getBody + Just f@(IfBlock {}) -> do + _ <- lift $ syntaxError (lineNo f) $ + "If-blocks are not allowed in between stanzas: " ++ show f + skipField + getBody + Nothing -> return ([], [], Nothing, [], [], []) + + -- Extracts all fields in a block and returns a 'CondTree'. + -- + -- We have to recurse down into conditionals and we treat fields that + -- describe dependencies specially. + collectFields :: ([Field] -> PM a) -> [Field] + -> PM (CondTree ConfVar [Dependency] a) + collectFields parser allflds = do + + let simplFlds = [ F l n v | F l n v <- allflds ] + condFlds = [ f | f@IfBlock{} <- allflds ] + sections = [ s | s@Section{} <- allflds ] + + -- Put these through the normal parsing pass too, so that we + -- collect the ModRenamings + let depFlds = filter isConstraint simplFlds + + mapM_ + (\(Section l n _ _) -> lift . warning $ + "Unexpected section '" ++ n ++ "' on line " ++ show l) + sections + + a <- parser simplFlds + deps <- liftM concat . mapM (lift . fmap (map dependency) . parseConstraint) $ depFlds + + ifs <- mapM processIfs condFlds + + return (CondNode a deps ifs) + where + isConstraint (F _ n _) = n `elem` constraintFieldNames + isConstraint _ = False + + processIfs (IfBlock l c t e) = do + cnd <- lift $ runP l "if" parseCondition c + t' <- collectFields parser t + e' <- case e of + [] -> return Nothing + es -> do fs <- collectFields parser es + return (Just fs) + return (cnd, t', e') + processIfs _ = cabalBug "processIfs called with wrong field type" + + parseLibFields :: [Field] -> PM Library + parseLibFields = lift . parseFields libFieldDescrs storeXFieldsLib emptyLibrary + + -- Note: we don't parse the "executable" field here, hence the tail hack. 
+ parseExeFields :: [Field] -> PM Executable + parseExeFields = lift . parseFields (tail executableFieldDescrs) + storeXFieldsExe emptyExecutable + + parseTestFields :: LineNo -> [Field] -> PM TestSuite + parseTestFields line fields = do + x <- lift $ parseFields testSuiteFieldDescrs storeXFieldsTest + emptyTestStanza fields + lift $ validateTestSuite line x + + parseBenchmarkFields :: LineNo -> [Field] -> PM Benchmark + parseBenchmarkFields line fields = do + x <- lift $ parseFields benchmarkFieldDescrs storeXFieldsBenchmark + emptyBenchmarkStanza fields + lift $ validateBenchmark line x + + checkForUndefinedFlags :: + [Flag] -> + Maybe (CondTree ConfVar [Dependency] Library) -> + [(String, CondTree ConfVar [Dependency] Executable)] -> + [(String, CondTree ConfVar [Dependency] TestSuite)] -> + PM () + checkForUndefinedFlags flags mlib exes tests = do + let definedFlags = map flagName flags + maybe (return ()) (checkCondTreeFlags definedFlags) mlib + mapM_ (checkCondTreeFlags definedFlags . snd) exes + mapM_ (checkCondTreeFlags definedFlags . snd) tests + + checkCondTreeFlags :: [FlagName] -> CondTree ConfVar c a -> PM () + checkCondTreeFlags definedFlags ct = do + let fv = nub $ freeVars ct + unless (all (`elem` definedFlags) fv) $ + fail $ "These flags are used without having been defined: " + ++ intercalate ", " [ n | FlagName n <- fv \\ definedFlags ] + + +-- | Parse a list of fields, given a list of field descriptions, +-- a structure to accumulate the parsed fields, and a function +-- that can decide what to do with fields which don't match any +-- of the field descriptions. 
+parseFields :: [FieldDescr a] -- ^ descriptions of fields we know how to + -- parse + -> UnrecFieldParser a -- ^ possibly do something with + -- unrecognized fields + -> a -- ^ accumulator + -> [Field] -- ^ fields to be parsed + -> ParseResult a +parseFields descrs unrec ini fields = + do (a, unknowns) <- foldM (parseField descrs unrec) (ini, []) fields + unless (null unknowns) $ warning $ render $ + text "Unknown fields:" <+> + commaSep (map (\(l,u) -> u ++ " (line " ++ show l ++ ")") + (reverse unknowns)) + $+$ + text "Fields allowed in this section:" $$ + nest 4 (commaSep $ map fieldName descrs) + return a + where + commaSep = fsep . punctuate comma . map text + +parseField :: [FieldDescr a] -- ^ list of parseable fields + -> UnrecFieldParser a -- ^ possibly do something with + -- unrecognized fields + -> (a,[(Int,String)]) -- ^ accumulated result and warnings + -> Field -- ^ the field to be parsed + -> ParseResult (a, [(Int,String)]) +parseField (FieldDescr name _ parser : fields) unrec (a, us) (F line f val) + | name == f = parser line val a >>= \a' -> return (a',us) + | otherwise = parseField fields unrec (a,us) (F line f val) +parseField [] unrec (a,us) (F l f val) = return $ + case unrec (f,val) a of -- no fields matched, see if the 'unrec' + Just a' -> (a',us) -- function wants to do anything with it + Nothing -> (a, (l,f):us) +parseField _ _ _ _ = cabalBug "'parseField' called on a non-field" + +deprecatedFields :: [(String,String)] +deprecatedFields = + deprecatedFieldsPkgDescr ++ deprecatedFieldsBuildInfo + +deprecatedFieldsPkgDescr :: [(String,String)] +deprecatedFieldsPkgDescr = [ ("other-files", "extra-source-files") ] + +deprecatedFieldsBuildInfo :: [(String,String)] +deprecatedFieldsBuildInfo = [ ("hs-source-dir","hs-source-dirs") ] + +-- Handle deprecated fields +deprecField :: Field -> ParseResult Field +deprecField (F line fld val) = do + fld' <- case lookup fld deprecatedFields of + Nothing -> return fld + Just newName -> do + warning $ "The 
field \"" ++ fld + ++ "\" is deprecated, please use \"" ++ newName ++ "\"" + return newName + return (F line fld' val) +deprecField _ = cabalBug "'deprecField' called on a non-field" + + +parseHookedBuildInfo :: String -> ParseResult HookedBuildInfo +parseHookedBuildInfo inp = do + fields <- readFields inp + let ss@(mLibFields:exes) = stanzas fields + mLib <- parseLib mLibFields + biExes <- mapM parseExe (maybe ss (const exes) mLib) + return (mLib, biExes) + where + parseLib :: [Field] -> ParseResult (Maybe BuildInfo) + parseLib (bi@(F _ inFieldName _:_)) + | lowercase inFieldName /= "executable" = liftM Just (parseBI bi) + parseLib _ = return Nothing + + parseExe :: [Field] -> ParseResult (String, BuildInfo) + parseExe (F line inFieldName mName:bi) + | lowercase inFieldName == "executable" + = do bis <- parseBI bi + return (mName, bis) + | otherwise = syntaxError line "expecting 'executable' at top of stanza" + parseExe (_:_) = cabalBug "`parseExe' called on a non-field" + parseExe [] = syntaxError 0 "error in parsing buildinfo file. 
Expected executable stanza" + + parseBI st = parseFields binfoFieldDescrs storeXFieldsBI emptyBuildInfo st + +-- --------------------------------------------------------------------------- +-- Pretty printing + +writePackageDescription :: FilePath -> PackageDescription -> IO () +writePackageDescription fpath pkg = writeUTF8File fpath (showPackageDescription pkg) + +--TODO: make this use section syntax +-- add equivalent for GenericPackageDescription +showPackageDescription :: PackageDescription -> String +showPackageDescription pkg = render $ + ppPackage pkg + $$ ppCustomFields (customFieldsPD pkg) + $$ (case library pkg of + Nothing -> empty + Just lib -> ppLibrary lib) + $$ vcat [ space $$ ppExecutable exe | exe <- executables pkg ] + where + ppPackage = ppFields pkgDescrFieldDescrs + ppLibrary = ppFields libFieldDescrs + ppExecutable = ppFields executableFieldDescrs + +ppCustomFields :: [(String,String)] -> Doc +ppCustomFields flds = vcat (map ppCustomField flds) + +ppCustomField :: (String,String) -> Doc +ppCustomField (name,val) = text name <> colon <+> showFreeText val + +writeHookedBuildInfo :: FilePath -> HookedBuildInfo -> IO () +writeHookedBuildInfo fpath = writeFileAtomic fpath . BS.Char8.pack + . showHookedBuildInfo + +showHookedBuildInfo :: HookedBuildInfo -> String +showHookedBuildInfo (mb_lib_bi, ex_bis) = render $ + (case mb_lib_bi of + Nothing -> empty + Just bi -> ppBuildInfo bi) + $$ vcat [ space + $$ text "executable:" <+> text name + $$ ppBuildInfo bi + | (name, bi) <- ex_bis ] + where + ppBuildInfo bi = ppFields binfoFieldDescrs bi + $$ ppCustomFields (customFieldsBI bi) + +-- replace all tabs used as indentation with whitespace, also return where +-- tabs were found +findIndentTabs :: String -> [(Int,Int)] +findIndentTabs = concatMap checkLine + . zip [1..] + . lines + where + checkLine (lineno, l) = + let (indent, _content) = span isSpace l + tabCols = map fst . filter ((== '\t') . snd) . zip [0..] 
+ addLineNo = map (\col -> (lineno,col)) + in addLineNo (tabCols indent) + +--test_findIndentTabs = findIndentTabs $ unlines $ +-- [ "foo", " bar", " \t baz", "\t biz\t", "\t\t \t mib" ] + +-- | Dependencies plus module renamings. This is what users specify; however, +-- renaming information is not used for dependency resolution. +data DependencyWithRenaming = DependencyWithRenaming Dependency ModuleRenaming + deriving (Read, Show, Eq, Typeable, Data) + +dependency :: DependencyWithRenaming -> Dependency +dependency (DependencyWithRenaming dep _) = dep + +instance Text DependencyWithRenaming where + disp (DependencyWithRenaming d rns) = disp d <+> disp rns + parse = do d <- parse + Parse.skipSpaces + rns <- parse + Parse.skipSpaces + return (DependencyWithRenaming d rns) + +buildDependsWithRenaming :: BuildInfo -> [DependencyWithRenaming] +buildDependsWithRenaming pkg = + map (\dep@(Dependency n _) -> + DependencyWithRenaming dep + (Map.findWithDefault defaultRenaming n (targetBuildRenaming pkg))) + (targetBuildDepends pkg) + +setBuildDependsWithRenaming :: [DependencyWithRenaming] -> BuildInfo -> BuildInfo +setBuildDependsWithRenaming deps pkg = pkg { + targetBuildDepends = map dependency deps, + targetBuildRenaming = Map.fromList (map (\(DependencyWithRenaming (Dependency n _) rns) -> (n, rns)) deps) + } diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/PackageDescription/PrettyPrint.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/PackageDescription/PrettyPrint.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/PackageDescription/PrettyPrint.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/PackageDescription/PrettyPrint.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,243 @@ +{-# LANGUAGE CPP #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.PackageDescription.PrettyPrint +-- Copyright : Jürgen 
Nicklisch-Franken 2010 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Stability : provisional +-- Portability : portable +-- +-- Pretty printing for cabal files +-- +----------------------------------------------------------------------------- + +module Distribution.PackageDescription.PrettyPrint ( + writeGenericPackageDescription, + showGenericPackageDescription, +) where + +#if __GLASGOW_HASKELL__ < 710 +import Data.Monoid (Monoid(mempty)) +#endif +import Distribution.PackageDescription + ( Benchmark(..), BenchmarkInterface(..), benchmarkType + , TestSuite(..), TestSuiteInterface(..), testType + , SourceRepo(..), + customFieldsBI, CondTree(..), Condition(..), + FlagName(..), ConfVar(..), Executable(..), Library(..), + Flag(..), PackageDescription(..), + GenericPackageDescription(..)) +import Text.PrettyPrint + (hsep, parens, char, nest, empty, + isEmpty, ($$), (<+>), colon, (<>), text, vcat, ($+$), Doc, render) +import Distribution.Simple.Utils (writeUTF8File) +import Distribution.ParseUtils (showFreeText, FieldDescr(..), indentWith, ppField, ppFields) +import Distribution.PackageDescription.Parse (pkgDescrFieldDescrs,binfoFieldDescrs,libFieldDescrs, + sourceRepoFieldDescrs,flagFieldDescrs) +import Distribution.Package (Dependency(..)) +import Distribution.Text (Text(..)) +import Data.Maybe (isJust, fromJust, isNothing) + +-- | Recompile with false for regression testing +simplifiedPrinting :: Bool +simplifiedPrinting = False + +-- | Writes a .cabal file from a generic package description +writeGenericPackageDescription :: FilePath -> GenericPackageDescription -> IO () +writeGenericPackageDescription fpath pkg = writeUTF8File fpath (showGenericPackageDescription pkg) + +-- | Writes a generic package description to a string +showGenericPackageDescription :: GenericPackageDescription -> String +showGenericPackageDescription = render . 
ppGenericPackageDescription + +ppGenericPackageDescription :: GenericPackageDescription -> Doc +ppGenericPackageDescription gpd = + ppPackageDescription (packageDescription gpd) + $+$ ppGenPackageFlags (genPackageFlags gpd) + $+$ ppLibrary (condLibrary gpd) + $+$ ppExecutables (condExecutables gpd) + $+$ ppTestSuites (condTestSuites gpd) + $+$ ppBenchmarks (condBenchmarks gpd) + +ppPackageDescription :: PackageDescription -> Doc +ppPackageDescription pd = ppFields pkgDescrFieldDescrs pd + $+$ ppCustomFields (customFieldsPD pd) + $+$ ppSourceRepos (sourceRepos pd) + +ppSourceRepos :: [SourceRepo] -> Doc +ppSourceRepos [] = empty +ppSourceRepos (hd:tl) = ppSourceRepo hd $+$ ppSourceRepos tl + +ppSourceRepo :: SourceRepo -> Doc +ppSourceRepo repo = + emptyLine $ text "source-repository" <+> disp (repoKind repo) $+$ + (nest indentWith (ppFields sourceRepoFieldDescrs' repo)) + where + sourceRepoFieldDescrs' = [fd | fd <- sourceRepoFieldDescrs, fieldName fd /= "kind"] + +-- TODO: this is a temporary hack. Ideally, fields containing default values +-- would be filtered out when the @FieldDescr a@ list is generated. 
+ppFieldsFiltered :: [(String, String)] -> [FieldDescr a] -> a -> Doc +ppFieldsFiltered removable fields x = ppFields (filter nondefault fields) x + where + nondefault (FieldDescr name getter _) = + maybe True (render (getter x) /=) (lookup name removable) + +binfoDefaults :: [(String, String)] +binfoDefaults = [("buildable", "True")] + +libDefaults :: [(String, String)] +libDefaults = ("exposed", "True") : binfoDefaults + +flagDefaults :: [(String, String)] +flagDefaults = [("default", "True"), ("manual", "False")] + +ppDiffFields :: [FieldDescr a] -> a -> a -> Doc +ppDiffFields fields x y = + vcat [ ppField name (getter x) + | FieldDescr name getter _ <- fields + , render (getter x) /= render (getter y) + ] + +ppCustomFields :: [(String,String)] -> Doc +ppCustomFields flds = vcat [ppCustomField f | f <- flds] + +ppCustomField :: (String,String) -> Doc +ppCustomField (name,val) = text name <> colon <+> showFreeText val + +ppGenPackageFlags :: [Flag] -> Doc +ppGenPackageFlags flds = vcat [ppFlag f | f <- flds] + +ppFlag :: Flag -> Doc +ppFlag flag@(MkFlag name _ _ _) = + emptyLine $ text "flag" <+> ppFlagName name $+$ nest indentWith fields + where + fields = ppFieldsFiltered flagDefaults flagFieldDescrs flag + +ppLibrary :: (Maybe (CondTree ConfVar [Dependency] Library)) -> Doc +ppLibrary Nothing = empty +ppLibrary (Just condTree) = + emptyLine $ text "library" $+$ nest indentWith (ppCondTree condTree Nothing ppLib) + where + ppLib lib Nothing = ppFieldsFiltered libDefaults libFieldDescrs lib + $$ ppCustomFields (customFieldsBI (libBuildInfo lib)) + ppLib lib (Just plib) = ppDiffFields libFieldDescrs lib plib + $$ ppCustomFields (customFieldsBI (libBuildInfo lib)) + +ppExecutables :: [(String, CondTree ConfVar [Dependency] Executable)] -> Doc +ppExecutables exes = + vcat [emptyLine $ text ("executable " ++ n) + $+$ nest indentWith (ppCondTree condTree Nothing ppExe)| (n,condTree) <- exes] + where + ppExe (Executable _ modulePath' buildInfo') Nothing = + (if 
modulePath' == "" then empty else text "main-is:" <+> text modulePath') + $+$ ppFieldsFiltered binfoDefaults binfoFieldDescrs buildInfo' + $+$ ppCustomFields (customFieldsBI buildInfo') + ppExe (Executable _ modulePath' buildInfo') + (Just (Executable _ modulePath2 buildInfo2)) = + (if modulePath' == "" || modulePath' == modulePath2 + then empty else text "main-is:" <+> text modulePath') + $+$ ppDiffFields binfoFieldDescrs buildInfo' buildInfo2 + $+$ ppCustomFields (customFieldsBI buildInfo') + +ppTestSuites :: [(String, CondTree ConfVar [Dependency] TestSuite)] -> Doc +ppTestSuites suites = + emptyLine $ vcat [ text ("test-suite " ++ n) + $+$ nest indentWith (ppCondTree condTree Nothing ppTestSuite) + | (n,condTree) <- suites] + where + ppTestSuite testsuite Nothing = + maybe empty (\t -> text "type:" <+> disp t) + maybeTestType + $+$ maybe empty (\f -> text "main-is:" <+> text f) + (testSuiteMainIs testsuite) + $+$ maybe empty (\m -> text "test-module:" <+> disp m) + (testSuiteModule testsuite) + $+$ ppFieldsFiltered binfoDefaults binfoFieldDescrs (testBuildInfo testsuite) + $+$ ppCustomFields (customFieldsBI (testBuildInfo testsuite)) + where + maybeTestType | testInterface testsuite == mempty = Nothing + | otherwise = Just (testType testsuite) + + ppTestSuite (TestSuite _ _ buildInfo' _) + (Just (TestSuite _ _ buildInfo2 _)) = + ppDiffFields binfoFieldDescrs buildInfo' buildInfo2 + $+$ ppCustomFields (customFieldsBI buildInfo') + + testSuiteMainIs test = case testInterface test of + TestSuiteExeV10 _ f -> Just f + _ -> Nothing + + testSuiteModule test = case testInterface test of + TestSuiteLibV09 _ m -> Just m + _ -> Nothing + +ppBenchmarks :: [(String, CondTree ConfVar [Dependency] Benchmark)] -> Doc +ppBenchmarks suites = + emptyLine $ vcat [ text ("benchmark " ++ n) + $+$ nest indentWith (ppCondTree condTree Nothing ppBenchmark) + | (n,condTree) <- suites] + where + ppBenchmark benchmark Nothing = + maybe empty (\t -> text "type:" <+> disp t) + 
maybeBenchmarkType + $+$ maybe empty (\f -> text "main-is:" <+> text f) + (benchmarkMainIs benchmark) + $+$ ppFieldsFiltered binfoDefaults binfoFieldDescrs (benchmarkBuildInfo benchmark) + $+$ ppCustomFields (customFieldsBI (benchmarkBuildInfo benchmark)) + where + maybeBenchmarkType | benchmarkInterface benchmark == mempty = Nothing + | otherwise = Just (benchmarkType benchmark) + + ppBenchmark (Benchmark _ _ buildInfo' _) + (Just (Benchmark _ _ buildInfo2 _)) = + ppDiffFields binfoFieldDescrs buildInfo' buildInfo2 + $+$ ppCustomFields (customFieldsBI buildInfo') + + benchmarkMainIs benchmark = case benchmarkInterface benchmark of + BenchmarkExeV10 _ f -> Just f + _ -> Nothing + +ppCondition :: Condition ConfVar -> Doc +ppCondition (Var x) = ppConfVar x +ppCondition (Lit b) = text (show b) +ppCondition (CNot c) = char '!' <> (ppCondition c) +ppCondition (COr c1 c2) = parens (hsep [ppCondition c1, text "||" + <+> ppCondition c2]) +ppCondition (CAnd c1 c2) = parens (hsep [ppCondition c1, text "&&" + <+> ppCondition c2]) +ppConfVar :: ConfVar -> Doc +ppConfVar (OS os) = text "os" <> parens (disp os) +ppConfVar (Arch arch) = text "arch" <> parens (disp arch) +ppConfVar (Flag name) = text "flag" <> parens (ppFlagName name) +ppConfVar (Impl c v) = text "impl" <> parens (disp c <+> disp v) + +ppFlagName :: FlagName -> Doc +ppFlagName (FlagName name) = text name + +ppCondTree :: CondTree ConfVar [Dependency] a -> Maybe a -> (a -> Maybe a -> Doc) -> Doc +ppCondTree ct@(CondNode it _ ifs) mbIt ppIt = + let res = (vcat $ map ppIf ifs) + $+$ ppIt it mbIt + in if isJust mbIt && isEmpty res + then ppCondTree ct Nothing ppIt + else res + where + -- TODO: this ends up printing trailing spaces when combined with nest. 
+ ppIf (c,thenTree,mElseTree) = + ((emptyLine $ text "if" <+> ppCondition c) $$ + nest indentWith (ppCondTree thenTree + (if simplifiedPrinting then (Just it) else Nothing) ppIt)) + $+$ (if isNothing mElseTree + then empty + else text "else" + $$ nest indentWith (ppCondTree (fromJust mElseTree) + (if simplifiedPrinting then (Just it) else Nothing) ppIt)) + +emptyLine :: Doc -> Doc +emptyLine d = text "" $+$ d + + + diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/PackageDescription/Utils.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/PackageDescription/Utils.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/PackageDescription/Utils.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/PackageDescription/Utils.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,23 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.PackageDescription.Utils +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- Common utils used by modules under Distribution.PackageDescription.*. + +module Distribution.PackageDescription.Utils ( + cabalBug, userBug + ) where + +-- ---------------------------------------------------------------------------- +-- Exception and logging utils + +userBug :: String -> a +userBug msg = error $ msg ++ ". This is a bug in your .cabal file." + +cabalBug :: String -> a +cabalBug msg = error $ msg ++ ". 
This is possibly a bug in Cabal.\n" + ++ "Please report it to the developers: " + ++ "https://github.com/haskell/cabal/issues/new" diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/PackageDescription.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/PackageDescription.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/PackageDescription.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/PackageDescription.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,1157 @@ +{-# LANGUAGE CPP #-} +{-# LANGUAGE DeriveDataTypeable #-} +{-# LANGUAGE DeriveGeneric #-} + +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.PackageDescription +-- Copyright : Isaac Jones 2003-2005 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This defines the data structure for the @.cabal@ file format. There are +-- several parts to this structure. It has top level info and then 'Library', +-- 'Executable', 'TestSuite', and 'Benchmark' sections each of which have +-- associated 'BuildInfo' data that's used to build the library, exe, test, or +-- benchmark. To further complicate things there is both a 'PackageDescription' +-- and a 'GenericPackageDescription'. This distinction relates to cabal +-- configurations. When we initially read a @.cabal@ file we get a +-- 'GenericPackageDescription' which has all the conditional sections. +-- Before actually building a package we have to decide +-- on each conditional. Once we've done that we get a 'PackageDescription'. +-- It was done this way initially to avoid breaking too much stuff when the +-- feature was introduced. It could probably do with being rationalised at some +-- point to make it simpler. 
+ +module Distribution.PackageDescription ( + -- * Package descriptions + PackageDescription(..), + emptyPackageDescription, + specVersion, + descCabalVersion, + BuildType(..), + knownBuildTypes, + + -- ** Renaming + ModuleRenaming(..), + defaultRenaming, + lookupRenaming, + + -- ** Libraries + Library(..), + ModuleReexport(..), + emptyLibrary, + withLib, + hasLibs, + libModules, + + -- ** Executables + Executable(..), + emptyExecutable, + withExe, + hasExes, + exeModules, + + -- * Tests + TestSuite(..), + TestSuiteInterface(..), + TestType(..), + testType, + knownTestTypes, + emptyTestSuite, + hasTests, + withTest, + testModules, + enabledTests, + + -- * Benchmarks + Benchmark(..), + BenchmarkInterface(..), + BenchmarkType(..), + benchmarkType, + knownBenchmarkTypes, + emptyBenchmark, + hasBenchmarks, + withBenchmark, + benchmarkModules, + enabledBenchmarks, + + -- * Build information + BuildInfo(..), + emptyBuildInfo, + allBuildInfo, + allLanguages, + allExtensions, + usedExtensions, + hcOptions, + hcProfOptions, + hcSharedOptions, + + -- ** Supplementary build information + HookedBuildInfo, + emptyHookedBuildInfo, + updatePackageDescription, + + -- * package configuration + GenericPackageDescription(..), + Flag(..), FlagName(..), FlagAssignment, + CondTree(..), ConfVar(..), Condition(..), + + -- * Source repositories + SourceRepo(..), + RepoKind(..), + RepoType(..), + knownRepoTypes, + ) where + +import Distribution.Compat.Binary (Binary) +import Data.Data (Data) +import Data.List (nub, intercalate) +import Data.Maybe (fromMaybe, maybeToList) +#if __GLASGOW_HASKELL__ < 710 +import Data.Monoid (Monoid(mempty, mappend)) +#endif +import Data.Typeable ( Typeable ) +import Control.Monad (MonadPlus(mplus)) +import GHC.Generics (Generic) +import Text.PrettyPrint as Disp +import qualified Distribution.Compat.ReadP as Parse +import Distribution.Compat.ReadP ((<++)) +import qualified Data.Char as Char (isAlphaNum, isDigit, toLower) +import qualified Data.Map as Map 
+import Data.Map (Map) + +import Distribution.Package + ( PackageName(PackageName), PackageIdentifier(PackageIdentifier) + , Dependency, Package(..), PackageName, packageName ) +import Distribution.ModuleName ( ModuleName ) +import Distribution.Version + ( Version(Version), VersionRange, anyVersion, orLaterVersion + , asVersionIntervals, LowerBound(..) ) +import Distribution.License (License(UnspecifiedLicense)) +import Distribution.Compiler (CompilerFlavor) +import Distribution.System (OS, Arch) +import Distribution.Text + ( Text(..), display ) +import Language.Haskell.Extension + ( Language, Extension ) + +-- ----------------------------------------------------------------------------- +-- The PackageDescription type + +-- | This data type is the internal representation of the file @pkg.cabal@. +-- It contains two kinds of information about the package: information +-- which is needed for all packages, such as the package name and version, and +-- information which is needed for the simple build system only, such as +-- the compiler options and library name. +-- +data PackageDescription + = PackageDescription { + -- the following are required by all packages: + package :: PackageIdentifier, + license :: License, + licenseFiles :: [FilePath], + copyright :: String, + maintainer :: String, + author :: String, + stability :: String, + testedWith :: [(CompilerFlavor,VersionRange)], + homepage :: String, + pkgUrl :: String, + bugReports :: String, + sourceRepos :: [SourceRepo], + synopsis :: String, -- ^A one-line summary of this package + description :: String, -- ^A more verbose description of this package + category :: String, + customFieldsPD :: [(String,String)], -- ^Custom fields starting + -- with x-, stored in a + -- simple assoc-list. + -- | YOU PROBABLY DON'T WANT TO USE THIS FIELD. This field is + -- special! 
Depending on how far along processing the + -- PackageDescription we are, the contents of this field are + -- either nonsense, or the collected dependencies of *all* the + -- components in this package. buildDepends is initialized by + -- 'finalizePackageDescription' and 'flattenPackageDescription'; + -- prior to that, dependency info is stored in the 'CondTree' + -- built around a 'GenericPackageDescription'. When this + -- resolution is done, dependency info is written to the inner + -- 'BuildInfo' and this field. This is all horrible, and #2066 + -- tracks progress to get rid of this field. + buildDepends :: [Dependency], + -- | The version of the Cabal spec that this package description uses. + -- For historical reasons this is specified with a version range but + -- only ranges of the form @>= v@ make sense. We are in the process of + -- transitioning to specifying just a single version, not a range. + specVersionRaw :: Either Version VersionRange, + buildType :: Maybe BuildType, + -- components + library :: Maybe Library, + executables :: [Executable], + testSuites :: [TestSuite], + benchmarks :: [Benchmark], + dataFiles :: [FilePath], + dataDir :: FilePath, + extraSrcFiles :: [FilePath], + extraTmpFiles :: [FilePath], + extraDocFiles :: [FilePath] + } + deriving (Generic, Show, Read, Eq, Typeable, Data) + +instance Binary PackageDescription + +instance Package PackageDescription where + packageId = package + +-- | The version of the Cabal spec that this package should be interpreted +-- against. +-- +-- Historically we used a version range but we are switching to using a single +-- version. Currently we accept either. This function converts into a single +-- version by ignoring upper bounds in the version range. 
+-- +specVersion :: PackageDescription -> Version +specVersion pkg = case specVersionRaw pkg of + Left version -> version + Right versionRange -> case asVersionIntervals versionRange of + [] -> Version [0] [] + ((LowerBound version _, _):_) -> version + +-- | The range of versions of the Cabal tools that this package is intended to +-- work with. +-- +-- This function is deprecated and should not be used for new purposes, only to +-- support old packages that rely on the old interpretation. +-- +descCabalVersion :: PackageDescription -> VersionRange +descCabalVersion pkg = case specVersionRaw pkg of + Left version -> orLaterVersion version + Right versionRange -> versionRange +{-# DEPRECATED descCabalVersion "Use specVersion instead" #-} + +emptyPackageDescription :: PackageDescription +emptyPackageDescription + = PackageDescription { + package = PackageIdentifier (PackageName "") + (Version [] []), + license = UnspecifiedLicense, + licenseFiles = [], + specVersionRaw = Right anyVersion, + buildType = Nothing, + copyright = "", + maintainer = "", + author = "", + stability = "", + testedWith = [], + buildDepends = [], + homepage = "", + pkgUrl = "", + bugReports = "", + sourceRepos = [], + synopsis = "", + description = "", + category = "", + customFieldsPD = [], + library = Nothing, + executables = [], + testSuites = [], + benchmarks = [], + dataFiles = [], + dataDir = "", + extraSrcFiles = [], + extraTmpFiles = [], + extraDocFiles = [] + } + +-- | The type of build system used by this package. +data BuildType + = Simple -- ^ calls @Distribution.Simple.defaultMain@ + | Configure -- ^ calls @Distribution.Simple.defaultMainWithHooks defaultUserHooks@, + -- which invokes @configure@ to generate additional build + -- information used by later phases. 
+ | Make -- ^ calls @Distribution.Make.defaultMain@ + | Custom -- ^ uses user-supplied @Setup.hs@ or @Setup.lhs@ (default) + | UnknownBuildType String + -- ^ a package that uses an unknown build type cannot actually + -- be built. Doing it this way rather than just giving a + -- parse error means we get better error messages and allows + -- you to inspect the rest of the package description. + deriving (Generic, Show, Read, Eq, Typeable, Data) + +instance Binary BuildType + +knownBuildTypes :: [BuildType] +knownBuildTypes = [Simple, Configure, Make, Custom] + +instance Text BuildType where + disp (UnknownBuildType other) = Disp.text other + disp other = Disp.text (show other) + + parse = do + name <- Parse.munch1 Char.isAlphaNum + return $ case name of + "Simple" -> Simple + "Configure" -> Configure + "Custom" -> Custom + "Make" -> Make + _ -> UnknownBuildType name + +-- --------------------------------------------------------------------------- +-- Module renaming + +-- | Renaming applied to the modules provided by a package. +-- The boolean indicates whether or not to also include all of the +-- original names of modules. Thus, @ModuleRenaming False []@ is +-- "don't expose any modules, and @ModuleRenaming True [("Data.Bool", "Bool")]@ +-- is, "expose all modules, but also expose @Data.Bool@ as @Bool@". +-- +data ModuleRenaming = ModuleRenaming Bool [(ModuleName, ModuleName)] + deriving (Show, Read, Eq, Ord, Typeable, Data, Generic) + +defaultRenaming :: ModuleRenaming +defaultRenaming = ModuleRenaming True [] + +lookupRenaming :: Package pkg => pkg -> Map PackageName ModuleRenaming -> ModuleRenaming +lookupRenaming pkg rns = + Map.findWithDefault + (error ("lookupRenaming: missing renaming for " ++ display (packageName pkg))) + (packageName pkg) rns + +instance Binary ModuleRenaming where + +instance Monoid ModuleRenaming where + ModuleRenaming b rns `mappend` ModuleRenaming b' rns' + = ModuleRenaming (b || b') (rns ++ rns') -- ToDo: dedupe? 
+ mempty = ModuleRenaming False [] + +-- NB: parentheses are mandatory, because later we may extend this syntax +-- to allow "hiding (A, B)" or other modifier words. +instance Text ModuleRenaming where + disp (ModuleRenaming True []) = Disp.empty + disp (ModuleRenaming b vs) = (if b then text "with" else Disp.empty) <+> dispRns + where dispRns = Disp.parens + (Disp.hsep + (Disp.punctuate Disp.comma (map dispEntry vs))) + dispEntry (orig, new) + | orig == new = disp orig + | otherwise = disp orig <+> text "as" <+> disp new + + parse = do Parse.string "with" >> Parse.skipSpaces + fmap (ModuleRenaming True) parseRns + <++ fmap (ModuleRenaming False) parseRns + <++ return (ModuleRenaming True []) + where parseRns = do + rns <- Parse.between (Parse.char '(') (Parse.char ')') parseList + Parse.skipSpaces + return rns + parseList = + Parse.sepBy parseEntry (Parse.char ',' >> Parse.skipSpaces) + parseEntry :: Parse.ReadP r (ModuleName, ModuleName) + parseEntry = do + orig <- parse + Parse.skipSpaces + (do _ <- Parse.string "as" + Parse.skipSpaces + new <- parse + Parse.skipSpaces + return (orig, new) + <++ + return (orig, orig)) + +-- --------------------------------------------------------------------------- +-- The Library type + +data Library = Library { + exposedModules :: [ModuleName], + reexportedModules :: [ModuleReexport], + requiredSignatures:: [ModuleName], -- ^ What sigs need implementations? + exposedSignatures:: [ModuleName], -- ^ What sigs are visible to users? + libExposed :: Bool, -- ^ Is the lib to be exposed by default? 
+ libBuildInfo :: BuildInfo + } + deriving (Generic, Show, Eq, Read, Typeable, Data) + +instance Binary Library + +instance Monoid Library where + mempty = Library { + exposedModules = mempty, + reexportedModules = mempty, + requiredSignatures = mempty, + exposedSignatures = mempty, + libExposed = True, + libBuildInfo = mempty + } + mappend a b = Library { + exposedModules = combine exposedModules, + reexportedModules = combine reexportedModules, + requiredSignatures = combine requiredSignatures, + exposedSignatures = combine exposedSignatures, + libExposed = libExposed a && libExposed b, -- so False propagates + libBuildInfo = combine libBuildInfo + } + where combine field = field a `mappend` field b + +emptyLibrary :: Library +emptyLibrary = mempty + +-- |does this package have any libraries? +hasLibs :: PackageDescription -> Bool +hasLibs p = maybe False (buildable . libBuildInfo) (library p) + +-- |'Maybe' version of 'hasLibs' +maybeHasLibs :: PackageDescription -> Maybe Library +maybeHasLibs p = + library p >>= \lib -> if buildable (libBuildInfo lib) + then Just lib + else Nothing + +-- |If the package description has a library section, call the given +-- function with the library build info as argument. +withLib :: PackageDescription -> (Library -> IO ()) -> IO () +withLib pkg_descr f = + maybe (return ()) f (maybeHasLibs pkg_descr) + +-- | Get all the module names from the library (exposed and internal modules) +-- which need to be compiled. (This does not include reexports, which +-- do not need to be compiled.) 
+libModules :: Library -> [ModuleName] +libModules lib = exposedModules lib + ++ otherModules (libBuildInfo lib) + ++ exposedSignatures lib + ++ requiredSignatures lib + +-- ----------------------------------------------------------------------------- +-- Module re-exports + +data ModuleReexport = ModuleReexport { + moduleReexportOriginalPackage :: Maybe PackageName, + moduleReexportOriginalName :: ModuleName, + moduleReexportName :: ModuleName + } + deriving (Eq, Generic, Read, Show, Typeable, Data) + +instance Binary ModuleReexport + +instance Text ModuleReexport where + disp (ModuleReexport mpkgname origname newname) = + maybe Disp.empty (\pkgname -> disp pkgname <> Disp.char ':') mpkgname + <> disp origname + <+> if newname == origname + then Disp.empty + else Disp.text "as" <+> disp newname + + parse = do + mpkgname <- Parse.option Nothing $ do + pkgname <- parse + _ <- Parse.char ':' + return (Just pkgname) + origname <- parse + newname <- Parse.option origname $ do + Parse.skipSpaces + _ <- Parse.string "as" + Parse.skipSpaces + parse + return (ModuleReexport mpkgname origname newname) + +-- --------------------------------------------------------------------------- +-- The Executable type + +data Executable = Executable { + exeName :: String, + modulePath :: FilePath, + buildInfo :: BuildInfo + } + deriving (Generic, Show, Read, Eq, Typeable, Data) + +instance Binary Executable + +instance Monoid Executable where + mempty = Executable { + exeName = mempty, + modulePath = mempty, + buildInfo = mempty + } + mappend a b = Executable{ + exeName = combine' exeName, + modulePath = combine modulePath, + buildInfo = combine buildInfo + } + where combine field = field a `mappend` field b + combine' field = case (field a, field b) of + ("","") -> "" + ("", x) -> x + (x, "") -> x + (x, y) -> error $ "Ambiguous values for executable field: '" + ++ x ++ "' and '" ++ y ++ "'" + +emptyExecutable :: Executable +emptyExecutable = mempty + +-- |does this package have any 
executables? +hasExes :: PackageDescription -> Bool +hasExes p = any (buildable . buildInfo) (executables p) + +-- | Perform the action on each buildable 'Executable' in the package +-- description. +withExe :: PackageDescription -> (Executable -> IO ()) -> IO () +withExe pkg_descr f = + sequence_ [f exe | exe <- executables pkg_descr, buildable (buildInfo exe)] + +-- | Get all the module names from an exe +exeModules :: Executable -> [ModuleName] +exeModules exe = otherModules (buildInfo exe) + +-- --------------------------------------------------------------------------- +-- The TestSuite type + +-- | A \"test-suite\" stanza in a cabal file. +-- +data TestSuite = TestSuite { + testName :: String, + testInterface :: TestSuiteInterface, + testBuildInfo :: BuildInfo, + testEnabled :: Bool + -- TODO: By having a 'testEnabled' field in the PackageDescription, we + -- are mixing build status information (i.e., arguments to 'configure') + -- with static package description information. This is undesirable, but + -- a better solution is waiting on the next overhaul to the + -- GenericPackageDescription -> PackageDescription resolution process. + } + deriving (Generic, Show, Read, Eq, Typeable, Data) + +instance Binary TestSuite + +-- | The test suite interfaces that are currently defined. Each test suite must +-- specify which interface it supports. +-- +-- More interfaces may be defined in future, either new revisions or totally +-- new interfaces. +-- +data TestSuiteInterface = + + -- | Test interface \"exitcode-stdio-1.0\". The test-suite takes the form + -- of an executable. It returns a zero exit code for success, non-zero for + -- failure. The stdout and stderr channels may be logged. It takes no + -- command line parameters and nothing on stdin. + -- + TestSuiteExeV10 Version FilePath + + -- | Test interface \"detailed-0.9\". The test-suite takes the form of a + -- library containing a designated module that exports \"tests :: [Test]\". 
+ -- + | TestSuiteLibV09 Version ModuleName + + -- | A test suite that does not conform to one of the above interfaces for + -- the given reason (e.g. unknown test type). + -- + | TestSuiteUnsupported TestType + deriving (Eq, Generic, Read, Show, Typeable, Data) + +instance Binary TestSuiteInterface + +instance Monoid TestSuite where + mempty = TestSuite { + testName = mempty, + testInterface = mempty, + testBuildInfo = mempty, + testEnabled = False + } + + mappend a b = TestSuite { + testName = combine' testName, + testInterface = combine testInterface, + testBuildInfo = combine testBuildInfo, + testEnabled = testEnabled a || testEnabled b + } + where combine field = field a `mappend` field b + combine' f = case (f a, f b) of + ("", x) -> x + (x, "") -> x + (x, y) -> error "Ambiguous values for test field: '" + ++ x ++ "' and '" ++ y ++ "'" + +instance Monoid TestSuiteInterface where + mempty = TestSuiteUnsupported (TestTypeUnknown mempty (Version [] [])) + mappend a (TestSuiteUnsupported _) = a + mappend _ b = b + +emptyTestSuite :: TestSuite +emptyTestSuite = mempty + +-- | Does this package have any test suites? +hasTests :: PackageDescription -> Bool +hasTests = any (buildable . testBuildInfo) . testSuites + +-- | Get all the enabled test suites from a package. +enabledTests :: PackageDescription -> [TestSuite] +enabledTests = filter testEnabled . testSuites + +-- | Perform an action on each buildable 'TestSuite' in a package. +withTest :: PackageDescription -> (TestSuite -> IO ()) -> IO () +withTest pkg_descr f = + mapM_ f $ filter (buildable . testBuildInfo) $ enabledTests pkg_descr + +-- | Get all the module names from a test suite. +testModules :: TestSuite -> [ModuleName] +testModules test = (case testInterface test of + TestSuiteLibV09 _ m -> [m] + _ -> []) + ++ otherModules (testBuildInfo test) + +-- | The \"test-type\" field in the test suite stanza. 
+-- +data TestType = TestTypeExe Version -- ^ \"type: exitcode-stdio-x.y\" + | TestTypeLib Version -- ^ \"type: detailed-x.y\" + | TestTypeUnknown String Version -- ^ Some unknown test type e.g. \"type: foo\" + deriving (Generic, Show, Read, Eq, Typeable, Data) + +instance Binary TestType + +knownTestTypes :: [TestType] +knownTestTypes = [ TestTypeExe (Version [1,0] []) + , TestTypeLib (Version [0,9] []) ] + +stdParse :: Text ver => (ver -> String -> res) -> Parse.ReadP r res +stdParse f = do + cs <- Parse.sepBy1 component (Parse.char '-') + _ <- Parse.char '-' + ver <- parse + let name = intercalate "-" cs + return $! f ver (lowercase name) + where + component = do + cs <- Parse.munch1 Char.isAlphaNum + if all Char.isDigit cs then Parse.pfail else return cs + -- each component must contain an alphabetic character, to avoid + -- ambiguity in identifiers like foo-1 (the 1 is the version number). + +instance Text TestType where + disp (TestTypeExe ver) = text "exitcode-stdio-" <> disp ver + disp (TestTypeLib ver) = text "detailed-" <> disp ver + disp (TestTypeUnknown name ver) = text name <> char '-' <> disp ver + + parse = stdParse $ \ver name -> case name of + "exitcode-stdio" -> TestTypeExe ver + "detailed" -> TestTypeLib ver + _ -> TestTypeUnknown name ver + + +testType :: TestSuite -> TestType +testType test = case testInterface test of + TestSuiteExeV10 ver _ -> TestTypeExe ver + TestSuiteLibV09 ver _ -> TestTypeLib ver + TestSuiteUnsupported testtype -> testtype + +-- --------------------------------------------------------------------------- +-- The Benchmark type + +-- | A \"benchmark\" stanza in a cabal file. +-- +data Benchmark = Benchmark { + benchmarkName :: String, + benchmarkInterface :: BenchmarkInterface, + benchmarkBuildInfo :: BuildInfo, + benchmarkEnabled :: Bool + -- TODO: See TODO for 'testEnabled'. + } + deriving (Generic, Show, Read, Eq, Typeable, Data) + +instance Binary Benchmark + +-- | The benchmark interfaces that are currently defined. 
Each +-- benchmark must specify which interface it supports. +-- +-- More interfaces may be defined in future, either new revisions or +-- totally new interfaces. +-- +data BenchmarkInterface = + + -- | Benchmark interface \"exitcode-stdio-1.0\". The benchmark + -- takes the form of an executable. It returns a zero exit code + -- for success, non-zero for failure. The stdout and stderr + -- channels may be logged. It takes no command line parameters + -- and nothing on stdin. + -- + BenchmarkExeV10 Version FilePath + + -- | A benchmark that does not conform to one of the above + -- interfaces for the given reason (e.g. unknown benchmark type). + -- + | BenchmarkUnsupported BenchmarkType + deriving (Eq, Generic, Read, Show, Typeable, Data) + +instance Binary BenchmarkInterface + +instance Monoid Benchmark where + mempty = Benchmark { + benchmarkName = mempty, + benchmarkInterface = mempty, + benchmarkBuildInfo = mempty, + benchmarkEnabled = False + } + + mappend a b = Benchmark { + benchmarkName = combine' benchmarkName, + benchmarkInterface = combine benchmarkInterface, + benchmarkBuildInfo = combine benchmarkBuildInfo, + benchmarkEnabled = benchmarkEnabled a || benchmarkEnabled b + } + where combine field = field a `mappend` field b + combine' f = case (f a, f b) of + ("", x) -> x + (x, "") -> x + (x, y) -> error "Ambiguous values for benchmark field: '" + ++ x ++ "' and '" ++ y ++ "'" + +instance Monoid BenchmarkInterface where + mempty = BenchmarkUnsupported (BenchmarkTypeUnknown mempty (Version [] [])) + mappend a (BenchmarkUnsupported _) = a + mappend _ b = b + +emptyBenchmark :: Benchmark +emptyBenchmark = mempty + +-- | Does this package have any benchmarks? +hasBenchmarks :: PackageDescription -> Bool +hasBenchmarks = any (buildable . benchmarkBuildInfo) . benchmarks + +-- | Get all the enabled benchmarks from a package. +enabledBenchmarks :: PackageDescription -> [Benchmark] +enabledBenchmarks = filter benchmarkEnabled . 
benchmarks + +-- | Perform an action on each buildable 'Benchmark' in a package. +withBenchmark :: PackageDescription -> (Benchmark -> IO ()) -> IO () +withBenchmark pkg_descr f = + mapM_ f $ filter (buildable . benchmarkBuildInfo) $ enabledBenchmarks pkg_descr + +-- | Get all the module names from a benchmark. +benchmarkModules :: Benchmark -> [ModuleName] +benchmarkModules benchmark = otherModules (benchmarkBuildInfo benchmark) + +-- | The \"benchmark-type\" field in the benchmark stanza. +-- +data BenchmarkType = BenchmarkTypeExe Version + -- ^ \"type: exitcode-stdio-x.y\" + | BenchmarkTypeUnknown String Version + -- ^ Some unknown benchmark type e.g. \"type: foo\" + deriving (Generic, Show, Read, Eq, Typeable, Data) + +instance Binary BenchmarkType + +knownBenchmarkTypes :: [BenchmarkType] +knownBenchmarkTypes = [ BenchmarkTypeExe (Version [1,0] []) ] + +instance Text BenchmarkType where + disp (BenchmarkTypeExe ver) = text "exitcode-stdio-" <> disp ver + disp (BenchmarkTypeUnknown name ver) = text name <> char '-' <> disp ver + + parse = stdParse $ \ver name -> case name of + "exitcode-stdio" -> BenchmarkTypeExe ver + _ -> BenchmarkTypeUnknown name ver + + +benchmarkType :: Benchmark -> BenchmarkType +benchmarkType benchmark = case benchmarkInterface benchmark of + BenchmarkExeV10 ver _ -> BenchmarkTypeExe ver + BenchmarkUnsupported benchmarktype -> benchmarktype + +-- --------------------------------------------------------------------------- +-- The BuildInfo type + +-- Consider refactoring into executable and library versions. 
+data BuildInfo = BuildInfo { + buildable :: Bool, -- ^ component is buildable here + buildTools :: [Dependency], -- ^ tools needed to build this bit + cppOptions :: [String], -- ^ options for pre-processing Haskell code + ccOptions :: [String], -- ^ options for C compiler + ldOptions :: [String], -- ^ options for linker + pkgconfigDepends :: [Dependency], -- ^ pkg-config packages that are used + frameworks :: [String], -- ^support frameworks for Mac OS X + cSources :: [FilePath], + jsSources :: [FilePath], + hsSourceDirs :: [FilePath], -- ^ where to look for the Haskell module hierarchy + otherModules :: [ModuleName], -- ^ non-exposed or non-main modules + + defaultLanguage :: Maybe Language,-- ^ language used when not explicitly specified + otherLanguages :: [Language], -- ^ other languages used within the package + defaultExtensions :: [Extension], -- ^ language extensions used by all modules + otherExtensions :: [Extension], -- ^ other language extensions used within the package + oldExtensions :: [Extension], -- ^ the old extensions field, treated same as 'defaultExtensions' + + extraLibs :: [String], -- ^ what libraries to link with when compiling a program that uses your package + extraGHCiLibs :: [String], -- ^ if present, overrides extraLibs when package is loaded with GHCi. + extraLibDirs :: [String], + includeDirs :: [FilePath], -- ^directories to find .h files + includes :: [FilePath], -- ^ The .h files to be found in includeDirs + installIncludes :: [FilePath], -- ^ .h files to install with the package + options :: [(CompilerFlavor,[String])], + profOptions :: [(CompilerFlavor,[String])], + sharedOptions :: [(CompilerFlavor,[String])], + customFieldsBI :: [(String,String)], -- ^Custom fields starting + -- with x-, stored in a + -- simple assoc-list. 
+ targetBuildDepends :: [Dependency], -- ^ Dependencies specific to a library or executable target + targetBuildRenaming :: Map PackageName ModuleRenaming + } + deriving (Generic, Show, Read, Eq, Typeable, Data) + +instance Binary BuildInfo + +instance Monoid BuildInfo where + mempty = BuildInfo { + buildable = True, + buildTools = [], + cppOptions = [], + ccOptions = [], + ldOptions = [], + pkgconfigDepends = [], + frameworks = [], + cSources = [], + jsSources = [], + hsSourceDirs = [], + otherModules = [], + defaultLanguage = Nothing, + otherLanguages = [], + defaultExtensions = [], + otherExtensions = [], + oldExtensions = [], + extraLibs = [], + extraGHCiLibs = [], + extraLibDirs = [], + includeDirs = [], + includes = [], + installIncludes = [], + options = [], + profOptions = [], + sharedOptions = [], + customFieldsBI = [], + targetBuildDepends = [], + targetBuildRenaming = Map.empty + } + mappend a b = BuildInfo { + buildable = buildable a && buildable b, + buildTools = combine buildTools, + cppOptions = combine cppOptions, + ccOptions = combine ccOptions, + ldOptions = combine ldOptions, + pkgconfigDepends = combine pkgconfigDepends, + frameworks = combineNub frameworks, + cSources = combineNub cSources, + jsSources = combineNub jsSources, + hsSourceDirs = combineNub hsSourceDirs, + otherModules = combineNub otherModules, + defaultLanguage = combineMby defaultLanguage, + otherLanguages = combineNub otherLanguages, + defaultExtensions = combineNub defaultExtensions, + otherExtensions = combineNub otherExtensions, + oldExtensions = combineNub oldExtensions, + extraLibs = combine extraLibs, + extraGHCiLibs = combine extraGHCiLibs, + extraLibDirs = combineNub extraLibDirs, + includeDirs = combineNub includeDirs, + includes = combineNub includes, + installIncludes = combineNub installIncludes, + options = combine options, + profOptions = combine profOptions, + sharedOptions = combine sharedOptions, + customFieldsBI = combine customFieldsBI, + targetBuildDepends = 
combineNub targetBuildDepends, + targetBuildRenaming = combineMap targetBuildRenaming + } + where + combine field = field a `mappend` field b + combineNub field = nub (combine field) + combineMby field = field b `mplus` field a + combineMap field = Map.unionWith mappend (field a) (field b) + +emptyBuildInfo :: BuildInfo +emptyBuildInfo = mempty + +-- | The 'BuildInfo' for the library (if there is one and it's buildable), and +-- all buildable executables, test suites and benchmarks. Useful for gathering +-- dependencies. +allBuildInfo :: PackageDescription -> [BuildInfo] +allBuildInfo pkg_descr = [ bi | Just lib <- [library pkg_descr] + , let bi = libBuildInfo lib + , buildable bi ] + ++ [ bi | exe <- executables pkg_descr + , let bi = buildInfo exe + , buildable bi ] + ++ [ bi | tst <- testSuites pkg_descr + , let bi = testBuildInfo tst + , buildable bi + , testEnabled tst ] + ++ [ bi | tst <- benchmarks pkg_descr + , let bi = benchmarkBuildInfo tst + , buildable bi + , benchmarkEnabled tst ] + --FIXME: many of the places where this is used, we actually want to look at + -- unbuildable bits too, probably need separate functions + +-- | The 'Language's used by this component +-- +allLanguages :: BuildInfo -> [Language] +allLanguages bi = maybeToList (defaultLanguage bi) + ++ otherLanguages bi + +-- | The 'Extension's that are used somewhere by this component +-- +allExtensions :: BuildInfo -> [Extension] +allExtensions bi = usedExtensions bi + ++ otherExtensions bi + +-- | The 'Extensions' that are used by all modules in this component +-- +usedExtensions :: BuildInfo -> [Extension] +usedExtensions bi = oldExtensions bi + ++ defaultExtensions bi + +type HookedBuildInfo = (Maybe BuildInfo, [(String, BuildInfo)]) + +emptyHookedBuildInfo :: HookedBuildInfo +emptyHookedBuildInfo = (Nothing, []) + +-- |Select options for a particular Haskell compiler. 
+hcOptions :: CompilerFlavor -> BuildInfo -> [String] +hcOptions = lookupHcOptions options + +hcProfOptions :: CompilerFlavor -> BuildInfo -> [String] +hcProfOptions = lookupHcOptions profOptions + +hcSharedOptions :: CompilerFlavor -> BuildInfo -> [String] +hcSharedOptions = lookupHcOptions sharedOptions + +lookupHcOptions :: (BuildInfo -> [(CompilerFlavor,[String])]) + -> CompilerFlavor -> BuildInfo -> [String] +lookupHcOptions f hc bi = [ opt | (hc',opts) <- f bi + , hc' == hc + , opt <- opts ] + +-- ------------------------------------------------------------ +-- * Source repos +-- ------------------------------------------------------------ + +-- | Information about the source revision control system for a package. +-- +-- When specifying a repo it is useful to know the meaning or intention of the +-- information as doing so enables automation. There are two obvious common +-- purposes: one is to find the repo for the latest development version, the +-- other is to find the repo for this specific release. The 'ReopKind' +-- specifies which one we mean (or another custom one). +-- +-- A package can specify one or the other kind or both. Most will specify just +-- a head repo but some may want to specify a repo to reconstruct the sources +-- for this package release. +-- +-- The required information is the 'RepoType' which tells us if it's using +-- 'Darcs', 'Git' for example. The 'repoLocation' and other details are +-- interpreted according to the repo type. +-- +data SourceRepo = SourceRepo { + -- | The kind of repo. This field is required. + repoKind :: RepoKind, + + -- | The type of the source repository system for this repo, eg 'Darcs' or + -- 'Git'. This field is required. + repoType :: Maybe RepoType, + + -- | The location of the repository. For most 'RepoType's this is a URL. + -- This field is required. 
+ repoLocation :: Maybe String, + + -- | 'CVS' can put multiple \"modules\" on one server and requires a + -- module name in addition to the location to identify a particular repo. + -- Logically this is part of the location but unfortunately has to be + -- specified separately. This field is required for the 'CVS' 'RepoType' and + -- should not be given otherwise. + repoModule :: Maybe String, + + -- | The name or identifier of the branch, if any. Many source control + -- systems have the notion of multiple branches in a repo that exist in the + -- same location. For example 'Git' and 'CVS' use this while systems like + -- 'Darcs' use different locations for different branches. This field is + -- optional but should be used if necessary to identify the sources, + -- especially for the 'RepoThis' repo kind. + repoBranch :: Maybe String, + + -- | The tag identify a particular state of the repository. This should be + -- given for the 'RepoThis' repo kind and not for 'RepoHead' kind. + -- + repoTag :: Maybe String, + + -- | Some repositories contain multiple projects in different subdirectories + -- This field specifies the subdirectory where this packages sources can be + -- found, eg the subdirectory containing the @.cabal@ file. It is interpreted + -- relative to the root of the repository. This field is optional. If not + -- given the default is \".\" ie no subdirectory. + repoSubdir :: Maybe FilePath +} + deriving (Eq, Generic, Read, Show, Typeable, Data) + +instance Binary SourceRepo + +-- | What this repo info is for, what it represents. +-- +data RepoKind = + -- | The repository for the \"head\" or development version of the project. + -- This repo is where we should track the latest development activity or + -- the usual repo people should get to contribute patches. + RepoHead + + -- | The repository containing the sources for this exact package version + -- or release. 
For this kind of repo a tag should be given to give enough + -- information to re-create the exact sources. + | RepoThis + + | RepoKindUnknown String + deriving (Eq, Generic, Ord, Read, Show, Typeable, Data) + +instance Binary RepoKind + +-- | An enumeration of common source control systems. The fields used in the +-- 'SourceRepo' depend on the type of repo. The tools and methods used to +-- obtain and track the repo depend on the repo type. +-- +data RepoType = Darcs | Git | SVN | CVS + | Mercurial | GnuArch | Bazaar | Monotone + | OtherRepoType String + deriving (Eq, Generic, Ord, Read, Show, Typeable, Data) + +instance Binary RepoType + +knownRepoTypes :: [RepoType] +knownRepoTypes = [Darcs, Git, SVN, CVS + ,Mercurial, GnuArch, Bazaar, Monotone] + +repoTypeAliases :: RepoType -> [String] +repoTypeAliases Bazaar = ["bzr"] +repoTypeAliases Mercurial = ["hg"] +repoTypeAliases GnuArch = ["arch"] +repoTypeAliases _ = [] + +instance Text RepoKind where + disp RepoHead = Disp.text "head" + disp RepoThis = Disp.text "this" + disp (RepoKindUnknown other) = Disp.text other + + parse = do + name <- ident + return $ case lowercase name of + "head" -> RepoHead + "this" -> RepoThis + _ -> RepoKindUnknown name + +instance Text RepoType where + disp (OtherRepoType other) = Disp.text other + disp other = Disp.text (lowercase (show other)) + parse = fmap classifyRepoType ident + +classifyRepoType :: String -> RepoType +classifyRepoType s = + fromMaybe (OtherRepoType s) $ lookup (lowercase s) repoTypeMap + where + repoTypeMap = [ (name, repoType') + | repoType' <- knownRepoTypes + , name <- display repoType' : repoTypeAliases repoType' ] + +ident :: Parse.ReadP r String +ident = Parse.munch1 (\c -> Char.isAlphaNum c || c == '_' || c == '-') + +lowercase :: String -> String +lowercase = map Char.toLower + +-- ------------------------------------------------------------ +-- * Utils +-- ------------------------------------------------------------ + +updatePackageDescription :: 
HookedBuildInfo -> PackageDescription -> PackageDescription +updatePackageDescription (mb_lib_bi, exe_bi) p + = p{ executables = updateExecutables exe_bi (executables p) + , library = updateLibrary mb_lib_bi (library p) + } + where + updateLibrary :: Maybe BuildInfo -> Maybe Library -> Maybe Library + updateLibrary (Just bi) (Just lib) = Just (lib{libBuildInfo = bi `mappend` libBuildInfo lib}) + updateLibrary Nothing mb_lib = mb_lib + updateLibrary (Just _) Nothing = Nothing + + updateExecutables :: [(String, BuildInfo)] -- ^[(exeName, new buildinfo)] + -> [Executable] -- ^list of executables to update + -> [Executable] -- ^list with exeNames updated + updateExecutables exe_bi' executables' = foldr updateExecutable executables' exe_bi' + + updateExecutable :: (String, BuildInfo) -- ^(exeName, new buildinfo) + -> [Executable] -- ^list of executables to update + -> [Executable] -- ^list with exeName updated + updateExecutable _ [] = [] + updateExecutable exe_bi'@(name,bi) (exe:exes) + | exeName exe == name = exe{buildInfo = bi `mappend` buildInfo exe} : exes + | otherwise = exe : updateExecutable exe_bi' exes + +-- --------------------------------------------------------------------------- +-- The GenericPackageDescription type + +data GenericPackageDescription = + GenericPackageDescription { + packageDescription :: PackageDescription, + genPackageFlags :: [Flag], + condLibrary :: Maybe (CondTree ConfVar [Dependency] Library), + condExecutables :: [(String, CondTree ConfVar [Dependency] Executable)], + condTestSuites :: [(String, CondTree ConfVar [Dependency] TestSuite)], + condBenchmarks :: [(String, CondTree ConfVar [Dependency] Benchmark)] + } + deriving (Show, Eq, Typeable, Data) + +instance Package GenericPackageDescription where + packageId = packageId . packageDescription + +--TODO: make PackageDescription an instance of Text. 
+ +-- | A flag can represent a feature to be included, or a way of linking +-- a target against its dependencies, or in fact whatever you can think of. +data Flag = MkFlag + { flagName :: FlagName + , flagDescription :: String + , flagDefault :: Bool + , flagManual :: Bool + } + deriving (Show, Eq, Typeable, Data) + +-- | A 'FlagName' is the name of a user-defined configuration flag +newtype FlagName = FlagName String + deriving (Eq, Generic, Ord, Show, Read, Typeable, Data) + +instance Binary FlagName + +-- | A 'FlagAssignment' is a total or partial mapping of 'FlagName's to +-- 'Bool' flag values. It represents the flags chosen by the user or +-- discovered during configuration. For example @--flags=foo --flags=-bar@ +-- becomes @[("foo", True), ("bar", False)]@ +-- +type FlagAssignment = [(FlagName, Bool)] + +-- | A @ConfVar@ represents the variable type used. +data ConfVar = OS OS + | Arch Arch + | Flag FlagName + | Impl CompilerFlavor VersionRange + deriving (Eq, Show, Typeable, Data) + +-- | A boolean expression parameterized over the variable type used. 
+data Condition c = Var c + | Lit Bool + | CNot (Condition c) + | COr (Condition c) (Condition c) + | CAnd (Condition c) (Condition c) + deriving (Show, Eq, Typeable, Data) + +data CondTree v c a = CondNode + { condTreeData :: a + , condTreeConstraints :: c + , condTreeComponents :: [( Condition v + , CondTree v c a + , Maybe (CondTree v c a))] + } + deriving (Show, Eq, Typeable, Data) diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Package.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Package.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Package.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Package.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,379 @@ +{-# LANGUAGE DeriveDataTypeable #-} +{-# LANGUAGE DeriveGeneric #-} + +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Package +-- Copyright : Isaac Jones 2003-2004 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- Defines a package identifier along with a parser and pretty printer for it. +-- 'PackageIdentifier's consist of a name and an exact version. It also defines +-- a 'Dependency' data type. A dependency is a package name and a version +-- range, like @\"foo >= 1.2 && < 2\"@. 
+ +module Distribution.Package ( + -- * Package ids + PackageName(..), + PackageIdentifier(..), + PackageId, + + -- * Installed package identifiers + InstalledPackageId(..), + + -- * Package keys (used for linker symbols and library name) + PackageKey(..), + mkPackageKey, + packageKeyHash, + packageKeyLibraryName, + + -- * Package source dependencies + Dependency(..), + thisPackageVersion, + notThisPackageVersion, + simplifyDependency, + + -- * Package classes + Package(..), packageName, packageVersion, + PackageFixedDeps(..), + PackageInstalled(..), + ) where + +import Distribution.ModuleName ( ModuleName ) +import Distribution.Version + ( Version(..), VersionRange, anyVersion, thisVersion + , notThisVersion, simplifyVersionRange ) + +import Distribution.Text (Text(..), display) +import qualified Distribution.Compat.ReadP as Parse +import Distribution.Compat.ReadP ((<++)) +import qualified Text.PrettyPrint as Disp + +import Control.DeepSeq (NFData(..)) +import Data.Ord ( comparing ) +import Distribution.Compat.Binary (Binary) +import qualified Data.Char as Char + ( isDigit, isAlphaNum, isUpper, isLower, ord, chr ) +import Data.Data ( Data ) +import Data.List ( intercalate, foldl', sortBy ) +import Data.Typeable ( Typeable ) +import Data.Word ( Word64 ) +import GHC.Fingerprint ( Fingerprint(..), fingerprintString ) +import GHC.Generics (Generic) +import Numeric ( showIntAtBase ) +import Text.PrettyPrint ((<>), (<+>), text) + +newtype PackageName = PackageName { unPackageName :: String } + deriving (Generic, Read, Show, Eq, Ord, Typeable, Data) + +instance Binary PackageName + +instance Text PackageName where + disp (PackageName n) = Disp.text n + parse = do + ns <- Parse.sepBy1 component (Parse.char '-') + return (PackageName (intercalate "-" ns)) + where + component = do + cs <- Parse.munch1 Char.isAlphaNum + if all Char.isDigit cs then Parse.pfail else return cs + -- each component must contain an alphabetic character, to avoid + -- ambiguity in identifiers like 
foo-1 (the 1 is the version number). + +instance NFData PackageName where + rnf (PackageName pkg) = rnf pkg + +-- | Type alias so we can use the shorter name PackageId. +type PackageId = PackageIdentifier + +-- | The name and version of a package. +data PackageIdentifier + = PackageIdentifier { + pkgName :: PackageName, -- ^The name of this package, eg. foo + pkgVersion :: Version -- ^the version of this package, eg 1.2 + } + deriving (Generic, Read, Show, Eq, Ord, Typeable, Data) + +instance Binary PackageIdentifier + +instance Text PackageIdentifier where + disp (PackageIdentifier n v) = case v of + Version [] _ -> disp n -- if no version, don't show version. + _ -> disp n <> Disp.char '-' <> disp v + + parse = do + n <- parse + v <- (Parse.char '-' >> parse) <++ return (Version [] []) + return (PackageIdentifier n v) + +instance NFData PackageIdentifier where + rnf (PackageIdentifier name version) = rnf name `seq` rnf version + +-- ------------------------------------------------------------ +-- * Installed Package Ids +-- ------------------------------------------------------------ + +-- | An InstalledPackageId uniquely identifies an instance of an installed +-- package. There can be at most one package with a given 'InstalledPackageId' +-- in a package database, or overlay of databases. +-- +newtype InstalledPackageId = InstalledPackageId String + deriving (Generic, Read,Show,Eq,Ord,Typeable,Data) + +instance Binary InstalledPackageId + +instance Text InstalledPackageId where + disp (InstalledPackageId str) = text str + + parse = InstalledPackageId `fmap` Parse.munch1 abi_char + where abi_char c = Char.isAlphaNum c || c `elem` "-_." + +-- ------------------------------------------------------------ +-- * Package Keys +-- ------------------------------------------------------------ + +-- | A 'PackageKey' is the notion of "package ID" which is visible to the +-- compiler. Why is this not a 'PackageId'? 
The 'PackageId' is a user-visible +-- concept written explicity in Cabal files; on the other hand, a 'PackageKey' +-- may contain, for example, information about the transitive dependency +-- tree of a package. Why is this not an 'InstalledPackageId'? A 'PackageKey' +-- affects the ABI because it is used for linker symbols; however, an +-- 'InstalledPackageId' can be used to distinguish two ABI-compatible versions +-- of a library. +-- +-- The key is defined to be a 128-bit MD5 hash, separated into two 64-bit +-- components (the most significant component coming first) which are +-- individually base-62 encoded (A-Z, a-z, 0-9). +-- +-- @ +-- key ::= hash64 hash64 +-- hash64 ::= [A-Za-z0-9]{11} +-- @ +-- +-- The string that is hashed is specified as raw_key: +-- +-- @ +-- raw_key ::= package_id "\n" +-- holes_nl +-- depends_nl +-- package_id ::= package_name "-" package_version +-- holes_nl ::= "" +-- | hole_inst "\n" holes_nl +-- hole_inst ::= modulename " " key ":" modulename +-- depends_nl ::= "" +-- | depend "\n" depends_nl +-- depend ::= key +-- @ +-- +-- The holes list MUST be sorted by the first modulename; the depends list +-- MUST be sorted by the key. holes describes the backing implementations of +-- all holes in the package; depends describes all of the build-depends of +-- a package. A package key MAY be used in holes even if it is not +-- mentioned in depends: depends contains STRICTLY packages which are +-- textually mentioned in the package description. +-- +-- The trailing newline is MANDATORY. +-- +-- There is also a variant of package key which is prefixed by a informational +-- string. This key MUST NOT be used in the computation of the hash proper, +-- but it is useful for human-readable consumption. +-- +-- @ +-- infokey ::= infostring "_" key +-- infostring ::= [A-Za-z0-9-]+ +-- @ +-- +-- For example, Cabal provides a key with the first five characters of the +-- package name for linker symbols. 
+-- +data PackageKey + -- | Modern package key which is a hash of the PackageId and the transitive + -- dependency key. Manually inline it here so we can get the instances + -- we need. Also contains a short informative string + = PackageKey !String {-# UNPACK #-} !Word64 {-# UNPACK #-} !Word64 + -- | Old-style package key which is just a 'PackageId'. Required because + -- old versions of GHC assume that the 'sourcePackageId' recorded for an + -- installed package coincides with the package key it was compiled with. + | OldPackageKey !PackageId + deriving (Generic, Read, Show, Eq, Ord, Typeable, Data) + +instance Binary PackageKey + +-- | Convenience function which converts a fingerprint into a new-style package +-- key. +fingerprintPackageKey :: String -> Fingerprint -> PackageKey +fingerprintPackageKey s (Fingerprint a b) = PackageKey s a b + +-- | Generates a 'PackageKey' from a 'PackageId', sorted package keys of the +-- immediate dependencies. +mkPackageKey :: Bool -- are modern style package keys supported? + -> PackageId + -> [PackageKey] -- dependencies + -> [(ModuleName, (PackageKey, ModuleName))] -- hole instantiations + -> PackageKey +mkPackageKey True pid deps holes = + fingerprintPackageKey stubName . fingerprintString $ + display pid ++ "\n" ++ + -- NB: packageKeyHash, NOT display + concat [ display m ++ " " ++ packageKeyHash p' ++ ":" ++ display m' ++ "\n" + | (m, (p', m')) <- sortBy (comparing fst) holes] ++ + concat [ packageKeyHash d ++ "\n" + | d <- sortBy (comparing packageKeyHash) deps] + where stubName = take 5 (filter (/= '-') (unPackageName (pkgName pid))) +mkPackageKey False pid _ _ = OldPackageKey pid + +-- The base-62 code is based off of 'locators' +-- ((c) Operational Dynamics Consulting, BSD3 licensed) + +-- Note: Instead of base-62 encoding a single 128-bit integer +-- (ceil(21.49) characters), we'll base-62 a pair of 64-bit integers +-- (2 * ceil(10.75) characters). Luckily for us, it's the same number of +-- characters! 
In the long term, this should go in GHC.Fingerprint, +-- but not now... + +-- | Size of a 64-bit word when written as a base-62 string +word64Base62Len :: Int +word64Base62Len = 11 + +-- | Converts a 64-bit word into a base-62 string +toBase62 :: Word64 -> String +toBase62 w = pad ++ str + where + pad = replicate len '0' + len = word64Base62Len - length str -- 11 == ceil(64 / lg 62) + str = showIntAtBase 62 represent w "" + represent :: Int -> Char + represent x + | x < 10 = Char.chr (48 + x) + | x < 36 = Char.chr (65 + x - 10) + | x < 62 = Char.chr (97 + x - 36) + | otherwise = error ("represent (base 62): impossible!") + +-- | Parses a base-62 string into a 64-bit word +fromBase62 :: String -> Word64 +fromBase62 ss = foldl' multiply 0 ss + where + value :: Char -> Int + value c + | Char.isDigit c = Char.ord c - 48 + | Char.isUpper c = Char.ord c - 65 + 10 + | Char.isLower c = Char.ord c - 97 + 36 + | otherwise = error ("value (base 62): impossible!") + + multiply :: Word64 -> Char -> Word64 + multiply acc c = acc * 62 + (fromIntegral $ value c) + +-- | Parses a base-62 string into a fingerprint. 
+readBase62Fingerprint :: String -> Fingerprint +readBase62Fingerprint s = Fingerprint w1 w2 + where (s1,s2) = splitAt word64Base62Len s + w1 = fromBase62 s1 + w2 = fromBase62 (take word64Base62Len s2) + +packageKeyHash :: PackageKey -> String +packageKeyHash (PackageKey _ w1 w2) = toBase62 w1 ++ toBase62 w2 +packageKeyHash (OldPackageKey pid) = display pid + +packageKeyLibraryName :: PackageId -> PackageKey -> String +packageKeyLibraryName pid (PackageKey _ w1 w2) = display pid ++ "-" ++ toBase62 w1 ++ toBase62 w2 +packageKeyLibraryName _ (OldPackageKey pid) = display pid + +instance Text PackageKey where + disp (PackageKey prefix w1 w2) = Disp.text prefix <> Disp.char '_' + <> Disp.text (toBase62 w1) <> Disp.text (toBase62 w2) + disp (OldPackageKey pid) = disp pid + + parse = parseNew <++ parseOld + where parseNew = do + prefix <- Parse.munch1 (\c -> Char.isAlphaNum c || c `elem` "-") + _ <- Parse.char '_' -- if we use '-' it's ambiguous + fmap (fingerprintPackageKey prefix . readBase62Fingerprint) + . 
Parse.count (word64Base62Len * 2) + $ Parse.satisfy Char.isAlphaNum + parseOld = do pid <- parse + return (OldPackageKey pid) + +instance NFData PackageKey where + rnf (PackageKey prefix _ _) = rnf prefix + rnf (OldPackageKey pid) = rnf pid + +-- ------------------------------------------------------------ +-- * Package source dependencies +-- ------------------------------------------------------------ + +-- | Describes a dependency on a source package (API) +-- +data Dependency = Dependency PackageName VersionRange + deriving (Generic, Read, Show, Eq, Typeable, Data) + +instance Binary Dependency + +instance Text Dependency where + disp (Dependency name ver) = + disp name <+> disp ver + + parse = do name <- parse + Parse.skipSpaces + ver <- parse <++ return anyVersion + Parse.skipSpaces + return (Dependency name ver) + +thisPackageVersion :: PackageIdentifier -> Dependency +thisPackageVersion (PackageIdentifier n v) = + Dependency n (thisVersion v) + +notThisPackageVersion :: PackageIdentifier -> Dependency +notThisPackageVersion (PackageIdentifier n v) = + Dependency n (notThisVersion v) + +-- | Simplify the 'VersionRange' expression in a 'Dependency'. +-- See 'simplifyVersionRange'. +-- +simplifyDependency :: Dependency -> Dependency +simplifyDependency (Dependency name range) = + Dependency name (simplifyVersionRange range) + +-- | Class of things that have a 'PackageIdentifier' +-- +-- Types in this class are all notions of a package. This allows us to have +-- different types for the different phases that packages go though, from +-- simple name\/id, package description, configured or installed packages. +-- +-- Not all kinds of packages can be uniquely identified by a +-- 'PackageIdentifier'. In particular, installed packages cannot, there may be +-- many installed instances of the same source package. +-- +class Package pkg where + packageId :: pkg -> PackageIdentifier + +packageName :: Package pkg => pkg -> PackageName +packageName = pkgName . 
packageId + +packageVersion :: Package pkg => pkg -> Version +packageVersion = pkgVersion . packageId + +instance Package PackageIdentifier where + packageId = id + +-- | Subclass of packages that have specific versioned dependencies. +-- +-- So for example a not-yet-configured package has dependencies on version +-- ranges, not specific versions. A configured or an already installed package +-- depends on exact versions. Some operations or data structures (like +-- dependency graphs) only make sense on this subclass of package types. +-- +class Package pkg => PackageFixedDeps pkg where + depends :: pkg -> [PackageIdentifier] + +-- | Class of installed packages. +-- +-- The primary data type which is an instance of this package is +-- 'InstalledPackageInfo', but when we are doing install plans in Cabal install +-- we may have other, installed package-like things which contain more metadata. +-- Installed packages have exact dependencies 'installedDepends'. +class Package pkg => PackageInstalled pkg where + installedPackageId :: pkg -> InstalledPackageId + installedDepends :: pkg -> [InstalledPackageId] diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/ParseUtils.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/ParseUtils.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/ParseUtils.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/ParseUtils.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,755 @@ +{-# LANGUAGE CPP #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.ParseUtils +-- Copyright : (c) The University of Glasgow 2004 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- Utilities for parsing 'PackageDescription' and 'InstalledPackageInfo'. 
+-- +-- The @.cabal@ file format is not trivial, especially with the introduction +-- of configurations and the section syntax that goes with that. This module +-- has a bunch of parsing functions that is used by the @.cabal@ parser and a +-- couple others. It has the parsing framework code and also little parsers for +-- many of the formats we get in various @.cabal@ file fields, like module +-- names, comma separated lists etc. + +-- This module is meant to be local-only to Distribution... + +{-# OPTIONS_HADDOCK hide #-} +module Distribution.ParseUtils ( + LineNo, PError(..), PWarning(..), locatedErrorMsg, syntaxError, warning, + runP, runE, ParseResult(..), catchParseError, parseFail, showPWarning, + Field(..), fName, lineNo, + FieldDescr(..), ppField, ppFields, readFields, readFieldsFlat, + showFields, showSingleNamedField, showSimpleSingleNamedField, + parseFields, parseFieldsFlat, + parseFilePathQ, parseTokenQ, parseTokenQ', + parseModuleNameQ, parseBuildTool, parsePkgconfigDependency, + parseOptVersion, parsePackageNameQ, parseVersionRangeQ, + parseTestedWithQ, parseLicenseQ, parseLanguageQ, parseExtensionQ, + parseSepList, parseCommaList, parseOptCommaList, + showFilePath, showToken, showTestedWith, showFreeText, parseFreeText, + field, simpleField, listField, listFieldWithSep, spaceListField, + commaListField, commaListFieldWithSep, commaNewLineListField, + optsField, liftField, boolField, parseQuoted, indentWith, + + UnrecFieldParser, warnUnrec, ignoreUnrec, + ) where + +import Distribution.Compiler (CompilerFlavor, parseCompilerFlavorCompat) +import Distribution.License +import Distribution.Version + ( Version(..), VersionRange, anyVersion ) +import Distribution.Package ( PackageName(..), Dependency(..) ) +import Distribution.ModuleName (ModuleName) +import Distribution.Compat.ReadP as ReadP hiding (get) +import Distribution.ReadE +import Distribution.Text + ( Text(..) 
) +import Distribution.Simple.Utils + ( comparing, dropWhileEndLE, intercalate, lowercase + , normaliseLineEndings ) +import Language.Haskell.Extension + ( Language, Extension ) + +import Text.PrettyPrint hiding (braces) +import Data.Char (isSpace, toLower, isAlphaNum, isDigit) +import Data.Maybe (fromMaybe) +import Data.Tree as Tree (Tree(..), flatten) +import qualified Data.Map as Map +import Control.Monad (foldM, ap) +#if __GLASGOW_HASKELL__ < 710 +import Control.Applicative (Applicative(..)) +#endif +import System.FilePath (normalise) +import Data.List (sortBy) + +-- ----------------------------------------------------------------------------- + +type LineNo = Int +type Separator = ([Doc] -> Doc) + +data PError = AmbiguousParse String LineNo + | NoParse String LineNo + | TabsError LineNo + | FromString String (Maybe LineNo) + deriving (Eq, Show) + +data PWarning = PWarning String + | UTFWarning LineNo String + deriving (Eq, Show) + +showPWarning :: FilePath -> PWarning -> String +showPWarning fpath (PWarning msg) = + normalise fpath ++ ": " ++ msg +showPWarning fpath (UTFWarning line fname) = + normalise fpath ++ ":" ++ show line + ++ ": Invalid UTF-8 text in the '" ++ fname ++ "' field." 
+ +data ParseResult a = ParseFailed PError | ParseOk [PWarning] a + deriving Show + +instance Functor ParseResult where + fmap _ (ParseFailed err) = ParseFailed err + fmap f (ParseOk ws x) = ParseOk ws $ f x + +instance Applicative ParseResult where + pure = return + (<*>) = ap + + +instance Monad ParseResult where + return = ParseOk [] + ParseFailed err >>= _ = ParseFailed err + ParseOk ws x >>= f = case f x of + ParseFailed err -> ParseFailed err + ParseOk ws' x' -> ParseOk (ws'++ws) x' + fail s = ParseFailed (FromString s Nothing) + +catchParseError :: ParseResult a -> (PError -> ParseResult a) + -> ParseResult a +p@(ParseOk _ _) `catchParseError` _ = p +ParseFailed e `catchParseError` k = k e + +parseFail :: PError -> ParseResult a +parseFail = ParseFailed + +runP :: LineNo -> String -> ReadP a a -> String -> ParseResult a +runP line fieldname p s = + case [ x | (x,"") <- results ] of + [a] -> ParseOk (utf8Warnings line fieldname s) a + --TODO: what is this double parse thing all about? + -- Can't we just do the all isSpace test the first time? + [] -> case [ x | (x,ys) <- results, all isSpace ys ] of + [a] -> ParseOk (utf8Warnings line fieldname s) a + [] -> ParseFailed (NoParse fieldname line) + _ -> ParseFailed (AmbiguousParse fieldname line) + _ -> ParseFailed (AmbiguousParse fieldname line) + where results = readP_to_S p s + +runE :: LineNo -> String -> ReadE a -> String -> ParseResult a +runE line fieldname p s = + case runReadE p s of + Right a -> ParseOk (utf8Warnings line fieldname s) a + Left e -> syntaxError line $ + "Parse of field '" ++ fieldname ++ "' failed (" ++ e ++ "): " ++ s + +utf8Warnings :: LineNo -> String -> String -> [PWarning] +utf8Warnings line fieldname s = + take 1 [ UTFWarning n fieldname + | (n,l) <- zip [line..] 
(lines s) + , '\xfffd' `elem` l ] + +locatedErrorMsg :: PError -> (Maybe LineNo, String) +locatedErrorMsg (AmbiguousParse f n) = (Just n, + "Ambiguous parse in field '"++f++"'.") +locatedErrorMsg (NoParse f n) = (Just n, + "Parse of field '"++f++"' failed.") +locatedErrorMsg (TabsError n) = (Just n, "Tab used as indentation.") +locatedErrorMsg (FromString s n) = (n, s) + +syntaxError :: LineNo -> String -> ParseResult a +syntaxError n s = ParseFailed $ FromString s (Just n) + +tabsError :: LineNo -> ParseResult a +tabsError ln = ParseFailed $ TabsError ln + +warning :: String -> ParseResult () +warning s = ParseOk [PWarning s] () + +-- | Field descriptor. The parameter @a@ parameterizes over where the field's +-- value is stored in. +data FieldDescr a + = FieldDescr + { fieldName :: String + , fieldGet :: a -> Doc + , fieldSet :: LineNo -> String -> a -> ParseResult a + -- ^ @fieldSet n str x@ Parses the field value from the given input + -- string @str@ and stores the result in @x@ if the parse was + -- successful. Otherwise, reports an error on line number @n@. + } + +field :: String -> (a -> Doc) -> ReadP a a -> FieldDescr a +field name showF readF = + FieldDescr name showF (\line val _st -> runP line name readF val) + +-- Lift a field descriptor storing into an 'a' to a field descriptor storing +-- into a 'b'. +liftField :: (b -> a) -> (a -> b -> b) -> FieldDescr a -> FieldDescr b +liftField get set (FieldDescr name showF parseF) + = FieldDescr name (showF . get) + (\line str b -> do + a <- parseF line str (get b) + return (set a b)) + +-- Parser combinator for simple fields. Takes a field name, a pretty printer, +-- a parser function, an accessor, and a setter, returns a FieldDescr over the +-- compoid structure. 
+simpleField :: String -> (a -> Doc) -> ReadP a a + -> (b -> a) -> (a -> b -> b) -> FieldDescr b +simpleField name showF readF get set + = liftField get set $ field name showF readF + +commaListFieldWithSep :: Separator -> String -> (a -> Doc) -> ReadP [a] a + -> (b -> [a]) -> ([a] -> b -> b) -> FieldDescr b +commaListFieldWithSep separator name showF readF get set = + liftField get set' $ + field name showF' (parseCommaList readF) + where + set' xs b = set (get b ++ xs) b + showF' = separator . punctuate comma . map showF + +commaListField :: String -> (a -> Doc) -> ReadP [a] a + -> (b -> [a]) -> ([a] -> b -> b) -> FieldDescr b +commaListField = commaListFieldWithSep fsep + +commaNewLineListField :: String -> (a -> Doc) -> ReadP [a] a + -> (b -> [a]) -> ([a] -> b -> b) -> FieldDescr b +commaNewLineListField = commaListFieldWithSep sep + +spaceListField :: String -> (a -> Doc) -> ReadP [a] a + -> (b -> [a]) -> ([a] -> b -> b) -> FieldDescr b +spaceListField name showF readF get set = + liftField get set' $ + field name showF' (parseSpaceList readF) + where + set' xs b = set (get b ++ xs) b + showF' = fsep . map showF + +listFieldWithSep :: Separator -> String -> (a -> Doc) -> ReadP [a] a + -> (b -> [a]) -> ([a] -> b -> b) -> FieldDescr b +listFieldWithSep separator name showF readF get set = + liftField get set' $ + field name showF' (parseOptCommaList readF) + where + set' xs b = set (get b ++ xs) b + showF' = separator . map showF + +listField :: String -> (a -> Doc) -> ReadP [a] a + -> (b -> [a]) -> ([a] -> b -> b) -> FieldDescr b +listField = listFieldWithSep fsep + +optsField :: String -> CompilerFlavor -> (b -> [(CompilerFlavor,[String])]) + -> ([(CompilerFlavor,[String])] -> b -> b) -> FieldDescr b +optsField name flavor get set = + liftField (fromMaybe [] . lookup flavor . 
get) + (\opts b -> set (reorder (update flavor opts (get b))) b) $ + field name showF (sepBy parseTokenQ' (munch1 isSpace)) + where + update _ opts l | all null opts = l --empty opts as if no opts + update f opts [] = [(f,opts)] + update f opts ((f',opts'):rest) + | f == f' = (f, opts' ++ opts) : rest + | otherwise = (f',opts') : update f opts rest + reorder = sortBy (comparing fst) + showF = hsep . map text + +-- TODO: this is a bit smelly hack. It's because we want to parse bool fields +-- liberally but not accept new parses. We cannot do that with ReadP +-- because it does not support warnings. We need a new parser framework! +boolField :: String -> (b -> Bool) -> (Bool -> b -> b) -> FieldDescr b +boolField name get set = liftField get set (FieldDescr name showF readF) + where + showF = text . show + readF line str _ + | str == "True" = ParseOk [] True + | str == "False" = ParseOk [] False + | lstr == "true" = ParseOk [caseWarning] True + | lstr == "false" = ParseOk [caseWarning] False + | otherwise = ParseFailed (NoParse name line) + where + lstr = lowercase str + caseWarning = PWarning $ + "The '" ++ name ++ "' field is case sensitive, use 'True' or 'False'." + +ppFields :: [FieldDescr a] -> a -> Doc +ppFields fields x = + vcat [ ppField name (getter x) | FieldDescr name getter _ <- fields ] + +ppField :: String -> Doc -> Doc +ppField name fielddoc + | isEmpty fielddoc = empty + | name `elem` nestedFields = text name <> colon $+$ nest indentWith fielddoc + | otherwise = text name <> colon <+> fielddoc + where + nestedFields = + [ "description" + , "build-depends" + , "data-files" + , "extra-source-files" + , "extra-tmp-files" + , "exposed-modules" + , "c-sources" + , "js-sources" + , "extra-libraries" + , "includes" + , "install-includes" + , "other-modules" + , "depends" + ] + +showFields :: [FieldDescr a] -> a -> String +showFields fields = render . ($+$ text "") . 
ppFields fields + +showSingleNamedField :: [FieldDescr a] -> String -> Maybe (a -> String) +showSingleNamedField fields f = + case [ get | (FieldDescr f' get _) <- fields, f' == f ] of + [] -> Nothing + (get:_) -> Just (render . ppField f . get) + +showSimpleSingleNamedField :: [FieldDescr a] -> String -> Maybe (a -> String) +showSimpleSingleNamedField fields f = + case [ get | (FieldDescr f' get _) <- fields, f' == f ] of + [] -> Nothing + (get:_) -> Just (renderStyle myStyle . get) + where myStyle = style { mode = LeftMode } + +parseFields :: [FieldDescr a] -> a -> String -> ParseResult a +parseFields fields initial str = + readFields str >>= accumFields fields initial + +parseFieldsFlat :: [FieldDescr a] -> a -> String -> ParseResult a +parseFieldsFlat fields initial str = + readFieldsFlat str >>= accumFields fields initial + +accumFields :: [FieldDescr a] -> a -> [Field] -> ParseResult a +accumFields fields = foldM setField + where + fieldMap = Map.fromList + [ (name, f) | f@(FieldDescr name _ _) <- fields ] + setField accum (F line name value) = case Map.lookup name fieldMap of + Just (FieldDescr _ _ set) -> set line value accum + Nothing -> do + warning ("Unrecognized field " ++ name ++ " on line " ++ show line) + return accum + setField accum f = do + warning ("Unrecognized stanza on line " ++ show (lineNo f)) + return accum + +-- | The type of a function which, given a name-value pair of an +-- unrecognized field, and the current structure being built, +-- decides whether to incorporate the unrecognized field +-- (by returning Just x, where x is a possibly modified version +-- of the structure being built), or not (by returning Nothing). +type UnrecFieldParser a = (String,String) -> a -> Maybe a + +-- | A default unrecognized field parser which simply returns Nothing, +-- i.e. ignores all unrecognized fields, so warnings will be generated. 
+warnUnrec :: UnrecFieldParser a +warnUnrec _ _ = Nothing + +-- | A default unrecognized field parser which silently (i.e. no +-- warnings will be generated) ignores unrecognized fields, by +-- returning the structure being built unmodified. +ignoreUnrec :: UnrecFieldParser a +ignoreUnrec _ = Just + +------------------------------------------------------------------------------ + +-- The data type for our three syntactic categories +data Field + = F LineNo String String + -- ^ A regular @: @ field + | Section LineNo String String [Field] + -- ^ A section with a name and possible parameter. The syntactic + -- structure is: + -- + -- @ + -- { + -- * + -- } + -- @ + | IfBlock LineNo String [Field] [Field] + -- ^ A conditional block with an optional else branch: + -- + -- @ + -- if { + -- * + -- } else { + -- * + -- } + -- @ + deriving (Show + ,Eq) -- for testing + +lineNo :: Field -> LineNo +lineNo (F n _ _) = n +lineNo (Section n _ _ _) = n +lineNo (IfBlock n _ _ _) = n + +fName :: Field -> String +fName (F _ n _) = n +fName (Section _ n _ _) = n +fName _ = error "fname: not a field or section" + +readFields :: String -> ParseResult [Field] +readFields input = ifelse + =<< mapM (mkField 0) + =<< mkTree tokens + + where ls = (lines . normaliseLineEndings) input + tokens = (concatMap tokeniseLine . trimLines) ls + +readFieldsFlat :: String -> ParseResult [Field] +readFieldsFlat input = mapM (mkField 0) + =<< mkTree tokens + where ls = (lines . normaliseLineEndings) input + tokens = (concatMap tokeniseLineFlat . trimLines) ls + +-- attach line number and determine indentation +trimLines :: [String] -> [(LineNo, Indent, HasTabs, String)] +trimLines ls = [ (lineno, indent, hastabs, trimTrailing l') + | (lineno, l) <- zip [1..] 
ls + , let (sps, l') = span isSpace l + indent = length sps + hastabs = '\t' `elem` sps + , validLine l' ] + where validLine ('-':'-':_) = False -- Comment + validLine [] = False -- blank line + validLine _ = True + +-- | We parse generically based on indent level and braces '{' '}'. To do that +-- we split into lines and then '{' '}' tokens and other spans within a line. +data Token = + -- | The 'Line' token is for bits that /start/ a line, eg: + -- + -- > "\n blah blah { blah" + -- + -- tokenises to: + -- + -- > [Line n 2 False "blah blah", OpenBracket, Span n "blah"] + -- + -- so lines are the only ones that can have nested layout, since they + -- have a known indentation level. + -- + -- eg: we can't have this: + -- + -- > if ... { + -- > } else + -- > other + -- + -- because other cannot nest under else, since else doesn't start a line + -- so cannot have nested layout. It'd have to be: + -- + -- > if ... { + -- > } + -- > else + -- > other + -- + -- but that's not so common, people would normally use layout or + -- brackets not both in a single @if else@ construct. + -- + -- > if ... { foo : bar } + -- > else + -- > other + -- + -- this is OK + Line LineNo Indent HasTabs String + | Span LineNo String -- ^ span in a line, following brackets + | OpenBracket LineNo | CloseBracket LineNo + +type Indent = Int +type HasTabs = Bool + +-- | Tokenise a single line, splitting on '{' '}' and the spans in between. +-- Also trims leading & trailing space on those spans within the line. 
+tokeniseLine :: (LineNo, Indent, HasTabs, String) -> [Token] +tokeniseLine (n0, i, t, l) = case split n0 l of + (Span _ l':ss) -> Line n0 i t l' :ss + cs -> cs + where split _ "" = [] + split n s = case span (\c -> c /='}' && c /= '{') s of + ("", '{' : s') -> OpenBracket n : split n s' + (w , '{' : s') -> mkspan n w (OpenBracket n : split n s') + ("", '}' : s') -> CloseBracket n : split n s' + (w , '}' : s') -> mkspan n w (CloseBracket n : split n s') + (w , _) -> mkspan n w [] + + mkspan n s ss | null s' = ss + | otherwise = Span n s' : ss + where s' = trimTrailing (trimLeading s) + +tokeniseLineFlat :: (LineNo, Indent, HasTabs, String) -> [Token] +tokeniseLineFlat (n0, i, t, l) + | null l' = [] + | otherwise = [Line n0 i t l'] + where + l' = trimTrailing (trimLeading l) + +trimLeading, trimTrailing :: String -> String +trimLeading = dropWhile isSpace +trimTrailing = dropWhileEndLE isSpace + + +type SyntaxTree = Tree (LineNo, HasTabs, String) + +-- | Parse the stream of tokens into a tree of them, based on indent \/ layout +mkTree :: [Token] -> ParseResult [SyntaxTree] +mkTree toks = + layout 0 [] toks >>= \(trees, trailing) -> case trailing of + [] -> return trees + OpenBracket n:_ -> syntaxError n "mismatched brackets, unexpected {" + CloseBracket n:_ -> syntaxError n "mismatched brackets, unexpected }" + -- the following two should never happen: + Span n l :_ -> syntaxError n $ "unexpected span: " ++ show l + Line n _ _ l :_ -> syntaxError n $ "unexpected line: " ++ show l + + +-- | Parse the stream of tokens into a tree of them, based on indent +-- This parse state expect to be in a layout context, though possibly +-- nested within a braces context so we may still encounter closing braces. 
+layout :: Indent -- ^ indent level of the parent\/previous line + -> [SyntaxTree] -- ^ accumulating param, trees in this level + -> [Token] -- ^ remaining tokens + -> ParseResult ([SyntaxTree], [Token]) + -- ^ collected trees on this level and trailing tokens +layout _ a [] = return (reverse a, []) +layout i a (s@(Line _ i' _ _):ss) | i' < i = return (reverse a, s:ss) +layout i a (Line n _ t l:OpenBracket n':ss) = do + (sub, ss') <- braces n' [] ss + layout i (Node (n,t,l) sub:a) ss' + +layout i a (Span n l:OpenBracket n':ss) = do + (sub, ss') <- braces n' [] ss + layout i (Node (n,False,l) sub:a) ss' + +-- look ahead to see if following lines are more indented, giving a sub-tree +layout i a (Line n i' t l:ss) = do + lookahead <- layout (i'+1) [] ss + case lookahead of + ([], _) -> layout i (Node (n,t,l) [] :a) ss + (ts, ss') -> layout i (Node (n,t,l) ts :a) ss' + +layout _ _ ( OpenBracket n :_) = syntaxError n "unexpected '{'" +layout _ a (s@(CloseBracket _):ss) = return (reverse a, s:ss) +layout _ _ ( Span n l : _) = syntaxError n $ "unexpected span: " + ++ show l + +-- | Parse the stream of tokens into a tree of them, based on explicit braces +-- This parse state expects to find a closing bracket. 
+braces :: LineNo -- ^ line of the '{', used for error messages + -> [SyntaxTree] -- ^ accumulating param, trees in this level + -> [Token] -- ^ remaining tokens + -> ParseResult ([SyntaxTree],[Token]) + -- ^ collected trees on this level and trailing tokens +braces m a (Line n _ t l:OpenBracket n':ss) = do + (sub, ss') <- braces n' [] ss + braces m (Node (n,t,l) sub:a) ss' + +braces m a (Span n l:OpenBracket n':ss) = do + (sub, ss') <- braces n' [] ss + braces m (Node (n,False,l) sub:a) ss' + +braces m a (Line n i t l:ss) = do + lookahead <- layout (i+1) [] ss + case lookahead of + ([], _) -> braces m (Node (n,t,l) [] :a) ss + (ts, ss') -> braces m (Node (n,t,l) ts :a) ss' + +braces m a (Span n l:ss) = braces m (Node (n,False,l) []:a) ss +braces _ a (CloseBracket _:ss) = return (reverse a, ss) +braces n _ [] = syntaxError n $ "opening brace '{'" + ++ "has no matching closing brace '}'" +braces _ _ (OpenBracket n:_) = syntaxError n "unexpected '{'" + +-- | Convert the parse tree into the Field AST +-- Also check for dodgy uses of tabs in indentation. +mkField :: Int -> SyntaxTree -> ParseResult Field +mkField d (Node (n,t,_) _) | d >= 1 && t = tabsError n +mkField d (Node (n,_,l) ts) = case span (\c -> isAlphaNum c || c == '-') l of + ([], _) -> syntaxError n $ "unrecognised field or section: " ++ show l + (name, rest) -> case trimLeading rest of + (':':rest') -> do let followingLines = concatMap Tree.flatten ts + tabs = not (null [()| (_,True,_) <- followingLines ]) + if tabs && d >= 1 + then tabsError n + else return $ F n (map toLower name) + (fieldValue rest' followingLines) + rest' -> do ts' <- mapM (mkField (d+1)) ts + return (Section n (map toLower name) rest' ts') + where fieldValue firstLine followingLines = + let firstLine' = trimLeading firstLine + followingLines' = map (\(_,_,s) -> stripDot s) followingLines + allLines | null firstLine' = followingLines' + | otherwise = firstLine' : followingLines' + in intercalate "\n" allLines + stripDot "." 
= "" + stripDot s = s + +-- | Convert if/then/else 'Section's to 'IfBlock's +ifelse :: [Field] -> ParseResult [Field] +ifelse [] = return [] +ifelse (Section n "if" cond thenpart + :Section _ "else" as elsepart:fs) + | null cond = syntaxError n "'if' with missing condition" + | null thenpart = syntaxError n "'then' branch of 'if' is empty" + | not (null as) = syntaxError n "'else' takes no arguments" + | null elsepart = syntaxError n "'else' branch of 'if' is empty" + | otherwise = do tp <- ifelse thenpart + ep <- ifelse elsepart + fs' <- ifelse fs + return (IfBlock n cond tp ep:fs') +ifelse (Section n "if" cond thenpart:fs) + | null cond = syntaxError n "'if' with missing condition" + | null thenpart = syntaxError n "'then' branch of 'if' is empty" + | otherwise = do tp <- ifelse thenpart + fs' <- ifelse fs + return (IfBlock n cond tp []:fs') +ifelse (Section n "else" _ _:_) = syntaxError n + "stray 'else' with no preceding 'if'" +ifelse (Section n s a fs':fs) = do fs'' <- ifelse fs' + fs''' <- ifelse fs + return (Section n s a fs'' : fs''') +ifelse (f:fs) = do fs' <- ifelse fs + return (f : fs') + +------------------------------------------------------------------------------ + +-- |parse a module name +parseModuleNameQ :: ReadP r ModuleName +parseModuleNameQ = parseQuoted parse <++ parse + +parseFilePathQ :: ReadP r FilePath +parseFilePathQ = parseTokenQ + -- removed until normalise is no longer broken, was: + -- liftM normalise parseTokenQ + +betweenSpaces :: ReadP r a -> ReadP r a +betweenSpaces act = do skipSpaces + res <- act + skipSpaces + return res + +parseBuildTool :: ReadP r Dependency +parseBuildTool = do name <- parseBuildToolNameQ + ver <- betweenSpaces $ + parseVersionRangeQ <++ return anyVersion + return $ Dependency name ver + +parseBuildToolNameQ :: ReadP r PackageName +parseBuildToolNameQ = parseQuoted parseBuildToolName <++ parseBuildToolName + +-- like parsePackageName but accepts symbols in components +parseBuildToolName :: ReadP r 
PackageName +parseBuildToolName = do ns <- sepBy1 component (ReadP.char '-') + return (PackageName (intercalate "-" ns)) + where component = do + cs <- munch1 (\c -> isAlphaNum c || c == '+' || c == '_') + if all isDigit cs then pfail else return cs + +-- pkg-config allows versions and other letters in package names, +-- eg "gtk+-2.0" is a valid pkg-config package _name_. +-- It then has a package version number like 2.10.13 +parsePkgconfigDependency :: ReadP r Dependency +parsePkgconfigDependency = do name <- munch1 + (\c -> isAlphaNum c || c `elem` "+-._") + ver <- betweenSpaces $ + parseVersionRangeQ <++ return anyVersion + return $ Dependency (PackageName name) ver + +parsePackageNameQ :: ReadP r PackageName +parsePackageNameQ = parseQuoted parse <++ parse + +parseVersionRangeQ :: ReadP r VersionRange +parseVersionRangeQ = parseQuoted parse <++ parse + +parseOptVersion :: ReadP r Version +parseOptVersion = parseQuoted ver <++ ver + where ver :: ReadP r Version + ver = parse <++ return noVersion + noVersion = Version [] [] + +parseTestedWithQ :: ReadP r (CompilerFlavor,VersionRange) +parseTestedWithQ = parseQuoted tw <++ tw + where + tw :: ReadP r (CompilerFlavor,VersionRange) + tw = do compiler <- parseCompilerFlavorCompat + version <- betweenSpaces $ parse <++ return anyVersion + return (compiler,version) + +parseLicenseQ :: ReadP r License +parseLicenseQ = parseQuoted parse <++ parse + +-- urgh, we can't define optQuotes :: ReadP r a -> ReadP r a +-- because the "compat" version of ReadP isn't quite powerful enough. In +-- particular, the type of <++ is ReadP r r -> ReadP r a -> ReadP r a +-- Hence the trick above to make 'lic' polymorphic. 
+ +parseLanguageQ :: ReadP r Language +parseLanguageQ = parseQuoted parse <++ parse + +parseExtensionQ :: ReadP r Extension +parseExtensionQ = parseQuoted parse <++ parse + +parseHaskellString :: ReadP r String +parseHaskellString = readS_to_P reads + +parseTokenQ :: ReadP r String +parseTokenQ = parseHaskellString <++ munch1 (\x -> not (isSpace x) && x /= ',') + +parseTokenQ' :: ReadP r String +parseTokenQ' = parseHaskellString <++ munch1 (not . isSpace) + +parseSepList :: ReadP r b + -> ReadP r a -- ^The parser for the stuff between commas + -> ReadP r [a] +parseSepList sepr p = sepBy p separator + where separator = betweenSpaces sepr + +parseSpaceList :: ReadP r a -- ^The parser for the stuff between commas + -> ReadP r [a] +parseSpaceList p = sepBy p skipSpaces + +parseCommaList :: ReadP r a -- ^The parser for the stuff between commas + -> ReadP r [a] +parseCommaList = parseSepList (ReadP.char ',') + +parseOptCommaList :: ReadP r a -- ^The parser for the stuff between commas + -> ReadP r [a] +parseOptCommaList = parseSepList (optional (ReadP.char ',')) + +parseQuoted :: ReadP r a -> ReadP r a +parseQuoted = between (ReadP.char '"') (ReadP.char '"') + +parseFreeText :: ReadP.ReadP s String +parseFreeText = ReadP.munch (const True) + +-- -------------------------------------------- +-- ** Pretty printing + +showFilePath :: FilePath -> Doc +showFilePath "" = empty +showFilePath x = showToken x + +showToken :: String -> Doc +showToken str + | not (any dodgy str) && + not (null str) = text str + | otherwise = text (show str) + where dodgy c = isSpace c || c == ',' + +showTestedWith :: (CompilerFlavor,VersionRange) -> Doc +showTestedWith (compiler, version) = text (show compiler) <+> disp version + +-- | Pretty-print free-format text, ensuring that it is vertically aligned, +-- and with blank lines replaced by dots for correct re-parsing. +showFreeText :: String -> Doc +showFreeText "" = empty +showFreeText s = vcat [text (if null l then "." 
else l) | l <- lines_ s] + +-- | 'lines_' breaks a string up into a list of strings at newline +-- characters. The resulting strings do not contain newlines. +lines_ :: String -> [String] +lines_ [] = [""] +lines_ s = let (l, s') = break (== '\n') s + in l : case s' of + [] -> [] + (_:s'') -> lines_ s'' + +-- | the indentation used for pretty printing +indentWith :: Int +indentWith = 4 diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/ReadE.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/ReadE.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/ReadE.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/ReadE.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,51 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.ReadE +-- Copyright : Jose Iborra 2008 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- Simple parsing with failure + +module Distribution.ReadE ( + -- * ReadE + ReadE(..), succeedReadE, failReadE, + -- * Projections + parseReadE, readEOrFail, + readP_to_E + ) where + +import Distribution.Compat.ReadP +import Data.Char ( isSpace ) + +-- | Parser with simple error reporting +newtype ReadE a = ReadE {runReadE :: String -> Either ErrorMsg a} +type ErrorMsg = String + +instance Functor ReadE where + fmap f (ReadE p) = ReadE $ \txt -> case p txt of + Right a -> Right (f a) + Left err -> Left err + +succeedReadE :: (String -> a) -> ReadE a +succeedReadE f = ReadE (Right . f) + +failReadE :: ErrorMsg -> ReadE a +failReadE = ReadE . const . Left + +parseReadE :: ReadE a -> ReadP r a +parseReadE (ReadE p) = do + txt <- look + either fail return (p txt) + +readEOrFail :: ReadE a -> String -> a +readEOrFail r = either error id . 
runReadE r + +readP_to_E :: (String -> ErrorMsg) -> ReadP a a -> ReadE a +readP_to_E err r = + ReadE $ \txt -> case [ p | (p, s) <- readP_to_S r txt + , all isSpace s ] + of [] -> Left (err txt) + (p:_) -> Right p diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Bench.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Bench.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Bench.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Bench.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,128 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.Bench +-- Copyright : Johan Tibell 2011 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This is the entry point into running the benchmarks in a built +-- package. It performs the \"@.\/setup bench@\" action. It runs +-- benchmarks designated in the package description. + +module Distribution.Simple.Bench + ( bench + ) where + +import qualified Distribution.PackageDescription as PD + ( PackageDescription(..), BuildInfo(buildable) + , Benchmark(..), BenchmarkInterface(..), benchmarkType, hasBenchmarks ) +import Distribution.Simple.BuildPaths ( exeExtension ) +import Distribution.Simple.Compiler ( compilerInfo ) +import Distribution.Simple.InstallDirs + ( fromPathTemplate, initialPathTemplateEnv, PathTemplateVariable(..) + , substPathTemplate , toPathTemplate, PathTemplate ) +import qualified Distribution.Simple.LocalBuildInfo as LBI + ( LocalBuildInfo(..) 
) +import Distribution.Simple.Setup ( BenchmarkFlags(..), fromFlag ) +import Distribution.Simple.UserHooks ( Args ) +import Distribution.Simple.Utils ( die, notice, rawSystemExitCode ) +import Distribution.Text + +import Control.Monad ( when, unless ) +import System.Exit ( ExitCode(..), exitFailure, exitWith ) +import System.Directory ( doesFileExist ) +import System.FilePath ( (), (<.>) ) + +-- | Perform the \"@.\/setup bench@\" action. +bench :: Args -- ^positional command-line arguments + -> PD.PackageDescription -- ^information from the .cabal file + -> LBI.LocalBuildInfo -- ^information from the configure step + -> BenchmarkFlags -- ^flags sent to benchmark + -> IO () +bench args pkg_descr lbi flags = do + let verbosity = fromFlag $ benchmarkVerbosity flags + benchmarkNames = args + pkgBenchmarks = PD.benchmarks pkg_descr + enabledBenchmarks = [ t | t <- pkgBenchmarks + , PD.benchmarkEnabled t + , PD.buildable (PD.benchmarkBuildInfo t) ] + + -- Run the benchmark + doBench :: PD.Benchmark -> IO ExitCode + doBench bm = + case PD.benchmarkInterface bm of + PD.BenchmarkExeV10 _ _ -> do + let cmd = LBI.buildDir lbi PD.benchmarkName bm + PD.benchmarkName bm <.> exeExtension + options = map (benchOption pkg_descr lbi bm) $ + benchmarkOptions flags + name = PD.benchmarkName bm + -- Check that the benchmark executable exists. + exists <- doesFileExist cmd + unless exists $ die $ + "Error: Could not find benchmark program \"" + ++ cmd ++ "\". Did you build the package first?" + + notice verbosity $ startMessage name + -- This will redirect the child process + -- stdout/stderr to the parent process. 
+ exitcode <- rawSystemExitCode verbosity cmd options + notice verbosity $ finishMessage name exitcode + return exitcode + + _ -> do + notice verbosity $ "No support for running " + ++ "benchmark " ++ PD.benchmarkName bm ++ " of type: " + ++ show (disp $ PD.benchmarkType bm) + exitFailure + + when (not $ PD.hasBenchmarks pkg_descr) $ do + notice verbosity "Package has no benchmarks." + exitWith ExitSuccess + + when (PD.hasBenchmarks pkg_descr && null enabledBenchmarks) $ + die $ "No benchmarks enabled. Did you remember to configure with " + ++ "\'--enable-benchmarks\'?" + + bmsToRun <- case benchmarkNames of + [] -> return enabledBenchmarks + names -> flip mapM names $ \bmName -> + let benchmarkMap = zip enabledNames enabledBenchmarks + enabledNames = map PD.benchmarkName enabledBenchmarks + allNames = map PD.benchmarkName pkgBenchmarks + in case lookup bmName benchmarkMap of + Just t -> return t + _ | bmName `elem` allNames -> + die $ "Package configured with benchmark " + ++ bmName ++ " disabled." + | otherwise -> die $ "no such benchmark: " ++ bmName + + let totalBenchmarks = length bmsToRun + notice verbosity $ "Running " ++ show totalBenchmarks ++ " benchmarks..." + exitcodes <- mapM doBench bmsToRun + let allOk = totalBenchmarks == length (filter (== ExitSuccess) exitcodes) + unless allOk exitFailure + where + startMessage name = "Benchmark " ++ name ++ ": RUNNING...\n" + finishMessage name exitcode = "Benchmark " ++ name ++ ": " + ++ (case exitcode of + ExitSuccess -> "FINISH" + ExitFailure _ -> "ERROR") + + +-- TODO: This is abusing the notion of a 'PathTemplate'. The result isn't +-- necessarily a path. 
+benchOption :: PD.PackageDescription + -> LBI.LocalBuildInfo + -> PD.Benchmark + -> PathTemplate + -> String +benchOption pkg_descr lbi bm template = + fromPathTemplate $ substPathTemplate env template + where + env = initialPathTemplateEnv + (PD.package pkg_descr) (LBI.pkgKey lbi) + (compilerInfo $ LBI.compiler lbi) (LBI.hostPlatform lbi) ++ + [(BenchmarkNameVar, toPathTemplate $ PD.benchmarkName bm)] diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Build/Macros.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Build/Macros.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Build/Macros.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Build/Macros.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,109 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.Build.Macros +-- Copyright : Simon Marlow 2008 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- Generate cabal_macros.h - CPP macros for package version testing +-- +-- When using CPP you get +-- +-- > VERSION_ +-- > MIN_VERSION_(A,B,C) +-- +-- for each /package/ in @build-depends@, which is true if the version of +-- /package/ in use is @>= A.B.C@, using the normal ordering on version +-- numbers. 
+-- +module Distribution.Simple.Build.Macros ( + generate, + generatePackageVersionMacros, + ) where + +import Data.Maybe + ( isJust ) +import Distribution.Package + ( PackageIdentifier(PackageIdentifier) ) +import Distribution.Version + ( Version(versionBranch) ) +import Distribution.PackageDescription + ( PackageDescription ) +import Distribution.Simple.Compiler + ( packageKeySupported ) +import Distribution.Simple.LocalBuildInfo + ( LocalBuildInfo(compiler, pkgKey, withPrograms), externalPackageDeps ) +import Distribution.Simple.Program.Db + ( configuredPrograms ) +import Distribution.Simple.Program.Types + ( ConfiguredProgram(programId, programVersion) ) +import Distribution.Text + ( display ) + +-- ------------------------------------------------------------ +-- * Generate cabal_macros.h +-- ------------------------------------------------------------ + +-- | The contents of the @cabal_macros.h@ for the given configured package. +-- +generate :: PackageDescription -> LocalBuildInfo -> String +generate _pkg_descr lbi = + "/* DO NOT EDIT: This file is automatically generated by Cabal */\n\n" ++ + generatePackageVersionMacros (map snd (externalPackageDeps lbi)) ++ + generateToolVersionMacros (configuredPrograms . withPrograms $ lbi) ++ + generatePackageKeyMacro lbi + +-- | Helper function that generates just the @VERSION_pkg@ and @MIN_VERSION_pkg@ +-- macros for a list of package ids (usually used with the specific deps of +-- a configured package). +-- +generatePackageVersionMacros :: [PackageIdentifier] -> String +generatePackageVersionMacros pkgids = concat + [ "/* package " ++ display pkgid ++ " */\n" + ++ generateMacros "" pkgname version + | pkgid@(PackageIdentifier name version) <- pkgids + , let pkgname = map fixchar (display name) + ] + +-- | Helper function that generates just the @TOOL_VERSION_pkg@ and +-- @MIN_TOOL_VERSION_pkg@ macros for a list of configured programs. 
+-- +generateToolVersionMacros :: [ConfiguredProgram] -> String +generateToolVersionMacros progs = concat + [ "/* tool " ++ progid ++ " */\n" + ++ generateMacros "TOOL_" progname version + | prog <- progs + , isJust . programVersion $ prog + , let progid = programId prog ++ "-" ++ display version + progname = map fixchar (programId prog) + Just version = programVersion prog + ] + +-- | Common implementation of 'generatePackageVersionMacros' and +-- 'generateToolVersionMacros'. +-- +generateMacros :: String -> String -> Version -> String +generateMacros prefix name version = + concat + ["#define ", prefix, "VERSION_",name," ",show (display version),"\n" + ,"#define MIN_", prefix, "VERSION_",name,"(major1,major2,minor) (\\\n" + ," (major1) < ",major1," || \\\n" + ," (major1) == ",major1," && (major2) < ",major2," || \\\n" + ," (major1) == ",major1," && (major2) == ",major2," && (minor) <= ",minor,")" + ,"\n\n" + ] + where + (major1:major2:minor:_) = map show (versionBranch version ++ repeat 0) + +-- | Generate the @CURRENT_PACKAGE_KEY@ definition for the package key +-- of the current package, if supported by the compiler +generatePackageKeyMacro :: LocalBuildInfo -> String +generatePackageKeyMacro lbi + | packageKeySupported (compiler lbi) = + "#define CURRENT_PACKAGE_KEY \"" ++ display (pkgKey lbi) ++ "\"\n\n" + | otherwise = "" + +fixchar :: Char -> Char +fixchar '-' = '_' +fixchar c = c diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Build/PathsModule.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Build/PathsModule.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Build/PathsModule.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Build/PathsModule.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,303 @@ +----------------------------------------------------------------------------- +-- | +-- Module : 
Distribution.Simple.Build.Macros +-- Copyright : Isaac Jones 2003-2005, +-- Ross Paterson 2006, +-- Duncan Coutts 2007-2008 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- Generating the Paths_pkgname module. +-- +-- This is a module that Cabal generates for the benefit of packages. It +-- enables them to find their version number and find any installed data files +-- at runtime. This code should probably be split off into another module. +-- +module Distribution.Simple.Build.PathsModule ( + generate, pkgPathEnvVar + ) where + +import Distribution.System + ( OS(Windows), buildOS, Arch(..), buildArch ) +import Distribution.Simple.Compiler + ( CompilerFlavor(..), compilerFlavor, compilerVersion ) +import Distribution.Package + ( packageId, packageName, packageVersion ) +import Distribution.PackageDescription + ( PackageDescription(..), hasLibs ) +import Distribution.Simple.LocalBuildInfo + ( LocalBuildInfo(..), InstallDirs(..) + , absoluteInstallDirs, prefixRelativeInstallDirs ) +import Distribution.Simple.Setup ( CopyDest(NoCopyDest) ) +import Distribution.Simple.BuildPaths + ( autogenModuleName ) +import Distribution.Simple.Utils + ( shortRelativePath ) +import Distribution.Text + ( display ) +import Distribution.Version + ( Version(..), orLaterVersion, withinRange ) + +import System.FilePath + ( pathSeparator ) +import Data.Maybe + ( fromJust, isNothing ) + +-- ------------------------------------------------------------ +-- * Building Paths_.hs +-- ------------------------------------------------------------ + +generate :: PackageDescription -> LocalBuildInfo -> String +generate pkg_descr lbi = + let pragmas + | absolute = "" + | supports_language_pragma = + "{-# LANGUAGE ForeignFunctionInterface #-}\n" + | otherwise = + "{-# OPTIONS_GHC -fffi #-}\n"++ + "{-# OPTIONS_JHC -fffi #-}\n" + + foreign_imports + | absolute = "" + | otherwise = + "import Foreign\n"++ + "import Foreign.C\n" + + reloc_imports + | reloc = + "import 
System.Environment (getExecutablePath)\n" + | otherwise = "" + + header = + pragmas++ + "module " ++ display paths_modulename ++ " (\n"++ + " version,\n"++ + " getBinDir, getLibDir, getDataDir, getLibexecDir,\n"++ + " getDataFileName, getSysconfDir\n"++ + " ) where\n"++ + "\n"++ + foreign_imports++ + "import qualified Control.Exception as Exception\n"++ + "import Data.Version (Version(..))\n"++ + "import System.Environment (getEnv)\n"++ + reloc_imports ++ + "import Prelude\n"++ + "\n"++ + "catchIO :: IO a -> (Exception.IOException -> IO a) -> IO a\n"++ + "catchIO = Exception.catch\n" ++ + "\n"++ + "version :: Version"++ + "\nversion = Version " ++ show branch ++ " " ++ show tags + where Version branch tags = packageVersion pkg_descr + + body + | reloc = + "\n\nbindirrel :: FilePath\n" ++ + "bindirrel = " ++ show flat_bindirreloc ++ + "\n"++ + "\ngetBinDir, getLibDir, getDataDir, getLibexecDir, getSysconfDir :: IO FilePath\n"++ + "getBinDir = "++mkGetEnvOrReloc "bindir" flat_bindirreloc++"\n"++ + "getLibDir = "++mkGetEnvOrReloc "libdir" flat_libdirreloc++"\n"++ + "getDataDir = "++mkGetEnvOrReloc "datadir" flat_datadirreloc++"\n"++ + "getLibexecDir = "++mkGetEnvOrReloc "libexecdir" flat_libexecdirreloc++"\n"++ + "getSysconfDir = "++mkGetEnvOrReloc "sysconfdir" flat_sysconfdirreloc++"\n"++ + "\n"++ + "getDataFileName :: FilePath -> IO FilePath\n"++ + "getDataFileName name = do\n"++ + " dir <- getDataDir\n"++ + " return (dir `joinFileName` name)\n"++ + "\n"++ + get_prefix_reloc_stuff++ + "\n"++ + filename_stuff + | absolute = + "\nbindir, libdir, datadir, libexecdir, sysconfdir :: FilePath\n"++ + "\nbindir = " ++ show flat_bindir ++ + "\nlibdir = " ++ show flat_libdir ++ + "\ndatadir = " ++ show flat_datadir ++ + "\nlibexecdir = " ++ show flat_libexecdir ++ + "\nsysconfdir = " ++ show flat_sysconfdir ++ + "\n"++ + "\ngetBinDir, getLibDir, getDataDir, getLibexecDir, getSysconfDir :: IO FilePath\n"++ + "getBinDir = "++mkGetEnvOr "bindir" "return bindir"++"\n"++ + 
"getLibDir = "++mkGetEnvOr "libdir" "return libdir"++"\n"++ + "getDataDir = "++mkGetEnvOr "datadir" "return datadir"++"\n"++ + "getLibexecDir = "++mkGetEnvOr "libexecdir" "return libexecdir"++"\n"++ + "getSysconfDir = "++mkGetEnvOr "sysconfdir" "return sysconfdir"++"\n"++ + "\n"++ + "getDataFileName :: FilePath -> IO FilePath\n"++ + "getDataFileName name = do\n"++ + " dir <- getDataDir\n"++ + " return (dir ++ "++path_sep++" ++ name)\n" + | otherwise = + "\nprefix, bindirrel :: FilePath" ++ + "\nprefix = " ++ show flat_prefix ++ + "\nbindirrel = " ++ show (fromJust flat_bindirrel) ++ + "\n\n"++ + "getBinDir :: IO FilePath\n"++ + "getBinDir = getPrefixDirRel bindirrel\n\n"++ + "getLibDir :: IO FilePath\n"++ + "getLibDir = "++mkGetDir flat_libdir flat_libdirrel++"\n\n"++ + "getDataDir :: IO FilePath\n"++ + "getDataDir = "++ mkGetEnvOr "datadir" + (mkGetDir flat_datadir flat_datadirrel)++"\n\n"++ + "getLibexecDir :: IO FilePath\n"++ + "getLibexecDir = "++mkGetDir flat_libexecdir flat_libexecdirrel++"\n\n"++ + "getSysconfDir :: IO FilePath\n"++ + "getSysconfDir = "++mkGetDir flat_sysconfdir flat_sysconfdirrel++"\n\n"++ + "getDataFileName :: FilePath -> IO FilePath\n"++ + "getDataFileName name = do\n"++ + " dir <- getDataDir\n"++ + " return (dir `joinFileName` name)\n"++ + "\n"++ + get_prefix_stuff++ + "\n"++ + filename_stuff + in header++body + + where + InstallDirs { + prefix = flat_prefix, + bindir = flat_bindir, + libdir = flat_libdir, + datadir = flat_datadir, + libexecdir = flat_libexecdir, + sysconfdir = flat_sysconfdir + } = absoluteInstallDirs pkg_descr lbi NoCopyDest + InstallDirs { + bindir = flat_bindirrel, + libdir = flat_libdirrel, + datadir = flat_datadirrel, + libexecdir = flat_libexecdirrel, + sysconfdir = flat_sysconfdirrel + } = prefixRelativeInstallDirs (packageId pkg_descr) lbi + + flat_bindirreloc = shortRelativePath flat_prefix flat_bindir + flat_libdirreloc = shortRelativePath flat_prefix flat_libdir + flat_datadirreloc = shortRelativePath 
flat_prefix flat_datadir + flat_libexecdirreloc = shortRelativePath flat_prefix flat_libexecdir + flat_sysconfdirreloc = shortRelativePath flat_prefix flat_sysconfdir + + mkGetDir _ (Just dirrel) = "getPrefixDirRel " ++ show dirrel + mkGetDir dir Nothing = "return " ++ show dir + + mkGetEnvOrReloc var dirrel = "catchIO (getEnv \""++var'++"\")" ++ + " (\\_ -> getPrefixDirReloc \"" ++ dirrel ++ + "\")" + where var' = pkgPathEnvVar pkg_descr var + + mkGetEnvOr var expr = "catchIO (getEnv \""++var'++"\")"++ + " (\\_ -> "++expr++")" + where var' = pkgPathEnvVar pkg_descr var + + -- In several cases we cannot make relocatable installations + absolute = + hasLibs pkg_descr -- we can only make progs relocatable + || isNothing flat_bindirrel -- if the bin dir is an absolute path + || not (supportsRelocatableProgs (compilerFlavor (compiler lbi))) + + reloc = relocatable lbi + + supportsRelocatableProgs GHC = case buildOS of + Windows -> True + _ -> False + supportsRelocatableProgs GHCJS = case buildOS of + Windows -> True + _ -> False + supportsRelocatableProgs _ = False + + paths_modulename = autogenModuleName pkg_descr + + get_prefix_stuff = get_prefix_win32 buildArch + + path_sep = show [pathSeparator] + + supports_language_pragma = + (compilerFlavor (compiler lbi) == GHC && + (compilerVersion (compiler lbi) + `withinRange` orLaterVersion (Version [6,6,1] []))) || + compilerFlavor (compiler lbi) == GHCJS + +-- | Generates the name of the environment variable controlling the path +-- component of interest. +pkgPathEnvVar :: PackageDescription + -> String -- ^ path component; one of \"bindir\", \"libdir\", + -- \"datadir\", \"libexecdir\", or \"sysconfdir\" + -> String -- ^ environment variable name +pkgPathEnvVar pkg_descr var = + showPkgName (packageName pkg_descr) ++ "_" ++ var + where + showPkgName = map fixchar . 
display + fixchar '-' = '_' + fixchar c = c + +get_prefix_reloc_stuff :: String +get_prefix_reloc_stuff = + "getPrefixDirReloc :: FilePath -> IO FilePath\n"++ + "getPrefixDirReloc dirRel = do\n"++ + " exePath <- getExecutablePath\n"++ + " let (bindir,_) = splitFileName exePath\n"++ + " return ((bindir `minusFileName` bindirrel) `joinFileName` dirRel)\n" + +get_prefix_win32 :: Arch -> String +get_prefix_win32 arch = + "getPrefixDirRel :: FilePath -> IO FilePath\n"++ + "getPrefixDirRel dirRel = try_size 2048 -- plenty, PATH_MAX is 512 under Win32.\n"++ + " where\n"++ + " try_size size = allocaArray (fromIntegral size) $ \\buf -> do\n"++ + " ret <- c_GetModuleFileName nullPtr buf size\n"++ + " case ret of\n"++ + " 0 -> return (prefix `joinFileName` dirRel)\n"++ + " _ | ret < size -> do\n"++ + " exePath <- peekCWString buf\n"++ + " let (bindir,_) = splitFileName exePath\n"++ + " return ((bindir `minusFileName` bindirrel) `joinFileName` dirRel)\n"++ + " | otherwise -> try_size (size * 2)\n"++ + "\n"++ + "foreign import " ++ cconv ++ " unsafe \"windows.h GetModuleFileNameW\"\n"++ + " c_GetModuleFileName :: Ptr () -> CWString -> Int32 -> IO Int32\n" + where cconv = case arch of + I386 -> "stdcall" + X86_64 -> "ccall" + _ -> error "win32 supported only with I386, X86_64" + +filename_stuff :: String +filename_stuff = + "minusFileName :: FilePath -> String -> FilePath\n"++ + "minusFileName dir \"\" = dir\n"++ + "minusFileName dir \".\" = dir\n"++ + "minusFileName dir suffix =\n"++ + " minusFileName (fst (splitFileName dir)) (fst (splitFileName suffix))\n"++ + "\n"++ + "joinFileName :: String -> String -> FilePath\n"++ + "joinFileName \"\" fname = fname\n"++ + "joinFileName \".\" fname = fname\n"++ + "joinFileName dir \"\" = dir\n"++ + "joinFileName dir fname\n"++ + " | isPathSeparator (last dir) = dir++fname\n"++ + " | otherwise = dir++pathSeparator:fname\n"++ + "\n"++ + "splitFileName :: FilePath -> (String, String)\n"++ + "splitFileName p = (reverse (path2++drive), reverse 
fname)\n"++ + " where\n"++ + " (path,drive) = case p of\n"++ + " (c:':':p') -> (reverse p',[':',c])\n"++ + " _ -> (reverse p ,\"\")\n"++ + " (fname,path1) = break isPathSeparator path\n"++ + " path2 = case path1 of\n"++ + " [] -> \".\"\n"++ + " [_] -> path1 -- don't remove the trailing slash if \n"++ + " -- there is only one character\n"++ + " (c:path') | isPathSeparator c -> path'\n"++ + " _ -> path1\n"++ + "\n"++ + "pathSeparator :: Char\n"++ + (case buildOS of + Windows -> "pathSeparator = '\\\\'\n" + _ -> "pathSeparator = '/'\n") ++ + "\n"++ + "isPathSeparator :: Char -> Bool\n"++ + (case buildOS of + Windows -> "isPathSeparator c = c == '/' || c == '\\\\'\n" + _ -> "isPathSeparator c = c == '/'\n") diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Build.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Build.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Build.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Build.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,605 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.Build +-- Copyright : Isaac Jones 2003-2005, +-- Ross Paterson 2006, +-- Duncan Coutts 2007-2008, 2012 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This is the entry point to actually building the modules in a package. It +-- doesn't actually do much itself, most of the work is delegated to +-- compiler-specific actions. It does do some non-compiler specific bits like +-- running pre-processors. 
+-- + +module Distribution.Simple.Build ( + build, repl, + startInterpreter, + + initialBuildSteps, + writeAutogenFiles, + ) where + +import qualified Distribution.Simple.GHC as GHC +import qualified Distribution.Simple.GHCJS as GHCJS +import qualified Distribution.Simple.JHC as JHC +import qualified Distribution.Simple.LHC as LHC +import qualified Distribution.Simple.UHC as UHC +import qualified Distribution.Simple.HaskellSuite as HaskellSuite + +import qualified Distribution.Simple.Build.Macros as Build.Macros +import qualified Distribution.Simple.Build.PathsModule as Build.PathsModule + +import Distribution.Package + ( Package(..), PackageName(..), PackageIdentifier(..) + , Dependency(..), thisPackageVersion, mkPackageKey, packageName ) +import Distribution.Simple.Compiler + ( Compiler, CompilerFlavor(..), compilerFlavor + , PackageDB(..), PackageDBStack, packageKeySupported ) +import Distribution.PackageDescription + ( PackageDescription(..), BuildInfo(..), Library(..), Executable(..) + , TestSuite(..), TestSuiteInterface(..), Benchmark(..) + , BenchmarkInterface(..), defaultRenaming ) +import qualified Distribution.InstalledPackageInfo as IPI +import qualified Distribution.ModuleName as ModuleName +import Distribution.ModuleName (ModuleName) + +import Distribution.Simple.Setup + ( Flag(..), BuildFlags(..), ReplFlags(..), fromFlag ) +import Distribution.Simple.BuildTarget + ( BuildTarget(..), readBuildTargets ) +import Distribution.Simple.PreProcess + ( preprocessComponent, PPSuffixHandler ) +import Distribution.Simple.LocalBuildInfo + ( LocalBuildInfo(compiler, buildDir, withPackageDB, withPrograms, pkgKey) + , Component(..), componentName, getComponent, componentBuildInfo + , ComponentLocalBuildInfo(..), pkgEnabledComponents + , withComponentsInBuildOrder, componentsInBuildOrder + , ComponentName(..), showComponentName + , ComponentDisabledReason(..), componentDisabledReason + , inplacePackageId, LibraryName(..) 
) +import Distribution.Simple.Program.Types +import Distribution.Simple.Program.Db +import qualified Distribution.Simple.Program.HcPkg as HcPkg +import Distribution.Simple.BuildPaths + ( autogenModulesDir, autogenModuleName, cppHeaderName, exeExtension ) +import Distribution.Simple.Register + ( registerPackage, inplaceInstalledPackageInfo ) +import Distribution.Simple.Test.LibV09 ( stubFilePath, stubName ) +import Distribution.Simple.Utils + ( createDirectoryIfMissingVerbose, rewriteFile + , die, info, debug, warn, setupMessage ) + +import Distribution.Verbosity + ( Verbosity ) +import Distribution.Text + ( display ) + +import qualified Data.Map as Map +import Data.Maybe + ( maybeToList ) +import Data.Either + ( partitionEithers ) +import Data.List + ( intersect, intercalate ) +import Control.Monad + ( when, unless, forM_ ) +import System.FilePath + ( (), (<.>) ) +import System.Directory + ( getCurrentDirectory, removeDirectoryRecursive, removeFile + , doesDirectoryExist, doesFileExist ) + +-- ----------------------------------------------------------------------------- +-- |Build the libraries and executables in this package. 
+ +build :: PackageDescription -- ^ Mostly information from the .cabal file + -> LocalBuildInfo -- ^ Configuration information + -> BuildFlags -- ^ Flags that the user passed to build + -> [ PPSuffixHandler ] -- ^ preprocessors to run before compiling + -> IO () +build pkg_descr lbi flags suffixes = do + let distPref = fromFlag (buildDistPref flags) + verbosity = fromFlag (buildVerbosity flags) + + targets <- readBuildTargets pkg_descr (buildArgs flags) + targets' <- checkBuildTargets verbosity pkg_descr targets + let componentsToBuild = map fst (componentsInBuildOrder lbi (map fst targets')) + info verbosity $ "Component build order: " + ++ intercalate ", " (map showComponentName componentsToBuild) + + initialBuildSteps distPref pkg_descr lbi verbosity + when (null targets) $ + -- Only bother with this message if we're building the whole package + setupMessage verbosity "Building" (packageId pkg_descr) + + internalPackageDB <- createInternalPackageDB verbosity lbi distPref + + withComponentsInBuildOrder pkg_descr lbi componentsToBuild $ \comp clbi -> + let bi = componentBuildInfo comp + progs' = addInternalBuildTools pkg_descr lbi bi (withPrograms lbi) + lbi' = lbi { + withPrograms = progs', + withPackageDB = withPackageDB lbi ++ [internalPackageDB] + } + in buildComponent verbosity (buildNumJobs flags) pkg_descr + lbi' suffixes comp clbi distPref + + +repl :: PackageDescription -- ^ Mostly information from the .cabal file + -> LocalBuildInfo -- ^ Configuration information + -> ReplFlags -- ^ Flags that the user passed to build + -> [ PPSuffixHandler ] -- ^ preprocessors to run before compiling + -> [String] + -> IO () +repl pkg_descr lbi flags suffixes args = do + let distPref = fromFlag (replDistPref flags) + verbosity = fromFlag (replVerbosity flags) + + targets <- readBuildTargets pkg_descr args + targets' <- case targets of + [] -> return $ take 1 [ componentName c + | c <- pkgEnabledComponents pkg_descr ] + [target] -> fmap (map fst) (checkBuildTargets 
verbosity pkg_descr [target]) + _ -> die $ "The 'repl' command does not support multiple targets at once." + let componentsToBuild = componentsInBuildOrder lbi targets' + componentForRepl = last componentsToBuild + debug verbosity $ "Component build order: " + ++ intercalate ", " + [ showComponentName c | (c,_) <- componentsToBuild ] + + initialBuildSteps distPref pkg_descr lbi verbosity + + internalPackageDB <- createInternalPackageDB verbosity lbi distPref + + let lbiForComponent comp lbi' = + lbi' { + withPackageDB = withPackageDB lbi ++ [internalPackageDB], + withPrograms = addInternalBuildTools pkg_descr lbi' + (componentBuildInfo comp) (withPrograms lbi') + } + + -- build any dependent components + sequence_ + [ let comp = getComponent pkg_descr cname + lbi' = lbiForComponent comp lbi + in buildComponent verbosity NoFlag + pkg_descr lbi' suffixes comp clbi distPref + | (cname, clbi) <- init componentsToBuild ] + + -- REPL for target components + let (cname, clbi) = componentForRepl + comp = getComponent pkg_descr cname + lbi' = lbiForComponent comp lbi + in replComponent verbosity pkg_descr lbi' suffixes comp clbi distPref + + +-- | Start an interpreter without loading any package files. +startInterpreter :: Verbosity -> ProgramDb -> Compiler -> PackageDBStack -> IO () +startInterpreter verbosity programDb comp packageDBs = + case compilerFlavor comp of + GHC -> GHC.startInterpreter verbosity programDb comp packageDBs + GHCJS -> GHCJS.startInterpreter verbosity programDb comp packageDBs + _ -> die "A REPL is not supported with this compiler." + +buildComponent :: Verbosity + -> Flag (Maybe Int) + -> PackageDescription + -> LocalBuildInfo + -> [PPSuffixHandler] + -> Component + -> ComponentLocalBuildInfo + -> FilePath + -> IO () +buildComponent verbosity numJobs pkg_descr lbi suffixes + comp@(CLib lib) clbi distPref = do + preprocessComponent pkg_descr comp lbi False verbosity suffixes + info verbosity "Building library..." 
+ buildLib verbosity numJobs pkg_descr lbi lib clbi + + -- Register the library in-place, so exes can depend + -- on internally defined libraries. + pwd <- getCurrentDirectory + let -- The in place registration uses the "-inplace" suffix, not an ABI hash + ipkgid = inplacePackageId (packageId installedPkgInfo) + installedPkgInfo = inplaceInstalledPackageInfo pwd distPref pkg_descr + ipkgid lib lbi clbi + + registerPackage verbosity + installedPkgInfo pkg_descr lbi True -- True meaning in place + (withPackageDB lbi) + + +buildComponent verbosity numJobs pkg_descr lbi suffixes + comp@(CExe exe) clbi _ = do + preprocessComponent pkg_descr comp lbi False verbosity suffixes + info verbosity $ "Building executable " ++ exeName exe ++ "..." + buildExe verbosity numJobs pkg_descr lbi exe clbi + + +buildComponent verbosity numJobs pkg_descr lbi suffixes + comp@(CTest test@TestSuite { testInterface = TestSuiteExeV10{} }) + clbi _distPref = do + let exe = testSuiteExeV10AsExe test + preprocessComponent pkg_descr comp lbi False verbosity suffixes + info verbosity $ "Building test suite " ++ testName test ++ "..." + buildExe verbosity numJobs pkg_descr lbi exe clbi + + +buildComponent verbosity numJobs pkg_descr lbi0 suffixes + comp@(CTest + test@TestSuite { testInterface = TestSuiteLibV09{} }) + clbi -- This ComponentLocalBuildInfo corresponds to a detailed + -- test suite and not a real component. It should not + -- be used, except to construct the CLBIs for the + -- library and stub executable that will actually be + -- built. + distPref = do + pwd <- getCurrentDirectory + let (pkg, lib, libClbi, lbi, ipi, exe, exeClbi) = + testSuiteLibV09AsLibAndExe pkg_descr test clbi lbi0 distPref pwd + preprocessComponent pkg_descr comp lbi False verbosity suffixes + info verbosity $ "Building test suite " ++ testName test ++ "..." 
+ buildLib verbosity numJobs pkg lbi lib libClbi + registerPackage verbosity ipi pkg lbi True $ withPackageDB lbi + buildExe verbosity numJobs pkg_descr lbi exe exeClbi + + +buildComponent _ _ _ _ _ + (CTest TestSuite { testInterface = TestSuiteUnsupported tt }) + _ _ = + die $ "No support for building test suite type " ++ display tt + + +buildComponent verbosity numJobs pkg_descr lbi suffixes + comp@(CBench bm@Benchmark { benchmarkInterface = BenchmarkExeV10 {} }) + clbi _ = do + let (exe, exeClbi) = benchmarkExeV10asExe bm clbi + preprocessComponent pkg_descr comp lbi False verbosity suffixes + info verbosity $ "Building benchmark " ++ benchmarkName bm ++ "..." + buildExe verbosity numJobs pkg_descr lbi exe exeClbi + + +buildComponent _ _ _ _ _ + (CBench Benchmark { benchmarkInterface = BenchmarkUnsupported tt }) + _ _ = + die $ "No support for building benchmark type " ++ display tt + + +replComponent :: Verbosity + -> PackageDescription + -> LocalBuildInfo + -> [PPSuffixHandler] + -> Component + -> ComponentLocalBuildInfo + -> FilePath + -> IO () +replComponent verbosity pkg_descr lbi suffixes + comp@(CLib lib) clbi _ = do + preprocessComponent pkg_descr comp lbi False verbosity suffixes + replLib verbosity pkg_descr lbi lib clbi + +replComponent verbosity pkg_descr lbi suffixes + comp@(CExe exe) clbi _ = do + preprocessComponent pkg_descr comp lbi False verbosity suffixes + replExe verbosity pkg_descr lbi exe clbi + + +replComponent verbosity pkg_descr lbi suffixes + comp@(CTest test@TestSuite { testInterface = TestSuiteExeV10{} }) + clbi _distPref = do + let exe = testSuiteExeV10AsExe test + preprocessComponent pkg_descr comp lbi False verbosity suffixes + replExe verbosity pkg_descr lbi exe clbi + + +replComponent verbosity pkg_descr lbi0 suffixes + comp@(CTest + test@TestSuite { testInterface = TestSuiteLibV09{} }) + clbi distPref = do + pwd <- getCurrentDirectory + let (pkg, lib, libClbi, lbi, _, _, _) = + testSuiteLibV09AsLibAndExe pkg_descr test clbi 
lbi0 distPref pwd + preprocessComponent pkg_descr comp lbi False verbosity suffixes + replLib verbosity pkg lbi lib libClbi + + +replComponent _ _ _ _ + (CTest TestSuite { testInterface = TestSuiteUnsupported tt }) + _ _ = + die $ "No support for building test suite type " ++ display tt + + +replComponent verbosity pkg_descr lbi suffixes + comp@(CBench bm@Benchmark { benchmarkInterface = BenchmarkExeV10 {} }) + clbi _ = do + let (exe, exeClbi) = benchmarkExeV10asExe bm clbi + preprocessComponent pkg_descr comp lbi False verbosity suffixes + replExe verbosity pkg_descr lbi exe exeClbi + + +replComponent _ _ _ _ + (CBench Benchmark { benchmarkInterface = BenchmarkUnsupported tt }) + _ _ = + die $ "No support for building benchmark type " ++ display tt + +---------------------------------------------------- +-- Shared code for buildComponent and replComponent +-- + +-- | Translate a exe-style 'TestSuite' component into an exe for building +testSuiteExeV10AsExe :: TestSuite -> Executable +testSuiteExeV10AsExe test@TestSuite { testInterface = TestSuiteExeV10 _ mainFile } = + Executable { + exeName = testName test, + modulePath = mainFile, + buildInfo = testBuildInfo test + } +testSuiteExeV10AsExe TestSuite{} = error "testSuiteExeV10AsExe: wrong kind" + +-- | Translate a lib-style 'TestSuite' component into a lib + exe for building +testSuiteLibV09AsLibAndExe :: PackageDescription + -> TestSuite + -> ComponentLocalBuildInfo + -> LocalBuildInfo + -> FilePath + -> FilePath + -> (PackageDescription, + Library, ComponentLocalBuildInfo, + LocalBuildInfo, + IPI.InstalledPackageInfo_ ModuleName, + Executable, ComponentLocalBuildInfo) +testSuiteLibV09AsLibAndExe pkg_descr + test@TestSuite { testInterface = TestSuiteLibV09 _ m } + clbi lbi distPref pwd = + (pkg, lib, libClbi, lbi', ipi, exe, exeClbi) + where + bi = testBuildInfo test + lib = Library { + exposedModules = [ m ], + reexportedModules = [], + requiredSignatures = [], + exposedSignatures = [], + libExposed = True, + 
libBuildInfo = bi + } + libClbi = LibComponentLocalBuildInfo + { componentPackageDeps = componentPackageDeps clbi + , componentPackageRenaming = componentPackageRenaming clbi + , componentLibraries = [LibraryName (testName test)] + , componentExposedModules = [IPI.ExposedModule m Nothing Nothing] + } + pkg = pkg_descr { + package = (package pkg_descr) { + pkgName = PackageName (testName test) + } + , buildDepends = targetBuildDepends $ testBuildInfo test + , executables = [] + , testSuites = [] + , library = Just lib + } + -- Hack to make the library compile with the right package key. + -- Probably the "right" way to do this is move this information to + -- the ComponentLocalBuildInfo, but it seems odd that a single package + -- can define multiple actual packages. + lbi' = lbi { + pkgKey = mkPackageKey (packageKeySupported (compiler lbi)) + (package pkg) [] [] + } + ipkgid = inplacePackageId (packageId pkg) + ipi = inplaceInstalledPackageInfo pwd distPref pkg ipkgid lib lbi' libClbi + testDir = buildDir lbi stubName test + stubName test ++ "-tmp" + testLibDep = thisPackageVersion $ package pkg + exe = Executable { + exeName = stubName test, + modulePath = stubFilePath test, + buildInfo = (testBuildInfo test) { + hsSourceDirs = [ testDir ], + targetBuildDepends = testLibDep + : (targetBuildDepends $ testBuildInfo test), + targetBuildRenaming = + Map.insert (packageName pkg) defaultRenaming + (targetBuildRenaming $ testBuildInfo test) + } + } + -- | The stub executable needs a new 'ComponentLocalBuildInfo' + -- that exposes the relevant test suite library. 
+ exeClbi = ExeComponentLocalBuildInfo { + componentPackageDeps = + (IPI.installedPackageId ipi, packageId ipi) + : (filter (\(_, x) -> let PackageName name = pkgName x + in name == "Cabal" || name == "base") + (componentPackageDeps clbi)), + componentPackageRenaming = + Map.insert (packageName ipi) defaultRenaming + (componentPackageRenaming clbi) + } +testSuiteLibV09AsLibAndExe _ TestSuite{} _ _ _ _ = error "testSuiteLibV09AsLibAndExe: wrong kind" + + +-- | Translate a exe-style 'Benchmark' component into an exe for building +benchmarkExeV10asExe :: Benchmark -> ComponentLocalBuildInfo + -> (Executable, ComponentLocalBuildInfo) +benchmarkExeV10asExe bm@Benchmark { benchmarkInterface = BenchmarkExeV10 _ f } + clbi = + (exe, exeClbi) + where + exe = Executable { + exeName = benchmarkName bm, + modulePath = f, + buildInfo = benchmarkBuildInfo bm + } + exeClbi = ExeComponentLocalBuildInfo { + componentPackageDeps = componentPackageDeps clbi, + componentPackageRenaming = componentPackageRenaming clbi + } +benchmarkExeV10asExe Benchmark{} _ = error "benchmarkExeV10asExe: wrong kind" + +-- | Initialize a new package db file for libraries defined +-- internally to the package. 
+createInternalPackageDB :: Verbosity -> LocalBuildInfo -> FilePath + -> IO PackageDB +createInternalPackageDB verbosity lbi distPref = do + case compilerFlavor (compiler lbi) of + GHC -> createWith $ GHC.hcPkgInfo (withPrograms lbi) + GHCJS -> createWith $ GHCJS.hcPkgInfo (withPrograms lbi) + LHC -> createWith $ LHC.hcPkgInfo (withPrograms lbi) + _ -> return packageDB + where + dbPath = distPref "package.conf.inplace" + packageDB = SpecificPackageDB dbPath + createWith hpi = do + dir_exists <- doesDirectoryExist dbPath + if dir_exists + then removeDirectoryRecursive dbPath + else do file_exists <- doesFileExist dbPath + when file_exists $ removeFile dbPath + if HcPkg.useSingleFileDb hpi + then writeFile dbPath "[]" + else HcPkg.init hpi verbosity dbPath + return packageDB + +addInternalBuildTools :: PackageDescription -> LocalBuildInfo -> BuildInfo + -> ProgramDb -> ProgramDb +addInternalBuildTools pkg lbi bi progs = + foldr updateProgram progs internalBuildTools + where + internalBuildTools = + [ simpleConfiguredProgram toolName (FoundOnSystem toolLocation) + | toolName <- toolNames + , let toolLocation = buildDir lbi toolName toolName <.> exeExtension ] + toolNames = intersect buildToolNames internalExeNames + internalExeNames = map exeName (executables pkg) + buildToolNames = map buildToolName (buildTools bi) + where + buildToolName (Dependency (PackageName name) _ ) = name + + +-- TODO: build separate libs in separate dirs so that we can build +-- multiple libs, e.g. 
for 'LibTest' library-style test suites +buildLib :: Verbosity -> Flag (Maybe Int) + -> PackageDescription -> LocalBuildInfo + -> Library -> ComponentLocalBuildInfo -> IO () +buildLib verbosity numJobs pkg_descr lbi lib clbi = + case compilerFlavor (compiler lbi) of + GHC -> GHC.buildLib verbosity numJobs pkg_descr lbi lib clbi + GHCJS -> GHCJS.buildLib verbosity numJobs pkg_descr lbi lib clbi + JHC -> JHC.buildLib verbosity pkg_descr lbi lib clbi + LHC -> LHC.buildLib verbosity pkg_descr lbi lib clbi + UHC -> UHC.buildLib verbosity pkg_descr lbi lib clbi + HaskellSuite {} -> HaskellSuite.buildLib verbosity pkg_descr lbi lib clbi + _ -> die "Building is not supported with this compiler." + +buildExe :: Verbosity -> Flag (Maybe Int) + -> PackageDescription -> LocalBuildInfo + -> Executable -> ComponentLocalBuildInfo -> IO () +buildExe verbosity numJobs pkg_descr lbi exe clbi = + case compilerFlavor (compiler lbi) of + GHC -> GHC.buildExe verbosity numJobs pkg_descr lbi exe clbi + GHCJS -> GHCJS.buildExe verbosity numJobs pkg_descr lbi exe clbi + JHC -> JHC.buildExe verbosity pkg_descr lbi exe clbi + LHC -> LHC.buildExe verbosity pkg_descr lbi exe clbi + UHC -> UHC.buildExe verbosity pkg_descr lbi exe clbi + _ -> die "Building is not supported with this compiler." + +replLib :: Verbosity -> PackageDescription -> LocalBuildInfo + -> Library -> ComponentLocalBuildInfo -> IO () +replLib verbosity pkg_descr lbi lib clbi = + case compilerFlavor (compiler lbi) of + -- 'cabal repl' doesn't need to support 'ghc --make -j', so we just pass + -- NoFlag as the numJobs parameter. + GHC -> GHC.replLib verbosity NoFlag pkg_descr lbi lib clbi + GHCJS -> GHCJS.replLib verbosity NoFlag pkg_descr lbi lib clbi + _ -> die "A REPL is not supported for this compiler." 
+ +replExe :: Verbosity -> PackageDescription -> LocalBuildInfo + -> Executable -> ComponentLocalBuildInfo -> IO () +replExe verbosity pkg_descr lbi exe clbi = + case compilerFlavor (compiler lbi) of + GHC -> GHC.replExe verbosity NoFlag pkg_descr lbi exe clbi + GHCJS -> GHCJS.replExe verbosity NoFlag pkg_descr lbi exe clbi + _ -> die "A REPL is not supported for this compiler." + + +initialBuildSteps :: FilePath -- ^"dist" prefix + -> PackageDescription -- ^mostly information from the .cabal file + -> LocalBuildInfo -- ^Configuration information + -> Verbosity -- ^The verbosity to use + -> IO () +initialBuildSteps _distPref pkg_descr lbi verbosity = do + -- check that there's something to build + let buildInfos = + map libBuildInfo (maybeToList (library pkg_descr)) ++ + map buildInfo (executables pkg_descr) + unless (any buildable buildInfos) $ do + let name = display (packageId pkg_descr) + die ("Package " ++ name ++ " can't be built on this system.") + + createDirectoryIfMissingVerbose verbosity True (buildDir lbi) + + writeAutogenFiles verbosity pkg_descr lbi + +-- | Generate and write out the Paths_.hs and cabal_macros.h files +-- +writeAutogenFiles :: Verbosity + -> PackageDescription + -> LocalBuildInfo + -> IO () +writeAutogenFiles verbosity pkg lbi = do + createDirectoryIfMissingVerbose verbosity True (autogenModulesDir lbi) + + let pathsModulePath = autogenModulesDir lbi + ModuleName.toFilePath (autogenModuleName pkg) <.> "hs" + rewriteFile pathsModulePath (Build.PathsModule.generate pkg lbi) + + let cppHeaderPath = autogenModulesDir lbi cppHeaderName + rewriteFile cppHeaderPath (Build.Macros.generate pkg lbi) + +-- | Check that the given build targets are valid in the current context. +-- +-- Also swizzle into a more convenient form. 
+-- +checkBuildTargets :: Verbosity -> PackageDescription -> [BuildTarget] + -> IO [(ComponentName, Maybe (Either ModuleName FilePath))] +checkBuildTargets _ pkg [] = + return [ (componentName c, Nothing) | c <- pkgEnabledComponents pkg ] + +checkBuildTargets verbosity pkg targets = do + + let (enabled, disabled) = + partitionEithers + [ case componentDisabledReason (getComponent pkg cname) of + Nothing -> Left target' + Just reason -> Right (cname, reason) + | target <- targets + , let target'@(cname,_) = swizzleTarget target ] + + case disabled of + [] -> return () + ((cname,reason):_) -> die $ formatReason (showComponentName cname) reason + + forM_ [ (c, t) | (c, Just t) <- enabled ] $ \(c, t) -> + warn verbosity $ "Ignoring '" ++ either display id t ++ ". The whole " + ++ showComponentName c ++ " will be built. (Support for " + ++ "module and file targets has not been implemented yet.)" + + return enabled + + where + swizzleTarget (BuildTargetComponent c) = (c, Nothing) + swizzleTarget (BuildTargetModule c m) = (c, Just (Left m)) + swizzleTarget (BuildTargetFile c f) = (c, Just (Right f)) + + formatReason cn DisabledComponent = + "Cannot build the " ++ cn ++ " because the component is marked " + ++ "as disabled in the .cabal file." + formatReason cn DisabledAllTests = + "Cannot build the " ++ cn ++ " because test suites are not " + ++ "enabled. Run configure with the flag --enable-tests" + formatReason cn DisabledAllBenchmarks = + "Cannot build the " ++ cn ++ " because benchmarks are not " + ++ "enabled. 
Re-run configure with the flag --enable-benchmarks" diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/BuildPaths.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/BuildPaths.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/BuildPaths.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/BuildPaths.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,121 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.BuildPaths +-- Copyright : Isaac Jones 2003-2004, +-- Duncan Coutts 2008 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- A bunch of dirs, paths and file names used for intermediate build steps. +-- + +module Distribution.Simple.BuildPaths ( + defaultDistPref, srcPref, + hscolourPref, haddockPref, + autogenModulesDir, + + autogenModuleName, + cppHeaderName, + haddockName, + + mkLibName, + mkProfLibName, + mkSharedLibName, + + exeExtension, + objExtension, + dllExtension, + + ) where + + +import System.FilePath ((), (<.>)) + +import Distribution.Package + ( packageName ) +import Distribution.ModuleName (ModuleName) +import qualified Distribution.ModuleName as ModuleName +import Distribution.Compiler + ( CompilerId(..) ) +import Distribution.PackageDescription (PackageDescription) +import Distribution.Simple.LocalBuildInfo + ( LocalBuildInfo(buildDir), LibraryName(..) 
) +import Distribution.Simple.Setup (defaultDistPref) +import Distribution.Text + ( display ) +import Distribution.System (OS(..), buildOS) + +-- --------------------------------------------------------------------------- +-- Build directories and files + +srcPref :: FilePath -> FilePath +srcPref distPref = distPref "src" + +hscolourPref :: FilePath -> PackageDescription -> FilePath +hscolourPref = haddockPref + +haddockPref :: FilePath -> PackageDescription -> FilePath +haddockPref distPref pkg_descr + = distPref "doc" "html" display (packageName pkg_descr) + +-- |The directory in which we put auto-generated modules +autogenModulesDir :: LocalBuildInfo -> String +autogenModulesDir lbi = buildDir lbi "autogen" + +cppHeaderName :: String +cppHeaderName = "cabal_macros.h" + +-- |The name of the auto-generated module associated with a package +autogenModuleName :: PackageDescription -> ModuleName +autogenModuleName pkg_descr = + ModuleName.fromString $ + "Paths_" ++ map fixchar (display (packageName pkg_descr)) + where fixchar '-' = '_' + fixchar c = c + +haddockName :: PackageDescription -> FilePath +haddockName pkg_descr = display (packageName pkg_descr) <.> "haddock" + +-- --------------------------------------------------------------------------- +-- Library file names + +mkLibName :: LibraryName -> String +mkLibName (LibraryName lib) = "lib" ++ lib <.> "a" + +mkProfLibName :: LibraryName -> String +mkProfLibName (LibraryName lib) = "lib" ++ lib ++ "_p" <.> "a" + +-- Implement proper name mangling for dynamical shared objects +-- libHS- +-- e.g. 
libHSbase-2.1-ghc6.6.1.so +mkSharedLibName :: CompilerId -> LibraryName -> String +mkSharedLibName (CompilerId compilerFlavor compilerVersion) (LibraryName lib) + = "lib" ++ lib ++ "-" ++ comp <.> dllExtension + where comp = display compilerFlavor ++ display compilerVersion + +-- ------------------------------------------------------------ +-- * Platform file extensions +-- ------------------------------------------------------------ + +-- ToDo: This should be determined via autoconf (AC_EXEEXT) +-- | Extension for executable files +-- (typically @\"\"@ on Unix and @\"exe\"@ on Windows or OS\/2) +exeExtension :: String +exeExtension = case buildOS of + Windows -> "exe" + _ -> "" + +-- TODO: This should be determined via autoconf (AC_OBJEXT) +-- | Extension for object files. For GHC the extension is @\"o\"@. +objExtension :: String +objExtension = "o" + +-- | Extension for dynamically linked (or shared) libraries +-- (typically @\"so\"@ on Unix and @\"dll\"@ on Windows) +dllExtension :: String +dllExtension = case buildOS of + Windows -> "dll" + OSX -> "dylib" + _ -> "so" diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/BuildTarget.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/BuildTarget.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/BuildTarget.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/BuildTarget.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,939 @@ +{-# LANGUAGE CPP #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.BuildTargets +-- Copyright : (c) Duncan Coutts 2012 +-- License : BSD-like +-- +-- Maintainer : duncan@community.haskell.org +-- +-- Handling for user-specified build targets +----------------------------------------------------------------------------- +module Distribution.Simple.BuildTarget ( + + -- * Build targets + 
BuildTarget(..), + readBuildTargets, + + -- * Parsing user build targets + UserBuildTarget, + readUserBuildTargets, + UserBuildTargetProblem(..), + reportUserBuildTargetProblems, + + -- * Resolving build targets + resolveBuildTargets, + BuildTargetProblem(..), + reportBuildTargetProblems, + ) where + +import Distribution.Package + ( Package(..), PackageId, packageName ) + +import Distribution.PackageDescription + ( PackageDescription + , Executable(..) + , TestSuite(..), TestSuiteInterface(..), testModules + , Benchmark(..), BenchmarkInterface(..), benchmarkModules + , BuildInfo(..), libModules, exeModules ) +import Distribution.ModuleName + ( ModuleName, toFilePath ) +import Distribution.Simple.LocalBuildInfo + ( Component(..), ComponentName(..) + , pkgComponents, componentName, componentBuildInfo ) + +import Distribution.Text + ( display ) +import Distribution.Simple.Utils + ( die, lowercase, equating ) + +import Data.List + ( nub, stripPrefix, sortBy, groupBy, partition, intercalate ) +import Data.Ord +import Data.Maybe + ( listToMaybe, catMaybes ) +import Data.Either + ( partitionEithers ) +import qualified Data.Map as Map +import Control.Monad +#if __GLASGOW_HASKELL__ < 710 +import Control.Applicative (Applicative(..)) +#endif +import Control.Applicative (Alternative(..)) +import qualified Distribution.Compat.ReadP as Parse +import Distribution.Compat.ReadP + ( (+++), (<++) ) +import Data.Char + ( isSpace, isAlphaNum ) +import System.FilePath as FilePath + ( dropExtension, normalise, splitDirectories, joinPath, splitPath + , hasTrailingPathSeparator ) +import System.Directory + ( doesFileExist, doesDirectoryExist ) + +-- ------------------------------------------------------------ +-- * User build targets +-- ------------------------------------------------------------ + +-- | Various ways that a user may specify a build target. +-- +data UserBuildTarget = + + -- | A target specified by a single name. This could be a component + -- module or file. 
+ -- + -- > cabal build foo + -- > cabal build Data.Foo + -- > cabal build Data/Foo.hs Data/Foo.hsc + -- + UserBuildTargetSingle String + + -- | A target specified by a qualifier and name. This could be a component + -- name qualified by the component namespace kind, or a module or file + -- qualified by the component name. + -- + -- > cabal build lib:foo exe:foo + -- > cabal build foo:Data.Foo + -- > cabal build foo:Data/Foo.hs + -- + | UserBuildTargetDouble String String + + -- A fully qualified target, either a module or file qualified by a + -- component name with the component namespace kind. + -- + -- > cabal build lib:foo:Data/Foo.hs exe:foo:Data/Foo.hs + -- > cabal build lib:foo:Data.Foo exe:foo:Data.Foo + -- + | UserBuildTargetTriple String String String + deriving (Show, Eq, Ord) + + +-- ------------------------------------------------------------ +-- * Resolved build targets +-- ------------------------------------------------------------ + +-- | A fully resolved build target. +-- +data BuildTarget = + + -- | A specific component + -- + BuildTargetComponent ComponentName + + -- | A specific module within a specific component. + -- + | BuildTargetModule ComponentName ModuleName + + -- | A specific file within a specific component. 
+ -- + | BuildTargetFile ComponentName FilePath + deriving (Show,Eq) + + +-- ------------------------------------------------------------ +-- * Do everything +-- ------------------------------------------------------------ + +readBuildTargets :: PackageDescription -> [String] -> IO [BuildTarget] +readBuildTargets pkg targetStrs = do + let (uproblems, utargets) = readUserBuildTargets targetStrs + reportUserBuildTargetProblems uproblems + + utargets' <- mapM checkTargetExistsAsFile utargets + + let (bproblems, btargets) = resolveBuildTargets pkg utargets' + reportBuildTargetProblems bproblems + + return btargets + +checkTargetExistsAsFile :: UserBuildTarget -> IO (UserBuildTarget, Bool) +checkTargetExistsAsFile t = do + fexists <- existsAsFile (fileComponentOfTarget t) + return (t, fexists) + + where + existsAsFile f = do + exists <- doesFileExist f + case splitPath f of + (d:_) | hasTrailingPathSeparator d -> doesDirectoryExist d + (d:_:_) | not exists -> doesDirectoryExist d + _ -> return exists + + fileComponentOfTarget (UserBuildTargetSingle s1) = s1 + fileComponentOfTarget (UserBuildTargetDouble _ s2) = s2 + fileComponentOfTarget (UserBuildTargetTriple _ _ s3) = s3 + + +-- ------------------------------------------------------------ +-- * Parsing user targets +-- ------------------------------------------------------------ + +readUserBuildTargets :: [String] -> ([UserBuildTargetProblem] + ,[UserBuildTarget]) +readUserBuildTargets = partitionEithers . 
map readUserBuildTarget + +readUserBuildTarget :: String -> Either UserBuildTargetProblem + UserBuildTarget +readUserBuildTarget targetstr = + case readPToMaybe parseTargetApprox targetstr of + Nothing -> Left (UserBuildTargetUnrecognised targetstr) + Just tgt -> Right tgt + + where + parseTargetApprox :: Parse.ReadP r UserBuildTarget + parseTargetApprox = + (do a <- tokenQ + return (UserBuildTargetSingle a)) + +++ (do a <- token + _ <- Parse.char ':' + b <- tokenQ + return (UserBuildTargetDouble a b)) + +++ (do a <- token + _ <- Parse.char ':' + b <- token + _ <- Parse.char ':' + c <- tokenQ + return (UserBuildTargetTriple a b c)) + + token = Parse.munch1 (\x -> not (isSpace x) && x /= ':') + tokenQ = parseHaskellString <++ token + parseHaskellString :: Parse.ReadP r String + parseHaskellString = Parse.readS_to_P reads + + readPToMaybe :: Parse.ReadP a a -> String -> Maybe a + readPToMaybe p str = listToMaybe [ r | (r,s) <- Parse.readP_to_S p str + , all isSpace s ] + +data UserBuildTargetProblem + = UserBuildTargetUnrecognised String + deriving Show + +reportUserBuildTargetProblems :: [UserBuildTargetProblem] -> IO () +reportUserBuildTargetProblems problems = do + case [ target | UserBuildTargetUnrecognised target <- problems ] of + [] -> return () + target -> + die $ unlines + [ "Unrecognised build target '" ++ name ++ "'." + | name <- target ] + ++ "Examples:\n" + ++ " - build foo -- component name " + ++ "(library, executable, test-suite or benchmark)\n" + ++ " - build Data.Foo -- module name\n" + ++ " - build Data/Foo.hsc -- file name\n" + ++ " - build lib:foo exe:foo -- component qualified by kind\n" + ++ " - build foo:Data.Foo -- module qualified by component\n" + ++ " - build foo:Data/Foo.hsc -- file qualified by component" + +showUserBuildTarget :: UserBuildTarget -> String +showUserBuildTarget = intercalate ":" . 
components + where + components (UserBuildTargetSingle s1) = [s1] + components (UserBuildTargetDouble s1 s2) = [s1,s2] + components (UserBuildTargetTriple s1 s2 s3) = [s1,s2,s3] + + +-- ------------------------------------------------------------ +-- * Resolving user targets to build targets +-- ------------------------------------------------------------ + +{- +stargets = + [ BuildTargetComponent (CExeName "foo") + , BuildTargetModule (CExeName "foo") (mkMn "Foo") + , BuildTargetModule (CExeName "tst") (mkMn "Foo") + ] + where + mkMn :: String -> ModuleName + mkMn = fromJust . simpleParse + +ex_pkgid :: PackageIdentifier +Just ex_pkgid = simpleParse "thelib" +-} + +-- | Given a bunch of user-specified targets, try to resolve what it is they +-- refer to. +-- +resolveBuildTargets :: PackageDescription + -> [(UserBuildTarget, Bool)] + -> ([BuildTargetProblem], [BuildTarget]) +resolveBuildTargets pkg = partitionEithers + . map (uncurry (resolveBuildTarget pkg)) + +resolveBuildTarget :: PackageDescription -> UserBuildTarget -> Bool + -> Either BuildTargetProblem BuildTarget +resolveBuildTarget pkg userTarget fexists = + case findMatch (matchBuildTarget pkg userTarget fexists) of + Unambiguous target -> Right target + Ambiguous targets -> Left (BuildTargetAmbigious userTarget targets') + where targets' = disambiguateBuildTargets + (packageId pkg) userTarget + targets + None errs -> Left (classifyMatchErrors errs) + + where + classifyMatchErrors errs + | not (null expected) = let (things, got:_) = unzip expected in + BuildTargetExpected userTarget things got + | not (null nosuch) = BuildTargetNoSuch userTarget nosuch + | otherwise = error $ "resolveBuildTarget: internal error in matching" + where + expected = [ (thing, got) | MatchErrorExpected thing got <- errs ] + nosuch = [ (thing, got) | MatchErrorNoSuch thing got <- errs ] + + +data BuildTargetProblem + = BuildTargetExpected UserBuildTarget [String] String + -- ^ [expected thing] (actually got) + | 
BuildTargetNoSuch UserBuildTarget [(String, String)] + -- ^ [(no such thing, actually got)] + | BuildTargetAmbigious UserBuildTarget [(UserBuildTarget, BuildTarget)] + deriving Show + + +disambiguateBuildTargets :: PackageId -> UserBuildTarget -> [BuildTarget] + -> [(UserBuildTarget, BuildTarget)] +disambiguateBuildTargets pkgid original = + disambiguate (userTargetQualLevel original) + where + disambiguate ql ts + | null amb = unamb + | otherwise = unamb ++ disambiguate (succ ql) amb + where + (amb, unamb) = step ql ts + + userTargetQualLevel (UserBuildTargetSingle _ ) = QL1 + userTargetQualLevel (UserBuildTargetDouble _ _ ) = QL2 + userTargetQualLevel (UserBuildTargetTriple _ _ _) = QL3 + + step :: QualLevel -> [BuildTarget] + -> ([BuildTarget], [(UserBuildTarget, BuildTarget)]) + step ql = (\(amb, unamb) -> (map snd $ concat amb, concat unamb)) + . partition (\g -> length g > 1) + . groupBy (equating fst) + . sortBy (comparing fst) + . map (\t -> (renderBuildTarget ql t pkgid, t)) + +data QualLevel = QL1 | QL2 | QL3 + deriving (Enum, Show) + +renderBuildTarget :: QualLevel -> BuildTarget -> PackageId -> UserBuildTarget +renderBuildTarget ql target pkgid = + case ql of + QL1 -> UserBuildTargetSingle s1 where s1 = single target + QL2 -> UserBuildTargetDouble s1 s2 where (s1, s2) = double target + QL3 -> UserBuildTargetTriple s1 s2 s3 where (s1, s2, s3) = triple target + + where + single (BuildTargetComponent cn ) = dispCName cn + single (BuildTargetModule _ m) = display m + single (BuildTargetFile _ f) = f + + double (BuildTargetComponent cn ) = (dispKind cn, dispCName cn) + double (BuildTargetModule cn m) = (dispCName cn, display m) + double (BuildTargetFile cn f) = (dispCName cn, f) + + triple (BuildTargetComponent _ ) = error "triple BuildTargetComponent" + triple (BuildTargetModule cn m) = (dispKind cn, dispCName cn, display m) + triple (BuildTargetFile cn f) = (dispKind cn, dispCName cn, f) + + dispCName = componentStringName pkgid + dispKind = 
showComponentKindShort . componentKind + +reportBuildTargetProblems :: [BuildTargetProblem] -> IO () +reportBuildTargetProblems problems = do + + case [ (t, e, g) | BuildTargetExpected t e g <- problems ] of + [] -> return () + targets -> + die $ unlines + [ "Unrecognised build target '" ++ showUserBuildTarget target + ++ "'.\n" + ++ "Expected a " ++ intercalate " or " expected + ++ ", rather than '" ++ got ++ "'." + | (target, expected, got) <- targets ] + + case [ (t, e) | BuildTargetNoSuch t e <- problems ] of + [] -> return () + targets -> + die $ unlines + [ "Unknown build target '" ++ showUserBuildTarget target + ++ "'.\nThere is no " + ++ intercalate " or " [ mungeThing thing ++ " '" ++ got ++ "'" + | (thing, got) <- nosuch ] ++ "." + | (target, nosuch) <- targets ] + where + mungeThing "file" = "file target" + mungeThing thing = thing + + case [ (t, ts) | BuildTargetAmbigious t ts <- problems ] of + [] -> return () + targets -> + die $ unlines + [ "Ambiguous build target '" ++ showUserBuildTarget target + ++ "'. 
It could be:\n " + ++ unlines [ " "++ showUserBuildTarget ut ++ + " (" ++ showBuildTargetKind bt ++ ")" + | (ut, bt) <- amb ] + | (target, amb) <- targets ] + + where + showBuildTargetKind (BuildTargetComponent _ ) = "component" + showBuildTargetKind (BuildTargetModule _ _) = "module" + showBuildTargetKind (BuildTargetFile _ _) = "file" + + +---------------------------------- +-- Top level BuildTarget matcher +-- + +matchBuildTarget :: PackageDescription + -> UserBuildTarget -> Bool -> Match BuildTarget +matchBuildTarget pkg = \utarget fexists -> + case utarget of + UserBuildTargetSingle str1 -> + matchBuildTarget1 cinfo str1 fexists + + UserBuildTargetDouble str1 str2 -> + matchBuildTarget2 cinfo str1 str2 fexists + + UserBuildTargetTriple str1 str2 str3 -> + matchBuildTarget3 cinfo str1 str2 str3 fexists + where + cinfo = pkgComponentInfo pkg + +matchBuildTarget1 :: [ComponentInfo] -> String -> Bool -> Match BuildTarget +matchBuildTarget1 cinfo str1 fexists = + matchComponent1 cinfo str1 + `matchPlusShadowing` matchModule1 cinfo str1 + `matchPlusShadowing` matchFile1 cinfo str1 fexists + + +matchBuildTarget2 :: [ComponentInfo] -> String -> String -> Bool + -> Match BuildTarget +matchBuildTarget2 cinfo str1 str2 fexists = + matchComponent2 cinfo str1 str2 + `matchPlusShadowing` matchModule2 cinfo str1 str2 + `matchPlusShadowing` matchFile2 cinfo str1 str2 fexists + + +matchBuildTarget3 :: [ComponentInfo] -> String -> String -> String -> Bool + -> Match BuildTarget +matchBuildTarget3 cinfo str1 str2 str3 fexists = + matchModule3 cinfo str1 str2 str3 + `matchPlusShadowing` matchFile3 cinfo str1 str2 str3 fexists + + +data ComponentInfo = ComponentInfo { + cinfoName :: ComponentName, + cinfoStrName :: ComponentStringName, + cinfoSrcDirs :: [FilePath], + cinfoModules :: [ModuleName], + cinfoHsFiles :: [FilePath], -- other hs files (like main.hs) + cinfoCFiles :: [FilePath], + cinfoJsFiles :: [FilePath] + } + +type ComponentStringName = String + +pkgComponentInfo :: 
PackageDescription -> [ComponentInfo] +pkgComponentInfo pkg = + [ ComponentInfo { + cinfoName = componentName c, + cinfoStrName = componentStringName pkg (componentName c), + cinfoSrcDirs = hsSourceDirs bi, + cinfoModules = componentModules c, + cinfoHsFiles = componentHsFiles c, + cinfoCFiles = cSources bi, + cinfoJsFiles = jsSources bi + } + | c <- pkgComponents pkg + , let bi = componentBuildInfo c ] + +componentStringName :: Package pkg => pkg -> ComponentName -> ComponentStringName +componentStringName pkg CLibName = display (packageName pkg) +componentStringName _ (CExeName name) = name +componentStringName _ (CTestName name) = name +componentStringName _ (CBenchName name) = name + +componentModules :: Component -> [ModuleName] +componentModules (CLib lib) = libModules lib +componentModules (CExe exe) = exeModules exe +componentModules (CTest test) = testModules test +componentModules (CBench bench) = benchmarkModules bench + +componentHsFiles :: Component -> [FilePath] +componentHsFiles (CExe exe) = [modulePath exe] +componentHsFiles (CTest TestSuite { + testInterface = TestSuiteExeV10 _ mainfile + }) = [mainfile] +componentHsFiles (CBench Benchmark { + benchmarkInterface = BenchmarkExeV10 _ mainfile + }) = [mainfile] +componentHsFiles _ = [] + +{- +ex_cs :: [ComponentInfo] +ex_cs = + [ (mkC (CExeName "foo") ["src1", "src1/src2"] ["Foo", "Src2.Bar", "Bar"]) + , (mkC (CExeName "tst") ["src1", "test"] ["Foo"]) + ] + where + mkC n ds ms = ComponentInfo n (componentStringName pkgid n) ds (map mkMn ms) + mkMn :: String -> ModuleName + mkMn = fromJust . 
simpleParse + pkgid :: PackageIdentifier + Just pkgid = simpleParse "thelib" +-} + +------------------------------ +-- Matching component kinds +-- + +data ComponentKind = LibKind | ExeKind | TestKind | BenchKind + deriving (Eq, Ord, Show) + +componentKind :: ComponentName -> ComponentKind +componentKind CLibName = LibKind +componentKind (CExeName _) = ExeKind +componentKind (CTestName _) = TestKind +componentKind (CBenchName _) = BenchKind + +cinfoKind :: ComponentInfo -> ComponentKind +cinfoKind = componentKind . cinfoName + +matchComponentKind :: String -> Match ComponentKind +matchComponentKind s + | s `elem` ["lib", "library"] = increaseConfidence >> return LibKind + | s `elem` ["exe", "executable"] = increaseConfidence >> return ExeKind + | s `elem` ["tst", "test", "test-suite"] = increaseConfidence + >> return TestKind + | s `elem` ["bench", "benchmark"] = increaseConfidence + >> return BenchKind + | otherwise = matchErrorExpected + "component kind" s + +showComponentKind :: ComponentKind -> String +showComponentKind LibKind = "library" +showComponentKind ExeKind = "executable" +showComponentKind TestKind = "test-suite" +showComponentKind BenchKind = "benchmark" + +showComponentKindShort :: ComponentKind -> String +showComponentKindShort LibKind = "lib" +showComponentKindShort ExeKind = "exe" +showComponentKindShort TestKind = "test" +showComponentKindShort BenchKind = "bench" + +------------------------------ +-- Matching component targets +-- + +matchComponent1 :: [ComponentInfo] -> String -> Match BuildTarget +matchComponent1 cs = \str1 -> do + guardComponentName str1 + c <- matchComponentName cs str1 + return (BuildTargetComponent (cinfoName c)) + +matchComponent2 :: [ComponentInfo] -> String -> String -> Match BuildTarget +matchComponent2 cs = \str1 str2 -> do + ckind <- matchComponentKind str1 + guardComponentName str2 + c <- matchComponentKindAndName cs ckind str2 + return (BuildTargetComponent (cinfoName c)) + +-- utils: + +guardComponentName :: 
String -> Match () +guardComponentName s + | all validComponentChar s + && not (null s) = increaseConfidence + | otherwise = matchErrorExpected "component name" s + where + validComponentChar c = isAlphaNum c || c == '.' + || c == '_' || c == '-' || c == '\'' + +matchComponentName :: [ComponentInfo] -> String -> Match ComponentInfo +matchComponentName cs str = + orNoSuchThing "component" str + $ increaseConfidenceFor + $ matchInexactly caseFold + [ (cinfoStrName c, c) | c <- cs ] + str + +matchComponentKindAndName :: [ComponentInfo] -> ComponentKind -> String + -> Match ComponentInfo +matchComponentKindAndName cs ckind str = + orNoSuchThing (showComponentKind ckind ++ " component") str + $ increaseConfidenceFor + $ matchInexactly (\(ck, cn) -> (ck, caseFold cn)) + [ ((cinfoKind c, cinfoStrName c), c) | c <- cs ] + (ckind, str) + + +------------------------------ +-- Matching module targets +-- + +matchModule1 :: [ComponentInfo] -> String -> Match BuildTarget +matchModule1 cs = \str1 -> do + guardModuleName str1 + nubMatchErrors $ do + c <- tryEach cs + let ms = cinfoModules c + m <- matchModuleName ms str1 + return (BuildTargetModule (cinfoName c) m) + +matchModule2 :: [ComponentInfo] -> String -> String -> Match BuildTarget +matchModule2 cs = \str1 str2 -> do + guardComponentName str1 + guardModuleName str2 + c <- matchComponentName cs str1 + let ms = cinfoModules c + m <- matchModuleName ms str2 + return (BuildTargetModule (cinfoName c) m) + +matchModule3 :: [ComponentInfo] -> String -> String -> String + -> Match BuildTarget +matchModule3 cs str1 str2 str3 = do + ckind <- matchComponentKind str1 + guardComponentName str2 + c <- matchComponentKindAndName cs ckind str2 + guardModuleName str3 + let ms = cinfoModules c + m <- matchModuleName ms str3 + return (BuildTargetModule (cinfoName c) m) + +-- utils: + +guardModuleName :: String -> Match () +guardModuleName s + | all validModuleChar s + && not (null s) = increaseConfidence + | otherwise = matchErrorExpected 
"module name" s + where + validModuleChar c = isAlphaNum c || c == '.' || c == '_' || c == '\'' + +matchModuleName :: [ModuleName] -> String -> Match ModuleName +matchModuleName ms str = + orNoSuchThing "module" str + $ increaseConfidenceFor + $ matchInexactly caseFold + [ (display m, m) + | m <- ms ] + str + + +------------------------------ +-- Matching file targets +-- + +matchFile1 :: [ComponentInfo] -> String -> Bool -> Match BuildTarget +matchFile1 cs str1 exists = + nubMatchErrors $ do + c <- tryEach cs + filepath <- matchComponentFile c str1 exists + return (BuildTargetFile (cinfoName c) filepath) + + +matchFile2 :: [ComponentInfo] -> String -> String -> Bool -> Match BuildTarget +matchFile2 cs str1 str2 exists = do + guardComponentName str1 + c <- matchComponentName cs str1 + filepath <- matchComponentFile c str2 exists + return (BuildTargetFile (cinfoName c) filepath) + + +matchFile3 :: [ComponentInfo] -> String -> String -> String -> Bool + -> Match BuildTarget +matchFile3 cs str1 str2 str3 exists = do + ckind <- matchComponentKind str1 + guardComponentName str2 + c <- matchComponentKindAndName cs ckind str2 + filepath <- matchComponentFile c str3 exists + return (BuildTargetFile (cinfoName c) filepath) + + +matchComponentFile :: ComponentInfo -> String -> Bool -> Match FilePath +matchComponentFile c str fexists = + expecting "file" str $ + matchPlus + (matchFileExists str fexists) + (matchPlusShadowing + (msum [ matchModuleFileRooted dirs ms str + , matchOtherFileRooted dirs hsFiles str ]) + (msum [ matchModuleFileUnrooted ms str + , matchOtherFileUnrooted hsFiles str + , matchOtherFileUnrooted cFiles str + , matchOtherFileUnrooted jsFiles str ])) + where + dirs = cinfoSrcDirs c + ms = cinfoModules c + hsFiles = cinfoHsFiles c + cFiles = cinfoCFiles c + jsFiles = cinfoJsFiles c + + +-- utils + +matchFileExists :: FilePath -> Bool -> Match a +matchFileExists _ False = mzero +matchFileExists fname True = do increaseConfidence + matchErrorNoSuch "file" 
fname + +matchModuleFileUnrooted :: [ModuleName] -> String -> Match FilePath +matchModuleFileUnrooted ms str = do + let filepath = normalise str + _ <- matchModuleFileStem ms filepath + return filepath + +matchModuleFileRooted :: [FilePath] -> [ModuleName] -> String -> Match FilePath +matchModuleFileRooted dirs ms str = nubMatches $ do + let filepath = normalise str + filepath' <- matchDirectoryPrefix dirs filepath + _ <- matchModuleFileStem ms filepath' + return filepath + +matchModuleFileStem :: [ModuleName] -> FilePath -> Match ModuleName +matchModuleFileStem ms = + increaseConfidenceFor + . matchInexactly caseFold + [ (toFilePath m, m) | m <- ms ] + . dropExtension + +matchOtherFileRooted :: [FilePath] -> [FilePath] -> FilePath -> Match FilePath +matchOtherFileRooted dirs fs str = do + let filepath = normalise str + filepath' <- matchDirectoryPrefix dirs filepath + _ <- matchFile fs filepath' + return filepath + +matchOtherFileUnrooted :: [FilePath] -> FilePath -> Match FilePath +matchOtherFileUnrooted fs str = do + let filepath = normalise str + _ <- matchFile fs filepath + return filepath + +matchFile :: [FilePath] -> FilePath -> Match FilePath +matchFile fs = increaseConfidenceFor + . matchInexactly caseFold [ (f, f) | f <- fs ] + +matchDirectoryPrefix :: [FilePath] -> FilePath -> Match FilePath +matchDirectoryPrefix dirs filepath = + exactMatches $ + catMaybes + [ stripDirectory (normalise dir) filepath | dir <- dirs ] + where + stripDirectory :: FilePath -> FilePath -> Maybe FilePath + stripDirectory dir fp = + joinPath `fmap` stripPrefix (splitDirectories dir) (splitDirectories fp) + + +------------------------------ +-- Matching monad +-- + +-- | A matcher embodies a way to match some input as being some recognised +-- value. In particular it deals with multiple and ambigious matches. 
+-- +-- There are various matcher primitives ('matchExactly', 'matchInexactly'), +-- ways to combine matchers ('ambigiousWith', 'shadows') and finally we can +-- run a matcher against an input using 'findMatch'. +-- + +data Match a = NoMatch Confidence [MatchError] + | ExactMatch Confidence [a] + | InexactMatch Confidence [a] + deriving Show + +type Confidence = Int + +data MatchError = MatchErrorExpected String String + | MatchErrorNoSuch String String + deriving (Show, Eq) + + +instance Alternative Match where + empty = mzero + (<|>) = mplus + +instance MonadPlus Match where + mzero = matchZero + mplus = matchPlus + +matchZero :: Match a +matchZero = NoMatch 0 [] + +-- | Combine two matchers. Exact matches are used over inexact matches +-- but if we have multiple exact, or inexact then the we collect all the +-- ambigious matches. +-- +matchPlus :: Match a -> Match a -> Match a +matchPlus (ExactMatch d1 xs) (ExactMatch d2 xs') = + ExactMatch (max d1 d2) (xs ++ xs') +matchPlus a@(ExactMatch _ _ ) (InexactMatch _ _ ) = a +matchPlus a@(ExactMatch _ _ ) (NoMatch _ _ ) = a +matchPlus (InexactMatch _ _ ) b@(ExactMatch _ _ ) = b +matchPlus (InexactMatch d1 xs) (InexactMatch d2 xs') = + InexactMatch (max d1 d2) (xs ++ xs') +matchPlus a@(InexactMatch _ _ ) (NoMatch _ _ ) = a +matchPlus (NoMatch _ _ ) b@(ExactMatch _ _ ) = b +matchPlus (NoMatch _ _ ) b@(InexactMatch _ _ ) = b +matchPlus a@(NoMatch d1 ms) b@(NoMatch d2 ms') + | d1 > d2 = a + | d1 < d2 = b + | otherwise = NoMatch d1 (ms ++ ms') + +-- | Combine two matchers. This is similar to 'ambigiousWith' with the +-- difference that an exact match from the left matcher shadows any exact +-- match on the right. Inexact matches are still collected however. 
+-- +matchPlusShadowing :: Match a -> Match a -> Match a +matchPlusShadowing a@(ExactMatch _ _) (ExactMatch _ _) = a +matchPlusShadowing a b = matchPlus a b + +instance Functor Match where + fmap _ (NoMatch d ms) = NoMatch d ms + fmap f (ExactMatch d xs) = ExactMatch d (fmap f xs) + fmap f (InexactMatch d xs) = InexactMatch d (fmap f xs) + +instance Applicative Match where + pure = return + (<*>) = ap + +instance Monad Match where + return a = ExactMatch 0 [a] + NoMatch d ms >>= _ = NoMatch d ms + ExactMatch d xs >>= f = addDepth d + $ foldr matchPlus matchZero (map f xs) + InexactMatch d xs >>= f = addDepth d . forceInexact + $ foldr matchPlus matchZero (map f xs) + +addDepth :: Confidence -> Match a -> Match a +addDepth d' (NoMatch d msgs) = NoMatch (d'+d) msgs +addDepth d' (ExactMatch d xs) = ExactMatch (d'+d) xs +addDepth d' (InexactMatch d xs) = InexactMatch (d'+d) xs + +forceInexact :: Match a -> Match a +forceInexact (ExactMatch d ys) = InexactMatch d ys +forceInexact m = m + +------------------------------ +-- Various match primitives +-- + +matchErrorExpected, matchErrorNoSuch :: String -> String -> Match a +matchErrorExpected thing got = NoMatch 0 [MatchErrorExpected thing got] +matchErrorNoSuch thing got = NoMatch 0 [MatchErrorNoSuch thing got] + +expecting :: String -> String -> Match a -> Match a +expecting thing got (NoMatch 0 _) = matchErrorExpected thing got +expecting _ _ m = m + +orNoSuchThing :: String -> String -> Match a -> Match a +orNoSuchThing thing got (NoMatch 0 _) = matchErrorNoSuch thing got +orNoSuchThing _ _ m = m + +increaseConfidence :: Match () +increaseConfidence = ExactMatch 1 [()] + +increaseConfidenceFor :: Match a -> Match a +increaseConfidenceFor m = m >>= \r -> increaseConfidence >> return r + +nubMatches :: Eq a => Match a -> Match a +nubMatches (NoMatch d msgs) = NoMatch d msgs +nubMatches (ExactMatch d xs) = ExactMatch d (nub xs) +nubMatches (InexactMatch d xs) = InexactMatch d (nub xs) + +nubMatchErrors :: Match a -> 
Match a +nubMatchErrors (NoMatch d msgs) = NoMatch d (nub msgs) +nubMatchErrors (ExactMatch d xs) = ExactMatch d xs +nubMatchErrors (InexactMatch d xs) = InexactMatch d xs + +-- | Lift a list of matches to an exact match. +-- +exactMatches, inexactMatches :: [a] -> Match a + +exactMatches [] = matchZero +exactMatches xs = ExactMatch 0 xs + +inexactMatches [] = matchZero +inexactMatches xs = InexactMatch 0 xs + +tryEach :: [a] -> Match a +tryEach = exactMatches + + +------------------------------ +-- Top level match runner +-- + +-- | Given a matcher and a key to look up, use the matcher to find all the +-- possible matches. There may be 'None', a single 'Unambiguous' match or +-- you may have an 'Ambiguous' match with several possibilities. +-- +findMatch :: Eq b => Match b -> MaybeAmbigious b +findMatch match = + case match of + NoMatch _ msgs -> None (nub msgs) + ExactMatch _ xs -> checkAmbigious xs + InexactMatch _ xs -> checkAmbigious xs + where + checkAmbigious xs = case nub xs of + [x] -> Unambiguous x + xs' -> Ambiguous xs' + +data MaybeAmbigious a = None [MatchError] | Unambiguous a | Ambiguous [a] + deriving Show + + +------------------------------ +-- Basic matchers +-- + +{- +-- | A primitive matcher that looks up a value in a finite 'Map'. The +-- value must match exactly. +-- +matchExactly :: forall a b. Ord a => [(a, b)] -> (a -> Match b) +matchExactly xs = + \x -> case Map.lookup x m of + Nothing -> matchZero + Just ys -> ExactMatch 0 ys + where + m :: Ord a => Map a [b] + m = Map.fromListWith (++) [ (k,[x]) | (k,x) <- xs ] +-} + +-- | A primitive matcher that looks up a value in a finite 'Map'. It checks +-- for an exact or inexact match. We get an inexact match if the match +-- is not exact, but the canonical forms match. It takes a canonicalisation +-- function for this purpose. 
+-- +-- So for example if we used string case fold as the canonicalisation +-- function, then we would get case insensitive matching (but it will still +-- report an exact match when the case matches too). +-- +matchInexactly :: (Ord a, Ord a') => + (a -> a') -> + [(a, b)] -> (a -> Match b) +matchInexactly cannonicalise xs = + \x -> case Map.lookup x m of + Just ys -> exactMatches ys + Nothing -> case Map.lookup (cannonicalise x) m' of + Just ys -> inexactMatches ys + Nothing -> matchZero + where + m = Map.fromListWith (++) [ (k,[x]) | (k,x) <- xs ] + + -- the map of canonicalised keys to groups of inexact matches + m' = Map.mapKeysWith (++) cannonicalise m + + + +------------------------------ +-- Utils +-- + +caseFold :: String -> String +caseFold = lowercase diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/CCompiler.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/CCompiler.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/CCompiler.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/CCompiler.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,124 @@ +{-# LANGUAGE CPP #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.CCompiler +-- Copyright : 2011, Dan Knapp +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This simple package provides types and functions for interacting with +-- C compilers. Currently it's just a type enumerating extant C-like +-- languages, which we call dialects. + +{- +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of Isaac Jones nor the names of other + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -} + +module Distribution.Simple.CCompiler ( + CDialect(..), + cSourceExtensions, + cDialectFilenameExtension, + filenameCDialect + ) where + +#if __GLASGOW_HASKELL__ < 710 +import Data.Monoid + ( Monoid(..) ) +#endif +import System.FilePath + ( takeExtension ) + + +-- | Represents a dialect of C. The Monoid instance expresses backward +-- compatibility, in the sense that 'mappend a b' is the least inclusive +-- dialect which both 'a' and 'b' can be correctly interpreted as. 
+data CDialect = C + | ObjectiveC + | CPlusPlus + | ObjectiveCPlusPlus + deriving (Eq, Show) + +instance Monoid CDialect where + mempty = C + + mappend C anything = anything + mappend ObjectiveC CPlusPlus = ObjectiveCPlusPlus + mappend CPlusPlus ObjectiveC = ObjectiveCPlusPlus + mappend _ ObjectiveCPlusPlus = ObjectiveCPlusPlus + mappend ObjectiveC _ = ObjectiveC + mappend CPlusPlus _ = CPlusPlus + mappend ObjectiveCPlusPlus _ = ObjectiveCPlusPlus + + +-- | A list of all file extensions which are recognized as possibly containing +-- some dialect of C code. Note that this list is only for source files, +-- not for header files. +cSourceExtensions :: [String] +cSourceExtensions = ["c", "i", "ii", "m", "mi", "mm", "M", "mii", "cc", "cp", + "cxx", "cpp", "CPP", "c++", "C"] + + +-- | Takes a dialect of C and whether code is intended to be passed through +-- the preprocessor, and returns a filename extension for containing that +-- code. +cDialectFilenameExtension :: CDialect -> Bool -> String +cDialectFilenameExtension C True = "c" +cDialectFilenameExtension C False = "i" +cDialectFilenameExtension ObjectiveC True = "m" +cDialectFilenameExtension ObjectiveC False = "mi" +cDialectFilenameExtension CPlusPlus True = "cpp" +cDialectFilenameExtension CPlusPlus False = "ii" +cDialectFilenameExtension ObjectiveCPlusPlus True = "mm" +cDialectFilenameExtension ObjectiveCPlusPlus False = "mii" + + +-- | Infers from a filename's extension the dialect of C which it contains, +-- and whether it is intended to be passed through the preprocessor. 
+filenameCDialect :: String -> Maybe (CDialect, Bool) +filenameCDialect filename = do + extension <- case takeExtension filename of + '.':ext -> Just ext + _ -> Nothing + case extension of + "c" -> return (C, True) + "i" -> return (C, False) + "ii" -> return (CPlusPlus, False) + "m" -> return (ObjectiveC, True) + "mi" -> return (ObjectiveC, False) + "mm" -> return (ObjectiveCPlusPlus, True) + "M" -> return (ObjectiveCPlusPlus, True) + "mii" -> return (ObjectiveCPlusPlus, False) + "cc" -> return (CPlusPlus, True) + "cp" -> return (CPlusPlus, True) + "cxx" -> return (CPlusPlus, True) + "cpp" -> return (CPlusPlus, True) + "CPP" -> return (CPlusPlus, True) + "c++" -> return (CPlusPlus, True) + "C" -> return (CPlusPlus, True) + _ -> Nothing diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Command.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Command.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Command.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Command.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,600 @@ +{-# LANGUAGE CPP #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.Command +-- Copyright : Duncan Coutts 2007 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This is to do with command line handling. The Cabal command line is +-- organised into a number of named sub-commands (much like darcs). The +-- 'CommandUI' abstraction represents one of these sub-commands, with a name, +-- description, a set of flags. Commands can be associated with actions and +-- run. It handles some common stuff automatically, like the @--help@ and +-- command line completion flags. It is designed to allow other tools make +-- derived commands. This feature is used heavily in @cabal-install@. 
+ +module Distribution.Simple.Command ( + + -- * Command interface + CommandUI(..), + commandShowOptions, + CommandParse(..), + commandParseArgs, + getNormalCommandDescriptions, + helpCommandUI, + + -- ** Constructing commands + ShowOrParseArgs(..), + usageDefault, + usageAlternatives, + mkCommandUI, + hiddenCommand, + + -- ** Associating actions with commands + Command, + commandAddAction, + noExtraFlags, + + -- ** Running commands + commandsRun, + +-- * Option Fields + OptionField(..), Name, + +-- ** Constructing Option Fields + option, multiOption, + +-- ** Liftings & Projections + liftOption, viewAsFieldDescr, + +-- * Option Descriptions + OptDescr(..), Description, SFlags, LFlags, OptFlags, ArgPlaceHolder, + +-- ** OptDescr 'smart' constructors + MkOptDescr, + reqArg, reqArg', optArg, optArg', noArg, + boolOpt, boolOpt', choiceOpt, choiceOptFromEnum + + ) where + +import Control.Monad +import Data.Char (isAlpha, toLower) +import Data.List (sortBy) +import Data.Maybe +#if __GLASGOW_HASKELL__ < 710 +import Data.Monoid +#endif +import qualified Distribution.GetOpt as GetOpt +import Distribution.Text + ( Text(disp, parse) ) +import Distribution.ParseUtils +import Distribution.ReadE +import Distribution.Simple.Utils (die, intercalate) +import Text.PrettyPrint ( punctuate, cat, comma, text ) +import Text.PrettyPrint as PP ( empty ) + +data CommandUI flags = CommandUI { + -- | The name of the command as it would be entered on the command line. + -- For example @\"build\"@. + commandName :: String, + -- | A short, one line description of the command to use in help texts. + commandSynopsis :: String, + -- | A function that maps a program name to a usage summary for this + -- command. + commandUsage :: String -> String, + -- | Additional explanation of the command to use in help texts. 
+ commandDescription :: Maybe (String -> String), + -- | Post-Usage notes and examples in help texts + commandNotes :: Maybe (String -> String), + -- | Initial \/ empty flags + commandDefaultFlags :: flags, + -- | All the Option fields for this command + commandOptions :: ShowOrParseArgs -> [OptionField flags] + } + +data ShowOrParseArgs = ShowArgs | ParseArgs +type Name = String +type Description = String + +-- | We usually have a data type for storing configuration values, where +-- every field stores a configuration option, and the user sets +-- the value either via command line flags or a configuration file. +-- An individual OptionField models such a field, and we usually +-- build a list of options associated to a configuration data type. +data OptionField a = OptionField { + optionName :: Name, + optionDescr :: [OptDescr a] } + +-- | An OptionField takes one or more OptDescrs, describing the command line +-- interface for the field. +data OptDescr a = ReqArg Description OptFlags ArgPlaceHolder + (ReadE (a->a)) (a -> [String]) + + | OptArg Description OptFlags ArgPlaceHolder + (ReadE (a->a)) (a->a) (a -> [Maybe String]) + + | ChoiceOpt [(Description, OptFlags, a->a, a -> Bool)] + + | BoolOpt Description OptFlags{-True-} OptFlags{-False-} + (Bool -> a -> a) (a-> Maybe Bool) + +-- | Short command line option strings +type SFlags = [Char] +-- | Long command line option strings +type LFlags = [String] +type OptFlags = (SFlags,LFlags) +type ArgPlaceHolder = String + + +-- | Create an option taking a single OptDescr. +-- No explicit Name is given for the Option, the name is the first LFlag given. +option :: SFlags -> LFlags -> Description -> get -> set -> MkOptDescr get set a + -> OptionField a +option sf lf@(n:_) d get set arg = OptionField n [arg sf lf d get set] +option _ _ _ _ _ _ = error $ "Distribution.command.option: " + ++ "An OptionField must have at least one LFlag" + +-- | Create an option taking several OptDescrs. 
+-- You will have to give the flags and description individually to the +-- OptDescr constructor. +multiOption :: Name -> get -> set + -> [get -> set -> OptDescr a] -- ^MkOptDescr constructors partially + -- applied to flags and description. + -> OptionField a +multiOption n get set args = OptionField n [arg get set | arg <- args] + +type MkOptDescr get set a = SFlags -> LFlags -> Description -> get -> set + -> OptDescr a + +-- | Create a string-valued command line interface. +reqArg :: Monoid b => ArgPlaceHolder -> ReadE b -> (b -> [String]) + -> MkOptDescr (a -> b) (b -> a -> a) a +reqArg ad mkflag showflag sf lf d get set = + ReqArg d (sf,lf) ad (fmap (\a b -> set (get b `mappend` a) b) mkflag) + (showflag . get) + +-- | Create a string-valued command line interface with a default value. +optArg :: Monoid b => ArgPlaceHolder -> ReadE b -> b -> (b -> [Maybe String]) + -> MkOptDescr (a -> b) (b -> a -> a) a +optArg ad mkflag def showflag sf lf d get set = + OptArg d (sf,lf) ad (fmap (\a b -> set (get b `mappend` a) b) mkflag) + (\b -> set (get b `mappend` def) b) + (showflag . get) + +-- | (String -> a) variant of "reqArg" +reqArg' :: Monoid b => ArgPlaceHolder -> (String -> b) -> (b -> [String]) + -> MkOptDescr (a -> b) (b -> a -> a) a +reqArg' ad mkflag showflag = + reqArg ad (succeedReadE mkflag) showflag + +-- | (String -> a) variant of "optArg" +optArg' :: Monoid b => ArgPlaceHolder -> (Maybe String -> b) + -> (b -> [Maybe String]) + -> MkOptDescr (a -> b) (b -> a -> a) a +optArg' ad mkflag showflag = + optArg ad (succeedReadE (mkflag . 
Just)) def showflag + where def = mkflag Nothing + +noArg :: (Eq b, Monoid b) => b -> MkOptDescr (a -> b) (b -> a -> a) a +noArg flag sf lf d = choiceOpt [(flag, (sf,lf), d)] sf lf d + +boolOpt :: (b -> Maybe Bool) -> (Bool -> b) -> SFlags -> SFlags + -> MkOptDescr (a -> b) (b -> a -> a) a +boolOpt g s sfT sfF _sf _lf@(n:_) d get set = + BoolOpt d (sfT, ["enable-"++n]) (sfF, ["disable-"++n]) (set.s) (g.get) +boolOpt _ _ _ _ _ _ _ _ _ = error + "Distribution.Simple.Setup.boolOpt: unreachable" + +boolOpt' :: (b -> Maybe Bool) -> (Bool -> b) -> OptFlags -> OptFlags + -> MkOptDescr (a -> b) (b -> a -> a) a +boolOpt' g s ffT ffF _sf _lf d get set = BoolOpt d ffT ffF (set.s) (g . get) + +-- | create a Choice option +choiceOpt :: Eq b => [(b,OptFlags,Description)] + -> MkOptDescr (a -> b) (b -> a -> a) a +choiceOpt aa_ff _sf _lf _d get set = ChoiceOpt alts + where alts = [(d,flags, set alt, (==alt) . get) | (alt,flags,d) <- aa_ff] + +-- | create a Choice option out of an enumeration type. +-- As long flags, the Show output is used. As short flags, the first character +-- which does not conflict with a previous one is used. +choiceOptFromEnum :: (Bounded b, Enum b, Show b, Eq b) => + MkOptDescr (a -> b) (b -> a -> a) a +choiceOptFromEnum _sf _lf d get = + choiceOpt [ (x, (sf, [map toLower $ show x]), d') + | (x, sf) <- sflags' + , let d' = d ++ show x] + _sf _lf d get + where sflags' = foldl f [] [firstOne..] 
+ f prev x = let prevflags = concatMap snd prev in + prev ++ take 1 [(x, [toLower sf]) + | sf <- show x, isAlpha sf + , toLower sf `notElem` prevflags] + firstOne = minBound `asTypeOf` get undefined + +commandGetOpts :: ShowOrParseArgs -> CommandUI flags + -> [GetOpt.OptDescr (flags -> flags)] +commandGetOpts showOrParse command = + concatMap viewAsGetOpt (commandOptions command showOrParse) + +viewAsGetOpt :: OptionField a -> [GetOpt.OptDescr (a->a)] +viewAsGetOpt (OptionField _n aa) = concatMap optDescrToGetOpt aa + where + optDescrToGetOpt (ReqArg d (cs,ss) arg_desc set _) = + [GetOpt.Option cs ss (GetOpt.ReqArg set' arg_desc) d] + where set' = readEOrFail set + optDescrToGetOpt (OptArg d (cs,ss) arg_desc set def _) = + [GetOpt.Option cs ss (GetOpt.OptArg set' arg_desc) d] + where set' Nothing = def + set' (Just txt) = readEOrFail set txt + optDescrToGetOpt (ChoiceOpt alts) = + [GetOpt.Option sf lf (GetOpt.NoArg set) d | (d,(sf,lf),set,_) <- alts ] + optDescrToGetOpt (BoolOpt d (sfT, lfT) ([], []) set _) = + [ GetOpt.Option sfT lfT (GetOpt.NoArg (set True)) d ] + optDescrToGetOpt (BoolOpt d ([], []) (sfF, lfF) set _) = + [ GetOpt.Option sfF lfF (GetOpt.NoArg (set False)) d ] + optDescrToGetOpt (BoolOpt d (sfT,lfT) (sfF, lfF) set _) = + [ GetOpt.Option sfT lfT (GetOpt.NoArg (set True)) ("Enable " ++ d) + , GetOpt.Option sfF lfF (GetOpt.NoArg (set False)) ("Disable " ++ d) ] + +-- | to view as a FieldDescr, we sort the list of interfaces (Req > Bool > +-- Choice > Opt) and consider only the first one. 
+viewAsFieldDescr :: OptionField a -> FieldDescr a +viewAsFieldDescr (OptionField _n []) = + error "Distribution.command.viewAsFieldDescr: unexpected" +viewAsFieldDescr (OptionField n dd) = FieldDescr n get set + where + optDescr = head $ sortBy cmp dd + + cmp :: OptDescr a -> OptDescr a -> Ordering + ReqArg{} `cmp` ReqArg{} = EQ + ReqArg{} `cmp` _ = GT + BoolOpt{} `cmp` ReqArg{} = LT + BoolOpt{} `cmp` BoolOpt{} = EQ + BoolOpt{} `cmp` _ = GT + ChoiceOpt{} `cmp` ReqArg{} = LT + ChoiceOpt{} `cmp` BoolOpt{} = LT + ChoiceOpt{} `cmp` ChoiceOpt{} = EQ + ChoiceOpt{} `cmp` _ = GT + OptArg{} `cmp` OptArg{} = EQ + OptArg{} `cmp` _ = LT + +-- get :: a -> Doc + get t = case optDescr of + ReqArg _ _ _ _ ppr -> + (cat . punctuate comma . map text . ppr) t + + OptArg _ _ _ _ _ ppr -> + case ppr t of [] -> PP.empty + (Nothing : _) -> text "True" + (Just a : _) -> text a + + ChoiceOpt alts -> + fromMaybe PP.empty $ listToMaybe + [ text lf | (_,(_,lf:_), _,enabled) <- alts, enabled t] + + BoolOpt _ _ _ _ enabled -> (maybe PP.empty disp . enabled) t + +-- set :: LineNo -> String -> a -> ParseResult a + set line val a = + case optDescr of + ReqArg _ _ _ readE _ -> ($ a) `liftM` runE line n readE val + -- We parse for a single value instead of a + -- list, as one can't really implement + -- parseList :: ReadE a -> ReadE [a] with + -- the current ReadE definition + ChoiceOpt{} -> + case getChoiceByLongFlag optDescr val of + Just f -> return (f a) + _ -> syntaxError line val + + BoolOpt _ _ _ setV _ -> (`setV` a) `liftM` runP line n parse val + + OptArg _ _ _ readE _ _ -> ($ a) `liftM` runE line n readE val + -- Optional arguments are parsed just like + -- required arguments here; we don't + -- provide a method to set an OptArg field + -- to the default value. 
+ +getChoiceByLongFlag :: OptDescr b -> String -> Maybe (b->b) +getChoiceByLongFlag (ChoiceOpt alts) val = listToMaybe + [ set | (_,(_sf,lf:_), set, _) <- alts + , lf == val] + +getChoiceByLongFlag _ _ = + error "Distribution.command.getChoiceByLongFlag: expected a choice option" + +getCurrentChoice :: OptDescr a -> a -> [String] +getCurrentChoice (ChoiceOpt alts) a = + [ lf | (_,(_sf,lf:_), _, currentChoice) <- alts, currentChoice a] + +getCurrentChoice _ _ = error "Command.getChoice: expected a Choice OptDescr" + + +liftOption :: (b -> a) -> (a -> (b -> b)) -> OptionField a -> OptionField b +liftOption get' set' opt = + opt { optionDescr = liftOptDescr get' set' `map` optionDescr opt} + + +liftOptDescr :: (b -> a) -> (a -> (b -> b)) -> OptDescr a -> OptDescr b +liftOptDescr get' set' (ChoiceOpt opts) = + ChoiceOpt [ (d, ff, liftSet get' set' set , (get . get')) + | (d, ff, set, get) <- opts] + +liftOptDescr get' set' (OptArg d ff ad set def get) = + OptArg d ff ad (liftSet get' set' `fmap` set) + (liftSet get' set' def) (get . get') + +liftOptDescr get' set' (ReqArg d ff ad set get) = + ReqArg d ff ad (liftSet get' set' `fmap` set) (get . get') + +liftOptDescr get' set' (BoolOpt d ffT ffF set get) = + BoolOpt d ffT ffF (liftSet get' set' . set) (get . 
get') + +liftSet :: (b -> a) -> (a -> (b -> b)) -> (a -> a) -> b -> b +liftSet get' set' set x = set' (set $ get' x) x + +-- | Show flags in the standard long option command line format +commandShowOptions :: CommandUI flags -> flags -> [String] +commandShowOptions command v = concat + [ showOptDescr v od | o <- commandOptions command ParseArgs + , od <- optionDescr o] + where + maybePrefix [] = [] + maybePrefix (lOpt:_) = ["--" ++ lOpt] + + showOptDescr :: a -> OptDescr a -> [String] + showOptDescr x (BoolOpt _ (_,lfTs) (_,lfFs) _ enabled) + = case enabled x of + Nothing -> [] + Just True -> maybePrefix lfTs + Just False -> maybePrefix lfFs + showOptDescr x c@ChoiceOpt{} + = ["--" ++ val | val <- getCurrentChoice c x] + showOptDescr x (ReqArg _ (_ssff,lf:_) _ _ showflag) + = [ "--"++lf++"="++flag + | flag <- showflag x ] + showOptDescr x (OptArg _ (_ssff,lf:_) _ _ _ showflag) + = [ case flag of + Just s -> "--"++lf++"="++s + Nothing -> "--"++lf + | flag <- showflag x ] + showOptDescr _ _ + = error "Distribution.Simple.Command.showOptDescr: unreachable" + + +commandListOptions :: CommandUI flags -> [String] +commandListOptions command = + concatMap listOption $ + addCommonFlags ShowArgs $ -- This is a slight hack, we don't want + -- "--list-options" showing up in the + -- list options output, so use ShowArgs + commandGetOpts ShowArgs command + where + listOption (GetOpt.Option shortNames longNames _ _) = + [ "-" ++ [name] | name <- shortNames ] + ++ [ "--" ++ name | name <- longNames ] + +-- | The help text for this command with descriptions of all the options. +commandHelp :: CommandUI flags -> String -> String +commandHelp command pname = + commandSynopsis command + ++ "\n\n" + ++ commandUsage command pname + ++ ( case commandDescription command of + Nothing -> "" + Just desc -> '\n': desc pname) + ++ "\n" + ++ ( if cname == "" + then "Global flags:" + else "Flags for " ++ cname ++ ":" ) + ++ ( GetOpt.usageInfo "" + . 
addCommonFlags ShowArgs + $ commandGetOpts ShowArgs command ) + ++ ( case commandNotes command of + Nothing -> "" + Just notes -> '\n': notes pname) + where cname = commandName command + +-- | Default "usage" documentation text for commands. +usageDefault :: String -> String -> String +usageDefault name pname = + "Usage: " ++ pname ++ " " ++ name ++ " [FLAGS]\n\n" + ++ "Flags for " ++ name ++ ":" + +-- | Create "usage" documentation from a list of parameter +-- configurations. +usageAlternatives :: String -> [String] -> String -> String +usageAlternatives name strs pname = unlines + [ start ++ pname ++ " " ++ name ++ " " ++ s + | let starts = "Usage: " : repeat " or: " + , (start, s) <- zip starts strs + ] + +-- | Make a Command from standard 'GetOpt' options. +mkCommandUI :: String -- ^ name + -> String -- ^ synopsis + -> [String] -- ^ usage alternatives + -> flags -- ^ initial\/empty flags + -> (ShowOrParseArgs -> [OptionField flags]) -- ^ options + -> CommandUI flags +mkCommandUI name synopsis usages flags options = CommandUI + { commandName = name + , commandSynopsis = synopsis + , commandDescription = Nothing + , commandNotes = Nothing + , commandUsage = usageAlternatives name usages + , commandDefaultFlags = flags + , commandOptions = options + } + +-- | Common flags that apply to every command +data CommonFlag = HelpFlag | ListOptionsFlag + +commonFlags :: ShowOrParseArgs -> [GetOpt.OptDescr CommonFlag] +commonFlags showOrParseArgs = case showOrParseArgs of + ShowArgs -> [help] + ParseArgs -> [help, list] + where + help = GetOpt.Option helpShortFlags ["help"] (GetOpt.NoArg HelpFlag) + "Show this help text" + helpShortFlags = case showOrParseArgs of + ShowArgs -> ['h'] + ParseArgs -> ['h', '?'] + list = GetOpt.Option [] ["list-options"] (GetOpt.NoArg ListOptionsFlag) + "Print a list of command line flags" + +addCommonFlags :: ShowOrParseArgs + -> [GetOpt.OptDescr a] + -> [GetOpt.OptDescr (Either CommonFlag a)] +addCommonFlags showOrParseArgs options = + map 
(fmapOptDesc Left) (commonFlags showOrParseArgs) + ++ map (fmapOptDesc Right) options + where fmapOptDesc f (GetOpt.Option s l d m) = + GetOpt.Option s l (fmapArgDesc f d) m + fmapArgDesc f (GetOpt.NoArg a) = GetOpt.NoArg (f a) + fmapArgDesc f (GetOpt.ReqArg s d) = GetOpt.ReqArg (f . s) d + fmapArgDesc f (GetOpt.OptArg s d) = GetOpt.OptArg (f . s) d + +-- | Parse a bunch of command line arguments +-- +commandParseArgs :: CommandUI flags + -> Bool -- ^ Is the command a global or subcommand? + -> [String] + -> CommandParse (flags -> flags, [String]) +commandParseArgs command global args = + let options = addCommonFlags ParseArgs + $ commandGetOpts ParseArgs command + order | global = GetOpt.RequireOrder + | otherwise = GetOpt.Permute + in case GetOpt.getOpt' order options args of + (flags, _, _, _) + | any listFlag flags -> CommandList (commandListOptions command) + | any helpFlag flags -> CommandHelp (commandHelp command) + where listFlag (Left ListOptionsFlag) = True; listFlag _ = False + helpFlag (Left HelpFlag) = True; helpFlag _ = False + (flags, opts, opts', []) + | global || null opts' -> CommandReadyToGo (accum flags, mix opts opts') + | otherwise -> CommandErrors (unrecognised opts') + (_, _, _, errs) -> CommandErrors errs + + where -- Note: It is crucial to use reverse function composition here or to + -- reverse the flags here as we want to process the flags left to right + -- but data flow in function composition is right to left. + accum flags = foldr (flip (.)) id [ f | Right f <- flags ] + unrecognised opts = [ "unrecognized " + ++ "'" ++ (commandName command) ++ "'" + ++ " option `" ++ opt ++ "'\n" + | opt <- opts ] + -- For unrecognised global flags we put them in the position just after + -- the command, if there is one. This gives us a chance to parse them + -- as sub-command rather than global flags. 
+ mix [] ys = ys + mix (x:xs) ys = x:ys++xs + +data CommandParse flags = CommandHelp (String -> String) + | CommandList [String] + | CommandErrors [String] + | CommandReadyToGo flags +instance Functor CommandParse where + fmap _ (CommandHelp help) = CommandHelp help + fmap _ (CommandList opts) = CommandList opts + fmap _ (CommandErrors errs) = CommandErrors errs + fmap f (CommandReadyToGo flags) = CommandReadyToGo (f flags) + + +data CommandType = NormalCommand | HiddenCommand +data Command action = + Command String String ([String] -> CommandParse action) CommandType + +-- | Mark command as hidden. Hidden commands don't show up in the 'progname +-- help' or 'progname --help' output. +hiddenCommand :: Command action -> Command action +hiddenCommand (Command name synopsys f _cmdType) = + Command name synopsys f HiddenCommand + +commandAddAction :: CommandUI flags + -> (flags -> [String] -> action) + -> Command action +commandAddAction command action = + Command (commandName command) + (commandSynopsis command) + (fmap (uncurry applyDefaultArgs) . 
commandParseArgs command False) + NormalCommand + + where applyDefaultArgs mkflags args = + let flags = mkflags (commandDefaultFlags command) + in action flags args + +commandsRun :: CommandUI a + -> [Command action] + -> [String] + -> CommandParse (a, CommandParse action) +commandsRun globalCommand commands args = + case commandParseArgs globalCommand True args of + CommandHelp help -> CommandHelp help + CommandList opts -> CommandList (opts ++ commandNames) + CommandErrors errs -> CommandErrors errs + CommandReadyToGo (mkflags, args') -> case args' of + ("help":cmdArgs) -> handleHelpCommand cmdArgs + (name:cmdArgs) -> case lookupCommand name of + [Command _ _ action _] + -> CommandReadyToGo (flags, action cmdArgs) + _ -> CommandReadyToGo (flags, badCommand name) + [] -> CommandReadyToGo (flags, noCommand) + where flags = mkflags (commandDefaultFlags globalCommand) + + where + lookupCommand cname = [ cmd | cmd@(Command cname' _ _ _) <- commands' + , cname' == cname ] + noCommand = CommandErrors ["no command given (try --help)\n"] + badCommand cname = CommandErrors ["unrecognised command: " ++ cname + ++ " (try --help)\n"] + commands' = commands ++ [commandAddAction helpCommandUI undefined] + commandNames = [ name | (Command name _ _ NormalCommand) <- commands' ] + + -- A bit of a hack: support "prog help" as a synonym of "prog --help" + -- furthermore, support "prog help command" as "prog command --help" + handleHelpCommand cmdArgs = + case commandParseArgs helpCommandUI True cmdArgs of + CommandHelp help -> CommandHelp help + CommandList list -> CommandList (list ++ commandNames) + CommandErrors _ -> CommandHelp globalHelp + CommandReadyToGo (_,[]) -> CommandHelp globalHelp + CommandReadyToGo (_,(name:cmdArgs')) -> + case lookupCommand name of + [Command _ _ action _] -> + case action ("--help":cmdArgs') of + CommandHelp help -> CommandHelp help + CommandList _ -> CommandList [] + _ -> CommandHelp globalHelp + _ -> badCommand name + + where globalHelp = 
commandHelp globalCommand + +-- | Utility function, many commands do not accept additional flags. This +-- action fails with a helpful error message if the user supplies any extra. +-- +noExtraFlags :: [String] -> IO () +noExtraFlags [] = return () +noExtraFlags extraFlags = + die $ "Unrecognised flags: " ++ intercalate ", " extraFlags +--TODO: eliminate this function and turn it into a variant on commandAddAction +-- instead like commandAddActionNoArgs that doesn't supply the [String] + +-- | Helper function for creating globalCommand description +getNormalCommandDescriptions :: [Command action] -> [(String, String)] +getNormalCommandDescriptions cmds = + [ (name, description) + | Command name description _ NormalCommand <- cmds ] + +helpCommandUI :: CommandUI () +helpCommandUI = mkCommandUI + "help" + "Help about commands." + ["[FLAGS]", "COMMAND [FLAGS]"] + () + (const []) diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Compiler.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Compiler.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Compiler.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Compiler.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,287 @@ +{-# LANGUAGE DeriveGeneric #-} + +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.Compiler +-- Copyright : Isaac Jones 2003-2004 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This should be a much more sophisticated abstraction than it is. Currently +-- it's just a bit of data about the compiler, like it's flavour and name and +-- version. The reason it's just data is because currently it has to be in +-- 'Read' and 'Show' so it can be saved along with the 'LocalBuildInfo'. 
The +-- only interesting bit of info it contains is a mapping between language +-- extensions and compiler command line flags. This module also defines a +-- 'PackageDB' type which is used to refer to package databases. Most compilers +-- only know about a single global package collection but GHC has a global and +-- per-user one and it lets you create arbitrary other package databases. We do +-- not yet fully support this latter feature. + +module Distribution.Simple.Compiler ( + -- * Haskell implementations + module Distribution.Compiler, + Compiler(..), + showCompilerId, showCompilerIdWithAbi, + compilerFlavor, compilerVersion, + compilerCompatVersion, + compilerInfo, + + -- * Support for package databases + PackageDB(..), + PackageDBStack, + registrationPackageDB, + absolutePackageDBPaths, + absolutePackageDBPath, + + -- * Support for optimisation levels + OptimisationLevel(..), + flagToOptimisationLevel, + + -- * Support for debug info levels + DebugInfoLevel(..), + flagToDebugInfoLevel, + + -- * Support for language extensions + Flag, + languageToFlags, + unsupportedLanguages, + extensionsToFlags, + unsupportedExtensions, + parmakeSupported, + reexportedModulesSupported, + renamingPackageFlagsSupported, + packageKeySupported + ) where + +import Distribution.Compiler +import Distribution.Version (Version(..)) +import Distribution.Text (display) +import Language.Haskell.Extension (Language(Haskell98), Extension) + +import Control.Monad (liftM) +import Distribution.Compat.Binary (Binary) +import Data.List (nub) +import qualified Data.Map as M (Map, lookup) +import Data.Maybe (catMaybes, isNothing, listToMaybe) +import GHC.Generics (Generic) +import System.Directory (canonicalizePath) + +data Compiler = Compiler { + compilerId :: CompilerId, + -- ^ Compiler flavour and version. + compilerAbiTag :: AbiTag, + -- ^ Tag for distinguishing incompatible ABI's on the same architecture/os. 
+ compilerCompat :: [CompilerId], + -- ^ Other implementations that this compiler claims to be compatible with. + compilerLanguages :: [(Language, Flag)], + -- ^ Supported language standards. + compilerExtensions :: [(Extension, Flag)], + -- ^ Supported extensions. + compilerProperties :: M.Map String String + -- ^ A key-value map for properties not covered by the above fields. + } + deriving (Generic, Show, Read) + +instance Binary Compiler + +showCompilerId :: Compiler -> String +showCompilerId = display . compilerId + +showCompilerIdWithAbi :: Compiler -> String +showCompilerIdWithAbi comp = + display (compilerId comp) ++ + case compilerAbiTag comp of + NoAbiTag -> [] + AbiTag xs -> '-':xs + +compilerFlavor :: Compiler -> CompilerFlavor +compilerFlavor = (\(CompilerId f _) -> f) . compilerId + +compilerVersion :: Compiler -> Version +compilerVersion = (\(CompilerId _ v) -> v) . compilerId + +compilerCompatVersion :: CompilerFlavor -> Compiler -> Maybe Version +compilerCompatVersion flavor comp + | compilerFlavor comp == flavor = Just (compilerVersion comp) + | otherwise = + listToMaybe [ v | CompilerId fl v <- compilerCompat comp, fl == flavor ] + +compilerInfo :: Compiler -> CompilerInfo +compilerInfo c = CompilerInfo (compilerId c) + (compilerAbiTag c) + (Just . compilerCompat $ c) + (Just . map fst . compilerLanguages $ c) + (Just . map fst . compilerExtensions $ c) + +-- ------------------------------------------------------------ +-- * Package databases +-- ------------------------------------------------------------ + +-- |Some compilers have a notion of a database of available packages. +-- For some there is just one global db of packages, other compilers +-- support a per-user or an arbitrary db specified at some location in +-- the file system. This can be used to build isloated environments of +-- packages, for example to build a collection of related packages +-- without installing them globally. 
+-- +data PackageDB = GlobalPackageDB + | UserPackageDB + | SpecificPackageDB FilePath + deriving (Eq, Generic, Ord, Show, Read) + +instance Binary PackageDB + +-- | We typically get packages from several databases, and stack them +-- together. This type lets us be explicit about that stacking. For example +-- typical stacks include: +-- +-- > [GlobalPackageDB] +-- > [GlobalPackageDB, UserPackageDB] +-- > [GlobalPackageDB, SpecificPackageDB "package.conf.inplace"] +-- +-- Note that the 'GlobalPackageDB' is invariably at the bottom since it +-- contains the rts, base and other special compiler-specific packages. +-- +-- We are not restricted to using just the above combinations. In particular +-- we can use several custom package dbs and the user package db together. +-- +-- When it comes to writing, the top most (last) package is used. +-- +type PackageDBStack = [PackageDB] + +-- | Return the package that we should register into. This is the package db at +-- the top of the stack. +-- +registrationPackageDB :: PackageDBStack -> PackageDB +registrationPackageDB [] = error "internal error: empty package db set" +registrationPackageDB dbs = last dbs + +-- | Make package paths absolute + + +absolutePackageDBPaths :: PackageDBStack -> IO PackageDBStack +absolutePackageDBPaths = mapM absolutePackageDBPath + +absolutePackageDBPath :: PackageDB -> IO PackageDB +absolutePackageDBPath GlobalPackageDB = return GlobalPackageDB +absolutePackageDBPath UserPackageDB = return UserPackageDB +absolutePackageDBPath (SpecificPackageDB db) = + SpecificPackageDB `liftM` canonicalizePath db + +-- ------------------------------------------------------------ +-- * Optimisation levels +-- ------------------------------------------------------------ + +-- | Some compilers support optimising. Some have different levels. +-- For compilers that do not the level is just capped to the level +-- they do support. 
+-- +data OptimisationLevel = NoOptimisation + | NormalOptimisation + | MaximumOptimisation + deriving (Bounded, Enum, Eq, Generic, Read, Show) + +instance Binary OptimisationLevel + +flagToOptimisationLevel :: Maybe String -> OptimisationLevel +flagToOptimisationLevel Nothing = NormalOptimisation +flagToOptimisationLevel (Just s) = case reads s of + [(i, "")] + | i >= fromEnum (minBound :: OptimisationLevel) + && i <= fromEnum (maxBound :: OptimisationLevel) + -> toEnum i + | otherwise -> error $ "Bad optimisation level: " ++ show i + ++ ". Valid values are 0..2" + _ -> error $ "Can't parse optimisation level " ++ s + +-- ------------------------------------------------------------ +-- * Debug info levels +-- ------------------------------------------------------------ + +-- | Some compilers support emitting debug info. Some have different +-- levels. For compilers that do not the level is just capped to the +-- level they do support. +-- +data DebugInfoLevel = NoDebugInfo + | MinimalDebugInfo + | NormalDebugInfo + | MaximalDebugInfo + deriving (Bounded, Enum, Eq, Generic, Read, Show) + +instance Binary DebugInfoLevel + +flagToDebugInfoLevel :: Maybe String -> DebugInfoLevel +flagToDebugInfoLevel Nothing = NormalDebugInfo +flagToDebugInfoLevel (Just s) = case reads s of + [(i, "")] + | i >= fromEnum (minBound :: DebugInfoLevel) + && i <= fromEnum (maxBound :: DebugInfoLevel) + -> toEnum i + | otherwise -> error $ "Bad debug info level: " ++ show i + ++ ". Valid values are 0..3" + _ -> error $ "Can't parse debug info level " ++ s + +-- ------------------------------------------------------------ +-- * Languages and Extensions +-- ------------------------------------------------------------ + +unsupportedLanguages :: Compiler -> [Language] -> [Language] +unsupportedLanguages comp langs = + [ lang | lang <- langs + , isNothing (languageToFlag comp lang) ] + +languageToFlags :: Compiler -> Maybe Language -> [Flag] +languageToFlags comp = filter (not . null) + . 
catMaybes . map (languageToFlag comp) + . maybe [Haskell98] (\x->[x]) + +languageToFlag :: Compiler -> Language -> Maybe Flag +languageToFlag comp ext = lookup ext (compilerLanguages comp) + + +-- |For the given compiler, return the extensions it does not support. +unsupportedExtensions :: Compiler -> [Extension] -> [Extension] +unsupportedExtensions comp exts = + [ ext | ext <- exts + , isNothing (extensionToFlag comp ext) ] + +type Flag = String + +-- |For the given compiler, return the flags for the supported extensions. +extensionsToFlags :: Compiler -> [Extension] -> [Flag] +extensionsToFlags comp = nub . filter (not . null) + . catMaybes . map (extensionToFlag comp) + +extensionToFlag :: Compiler -> Extension -> Maybe Flag +extensionToFlag comp ext = lookup ext (compilerExtensions comp) + +-- | Does this compiler support parallel --make mode? +parmakeSupported :: Compiler -> Bool +parmakeSupported = ghcSupported "Support parallel --make" + +-- | Does this compiler support reexported-modules? +reexportedModulesSupported :: Compiler -> Bool +reexportedModulesSupported = ghcSupported "Support reexported-modules" + +-- | Does this compiler support thinning/renaming on package flags? +renamingPackageFlagsSupported :: Compiler -> Bool +renamingPackageFlagsSupported = ghcSupported "Support thinning and renaming package flags" + +-- | Does this compiler support package keys? 
+packageKeySupported :: Compiler -> Bool +packageKeySupported = ghcSupported "Uses package keys" + +-- | Utility function for GHC only features +ghcSupported :: String -> Compiler -> Bool +ghcSupported key comp = + case compilerFlavor comp of + GHC -> checkProp + GHCJS -> checkProp + _ -> False + where checkProp = + case M.lookup key (compilerProperties comp) of + Just "YES" -> True + _ -> False diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Configure.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Configure.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Configure.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Configure.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,1674 @@ +{-# LANGUAGE CPP #-} +{-# LANGUAGE DeriveDataTypeable #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE RecordWildCards #-} + +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.Configure +-- Copyright : Isaac Jones 2003-2005 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This deals with the /configure/ phase. It provides the 'configure' action +-- which is given the package description and configure flags. It then tries +-- to: configure the compiler; resolves any conditionals in the package +-- description; resolve the package dependencies; check if all the extensions +-- used by this package are supported by the compiler; check that all the build +-- tools are available (including version checks if appropriate); checks for +-- any required @pkg-config@ packages (updating the 'BuildInfo' with the +-- results) +-- +-- Then based on all this it saves the info in the 'LocalBuildInfo' and writes +-- it out to the @dist\/setup-config@ file. 
It also displays various details to +-- the user, the amount of information displayed depending on the verbosity +-- level. + +module Distribution.Simple.Configure (configure, + writePersistBuildConfig, + getConfigStateFile, + getPersistBuildConfig, + checkPersistBuildConfigOutdated, + tryGetPersistBuildConfig, + maybeGetPersistBuildConfig, + localBuildInfoFile, + getInstalledPackages, getPackageDBContents, + configCompiler, configCompilerAux, + configCompilerEx, configCompilerAuxEx, + ccLdOptionsBuildInfo, + checkForeignDeps, + interpretPackageDbFlags, + ConfigStateFileError(..), + tryGetConfigStateFile, + platformDefines, + ) + where + +import Distribution.Compiler + ( CompilerId(..) ) +import Distribution.Utils.NubList +import Distribution.Simple.Compiler + ( CompilerFlavor(..), Compiler(..), compilerFlavor, compilerVersion + , compilerInfo + , showCompilerId, unsupportedLanguages, unsupportedExtensions + , PackageDB(..), PackageDBStack, reexportedModulesSupported + , packageKeySupported, renamingPackageFlagsSupported ) +import Distribution.Simple.PreProcess ( platformDefines ) +import Distribution.Package + ( PackageName(PackageName), PackageIdentifier(..), PackageId + , packageName, packageVersion, Package(..) + , Dependency(Dependency), simplifyDependency + , InstalledPackageId(..), thisPackageVersion + , mkPackageKey, PackageKey(..), packageKeyLibraryName ) +import qualified Distribution.InstalledPackageInfo as Installed +import Distribution.InstalledPackageInfo (InstalledPackageInfo, emptyInstalledPackageInfo) +import qualified Distribution.Simple.PackageIndex as PackageIndex +import Distribution.Simple.PackageIndex (InstalledPackageIndex) +import Distribution.PackageDescription as PD + ( PackageDescription(..), specVersion, GenericPackageDescription(..) + , Library(..), hasLibs, Executable(..), BuildInfo(..), allExtensions + , HookedBuildInfo, updatePackageDescription, allBuildInfo + , Flag(flagName), FlagName(..), TestSuite(..), Benchmark(..) 
+ , ModuleReexport(..) , defaultRenaming ) +import Distribution.ModuleName + ( ModuleName ) +import Distribution.PackageDescription.Configuration + ( finalizePackageDescription, mapTreeData ) +import Distribution.PackageDescription.Check + ( PackageCheck(..), checkPackage, checkPackageFiles ) +import Distribution.Simple.Program + ( Program(..), ProgramLocation(..), ConfiguredProgram(..) + , ProgramConfiguration, defaultProgramConfiguration + , ProgramSearchPathEntry(..), getProgramSearchPath, setProgramSearchPath + , configureAllKnownPrograms, knownPrograms, lookupKnownProgram + , userSpecifyArgss, userSpecifyPaths + , lookupProgram, requireProgram, requireProgramVersion + , pkgConfigProgram, gccProgram, rawSystemProgramStdoutConf ) +import Distribution.Simple.Setup + ( ConfigFlags(..), CopyDest(..), Flag(..), fromFlag, fromFlagOrDefault + , flagToMaybe ) +import Distribution.Simple.InstallDirs + ( InstallDirs(..), defaultInstallDirs, combineInstallDirs ) +import Distribution.Simple.LocalBuildInfo + ( LocalBuildInfo(..), Component(..), ComponentLocalBuildInfo(..) + , LibraryName(..) 
+ , absoluteInstallDirs, prefixRelativeInstallDirs, inplacePackageId + , ComponentName(..), showComponentName, pkgEnabledComponents + , componentBuildInfo, componentName, checkComponentsCyclic ) +import Distribution.Simple.BuildPaths + ( autogenModulesDir ) +import Distribution.Simple.Utils + ( die, warn, info, setupMessage + , createDirectoryIfMissingVerbose, moreRecentFile + , intercalate, cabalVersion + , writeFileAtomic + , withTempFile ) +import Distribution.System + ( OS(..), buildOS, Platform (..), buildPlatform ) +import Distribution.Version + ( Version(..), anyVersion, orLaterVersion, withinRange, isAnyVersion ) +import Distribution.Verbosity + ( Verbosity, lessVerbose ) + +import qualified Distribution.Simple.GHC as GHC +import qualified Distribution.Simple.GHCJS as GHCJS +import qualified Distribution.Simple.JHC as JHC +import qualified Distribution.Simple.LHC as LHC +import qualified Distribution.Simple.UHC as UHC +import qualified Distribution.Simple.HaskellSuite as HaskellSuite + +-- Prefer the more generic Data.Traversable.mapM to Prelude.mapM +import Prelude hiding ( mapM ) +import Control.Exception + ( ErrorCall(..), Exception, evaluate, throw, throwIO, try ) +import Control.Monad + ( liftM, when, unless, foldM, filterM ) +import Distribution.Compat.Binary ( decodeOrFailIO, encode ) +import Data.ByteString.Lazy (ByteString) +import qualified Data.ByteString as BS +import qualified Data.ByteString.Lazy.Char8 as BLC8 +import Data.List + ( (\\), nub, partition, isPrefixOf, inits, stripPrefix ) +import Data.Maybe + ( isNothing, catMaybes, fromMaybe, isJust ) +import Data.Either + ( partitionEithers ) +import qualified Data.Set as Set +#if __GLASGOW_HASKELL__ < 710 +import Data.Monoid + ( Monoid(..) 
) +#endif +import qualified Data.Map as Map +import Data.Map (Map) +import Data.Traversable + ( mapM ) +import Data.Typeable +import System.Directory + ( doesFileExist, createDirectoryIfMissing, getTemporaryDirectory ) +import System.FilePath + ( (), isAbsolute ) +import qualified System.Info + ( compilerName, compilerVersion ) +import System.IO + ( hPutStrLn, hClose ) +import Distribution.Text + ( Text(disp), display, simpleParse ) +import Text.PrettyPrint + ( render, (<>), ($+$), char, text, comma + , quotes, punctuate, nest, sep, hsep ) +import Distribution.Compat.Exception ( catchExit, catchIO ) + +data ConfigStateFileError + = ConfigStateFileNoHeader + | ConfigStateFileBadHeader + | ConfigStateFileNoParse + | ConfigStateFileMissing + | ConfigStateFileBadVersion PackageIdentifier PackageIdentifier (Either ConfigStateFileError LocalBuildInfo) + deriving (Typeable) + +instance Show ConfigStateFileError where + show ConfigStateFileNoHeader = + "Saved package config file header is missing. " + ++ "Try re-running the 'configure' command." + show ConfigStateFileBadHeader = + "Saved package config file header is corrupt. " + ++ "Try re-running the 'configure' command." + show ConfigStateFileNoParse = + "Saved package config file body is corrupt. " + ++ "Try re-running the 'configure' command." + show ConfigStateFileMissing = "Run the 'configure' command first." + show (ConfigStateFileBadVersion oldCabal oldCompiler _) = + "You need to re-run the 'configure' command. " + ++ "The version of Cabal being used has changed (was " + ++ display oldCabal ++ ", now " + ++ display currentCabalId ++ ")." + ++ badCompiler + where + badCompiler + | oldCompiler == currentCompilerId = "" + | otherwise = + " Additionally the compiler is different (was " + ++ display oldCompiler ++ ", now " + ++ display currentCompilerId + ++ ") which is probably the cause of the problem." 
+ +instance Exception ConfigStateFileError + +getConfigStateFile :: FilePath -> IO LocalBuildInfo +getConfigStateFile filename = do + exists <- doesFileExist filename + unless exists $ throwIO ConfigStateFileMissing + -- Read the config file into a strict ByteString to avoid problems with + -- lazy I/O, then convert to lazy because the binary package needs that. + contents <- BS.readFile filename + let (header, body) = BLC8.span (/='\n') (BLC8.fromChunks [contents]) + + headerParseResult <- try $ evaluate $ parseHeader header + let (cabalId, compId) = + case headerParseResult of + Left (ErrorCall _) -> throw ConfigStateFileBadHeader + Right x -> x + + let getStoredValue = do + result <- decodeOrFailIO (BLC8.tail body) + case result of + Left _ -> throw ConfigStateFileNoParse + Right x -> return x + deferErrorIfBadVersion act + | cabalId /= currentCabalId = do + eResult <- try act + throw $ ConfigStateFileBadVersion cabalId compId eResult + | otherwise = act + deferErrorIfBadVersion getStoredValue + +tryGetConfigStateFile :: FilePath + -> IO (Either ConfigStateFileError LocalBuildInfo) +tryGetConfigStateFile = try . getConfigStateFile + +-- |Try to read the 'localBuildInfoFile'. +tryGetPersistBuildConfig :: FilePath + -> IO (Either ConfigStateFileError LocalBuildInfo) +tryGetPersistBuildConfig = try . getPersistBuildConfig + +-- | Read the 'localBuildInfoFile'. Throw an exception if the file is +-- missing, if the file cannot be read, or if the file was created by an older +-- version of Cabal. +getPersistBuildConfig :: FilePath -> IO LocalBuildInfo +getPersistBuildConfig = getConfigStateFile . localBuildInfoFile + +-- |Try to read the 'localBuildInfoFile'. +maybeGetPersistBuildConfig :: FilePath -> IO (Maybe LocalBuildInfo) +maybeGetPersistBuildConfig = + liftM (either (const Nothing) Just) . tryGetPersistBuildConfig + +-- |After running configure, output the 'LocalBuildInfo' to the +-- 'localBuildInfoFile'. 
+writePersistBuildConfig :: FilePath -> LocalBuildInfo -> IO () +writePersistBuildConfig distPref lbi = do + createDirectoryIfMissing False distPref + writeFileAtomic (localBuildInfoFile distPref) $ + BLC8.unlines [showHeader pkgId, encode lbi] + where + pkgId = packageId $ localPkgDescr lbi + +currentCabalId :: PackageIdentifier +currentCabalId = PackageIdentifier (PackageName "Cabal") cabalVersion + +currentCompilerId :: PackageIdentifier +currentCompilerId = PackageIdentifier (PackageName System.Info.compilerName) + System.Info.compilerVersion + +parseHeader :: ByteString -> (PackageIdentifier, PackageIdentifier) +parseHeader header = case BLC8.words header of + ["Saved", "package", "config", "for", pkgId, "written", "by", cabalId, "using", compId] -> + fromMaybe (throw ConfigStateFileBadHeader) $ do + _ <- simpleParse (BLC8.unpack pkgId) :: Maybe PackageIdentifier + cabalId' <- simpleParse (BLC8.unpack cabalId) + compId' <- simpleParse (BLC8.unpack compId) + return (cabalId', compId') + _ -> throw ConfigStateFileNoHeader + +showHeader :: PackageIdentifier -> ByteString +showHeader pkgId = BLC8.unwords + [ "Saved", "package", "config", "for" + , BLC8.pack $ display pkgId + , "written", "by" + , BLC8.pack $ display currentCabalId + , "using" + , BLC8.pack $ display currentCompilerId + ] + +-- |Check that localBuildInfoFile is up-to-date with respect to the +-- .cabal file. +checkPersistBuildConfigOutdated :: FilePath -> FilePath -> IO Bool +checkPersistBuildConfigOutdated distPref pkg_descr_file = do + pkg_descr_file `moreRecentFile` (localBuildInfoFile distPref) + +-- |@dist\/setup-config@ +localBuildInfoFile :: FilePath -> FilePath +localBuildInfoFile distPref = distPref "setup-config" + +-- ----------------------------------------------------------------------------- +-- * Configuration +-- ----------------------------------------------------------------------------- + +-- |Perform the \"@.\/setup configure@\" action. +-- Returns the @.setup-config@ file. 
+configure :: (GenericPackageDescription, HookedBuildInfo) + -> ConfigFlags -> IO LocalBuildInfo +configure (pkg_descr0, pbi) cfg + = do let distPref = fromFlag (configDistPref cfg) + buildDir' = distPref "build" + verbosity = fromFlag (configVerbosity cfg) + + setupMessage verbosity "Configuring" (packageId pkg_descr0) + + unless (configLibCoverage cfg == NoFlag) $ do + let enable | fromFlag (configLibCoverage cfg) = "enable" + | otherwise = "disable" + warn verbosity + ("The flag --" ++ enable ++ "-library-coverage is deprecated. " + ++ "Please use --" ++ enable ++ "-coverage instead.") + + createDirectoryIfMissingVerbose (lessVerbose verbosity) True distPref + + let programsConfig = mkProgramsConfig cfg (configPrograms cfg) + userInstall = fromFlag (configUserInstall cfg) + packageDbs = interpretPackageDbFlags userInstall + (configPackageDBs cfg) + + -- detect compiler + (comp, compPlatform, programsConfig') <- configCompilerEx + (flagToMaybe $ configHcFlavor cfg) + (flagToMaybe $ configHcPath cfg) (flagToMaybe $ configHcPkg cfg) + programsConfig (lessVerbose verbosity) + let version = compilerVersion comp + flavor = compilerFlavor comp + + -- Create a PackageIndex that makes *any libraries that might be* + -- defined internally to this package look like installed packages, in + -- case an executable should refer to any of them as dependencies. + -- + -- It must be *any libraries that might be* defined rather than the + -- actual definitions, because these depend on conditionals in the .cabal + -- file, and we haven't resolved them yet. finalizePackageDescription + -- does the resolution of conditionals, and it takes internalPackageSet + -- as part of its input. + -- + -- Currently a package can define no more than one library (which has + -- the same name as the package) but we could extend this later. 
+ -- If we later allowed private internal libraries, then here we would + -- need to pre-scan the conditional data to make a list of all private + -- libraries that could possibly be defined by the .cabal file. + let pid = packageId pkg_descr0 + internalPackage = emptyInstalledPackageInfo { + --TODO: should use a per-compiler method to map the source + -- package ID into an installed package id we can use + -- for the internal package set. The open-codes use of + -- InstalledPackageId . display here is a hack. + Installed.installedPackageId = + InstalledPackageId $ display $ pid, + Installed.sourcePackageId = pid + } + internalPackageSet = PackageIndex.fromList [internalPackage] + installedPackageSet <- getInstalledPackages (lessVerbose verbosity) comp + packageDbs programsConfig' + + (allConstraints, requiredDepsMap) <- either die return $ + combinedConstraints (configConstraints cfg) + (configDependencies cfg) + installedPackageSet + + let exactConf = fromFlagOrDefault False (configExactConfiguration cfg) + -- Constraint test function for the solver + dependencySatisfiable d@(Dependency depName verRange) + | exactConf = + -- When we're given '--exact-configuration', we assume that all + -- dependencies and flags are exactly specified on the command + -- line. Thus we only consult the 'requiredDepsMap'. Note that + -- we're not doing the version range check, so if there's some + -- dependency that wasn't specified on the command line, + -- 'finalizePackageDescription' will fail. + -- + -- TODO: mention '--exact-configuration' in the error message + -- when this fails? + (depName `Map.member` requiredDepsMap) || isInternalDep + + | otherwise = + -- Normal operation: just look up dependency in the package + -- index. + not . null . 
PackageIndex.lookupDependency pkgs' $ d + where + pkgs' = PackageIndex.insert internalPackage installedPackageSet + isInternalDep = pkgName pid == depName + && pkgVersion pid `withinRange` verRange + enableTest t = t { testEnabled = fromFlag (configTests cfg) } + flaggedTests = map (\(n, t) -> (n, mapTreeData enableTest t)) + (condTestSuites pkg_descr0) + enableBenchmark bm = bm { benchmarkEnabled = + fromFlag (configBenchmarks cfg) } + flaggedBenchmarks = map (\(n, bm) -> + (n, mapTreeData enableBenchmark bm)) + (condBenchmarks pkg_descr0) + pkg_descr0'' = pkg_descr0 { condTestSuites = flaggedTests + , condBenchmarks = flaggedBenchmarks } + + (pkg_descr0', flags) <- + case finalizePackageDescription + (configConfigurationsFlags cfg) + dependencySatisfiable + compPlatform + (compilerInfo comp) + allConstraints + pkg_descr0'' + of Right r -> return r + Left missing -> + die $ "At least the following dependencies are missing:\n" + ++ (render . nest 4 . sep . punctuate comma + . map (disp . simplifyDependency) + $ missing) + + -- Sanity check: if '--exact-configuration' was given, ensure that the + -- complete flag assignment was specified on the command line. + when exactConf $ do + let cmdlineFlags = map fst (configConfigurationsFlags cfg) + allFlags = map flagName . genPackageFlags $ pkg_descr0 + diffFlags = allFlags \\ cmdlineFlags + when (not . null $ diffFlags) $ + die $ "'--exact-conf' was given, " + ++ "but the following flags were not specified: " + ++ intercalate ", " (map show diffFlags) + + -- add extra include/lib dirs as specified in cfg + -- we do it here so that those get checked too + let pkg_descr = addExtraIncludeLibDirs pkg_descr0' + + unless (renamingPackageFlagsSupported comp || + and [ rn == defaultRenaming + | bi <- allBuildInfo pkg_descr + , rn <- Map.elems (targetBuildRenaming bi)]) $ + die $ "Your compiler does not support thinning and renaming on " + ++ "package flags. To use this feature you probably must use " + ++ "GHC 7.9 or later." 
+ + when (not (null flags)) $ + info verbosity $ "Flags chosen: " + ++ intercalate ", " [ name ++ "=" ++ display value + | (FlagName name, value) <- flags ] + + when (maybe False (not.null.PD.reexportedModules) (PD.library pkg_descr) + && not (reexportedModulesSupported comp)) $ do + die $ "Your compiler does not support module re-exports. To use " + ++ "this feature you probably must use GHC 7.9 or later." + + checkPackageProblems verbosity pkg_descr0 + (updatePackageDescription pbi pkg_descr) + + -- Handle hole instantiation + (holeDeps, hole_insts) <- configureInstantiateWith pkg_descr cfg installedPackageSet + + let selectDependencies :: [Dependency] -> + ([FailedDependency], [ResolvedDependency]) + selectDependencies = + (\xs -> ([ x | Left x <- xs ], [ x | Right x <- xs ])) + . map (selectDependency internalPackageSet installedPackageSet + requiredDepsMap) + + (failedDeps, allPkgDeps) = + selectDependencies (buildDepends pkg_descr) + + internalPkgDeps = [ pkgid + | InternalDependency _ pkgid <- allPkgDeps ] + externalPkgDeps = [ pkg + | ExternalDependency _ pkg <- allPkgDeps ] + + when (not (null internalPkgDeps) + && not (newPackageDepsBehaviour pkg_descr)) $ + die $ "The field 'build-depends: " + ++ intercalate ", " (map (display . packageName) internalPkgDeps) + ++ "' refers to a library which is defined within the same " + ++ "package. To use this feature the package must specify at " + ++ "least 'cabal-version: >= 1.8'." + + reportFailedDependencies failedDeps + reportSelectedDependencies verbosity allPkgDeps + + let installDeps = Map.elems + . Map.fromList + . 
map (\v -> (Installed.installedPackageId v, v)) + $ externalPkgDeps ++ holeDeps + + packageDependsIndex <- + case PackageIndex.dependencyClosure installedPackageSet + (map Installed.installedPackageId installDeps) of + Left packageDependsIndex -> return packageDependsIndex + Right broken -> + die $ "The following installed packages are broken because other" + ++ " packages they depend on are missing. These broken " + ++ "packages must be rebuilt before they can be used.\n" + ++ unlines [ "package " + ++ display (packageId pkg) + ++ " is broken due to missing package " + ++ intercalate ", " (map display deps) + | (pkg, deps) <- broken ] + + let pseudoTopPkg = emptyInstalledPackageInfo { + Installed.installedPackageId = + InstalledPackageId (display (packageId pkg_descr)), + Installed.sourcePackageId = packageId pkg_descr, + Installed.depends = + map Installed.installedPackageId installDeps + } + case PackageIndex.dependencyInconsistencies + . PackageIndex.insert pseudoTopPkg + $ packageDependsIndex of + [] -> return () + inconsistencies -> + warn verbosity $ + "This package indirectly depends on multiple versions of the same " + ++ "package. This is highly likely to cause a compile failure.\n" + ++ unlines [ "package " ++ display pkg ++ " requires " + ++ display (PackageIdentifier name ver) + | (name, uses) <- inconsistencies + , (pkg, ver) <- uses ] + + -- Calculate the package key. We're going to store it in LocalBuildInfo + -- canonically, but ComponentsLocalBuildInfo also needs to know about it + -- XXX Do we need the internal deps? + -- NB: does *not* include holeDeps! 
+ let pkg_key = mkPackageKey (packageKeySupported comp) + (package pkg_descr) + (map Installed.packageKey externalPkgDeps) + (map (\(k,(p,m)) -> (k,(Installed.packageKey p,m))) hole_insts) + + -- internal component graph + buildComponents <- + case mkComponentsGraph pkg_descr internalPkgDeps of + Left componentCycle -> reportComponentCycle componentCycle + Right components -> + case mkComponentsLocalBuildInfo packageDependsIndex pkg_descr + internalPkgDeps externalPkgDeps holeDeps + (Map.fromList hole_insts) + pkg_key components of + Left problems -> reportModuleReexportProblems problems + Right components' -> return components' + + -- installation directories + defaultDirs <- defaultInstallDirs flavor userInstall (hasLibs pkg_descr) + let installDirs = combineInstallDirs fromFlagOrDefault + defaultDirs (configInstallDirs cfg) + + -- check languages and extensions + let langlist = nub $ catMaybes $ map defaultLanguage + (allBuildInfo pkg_descr) + let langs = unsupportedLanguages comp langlist + when (not (null langs)) $ + die $ "The package " ++ display (packageId pkg_descr0) + ++ " requires the following languages which are not " + ++ "supported by " ++ display (compilerId comp) ++ ": " + ++ intercalate ", " (map display langs) + let extlist = nub $ concatMap allExtensions (allBuildInfo pkg_descr) + let exts = unsupportedExtensions comp extlist + when (not (null exts)) $ + die $ "The package " ++ display (packageId pkg_descr0) + ++ " requires the following language extensions which are not " + ++ "supported by " ++ display (compilerId comp) ++ ": " + ++ intercalate ", " (map display exts) + + -- configured known/required programs & external build tools + -- exclude build-tool deps on "internal" exes in the same package + let requiredBuildTools = + [ buildTool + | let exeNames = map exeName (executables pkg_descr) + , bi <- allBuildInfo pkg_descr + , buildTool@(Dependency (PackageName toolName) reqVer) + <- buildTools bi + , let isInternal = + toolName `elem` 
exeNames + -- we assume all internal build-tools are + -- versioned with the package: + && packageVersion pkg_descr `withinRange` reqVer + , not isInternal ] + + programsConfig'' <- + configureAllKnownPrograms (lessVerbose verbosity) programsConfig' + >>= configureRequiredPrograms verbosity requiredBuildTools + + (pkg_descr', programsConfig''') <- + configurePkgconfigPackages verbosity pkg_descr programsConfig'' + + split_objs <- + if not (fromFlag $ configSplitObjs cfg) + then return False + else case flavor of + GHC | version >= Version [6,5] [] -> return True + GHCJS -> return True + _ -> do warn verbosity + ("this compiler does not support " ++ + "--enable-split-objs; ignoring") + return False + + let ghciLibByDefault = + case compilerId comp of + CompilerId GHC _ -> + -- If ghc is non-dynamic, then ghci needs object files, + -- so we build one by default. + -- + -- Technically, archive files should be sufficient for ghci, + -- but because of GHC bug #8942, it has never been safe to + -- rely on them. By the time that bug was fixed, ghci had + -- been changed to read shared libraries instead of archive + -- files (see next code block). + not (GHC.isDynamic comp) + CompilerId GHCJS _ -> + not (GHCJS.isDynamic comp) + _ -> False + + let sharedLibsByDefault + | fromFlag (configDynExe cfg) = + -- build a shared library if dynamically-linked + -- executables are requested + True + | otherwise = case compilerId comp of + CompilerId GHC _ -> + -- if ghc is dynamic, then ghci needs a shared + -- library, so we build one by default. + GHC.isDynamic comp + CompilerId GHCJS _ -> + GHCJS.isDynamic comp + _ -> False + withSharedLib_ = + -- build shared libraries if required by GHC or by the + -- executable linking mode, but allow the user to force + -- building only static library archives with + -- --disable-shared. 
+ fromFlagOrDefault sharedLibsByDefault $ configSharedLib cfg + withDynExe_ = fromFlag $ configDynExe cfg + when (withDynExe_ && not withSharedLib_) $ warn verbosity $ + "Executables will use dynamic linking, but a shared library " + ++ "is not being built. Linking will fail if any executables " + ++ "depend on the library." + + let withProfExe_ = fromFlagOrDefault False $ configProfExe cfg + withProfLib_ = fromFlagOrDefault withProfExe_ $ configProfLib cfg + when (withProfExe_ && not withProfLib_) $ warn verbosity $ + "Executables will be built with profiling, but library " + ++ "profiling is disabled. Linking will fail if any executables " + ++ "depend on the library." + + let configCoverage_ = + mappend (configCoverage cfg) (configLibCoverage cfg) + + cfg' = cfg { configCoverage = configCoverage_ } + + reloc <- + if not (fromFlag $ configRelocatable cfg) + then return False + else return True + + let lbi = LocalBuildInfo { + configFlags = cfg', + extraConfigArgs = [], -- Currently configure does not + -- take extra args, but if it + -- did they would go here. 
+ installDirTemplates = installDirs, + compiler = comp, + hostPlatform = compPlatform, + buildDir = buildDir', + componentsConfigs = buildComponents, + installedPkgs = packageDependsIndex, + pkgDescrFile = Nothing, + localPkgDescr = pkg_descr', + pkgKey = pkg_key, + instantiatedWith = hole_insts, + withPrograms = programsConfig''', + withVanillaLib = fromFlag $ configVanillaLib cfg, + withProfLib = withProfLib_, + withSharedLib = withSharedLib_, + withDynExe = withDynExe_, + withProfExe = withProfExe_, + withOptimization = fromFlag $ configOptimization cfg, + withDebugInfo = fromFlag $ configDebugInfo cfg, + withGHCiLib = fromFlagOrDefault ghciLibByDefault $ + configGHCiLib cfg, + splitObjs = split_objs, + stripExes = fromFlag $ configStripExes cfg, + stripLibs = fromFlag $ configStripLibs cfg, + withPackageDB = packageDbs, + progPrefix = fromFlag $ configProgPrefix cfg, + progSuffix = fromFlag $ configProgSuffix cfg, + relocatable = reloc + } + + when reloc (checkRelocatable verbosity pkg_descr lbi) + + let dirs = absoluteInstallDirs pkg_descr lbi NoCopyDest + relative = prefixRelativeInstallDirs (packageId pkg_descr) lbi + + unless (isAbsolute (prefix dirs)) $ die $ + "expected an absolute directory name for --prefix: " ++ prefix dirs + + info verbosity $ "Using " ++ display currentCabalId + ++ " compiled by " ++ display currentCompilerId + info verbosity $ "Using compiler: " ++ showCompilerId comp + info verbosity $ "Using install prefix: " ++ prefix dirs + + let dirinfo name dir isPrefixRelative = + info verbosity $ name ++ " installed in: " ++ dir ++ relNote + where relNote = case buildOS of + Windows | not (hasLibs pkg_descr) + && isNothing isPrefixRelative + -> " (fixed location)" + _ -> "" + + dirinfo "Binaries" (bindir dirs) (bindir relative) + dirinfo "Libraries" (libdir dirs) (libdir relative) + dirinfo "Private binaries" (libexecdir dirs) (libexecdir relative) + dirinfo "Data files" (datadir dirs) (datadir relative) + dirinfo "Documentation" (docdir 
dirs) (docdir relative) + dirinfo "Configuration files" (sysconfdir dirs) (sysconfdir relative) + + sequence_ [ reportProgram verbosity prog configuredProg + | (prog, configuredProg) <- knownPrograms programsConfig''' ] + + return lbi + + where + addExtraIncludeLibDirs pkg_descr = + let extraBi = mempty { extraLibDirs = configExtraLibDirs cfg + , PD.includeDirs = configExtraIncludeDirs cfg} + modifyLib l = l{ libBuildInfo = libBuildInfo l + `mappend` extraBi } + modifyExecutable e = e{ buildInfo = buildInfo e + `mappend` extraBi} + in pkg_descr{ library = modifyLib `fmap` library pkg_descr + , executables = modifyExecutable `map` + executables pkg_descr} + +mkProgramsConfig :: ConfigFlags -> ProgramConfiguration -> ProgramConfiguration +mkProgramsConfig cfg initialProgramsConfig = programsConfig + where + programsConfig = userSpecifyArgss (configProgramArgs cfg) + . userSpecifyPaths (configProgramPaths cfg) + . setProgramSearchPath searchpath + $ initialProgramsConfig + searchpath = getProgramSearchPath (initialProgramsConfig) + ++ map ProgramSearchPathDir (fromNubList $ configProgramPathExtra cfg) + +-- ----------------------------------------------------------------------------- +-- Configuring package dependencies + +reportProgram :: Verbosity -> Program -> Maybe ConfiguredProgram -> IO () +reportProgram verbosity prog Nothing + = info verbosity $ "No " ++ programName prog ++ " found" +reportProgram verbosity prog (Just configuredProg) + = info verbosity $ "Using " ++ programName prog ++ version ++ location + where location = case programLocation configuredProg of + FoundOnSystem p -> " found on system at: " ++ p + UserSpecified p -> " given by user at: " ++ p + version = case programVersion configuredProg of + Nothing -> "" + Just v -> " version " ++ display v + +hackageUrl :: String +hackageUrl = "http://hackage.haskell.org/package/" + +data ResolvedDependency = ExternalDependency Dependency InstalledPackageInfo + | InternalDependency Dependency PackageId -- 
should be a + -- lib name + +data FailedDependency = DependencyNotExists PackageName + | DependencyNoVersion Dependency + +-- | Test for a package dependency and record the version we have installed. +selectDependency :: InstalledPackageIndex -- ^ Internally defined packages + -> InstalledPackageIndex -- ^ Installed packages + -> Map PackageName InstalledPackageInfo + -- ^ Packages for which we have been given specific deps to use + -> Dependency + -> Either FailedDependency ResolvedDependency +selectDependency internalIndex installedIndex requiredDepsMap + dep@(Dependency pkgname vr) = + -- If the dependency specification matches anything in the internal package + -- index, then we prefer that match to anything in the second. + -- For example: + -- + -- Name: MyLibrary + -- Version: 0.1 + -- Library + -- .. + -- Executable my-exec + -- build-depends: MyLibrary + -- + -- We want "build-depends: MyLibrary" always to match the internal library + -- even if there is a newer installed library "MyLibrary-0.2". + -- However, "build-depends: MyLibrary >= 0.2" should match the installed one. + case PackageIndex.lookupPackageName internalIndex pkgname of + [(_,[pkg])] | packageVersion pkg `withinRange` vr + -> Right $ InternalDependency dep (packageId pkg) + + _ -> case Map.lookup pkgname requiredDepsMap of + -- If we know the exact pkg to use, then use it. + Just pkginstance -> Right (ExternalDependency dep pkginstance) + -- Otherwise we just pick an arbitrary instance of the latest version. 
+ Nothing -> case PackageIndex.lookupDependency installedIndex dep of + [] -> Left $ DependencyNotExists pkgname + pkgs -> Right $ ExternalDependency dep $ + case last pkgs of + (_ver, pkginstances) -> head pkginstances + +reportSelectedDependencies :: Verbosity + -> [ResolvedDependency] -> IO () +reportSelectedDependencies verbosity deps = + info verbosity $ unlines + [ "Dependency " ++ display (simplifyDependency dep) + ++ ": using " ++ display pkgid + | resolved <- deps + , let (dep, pkgid) = case resolved of + ExternalDependency dep' pkg' -> (dep', packageId pkg') + InternalDependency dep' pkgid' -> (dep', pkgid') ] + +reportFailedDependencies :: [FailedDependency] -> IO () +reportFailedDependencies [] = return () +reportFailedDependencies failed = + die (intercalate "\n\n" (map reportFailedDependency failed)) + + where + reportFailedDependency (DependencyNotExists pkgname) = + "there is no version of " ++ display pkgname ++ " installed.\n" + ++ "Perhaps you need to download and install it from\n" + ++ hackageUrl ++ display pkgname ++ "?" + + reportFailedDependency (DependencyNoVersion dep) = + "cannot satisfy dependency " ++ display (simplifyDependency dep) ++ "\n" + +getInstalledPackages :: Verbosity -> Compiler + -> PackageDBStack -> ProgramConfiguration + -> IO InstalledPackageIndex +getInstalledPackages verbosity comp packageDBs progconf = do + when (null packageDBs) $ + die $ "No package databases have been specified. If you use " + ++ "--package-db=clear, you must follow it with --package-db= " + ++ "with 'global', 'user' or a specific file." + + info verbosity "Reading installed packages..." 
+ case compilerFlavor comp of + GHC -> GHC.getInstalledPackages verbosity packageDBs progconf + GHCJS -> GHCJS.getInstalledPackages verbosity packageDBs progconf + JHC -> JHC.getInstalledPackages verbosity packageDBs progconf + LHC -> LHC.getInstalledPackages verbosity packageDBs progconf + UHC -> UHC.getInstalledPackages verbosity comp packageDBs progconf + HaskellSuite {} -> + HaskellSuite.getInstalledPackages verbosity packageDBs progconf + flv -> die $ "don't know how to find the installed packages for " + ++ display flv + +-- | Like 'getInstalledPackages', but for a single package DB. +getPackageDBContents :: Verbosity -> Compiler + -> PackageDB -> ProgramConfiguration + -> IO InstalledPackageIndex +getPackageDBContents verbosity comp packageDB progconf = do + info verbosity "Reading installed packages..." + case compilerFlavor comp of + GHC -> GHC.getPackageDBContents verbosity packageDB progconf + GHCJS -> GHCJS.getPackageDBContents verbosity packageDB progconf + -- For other compilers, try to fall back on 'getInstalledPackages'. + _ -> getInstalledPackages verbosity comp [packageDB] progconf + + +-- | The user interface specifies the package dbs to use with a combination of +-- @--global@, @--user@ and @--package-db=global|user|clear|$file@. +-- This function combines the global/user flag and interprets the package-db +-- flag into a single package db stack. +-- +interpretPackageDbFlags :: Bool -> [Maybe PackageDB] -> PackageDBStack +interpretPackageDbFlags userInstall specificDBs = + extra initialStack specificDBs + where + initialStack | userInstall = [GlobalPackageDB, UserPackageDB] + | otherwise = [GlobalPackageDB] + + extra dbs' [] = dbs' + extra _ (Nothing:dbs) = extra [] dbs + extra dbs' (Just db:dbs) = extra (dbs' ++ [db]) dbs + +newPackageDepsBehaviourMinVersion :: Version +newPackageDepsBehaviourMinVersion = Version [1,7,1] [] + +-- In older cabal versions, there was only one set of package dependencies for +-- the whole package. 
In this version, we can have separate dependencies per +-- target, but we only enable this behaviour if the minimum cabal version +-- specified is >= a certain minimum. Otherwise, for compatibility we use the +-- old behaviour. +newPackageDepsBehaviour :: PackageDescription -> Bool +newPackageDepsBehaviour pkg = + specVersion pkg >= newPackageDepsBehaviourMinVersion + +-- We are given both --constraint="foo < 2.0" style constraints and also +-- specific packages to pick via --dependency="foo=foo-2.0-177d5cdf20962d0581". +-- +-- When finalising the package we have to take into account the specific +-- installed deps we've been given, and the finalise function expects +-- constraints, so we have to translate these deps into version constraints. +-- +-- But after finalising we then have to make sure we pick the right specific +-- deps in the end. So we still need to remember which installed packages to +-- pick. +combinedConstraints :: [Dependency] -> + [(PackageName, InstalledPackageId)] -> + InstalledPackageIndex -> + Either String ([Dependency], + Map PackageName InstalledPackageInfo) +combinedConstraints constraints dependencies installedPackages = do + + when (not (null badInstalledPackageIds)) $ + Left $ render $ text "The following package dependencies were requested" + $+$ nest 4 (dispDependencies badInstalledPackageIds) + $+$ text "however the given installed package instance does not exist." + + when (not (null badNames)) $ + Left $ render $ text "The following package dependencies were requested" + $+$ nest 4 (dispDependencies badNames) + $+$ text "however the installed package's name does not match the name given." + + --TODO: we don't check that all dependencies are used! 
+ + return (allConstraints, idConstraintMap) + + where + allConstraints :: [Dependency] + allConstraints = constraints + ++ [ thisPackageVersion (packageId pkg) + | (_, _, Just pkg) <- dependenciesPkgInfo ] + + idConstraintMap :: Map PackageName InstalledPackageInfo + idConstraintMap = Map.fromList + [ (packageName pkg, pkg) + | (_, _, Just pkg) <- dependenciesPkgInfo ] + + -- The dependencies along with the installed package info, if it exists + dependenciesPkgInfo :: [(PackageName, InstalledPackageId, + Maybe InstalledPackageInfo)] + dependenciesPkgInfo = + [ (pkgname, ipkgid, mpkg) + | (pkgname, ipkgid) <- dependencies + , let mpkg = PackageIndex.lookupInstalledPackageId + installedPackages ipkgid + ] + + -- If we looked up a package specified by an installed package id + -- (i.e. someone has written a hash) and didn't find it then it's + -- an error. + badInstalledPackageIds = + [ (pkgname, ipkgid) + | (pkgname, ipkgid, Nothing) <- dependenciesPkgInfo ] + + -- If someone has written e.g. + -- --dependency="foo=MyOtherLib-1.0-07...5bf30" then they have + -- probably made a mistake. + badNames = + [ (requestedPkgName, ipkgid) + | (requestedPkgName, ipkgid, Just pkg) <- dependenciesPkgInfo + , let foundPkgName = packageName pkg + , requestedPkgName /= foundPkgName ] + + dispDependencies deps = + hsep [ text "--dependency=" + <> quotes (disp pkgname <> char '=' <> disp ipkgid) + | (pkgname, ipkgid) <- deps ] + +-- ----------------------------------------------------------------------------- +-- Configuring hole instantiation + +configureInstantiateWith :: PackageDescription + -> ConfigFlags + -> InstalledPackageIndex -- ^ installed packages + -> IO ([InstalledPackageInfo], + [(ModuleName, (InstalledPackageInfo, ModuleName))]) +configureInstantiateWith pkg_descr cfg installedPackageSet = do + -- Holes: First, check and make sure the provided instantiation covers + -- all the holes we know about. 
Indefinite package installation is + -- not handled at all at this point. + -- NB: We union together /all/ of the requirements when calculating + -- the package key. + -- NB: For now, we assume that dependencies don't contribute signatures. + -- This will be handled by cabal-install; as far as ./Setup is + -- concerned, the most important thing is to be provided correctly + -- built dependencies. + let signatures = + maybe [] (\lib -> requiredSignatures lib ++ exposedSignatures lib) + (PD.library pkg_descr) + signatureSet = Set.fromList signatures + instantiateMap = Map.fromList (configInstantiateWith cfg) + missing_impls = filter (not . flip Map.member instantiateMap) signatures + hole_insts0 = filter (\(k,_) -> Set.member k signatureSet) (configInstantiateWith cfg) + + when (not (null missing_impls)) $ + die $ "Missing signature implementations for these modules: " + ++ intercalate ", " (map display missing_impls) + + -- Holes: Next, we need to make sure we have packages to actually + -- provide the implementations we're talking about. This is on top + -- of the normal dependency resolution process. + -- TODO: internal dependencies (e.g. the test package depending on the + -- main library) is not currently supported + let selectHoleDependency (k,(i,m)) = + case PackageIndex.lookupInstalledPackageId installedPackageSet i of + Just pkginst -> Right (k,(pkginst, m)) + Nothing -> Left i + (failed_hmap, hole_insts) = partitionEithers (map selectHoleDependency hole_insts0) + holeDeps = map (fst.snd) hole_insts -- could have dups + + -- Holes: Finally, any dependencies selected this way have to be + -- included in the allPkgs index, as well as the buildComponents. + -- But don't report these as potential inconsistencies! 
+ + when (not (null failed_hmap)) $ + die $ "Could not resolve these package IDs (from signature implementations): " + ++ intercalate ", " (map display failed_hmap) + + return (holeDeps, hole_insts) + +-- ----------------------------------------------------------------------------- +-- Configuring program dependencies + +configureRequiredPrograms :: Verbosity -> [Dependency] -> ProgramConfiguration + -> IO ProgramConfiguration +configureRequiredPrograms verbosity deps conf = + foldM (configureRequiredProgram verbosity) conf deps + +configureRequiredProgram :: Verbosity -> ProgramConfiguration -> Dependency + -> IO ProgramConfiguration +configureRequiredProgram verbosity conf + (Dependency (PackageName progName) verRange) = + case lookupKnownProgram progName conf of + Nothing -> die ("Unknown build tool " ++ progName) + Just prog + -- requireProgramVersion always requires the program have a version + -- but if the user says "build-depends: foo" ie no version constraint + -- then we should not fail if we cannot discover the program version. 
+ | verRange == anyVersion -> do + (_, conf') <- requireProgram verbosity prog conf + return conf' + | otherwise -> do + (_, _, conf') <- requireProgramVersion verbosity prog verRange conf + return conf' + +-- ----------------------------------------------------------------------------- +-- Configuring pkg-config package dependencies + +configurePkgconfigPackages :: Verbosity -> PackageDescription + -> ProgramConfiguration + -> IO (PackageDescription, ProgramConfiguration) +configurePkgconfigPackages verbosity pkg_descr conf + | null allpkgs = return (pkg_descr, conf) + | otherwise = do + (_, _, conf') <- requireProgramVersion + (lessVerbose verbosity) pkgConfigProgram + (orLaterVersion $ Version [0,9,0] []) conf + mapM_ requirePkg allpkgs + lib' <- mapM addPkgConfigBILib (library pkg_descr) + exes' <- mapM addPkgConfigBIExe (executables pkg_descr) + tests' <- mapM addPkgConfigBITest (testSuites pkg_descr) + benches' <- mapM addPkgConfigBIBench (benchmarks pkg_descr) + let pkg_descr' = pkg_descr { library = lib', executables = exes', + testSuites = tests', benchmarks = benches' } + return (pkg_descr', conf') + + where + allpkgs = concatMap pkgconfigDepends (allBuildInfo pkg_descr) + pkgconfig = rawSystemProgramStdoutConf (lessVerbose verbosity) + pkgConfigProgram conf + + requirePkg dep@(Dependency (PackageName pkg) range) = do + version <- pkgconfig ["--modversion", pkg] + `catchIO` (\_ -> die notFound) + `catchExit` (\_ -> die notFound) + case simpleParse version of + Nothing -> die "parsing output of pkg-config --modversion failed" + Just v | not (withinRange v range) -> die (badVersion v) + | otherwise -> info verbosity (depSatisfied v) + where + notFound = "The pkg-config package '" ++ pkg ++ "'" + ++ versionRequirement + ++ " is required but it could not be found." 
+ badVersion v = "The pkg-config package '" ++ pkg ++ "'" + ++ versionRequirement + ++ " is required but the version installed on the" + ++ " system is version " ++ display v + depSatisfied v = "Dependency " ++ display dep + ++ ": using version " ++ display v + + versionRequirement + | isAnyVersion range = "" + | otherwise = " version " ++ display range + + -- Adds pkgconfig dependencies to the build info for a component + addPkgConfigBI compBI setCompBI comp = do + bi <- pkgconfigBuildInfo (pkgconfigDepends (compBI comp)) + return $ setCompBI comp (compBI comp `mappend` bi) + + -- Adds pkgconfig dependencies to the build info for a library + addPkgConfigBILib = addPkgConfigBI libBuildInfo $ + \lib bi -> lib { libBuildInfo = bi } + + -- Adds pkgconfig dependencies to the build info for an executable + addPkgConfigBIExe = addPkgConfigBI buildInfo $ + \exe bi -> exe { buildInfo = bi } + + -- Adds pkgconfig dependencies to the build info for a test suite + addPkgConfigBITest = addPkgConfigBI testBuildInfo $ + \test bi -> test { testBuildInfo = bi } + + -- Adds pkgconfig dependencies to the build info for a benchmark + addPkgConfigBIBench = addPkgConfigBI benchmarkBuildInfo $ + \bench bi -> bench { benchmarkBuildInfo = bi } + + pkgconfigBuildInfo :: [Dependency] -> IO BuildInfo + pkgconfigBuildInfo [] = return mempty + pkgconfigBuildInfo pkgdeps = do + let pkgs = nub [ display pkg | Dependency pkg _ <- pkgdeps ] + ccflags <- pkgconfig ("--cflags" : pkgs) + ldflags <- pkgconfig ("--libs" : pkgs) + return (ccLdOptionsBuildInfo (words ccflags) (words ldflags)) + +-- | Makes a 'BuildInfo' from C compiler and linker flags. +-- +-- This can be used with the output from configuration programs like pkg-config +-- and similar package-specific programs like mysql-config, freealut-config etc. 
+-- For example: +-- +-- > ccflags <- rawSystemProgramStdoutConf verbosity prog conf ["--cflags"] +-- > ldflags <- rawSystemProgramStdoutConf verbosity prog conf ["--libs"] +-- > return (ccldOptionsBuildInfo (words ccflags) (words ldflags)) +-- +ccLdOptionsBuildInfo :: [String] -> [String] -> BuildInfo +ccLdOptionsBuildInfo cflags ldflags = + let (includeDirs', cflags') = partition ("-I" `isPrefixOf`) cflags + (extraLibs', ldflags') = partition ("-l" `isPrefixOf`) ldflags + (extraLibDirs', ldflags'') = partition ("-L" `isPrefixOf`) ldflags' + in mempty { + PD.includeDirs = map (drop 2) includeDirs', + PD.extraLibs = map (drop 2) extraLibs', + PD.extraLibDirs = map (drop 2) extraLibDirs', + PD.ccOptions = cflags', + PD.ldOptions = ldflags'' + } + +-- ----------------------------------------------------------------------------- +-- Determining the compiler details + +configCompilerAuxEx :: ConfigFlags + -> IO (Compiler, Platform, ProgramConfiguration) +configCompilerAuxEx cfg = configCompilerEx (flagToMaybe $ configHcFlavor cfg) + (flagToMaybe $ configHcPath cfg) + (flagToMaybe $ configHcPkg cfg) + programsConfig + (fromFlag (configVerbosity cfg)) + where + programsConfig = mkProgramsConfig cfg defaultProgramConfiguration + +configCompilerEx :: Maybe CompilerFlavor -> Maybe FilePath -> Maybe FilePath + -> ProgramConfiguration -> Verbosity + -> IO (Compiler, Platform, ProgramConfiguration) +configCompilerEx Nothing _ _ _ _ = die "Unknown compiler" +configCompilerEx (Just hcFlavor) hcPath hcPkg conf verbosity = do + (comp, maybePlatform, programsConfig) <- case hcFlavor of + GHC -> GHC.configure verbosity hcPath hcPkg conf + GHCJS -> GHCJS.configure verbosity hcPath hcPkg conf + JHC -> JHC.configure verbosity hcPath hcPkg conf + LHC -> do (_, _, ghcConf) <- GHC.configure verbosity Nothing hcPkg conf + LHC.configure verbosity hcPath Nothing ghcConf + UHC -> UHC.configure verbosity hcPath hcPkg conf + HaskellSuite {} -> HaskellSuite.configure verbosity hcPath hcPkg conf 
+ _ -> die "Unknown compiler" + return (comp, fromMaybe buildPlatform maybePlatform, programsConfig) + +-- Ideally we would like to not have separate configCompiler* and +-- configCompiler*Ex sets of functions, but there are many custom setup scripts +-- in the wild that are using them, so the versions with old types are kept for +-- backwards compatibility. Platform was added to the return triple in 1.18. + +{-# DEPRECATED configCompiler + "'configCompiler' is deprecated. Use 'configCompilerEx' instead." #-} +configCompiler :: Maybe CompilerFlavor -> Maybe FilePath -> Maybe FilePath + -> ProgramConfiguration -> Verbosity + -> IO (Compiler, ProgramConfiguration) +configCompiler mFlavor hcPath hcPkg conf verbosity = + fmap (\(a,_,b) -> (a,b)) $ configCompilerEx mFlavor hcPath hcPkg conf verbosity + +{-# DEPRECATED configCompilerAux + "configCompilerAux is deprecated. Use 'configCompilerAuxEx' instead." #-} +configCompilerAux :: ConfigFlags + -> IO (Compiler, ProgramConfiguration) +configCompilerAux = fmap (\(a,_,b) -> (a,b)) . 
configCompilerAuxEx + +-- ----------------------------------------------------------------------------- +-- Making the internal component graph + + +mkComponentsGraph :: PackageDescription + -> [PackageId] + -> Either [ComponentName] + [(Component, [ComponentName])] +mkComponentsGraph pkg_descr internalPkgDeps = + let graph = [ (c, componentName c, componentDeps c) + | c <- pkgEnabledComponents pkg_descr ] + in case checkComponentsCyclic graph of + Just ccycle -> Left [ cname | (_,cname,_) <- ccycle ] + Nothing -> Right [ (c, cdeps) | (c, _, cdeps) <- graph ] + where + -- The dependencies for the given component + componentDeps component = + [ CExeName toolname | Dependency (PackageName toolname) _ + <- buildTools bi + , toolname `elem` map exeName + (executables pkg_descr) ] + + ++ [ CLibName | Dependency pkgname _ <- targetBuildDepends bi + , pkgname `elem` map packageName internalPkgDeps ] + where + bi = componentBuildInfo component + +reportComponentCycle :: [ComponentName] -> IO a +reportComponentCycle cnames = + die $ "Components in the package depend on each other in a cyclic way:\n " + ++ intercalate " depends on " + [ "'" ++ showComponentName cname ++ "'" + | cname <- cnames ++ [head cnames] ] + +mkComponentsLocalBuildInfo :: InstalledPackageIndex + -> PackageDescription + -> [PackageId] -- internal package deps + -> [InstalledPackageInfo] -- external package deps + -> [InstalledPackageInfo] -- hole package deps + -> Map ModuleName (InstalledPackageInfo, ModuleName) + -> PackageKey + -> [(Component, [ComponentName])] + -> Either [(ModuleReexport, String)] -- errors + [(ComponentName, ComponentLocalBuildInfo, + [ComponentName])] -- ok +mkComponentsLocalBuildInfo installedPackages pkg_descr + internalPkgDeps externalPkgDeps holePkgDeps hole_insts + pkg_key graph = + sequence + [ do clbi <- componentLocalBuildInfo c + return (componentName c, clbi, cdeps) + | (c, cdeps) <- graph ] + where + -- The allPkgDeps contains all the package deps for the whole package 
+ -- but we need to select the subset for this specific component. + -- we just take the subset for the package names this component + -- needs. Note, this only works because we cannot yet depend on two + -- versions of the same package. + componentLocalBuildInfo component = + case component of + CLib lib -> do + let exports = map (\n -> Installed.ExposedModule n Nothing Nothing) + (PD.exposedModules lib) + esigs = map (\n -> Installed.ExposedModule n Nothing + (fmap (\(pkg,m) -> Installed.OriginalModule + (Installed.installedPackageId pkg) m) + (Map.lookup n hole_insts))) + (PD.exposedSignatures lib) + reexports <- resolveModuleReexports installedPackages + (packageId pkg_descr) + externalPkgDeps lib + return LibComponentLocalBuildInfo { + componentPackageDeps = cpds, + componentLibraries = [ LibraryName ("HS" ++ packageKeyLibraryName (package pkg_descr) pkg_key) ], + componentPackageRenaming = cprns, + componentExposedModules = exports ++ reexports ++ esigs + } + CExe _ -> + return ExeComponentLocalBuildInfo { + componentPackageDeps = cpds, + componentPackageRenaming = cprns + } + CTest _ -> + return TestComponentLocalBuildInfo { + componentPackageDeps = cpds, + componentPackageRenaming = cprns + } + CBench _ -> + return BenchComponentLocalBuildInfo { + componentPackageDeps = cpds, + componentPackageRenaming = cprns + } + where + bi = componentBuildInfo component + dedup = Map.toList . 
Map.fromList + cpds = if newPackageDepsBehaviour pkg_descr + then dedup $ + [ (Installed.installedPackageId pkg, packageId pkg) + | pkg <- selectSubset bi externalPkgDeps ] + ++ [ (inplacePackageId pkgid, pkgid) + | pkgid <- selectSubset bi internalPkgDeps ] + ++ [ (Installed.installedPackageId pkg, packageId pkg) + | pkg <- holePkgDeps ] + else [ (Installed.installedPackageId pkg, packageId pkg) + | pkg <- externalPkgDeps ] + cprns = if newPackageDepsBehaviour pkg_descr + then Map.unionWith mappend + -- We need hole dependencies passed to GHC, so add them here + -- (but note that they're fully thinned out. If they + -- appeared legitimately the monoid instance will + -- fill them out. + (Map.fromList [(packageName pkg, mempty) | pkg <- holePkgDeps]) + (targetBuildRenaming bi) + -- Hack: if we have old package-deps behavior, it's impossible + -- for non-default renamings to be used, because the Cabal + -- version is too early. This is a good, because while all the + -- deps were bundled up in buildDepends, we didn't do this for + -- renamings, so it's not even clear how to get the merged + -- version. So just assume that all of them are the default.. + else Map.fromList (map (\(_,pid) -> (packageName pid, defaultRenaming)) cpds) + + selectSubset :: Package pkg => BuildInfo -> [pkg] -> [pkg] + selectSubset bi pkgs = + [ pkg | pkg <- pkgs, packageName pkg `elem` names bi ] + + names bi = [ name | Dependency name _ <- targetBuildDepends bi ] + +-- | Given the author-specified re-export declarations from the .cabal file, +-- resolve them to the form that we need for the package database. +-- +-- An invariant of the package database is that we always link the re-export +-- directly to its original defining location (rather than indirectly via a +-- chain of re-exporting packages). 
+-- +resolveModuleReexports :: InstalledPackageIndex + -> PackageId + -> [InstalledPackageInfo] + -> Library + -> Either [(ModuleReexport, String)] -- errors + [Installed.ExposedModule] -- ok +resolveModuleReexports installedPackages srcpkgid externalPkgDeps lib = + case partitionEithers (map resolveModuleReexport (PD.reexportedModules lib)) of + ([], ok) -> Right ok + (errs, _) -> Left errs + where + -- A mapping from visible module names to their original defining + -- module name. We also record the package name of the package which + -- *immediately* provided the module (not the original) to handle if the + -- user explicitly says which build-depends they want to reexport from. + visibleModules :: Map ModuleName [(PackageName, Installed.ExposedModule)] + visibleModules = + Map.fromListWith (++) $ + [ (Installed.exposedName exposedModule, [(exportingPackageName, + exposedModule)]) + -- The package index here contains all the indirect deps of the + -- package we're configuring, but we want just the direct deps + | let directDeps = Set.fromList (map Installed.installedPackageId externalPkgDeps) + , pkg <- PackageIndex.allPackages installedPackages + , Installed.installedPackageId pkg `Set.member` directDeps + , let exportingPackageName = packageName pkg + , exposedModule <- visibleModuleDetails pkg + ] + ++ [ (visibleModuleName, [(exportingPackageName, exposedModule)]) + | visibleModuleName <- PD.exposedModules lib + ++ otherModules (libBuildInfo lib) + , let exportingPackageName = packageName srcpkgid + definingModuleName = visibleModuleName + -- we don't know the InstalledPackageId of this package yet + -- we will fill it in later, before registration. 
+ definingPackageId = InstalledPackageId "" + originalModule = Installed.OriginalModule definingPackageId + definingModuleName + exposedModule = Installed.ExposedModule visibleModuleName + (Just originalModule) + Nothing + ] + + -- All the modules exported from this package and their defining name and + -- package (either defined here in this package or re-exported from some + -- other package). Return an ExposedModule because we want to hold onto + -- signature information. + visibleModuleDetails :: InstalledPackageInfo -> [Installed.ExposedModule] + visibleModuleDetails pkg = do + exposedModule <- Installed.exposedModules pkg + case Installed.exposedReexport exposedModule of + -- The first case is the modules actually defined in this package. + -- In this case the reexport will point to this package. + Nothing -> return exposedModule { Installed.exposedReexport = + Just (Installed.OriginalModule (Installed.installedPackageId pkg) + (Installed.exposedName exposedModule)) } + -- On the other hand, a visible module might actually be itself + -- a re-export! In this case, the re-export info for the package + -- doing the re-export will point us to the original defining + -- module name and package, so we can reuse the entry. 
+ Just _ -> return exposedModule + + resolveModuleReexport reexport@ModuleReexport { + moduleReexportOriginalPackage = moriginalPackageName, + moduleReexportOriginalName = originalName, + moduleReexportName = newName + } = + + let filterForSpecificPackage = + case moriginalPackageName of + Nothing -> id + Just originalPackageName -> + filter (\(pkgname, _) -> pkgname == originalPackageName) + + matches = filterForSpecificPackage + (Map.findWithDefault [] originalName visibleModules) + in + case (matches, moriginalPackageName) of + ((_, exposedModule):rest, _) + -- TODO: Refine this check for signatures + | all (\(_, exposedModule') -> Installed.exposedReexport exposedModule + == Installed.exposedReexport exposedModule') rest + -> Right exposedModule { Installed.exposedName = newName } + + ([], Just originalPackageName) + -> Left $ (,) reexport + $ "The package " ++ display originalPackageName + ++ " does not export a module " ++ display originalName + + ([], Nothing) + -> Left $ (,) reexport + $ "The module " ++ display originalName + ++ " is not exported by any suitable package (this package " + ++ "itself nor any of its 'build-depends' dependencies)." + + (ms, _) + -> Left $ (,) reexport + $ "The module " ++ display originalName ++ " is exported " + ++ "by more than one package (" + ++ intercalate ", " [ display pkgname | (pkgname,_) <- ms ] + ++ ") and so the re-export is ambiguous. The ambiguity can " + ++ "be resolved by qualifying by the package name. The " + ++ "syntax is 'packagename:moduleName [as newname]'." + + -- Note: if in future Cabal allows directly depending on multiple + -- instances of the same package (e.g. backpack) then an additional + -- ambiguity case is possible here: (_, Just originalPackageName) + -- with the module being ambigious despite being qualified by a + -- package name. Presumably by that time we'll have a mechanism to + -- qualify the instance we're referring to. 
+ +reportModuleReexportProblems :: [(ModuleReexport, String)] -> IO a +reportModuleReexportProblems reexportProblems = + die $ unlines + [ "Problem with the module re-export '" ++ display reexport ++ "': " ++ msg + | (reexport, msg) <- reexportProblems ] + +-- ----------------------------------------------------------------------------- +-- Testing C lib and header dependencies + +-- Try to build a test C program which includes every header and links every +-- lib. If that fails, try to narrow it down by preprocessing (only) and linking +-- with individual headers and libs. If none is the obvious culprit then give a +-- generic error message. +-- TODO: produce a log file from the compiler errors, if any. +checkForeignDeps :: PackageDescription -> LocalBuildInfo -> Verbosity -> IO () +checkForeignDeps pkg lbi verbosity = do + ifBuildsWith allHeaders (commonCcArgs ++ makeLdArgs allLibs) -- I'm feeling + -- lucky + (return ()) + (do missingLibs <- findMissingLibs + missingHdr <- findOffendingHdr + explainErrors missingHdr missingLibs) + where + allHeaders = collectField PD.includes + allLibs = collectField PD.extraLibs + + ifBuildsWith headers args success failure = do + ok <- builds (makeProgram headers) args + if ok then success else failure + + findOffendingHdr = + ifBuildsWith allHeaders ccArgs + (return Nothing) + (go . tail . inits $ allHeaders) + where + go [] = return Nothing -- cannot happen + go (hdrs:hdrsInits) = + -- Try just preprocessing first + ifBuildsWith hdrs cppArgs + -- If that works, try compiling too + (ifBuildsWith hdrs ccArgs + (go hdrsInits) + (return . Just . Right . last $ hdrs)) + (return . Just . Left . last $ hdrs) + + cppArgs = "-E":commonCppArgs -- preprocess only + ccArgs = "-c":commonCcArgs -- don't try to link + + findMissingLibs = ifBuildsWith [] (makeLdArgs allLibs) + (return []) + (filterM (fmap not . 
libExists) allLibs) + + libExists lib = builds (makeProgram []) (makeLdArgs [lib]) + + commonCppArgs = platformDefines lbi + ++ [ "-I" ++ autogenModulesDir lbi ] + ++ [ "-I" ++ dir | dir <- collectField PD.includeDirs ] + ++ ["-I."] + ++ collectField PD.cppOptions + ++ collectField PD.ccOptions + ++ [ "-I" ++ dir + | dep <- deps + , dir <- Installed.includeDirs dep ] + ++ [ opt + | dep <- deps + , opt <- Installed.ccOptions dep ] + + commonCcArgs = commonCppArgs + ++ collectField PD.ccOptions + ++ [ opt + | dep <- deps + , opt <- Installed.ccOptions dep ] + + commonLdArgs = [ "-L" ++ dir | dir <- collectField PD.extraLibDirs ] + ++ collectField PD.ldOptions + ++ [ "-L" ++ dir + | dep <- deps + , dir <- Installed.libraryDirs dep ] + --TODO: do we also need dependent packages' ld options? + makeLdArgs libs = [ "-l"++lib | lib <- libs ] ++ commonLdArgs + + makeProgram hdrs = unlines $ + [ "#include \"" ++ hdr ++ "\"" | hdr <- hdrs ] ++ + ["int main(int argc, char** argv) { return 0; }"] + + collectField f = concatMap f allBi + allBi = allBuildInfo pkg + deps = PackageIndex.topologicalOrder (installedPkgs lbi) + + builds program args = do + tempDir <- getTemporaryDirectory + withTempFile tempDir ".c" $ \cName cHnd -> + withTempFile tempDir "" $ \oNname oHnd -> do + hPutStrLn cHnd program + hClose cHnd + hClose oHnd + _ <- rawSystemProgramStdoutConf verbosity + gccProgram (withPrograms lbi) (cName:"-o":oNname:args) + return True + `catchIO` (\_ -> return False) + `catchExit` (\_ -> return False) + + explainErrors Nothing [] = return () -- should be impossible! + explainErrors _ _ + | isNothing . lookupProgram gccProgram . withPrograms $ lbi + + = die $ unlines $ + [ "No working gcc", + "This package depends on foreign library but we cannot " + ++ "find a working C compiler. If you have it in a " + ++ "non-standard location you can use the --with-gcc " + ++ "flag to specify it." 
] + + explainErrors hdr libs = die $ unlines $ + [ if plural + then "Missing dependencies on foreign libraries:" + else "Missing dependency on a foreign library:" + | missing ] + ++ case hdr of + Just (Left h) -> ["* Missing (or bad) header file: " ++ h ] + _ -> [] + ++ case libs of + [] -> [] + [lib] -> ["* Missing C library: " ++ lib] + _ -> ["* Missing C libraries: " ++ intercalate ", " libs] + ++ [if plural then messagePlural else messageSingular | missing] + ++ case hdr of + Just (Left _) -> [ headerCppMessage ] + Just (Right h) -> [ (if missing then "* " else "") + ++ "Bad header file: " ++ h + , headerCcMessage ] + _ -> [] + + where + plural = length libs >= 2 + -- Is there something missing? (as opposed to broken) + missing = not (null libs) + || case hdr of Just (Left _) -> True; _ -> False + + messageSingular = + "This problem can usually be solved by installing the system " + ++ "package that provides this library (you may need the " + ++ "\"-dev\" version). If the library is already installed " + ++ "but in a non-standard location then you can use the flags " + ++ "--extra-include-dirs= and --extra-lib-dirs= to specify " + ++ "where it is." + messagePlural = + "This problem can usually be solved by installing the system " + ++ "packages that provide these libraries (you may need the " + ++ "\"-dev\" versions). If the libraries are already installed " + ++ "but in a non-standard location then you can use the flags " + ++ "--extra-include-dirs= and --extra-lib-dirs= to specify " + ++ "where they are." + headerCppMessage = + "If the header file does exist, it may contain errors that " + ++ "are caught by the C compiler at the preprocessing stage. " + ++ "In this case you can re-run configure with the verbosity " + ++ "flag -v3 to see the error messages." + headerCcMessage = + "The header file contains a compile error. " + ++ "You can re-run configure with the verbosity flag " + ++ "-v3 to see the error messages from the C compiler." 
+ +-- | Output package check warnings and errors. Exit if any errors. +checkPackageProblems :: Verbosity + -> GenericPackageDescription + -> PackageDescription + -> IO () +checkPackageProblems verbosity gpkg pkg = do + ioChecks <- checkPackageFiles pkg "." + let pureChecks = checkPackage gpkg (Just pkg) + errors = [ e | PackageBuildImpossible e <- pureChecks ++ ioChecks ] + warnings = [ w | PackageBuildWarning w <- pureChecks ++ ioChecks ] + if null errors + then mapM_ (warn verbosity) warnings + else die (intercalate "\n\n" errors) + +-- | Preform checks if a relocatable build is allowed +checkRelocatable :: Verbosity + -> PackageDescription + -> LocalBuildInfo + -> IO () +checkRelocatable verbosity pkg lbi + = sequence_ [ checkOS + , checkCompiler + , packagePrefixRelative + , depsPrefixRelative + ] + where + -- Check if the OS support relocatable builds. + -- + -- If you add new OS' to this list, and your OS supports dynamic libraries + -- and RPATH, make sure you add your OS to RPATH-support list of: + -- Distribution.Simple.GHC.getRPaths + checkOS + = unless (os `elem` [ OSX, Linux ]) + $ die $ "Operating system: " ++ display os ++ + ", does not support relocatable builds" + where + (Platform _ os) = hostPlatform lbi + + -- Check if the Compiler support relocatable builds + checkCompiler + = unless (compilerFlavor comp `elem` [ GHC ]) + $ die $ "Compiler: " ++ show comp ++ + ", does not support relocatable builds" + where + comp = compiler lbi + + -- Check if all the install dirs are relative to same prefix + packagePrefixRelative + = unless (relativeInstallDirs installDirs) + $ die $ "Installation directories are not prefix_relative:\n" ++ + show installDirs + where + installDirs = absoluteInstallDirs pkg lbi NoCopyDest + p = prefix installDirs + relativeInstallDirs (InstallDirs {..}) = + all isJust + (fmap (stripPrefix p) + [ bindir, libdir, dynlibdir, libexecdir, includedir, datadir + , docdir, mandir, htmldir, haddockdir, sysconfdir] ) + + -- Check if the 
library dirs of the dependencies that are in the package + -- database to which the package is installed are relative to the + -- prefix of the package + depsPrefixRelative = do + pkgr <- GHC.pkgRoot verbosity lbi (last (withPackageDB lbi)) + mapM_ (doCheck pkgr) ipkgs + where + doCheck pkgr ipkg + | maybe False (== pkgr) (Installed.pkgRoot ipkg) + = mapM_ (\l -> when (isNothing $ stripPrefix p l) (die (msg l))) + (Installed.libraryDirs ipkg) + | otherwise + = return () + installDirs = absoluteInstallDirs pkg lbi NoCopyDest + p = prefix installDirs + ipkgs = PackageIndex.allPackages (installedPkgs lbi) + msg l = "Library directory of a dependency: " ++ show l ++ + "\nis not relative to the installation prefix:\n" ++ + show p diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/GHC/ImplInfo.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/GHC/ImplInfo.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/GHC/ImplInfo.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/GHC/ImplInfo.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,108 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.GHC.ImplInfo +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This module contains the data structure describing invocation +-- details for a GHC or GHC-derived compiler, such as supported flags +-- and workarounds for bugs. + +module Distribution.Simple.GHC.ImplInfo ( + GhcImplInfo(..), getImplInfo, + ghcVersionImplInfo, ghcjsVersionImplInfo, lhcVersionImplInfo + ) where + +import Distribution.Simple.Compiler + ( Compiler(..), CompilerFlavor(..) + , compilerFlavor, compilerVersion, compilerCompatVersion ) +import Distribution.Version ( Version(..) ) + +{- | + Information about features and quirks of a GHC-based implementation. 
+ + Compiler flavors based on GHC behave similarly enough that some of + the support code for them is shared. Every implementation has its + own peculiarities, that may or may not be a direct result of the + underlying GHC version. This record keeps track of these differences. + + All shared code (i.e. everything not in the Distribution.Simple.FLAVOR + module) should use implementation info rather than version numbers + to test for supported features. +-} + +data GhcImplInfo = GhcImplInfo + { hasCcOdirBug :: Bool -- ^ bug in -odir handling for C compilations. + , flagInfoLanguages :: Bool -- ^ --info and --supported-languages flags + , fakeRecordPuns :: Bool -- ^ use -XRecordPuns for NamedFieldPuns + , flagStubdir :: Bool -- ^ -stubdir flag supported + , flagOutputDir :: Bool -- ^ -outputdir flag supported + , noExtInSplitSuffix :: Bool -- ^ split-obj suffix does not contain p_o ext + , flagFfiIncludes :: Bool -- ^ -#include on command line for FFI includes + , flagBuildingCabalPkg :: Bool -- ^ -fbuilding-cabal-package flag supported + , flagPackageId :: Bool -- ^ -package-id / -package flags supported + , separateGccMingw :: Bool -- ^ mingw and gcc are in separate directories + , supportsHaskell2010 :: Bool -- ^ -XHaskell2010 and -XHaskell98 flags + , reportsNoExt :: Bool -- ^ --supported-languages gives Ext and NoExt + , alwaysNondecIndent :: Bool -- ^ NondecreasingIndentation is always on + , flagGhciScript :: Bool -- ^ -ghci-script flag supported + , flagPackageConf :: Bool -- ^ use package-conf instead of package-db + , flagDebugInfo :: Bool -- ^ -g flag supported + } + +getImplInfo :: Compiler -> GhcImplInfo +getImplInfo comp = + case compilerFlavor comp of + GHC -> ghcVersionImplInfo (compilerVersion comp) + LHC -> lhcVersionImplInfo (compilerVersion comp) + GHCJS -> case compilerCompatVersion GHC comp of + Just ghcVer -> ghcjsVersionImplInfo (compilerVersion comp) ghcVer + _ -> error ("Distribution.Simple.GHC.Props.getImplProps: " ++ + "could not find GHC 
version for GHCJS compiler") + x -> error ("Distribution.Simple.GHC.Props.getImplProps only works" ++ + "for GHC-like compilers (GHC, GHCJS, LHC)" ++ + ", but found " ++ show x) + +ghcVersionImplInfo :: Version -> GhcImplInfo +ghcVersionImplInfo (Version v _) = GhcImplInfo + { hasCcOdirBug = v < [6,4,1] + , flagInfoLanguages = v >= [6,7] + , fakeRecordPuns = v >= [6,8] && v < [6,10] + , flagStubdir = v >= [6,8] + , flagOutputDir = v >= [6,10] + , noExtInSplitSuffix = v < [6,11] + , flagFfiIncludes = v < [6,11] + , flagBuildingCabalPkg = v >= [6,11] + , flagPackageId = v > [6,11] + , separateGccMingw = v < [6,12] + , supportsHaskell2010 = v >= [7] + , reportsNoExt = v >= [7] + , alwaysNondecIndent = v < [7,1] + , flagGhciScript = v >= [7,2] + , flagPackageConf = v < [7,5] + , flagDebugInfo = v >= [7,10] + } + +ghcjsVersionImplInfo :: Version -> Version -> GhcImplInfo +ghcjsVersionImplInfo _ghcjsVer _ghcVer = GhcImplInfo + { hasCcOdirBug = False + , flagInfoLanguages = True + , fakeRecordPuns = False + , flagStubdir = True + , flagOutputDir = True + , noExtInSplitSuffix = False + , flagFfiIncludes = False + , flagBuildingCabalPkg = True + , flagPackageId = True + , separateGccMingw = False + , supportsHaskell2010 = True + , reportsNoExt = True + , alwaysNondecIndent = False + , flagGhciScript = True + , flagPackageConf = False + , flagDebugInfo = False + } + +lhcVersionImplInfo :: Version -> GhcImplInfo +lhcVersionImplInfo = ghcVersionImplInfo diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/GHC/Internal.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/GHC/Internal.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/GHC/Internal.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/GHC/Internal.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,492 @@ +{-# LANGUAGE CPP #-} 
+----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.GHC.Internal +-- Copyright : Isaac Jones 2003-2007 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This module contains functions shared by GHC (Distribution.Simple.GHC) +-- and GHC-derived compilers. + +module Distribution.Simple.GHC.Internal ( + configureToolchain, + getLanguages, + getExtensions, + targetPlatform, + getGhcInfo, + componentCcGhcOptions, + componentGhcOptions, + mkGHCiLibName, + filterGhciFlags, + ghcLookupProperty, + getHaskellObjects, + mkGhcOptPackages, + substTopDir, + checkPackageDbEnvVar + ) where + +import Distribution.Simple.GHC.ImplInfo ( GhcImplInfo (..) ) +import Distribution.Package + ( InstalledPackageId, PackageId ) +import Distribution.InstalledPackageInfo + ( InstalledPackageInfo ) +import qualified Distribution.InstalledPackageInfo as InstalledPackageInfo + ( InstalledPackageInfo_(..) ) +import Distribution.PackageDescription as PD + ( BuildInfo(..), Library(..), libModules + , hcOptions, usedExtensions, ModuleRenaming, lookupRenaming ) +import Distribution.Compat.Exception ( catchExit, catchIO ) +import Distribution.Simple.Compiler + ( CompilerFlavor(..), Compiler(..), DebugInfoLevel(..), OptimisationLevel(..) ) +import Distribution.Simple.Program.GHC +import Distribution.Simple.Setup + ( toFlag ) +import qualified Distribution.ModuleName as ModuleName +import Distribution.Simple.Program + ( Program(..), ConfiguredProgram(..), ProgramConfiguration + , ProgramLocation(..), ProgramSearchPath, ProgramSearchPathEntry(..) + , rawSystemProgram, rawSystemProgramStdout, programPath + , addKnownProgram, arProgram, ldProgram, gccProgram, stripProgram + , getProgramOutput ) +import Distribution.Simple.Program.Types ( suppressOverrideArgs ) +import Distribution.Simple.LocalBuildInfo + ( LocalBuildInfo(..), ComponentLocalBuildInfo(..) + , LibraryName(..) 
) +import Distribution.Simple.Utils +import Distribution.Simple.BuildPaths +import Distribution.System ( buildOS, OS(..), Platform, platformFromTriple ) +import Distribution.Text ( display, simpleParse ) +import Distribution.Utils.NubList ( toNubListR ) +import Distribution.Verbosity +import Language.Haskell.Extension + ( Language(..), Extension(..), KnownExtension(..) ) + +import qualified Data.Map as M +import Data.Char ( isSpace ) +import Data.Maybe ( fromMaybe, maybeToList, isJust ) +import Control.Monad ( unless, when ) +#if __GLASGOW_HASKELL__ < 710 +import Data.Monoid ( Monoid(..) ) +#endif +import System.Directory ( getDirectoryContents, getTemporaryDirectory ) +import System.Environment ( getEnv ) +import System.FilePath ( (), (<.>), takeExtension, takeDirectory ) +import System.IO ( hClose, hPutStrLn ) + +targetPlatform :: [(String, String)] -> Maybe Platform +targetPlatform ghcInfo = platformFromTriple =<< lookup "Target platform" ghcInfo + +-- | Adjust the way we find and configure gcc and ld +-- +configureToolchain :: GhcImplInfo + -> ConfiguredProgram + -> M.Map String String + -> ProgramConfiguration + -> ProgramConfiguration +configureToolchain implInfo ghcProg ghcInfo = + addKnownProgram gccProgram { + programFindLocation = findProg gccProgram extraGccPath, + programPostConf = configureGcc + } + . addKnownProgram ldProgram { + programFindLocation = findProg ldProgram extraLdPath, + programPostConf = configureLd + } + . addKnownProgram arProgram { + programFindLocation = findProg arProgram extraArPath + } + . 
addKnownProgram stripProgram { + programFindLocation = findProg stripProgram extraStripPath + } + where + compilerDir = takeDirectory (programPath ghcProg) + baseDir = takeDirectory compilerDir + mingwBinDir = baseDir "mingw" "bin" + libDir = baseDir "gcc-lib" + includeDir = baseDir "include" "mingw" + isWindows = case buildOS of Windows -> True; _ -> False + binPrefix = "" + + mkExtraPath :: Maybe FilePath -> FilePath -> [FilePath] + mkExtraPath mbPath mingwPath | isWindows = mbDir ++ [mingwPath] + | otherwise = mbDir + where + mbDir = maybeToList . fmap takeDirectory $ mbPath + + extraGccPath = mkExtraPath mbGccLocation windowsExtraGccDir + extraLdPath = mkExtraPath mbLdLocation windowsExtraLdDir + extraArPath = mkExtraPath mbArLocation windowsExtraArDir + extraStripPath = mkExtraPath mbStripLocation windowsExtraStripDir + + -- on Windows finding and configuring ghc's gcc & binutils is a bit special + (windowsExtraGccDir, windowsExtraLdDir, + windowsExtraArDir, windowsExtraStripDir) + | separateGccMingw implInfo = (baseDir, libDir, libDir, libDir) + | otherwise = -- GHC >= 6.12 + let b = mingwBinDir binPrefix + in (b, b, b, b) + + findProg :: Program -> [FilePath] + -> Verbosity -> ProgramSearchPath -> IO (Maybe FilePath) + findProg prog extraPath v searchpath = + programFindLocation prog v searchpath' + where + searchpath' = (map ProgramSearchPathDir extraPath) ++ searchpath + + -- Read tool locations from the 'ghc --info' output. Useful when + -- cross-compiling. 
+ mbGccLocation = M.lookup "C compiler command" ghcInfo + mbLdLocation = M.lookup "ld command" ghcInfo + mbArLocation = M.lookup "ar command" ghcInfo + mbStripLocation = M.lookup "strip command" ghcInfo + + ccFlags = getFlags "C compiler flags" + gccLinkerFlags = getFlags "Gcc Linker flags" + ldLinkerFlags = getFlags "Ld Linker flags" + + getFlags key = case M.lookup key ghcInfo of + Nothing -> [] + Just flags -> + case reads flags of + [(args, "")] -> args + _ -> [] -- XXX Should should be an error really + + configureGcc :: Verbosity -> ConfiguredProgram -> IO ConfiguredProgram + configureGcc v gccProg = do + gccProg' <- configureGcc' v gccProg + return gccProg' { + programDefaultArgs = programDefaultArgs gccProg' + ++ ccFlags ++ gccLinkerFlags + } + + configureGcc' :: Verbosity -> ConfiguredProgram -> IO ConfiguredProgram + configureGcc' + | isWindows = \_ gccProg -> case programLocation gccProg of + -- if it's found on system then it means we're using the result + -- of programFindLocation above rather than a user-supplied path + -- Pre GHC 6.12, that meant we should add these flags to tell + -- ghc's gcc where it lives and thus where gcc can find its + -- various files: + FoundOnSystem {} + | separateGccMingw implInfo -> + return gccProg { programDefaultArgs = ["-B" ++ libDir, + "-I" ++ includeDir] } + _ -> return gccProg + | otherwise = \_ gccProg -> return gccProg + + configureLd :: Verbosity -> ConfiguredProgram -> IO ConfiguredProgram + configureLd v ldProg = do + ldProg' <- configureLd' v ldProg + return ldProg' { + programDefaultArgs = programDefaultArgs ldProg' ++ ldLinkerFlags + } + + -- we need to find out if ld supports the -x flag + configureLd' :: Verbosity -> ConfiguredProgram -> IO ConfiguredProgram + configureLd' verbosity ldProg = do + tempDir <- getTemporaryDirectory + ldx <- withTempFile tempDir ".c" $ \testcfile testchnd -> + withTempFile tempDir ".o" $ \testofile testohnd -> do + hPutStrLn testchnd "int foo() { return 0; }" + hClose 
testchnd; hClose testohnd + rawSystemProgram verbosity ghcProg ["-c", testcfile, + "-o", testofile] + withTempFile tempDir ".o" $ \testofile' testohnd' -> + do + hClose testohnd' + _ <- rawSystemProgramStdout verbosity ldProg + ["-x", "-r", testofile, "-o", testofile'] + return True + `catchIO` (\_ -> return False) + `catchExit` (\_ -> return False) + if ldx + then return ldProg { programDefaultArgs = ["-x"] } + else return ldProg + +getLanguages :: Verbosity -> GhcImplInfo -> ConfiguredProgram + -> IO [(Language, String)] +getLanguages _ implInfo _ + -- TODO: should be using --supported-languages rather than hard coding + | supportsHaskell2010 implInfo = return [(Haskell98, "-XHaskell98") + ,(Haskell2010, "-XHaskell2010")] + | otherwise = return [(Haskell98, "")] + +getGhcInfo :: Verbosity -> GhcImplInfo -> ConfiguredProgram + -> IO [(String, String)] +getGhcInfo verbosity implInfo ghcProg + | flagInfoLanguages implInfo = do + xs <- getProgramOutput verbosity (suppressOverrideArgs ghcProg) + ["--info"] + case reads xs of + [(i, ss)] + | all isSpace ss -> + return i + _ -> + die "Can't parse --info output of GHC" + | otherwise = + return [] + +getExtensions :: Verbosity -> GhcImplInfo -> ConfiguredProgram + -> IO [(Extension, String)] +getExtensions verbosity implInfo ghcProg + | flagInfoLanguages implInfo = do + str <- getProgramOutput verbosity (suppressOverrideArgs ghcProg) + ["--supported-languages"] + let extStrs = if reportsNoExt implInfo + then lines str + else -- Older GHCs only gave us either Foo or NoFoo, + -- so we have to work out the other one ourselves + [ extStr'' + | extStr <- lines str + , let extStr' = case extStr of + 'N' : 'o' : xs -> xs + _ -> "No" ++ extStr + , extStr'' <- [extStr, extStr'] + ] + let extensions0 = [ (ext, "-X" ++ display ext) + | Just ext <- map simpleParse extStrs ] + extensions1 = if fakeRecordPuns implInfo + then -- ghc-6.8 introduced RecordPuns however it + -- should have been NamedFieldPuns. 
We now + -- encourage packages to use NamedFieldPuns + -- so for compatibility we fake support for + -- it in ghc-6.8 by making it an alias for + -- the old RecordPuns extension. + (EnableExtension NamedFieldPuns, "-XRecordPuns") : + (DisableExtension NamedFieldPuns, "-XNoRecordPuns") : + extensions0 + else extensions0 + extensions2 = if alwaysNondecIndent implInfo + then -- ghc-7.2 split NondecreasingIndentation off + -- into a proper extension. Before that it + -- was always on. + (EnableExtension NondecreasingIndentation, "") : + (DisableExtension NondecreasingIndentation, "") : + extensions1 + else extensions1 + return extensions2 + + | otherwise = return oldLanguageExtensions + +-- | For GHC 6.6.x and earlier, the mapping from supported extensions to flags +oldLanguageExtensions :: [(Extension, String)] +oldLanguageExtensions = + let doFlag (f, (enable, disable)) = [(EnableExtension f, enable), + (DisableExtension f, disable)] + fglasgowExts = ("-fglasgow-exts", + "") -- This is wrong, but we don't want to turn + -- all the extensions off when asked to just + -- turn one off + fFlag flag = ("-f" ++ flag, "-fno-" ++ flag) + in concatMap doFlag + [(OverlappingInstances , fFlag "allow-overlapping-instances") + ,(TypeSynonymInstances , fglasgowExts) + ,(TemplateHaskell , fFlag "th") + ,(ForeignFunctionInterface , fFlag "ffi") + ,(MonomorphismRestriction , fFlag "monomorphism-restriction") + ,(MonoPatBinds , fFlag "mono-pat-binds") + ,(UndecidableInstances , fFlag "allow-undecidable-instances") + ,(IncoherentInstances , fFlag "allow-incoherent-instances") + ,(Arrows , fFlag "arrows") + ,(Generics , fFlag "generics") + ,(ImplicitPrelude , fFlag "implicit-prelude") + ,(ImplicitParams , fFlag "implicit-params") + ,(CPP , ("-cpp", ""{- Wrong -})) + ,(BangPatterns , fFlag "bang-patterns") + ,(KindSignatures , fglasgowExts) + ,(RecursiveDo , fglasgowExts) + ,(ParallelListComp , fglasgowExts) + ,(MultiParamTypeClasses , fglasgowExts) + ,(FunctionalDependencies , 
fglasgowExts) + ,(Rank2Types , fglasgowExts) + ,(RankNTypes , fglasgowExts) + ,(PolymorphicComponents , fglasgowExts) + ,(ExistentialQuantification , fglasgowExts) + ,(ScopedTypeVariables , fFlag "scoped-type-variables") + ,(FlexibleContexts , fglasgowExts) + ,(FlexibleInstances , fglasgowExts) + ,(EmptyDataDecls , fglasgowExts) + ,(PatternGuards , fglasgowExts) + ,(GeneralizedNewtypeDeriving , fglasgowExts) + ,(MagicHash , fglasgowExts) + ,(UnicodeSyntax , fglasgowExts) + ,(PatternSignatures , fglasgowExts) + ,(UnliftedFFITypes , fglasgowExts) + ,(LiberalTypeSynonyms , fglasgowExts) + ,(TypeOperators , fglasgowExts) + ,(GADTs , fglasgowExts) + ,(RelaxedPolyRec , fglasgowExts) + ,(ExtendedDefaultRules , fFlag "extended-default-rules") + ,(UnboxedTuples , fglasgowExts) + ,(DeriveDataTypeable , fglasgowExts) + ,(ConstrainedClassMethods , fglasgowExts) + ] + +componentCcGhcOptions :: Verbosity -> GhcImplInfo -> LocalBuildInfo + -> BuildInfo -> ComponentLocalBuildInfo + -> FilePath -> FilePath + -> GhcOptions +componentCcGhcOptions verbosity implInfo lbi bi clbi pref filename = + mempty { + ghcOptVerbosity = toFlag verbosity, + ghcOptMode = toFlag GhcModeCompile, + ghcOptInputFiles = toNubListR [filename], + + ghcOptCppIncludePath = toNubListR $ [autogenModulesDir lbi, odir] + ++ PD.includeDirs bi, + ghcOptPackageDBs = withPackageDB lbi, + ghcOptPackages = toNubListR $ mkGhcOptPackages clbi, + ghcOptCcOptions = toNubListR $ + (case withOptimization lbi of + NoOptimisation -> [] + _ -> ["-O2"]) ++ + (case withDebugInfo lbi of + NoDebugInfo -> [] + MinimalDebugInfo -> ["-g1"] + NormalDebugInfo -> ["-g"] + MaximalDebugInfo -> ["-g3"]) ++ + PD.ccOptions bi, + ghcOptObjDir = toFlag odir + } + where + odir | hasCcOdirBug implInfo = pref takeDirectory filename + | otherwise = pref + -- ghc 6.4.0 had a bug in -odir handling for C compilations. 
+ +componentGhcOptions :: Verbosity -> LocalBuildInfo + -> BuildInfo -> ComponentLocalBuildInfo -> FilePath + -> GhcOptions +componentGhcOptions verbosity lbi bi clbi odir = + mempty { + ghcOptVerbosity = toFlag verbosity, + ghcOptHideAllPackages = toFlag True, + ghcOptCabal = toFlag True, + ghcOptPackageDBs = withPackageDB lbi, + ghcOptPackages = toNubListR $ mkGhcOptPackages clbi, + ghcOptSplitObjs = toFlag (splitObjs lbi), + ghcOptSourcePathClear = toFlag True, + ghcOptSourcePath = toNubListR $ [odir] ++ (hsSourceDirs bi) + ++ [autogenModulesDir lbi], + ghcOptCppIncludePath = toNubListR $ [autogenModulesDir lbi, odir] + ++ PD.includeDirs bi, + ghcOptCppOptions = toNubListR $ cppOptions bi, + ghcOptCppIncludes = toNubListR $ + [autogenModulesDir lbi cppHeaderName], + ghcOptFfiIncludes = toNubListR $ PD.includes bi, + ghcOptObjDir = toFlag odir, + ghcOptHiDir = toFlag odir, + ghcOptStubDir = toFlag odir, + ghcOptOutputDir = toFlag odir, + ghcOptOptimisation = toGhcOptimisation (withOptimization lbi), + ghcOptDebugInfo = toGhcDebugInfo (withDebugInfo lbi), + ghcOptExtra = toNubListR $ hcOptions GHC bi, + ghcOptLanguage = toFlag (fromMaybe Haskell98 (defaultLanguage bi)), + -- Unsupported extensions have already been checked by configure + ghcOptExtensions = toNubListR $ usedExtensions bi, + ghcOptExtensionMap = M.fromList . compilerExtensions $ (compiler lbi) + } + where + toGhcOptimisation NoOptimisation = mempty --TODO perhaps override? + toGhcOptimisation NormalOptimisation = toFlag GhcNormalOptimisation + toGhcOptimisation MaximumOptimisation = toFlag GhcMaximumOptimisation + + -- GHC doesn't support debug info levels yet. 
+ toGhcDebugInfo NoDebugInfo = mempty + toGhcDebugInfo MinimalDebugInfo = toFlag True + toGhcDebugInfo NormalDebugInfo = toFlag True + toGhcDebugInfo MaximalDebugInfo = toFlag True + +-- | Strip out flags that are not supported in ghci +filterGhciFlags :: [String] -> [String] +filterGhciFlags = filter supported + where + supported ('-':'O':_) = False + supported "-debug" = False + supported "-threaded" = False + supported "-ticky" = False + supported "-eventlog" = False + supported "-prof" = False + supported "-unreg" = False + supported _ = True + +mkGHCiLibName :: LibraryName -> String +mkGHCiLibName (LibraryName lib) = lib <.> "o" + +ghcLookupProperty :: String -> Compiler -> Bool +ghcLookupProperty prop comp = + case M.lookup prop (compilerProperties comp) of + Just "YES" -> True + _ -> False + +-- when using -split-objs, we need to search for object files in the +-- Module_split directory for each module. +getHaskellObjects :: GhcImplInfo -> Library -> LocalBuildInfo + -> FilePath -> String -> Bool -> IO [FilePath] +getHaskellObjects implInfo lib lbi pref wanted_obj_ext allow_split_objs + | splitObjs lbi && allow_split_objs = do + let splitSuffix = if noExtInSplitSuffix implInfo + then "_split" + else "_" ++ wanted_obj_ext ++ "_split" + dirs = [ pref (ModuleName.toFilePath x ++ splitSuffix) + | x <- libModules lib ] + objss <- mapM getDirectoryContents dirs + let objs = [ dir obj + | (objs',dir) <- zip objss dirs, obj <- objs', + let obj_ext = takeExtension obj, + '.':wanted_obj_ext == obj_ext ] + return objs + | otherwise = + return [ pref ModuleName.toFilePath x <.> wanted_obj_ext + | x <- libModules lib ] + +mkGhcOptPackages :: ComponentLocalBuildInfo + -> [(InstalledPackageId, PackageId, ModuleRenaming)] +mkGhcOptPackages clbi = + map (\(i,p) -> (i,p,lookupRenaming p (componentPackageRenaming clbi))) + (componentPackageDeps clbi) + +substTopDir :: FilePath -> InstalledPackageInfo -> InstalledPackageInfo +substTopDir topDir ipo + = ipo { + 
InstalledPackageInfo.importDirs + = map f (InstalledPackageInfo.importDirs ipo), + InstalledPackageInfo.libraryDirs + = map f (InstalledPackageInfo.libraryDirs ipo), + InstalledPackageInfo.includeDirs + = map f (InstalledPackageInfo.includeDirs ipo), + InstalledPackageInfo.frameworkDirs + = map f (InstalledPackageInfo.frameworkDirs ipo), + InstalledPackageInfo.haddockInterfaces + = map f (InstalledPackageInfo.haddockInterfaces ipo), + InstalledPackageInfo.haddockHTMLs + = map f (InstalledPackageInfo.haddockHTMLs ipo) + } + where f ('$':'t':'o':'p':'d':'i':'r':rest) = topDir ++ rest + f x = x + +-- Cabal does not use the environment variable GHC{,JS}_PACKAGE_PATH; let +-- users know that this is the case. See ticket #335. Simply ignoring it is +-- not a good idea, since then ghc and cabal are looking at different sets +-- of package DBs and chaos is likely to ensue. +-- +-- An exception to this is when running cabal from within a `cabal exec` +-- environment. In this case, `cabal exec` will set the +-- CABAL_SANDBOX_PACKAGE_PATH to the same value that it set +-- GHC{,JS}_PACKAGE_PATH to. If that is the case it is OK to allow +-- GHC{,JS}_PACKAGE_PATH. +checkPackageDbEnvVar :: String -> String -> IO () +checkPackageDbEnvVar compilerName packagePathEnvVar = do + mPP <- lookupEnv packagePathEnvVar + when (isJust mPP) $ do + mcsPP <- lookupEnv "CABAL_SANDBOX_PACKAGE_PATH" + unless (mPP == mcsPP) abort + where + lookupEnv :: String -> IO (Maybe String) + lookupEnv name = (Just `fmap` getEnv name) `catchIO` const (return Nothing) + abort = + die $ "Use of " ++ compilerName ++ "'s environment variable " + ++ packagePathEnvVar ++ " is incompatible with Cabal. Use the " + ++ "flag --package-db to specify a package database (it can be " + ++ "used multiple times)." 
diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/GHC/IPI641.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/GHC/IPI641.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/GHC/IPI641.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/GHC/IPI641.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,106 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.GHC.IPI641 +-- Copyright : (c) The University of Glasgow 2004 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- + +module Distribution.Simple.GHC.IPI641 ( + InstalledPackageInfo(..), + toCurrent, + ) where + +import qualified Distribution.InstalledPackageInfo as Current +import qualified Distribution.Package as Current hiding (depends, installedPackageId) +import Distribution.Text (display) + +import Distribution.Simple.GHC.IPI642 + ( PackageIdentifier, convertPackageId + , License, convertLicense, convertModuleName ) + +-- | This is the InstalledPackageInfo type used by ghc-6.4 and 6.4.1. +-- +-- It's here purely for the 'Read' instance so that we can read the package +-- database used by those ghc versions. It is a little hacky to read the +-- package db directly, but we do need the info and until ghc-6.9 there was +-- no better method. +-- +-- In ghc-6.4.2 the format changed a bit. 
See "Distribution.Simple.GHC.IPI642" +-- +data InstalledPackageInfo = InstalledPackageInfo { + package :: PackageIdentifier, + license :: License, + copyright :: String, + maintainer :: String, + author :: String, + stability :: String, + homepage :: String, + pkgUrl :: String, + description :: String, + category :: String, + exposed :: Bool, + exposedModules :: [String], + hiddenModules :: [String], + importDirs :: [FilePath], + libraryDirs :: [FilePath], + hsLibraries :: [String], + extraLibraries :: [String], + includeDirs :: [FilePath], + includes :: [String], + depends :: [PackageIdentifier], + hugsOptions :: [String], + ccOptions :: [String], + ldOptions :: [String], + frameworkDirs :: [FilePath], + frameworks :: [String], + haddockInterfaces :: [FilePath], + haddockHTMLs :: [FilePath] + } + deriving Read + +mkInstalledPackageId :: Current.PackageIdentifier -> Current.InstalledPackageId +mkInstalledPackageId = Current.InstalledPackageId . display + +toCurrent :: InstalledPackageInfo -> Current.InstalledPackageInfo +toCurrent ipi@InstalledPackageInfo{} = + let pid = convertPackageId (package ipi) + mkExposedModule m = Current.ExposedModule m Nothing Nothing + in Current.InstalledPackageInfo { + Current.installedPackageId = mkInstalledPackageId (convertPackageId (package ipi)), + Current.sourcePackageId = pid, + Current.packageKey = Current.OldPackageKey pid, + Current.license = convertLicense (license ipi), + Current.copyright = copyright ipi, + Current.maintainer = maintainer ipi, + Current.author = author ipi, + Current.stability = stability ipi, + Current.homepage = homepage ipi, + Current.pkgUrl = pkgUrl ipi, + Current.synopsis = "", + Current.description = description ipi, + Current.category = category ipi, + Current.exposed = exposed ipi, + Current.exposedModules = map (mkExposedModule . 
convertModuleName) (exposedModules ipi), + Current.instantiatedWith = [], + Current.hiddenModules = map convertModuleName (hiddenModules ipi), + Current.trusted = Current.trusted Current.emptyInstalledPackageInfo, + Current.importDirs = importDirs ipi, + Current.libraryDirs = libraryDirs ipi, + Current.dataDir = "", + Current.hsLibraries = hsLibraries ipi, + Current.extraLibraries = extraLibraries ipi, + Current.extraGHCiLibraries = [], + Current.includeDirs = includeDirs ipi, + Current.includes = includes ipi, + Current.depends = map (mkInstalledPackageId.convertPackageId) (depends ipi), + Current.ccOptions = ccOptions ipi, + Current.ldOptions = ldOptions ipi, + Current.frameworkDirs = frameworkDirs ipi, + Current.frameworks = frameworks ipi, + Current.haddockInterfaces = haddockInterfaces ipi, + Current.haddockHTMLs = haddockHTMLs ipi, + Current.pkgRoot = Nothing + } diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/GHC/IPI642.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/GHC/IPI642.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/GHC/IPI642.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/GHC/IPI642.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,141 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.GHC.IPI642 +-- Copyright : (c) The University of Glasgow 2004 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- + +module Distribution.Simple.GHC.IPI642 ( + InstalledPackageInfo(..), + toCurrent, + + -- Don't use these, they're only for conversion purposes + PackageIdentifier, convertPackageId, + License, convertLicense, + convertModuleName + ) where + +import qualified Distribution.InstalledPackageInfo as Current +import qualified Distribution.Package as Current hiding (depends, installedPackageId) +import qualified 
Distribution.License as Current + +import Distribution.Version (Version) +import Distribution.ModuleName (ModuleName) +import Distribution.Text (simpleParse,display) + +import Data.Maybe + +-- | This is the InstalledPackageInfo type used by ghc-6.4.2 and later. +-- +-- It's here purely for the 'Read' instance so that we can read the package +-- database used by those ghc versions. It is a little hacky to read the +-- package db directly, but we do need the info and until ghc-6.9 there was +-- no better method. +-- +-- In ghc-6.4.1 and before the format was slightly different. +-- See "Distribution.Simple.GHC.IPI642" +-- +data InstalledPackageInfo = InstalledPackageInfo { + package :: PackageIdentifier, + license :: License, + copyright :: String, + maintainer :: String, + author :: String, + stability :: String, + homepage :: String, + pkgUrl :: String, + description :: String, + category :: String, + exposed :: Bool, + exposedModules :: [String], + hiddenModules :: [String], + importDirs :: [FilePath], + libraryDirs :: [FilePath], + hsLibraries :: [String], + extraLibraries :: [String], + extraGHCiLibraries:: [String], + includeDirs :: [FilePath], + includes :: [String], + depends :: [PackageIdentifier], + hugsOptions :: [String], + ccOptions :: [String], + ldOptions :: [String], + frameworkDirs :: [FilePath], + frameworks :: [String], + haddockInterfaces :: [FilePath], + haddockHTMLs :: [FilePath] + } + deriving Read + +data PackageIdentifier = PackageIdentifier { + pkgName :: String, + pkgVersion :: Version + } + deriving Read + +data License = GPL | LGPL | BSD3 | BSD4 + | PublicDomain | AllRightsReserved | OtherLicense + deriving Read + +convertPackageId :: PackageIdentifier -> Current.PackageIdentifier +convertPackageId PackageIdentifier { pkgName = n, pkgVersion = v } = + Current.PackageIdentifier (Current.PackageName n) v + +mkInstalledPackageId :: Current.PackageIdentifier -> Current.InstalledPackageId +mkInstalledPackageId = Current.InstalledPackageId . 
display + +convertModuleName :: String -> ModuleName +convertModuleName s = fromJust $ simpleParse s + +convertLicense :: License -> Current.License +convertLicense GPL = Current.GPL Nothing +convertLicense LGPL = Current.LGPL Nothing +convertLicense BSD3 = Current.BSD3 +convertLicense BSD4 = Current.BSD4 +convertLicense PublicDomain = Current.PublicDomain +convertLicense AllRightsReserved = Current.AllRightsReserved +convertLicense OtherLicense = Current.OtherLicense + +toCurrent :: InstalledPackageInfo -> Current.InstalledPackageInfo +toCurrent ipi@InstalledPackageInfo{} = + let pid = convertPackageId (package ipi) + mkExposedModule m = Current.ExposedModule m Nothing Nothing + in Current.InstalledPackageInfo { + Current.installedPackageId = mkInstalledPackageId (convertPackageId (package ipi)), + Current.sourcePackageId = pid, + Current.packageKey = Current.OldPackageKey pid, + Current.license = convertLicense (license ipi), + Current.copyright = copyright ipi, + Current.maintainer = maintainer ipi, + Current.author = author ipi, + Current.stability = stability ipi, + Current.homepage = homepage ipi, + Current.pkgUrl = pkgUrl ipi, + Current.synopsis = "", + Current.description = description ipi, + Current.category = category ipi, + Current.exposed = exposed ipi, + Current.exposedModules = map (mkExposedModule . 
convertModuleName) (exposedModules ipi), + Current.hiddenModules = map convertModuleName (hiddenModules ipi), + Current.instantiatedWith = [], + Current.trusted = Current.trusted Current.emptyInstalledPackageInfo, + Current.importDirs = importDirs ipi, + Current.libraryDirs = libraryDirs ipi, + Current.dataDir = "", + Current.hsLibraries = hsLibraries ipi, + Current.extraLibraries = extraLibraries ipi, + Current.extraGHCiLibraries = extraGHCiLibraries ipi, + Current.includeDirs = includeDirs ipi, + Current.includes = includes ipi, + Current.depends = map (mkInstalledPackageId.convertPackageId) (depends ipi), + Current.ccOptions = ccOptions ipi, + Current.ldOptions = ldOptions ipi, + Current.frameworkDirs = frameworkDirs ipi, + Current.frameworks = frameworks ipi, + Current.haddockInterfaces = haddockInterfaces ipi, + Current.haddockHTMLs = haddockHTMLs ipi, + Current.pkgRoot = Nothing + } diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/GHC.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/GHC.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/GHC.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/GHC.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,1113 @@ +{-# LANGUAGE CPP #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.GHC +-- Copyright : Isaac Jones 2003-2007 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This is a fairly large module. It contains most of the GHC-specific code for +-- configuring, building and installing packages. It also exports a function +-- for finding out what packages are already installed. Configuring involves +-- finding the @ghc@ and @ghc-pkg@ programs, finding what language extensions +-- this version of ghc supports and returning a 'Compiler' value. 
+-- +-- 'getInstalledPackages' involves calling the @ghc-pkg@ program to find out +-- what packages are installed. +-- +-- Building is somewhat complex as there is quite a bit of information to take +-- into account. We have to build libs and programs, possibly for profiling and +-- shared libs. We have to support building libraries that will be usable by +-- GHCi and also ghc's @-split-objs@ feature. We have to compile any C files +-- using ghc. Linking, especially for @split-objs@ is remarkably complex, +-- partly because there tend to be 1,000's of @.o@ files and this can often be +-- more than we can pass to the @ld@ or @ar@ programs in one go. +-- +-- Installing for libs and exes involves finding the right files and copying +-- them to the right places. One of the more tricky things about this module is +-- remembering the layout of files in the build directory (which is not +-- explicitly documented) and thus what search dirs are used for various kinds +-- of files. + +module Distribution.Simple.GHC ( + getGhcInfo, + configure, getInstalledPackages, getPackageDBContents, + buildLib, buildExe, + replLib, replExe, + startInterpreter, + installLib, installExe, + libAbiHash, + hcPkgInfo, + registerPackage, + componentGhcOptions, + getLibDir, + isDynamic, + getGlobalPackageDB, + pkgRoot + ) where + +import qualified Distribution.Simple.GHC.IPI641 as IPI641 +import qualified Distribution.Simple.GHC.IPI642 as IPI642 +import qualified Distribution.Simple.GHC.Internal as Internal +import Distribution.Simple.GHC.ImplInfo +import Distribution.PackageDescription as PD + ( PackageDescription(..), BuildInfo(..), Executable(..), Library(..) + , allExtensions, libModules, exeModules + , hcOptions, hcSharedOptions, hcProfOptions ) +import Distribution.InstalledPackageInfo + ( InstalledPackageInfo ) +import qualified Distribution.InstalledPackageInfo as InstalledPackageInfo + ( InstalledPackageInfo_(..) 
) +import Distribution.Simple.PackageIndex (InstalledPackageIndex) +import qualified Distribution.Simple.PackageIndex as PackageIndex +import Distribution.Simple.LocalBuildInfo + ( LocalBuildInfo(..), ComponentLocalBuildInfo(..) + , absoluteInstallDirs, depLibraryPaths ) +import qualified Distribution.Simple.Hpc as Hpc +import Distribution.Simple.InstallDirs hiding ( absoluteInstallDirs ) +import Distribution.Simple.BuildPaths +import Distribution.Simple.Utils +import Distribution.Package + ( PackageName(..) ) +import qualified Distribution.ModuleName as ModuleName +import Distribution.Simple.Program + ( Program(..), ConfiguredProgram(..), ProgramConfiguration + , ProgramSearchPath + , rawSystemProgramStdout, rawSystemProgramStdoutConf + , getProgramInvocationOutput, requireProgramVersion, requireProgram + , userMaybeSpecifyPath, programPath, lookupProgram, addKnownProgram + , ghcProgram, ghcPkgProgram, hsc2hsProgram, ldProgram ) +import qualified Distribution.Simple.Program.HcPkg as HcPkg +import qualified Distribution.Simple.Program.Ar as Ar +import qualified Distribution.Simple.Program.Ld as Ld +import qualified Distribution.Simple.Program.Strip as Strip +import Distribution.Simple.Program.GHC +import Distribution.Simple.Setup + ( toFlag, fromFlag, fromFlagOrDefault, configCoverage, configDistPref ) +import qualified Distribution.Simple.Setup as Cabal + ( Flag(..) ) +import Distribution.Simple.Compiler + ( CompilerFlavor(..), CompilerId(..), Compiler(..), compilerVersion + , PackageDB(..), PackageDBStack, AbiTag(..) ) +import Distribution.Version + ( Version(..), anyVersion, orLaterVersion ) +import Distribution.System + ( Platform(..), OS(..) 
) +import Distribution.Verbosity +import Distribution.Text + ( display ) +import Distribution.Utils.NubList + ( NubListR, overNubListR, toNubListR ) +import Language.Haskell.Extension (Extension(..), KnownExtension(..)) + +import Control.Monad ( unless, when ) +import Data.Char ( isDigit, isSpace ) +import Data.List +import qualified Data.Map as M ( fromList ) +import Data.Maybe ( catMaybes ) +#if __GLASGOW_HASKELL__ < 710 +import Data.Monoid ( Monoid(..) ) +#endif +import Data.Version ( showVersion ) +import System.Directory + ( doesFileExist, getAppUserDataDirectory, createDirectoryIfMissing ) +import System.FilePath ( (), (<.>), takeExtension, + takeDirectory, replaceExtension, + splitExtension, isRelative ) +import qualified System.Info + +-- ----------------------------------------------------------------------------- +-- Configuring + +configure :: Verbosity -> Maybe FilePath -> Maybe FilePath + -> ProgramConfiguration + -> IO (Compiler, Maybe Platform, ProgramConfiguration) +configure verbosity hcPath hcPkgPath conf0 = do + + (ghcProg, ghcVersion, conf1) <- + requireProgramVersion verbosity ghcProgram + (orLaterVersion (Version [6,4] [])) + (userMaybeSpecifyPath "ghc" hcPath conf0) + let implInfo = ghcVersionImplInfo ghcVersion + + -- This is slightly tricky, we have to configure ghc first, then we use the + -- location of ghc to help find ghc-pkg in the case that the user did not + -- specify the location of ghc-pkg directly: + (ghcPkgProg, ghcPkgVersion, conf2) <- + requireProgramVersion verbosity ghcPkgProgram { + programFindLocation = guessGhcPkgFromGhcPath ghcProg + } + anyVersion (userMaybeSpecifyPath "ghc-pkg" hcPkgPath conf1) + + when (ghcVersion /= ghcPkgVersion) $ die $ + "Version mismatch between ghc and ghc-pkg: " + ++ programPath ghcProg ++ " is version " ++ display ghcVersion ++ " " + ++ programPath ghcPkgProg ++ " is version " ++ display ghcPkgVersion + + -- Likewise we try to find the matching hsc2hs program. 
+ let hsc2hsProgram' = hsc2hsProgram { + programFindLocation = guessHsc2hsFromGhcPath ghcProg + } + conf3 = addKnownProgram hsc2hsProgram' conf2 + + languages <- Internal.getLanguages verbosity implInfo ghcProg + extensions <- Internal.getExtensions verbosity implInfo ghcProg + + ghcInfo <- Internal.getGhcInfo verbosity implInfo ghcProg + let ghcInfoMap = M.fromList ghcInfo + + let comp = Compiler { + compilerId = CompilerId GHC ghcVersion, + compilerAbiTag = NoAbiTag, + compilerCompat = [], + compilerLanguages = languages, + compilerExtensions = extensions, + compilerProperties = ghcInfoMap + } + compPlatform = Internal.targetPlatform ghcInfo + conf4 = Internal.configureToolchain implInfo ghcProg ghcInfoMap conf3 -- configure gcc and ld + return (comp, compPlatform, conf4) + +-- | Given something like /usr/local/bin/ghc-6.6.1(.exe) we try and find +-- the corresponding tool; e.g. if the tool is ghc-pkg, we try looking +-- for a versioned or unversioned ghc-pkg in the same dir, that is: +-- +-- > /usr/local/bin/ghc-pkg-ghc-6.6.1(.exe) +-- > /usr/local/bin/ghc-pkg-6.6.1(.exe) +-- > /usr/local/bin/ghc-pkg(.exe) +-- +guessToolFromGhcPath :: Program -> ConfiguredProgram + -> Verbosity -> ProgramSearchPath + -> IO (Maybe FilePath) +guessToolFromGhcPath tool ghcProg verbosity searchpath + = do let toolname = programName tool + path = programPath ghcProg + dir = takeDirectory path + versionSuffix = takeVersionSuffix (dropExeExtension path) + guessNormal = dir toolname <.> exeExtension + guessGhcVersioned = dir (toolname ++ "-ghc" ++ versionSuffix) + <.> exeExtension + guessVersioned = dir (toolname ++ versionSuffix) + <.> exeExtension + guesses | null versionSuffix = [guessNormal] + | otherwise = [guessGhcVersioned, + guessVersioned, + guessNormal] + info verbosity $ "looking for tool " ++ toolname + ++ " near compiler in " ++ dir + exists <- mapM doesFileExist guesses + case [ file | (file, True) <- zip guesses exists ] of + -- If we can't find it near ghc, fall back to 
the usual + -- method. + [] -> programFindLocation tool verbosity searchpath + (fp:_) -> do info verbosity $ "found " ++ toolname ++ " in " ++ fp + return (Just fp) + + where takeVersionSuffix :: FilePath -> String + takeVersionSuffix = takeWhileEndLE isSuffixChar + + isSuffixChar :: Char -> Bool + isSuffixChar c = isDigit c || c == '.' || c == '-' + + dropExeExtension :: FilePath -> FilePath + dropExeExtension filepath = + case splitExtension filepath of + (filepath', extension) | extension == exeExtension -> filepath' + | otherwise -> filepath + +-- | Given something like /usr/local/bin/ghc-6.6.1(.exe) we try and find a +-- corresponding ghc-pkg, we try looking for both a versioned and unversioned +-- ghc-pkg in the same dir, that is: +-- +-- > /usr/local/bin/ghc-pkg-ghc-6.6.1(.exe) +-- > /usr/local/bin/ghc-pkg-6.6.1(.exe) +-- > /usr/local/bin/ghc-pkg(.exe) +-- +guessGhcPkgFromGhcPath :: ConfiguredProgram + -> Verbosity -> ProgramSearchPath -> IO (Maybe FilePath) +guessGhcPkgFromGhcPath = guessToolFromGhcPath ghcPkgProgram + +-- | Given something like /usr/local/bin/ghc-6.6.1(.exe) we try and find a +-- corresponding hsc2hs, we try looking for both a versioned and unversioned +-- hsc2hs in the same dir, that is: +-- +-- > /usr/local/bin/hsc2hs-ghc-6.6.1(.exe) +-- > /usr/local/bin/hsc2hs-6.6.1(.exe) +-- > /usr/local/bin/hsc2hs(.exe) +-- +guessHsc2hsFromGhcPath :: ConfiguredProgram + -> Verbosity -> ProgramSearchPath -> IO (Maybe FilePath) +guessHsc2hsFromGhcPath = guessToolFromGhcPath hsc2hsProgram + +getGhcInfo :: Verbosity -> ConfiguredProgram -> IO [(String, String)] +getGhcInfo verbosity ghcProg = Internal.getGhcInfo verbosity implInfo ghcProg + where + Just version = programVersion ghcProg + implInfo = ghcVersionImplInfo version + +-- | Given a single package DB, return all installed packages. 
+getPackageDBContents :: Verbosity -> PackageDB -> ProgramConfiguration + -> IO InstalledPackageIndex +getPackageDBContents verbosity packagedb conf = do + pkgss <- getInstalledPackages' verbosity [packagedb] conf + toPackageIndex verbosity pkgss conf + +-- | Given a package DB stack, return all installed packages. +getInstalledPackages :: Verbosity -> PackageDBStack -> ProgramConfiguration + -> IO InstalledPackageIndex +getInstalledPackages verbosity packagedbs conf = do + checkPackageDbEnvVar + checkPackageDbStack packagedbs + pkgss <- getInstalledPackages' verbosity packagedbs conf + index <- toPackageIndex verbosity pkgss conf + return $! hackRtsPackage index + + where + hackRtsPackage index = + case PackageIndex.lookupPackageName index (PackageName "rts") of + [(_,[rts])] + -> PackageIndex.insert (removeMingwIncludeDir rts) index + _ -> index -- No (or multiple) ghc rts package is registered!! + -- Feh, whatever, the ghc test suite does some crazy stuff. + +-- | Given a list of @(PackageDB, InstalledPackageInfo)@ pairs, produce a +-- @PackageIndex@. Helper function used by 'getPackageDBContents' and +-- 'getInstalledPackages'. +toPackageIndex :: Verbosity + -> [(PackageDB, [InstalledPackageInfo])] + -> ProgramConfiguration + -> IO InstalledPackageIndex +toPackageIndex verbosity pkgss conf = do + -- On Windows, various fields have $topdir/foo rather than full + -- paths. We need to substitute the right value in so that when + -- we, for example, call gcc, we have proper paths to give it. + topDir <- getLibDir' verbosity ghcProg + let indices = [ PackageIndex.fromList (map (Internal.substTopDir topDir) pkgs) + | (_, pkgs) <- pkgss ] + return $! 
(mconcat indices) + + where + Just ghcProg = lookupProgram ghcProgram conf + +getLibDir :: Verbosity -> LocalBuildInfo -> IO FilePath +getLibDir verbosity lbi = + dropWhileEndLE isSpace `fmap` + rawSystemProgramStdoutConf verbosity ghcProgram + (withPrograms lbi) ["--print-libdir"] + +getLibDir' :: Verbosity -> ConfiguredProgram -> IO FilePath +getLibDir' verbosity ghcProg = + dropWhileEndLE isSpace `fmap` + rawSystemProgramStdout verbosity ghcProg ["--print-libdir"] + + +-- | Return the 'FilePath' to the global GHC package database. +getGlobalPackageDB :: Verbosity -> ConfiguredProgram -> IO FilePath +getGlobalPackageDB verbosity ghcProg = + dropWhileEndLE isSpace `fmap` + rawSystemProgramStdout verbosity ghcProg ["--print-global-package-db"] + +checkPackageDbEnvVar :: IO () +checkPackageDbEnvVar = + Internal.checkPackageDbEnvVar "GHC" "GHC_PACKAGE_PATH" + +checkPackageDbStack :: PackageDBStack -> IO () +checkPackageDbStack (GlobalPackageDB:rest) + | GlobalPackageDB `notElem` rest = return () +checkPackageDbStack rest + | GlobalPackageDB `notElem` rest = + die $ "With current ghc versions the global package db is always used " + ++ "and must be listed first. This ghc limitation may be lifted in " + ++ "future, see http://hackage.haskell.org/trac/ghc/ticket/5977" +checkPackageDbStack _ = + die $ "If the global package db is specified, it must be " + ++ "specified first and cannot be specified multiple times" + +-- GHC < 6.10 put "$topdir/include/mingw" in rts's installDirs. This +-- breaks when you want to use a different gcc, so we need to filter +-- it out. +removeMingwIncludeDir :: InstalledPackageInfo -> InstalledPackageInfo +removeMingwIncludeDir pkg = + let ids = InstalledPackageInfo.includeDirs pkg + ids' = filter (not . ("mingw" `isSuffixOf`)) ids + in pkg { InstalledPackageInfo.includeDirs = ids' } + +-- | Get the packages from specific PackageDBs, not cumulative. 
+-- +getInstalledPackages' :: Verbosity -> [PackageDB] -> ProgramConfiguration + -> IO [(PackageDB, [InstalledPackageInfo])] +getInstalledPackages' verbosity packagedbs conf + | ghcVersion >= Version [6,9] [] = + sequence + [ do pkgs <- HcPkg.dump (hcPkgInfo conf) verbosity packagedb + return (packagedb, pkgs) + | packagedb <- packagedbs ] + + where + Just ghcProg = lookupProgram ghcProgram conf + Just ghcVersion = programVersion ghcProg + +getInstalledPackages' verbosity packagedbs conf = do + str <- rawSystemProgramStdoutConf verbosity ghcPkgProgram conf ["list"] + let pkgFiles = [ init line | line <- lines str, last line == ':' ] + dbFile packagedb = case (packagedb, pkgFiles) of + (GlobalPackageDB, global:_) -> return $ Just global + (UserPackageDB, _global:user:_) -> return $ Just user + (UserPackageDB, _global:_) -> return $ Nothing + (SpecificPackageDB specific, _) -> return $ Just specific + _ -> die "cannot read ghc-pkg package listing" + pkgFiles' <- mapM dbFile packagedbs + sequence [ withFileContents file $ \content -> do + pkgs <- readPackages file content + return (db, pkgs) + | (db , Just file) <- zip packagedbs pkgFiles' ] + where + -- Depending on the version of ghc we use a different type's Read + -- instance to parse the package file and then convert. + -- It's a bit yuck. But that's what we get for using Read/Show. + readPackages + | ghcVersion >= Version [6,4,2] [] + = \file content -> case reads content of + [(pkgs, _)] -> return (map IPI642.toCurrent pkgs) + _ -> failToRead file + | otherwise + = \file content -> case reads content of + [(pkgs, _)] -> return (map IPI641.toCurrent pkgs) + _ -> failToRead file + Just ghcProg = lookupProgram ghcProgram conf + Just ghcVersion = programVersion ghcProg + failToRead file = die $ "cannot read ghc package database " ++ file + +-- ----------------------------------------------------------------------------- +-- Building + +-- | Build a library with GHC. 
+-- +buildLib, replLib :: Verbosity -> Cabal.Flag (Maybe Int) + -> PackageDescription -> LocalBuildInfo + -> Library -> ComponentLocalBuildInfo -> IO () +buildLib = buildOrReplLib False +replLib = buildOrReplLib True + +buildOrReplLib :: Bool -> Verbosity -> Cabal.Flag (Maybe Int) + -> PackageDescription -> LocalBuildInfo + -> Library -> ComponentLocalBuildInfo -> IO () +buildOrReplLib forRepl verbosity numJobs pkg_descr lbi lib clbi = do + libName <- case componentLibraries clbi of + [libName] -> return libName + [] -> die "No library name found when building library" + _ -> die "Multiple library names found when building library" + + let libTargetDir = buildDir lbi + whenVanillaLib forceVanilla = + when (forceVanilla || withVanillaLib lbi) + whenProfLib = when (withProfLib lbi) + whenSharedLib forceShared = + when (forceShared || withSharedLib lbi) + whenGHCiLib = when (withGHCiLib lbi && withVanillaLib lbi) + ifReplLib = when forRepl + comp = compiler lbi + ghcVersion = compilerVersion comp + implInfo = getImplInfo comp + (Platform _hostArch hostOS) = hostPlatform lbi + hole_insts = map (\(k,(p,n)) -> (k,(InstalledPackageInfo.packageKey p,n))) (instantiatedWith lbi) + + (ghcProg, _) <- requireProgram verbosity ghcProgram (withPrograms lbi) + let runGhcProg = runGHC verbosity ghcProg comp + + libBi <- hackThreadedFlag verbosity + comp (withProfLib lbi) (libBuildInfo lib) + + let isGhcDynamic = isDynamic comp + dynamicTooSupported = supportsDynamicToo comp + doingTH = EnableExtension TemplateHaskell `elem` allExtensions libBi + forceVanillaLib = doingTH && not isGhcDynamic + forceSharedLib = doingTH && isGhcDynamic + -- TH always needs default libs, even when building for profiling + + -- Determine if program coverage should be enabled and if so, what + -- '-hpcdir' should be. + let isCoverageEnabled = fromFlag $ configCoverage $ configFlags lbi + -- Component name. Not 'libName' because that has the "HS" prefix + -- that GHC gives Haskell libraries. 
+ cname = display $ PD.package $ localPkgDescr lbi + distPref = fromFlag $ configDistPref $ configFlags lbi + hpcdir way + | isCoverageEnabled = toFlag $ Hpc.mixDir distPref way cname + | otherwise = mempty + + createDirectoryIfMissingVerbose verbosity True libTargetDir + -- TODO: do we need to put hs-boot files into place for mutually recursive + -- modules? + let cObjs = map (`replaceExtension` objExtension) (cSources libBi) + baseOpts = componentGhcOptions verbosity lbi libBi clbi libTargetDir + vanillaOpts = baseOpts `mappend` mempty { + ghcOptMode = toFlag GhcModeMake, + ghcOptNumJobs = numJobs, + ghcOptPackageKey = toFlag (pkgKey lbi), + ghcOptSigOf = hole_insts, + ghcOptInputModules = toNubListR $ libModules lib, + ghcOptHPCDir = hpcdir Hpc.Vanilla + } + + profOpts = vanillaOpts `mappend` mempty { + ghcOptProfilingMode = toFlag True, + ghcOptHiSuffix = toFlag "p_hi", + ghcOptObjSuffix = toFlag "p_o", + ghcOptExtra = toNubListR $ hcProfOptions GHC libBi, + ghcOptHPCDir = hpcdir Hpc.Prof + } + + sharedOpts = vanillaOpts `mappend` mempty { + ghcOptDynLinkMode = toFlag GhcDynamicOnly, + ghcOptFPic = toFlag True, + ghcOptHiSuffix = toFlag "dyn_hi", + ghcOptObjSuffix = toFlag "dyn_o", + ghcOptExtra = toNubListR $ hcSharedOptions GHC libBi, + ghcOptHPCDir = hpcdir Hpc.Dyn + } + linkerOpts = mempty { + ghcOptLinkOptions = toNubListR $ PD.ldOptions libBi, + ghcOptLinkLibs = toNubListR $ extraLibs libBi, + ghcOptLinkLibPath = toNubListR $ extraLibDirs libBi, + ghcOptLinkFrameworks = toNubListR $ PD.frameworks libBi, + ghcOptInputFiles = toNubListR + [libTargetDir x | x <- cObjs] + } + replOpts = vanillaOpts { + ghcOptExtra = overNubListR + Internal.filterGhciFlags $ + (ghcOptExtra vanillaOpts), + ghcOptNumJobs = mempty + } + `mappend` linkerOpts + `mappend` mempty { + ghcOptMode = toFlag GhcModeInteractive, + ghcOptOptimisation = toFlag GhcNoOptimisation + } + + vanillaSharedOpts = vanillaOpts `mappend` mempty { + ghcOptDynLinkMode = toFlag GhcStaticAndDynamic, + 
ghcOptDynHiSuffix = toFlag "dyn_hi", + ghcOptDynObjSuffix = toFlag "dyn_o", + ghcOptHPCDir = hpcdir Hpc.Dyn + } + + unless (forRepl || null (libModules lib)) $ + do let vanilla = whenVanillaLib forceVanillaLib (runGhcProg vanillaOpts) + shared = whenSharedLib forceSharedLib (runGhcProg sharedOpts) + useDynToo = dynamicTooSupported && + (forceVanillaLib || withVanillaLib lbi) && + (forceSharedLib || withSharedLib lbi) && + null (hcSharedOptions GHC libBi) + if useDynToo + then do + runGhcProg vanillaSharedOpts + case (hpcdir Hpc.Dyn, hpcdir Hpc.Vanilla) of + (Cabal.Flag dynDir, Cabal.Flag vanillaDir) -> do + -- When the vanilla and shared library builds are done + -- in one pass, only one set of HPC module interfaces + -- are generated. This set should suffice for both + -- static and dynamically linked executables. We copy + -- the modules interfaces so they are available under + -- both ways. + copyDirectoryRecursive verbosity dynDir vanillaDir + _ -> return () + else if isGhcDynamic + then do shared; vanilla + else do vanilla; shared + whenProfLib (runGhcProg profOpts) + + -- build any C sources + unless (null (cSources libBi)) $ do + info verbosity "Building C Sources..." + sequence_ + [ do let baseCcOpts = Internal.componentCcGhcOptions verbosity implInfo + lbi libBi clbi libTargetDir filename + vanillaCcOpts = if isGhcDynamic + -- Dynamic GHC requires C sources to be built + -- with -fPIC for REPL to work. See #2207. 
+ then baseCcOpts { ghcOptFPic = toFlag True } + else baseCcOpts + profCcOpts = vanillaCcOpts `mappend` mempty { + ghcOptProfilingMode = toFlag True, + ghcOptObjSuffix = toFlag "p_o" + } + sharedCcOpts = vanillaCcOpts `mappend` mempty { + ghcOptFPic = toFlag True, + ghcOptDynLinkMode = toFlag GhcDynamicOnly, + ghcOptObjSuffix = toFlag "dyn_o" + } + odir = fromFlag (ghcOptObjDir vanillaCcOpts) + createDirectoryIfMissingVerbose verbosity True odir + let runGhcProgIfNeeded ccOpts = do + needsRecomp <- checkNeedsRecompilation filename ccOpts + when needsRecomp $ runGhcProg ccOpts + runGhcProgIfNeeded vanillaCcOpts + unless forRepl $ + whenSharedLib forceSharedLib (runGhcProgIfNeeded sharedCcOpts) + unless forRepl $ whenProfLib (runGhcProgIfNeeded profCcOpts) + | filename <- cSources libBi] + + -- TODO: problem here is we need the .c files built first, so we can load them + -- with ghci, but .c files can depend on .h files generated by ghc by ffi + -- exports. + + ifReplLib $ do + when (null (libModules lib)) $ warn verbosity "No exposed modules" + ifReplLib (runGhcProg replOpts) + + -- link: + unless forRepl $ do + info verbosity "Linking..." 
+ let cProfObjs = map (`replaceExtension` ("p_" ++ objExtension)) + (cSources libBi) + cSharedObjs = map (`replaceExtension` ("dyn_" ++ objExtension)) + (cSources libBi) + cid = compilerId (compiler lbi) + vanillaLibFilePath = libTargetDir mkLibName libName + profileLibFilePath = libTargetDir mkProfLibName libName + sharedLibFilePath = libTargetDir mkSharedLibName cid libName + ghciLibFilePath = libTargetDir Internal.mkGHCiLibName libName + libInstallPath = libdir $ absoluteInstallDirs pkg_descr lbi NoCopyDest + sharedLibInstallPath = libInstallPath mkSharedLibName cid libName + + stubObjs <- fmap catMaybes $ sequence + [ findFileWithExtension [objExtension] [libTargetDir] + (ModuleName.toFilePath x ++"_stub") + | ghcVersion < Version [7,2] [] -- ghc-7.2+ does not make _stub.o files + , x <- libModules lib ] + stubProfObjs <- fmap catMaybes $ sequence + [ findFileWithExtension ["p_" ++ objExtension] [libTargetDir] + (ModuleName.toFilePath x ++"_stub") + | ghcVersion < Version [7,2] [] -- ghc-7.2+ does not make _stub.o files + , x <- libModules lib ] + stubSharedObjs <- fmap catMaybes $ sequence + [ findFileWithExtension ["dyn_" ++ objExtension] [libTargetDir] + (ModuleName.toFilePath x ++"_stub") + | ghcVersion < Version [7,2] [] -- ghc-7.2+ does not make _stub.o files + , x <- libModules lib ] + + hObjs <- Internal.getHaskellObjects implInfo lib lbi + libTargetDir objExtension True + hProfObjs <- + if (withProfLib lbi) + then Internal.getHaskellObjects implInfo lib lbi + libTargetDir ("p_" ++ objExtension) True + else return [] + hSharedObjs <- + if (withSharedLib lbi) + then Internal.getHaskellObjects implInfo lib lbi + libTargetDir ("dyn_" ++ objExtension) False + else return [] + + unless (null hObjs && null cObjs && null stubObjs) $ do + rpaths <- getRPaths lbi clbi + + let staticObjectFiles = + hObjs + ++ map (libTargetDir ) cObjs + ++ stubObjs + profObjectFiles = + hProfObjs + ++ map (libTargetDir ) cProfObjs + ++ stubProfObjs + ghciObjFiles = + hObjs + ++ 
map (libTargetDir ) cObjs + ++ stubObjs + dynamicObjectFiles = + hSharedObjs + ++ map (libTargetDir ) cSharedObjs + ++ stubSharedObjs + -- After the relocation lib is created we invoke ghc -shared + -- with the dependencies spelled out as -package arguments + -- and ghc invokes the linker with the proper library paths + ghcSharedLinkArgs = + mempty { + ghcOptShared = toFlag True, + ghcOptDynLinkMode = toFlag GhcDynamicOnly, + ghcOptInputFiles = toNubListR dynamicObjectFiles, + ghcOptOutputFile = toFlag sharedLibFilePath, + -- For dynamic libs, Mac OS/X needs to know the install location + -- at build time. This only applies to GHC < 7.8 - see the + -- discussion in #1660. + ghcOptDylibName = if (hostOS == OSX + && ghcVersion < Version [7,8] []) + then toFlag sharedLibInstallPath + else mempty, + ghcOptPackageKey = toFlag (pkgKey lbi), + ghcOptNoAutoLinkPackages = toFlag True, + ghcOptPackageDBs = withPackageDB lbi, + ghcOptPackages = toNubListR $ + Internal.mkGhcOptPackages clbi , + ghcOptLinkLibs = toNubListR $ extraLibs libBi, + ghcOptLinkLibPath = toNubListR $ extraLibDirs libBi, + ghcOptLinkFrameworks = toNubListR $ PD.frameworks libBi, + ghcOptRPaths = rpaths + } + + info verbosity (show (ghcOptPackages ghcSharedLinkArgs)) + + whenVanillaLib False $ do + Ar.createArLibArchive verbosity lbi vanillaLibFilePath staticObjectFiles + + whenProfLib $ do + Ar.createArLibArchive verbosity lbi profileLibFilePath profObjectFiles + + whenGHCiLib $ do + (ldProg, _) <- requireProgram verbosity ldProgram (withPrograms lbi) + Ld.combineObjectFiles verbosity ldProg + ghciLibFilePath ghciObjFiles + + whenSharedLib False $ + runGhcProg ghcSharedLinkArgs + +-- | Start a REPL without loading any source files. 
+startInterpreter :: Verbosity -> ProgramConfiguration -> Compiler + -> PackageDBStack -> IO () +startInterpreter verbosity conf comp packageDBs = do + let replOpts = mempty { + ghcOptMode = toFlag GhcModeInteractive, + ghcOptPackageDBs = packageDBs + } + checkPackageDbStack packageDBs + (ghcProg, _) <- requireProgram verbosity ghcProgram conf + runGHC verbosity ghcProg comp replOpts + +-- | Build an executable with GHC. +-- +buildExe, replExe :: Verbosity -> Cabal.Flag (Maybe Int) + -> PackageDescription -> LocalBuildInfo + -> Executable -> ComponentLocalBuildInfo -> IO () +buildExe = buildOrReplExe False +replExe = buildOrReplExe True + +buildOrReplExe :: Bool -> Verbosity -> Cabal.Flag (Maybe Int) + -> PackageDescription -> LocalBuildInfo + -> Executable -> ComponentLocalBuildInfo -> IO () +buildOrReplExe forRepl verbosity numJobs _pkg_descr lbi + exe@Executable { exeName = exeName', modulePath = modPath } clbi = do + + (ghcProg, _) <- requireProgram verbosity ghcProgram (withPrograms lbi) + let comp = compiler lbi + implInfo = getImplInfo comp + runGhcProg = runGHC verbosity ghcProg comp + + exeBi <- hackThreadedFlag verbosity + comp (withProfExe lbi) (buildInfo exe) + + -- exeNameReal, the name that GHC really uses (with .exe on Windows) + let exeNameReal = exeName' <.> + (if takeExtension exeName' /= ('.':exeExtension) + then exeExtension + else "") + + let targetDir = (buildDir lbi) exeName' + let exeDir = targetDir (exeName' ++ "-tmp") + createDirectoryIfMissingVerbose verbosity True targetDir + createDirectoryIfMissingVerbose verbosity True exeDir + -- TODO: do we need to put hs-boot files into place for mutually recursive + -- modules? FIX: what about exeName.hi-boot? + + -- Determine if program coverage should be enabled and if so, what + -- '-hpcdir' should be. 
+ let isCoverageEnabled = fromFlag $ configCoverage $ configFlags lbi + distPref = fromFlag $ configDistPref $ configFlags lbi + hpcdir way + | isCoverageEnabled = toFlag $ Hpc.mixDir distPref way exeName' + | otherwise = mempty + + -- build executables + + srcMainFile <- findFile (exeDir : hsSourceDirs exeBi) modPath + rpaths <- getRPaths lbi clbi + + let isGhcDynamic = isDynamic comp + dynamicTooSupported = supportsDynamicToo comp + isHaskellMain = elem (takeExtension srcMainFile) [".hs", ".lhs"] + cSrcs = cSources exeBi ++ [srcMainFile | not isHaskellMain] + cObjs = map (`replaceExtension` objExtension) cSrcs + baseOpts = (componentGhcOptions verbosity lbi exeBi clbi exeDir) + `mappend` mempty { + ghcOptMode = toFlag GhcModeMake, + ghcOptInputFiles = toNubListR + [ srcMainFile | isHaskellMain], + ghcOptInputModules = toNubListR + [ m | not isHaskellMain, m <- exeModules exe] + } + staticOpts = baseOpts `mappend` mempty { + ghcOptDynLinkMode = toFlag GhcStaticOnly, + ghcOptHPCDir = hpcdir Hpc.Vanilla + } + profOpts = baseOpts `mappend` mempty { + ghcOptProfilingMode = toFlag True, + ghcOptHiSuffix = toFlag "p_hi", + ghcOptObjSuffix = toFlag "p_o", + ghcOptExtra = toNubListR $ hcProfOptions GHC exeBi, + ghcOptHPCDir = hpcdir Hpc.Prof + } + dynOpts = baseOpts `mappend` mempty { + ghcOptDynLinkMode = toFlag GhcDynamicOnly, + ghcOptHiSuffix = toFlag "dyn_hi", + ghcOptObjSuffix = toFlag "dyn_o", + ghcOptExtra = toNubListR $ + hcSharedOptions GHC exeBi, + ghcOptHPCDir = hpcdir Hpc.Dyn + } + dynTooOpts = staticOpts `mappend` mempty { + ghcOptDynLinkMode = toFlag GhcStaticAndDynamic, + ghcOptDynHiSuffix = toFlag "dyn_hi", + ghcOptDynObjSuffix = toFlag "dyn_o", + ghcOptHPCDir = hpcdir Hpc.Dyn + } + linkerOpts = mempty { + ghcOptLinkOptions = toNubListR $ PD.ldOptions exeBi, + ghcOptLinkLibs = toNubListR $ extraLibs exeBi, + ghcOptLinkLibPath = toNubListR $ extraLibDirs exeBi, + ghcOptLinkFrameworks = toNubListR $ PD.frameworks exeBi, + ghcOptInputFiles = toNubListR + 
[exeDir x | x <- cObjs] + } + dynLinkerOpts = mempty { + ghcOptRPaths = rpaths + } + replOpts = baseOpts { + ghcOptExtra = overNubListR + Internal.filterGhciFlags + (ghcOptExtra baseOpts) + } + -- For a normal compile we do separate invocations of ghc for + -- compiling as for linking. But for repl we have to do just + -- the one invocation, so that one has to include all the + -- linker stuff too, like -l flags and any .o files from C + -- files etc. + `mappend` linkerOpts + `mappend` mempty { + ghcOptMode = toFlag GhcModeInteractive, + ghcOptOptimisation = toFlag GhcNoOptimisation + } + commonOpts | withProfExe lbi = profOpts + | withDynExe lbi = dynOpts + | otherwise = staticOpts + compileOpts | useDynToo = dynTooOpts + | otherwise = commonOpts + withStaticExe = (not $ withProfExe lbi) && (not $ withDynExe lbi) + + -- For building exe's that use TH with -prof or -dynamic we actually have + -- to build twice, once without -prof/-dynamic and then again with + -- -prof/-dynamic. This is because the code that TH needs to run at + -- compile time needs to be the vanilla ABI so it can be loaded up and run + -- by the compiler. + -- With dynamic-by-default GHC the TH object files loaded at compile-time + -- need to be .dyn_o instead of .o. + doingTH = EnableExtension TemplateHaskell `elem` allExtensions exeBi + -- Should we use -dynamic-too instead of compiling twice? 
+ useDynToo = dynamicTooSupported && isGhcDynamic + && doingTH && withStaticExe + && null (hcSharedOptions GHC exeBi) + compileTHOpts | isGhcDynamic = dynOpts + | otherwise = staticOpts + compileForTH + | forRepl = False + | useDynToo = False + | isGhcDynamic = doingTH && (withProfExe lbi || withStaticExe) + | otherwise = doingTH && (withProfExe lbi || withDynExe lbi) + + linkOpts = commonOpts `mappend` + linkerOpts `mappend` + mempty { ghcOptLinkNoHsMain = toFlag (not isHaskellMain) } `mappend` + (if withDynExe lbi then dynLinkerOpts else mempty) + + -- Build static/dynamic object files for TH, if needed. + when compileForTH $ + runGhcProg compileTHOpts { ghcOptNoLink = toFlag True + , ghcOptNumJobs = numJobs } + + unless forRepl $ + runGhcProg compileOpts { ghcOptNoLink = toFlag True + , ghcOptNumJobs = numJobs } + + -- build any C sources + unless (null cSrcs) $ do + info verbosity "Building C Sources..." + sequence_ + [ do let opts = (Internal.componentCcGhcOptions verbosity implInfo lbi exeBi + clbi exeDir filename) `mappend` mempty { + ghcOptDynLinkMode = toFlag (if withDynExe lbi + then GhcDynamicOnly + else GhcStaticOnly), + ghcOptProfilingMode = toFlag (withProfExe lbi) + } + odir = fromFlag (ghcOptObjDir opts) + createDirectoryIfMissingVerbose verbosity True odir + needsRecomp <- checkNeedsRecompilation filename opts + when needsRecomp $ + runGhcProg opts + | filename <- cSrcs ] + + -- TODO: problem here is we need the .c files built first, so we can load them + -- with ghci, but .c files can depend on .h files generated by ghc by ffi + -- exports. + when forRepl $ runGhcProg replOpts + + -- link: + unless forRepl $ do + info verbosity "Linking..." + runGhcProg linkOpts { ghcOptOutputFile = toFlag (targetDir exeNameReal) } + +-- | Returns True if the modification date of the given source file is newer than +-- the object file we last compiled for it, or if no object file exists yet. 
+checkNeedsRecompilation :: FilePath -> GhcOptions -> IO Bool +checkNeedsRecompilation filename opts = filename `moreRecentFile` oname + where oname = getObjectFileName filename opts + +-- | Finds the object file name of the given source file +getObjectFileName :: FilePath -> GhcOptions -> FilePath +getObjectFileName filename opts = oname + where odir = fromFlag (ghcOptObjDir opts) + oext = fromFlagOrDefault "o" (ghcOptObjSuffix opts) + oname = odir replaceExtension filename oext + +-- | Calculate the RPATHs for the component we are building. +-- +-- Calculates relative RPATHs when 'relocatable' is set. +getRPaths :: LocalBuildInfo + -> ComponentLocalBuildInfo -- ^ Component we are building + -> IO (NubListR FilePath) +getRPaths lbi clbi | supportRPaths hostOS = do + libraryPaths <- depLibraryPaths False (relocatable lbi) lbi clbi + let hostPref = case hostOS of + OSX -> "@loader_path" + _ -> "$ORIGIN" + relPath p = if isRelative p then hostPref p else p + rpaths = toNubListR (map relPath libraryPaths) + return rpaths + where + (Platform _ hostOS) = hostPlatform lbi + + -- The list of RPath-supported operating systems below reflects the + -- platforms on which Cabal's RPATH handling is tested. It does _NOT_ + -- reflect whether the OS supports RPATH. + + -- E.g. when this comment was written, the *BSD operating systems were + -- untested with regards to Cabal RPATH handling, and were hence set to + -- 'False', while those operating systems themselves do support RPATH. 
+ supportRPaths Linux   = True + supportRPaths Windows = False + supportRPaths OSX   = True + supportRPaths FreeBSD   = False + supportRPaths OpenBSD   = False + supportRPaths NetBSD   = False + supportRPaths DragonFly = False + supportRPaths Solaris = False + supportRPaths AIX = False + supportRPaths HPUX = False + supportRPaths IRIX = False + supportRPaths HaLVM = False + supportRPaths IOS = False + supportRPaths Ghcjs = False + supportRPaths (OtherOS _) = False + -- Do _not_ add a default case so that we get a warning here when a new OS + -- is added. + +getRPaths _ _ = return mempty + +-- | Filter the "-threaded" flag when profiling as it does not +-- work with ghc-6.8 and older. +hackThreadedFlag :: Verbosity -> Compiler -> Bool -> BuildInfo -> IO BuildInfo +hackThreadedFlag verbosity comp prof bi + | not mustFilterThreaded = return bi + | otherwise = do + warn verbosity $ "The ghc flag '-threaded' is not compatible with " + ++ "profiling in ghc-6.8 and older. It will be disabled." + return bi { options = filterHcOptions (/= "-threaded") (options bi) } + where + mustFilterThreaded = prof && compilerVersion comp < Version [6, 10] [] + && "-threaded" `elem` hcOptions GHC bi + filterHcOptions p hcoptss = + [ (hc, if hc == GHC then filter p opts else opts) + | (hc, opts) <- hcoptss ] + + +-- | Extracts a String representing a hash of the ABI of a built +-- library. It can fail if the library has not yet been built. 
+-- +libAbiHash :: Verbosity -> PackageDescription -> LocalBuildInfo + -> Library -> ComponentLocalBuildInfo -> IO String +libAbiHash verbosity _pkg_descr lbi lib clbi = do + libBi <- hackThreadedFlag verbosity + (compiler lbi) (withProfLib lbi) (libBuildInfo lib) + let + comp = compiler lbi + vanillaArgs = + (componentGhcOptions verbosity lbi libBi clbi (buildDir lbi)) + `mappend` mempty { + ghcOptMode = toFlag GhcModeAbiHash, + ghcOptPackageKey = toFlag (pkgKey lbi), + ghcOptInputModules = toNubListR $ exposedModules lib + } + sharedArgs = vanillaArgs `mappend` mempty { + ghcOptDynLinkMode = toFlag GhcDynamicOnly, + ghcOptFPic = toFlag True, + ghcOptHiSuffix = toFlag "dyn_hi", + ghcOptObjSuffix = toFlag "dyn_o", + ghcOptExtra = toNubListR $ hcSharedOptions GHC libBi + } + profArgs = vanillaArgs `mappend` mempty { + ghcOptProfilingMode = toFlag True, + ghcOptHiSuffix = toFlag "p_hi", + ghcOptObjSuffix = toFlag "p_o", + ghcOptExtra = toNubListR $ hcProfOptions GHC libBi + } + ghcArgs = if withVanillaLib lbi then vanillaArgs + else if withSharedLib lbi then sharedArgs + else if withProfLib lbi then profArgs + else error "libAbiHash: Can't find an enabled library way" + -- + (ghcProg, _) <- requireProgram verbosity ghcProgram (withPrograms lbi) + hash <- getProgramInvocationOutput verbosity (ghcInvocation ghcProg comp ghcArgs) + return (takeWhile (not . isSpace) hash) + +componentGhcOptions :: Verbosity -> LocalBuildInfo + -> BuildInfo -> ComponentLocalBuildInfo -> FilePath + -> GhcOptions +componentGhcOptions = Internal.componentGhcOptions + +-- ----------------------------------------------------------------------------- +-- Installing + +-- |Install executables for GHC. 
+installExe :: Verbosity + -> LocalBuildInfo + -> InstallDirs FilePath -- ^Where to copy the files to + -> FilePath -- ^Build location + -> (FilePath, FilePath) -- ^Executable (prefix,suffix) + -> PackageDescription + -> Executable + -> IO () +installExe verbosity lbi installDirs buildPref + (progprefix, progsuffix) _pkg exe = do + let binDir = bindir installDirs + createDirectoryIfMissingVerbose verbosity True binDir + let exeFileName = exeName exe <.> exeExtension + fixedExeBaseName = progprefix ++ exeName exe ++ progsuffix + installBinary dest = do + installExecutableFile verbosity + (buildPref exeName exe exeFileName) + (dest <.> exeExtension) + when (stripExes lbi) $ + Strip.stripExe verbosity (hostPlatform lbi) (withPrograms lbi) + (dest <.> exeExtension) + installBinary (binDir fixedExeBaseName) + +-- |Install for ghc, .hi, .a and, if --with-ghci given, .o +installLib :: Verbosity + -> LocalBuildInfo + -> FilePath -- ^install location + -> FilePath -- ^install location for dynamic libraries + -> FilePath -- ^Build location + -> PackageDescription + -> Library + -> ComponentLocalBuildInfo + -> IO () +installLib verbosity lbi targetDir dynlibTargetDir builtDir _pkg lib clbi = do + -- copy .hi files over: + whenVanilla $ copyModuleFiles "hi" + whenProf $ copyModuleFiles "p_hi" + whenShared $ copyModuleFiles "dyn_hi" + + -- copy the built library files over: + whenVanilla $ mapM_ (installOrdinary builtDir targetDir) vanillaLibNames + whenProf $ mapM_ (installOrdinary builtDir targetDir) profileLibNames + whenGHCi $ mapM_ (installOrdinary builtDir targetDir) ghciLibNames + whenShared $ mapM_ (installShared builtDir dynlibTargetDir) sharedLibNames + + where + install isShared srcDir dstDir name = do + let src = srcDir name + dst = dstDir name + createDirectoryIfMissingVerbose verbosity True dstDir + if isShared + then do when (stripLibs lbi) $ Strip.stripLib verbosity + (hostPlatform lbi) (withPrograms lbi) src + installExecutableFile verbosity src dst + else 
installOrdinaryFile verbosity src dst + + installOrdinary = install False + installShared = install True + + copyModuleFiles ext = + findModuleFiles [builtDir] [ext] (libModules lib) + >>= installOrdinaryFiles verbosity targetDir + + cid = compilerId (compiler lbi) + libNames = componentLibraries clbi + vanillaLibNames = map mkLibName libNames + profileLibNames = map mkProfLibName libNames + ghciLibNames = map Internal.mkGHCiLibName libNames + sharedLibNames = map (mkSharedLibName cid) libNames + + hasLib = not $ null (libModules lib) + && null (cSources (libBuildInfo lib)) + whenVanilla = when (hasLib && withVanillaLib lbi) + whenProf = when (hasLib && withProfLib lbi) + whenGHCi = when (hasLib && withGHCiLib lbi) + whenShared = when (hasLib && withSharedLib lbi) + +-- ----------------------------------------------------------------------------- +-- Registering + +hcPkgInfo :: ProgramConfiguration -> HcPkg.HcPkgInfo +hcPkgInfo conf = HcPkg.HcPkgInfo { HcPkg.hcPkgProgram = ghcPkgProg + , HcPkg.noPkgDbStack = v < [6,9] + , HcPkg.noVerboseFlag = v < [6,11] + , HcPkg.flagPackageConf = v < [7,5] + , HcPkg.useSingleFileDb = v < [7,9] + } + where + v = versionBranch ver + Just ghcPkgProg = lookupProgram ghcPkgProgram conf + Just ver = programVersion ghcPkgProg + +registerPackage + :: Verbosity + -> InstalledPackageInfo + -> PackageDescription + -> LocalBuildInfo + -> Bool + -> PackageDBStack + -> IO () +registerPackage verbosity installedPkgInfo _pkg lbi _inplace packageDbs = + HcPkg.reregister (hcPkgInfo $ withPrograms lbi) verbosity + packageDbs (Right installedPkgInfo) + +pkgRoot :: Verbosity -> LocalBuildInfo -> PackageDB -> IO FilePath +pkgRoot verbosity lbi = pkgRoot' + where + pkgRoot' GlobalPackageDB = + let Just ghcProg = lookupProgram ghcProgram (withPrograms lbi) + in fmap takeDirectory (getGlobalPackageDB verbosity ghcProg) + pkgRoot' UserPackageDB = do + appDir <- getAppUserDataDirectory "ghc" + let ver = compilerVersion (compiler lbi) + subdir = 
System.Info.arch ++ '-':System.Info.os ++ '-':showVersion ver + rootDir = appDir subdir + -- We must create the root directory for the user package database if it + -- does not yet exists. Otherwise '${pkgroot}' will resolve to a + -- directory at the time of 'ghc-pkg register', and registration will + -- fail. + createDirectoryIfMissing True rootDir + return rootDir + pkgRoot' (SpecificPackageDB fp) = return (takeDirectory fp) + +-- ----------------------------------------------------------------------------- +-- Utils + +isDynamic :: Compiler -> Bool +isDynamic = Internal.ghcLookupProperty "GHC Dynamic" + +supportsDynamicToo :: Compiler -> Bool +supportsDynamicToo = Internal.ghcLookupProperty "Support dynamic-too" diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/GHCJS.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/GHCJS.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/GHCJS.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/GHCJS.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,904 @@ +{-# LANGUAGE CPP #-} + +module Distribution.Simple.GHCJS ( + configure, getInstalledPackages, getPackageDBContents, + buildLib, buildExe, + replLib, replExe, + startInterpreter, + installLib, installExe, + libAbiHash, + hcPkgInfo, + registerPackage, + componentGhcOptions, + getLibDir, + isDynamic, + getGlobalPackageDB, + runCmd + ) where + +import Distribution.Simple.GHC.ImplInfo ( getImplInfo, ghcjsVersionImplInfo ) +import qualified Distribution.Simple.GHC.Internal as Internal +import Distribution.PackageDescription as PD + ( PackageDescription(..), BuildInfo(..), Executable(..) 
+ , Library(..), libModules, exeModules + , hcOptions, hcProfOptions, hcSharedOptions + , allExtensions ) +import Distribution.InstalledPackageInfo + ( InstalledPackageInfo ) +import qualified Distribution.InstalledPackageInfo as InstalledPackageInfo + ( InstalledPackageInfo_(..) ) +import Distribution.Simple.PackageIndex ( InstalledPackageIndex ) +import qualified Distribution.Simple.PackageIndex as PackageIndex +import Distribution.Simple.LocalBuildInfo + ( LocalBuildInfo(..), ComponentLocalBuildInfo(..) + , LibraryName(..) ) +import qualified Distribution.Simple.Hpc as Hpc +import Distribution.Simple.InstallDirs hiding ( absoluteInstallDirs ) +import Distribution.Simple.BuildPaths +import Distribution.Simple.Utils +import Distribution.Simple.Program + ( Program(..), ConfiguredProgram(..), ProgramConfiguration + , ProgramSearchPath + , rawSystemProgramConf + , rawSystemProgramStdout, rawSystemProgramStdoutConf + , getProgramInvocationOutput + , requireProgramVersion, requireProgram + , userMaybeSpecifyPath, programPath + , lookupProgram, addKnownPrograms + , ghcjsProgram, ghcjsPkgProgram, c2hsProgram, hsc2hsProgram + , ldProgram, haddockProgram, stripProgram ) +import qualified Distribution.Simple.Program.HcPkg as HcPkg +import qualified Distribution.Simple.Program.Ar as Ar +import qualified Distribution.Simple.Program.Ld as Ld +import qualified Distribution.Simple.Program.Strip as Strip +import Distribution.Simple.Program.GHC +import Distribution.Simple.Setup + ( toFlag, fromFlag, configCoverage, configDistPref ) +import qualified Distribution.Simple.Setup as Cabal + ( Flag(..) ) +import Distribution.Simple.Compiler + ( CompilerFlavor(..), CompilerId(..), Compiler(..) + , PackageDB(..), PackageDBStack, AbiTag(..) ) +import Distribution.Version + ( Version(..), anyVersion, orLaterVersion ) +import Distribution.System + ( Platform(..) 
) +import Distribution.Verbosity +import Distribution.Utils.NubList + ( overNubListR, toNubListR ) +import Distribution.Text ( display ) +import Language.Haskell.Extension ( Extension(..) + , KnownExtension(..)) + +import Control.Monad ( unless, when ) +import Data.Char ( isSpace ) +import qualified Data.Map as M ( fromList ) +#if __GLASGOW_HASKELL__ < 710 +import Data.Monoid ( Monoid(..) ) +#endif +import System.Directory ( doesFileExist ) +import System.FilePath ( (), (<.>), takeExtension, + takeDirectory, replaceExtension, + splitExtension ) + +configure :: Verbosity -> Maybe FilePath -> Maybe FilePath + -> ProgramConfiguration + -> IO (Compiler, Maybe Platform, ProgramConfiguration) +configure verbosity hcPath hcPkgPath conf0 = do + (ghcjsProg, ghcjsVersion, conf1) <- + requireProgramVersion verbosity ghcjsProgram + (orLaterVersion (Version [0,1] [])) + (userMaybeSpecifyPath "ghcjs" hcPath conf0) + Just ghcjsGhcVersion <- findGhcjsGhcVersion verbosity (programPath ghcjsProg) + let implInfo = ghcjsVersionImplInfo ghcjsVersion ghcjsGhcVersion + + -- This is slightly tricky, we have to configure ghcjs first, then we use the + -- location of ghcjs to help find ghcjs-pkg in the case that the user did not + -- specify the location of ghc-pkg directly: + (ghcjsPkgProg, ghcjsPkgVersion, conf2) <- + requireProgramVersion verbosity ghcjsPkgProgram { + programFindLocation = guessGhcjsPkgFromGhcjsPath ghcjsProg + } + anyVersion (userMaybeSpecifyPath "ghcjs-pkg" hcPkgPath conf1) + + Just ghcjsPkgGhcjsVersion <- findGhcjsPkgGhcjsVersion + verbosity (programPath ghcjsPkgProg) + + when (ghcjsVersion /= ghcjsPkgGhcjsVersion) $ die $ + "Version mismatch between ghcjs and ghcjs-pkg: " + ++ programPath ghcjsProg ++ " is version " ++ display ghcjsVersion ++ " " + ++ programPath ghcjsPkgProg ++ " is version " ++ display ghcjsPkgGhcjsVersion + + when (ghcjsGhcVersion /= ghcjsPkgVersion) $ die $ + "Version mismatch between ghcjs and ghcjs-pkg: " + ++ programPath ghcjsProg + ++ " was 
built with GHC version " ++ display ghcjsGhcVersion ++ " " + ++ programPath ghcjsPkgProg + ++ " was built with GHC version " ++ display ghcjsPkgVersion + + -- be sure to use our versions of hsc2hs, c2hs, haddock and ghc + let hsc2hsProgram' = + hsc2hsProgram { programFindLocation = + guessHsc2hsFromGhcjsPath ghcjsProg } + c2hsProgram' = + c2hsProgram { programFindLocation = + guessC2hsFromGhcjsPath ghcjsProg } + + haddockProgram' = + haddockProgram { programFindLocation = + guessHaddockFromGhcjsPath ghcjsProg } + conf3 = addKnownPrograms [ hsc2hsProgram', c2hsProgram', haddockProgram' ] conf2 + + languages <- Internal.getLanguages verbosity implInfo ghcjsProg + extensions <- Internal.getExtensions verbosity implInfo ghcjsProg + + ghcInfo <- Internal.getGhcInfo verbosity implInfo ghcjsProg + let ghcInfoMap = M.fromList ghcInfo + + let comp = Compiler { + compilerId = CompilerId GHCJS ghcjsVersion, + compilerAbiTag = AbiTag $ + "ghc" ++ intercalate "_" (map show . versionBranch $ ghcjsGhcVersion), + compilerCompat = [CompilerId GHC ghcjsGhcVersion], + compilerLanguages = languages, + compilerExtensions = extensions, + compilerProperties = ghcInfoMap + } + compPlatform = Internal.targetPlatform ghcInfo + -- configure gcc and ld + let conf4 = if ghcjsNativeToo comp + then Internal.configureToolchain implInfo + ghcjsProg ghcInfoMap conf3 + else conf3 + return (comp, compPlatform, conf4) + +ghcjsNativeToo :: Compiler -> Bool +ghcjsNativeToo = Internal.ghcLookupProperty "Native Too" + +guessGhcjsPkgFromGhcjsPath :: ConfiguredProgram -> Verbosity + -> ProgramSearchPath -> IO (Maybe FilePath) +guessGhcjsPkgFromGhcjsPath = guessToolFromGhcjsPath ghcjsPkgProgram + +guessHsc2hsFromGhcjsPath :: ConfiguredProgram -> Verbosity + -> ProgramSearchPath -> IO (Maybe FilePath) +guessHsc2hsFromGhcjsPath = guessToolFromGhcjsPath hsc2hsProgram + +guessC2hsFromGhcjsPath :: ConfiguredProgram -> Verbosity + -> ProgramSearchPath -> IO (Maybe FilePath) +guessC2hsFromGhcjsPath = 
guessToolFromGhcjsPath c2hsProgram + +guessHaddockFromGhcjsPath :: ConfiguredProgram -> Verbosity + -> ProgramSearchPath -> IO (Maybe FilePath) +guessHaddockFromGhcjsPath = guessToolFromGhcjsPath haddockProgram + +guessToolFromGhcjsPath :: Program -> ConfiguredProgram + -> Verbosity -> ProgramSearchPath + -> IO (Maybe FilePath) +guessToolFromGhcjsPath tool ghcjsProg verbosity searchpath + = do let toolname = programName tool + path = programPath ghcjsProg + dir = takeDirectory path + versionSuffix = takeVersionSuffix (dropExeExtension path) + guessNormal = dir toolname <.> exeExtension + guessGhcjsVersioned = dir (toolname ++ "-ghcjs" ++ versionSuffix) + <.> exeExtension + guessGhcjs = dir (toolname ++ "-ghcjs") + <.> exeExtension + guessVersioned = dir (toolname ++ versionSuffix) <.> exeExtension + guesses | null versionSuffix = [guessGhcjs, guessNormal] + | otherwise = [guessGhcjsVersioned, + guessGhcjs, + guessVersioned, + guessNormal] + info verbosity $ "looking for tool " ++ toolname + ++ " near compiler in " ++ dir + exists <- mapM doesFileExist guesses + case [ file | (file, True) <- zip guesses exists ] of + -- If we can't find it near ghc, fall back to the usual + -- method. + [] -> programFindLocation tool verbosity searchpath + (fp:_) -> do info verbosity $ "found " ++ toolname ++ " in " ++ fp + return (Just fp) + + where takeVersionSuffix :: FilePath -> String + takeVersionSuffix = reverse . takeWhile (`elem ` "0123456789.-") . + reverse + + dropExeExtension :: FilePath -> FilePath + dropExeExtension filepath = + case splitExtension filepath of + (filepath', extension) | extension == exeExtension -> filepath' + | otherwise -> filepath + + +-- | Given a single package DB, return all installed packages. 
+getPackageDBContents :: Verbosity -> PackageDB -> ProgramConfiguration + -> IO InstalledPackageIndex +getPackageDBContents verbosity packagedb conf = do + pkgss <- getInstalledPackages' verbosity [packagedb] conf + toPackageIndex verbosity pkgss conf + +-- | Given a package DB stack, return all installed packages. +getInstalledPackages :: Verbosity -> PackageDBStack -> ProgramConfiguration + -> IO InstalledPackageIndex +getInstalledPackages verbosity packagedbs conf = do + checkPackageDbEnvVar + checkPackageDbStack packagedbs + pkgss <- getInstalledPackages' verbosity packagedbs conf + index <- toPackageIndex verbosity pkgss conf + return $! index + +toPackageIndex :: Verbosity + -> [(PackageDB, [InstalledPackageInfo])] + -> ProgramConfiguration + -> IO InstalledPackageIndex +toPackageIndex verbosity pkgss conf = do + -- On Windows, various fields have $topdir/foo rather than full + -- paths. We need to substitute the right value in so that when + -- we, for example, call gcc, we have proper paths to give it. + topDir <- getLibDir' verbosity ghcjsProg + let indices = [ PackageIndex.fromList (map (Internal.substTopDir topDir) pkgs) + | (_, pkgs) <- pkgss ] + return $! (mconcat indices) + + where + Just ghcjsProg = lookupProgram ghcjsProgram conf + +checkPackageDbEnvVar :: IO () +checkPackageDbEnvVar = + Internal.checkPackageDbEnvVar "GHCJS" "GHCJS_PACKAGE_PATH" + +checkPackageDbStack :: PackageDBStack -> IO () +checkPackageDbStack (GlobalPackageDB:rest) + | GlobalPackageDB `notElem` rest = return () +checkPackageDbStack rest + | GlobalPackageDB `notElem` rest = + die $ "With current ghc versions the global package db is always used " + ++ "and must be listed first. 
This ghc limitation may be lifted in " + ++ "future, see http://hackage.haskell.org/trac/ghc/ticket/5977" +checkPackageDbStack _ = + die $ "If the global package db is specified, it must be " + ++ "specified first and cannot be specified multiple times" + +getInstalledPackages' :: Verbosity -> [PackageDB] -> ProgramConfiguration + -> IO [(PackageDB, [InstalledPackageInfo])] +getInstalledPackages' verbosity packagedbs conf = + sequence + [ do pkgs <- HcPkg.dump (hcPkgInfo conf) verbosity packagedb + return (packagedb, pkgs) + | packagedb <- packagedbs ] + +getLibDir :: Verbosity -> LocalBuildInfo -> IO FilePath +getLibDir verbosity lbi = + (reverse . dropWhile isSpace . reverse) `fmap` + rawSystemProgramStdoutConf verbosity ghcjsProgram + (withPrograms lbi) ["--print-libdir"] + +getLibDir' :: Verbosity -> ConfiguredProgram -> IO FilePath +getLibDir' verbosity ghcjsProg = + (reverse . dropWhile isSpace . reverse) `fmap` + rawSystemProgramStdout verbosity ghcjsProg ["--print-libdir"] + +-- | Return the 'FilePath' to the global GHC package database. +getGlobalPackageDB :: Verbosity -> ConfiguredProgram -> IO FilePath +getGlobalPackageDB verbosity ghcjsProg = + (reverse . dropWhile isSpace . 
reverse) `fmap` + rawSystemProgramStdout verbosity ghcjsProg ["--print-global-package-db"] + +toJSLibName :: String -> String +toJSLibName lib + | takeExtension lib `elem` [".dll",".dylib",".so"] + = replaceExtension lib "js_so" + | takeExtension lib == ".a" = replaceExtension lib "js_a" + | otherwise = lib <.> "js_a" + +buildLib, replLib :: Verbosity -> Cabal.Flag (Maybe Int) -> PackageDescription + -> LocalBuildInfo -> Library -> ComponentLocalBuildInfo + -> IO () +buildLib = buildOrReplLib False +replLib = buildOrReplLib True + +buildOrReplLib :: Bool -> Verbosity -> Cabal.Flag (Maybe Int) + -> PackageDescription -> LocalBuildInfo + -> Library -> ComponentLocalBuildInfo -> IO () +buildOrReplLib forRepl verbosity numJobs _pkg_descr lbi lib clbi = do + libName <- case componentLibraries clbi of + [libName] -> return libName + [] -> die "No library name found when building library" + _ -> die "Multiple library names found when building library" + let libTargetDir = buildDir lbi + whenVanillaLib forceVanilla = + when (not forRepl && (forceVanilla || withVanillaLib lbi)) + whenProfLib = when (not forRepl && withProfLib lbi) + whenSharedLib forceShared = + when (not forRepl && (forceShared || withSharedLib lbi)) + whenGHCiLib = when (not forRepl && withGHCiLib lbi && withVanillaLib lbi) + ifReplLib = when forRepl + comp = compiler lbi + implInfo = getImplInfo comp + hole_insts = map (\(k,(p,n)) -> (k,(InstalledPackageInfo.packageKey p,n))) + (instantiatedWith lbi) + nativeToo = ghcjsNativeToo comp + + (ghcjsProg, _) <- requireProgram verbosity ghcjsProgram (withPrograms lbi) + let runGhcjsProg = runGHC verbosity ghcjsProg comp + libBi = libBuildInfo lib + isGhcjsDynamic = isDynamic comp + dynamicTooSupported = supportsDynamicToo comp + doingTH = EnableExtension TemplateHaskell `elem` allExtensions libBi + forceVanillaLib = doingTH && not isGhcjsDynamic + forceSharedLib = doingTH && isGhcjsDynamic + -- TH always needs default libs, even when building for profiling + + 
-- Determine if program coverage should be enabled and if so, what + -- '-hpcdir' should be. + let isCoverageEnabled = fromFlag $ configCoverage $ configFlags lbi + -- Component name. Not 'libName' because that has the "HS" prefix + -- that GHC gives Haskell libraries. + cname = display $ PD.package $ localPkgDescr lbi + distPref = fromFlag $ configDistPref $ configFlags lbi + hpcdir way + | isCoverageEnabled = toFlag $ Hpc.mixDir distPref way cname + | otherwise = mempty + + createDirectoryIfMissingVerbose verbosity True libTargetDir + -- TODO: do we need to put hs-boot files into place for mutually recursive + -- modules? + let cObjs = map (`replaceExtension` objExtension) (cSources libBi) + jsSrcs = jsSources libBi + baseOpts = componentGhcOptions verbosity lbi libBi clbi libTargetDir + linkJsLibOpts = mempty { + ghcOptExtra = toNubListR $ + [ "-link-js-lib" , (\(LibraryName l) -> l) libName + , "-js-lib-outputdir", libTargetDir ] ++ + concatMap (\x -> ["-js-lib-src",x]) jsSrcs + } + vanillaOptsNoJsLib = baseOpts `mappend` mempty { + ghcOptMode = toFlag GhcModeMake, + ghcOptNumJobs = numJobs, + ghcOptPackageKey = toFlag (pkgKey lbi), + ghcOptSigOf = hole_insts, + ghcOptInputModules = toNubListR $ libModules lib, + ghcOptHPCDir = hpcdir Hpc.Vanilla + } + vanillaOpts = vanillaOptsNoJsLib `mappend` linkJsLibOpts + + profOpts = adjustExts "p_hi" "p_o" vanillaOpts `mappend` mempty { + ghcOptProfilingMode = toFlag True, + ghcOptExtra = toNubListR $ + ghcjsProfOptions libBi, + ghcOptHPCDir = hpcdir Hpc.Prof + } + sharedOpts = adjustExts "dyn_hi" "dyn_o" vanillaOpts `mappend` mempty { + ghcOptDynLinkMode = toFlag GhcDynamicOnly, + ghcOptFPic = toFlag True, + ghcOptExtra = toNubListR $ + ghcjsSharedOptions libBi, + ghcOptHPCDir = hpcdir Hpc.Dyn + } + linkerOpts = mempty { + ghcOptLinkOptions = toNubListR $ PD.ldOptions libBi, + ghcOptLinkLibs = toNubListR $ extraLibs libBi, + ghcOptLinkLibPath = toNubListR $ extraLibDirs libBi, + ghcOptLinkFrameworks = toNubListR $ 
PD.frameworks libBi, + ghcOptInputFiles = + toNubListR $ [libTargetDir x | x <- cObjs] ++ jsSrcs + } + replOpts = vanillaOptsNoJsLib { + ghcOptExtra = overNubListR + Internal.filterGhciFlags + (ghcOptExtra vanillaOpts), + ghcOptNumJobs = mempty + } + `mappend` linkerOpts + `mappend` mempty { + ghcOptMode = toFlag GhcModeInteractive, + ghcOptOptimisation = toFlag GhcNoOptimisation + } + + vanillaSharedOpts = vanillaOpts `mappend` + mempty { + ghcOptDynLinkMode = toFlag GhcStaticAndDynamic, + ghcOptDynHiSuffix = toFlag "dyn_hi", + ghcOptDynObjSuffix = toFlag "dyn_o", + ghcOptHPCDir = hpcdir Hpc.Dyn + } + + unless (forRepl || (null (libModules lib) && null jsSrcs && null cObjs)) $ + do let vanilla = whenVanillaLib forceVanillaLib (runGhcjsProg vanillaOpts) + shared = whenSharedLib forceSharedLib (runGhcjsProg sharedOpts) + useDynToo = dynamicTooSupported && + (forceVanillaLib || withVanillaLib lbi) && + (forceSharedLib || withSharedLib lbi) && + null (ghcjsSharedOptions libBi) + if useDynToo + then do + runGhcjsProg vanillaSharedOpts + case (hpcdir Hpc.Dyn, hpcdir Hpc.Vanilla) of + (Cabal.Flag dynDir, Cabal.Flag vanillaDir) -> do + -- When the vanilla and shared library builds are done + -- in one pass, only one set of HPC module interfaces + -- are generated. This set should suffice for both + -- static and dynamically linked executables. We copy + -- the modules interfaces so they are available under + -- both ways. + copyDirectoryRecursive verbosity dynDir vanillaDir + _ -> return () + else if isGhcjsDynamic + then do shared; vanilla + else do vanilla; shared + whenProfLib (runGhcjsProg profOpts) + + -- build any C sources + unless (null (cSources libBi) || not nativeToo) $ do + info verbosity "Building C Sources..." 
+ sequence_ + [ do let vanillaCcOpts = + (Internal.componentCcGhcOptions verbosity implInfo + lbi libBi clbi libTargetDir filename) + profCcOpts = vanillaCcOpts `mappend` mempty { + ghcOptProfilingMode = toFlag True, + ghcOptObjSuffix = toFlag "p_o" + } + sharedCcOpts = vanillaCcOpts `mappend` mempty { + ghcOptFPic = toFlag True, + ghcOptDynLinkMode = toFlag GhcDynamicOnly, + ghcOptObjSuffix = toFlag "dyn_o" + } + odir = fromFlag (ghcOptObjDir vanillaCcOpts) + createDirectoryIfMissingVerbose verbosity True odir + runGhcjsProg vanillaCcOpts + whenSharedLib forceSharedLib (runGhcjsProg sharedCcOpts) + whenProfLib (runGhcjsProg profCcOpts) + | filename <- cSources libBi] + + -- TODO: problem here is we need the .c files built first, so we can load them + -- with ghci, but .c files can depend on .h files generated by ghc by ffi + -- exports. + unless (null (libModules lib)) $ + ifReplLib (runGhcjsProg replOpts) + + -- link: + when (nativeToo && not forRepl) $ do + info verbosity "Linking..." 
+ let cProfObjs = map (`replaceExtension` ("p_" ++ objExtension)) + (cSources libBi) + cSharedObjs = map (`replaceExtension` ("dyn_" ++ objExtension)) + (cSources libBi) + cid = compilerId (compiler lbi) + vanillaLibFilePath = libTargetDir mkLibName libName + profileLibFilePath = libTargetDir mkProfLibName libName + sharedLibFilePath = libTargetDir mkSharedLibName cid libName + ghciLibFilePath = libTargetDir Internal.mkGHCiLibName libName + + hObjs <- Internal.getHaskellObjects implInfo lib lbi + libTargetDir objExtension True + hProfObjs <- + if (withProfLib lbi) + then Internal.getHaskellObjects implInfo lib lbi + libTargetDir ("p_" ++ objExtension) True + else return [] + hSharedObjs <- + if (withSharedLib lbi) + then Internal.getHaskellObjects implInfo lib lbi + libTargetDir ("dyn_" ++ objExtension) False + else return [] + + unless (null hObjs && null cObjs) $ do + + let staticObjectFiles = + hObjs + ++ map (libTargetDir ) cObjs + profObjectFiles = + hProfObjs + ++ map (libTargetDir ) cProfObjs + ghciObjFiles = + hObjs + ++ map (libTargetDir ) cObjs + dynamicObjectFiles = + hSharedObjs + ++ map (libTargetDir ) cSharedObjs + -- After the relocation lib is created we invoke ghc -shared + -- with the dependencies spelled out as -package arguments + -- and ghc invokes the linker with the proper library paths + ghcSharedLinkArgs = + mempty { + ghcOptShared = toFlag True, + ghcOptDynLinkMode = toFlag GhcDynamicOnly, + ghcOptInputFiles = toNubListR dynamicObjectFiles, + ghcOptOutputFile = toFlag sharedLibFilePath, + ghcOptPackageKey = toFlag (pkgKey lbi), + ghcOptNoAutoLinkPackages = toFlag True, + ghcOptPackageDBs = withPackageDB lbi, + ghcOptPackages = toNubListR $ + Internal.mkGhcOptPackages clbi, + ghcOptLinkLibs = toNubListR $ extraLibs libBi, + ghcOptLinkLibPath = toNubListR $ extraLibDirs libBi + } + + whenVanillaLib False $ do + Ar.createArLibArchive verbosity lbi vanillaLibFilePath staticObjectFiles + + whenProfLib $ do + Ar.createArLibArchive verbosity lbi 
profileLibFilePath profObjectFiles + + whenGHCiLib $ do + (ldProg, _) <- requireProgram verbosity ldProgram (withPrograms lbi) + Ld.combineObjectFiles verbosity ldProg + ghciLibFilePath ghciObjFiles + + whenSharedLib False $ + runGhcjsProg ghcSharedLinkArgs + +-- | Start a REPL without loading any source files. +startInterpreter :: Verbosity -> ProgramConfiguration -> Compiler + -> PackageDBStack -> IO () +startInterpreter verbosity conf comp packageDBs = do + let replOpts = mempty { + ghcOptMode = toFlag GhcModeInteractive, + ghcOptPackageDBs = packageDBs + } + checkPackageDbStack packageDBs + (ghcjsProg, _) <- requireProgram verbosity ghcjsProgram conf + runGHC verbosity ghcjsProg comp replOpts + +buildExe, replExe :: Verbosity -> Cabal.Flag (Maybe Int) + -> PackageDescription -> LocalBuildInfo + -> Executable -> ComponentLocalBuildInfo -> IO () +buildExe = buildOrReplExe False +replExe = buildOrReplExe True + +buildOrReplExe :: Bool -> Verbosity -> Cabal.Flag (Maybe Int) + -> PackageDescription -> LocalBuildInfo + -> Executable -> ComponentLocalBuildInfo -> IO () +buildOrReplExe forRepl verbosity numJobs _pkg_descr lbi + exe@Executable { exeName = exeName', modulePath = modPath } clbi = do + + (ghcjsProg, _) <- requireProgram verbosity ghcjsProgram (withPrograms lbi) + let comp = compiler lbi + implInfo = getImplInfo comp + runGhcjsProg = runGHC verbosity ghcjsProg comp + exeBi = buildInfo exe + + -- exeNameReal, the name that GHC really uses (with .exe on Windows) + let exeNameReal = exeName' <.> + (if takeExtension exeName' /= ('.':exeExtension) + then exeExtension + else "") + + let targetDir = (buildDir lbi) exeName' + let exeDir = targetDir (exeName' ++ "-tmp") + createDirectoryIfMissingVerbose verbosity True targetDir + createDirectoryIfMissingVerbose verbosity True exeDir + -- TODO: do we need to put hs-boot files into place for mutually recursive + -- modules? FIX: what about exeName.hi-boot? 
+ + -- Determine if program coverage should be enabled and if so, what + -- '-hpcdir' should be. + let isCoverageEnabled = fromFlag $ configCoverage $ configFlags lbi + distPref = fromFlag $ configDistPref $ configFlags lbi + hpcdir way + | isCoverageEnabled = toFlag $ Hpc.mixDir distPref way exeName' + | otherwise = mempty + + -- build executables + + srcMainFile <- findFile (exeDir : hsSourceDirs exeBi) modPath + let isGhcjsDynamic = isDynamic comp + dynamicTooSupported = supportsDynamicToo comp + buildRunner = case clbi of + ExeComponentLocalBuildInfo {} -> False + _ -> True + isHaskellMain = elem (takeExtension srcMainFile) [".hs", ".lhs"] + jsSrcs = jsSources exeBi + cSrcs = cSources exeBi ++ [srcMainFile | not isHaskellMain] + cObjs = map (`replaceExtension` objExtension) cSrcs + nativeToo = ghcjsNativeToo comp + baseOpts = (componentGhcOptions verbosity lbi exeBi clbi exeDir) + `mappend` mempty { + ghcOptMode = toFlag GhcModeMake, + ghcOptInputFiles = toNubListR $ + [ srcMainFile | isHaskellMain], + ghcOptInputModules = toNubListR $ + [ m | not isHaskellMain, m <- exeModules exe], + ghcOptExtra = + if buildRunner then toNubListR ["-build-runner"] + else mempty + } + staticOpts = baseOpts `mappend` mempty { + ghcOptDynLinkMode = toFlag GhcStaticOnly, + ghcOptHPCDir = hpcdir Hpc.Vanilla + } + profOpts = adjustExts "p_hi" "p_o" baseOpts `mappend` mempty { + ghcOptProfilingMode = toFlag True, + ghcOptExtra = toNubListR $ ghcjsProfOptions exeBi, + ghcOptHPCDir = hpcdir Hpc.Prof + } + dynOpts = adjustExts "dyn_hi" "dyn_o" baseOpts `mappend` mempty { + ghcOptDynLinkMode = toFlag GhcDynamicOnly, + ghcOptExtra = toNubListR $ + ghcjsSharedOptions exeBi, + ghcOptHPCDir = hpcdir Hpc.Dyn + } + dynTooOpts = adjustExts "dyn_hi" "dyn_o" staticOpts `mappend` mempty { + ghcOptDynLinkMode = toFlag GhcStaticAndDynamic, + ghcOptHPCDir = hpcdir Hpc.Dyn + } + linkerOpts = mempty { + ghcOptLinkOptions = toNubListR $ PD.ldOptions exeBi, + ghcOptLinkLibs = toNubListR $ extraLibs 
exeBi, + ghcOptLinkLibPath = toNubListR $ extraLibDirs exeBi, + ghcOptLinkFrameworks = toNubListR $ PD.frameworks exeBi, + ghcOptInputFiles = toNubListR $ + [exeDir x | x <- cObjs] ++ jsSrcs + } + replOpts = baseOpts { + ghcOptExtra = overNubListR + Internal.filterGhciFlags + (ghcOptExtra baseOpts) + } + -- For a normal compile we do separate invocations of ghc for + -- compiling as for linking. But for repl we have to do just + -- the one invocation, so that one has to include all the + -- linker stuff too, like -l flags and any .o files from C + -- files etc. + `mappend` linkerOpts + `mappend` mempty { + ghcOptMode = toFlag GhcModeInteractive, + ghcOptOptimisation = toFlag GhcNoOptimisation + } + commonOpts | withProfExe lbi = profOpts + | withDynExe lbi = dynOpts + | otherwise = staticOpts + compileOpts | useDynToo = dynTooOpts + | otherwise = commonOpts + withStaticExe = (not $ withProfExe lbi) && (not $ withDynExe lbi) + + -- For building exe's that use TH with -prof or -dynamic we actually have + -- to build twice, once without -prof/-dynamic and then again with + -- -prof/-dynamic. This is because the code that TH needs to run at + -- compile time needs to be the vanilla ABI so it can be loaded up and run + -- by the compiler. + -- With dynamic-by-default GHC the TH object files loaded at compile-time + -- need to be .dyn_o instead of .o. + doingTH = EnableExtension TemplateHaskell `elem` allExtensions exeBi + -- Should we use -dynamic-too instead of compiling twice? 
+ useDynToo = dynamicTooSupported && isGhcjsDynamic + && doingTH && withStaticExe && null (ghcjsSharedOptions exeBi) + compileTHOpts | isGhcjsDynamic = dynOpts + | otherwise = staticOpts + compileForTH + | forRepl = False + | useDynToo = False + | isGhcjsDynamic = doingTH && (withProfExe lbi || withStaticExe) + | otherwise = doingTH && (withProfExe lbi || withDynExe lbi) + + linkOpts = commonOpts `mappend` + linkerOpts `mappend` mempty { + ghcOptLinkNoHsMain = toFlag (not isHaskellMain) + } + + -- Build static/dynamic object files for TH, if needed. + when compileForTH $ + runGhcjsProg compileTHOpts { ghcOptNoLink = toFlag True + , ghcOptNumJobs = numJobs } + + unless forRepl $ + runGhcjsProg compileOpts { ghcOptNoLink = toFlag True + , ghcOptNumJobs = numJobs } + + -- build any C sources + unless (null cSrcs || not nativeToo) $ do + info verbosity "Building C Sources..." + sequence_ + [ do let opts = (Internal.componentCcGhcOptions verbosity implInfo lbi exeBi + clbi exeDir filename) `mappend` mempty { + ghcOptDynLinkMode = toFlag (if withDynExe lbi + then GhcDynamicOnly + else GhcStaticOnly), + ghcOptProfilingMode = toFlag (withProfExe lbi) + } + odir = fromFlag (ghcOptObjDir opts) + createDirectoryIfMissingVerbose verbosity True odir + runGhcjsProg opts + | filename <- cSrcs ] + + -- TODO: problem here is we need the .c files built first, so we can load them + -- with ghci, but .c files can depend on .h files generated by ghc by ffi + -- exports. + when forRepl $ runGhcjsProg replOpts + + -- link: + unless forRepl $ do + info verbosity "Linking..." 
+ runGhcjsProg linkOpts { ghcOptOutputFile = toFlag (targetDir exeNameReal) } + +-- |Install for ghc, .hi, .a and, if --with-ghci given, .o +installLib :: Verbosity + -> LocalBuildInfo + -> FilePath -- ^install location + -> FilePath -- ^install location for dynamic libraries + -> FilePath -- ^Build location + -> PackageDescription + -> Library + -> ComponentLocalBuildInfo + -> IO () +installLib verbosity lbi targetDir dynlibTargetDir builtDir _pkg lib clbi = do + whenVanilla $ copyModuleFiles "js_hi" + whenProf $ copyModuleFiles "js_p_hi" + whenShared $ copyModuleFiles "js_dyn_hi" + + whenVanilla $ mapM_ (installOrdinary builtDir targetDir . toJSLibName) vanillaLibNames + whenProf $ mapM_ (installOrdinary builtDir targetDir . toJSLibName) profileLibNames + whenShared $ mapM_ (installShared builtDir dynlibTargetDir . toJSLibName) sharedLibNames + + when (ghcjsNativeToo $ compiler lbi) $ do + -- copy .hi files over: + whenVanilla $ copyModuleFiles "hi" + whenProf $ copyModuleFiles "p_hi" + whenShared $ copyModuleFiles "dyn_hi" + + -- copy the built library files over: + whenVanilla $ mapM_ (installOrdinary builtDir targetDir) vanillaLibNames + whenProf $ mapM_ (installOrdinary builtDir targetDir) profileLibNames + whenGHCi $ mapM_ (installOrdinary builtDir targetDir) ghciLibNames + whenShared $ mapM_ (installShared builtDir dynlibTargetDir) sharedLibNames + + where + install isShared srcDir dstDir name = do + let src = srcDir name + dst = dstDir name + createDirectoryIfMissingVerbose verbosity True dstDir + if isShared + then do when (stripLibs lbi) $ Strip.stripLib verbosity + (hostPlatform lbi) (withPrograms lbi) src + installExecutableFile verbosity src dst + else installOrdinaryFile verbosity src dst + + installOrdinary = install False + installShared = install True + + copyModuleFiles ext = + findModuleFiles [builtDir] [ext] (libModules lib) + >>= installOrdinaryFiles verbosity targetDir + + cid = compilerId (compiler lbi) + libNames = componentLibraries clbi + 
vanillaLibNames = map mkLibName libNames + profileLibNames = map mkProfLibName libNames + ghciLibNames = map Internal.mkGHCiLibName libNames + sharedLibNames = map (mkSharedLibName cid) libNames + + hasLib = not $ null (libModules lib) + && null (cSources (libBuildInfo lib)) + whenVanilla = when (hasLib && withVanillaLib lbi) + whenProf = when (hasLib && withProfLib lbi) + whenGHCi = when (hasLib && withGHCiLib lbi) + whenShared = when (hasLib && withSharedLib lbi) + +installExe :: Verbosity + -> LocalBuildInfo + -> InstallDirs FilePath -- ^Where to copy the files to + -> FilePath -- ^Build location + -> (FilePath, FilePath) -- ^Executable (prefix,suffix) + -> PackageDescription + -> Executable + -> IO () +installExe verbosity lbi installDirs buildPref + (progprefix, progsuffix) _pkg exe = do + let binDir = bindir installDirs + createDirectoryIfMissingVerbose verbosity True binDir + let exeFileName = exeName exe + fixedExeBaseName = progprefix ++ exeName exe ++ progsuffix + installBinary dest = do + rawSystemProgramConf verbosity ghcjsProgram (withPrograms lbi) $ + [ "--install-executable" + , buildPref exeName exe exeFileName + , "-o", dest + ] ++ + case (stripExes lbi, lookupProgram stripProgram $ withPrograms lbi) of + (True, Just strip) -> ["-strip-program", programPath strip] + _ -> [] + installBinary (binDir fixedExeBaseName) + +libAbiHash :: Verbosity -> PackageDescription -> LocalBuildInfo + -> Library -> ComponentLocalBuildInfo -> IO String +libAbiHash verbosity _pkg_descr lbi lib clbi = do + let + libBi = libBuildInfo lib + comp = compiler lbi + vanillaArgs = + (componentGhcOptions verbosity lbi libBi clbi (buildDir lbi)) + `mappend` mempty { + ghcOptMode = toFlag GhcModeAbiHash, + ghcOptPackageKey = toFlag (pkgKey lbi), + ghcOptInputModules = toNubListR $ exposedModules lib + } + profArgs = adjustExts "js_p_hi" "js_p_o" vanillaArgs `mappend` mempty { + ghcOptProfilingMode = toFlag True, + ghcOptExtra = toNubListR (ghcjsProfOptions libBi) + } + ghcArgs = 
if withVanillaLib lbi then vanillaArgs + else if withProfLib lbi then profArgs + else error "libAbiHash: Can't find an enabled library way" + -- + (ghcjsProg, _) <- requireProgram verbosity ghcjsProgram (withPrograms lbi) + getProgramInvocationOutput verbosity (ghcInvocation ghcjsProg comp ghcArgs) + +adjustExts :: String -> String -> GhcOptions -> GhcOptions +adjustExts hiSuf objSuf opts = + opts `mappend` mempty { + ghcOptHiSuffix = toFlag hiSuf, + ghcOptObjSuffix = toFlag objSuf + } + +registerPackage :: Verbosity + -> InstalledPackageInfo + -> PackageDescription + -> LocalBuildInfo + -> Bool + -> PackageDBStack + -> IO () +registerPackage verbosity installedPkgInfo _pkg lbi _inplace packageDbs = + HcPkg.reregister (hcPkgInfo $ withPrograms lbi) verbosity packageDbs + (Right installedPkgInfo) + +componentGhcOptions :: Verbosity -> LocalBuildInfo + -> BuildInfo -> ComponentLocalBuildInfo -> FilePath + -> GhcOptions +componentGhcOptions verbosity lbi bi clbi odir = + let opts = Internal.componentGhcOptions verbosity lbi bi clbi odir + in opts { ghcOptExtra = ghcOptExtra opts `mappend` toNubListR + (hcOptions GHCJS bi) + } + +ghcjsProfOptions :: BuildInfo -> [String] +ghcjsProfOptions bi = + hcProfOptions GHC bi `mappend` hcProfOptions GHCJS bi + +ghcjsSharedOptions :: BuildInfo -> [String] +ghcjsSharedOptions bi = + hcSharedOptions GHC bi `mappend` hcSharedOptions GHCJS bi + +isDynamic :: Compiler -> Bool +isDynamic = Internal.ghcLookupProperty "GHC Dynamic" + +supportsDynamicToo :: Compiler -> Bool +supportsDynamicToo = Internal.ghcLookupProperty "Support dynamic-too" + +findGhcjsGhcVersion :: Verbosity -> FilePath -> IO (Maybe Version) +findGhcjsGhcVersion verbosity pgm = + findProgramVersion "--numeric-ghc-version" id verbosity pgm + +findGhcjsPkgGhcjsVersion :: Verbosity -> FilePath -> IO (Maybe Version) +findGhcjsPkgGhcjsVersion verbosity pgm = + findProgramVersion "--numeric-ghcjs-version" id verbosity pgm + +-- 
----------------------------------------------------------------------------- +-- Registering + +hcPkgInfo :: ProgramConfiguration -> HcPkg.HcPkgInfo +hcPkgInfo conf = HcPkg.HcPkgInfo { HcPkg.hcPkgProgram = ghcjsPkgProg + , HcPkg.noPkgDbStack = False + , HcPkg.noVerboseFlag = False + , HcPkg.flagPackageConf = False + , HcPkg.useSingleFileDb = v < [7,9] + } + where + v = versionBranch ver + Just ghcjsPkgProg = lookupProgram ghcjsPkgProgram conf + Just ver = programVersion ghcjsPkgProg + +-- | Get the JavaScript file name and command and arguments to run a +-- program compiled by GHCJS +-- the exe should be the base program name without exe extension +runCmd :: ProgramConfiguration -> FilePath + -> (FilePath, FilePath, [String]) +runCmd conf exe = + ( script + , programPath ghcjsProg + , programDefaultArgs ghcjsProg ++ programOverrideArgs ghcjsProg ++ ["--run"] + ) + where + script = exe <.> "jsexe" "all" <.> "js" + Just ghcjsProg = lookupProgram ghcjsProgram conf diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Haddock.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Haddock.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Haddock.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Haddock.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,819 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.Haddock +-- Copyright : Isaac Jones 2003-2005 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This module deals with the @haddock@ and @hscolour@ commands. +-- It uses information about installed packages (from @ghc-pkg@) to find the +-- locations of documentation for dependent packages, so it can create links. 
+-- +-- The @hscolour@ support allows generating HTML versions of the original +-- source, with coloured syntax highlighting. + +module Distribution.Simple.Haddock ( + haddock, hscolour, + + haddockPackagePaths + ) where + +import qualified Distribution.Simple.GHC as GHC +import qualified Distribution.Simple.GHCJS as GHCJS + +-- local +import Distribution.Package + ( PackageIdentifier(..) + , Package(..) + , PackageName(..), packageName ) +import qualified Distribution.ModuleName as ModuleName +import Distribution.PackageDescription as PD + ( PackageDescription(..), BuildInfo(..), usedExtensions + , hcSharedOptions + , Library(..), hasLibs, Executable(..) + , TestSuite(..), TestSuiteInterface(..) + , Benchmark(..), BenchmarkInterface(..) ) +import Distribution.Simple.Compiler + ( Compiler, compilerInfo, CompilerFlavor(..) + , compilerFlavor, compilerCompatVersion ) +import Distribution.Simple.Program.GHC + ( GhcOptions(..), GhcDynLinkMode(..), renderGhcOptions ) +import Distribution.Simple.Program + ( ConfiguredProgram(..), lookupProgramVersion, requireProgramVersion + , rawSystemProgram, rawSystemProgramStdout + , hscolourProgram, haddockProgram ) +import Distribution.Simple.PreProcess + ( PPSuffixHandler, preprocessComponent) +import Distribution.Simple.Setup + ( defaultHscolourFlags + , Flag(..), toFlag, flagToMaybe, flagToList, fromFlag + , HaddockFlags(..), HscolourFlags(..) ) +import Distribution.Simple.Build (initialBuildSteps) +import Distribution.Simple.InstallDirs + ( InstallDirs(..) + , PathTemplateEnv, PathTemplate, PathTemplateVariable(..) + , toPathTemplate, fromPathTemplate + , substPathTemplate, initialPathTemplateEnv ) +import Distribution.Simple.LocalBuildInfo + ( LocalBuildInfo(..), Component(..), ComponentLocalBuildInfo(..) 
+ , withAllComponentsInBuildOrder ) +import Distribution.Simple.BuildPaths + ( haddockName, hscolourPref, autogenModulesDir) +import Distribution.Simple.PackageIndex (dependencyClosure) +import qualified Distribution.Simple.PackageIndex as PackageIndex +import qualified Distribution.InstalledPackageInfo as InstalledPackageInfo + ( InstalledPackageInfo_(..) ) +import Distribution.InstalledPackageInfo + ( InstalledPackageInfo ) +import Distribution.Simple.Utils + ( die, copyFileTo, warn, notice, intercalate, setupMessage + , createDirectoryIfMissingVerbose + , TempFileOptions(..), defaultTempFileOptions + , withTempFileEx, copyFileVerbose + , withTempDirectoryEx, matchFileGlob + , findFileWithExtension, findFile ) +import Distribution.Text + ( display, simpleParse ) +import Distribution.Utils.NubList + ( toNubListR ) + +import Distribution.Verbosity +import Language.Haskell.Extension + + +import Control.Monad ( when, forM_ ) +import Data.Char ( isSpace ) +import Data.Either ( rights ) +import Data.Monoid +import Data.Foldable ( foldl' ) +import Data.Maybe ( fromMaybe, listToMaybe ) + +import System.Directory (doesFileExist) +import System.FilePath ( (), (<.>) + , normalise, splitPath, joinPath, isAbsolute ) +import System.IO (hClose, hPutStr, hPutStrLn, hSetEncoding, utf8) +import Distribution.Version + +-- ------------------------------------------------------------------------------ +-- Types + +-- | A record that represents the arguments to the haddock executable, a product +-- monoid. +data HaddockArgs = HaddockArgs { + argInterfaceFile :: Flag FilePath, + -- ^ Path to the interface file, relative to argOutputDir, required. + argPackageName :: Flag PackageIdentifier, + -- ^ Package name, required. + argHideModules :: (All,[ModuleName.ModuleName]), + -- ^ (Hide modules ?, modules to hide) + argIgnoreExports :: Any, + -- ^ Ignore export lists in modules? 
+ argLinkSource :: Flag (Template,Template,Template), + -- ^ (Template for modules, template for symbols, template for lines). + argCssFile :: Flag FilePath, + -- ^ Optional custom CSS file. + argContents :: Flag String, + -- ^ Optional URL to contents page. + argVerbose :: Any, + argOutput :: Flag [Output], + -- ^ HTML or Hoogle doc or both? Required. + argInterfaces :: [(FilePath, Maybe String)], + -- ^ [(Interface file, URL to the HTML docs for links)]. + argOutputDir :: Directory, + -- ^ Where to generate the documentation. + argTitle :: Flag String, + -- ^ Page title, required. + argPrologue :: Flag String, + -- ^ Prologue text, required. + argGhcOptions :: Flag (GhcOptions, Version), + -- ^ Additional flags to pass to GHC. + argGhcLibDir :: Flag FilePath, + -- ^ To find the correct GHC, required. + argTargets :: [FilePath] + -- ^ Modules to process. +} + +-- | The FilePath of a directory, it's a monoid under '()'. +newtype Directory = Dir { unDir' :: FilePath } deriving (Read,Show,Eq,Ord) + +unDir :: Directory -> FilePath +unDir = joinPath . filter (\p -> p /="./" && p /= ".") . splitPath . unDir' + +type Template = String + +data Output = Html | Hoogle + +-- ------------------------------------------------------------------------------ +-- Haddock support + +haddock :: PackageDescription + -> LocalBuildInfo + -> [PPSuffixHandler] + -> HaddockFlags + -> IO () +haddock pkg_descr _ _ haddockFlags + | not (hasLibs pkg_descr) + && not (fromFlag $ haddockExecutables haddockFlags) + && not (fromFlag $ haddockTestSuites haddockFlags) + && not (fromFlag $ haddockBenchmarks haddockFlags) = + warn (fromFlag $ haddockVerbosity haddockFlags) $ + "No documentation was generated as this package does not contain " + ++ "a library. Perhaps you want to use the --executables, --tests or" + ++ " --benchmarks flags." 
+ +haddock pkg_descr lbi suffixes flags = do + setupMessage verbosity "Running Haddock for" (packageId pkg_descr) + (confHaddock, version, _) <- + requireProgramVersion verbosity haddockProgram + (orLaterVersion (Version [2,0] [])) (withPrograms lbi) + + -- various sanity checks + when ( flag haddockHoogle + && version < Version [2,2] []) $ + die "haddock 2.0 and 2.1 do not support the --hoogle flag." + + haddockGhcVersionStr <- rawSystemProgramStdout verbosity confHaddock + ["--ghc-version"] + case (simpleParse haddockGhcVersionStr, compilerCompatVersion GHC comp) of + (Nothing, _) -> die "Could not get GHC version from Haddock" + (_, Nothing) -> die "Could not get GHC version from compiler" + (Just haddockGhcVersion, Just ghcVersion) + | haddockGhcVersion == ghcVersion -> return () + | otherwise -> die $ + "Haddock's internal GHC version must match the configured " + ++ "GHC version.\n" + ++ "The GHC version is " ++ display ghcVersion ++ " but " + ++ "haddock is using GHC version " ++ display haddockGhcVersion + + -- the tools match the requests, we can proceed + + initialBuildSteps (flag haddockDistPref) pkg_descr lbi verbosity + + when (flag haddockHscolour) $ + hscolour' (warn verbosity) pkg_descr lbi suffixes + (defaultHscolourFlags `mappend` haddockToHscolour flags) + + libdirArgs <- getGhcLibDir verbosity lbi + let commonArgs = mconcat + [ libdirArgs + , fromFlags (haddockTemplateEnv lbi (packageId pkg_descr)) flags + , fromPackageDescription pkg_descr ] + + let pre c = preprocessComponent pkg_descr c lbi False verbosity suffixes + withAllComponentsInBuildOrder pkg_descr lbi $ \component clbi -> do + pre component + let + doExe com = case (compToExe com) of + Just exe -> do + withTempDirectoryEx verbosity tmpFileOpts (buildDir lbi) "tmp" $ + \tmp -> do + exeArgs <- fromExecutable verbosity tmp lbi exe clbi htmlTemplate + version + let exeArgs' = commonArgs `mappend` exeArgs + runHaddock verbosity tmpFileOpts comp confHaddock exeArgs' + Nothing -> do + warn 
(fromFlag $ haddockVerbosity flags) + "Unsupported component, skipping..." + return () + case component of + CLib lib -> do + withTempDirectoryEx verbosity tmpFileOpts (buildDir lbi) "tmp" $ + \tmp -> do + libArgs <- fromLibrary verbosity tmp lbi lib clbi htmlTemplate + version + let libArgs' = commonArgs `mappend` libArgs + runHaddock verbosity tmpFileOpts comp confHaddock libArgs' + CExe _ -> when (flag haddockExecutables) $ doExe component + CTest _ -> when (flag haddockTestSuites) $ doExe component + CBench _ -> when (flag haddockBenchmarks) $ doExe component + + forM_ (extraDocFiles pkg_descr) $ \ fpath -> do + files <- matchFileGlob fpath + forM_ files $ copyFileTo verbosity (unDir $ argOutputDir commonArgs) + where + verbosity = flag haddockVerbosity + keepTempFiles = flag haddockKeepTempFiles + comp = compiler lbi + tmpFileOpts = defaultTempFileOptions { optKeepTempFiles = keepTempFiles } + flag f = fromFlag $ f flags + htmlTemplate = fmap toPathTemplate . flagToMaybe . haddockHtmlLocation + $ flags + +-- ------------------------------------------------------------------------------ +-- Contributions to HaddockArgs. + +fromFlags :: PathTemplateEnv -> HaddockFlags -> HaddockArgs +fromFlags env flags = + mempty { + argHideModules = (maybe mempty (All . not) + $ flagToMaybe (haddockInternal flags), mempty), + argLinkSource = if fromFlag (haddockHscolour flags) + then Flag ("src/%{MODULE/./-}.html" + ,"src/%{MODULE/./-}.html#%{NAME}" + ,"src/%{MODULE/./-}.html#line-%{LINE}") + else NoFlag, + argCssFile = haddockCss flags, + argContents = fmap (fromPathTemplate . substPathTemplate env) + (haddockContents flags), + argVerbose = maybe mempty (Any . (>= deafening)) + . flagToMaybe $ haddockVerbosity flags, + argOutput = + Flag $ case [ Html | Flag True <- [haddockHtml flags] ] ++ + [ Hoogle | Flag True <- [haddockHoogle flags] ] + of [] -> [ Html ] + os -> os, + argOutputDir = maybe mempty Dir . 
flagToMaybe $ haddockDistPref flags + } + +fromPackageDescription :: PackageDescription -> HaddockArgs +fromPackageDescription pkg_descr = + mempty { argInterfaceFile = Flag $ haddockName pkg_descr, + argPackageName = Flag $ packageId $ pkg_descr, + argOutputDir = Dir $ "doc" "html" + display (packageName pkg_descr), + argPrologue = Flag $ if null desc then synopsis pkg_descr + else desc, + argTitle = Flag $ showPkg ++ subtitle + } + where + desc = PD.description pkg_descr + showPkg = display (packageId pkg_descr) + subtitle | null (synopsis pkg_descr) = "" + | otherwise = ": " ++ synopsis pkg_descr + +componentGhcOptions :: Verbosity -> LocalBuildInfo + -> BuildInfo -> ComponentLocalBuildInfo -> FilePath + -> GhcOptions +componentGhcOptions verbosity lbi bi clbi odir = + let f = case compilerFlavor (compiler lbi) of + GHC -> GHC.componentGhcOptions + GHCJS -> GHCJS.componentGhcOptions + _ -> error $ + "Distribution.Simple.Haddock.componentGhcOptions:" ++ + "haddock only supports GHC and GHCJS" + in f verbosity lbi bi clbi odir + +fromLibrary :: Verbosity + -> FilePath + -> LocalBuildInfo -> Library -> ComponentLocalBuildInfo + -> Maybe PathTemplate -- ^ template for HTML location + -> Version + -> IO HaddockArgs +fromLibrary verbosity tmp lbi lib clbi htmlTemplate haddockVersion = do + inFiles <- map snd `fmap` getLibSourceFiles lbi lib + ifaceArgs <- getInterfaces verbosity lbi clbi htmlTemplate + let vanillaOpts = (componentGhcOptions normal lbi bi clbi (buildDir lbi)) { + -- Noooooooooo!!!!!111 + -- haddock stomps on our precious .hi + -- and .o files. Workaround by telling + -- haddock to write them elsewhere. 
+ ghcOptObjDir = toFlag tmp, + ghcOptHiDir = toFlag tmp, + ghcOptStubDir = toFlag tmp, + ghcOptPackageKey = toFlag $ pkgKey lbi + } `mappend` getGhcCppOpts haddockVersion bi + sharedOpts = vanillaOpts { + ghcOptDynLinkMode = toFlag GhcDynamicOnly, + ghcOptFPic = toFlag True, + ghcOptHiSuffix = toFlag "dyn_hi", + ghcOptObjSuffix = toFlag "dyn_o", + ghcOptExtra = + toNubListR $ hcSharedOptions GHC bi + + } + opts <- if withVanillaLib lbi + then return vanillaOpts + else if withSharedLib lbi + then return sharedOpts + else die $ "Must have vanilla or shared libraries " + ++ "enabled in order to run haddock" + ghcVersion <- maybe (die "Compiler has no GHC version") + return + (compilerCompatVersion GHC (compiler lbi)) + + return ifaceArgs { + argHideModules = (mempty,otherModules $ bi), + argGhcOptions = toFlag (opts, ghcVersion), + argTargets = inFiles + } + where + bi = libBuildInfo lib + +fromExecutable :: Verbosity + -> FilePath + -> LocalBuildInfo -> Executable -> ComponentLocalBuildInfo + -> Maybe PathTemplate -- ^ template for HTML location + -> Version + -> IO HaddockArgs +fromExecutable verbosity tmp lbi exe clbi htmlTemplate haddockVersion = do + inFiles <- map snd `fmap` getExeSourceFiles lbi exe + ifaceArgs <- getInterfaces verbosity lbi clbi htmlTemplate + let vanillaOpts = (componentGhcOptions normal lbi bi clbi (buildDir lbi)) { + -- Noooooooooo!!!!!111 + -- haddock stomps on our precious .hi + -- and .o files. Workaround by telling + -- haddock to write them elsewhere. 
+ ghcOptObjDir = toFlag tmp, + ghcOptHiDir = toFlag tmp, + ghcOptStubDir = toFlag tmp + } `mappend` getGhcCppOpts haddockVersion bi + sharedOpts = vanillaOpts { + ghcOptDynLinkMode = toFlag GhcDynamicOnly, + ghcOptFPic = toFlag True, + ghcOptHiSuffix = toFlag "dyn_hi", + ghcOptObjSuffix = toFlag "dyn_o", + ghcOptExtra = + toNubListR $ hcSharedOptions GHC bi + } + opts <- if withVanillaLib lbi + then return vanillaOpts + else if withSharedLib lbi + then return sharedOpts + else die $ "Must have vanilla or shared libraries " + ++ "enabled in order to run haddock" + ghcVersion <- maybe (die "Compiler has no GHC version") + return + (compilerCompatVersion GHC (compiler lbi)) + + return ifaceArgs { + argGhcOptions = toFlag (opts, ghcVersion), + argOutputDir = Dir (exeName exe), + argTitle = Flag (exeName exe), + argTargets = inFiles + } + where + bi = buildInfo exe + +compToExe :: Component -> Maybe Executable +compToExe comp = + case comp of + CTest test@TestSuite { testInterface = TestSuiteExeV10 _ f } -> + Just Executable { + exeName = testName test, + modulePath = f, + buildInfo = testBuildInfo test + } + CBench bench@Benchmark { benchmarkInterface = BenchmarkExeV10 _ f } -> + Just Executable { + exeName = benchmarkName bench, + modulePath = f, + buildInfo = benchmarkBuildInfo bench + } + CExe exe -> Just exe + _ -> Nothing + +getInterfaces :: Verbosity + -> LocalBuildInfo + -> ComponentLocalBuildInfo + -> Maybe PathTemplate -- ^ template for HTML location + -> IO HaddockArgs +getInterfaces verbosity lbi clbi htmlTemplate = do + (packageFlags, warnings) <- haddockPackageFlags lbi clbi htmlTemplate + maybe (return ()) (warn verbosity) warnings + return $ mempty { + argInterfaces = packageFlags + } + +getGhcCppOpts :: Version + -> BuildInfo + -> GhcOptions +getGhcCppOpts haddockVersion bi = + mempty { + ghcOptExtensions = toNubListR [EnableExtension CPP | needsCpp], + ghcOptCppOptions = toNubListR defines + } + where + needsCpp = EnableExtension CPP `elem` 
usedExtensions bi + defines = [haddockVersionMacro] + haddockVersionMacro = "-D__HADDOCK_VERSION__=" + ++ show (v1 * 1000 + v2 * 10 + v3) + where + [v1, v2, v3] = take 3 $ versionBranch haddockVersion ++ [0,0] + +getGhcLibDir :: Verbosity -> LocalBuildInfo + -> IO HaddockArgs +getGhcLibDir verbosity lbi = do + l <- case compilerFlavor (compiler lbi) of + GHC -> GHC.getLibDir verbosity lbi + GHCJS -> GHCJS.getLibDir verbosity lbi + _ -> error "haddock only supports GHC and GHCJS" + return $ mempty { argGhcLibDir = Flag l } + +-- ------------------------------------------------------------------------------ +-- | Call haddock with the specified arguments. +runHaddock :: Verbosity + -> TempFileOptions + -> Compiler + -> ConfiguredProgram + -> HaddockArgs + -> IO () +runHaddock verbosity tmpFileOpts comp confHaddock args = do + let haddockVersion = fromMaybe (error "unable to determine haddock version") + (programVersion confHaddock) + renderArgs verbosity tmpFileOpts haddockVersion comp args $ + \(flags,result)-> do + + rawSystemProgram verbosity confHaddock flags + + notice verbosity $ "Documentation created: " ++ result + + +renderArgs :: Verbosity + -> TempFileOptions + -> Version + -> Compiler + -> HaddockArgs + -> (([String], FilePath) -> IO a) + -> IO a +renderArgs verbosity tmpFileOpts version comp args k = do + let haddockSupportsUTF8 = version >= Version [2,14,4] [] + haddockSupportsResponseFiles = version > Version [2,16,2] [] + createDirectoryIfMissingVerbose verbosity True outputDir + withTempFileEx tmpFileOpts outputDir "haddock-prologue.txt" $ + \prologueFileName h -> do + do + when haddockSupportsUTF8 (hSetEncoding h utf8) + hPutStrLn h $ fromFlag $ argPrologue args + hClose h + let pflag = "--prologue=" ++ prologueFileName + renderedArgs = pflag : renderPureArgs version comp args + if haddockSupportsResponseFiles + then + withTempFileEx tmpFileOpts outputDir "haddock-response.txt" $ + \responseFileName hf -> do + when haddockSupportsUTF8 (hSetEncoding 
hf utf8) + hPutStr hf $ unlines $ map escapeArg renderedArgs + hClose hf + let respFile = "@" ++ responseFileName + k ([respFile], result) + else + k (renderedArgs, result) + where + outputDir = (unDir $ argOutputDir args) + result = intercalate ", " + . map (\o -> outputDir + case o of + Html -> "index.html" + Hoogle -> pkgstr <.> "txt") + $ arg argOutput + where + pkgstr = display $ packageName pkgid + pkgid = arg argPackageName + arg f = fromFlag $ f args + -- Support a gcc-like response file syntax. Each separate + -- argument and its possible parameter(s), will be separated in the + -- response file by an actual newline; all other whitespace, + -- single quotes, double quotes, and the character used for escaping + -- (backslash) are escaped. The called program will need to do a similar + -- inverse operation to de-escape and re-constitute the argument list. + escape cs c + | isSpace c + || '\\' == c + || '\'' == c + || '"' == c = c:'\\':cs -- n.b., our caller must reverse the result + | otherwise = c:cs + escapeArg = reverse . foldl' escape [] + +renderPureArgs :: Version -> Compiler -> HaddockArgs -> [String] +renderPureArgs version comp args = concat + [ (:[]) . (\f -> "--dump-interface="++ unDir (argOutputDir args) f) + . fromFlag . argInterfaceFile $ args + + , if isVersion 2 16 + then (\pkg -> [ "--package-name=" ++ display (pkgName pkg) + , "--package-version="++display (pkgVersion pkg) + ]) + . fromFlag . argPackageName $ args + else [] + + , (\(All b,xs) -> bool (map (("--hide=" ++). display) xs) [] b) + . argHideModules $ args + + , bool ["--ignore-all-exports"] [] . getAny . argIgnoreExports $ args + + , maybe [] (\(m,e,l) -> + ["--source-module=" ++ m + ,"--source-entity=" ++ e] + ++ if isVersion 2 14 then ["--source-entity-line=" ++ l] + else [] + ) . flagToMaybe . argLinkSource $ args + + , maybe [] ((:[]) . ("--css="++)) . flagToMaybe . argCssFile $ args + + , maybe [] ((:[]) . ("--use-contents="++)) . flagToMaybe . 
argContents $ args + + , bool [] [verbosityFlag] . getAny . argVerbose $ args + + , map (\o -> case o of Hoogle -> "--hoogle"; Html -> "--html") + . fromFlag . argOutput $ args + + , renderInterfaces . argInterfaces $ args + + , (:[]) . ("--odir="++) . unDir . argOutputDir $ args + + , (:[]) . ("--title="++) + . (bool (++" (internal documentation)") + id (getAny $ argIgnoreExports args)) + . fromFlag . argTitle $ args + + , [ "--optghc=" ++ opt | (opts, _ghcVer) <- flagToList (argGhcOptions args) + , opt <- renderGhcOptions comp opts ] + + , maybe [] (\l -> ["-B"++l]) $ + flagToMaybe (argGhcLibDir args) -- error if Nothing? + + , argTargets $ args + ] + where + renderInterfaces = + map (\(i,mh) -> "--read-interface=" ++ + maybe "" (++",") mh ++ i) + bool a b c = if c then a else b + isVersion major minor = version >= Version [major,minor] [] + verbosityFlag + | isVersion 2 5 = "--verbosity=1" + | otherwise = "--verbose" + +--------------------------------------------------------------------------------- + +-- | Given a list of 'InstalledPackageInfo's, return a list of interfaces and +-- HTML paths, and an optional warning for packages with missing documentation. +haddockPackagePaths :: [InstalledPackageInfo] + -> Maybe (InstalledPackageInfo -> FilePath) + -> IO ([(FilePath, Maybe FilePath)], Maybe String) +haddockPackagePaths ipkgs mkHtmlPath = do + interfaces <- sequence + [ case interfaceAndHtmlPath ipkg of + Nothing -> return (Left (packageId ipkg)) + Just (interface, html) -> do + exists <- doesFileExist interface + if exists + then return (Right (interface, html)) + else return (Left pkgid) + | ipkg <- ipkgs, let pkgid = packageId ipkg + , pkgName pkgid `notElem` noHaddockWhitelist + ] + + let missing = [ pkgid | Left pkgid <- interfaces ] + warning = "The documentation for the following packages are not " + ++ "installed. 
No links will be generated to these packages: " + ++ intercalate ", " (map display missing) + flags = rights interfaces + + return (flags, if null missing then Nothing else Just warning) + + where + -- Don't warn about missing documentation for these packages. See #1231. + noHaddockWhitelist = map PackageName [ "rts" ] + + -- Actually extract interface and HTML paths from an 'InstalledPackageInfo'. + interfaceAndHtmlPath :: InstalledPackageInfo + -> Maybe (FilePath, Maybe FilePath) + interfaceAndHtmlPath pkg = do + interface <- listToMaybe (InstalledPackageInfo.haddockInterfaces pkg) + html <- case mkHtmlPath of + Nothing -> fmap fixFileUrl + (listToMaybe (InstalledPackageInfo.haddockHTMLs pkg)) + Just mkPath -> Just (mkPath pkg) + return (interface, if null html then Nothing else Just html) + where + -- The 'haddock-html' field in the hc-pkg output is often set as a + -- native path, but we need it as a URL. See #1064. + fixFileUrl f | isAbsolute f = "file://" ++ f + | otherwise = f + +haddockPackageFlags :: LocalBuildInfo + -> ComponentLocalBuildInfo + -> Maybe PathTemplate + -> IO ([(FilePath, Maybe FilePath)], Maybe String) +haddockPackageFlags lbi clbi htmlTemplate = do + let allPkgs = installedPkgs lbi + directDeps = map fst (componentPackageDeps clbi) + transitiveDeps <- case dependencyClosure allPkgs directDeps of + Left x -> return x + Right inf -> die $ "internal error when calculating transitive " + ++ "package dependencies.\nDebug info: " ++ show inf + haddockPackagePaths (PackageIndex.allPackages transitiveDeps) mkHtmlPath + where + mkHtmlPath = fmap expandTemplateVars htmlTemplate + expandTemplateVars tmpl pkg = + fromPathTemplate . 
substPathTemplate (env pkg) $ tmpl + env pkg = haddockTemplateEnv lbi (packageId pkg) + + +haddockTemplateEnv :: LocalBuildInfo -> PackageIdentifier -> PathTemplateEnv +haddockTemplateEnv lbi pkg_id = + (PrefixVar, prefix (installDirTemplates lbi)) + : initialPathTemplateEnv pkg_id (pkgKey lbi) (compilerInfo (compiler lbi)) + (hostPlatform lbi) + +-- ------------------------------------------------------------------------------ +-- hscolour support. + +hscolour :: PackageDescription + -> LocalBuildInfo + -> [PPSuffixHandler] + -> HscolourFlags + -> IO () +hscolour pkg_descr lbi suffixes flags = do + -- we preprocess even if hscolour won't be found on the machine + -- will this upset someone? + initialBuildSteps distPref pkg_descr lbi verbosity + hscolour' die pkg_descr lbi suffixes flags + where + verbosity = fromFlag (hscolourVerbosity flags) + distPref = fromFlag $ hscolourDistPref flags + +hscolour' :: (String -> IO ()) -- ^ Called when the 'hscolour' exe is not found. + -> PackageDescription + -> LocalBuildInfo + -> [PPSuffixHandler] + -> HscolourFlags + -> IO () +hscolour' onNoHsColour pkg_descr lbi suffixes flags = + either onNoHsColour (\(hscolourProg, _, _) -> go hscolourProg) =<< + lookupProgramVersion verbosity hscolourProgram + (orLaterVersion (Version [1,8] [])) (withPrograms lbi) + where + go :: ConfiguredProgram -> IO () + go hscolourProg = do + setupMessage verbosity "Running hscolour for" (packageId pkg_descr) + createDirectoryIfMissingVerbose verbosity True $ + hscolourPref distPref pkg_descr + + let pre c = preprocessComponent pkg_descr c lbi False verbosity suffixes + withAllComponentsInBuildOrder pkg_descr lbi $ \comp _ -> do + pre comp + let + doExe com = case (compToExe com) of + Just exe -> do + let outputDir = hscolourPref distPref pkg_descr + exeName exe "src" + runHsColour hscolourProg outputDir =<< getExeSourceFiles lbi exe + Nothing -> do + warn (fromFlag $ hscolourVerbosity flags) + "Unsupported component, skipping..." 
+ return () + case comp of + CLib lib -> do + let outputDir = hscolourPref distPref pkg_descr "src" + runHsColour hscolourProg outputDir =<< getLibSourceFiles lbi lib + CExe _ -> when (fromFlag (hscolourExecutables flags)) $ doExe comp + CTest _ -> when (fromFlag (hscolourTestSuites flags)) $ doExe comp + CBench _ -> when (fromFlag (hscolourBenchmarks flags)) $ doExe comp + + stylesheet = flagToMaybe (hscolourCSS flags) + + verbosity = fromFlag (hscolourVerbosity flags) + distPref = fromFlag (hscolourDistPref flags) + + runHsColour prog outputDir moduleFiles = do + createDirectoryIfMissingVerbose verbosity True outputDir + + case stylesheet of -- copy the CSS file + Nothing | programVersion prog >= Just (Version [1,9] []) -> + rawSystemProgram verbosity prog + ["-print-css", "-o" ++ outputDir "hscolour.css"] + | otherwise -> return () + Just s -> copyFileVerbose verbosity s (outputDir "hscolour.css") + + forM_ moduleFiles $ \(m, inFile) -> + rawSystemProgram verbosity prog + ["-css", "-anchor", "-o" ++ outFile m, inFile] + where + outFile m = outputDir + intercalate "-" (ModuleName.components m) <.> "html" + +haddockToHscolour :: HaddockFlags -> HscolourFlags +haddockToHscolour flags = + HscolourFlags { + hscolourCSS = haddockHscolourCss flags, + hscolourExecutables = haddockExecutables flags, + hscolourTestSuites = haddockTestSuites flags, + hscolourBenchmarks = haddockBenchmarks flags, + hscolourVerbosity = haddockVerbosity flags, + hscolourDistPref = haddockDistPref flags + } +--------------------------------------------------------------------------------- +-- TODO these should be moved elsewhere. 
+ +getLibSourceFiles :: LocalBuildInfo + -> Library + -> IO [(ModuleName.ModuleName, FilePath)] +getLibSourceFiles lbi lib = getSourceFiles searchpaths modules + where + bi = libBuildInfo lib + modules = PD.exposedModules lib ++ otherModules bi + searchpaths = autogenModulesDir lbi : buildDir lbi : hsSourceDirs bi + +getExeSourceFiles :: LocalBuildInfo + -> Executable + -> IO [(ModuleName.ModuleName, FilePath)] +getExeSourceFiles lbi exe = do + moduleFiles <- getSourceFiles searchpaths modules + srcMainPath <- findFile (hsSourceDirs bi) (modulePath exe) + return ((ModuleName.main, srcMainPath) : moduleFiles) + where + bi = buildInfo exe + modules = otherModules bi + searchpaths = autogenModulesDir lbi : exeBuildDir lbi exe : hsSourceDirs bi + +getSourceFiles :: [FilePath] + -> [ModuleName.ModuleName] + -> IO [(ModuleName.ModuleName, FilePath)] +getSourceFiles dirs modules = flip mapM modules $ \m -> fmap ((,) m) $ + findFileWithExtension ["hs", "lhs"] dirs (ModuleName.toFilePath m) + >>= maybe (notFound m) (return . normalise) + where + notFound module_ = die $ "can't find source for module " ++ display module_ + +-- | The directory where we put build results for an executable +exeBuildDir :: LocalBuildInfo -> Executable -> FilePath +exeBuildDir lbi exe = buildDir lbi exeName exe exeName exe ++ "-tmp" + +-- ------------------------------------------------------------------------------ +-- Boilerplate Monoid instance. 
+instance Monoid HaddockArgs where + mempty = HaddockArgs { + argInterfaceFile = mempty, + argPackageName = mempty, + argHideModules = mempty, + argIgnoreExports = mempty, + argLinkSource = mempty, + argCssFile = mempty, + argContents = mempty, + argVerbose = mempty, + argOutput = mempty, + argInterfaces = mempty, + argOutputDir = mempty, + argTitle = mempty, + argPrologue = mempty, + argGhcOptions = mempty, + argGhcLibDir = mempty, + argTargets = mempty + } + mappend a b = HaddockArgs { + argInterfaceFile = mult argInterfaceFile, + argPackageName = mult argPackageName, + argHideModules = mult argHideModules, + argIgnoreExports = mult argIgnoreExports, + argLinkSource = mult argLinkSource, + argCssFile = mult argCssFile, + argContents = mult argContents, + argVerbose = mult argVerbose, + argOutput = mult argOutput, + argInterfaces = mult argInterfaces, + argOutputDir = mult argOutputDir, + argTitle = mult argTitle, + argPrologue = mult argPrologue, + argGhcOptions = mult argGhcOptions, + argGhcLibDir = mult argGhcLibDir, + argTargets = mult argTargets + } + where mult f = f a `mappend` f b + +instance Monoid Directory where + mempty = Dir "." 
+ mappend (Dir m) (Dir n) = Dir $ m n diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/HaskellSuite.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/HaskellSuite.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/HaskellSuite.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/HaskellSuite.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,228 @@ +{-# LANGUAGE CPP #-} +module Distribution.Simple.HaskellSuite where + +import Control.Monad +#if __GLASGOW_HASKELL__ < 710 +import Control.Applicative +#endif +import Data.Maybe +import Data.Version +import qualified Data.Map as M (empty) + +import Distribution.Simple.Program +import Distribution.Simple.Compiler as Compiler +import Distribution.Simple.Utils +import Distribution.Simple.BuildPaths +import Distribution.Verbosity +import Distribution.Text +import Distribution.Package +import Distribution.InstalledPackageInfo hiding (includeDirs) +import Distribution.Simple.PackageIndex as PackageIndex +import Distribution.PackageDescription +import Distribution.Simple.LocalBuildInfo +import Distribution.System (Platform) +import Distribution.Compat.Exception +import Language.Haskell.Extension +import Distribution.Simple.Program.Builtin + (haskellSuiteProgram, haskellSuitePkgProgram) + +configure + :: Verbosity -> Maybe FilePath -> Maybe FilePath + -> ProgramConfiguration -> IO (Compiler, Maybe Platform, ProgramConfiguration) +configure verbosity mbHcPath hcPkgPath conf0 = do + + -- We have no idea how a haskell-suite tool is named, so we require at + -- least some information from the user. 
+ hcPath <- + let msg = "You have to provide name or path of a haskell-suite tool (-w PATH)" + in maybe (die msg) return mbHcPath + + when (isJust hcPkgPath) $ + warn verbosity "--with-hc-pkg option is ignored for haskell-suite" + + (comp, confdCompiler, conf1) <- configureCompiler hcPath conf0 + + -- Update our pkg tool. It uses the same executable as the compiler, but + -- all command start with "pkg" + (confdPkg, _) <- requireProgram verbosity haskellSuitePkgProgram conf1 + let conf2 = + updateProgram + confdPkg + { programLocation = programLocation confdCompiler + , programDefaultArgs = ["pkg"] + } + conf1 + + return (comp, Nothing, conf2) + + where + configureCompiler hcPath conf0' = do + let + haskellSuiteProgram' = + haskellSuiteProgram + { programFindLocation = \v _p -> findProgramLocation v hcPath } + + -- NB: cannot call requireProgram right away — it'd think that + -- the program is already configured and won't reconfigure it again. + -- Instead, call configureProgram directly first. + conf1 <- configureProgram verbosity haskellSuiteProgram' conf0' + (confdCompiler, conf2) <- requireProgram verbosity haskellSuiteProgram' conf1 + + extensions <- getExtensions verbosity confdCompiler + languages <- getLanguages verbosity confdCompiler + (compName, compVersion) <- + getCompilerVersion verbosity confdCompiler + + let + comp = Compiler { + compilerId = CompilerId (HaskellSuite compName) compVersion, + compilerAbiTag = Compiler.NoAbiTag, + compilerCompat = [], + compilerLanguages = languages, + compilerExtensions = extensions, + compilerProperties = M.empty + } + + return (comp, confdCompiler, conf2) + +hstoolVersion :: Verbosity -> FilePath -> IO (Maybe Version) +hstoolVersion = findProgramVersion "--hspkg-version" id + +numericVersion :: Verbosity -> FilePath -> IO (Maybe Version) +numericVersion = findProgramVersion "--compiler-version" (last . 
words) + +getCompilerVersion :: Verbosity -> ConfiguredProgram -> IO (String, Version) +getCompilerVersion verbosity prog = do + output <- rawSystemStdout verbosity (programPath prog) ["--compiler-version"] + let + parts = words output + name = concat $ init parts -- there shouldn't be any spaces in the name anyway + versionStr = last parts + version <- + maybe (die "haskell-suite: couldn't determine compiler version") return $ + simpleParse versionStr + return (name, version) + +getExtensions :: Verbosity -> ConfiguredProgram -> IO [(Extension, Compiler.Flag)] +getExtensions verbosity prog = do + extStrs <- + lines <$> + rawSystemStdout verbosity (programPath prog) ["--supported-extensions"] + return + [ (ext, "-X" ++ display ext) | Just ext <- map simpleParse extStrs ] + +getLanguages :: Verbosity -> ConfiguredProgram -> IO [(Language, Compiler.Flag)] +getLanguages verbosity prog = do + langStrs <- + lines <$> + rawSystemStdout verbosity (programPath prog) ["--supported-languages"] + return + [ (ext, "-G" ++ display ext) | Just ext <- map simpleParse langStrs ] + +-- Other compilers do some kind of a packagedb stack check here. Not sure +-- if we need something like that as well. +getInstalledPackages :: Verbosity -> PackageDBStack -> ProgramConfiguration + -> IO InstalledPackageIndex +getInstalledPackages verbosity packagedbs conf = + liftM (PackageIndex.fromList . concat) $ forM packagedbs $ \packagedb -> + do str <- + getDbProgramOutput verbosity haskellSuitePkgProgram conf + ["dump", packageDbOpt packagedb] + `catchExit` \_ -> die $ "pkg dump failed" + case parsePackages str of + Right ok -> return ok + _ -> die "failed to parse output of 'pkg dump'" + + where + parsePackages str = + let parsed = map parseInstalledPackageInfo (splitPkgs str) + in case [ msg | ParseFailed msg <- parsed ] of + [] -> Right [ pkg | ParseOk _ pkg <- parsed ] + msgs -> Left msgs + + splitPkgs :: String -> [String] + splitPkgs = map unlines . splitWith ("---" ==) . 
lines + where + splitWith :: (a -> Bool) -> [a] -> [[a]] + splitWith p xs = ys : case zs of + [] -> [] + _:ws -> splitWith p ws + where (ys,zs) = break p xs + +buildLib + :: Verbosity -> PackageDescription -> LocalBuildInfo + -> Library -> ComponentLocalBuildInfo -> IO () +buildLib verbosity pkg_descr lbi lib clbi = do + -- In future, there should be a mechanism for the compiler to request any + -- number of the above parameters (or their parts) — in particular, + -- pieces of PackageDescription. + -- + -- For now, we only pass those that we know are used. + + let odir = buildDir lbi + bi = libBuildInfo lib + srcDirs = hsSourceDirs bi ++ [odir] + dbStack = withPackageDB lbi + language = fromMaybe Haskell98 (defaultLanguage bi) + conf = withPrograms lbi + pkgid = packageId pkg_descr + + runDbProgram verbosity haskellSuiteProgram conf $ + [ "compile", "--build-dir", odir ] ++ + concat [ ["-i", d] | d <- srcDirs ] ++ + concat [ ["-I", d] | d <- [autogenModulesDir lbi, odir] ++ includeDirs bi ] ++ + [ packageDbOpt pkgDb | pkgDb <- dbStack ] ++ + [ "--package-name", display pkgid ] ++ + concat [ ["--package-id", display ipkgid ] + | (ipkgid, _) <- componentPackageDeps clbi ] ++ + ["-G", display language] ++ + concat [ ["-X", display ex] | ex <- usedExtensions bi ] ++ + cppOptions (libBuildInfo lib) ++ + [ display modu | modu <- libModules lib ] + + + +installLib + :: Verbosity + -> LocalBuildInfo + -> FilePath -- ^install location + -> FilePath -- ^install location for dynamic libraries + -> FilePath -- ^Build location + -> PackageDescription + -> Library + -> IO () +installLib verbosity lbi targetDir dynlibTargetDir builtDir pkg lib = do + let conf = withPrograms lbi + runDbProgram verbosity haskellSuitePkgProgram conf $ + [ "install-library" + , "--build-dir", builtDir + , "--target-dir", targetDir + , "--dynlib-target-dir", dynlibTargetDir + , "--package-id", display $ packageId pkg + ] ++ map display (libModules lib) + +registerPackage + :: Verbosity + -> 
InstalledPackageInfo + -> PackageDescription + -> LocalBuildInfo + -> Bool + -> PackageDBStack + -> IO () +registerPackage verbosity installedPkgInfo _pkg lbi _inplace packageDbs = do + (hspkg, _) <- requireProgram verbosity haskellSuitePkgProgram (withPrograms lbi) + + runProgramInvocation verbosity $ + (programInvocation hspkg + ["update", packageDbOpt $ last packageDbs]) + { progInvokeInput = Just $ showInstalledPackageInfo installedPkgInfo } + +initPackageDB :: Verbosity -> ProgramConfiguration -> FilePath -> IO () +initPackageDB verbosity conf dbPath = + runDbProgram verbosity haskellSuitePkgProgram conf + ["init", dbPath] + +packageDbOpt :: PackageDB -> String +packageDbOpt GlobalPackageDB = "--global" +packageDbOpt UserPackageDB = "--user" +packageDbOpt (SpecificPackageDB db) = "--package-db=" ++ db diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Hpc.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Hpc.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Hpc.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Hpc.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,141 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.Hpc +-- Copyright : Thomas Tuegel 2011 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This module provides functions for locating various HPC-related paths and +-- a function for adding the necessary options to a PackageDescription to +-- build test suites with HPC enabled. + +module Distribution.Simple.Hpc + ( Way(..), guessWay + , htmlDir + , mixDir + , tixDir + , tixFilePath + , markupPackage + , markupTest + ) where + +import Control.Monad ( when ) +import Distribution.ModuleName ( main ) +import Distribution.PackageDescription + ( TestSuite(..) 
+ , testModules + ) +import Distribution.Simple.LocalBuildInfo ( LocalBuildInfo(..) ) +import Distribution.Simple.Program + ( hpcProgram + , requireProgramVersion + ) +import Distribution.Simple.Program.Hpc ( markup, union ) +import Distribution.Simple.Utils ( notice ) +import Distribution.Version ( anyVersion ) +import Distribution.Verbosity ( Verbosity() ) +import System.Directory ( createDirectoryIfMissing, doesFileExist ) +import System.FilePath + +-- ------------------------------------------------------------------------- +-- Haskell Program Coverage + +data Way = Vanilla | Prof | Dyn + deriving (Bounded, Enum, Eq, Read, Show) + +hpcDir :: FilePath -- ^ \"dist/\" prefix + -> Way + -> FilePath -- ^ Directory containing component's HPC .mix files +hpcDir distPref way = distPref "hpc" wayDir + where + wayDir = case way of + Vanilla -> "vanilla" + Prof -> "prof" + Dyn -> "dyn" + +mixDir :: FilePath -- ^ \"dist/\" prefix + -> Way + -> FilePath -- ^ Component name + -> FilePath -- ^ Directory containing test suite's .mix files +mixDir distPref way name = hpcDir distPref way "mix" name + +tixDir :: FilePath -- ^ \"dist/\" prefix + -> Way + -> FilePath -- ^ Component name + -> FilePath -- ^ Directory containing test suite's .tix files +tixDir distPref way name = hpcDir distPref way "tix" name + +-- | Path to the .tix file containing a test suite's sum statistics. +tixFilePath :: FilePath -- ^ \"dist/\" prefix + -> Way + -> FilePath -- ^ Component name + -> FilePath -- ^ Path to test suite's .tix file +tixFilePath distPref way name = tixDir distPref way name name <.> "tix" + +htmlDir :: FilePath -- ^ \"dist/\" prefix + -> Way + -> FilePath -- ^ Component name + -> FilePath -- ^ Path to test suite's HTML markup directory +htmlDir distPref way name = hpcDir distPref way "html" name + +-- | Attempt to guess the way the test suites in this package were compiled +-- and linked with the library so the correct module interfaces are found. 
+guessWay :: LocalBuildInfo -> Way +guessWay lbi + | withProfExe lbi = Prof + | withDynExe lbi = Dyn + | otherwise = Vanilla + +-- | Generate the HTML markup for a test suite. +markupTest :: Verbosity + -> LocalBuildInfo + -> FilePath -- ^ \"dist/\" prefix + -> String -- ^ Library name + -> TestSuite + -> IO () +markupTest verbosity lbi distPref libName suite = do + tixFileExists <- doesFileExist $ tixFilePath distPref way $ testName suite + when tixFileExists $ do + -- behaviour of 'markup' depends on version, so we need *a* version + -- but no particular one + (hpc, hpcVer, _) <- requireProgramVersion verbosity + hpcProgram anyVersion (withPrograms lbi) + let htmlDir_ = htmlDir distPref way $ testName suite + markup hpc hpcVer verbosity + (tixFilePath distPref way $ testName suite) mixDirs + htmlDir_ + (testModules suite ++ [ main ]) + notice verbosity $ "Test coverage report written to " + ++ htmlDir_ "hpc_index" <.> "html" + where + way = guessWay lbi + mixDirs = map (mixDir distPref way) [ testName suite, libName ] + +-- | Generate the HTML markup for all of a package's test suites. +markupPackage :: Verbosity + -> LocalBuildInfo + -> FilePath -- ^ \"dist/\" prefix + -> String -- ^ Library name + -> [TestSuite] + -> IO () +markupPackage verbosity lbi distPref libName suites = do + let tixFiles = map (tixFilePath distPref way . 
testName) suites + tixFilesExist <- mapM doesFileExist tixFiles + when (and tixFilesExist) $ do + -- behaviour of 'markup' depends on version, so we need *a* version + -- but no particular one + (hpc, hpcVer, _) <- requireProgramVersion verbosity + hpcProgram anyVersion (withPrograms lbi) + let outFile = tixFilePath distPref way libName + htmlDir' = htmlDir distPref way libName + excluded = concatMap testModules suites ++ [ main ] + createDirectoryIfMissing True $ takeDirectory outFile + union hpc verbosity tixFiles outFile excluded + markup hpc hpcVer verbosity outFile mixDirs htmlDir' excluded + notice verbosity $ "Package coverage report written to " + ++ htmlDir' "hpc_index.html" + where + way = guessWay lbi + mixDirs = map (mixDir distPref way) $ libName : map testName suites diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/InstallDirs.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/InstallDirs.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/InstallDirs.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/InstallDirs.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,608 @@ +{-# LANGUAGE CPP, ForeignFunctionInterface #-} +{-# LANGUAGE DeriveGeneric #-} + +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.InstallDirs +-- Copyright : Isaac Jones 2003-2004 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This manages everything to do with where files get installed (though does +-- not get involved with actually doing any installation). It provides an +-- 'InstallDirs' type which is a set of directories for where to install +-- things. It also handles the fact that we use templates in these install +-- dirs. 
For example most install dirs are relative to some @$prefix@ and by +-- changing the prefix all other dirs still end up changed appropriately. So it +-- provides a 'PathTemplate' type and functions for substituting for these +-- templates. + +module Distribution.Simple.InstallDirs ( + InstallDirs(..), + InstallDirTemplates, + defaultInstallDirs, + combineInstallDirs, + absoluteInstallDirs, + CopyDest(..), + prefixRelativeInstallDirs, + substituteInstallDirTemplates, + + PathTemplate, + PathTemplateVariable(..), + PathTemplateEnv, + toPathTemplate, + fromPathTemplate, + substPathTemplate, + initialPathTemplateEnv, + platformTemplateEnv, + compilerTemplateEnv, + packageTemplateEnv, + abiTemplateEnv, + installDirsTemplateEnv, + ) where + + +import Distribution.Compat.Binary (Binary) +import Data.List (isPrefixOf) +import Data.Maybe (fromMaybe) +#if __GLASGOW_HASKELL__ < 710 +import Data.Monoid (Monoid(..)) +#endif +import GHC.Generics (Generic) +import System.Directory (getAppUserDataDirectory) +import System.FilePath ((), isPathSeparator, pathSeparator) +import System.FilePath (dropDrive) + +import Distribution.Package + ( PackageIdentifier, PackageKey, packageName, packageVersion, packageKeyLibraryName ) +import Distribution.System + ( OS(..), buildOS, Platform(..) ) +import Distribution.Compiler + ( AbiTag(..), abiTagString, CompilerInfo(..), CompilerFlavor(..) ) +import Distribution.Text + ( display ) + +#if mingw32_HOST_OS +import Foreign +import Foreign.C +#endif + +-- --------------------------------------------------------------------------- +-- Installation directories + + +-- | The directories where we will install files for packages. +-- +-- We have several different directories for different types of files since +-- many systems have conventions whereby different types of files in a package +-- are installed in different directories. This is particularly the case on +-- Unix style systems. 
+-- +data InstallDirs dir = InstallDirs { + prefix :: dir, + bindir :: dir, + libdir :: dir, + libsubdir :: dir, + dynlibdir :: dir, + libexecdir :: dir, + includedir :: dir, + datadir :: dir, + datasubdir :: dir, + docdir :: dir, + mandir :: dir, + htmldir :: dir, + haddockdir :: dir, + sysconfdir :: dir + } deriving (Generic, Read, Show) + +instance Binary dir => Binary (InstallDirs dir) + +instance Functor InstallDirs where + fmap f dirs = InstallDirs { + prefix = f (prefix dirs), + bindir = f (bindir dirs), + libdir = f (libdir dirs), + libsubdir = f (libsubdir dirs), + dynlibdir = f (dynlibdir dirs), + libexecdir = f (libexecdir dirs), + includedir = f (includedir dirs), + datadir = f (datadir dirs), + datasubdir = f (datasubdir dirs), + docdir = f (docdir dirs), + mandir = f (mandir dirs), + htmldir = f (htmldir dirs), + haddockdir = f (haddockdir dirs), + sysconfdir = f (sysconfdir dirs) + } + +instance Monoid dir => Monoid (InstallDirs dir) where + mempty = InstallDirs { + prefix = mempty, + bindir = mempty, + libdir = mempty, + libsubdir = mempty, + dynlibdir = mempty, + libexecdir = mempty, + includedir = mempty, + datadir = mempty, + datasubdir = mempty, + docdir = mempty, + mandir = mempty, + htmldir = mempty, + haddockdir = mempty, + sysconfdir = mempty + } + mappend = combineInstallDirs mappend + +combineInstallDirs :: (a -> b -> c) + -> InstallDirs a + -> InstallDirs b + -> InstallDirs c +combineInstallDirs combine a b = InstallDirs { + prefix = prefix a `combine` prefix b, + bindir = bindir a `combine` bindir b, + libdir = libdir a `combine` libdir b, + libsubdir = libsubdir a `combine` libsubdir b, + dynlibdir = dynlibdir a `combine` dynlibdir b, + libexecdir = libexecdir a `combine` libexecdir b, + includedir = includedir a `combine` includedir b, + datadir = datadir a `combine` datadir b, + datasubdir = datasubdir a `combine` datasubdir b, + docdir = docdir a `combine` docdir b, + mandir = mandir a `combine` mandir b, + htmldir = htmldir a 
`combine` htmldir b, + haddockdir = haddockdir a `combine` haddockdir b, + sysconfdir = sysconfdir a `combine` sysconfdir b + } + +appendSubdirs :: (a -> a -> a) -> InstallDirs a -> InstallDirs a +appendSubdirs append dirs = dirs { + libdir = libdir dirs `append` libsubdir dirs, + datadir = datadir dirs `append` datasubdir dirs, + libsubdir = error "internal error InstallDirs.libsubdir", + datasubdir = error "internal error InstallDirs.datasubdir" + } + +-- | The installation directories in terms of 'PathTemplate's that contain +-- variables. +-- +-- The defaults for most of the directories are relative to each other, in +-- particular they are all relative to a single prefix. This makes it +-- convenient for the user to override the default installation directory +-- by only having to specify --prefix=... rather than overriding each +-- individually. This is done by allowing $-style variables in the dirs. +-- These are expanded by textual substitution (see 'substPathTemplate'). +-- +-- A few of these installation directories are split into two components, the +-- dir and subdir. The full installation path is formed by combining the two +-- together with @\/@. The reason for this is compatibility with other Unix +-- build systems which also support @--libdir@ and @--datadir@. We would like +-- users to be able to configure @--libdir=\/usr\/lib64@ for example but +-- because by default we want to support installing multiple versions of +-- packages and building the same package for multiple compilers we append the +-- libsubdir to get: @\/usr\/lib64\/$libname\/$compiler@. +-- +-- An additional complication is the need to support relocatable packages on +-- systems which support such things, like Windows. 
+-- +type InstallDirTemplates = InstallDirs PathTemplate + +-- --------------------------------------------------------------------------- +-- Default installation directories + +defaultInstallDirs :: CompilerFlavor -> Bool -> Bool -> IO InstallDirTemplates +defaultInstallDirs comp userInstall _hasLibs = do + installPrefix <- + if userInstall + then getAppUserDataDirectory "cabal" + else case buildOS of + Windows -> do windowsProgramFilesDir <- getWindowsProgramFilesDir + return (windowsProgramFilesDir "Haskell") + _ -> return "/usr/local" + installLibDir <- + case buildOS of + Windows -> return "$prefix" + _ -> case comp of + LHC | userInstall -> getAppUserDataDirectory "lhc" + _ -> return ("$prefix" "lib") + return $ fmap toPathTemplate $ InstallDirs { + prefix = installPrefix, + bindir = "$prefix" "bin", + libdir = installLibDir, + libsubdir = case comp of + JHC -> "$compiler" + LHC -> "$compiler" + UHC -> "$pkgid" + _other -> "$abi" "$libname", + dynlibdir = "$libdir", + libexecdir = case buildOS of + Windows -> "$prefix" "$libname" + _other -> "$prefix" "libexec", + includedir = "$libdir" "$libsubdir" "include", + datadir = case buildOS of + Windows -> "$prefix" + _other -> "$prefix" "share", + datasubdir = "$abi" "$pkgid", + docdir = "$datadir" "doc" "$abi" "$pkgid", + mandir = "$datadir" "man", + htmldir = "$docdir" "html", + haddockdir = "$htmldir", + sysconfdir = "$prefix" "etc" + } + +-- --------------------------------------------------------------------------- +-- Converting directories, absolute or prefix-relative + +-- | Substitute the install dir templates into each other. +-- +-- To prevent cyclic substitutions, only some variables are allowed in +-- particular dir templates. If out of scope vars are present, they are not +-- substituted for. Checking for any remaining unsubstituted vars can be done +-- as a subsequent operation. 
+-- +-- The reason it is done this way is so that in 'prefixRelativeInstallDirs' we +-- can replace 'prefix' with the 'PrefixVar' and get resulting +-- 'PathTemplate's that still have the 'PrefixVar' in them. Doing this makes it +-- each to check which paths are relative to the $prefix. +-- +substituteInstallDirTemplates :: PathTemplateEnv + -> InstallDirTemplates -> InstallDirTemplates +substituteInstallDirTemplates env dirs = dirs' + where + dirs' = InstallDirs { + -- So this specifies exactly which vars are allowed in each template + prefix = subst prefix [], + bindir = subst bindir [prefixVar], + libdir = subst libdir [prefixVar, bindirVar], + libsubdir = subst libsubdir [], + dynlibdir = subst dynlibdir [prefixVar, bindirVar, libdirVar], + libexecdir = subst libexecdir prefixBinLibVars, + includedir = subst includedir prefixBinLibVars, + datadir = subst datadir prefixBinLibVars, + datasubdir = subst datasubdir [], + docdir = subst docdir prefixBinLibDataVars, + mandir = subst mandir (prefixBinLibDataVars ++ [docdirVar]), + htmldir = subst htmldir (prefixBinLibDataVars ++ [docdirVar]), + haddockdir = subst haddockdir (prefixBinLibDataVars ++ + [docdirVar, htmldirVar]), + sysconfdir = subst sysconfdir prefixBinLibVars + } + subst dir env' = substPathTemplate (env'++env) (dir dirs) + + prefixVar = (PrefixVar, prefix dirs') + bindirVar = (BindirVar, bindir dirs') + libdirVar = (LibdirVar, libdir dirs') + libsubdirVar = (LibsubdirVar, libsubdir dirs') + datadirVar = (DatadirVar, datadir dirs') + datasubdirVar = (DatasubdirVar, datasubdir dirs') + docdirVar = (DocdirVar, docdir dirs') + htmldirVar = (HtmldirVar, htmldir dirs') + prefixBinLibVars = [prefixVar, bindirVar, libdirVar, libsubdirVar] + prefixBinLibDataVars = prefixBinLibVars ++ [datadirVar, datasubdirVar] + +-- | Convert from abstract install directories to actual absolute ones by +-- substituting for all the variables in the abstract paths, to get real +-- absolute path. 
+absoluteInstallDirs :: PackageIdentifier + -> PackageKey + -> CompilerInfo + -> CopyDest + -> Platform + -> InstallDirs PathTemplate + -> InstallDirs FilePath +absoluteInstallDirs pkgId pkg_key compilerId copydest platform dirs = + (case copydest of + CopyTo destdir -> fmap ((destdir ) . dropDrive) + _ -> id) + . appendSubdirs () + . fmap fromPathTemplate + $ substituteInstallDirTemplates env dirs + where + env = initialPathTemplateEnv pkgId pkg_key compilerId platform + + +-- |The location prefix for the /copy/ command. +data CopyDest + = NoCopyDest + | CopyTo FilePath + deriving (Eq, Show) + +-- | Check which of the paths are relative to the installation $prefix. +-- +-- If any of the paths are not relative, ie they are absolute paths, then it +-- prevents us from making a relocatable package (also known as a \"prefix +-- independent\" package). +-- +prefixRelativeInstallDirs :: PackageIdentifier + -> PackageKey + -> CompilerInfo + -> Platform + -> InstallDirTemplates + -> InstallDirs (Maybe FilePath) +prefixRelativeInstallDirs pkgId pkg_key compilerId platform dirs = + fmap relative + . appendSubdirs combinePathTemplate + $ -- substitute the path template into each other, except that we map + -- \$prefix back to $prefix. We're trying to end up with templates that + -- mention no vars except $prefix. + substituteInstallDirTemplates env dirs { + prefix = PathTemplate [Variable PrefixVar] + } + where + env = initialPathTemplateEnv pkgId pkg_key compilerId platform + + -- If it starts with $prefix then it's relative and produce the relative + -- path by stripping off $prefix/ or $prefix + relative dir = case dir of + PathTemplate cs -> fmap (fromPathTemplate . 
PathTemplate) (relative' cs) + relative' (Variable PrefixVar : Ordinary (s:rest) : rest') + | isPathSeparator s = Just (Ordinary rest : rest') + relative' (Variable PrefixVar : rest) = Just rest + relative' _ = Nothing + +-- --------------------------------------------------------------------------- +-- Path templates + +-- | An abstract path, possibly containing variables that need to be +-- substituted for to get a real 'FilePath'. +-- +newtype PathTemplate = PathTemplate [PathComponent] deriving (Eq, Generic, Ord) + +instance Binary PathTemplate + +data PathComponent = + Ordinary FilePath + | Variable PathTemplateVariable + deriving (Eq, Ord, Generic) + +instance Binary PathComponent + +data PathTemplateVariable = + PrefixVar -- ^ The @$prefix@ path variable + | BindirVar -- ^ The @$bindir@ path variable + | LibdirVar -- ^ The @$libdir@ path variable + | LibsubdirVar -- ^ The @$libsubdir@ path variable + | DatadirVar -- ^ The @$datadir@ path variable + | DatasubdirVar -- ^ The @$datasubdir@ path variable + | DocdirVar -- ^ The @$docdir@ path variable + | HtmldirVar -- ^ The @$htmldir@ path variable + | PkgNameVar -- ^ The @$pkg@ package name path variable + | PkgVerVar -- ^ The @$version@ package version path variable + | PkgIdVar -- ^ The @$pkgid@ package Id path variable, eg @foo-1.0@ + | PkgKeyVar -- ^ The @$pkgkey@ package key path variable + | LibNameVar -- ^ The @$libname@ expanded package key path variable + | CompilerVar -- ^ The compiler name and version, eg @ghc-6.6.1@ + | OSVar -- ^ The operating system name, eg @windows@ or @linux@ + | ArchVar -- ^ The CPU architecture name, eg @i386@ or @x86_64@ + | AbiVar -- ^ The Compiler's ABI identifier, $arch-$os-$compiler-$abitag + | AbiTagVar -- ^ The optional ABI tag for the compiler + | ExecutableNameVar -- ^ The executable name; used in shell wrappers + | TestSuiteNameVar -- ^ The name of the test suite being run + | TestSuiteResultVar -- ^ The result of the test suite being run, eg + -- @pass@, @fail@, or 
@error@. + | BenchmarkNameVar -- ^ The name of the benchmark being run + deriving (Eq, Ord, Generic) + +instance Binary PathTemplateVariable + +type PathTemplateEnv = [(PathTemplateVariable, PathTemplate)] + +-- | Convert a 'FilePath' to a 'PathTemplate' including any template vars. +-- +toPathTemplate :: FilePath -> PathTemplate +toPathTemplate = PathTemplate . read + +-- | Convert back to a path, any remaining vars are included +-- +fromPathTemplate :: PathTemplate -> FilePath +fromPathTemplate (PathTemplate template) = show template + +combinePathTemplate :: PathTemplate -> PathTemplate -> PathTemplate +combinePathTemplate (PathTemplate t1) (PathTemplate t2) = + PathTemplate (t1 ++ [Ordinary [pathSeparator]] ++ t2) + +substPathTemplate :: PathTemplateEnv -> PathTemplate -> PathTemplate +substPathTemplate environment (PathTemplate template) = + PathTemplate (concatMap subst template) + + where subst component@(Ordinary _) = [component] + subst component@(Variable variable) = + case lookup variable environment of + Just (PathTemplate components) -> components + Nothing -> [component] + +-- | The initial environment has all the static stuff but no paths +initialPathTemplateEnv :: PackageIdentifier + -> PackageKey + -> CompilerInfo + -> Platform + -> PathTemplateEnv +initialPathTemplateEnv pkgId pkg_key compiler platform = + packageTemplateEnv pkgId pkg_key + ++ compilerTemplateEnv compiler + ++ platformTemplateEnv platform + ++ abiTemplateEnv compiler platform + +packageTemplateEnv :: PackageIdentifier -> PackageKey -> PathTemplateEnv +packageTemplateEnv pkgId pkg_key = + [(PkgNameVar, PathTemplate [Ordinary $ display (packageName pkgId)]) + ,(PkgVerVar, PathTemplate [Ordinary $ display (packageVersion pkgId)]) + ,(PkgKeyVar, PathTemplate [Ordinary $ display pkg_key]) + ,(LibNameVar, PathTemplate [Ordinary $ packageKeyLibraryName pkgId pkg_key]) + ,(PkgIdVar, PathTemplate [Ordinary $ display pkgId]) + ] + +compilerTemplateEnv :: CompilerInfo -> PathTemplateEnv 
+compilerTemplateEnv compiler = + [(CompilerVar, PathTemplate [Ordinary $ display (compilerInfoId compiler)]) + ] + +platformTemplateEnv :: Platform -> PathTemplateEnv +platformTemplateEnv (Platform arch os) = + [(OSVar, PathTemplate [Ordinary $ display os]) + ,(ArchVar, PathTemplate [Ordinary $ display arch]) + ] + +abiTemplateEnv :: CompilerInfo -> Platform -> PathTemplateEnv +abiTemplateEnv compiler (Platform arch os) = + [(AbiVar, PathTemplate [Ordinary $ display arch ++ '-':display os ++ + '-':display (compilerInfoId compiler) ++ + case compilerInfoAbiTag compiler of + NoAbiTag -> "" + AbiTag tag -> '-':tag]) + ,(AbiTagVar, PathTemplate [Ordinary $ abiTagString (compilerInfoAbiTag compiler)]) + ] + +installDirsTemplateEnv :: InstallDirs PathTemplate -> PathTemplateEnv +installDirsTemplateEnv dirs = + [(PrefixVar, prefix dirs) + ,(BindirVar, bindir dirs) + ,(LibdirVar, libdir dirs) + ,(LibsubdirVar, libsubdir dirs) + ,(DatadirVar, datadir dirs) + ,(DatasubdirVar, datasubdir dirs) + ,(DocdirVar, docdir dirs) + ,(HtmldirVar, htmldir dirs) + ] + + +-- --------------------------------------------------------------------------- +-- Parsing and showing path templates: + +-- The textual format is that of an ordinary Haskell String, eg +-- "$prefix/bin" +-- and this gets parsed to the internal representation as a sequence of path +-- spans which are either strings or variables, eg: +-- PathTemplate [Variable PrefixVar, Ordinary "/bin" ] + +instance Show PathTemplateVariable where + show PrefixVar = "prefix" + show PkgKeyVar = "pkgkey" + show LibNameVar = "libname" + show BindirVar = "bindir" + show LibdirVar = "libdir" + show LibsubdirVar = "libsubdir" + show DatadirVar = "datadir" + show DatasubdirVar = "datasubdir" + show DocdirVar = "docdir" + show HtmldirVar = "htmldir" + show PkgNameVar = "pkg" + show PkgVerVar = "version" + show PkgIdVar = "pkgid" + show CompilerVar = "compiler" + show OSVar = "os" + show ArchVar = "arch" + show AbiTagVar = "abitag" + show AbiVar 
= "abi" + show ExecutableNameVar = "executablename" + show TestSuiteNameVar = "test-suite" + show TestSuiteResultVar = "result" + show BenchmarkNameVar = "benchmark" + +instance Read PathTemplateVariable where + readsPrec _ s = + take 1 + [ (var, drop (length varStr) s) + | (varStr, var) <- vars + , varStr `isPrefixOf` s ] + -- NB: order matters! Longer strings first + where vars = [("prefix", PrefixVar) + ,("bindir", BindirVar) + ,("libdir", LibdirVar) + ,("libsubdir", LibsubdirVar) + ,("datadir", DatadirVar) + ,("datasubdir", DatasubdirVar) + ,("docdir", DocdirVar) + ,("htmldir", HtmldirVar) + ,("pkgid", PkgIdVar) + ,("pkgkey", PkgKeyVar) + ,("libname", LibNameVar) + ,("pkg", PkgNameVar) + ,("version", PkgVerVar) + ,("compiler", CompilerVar) + ,("os", OSVar) + ,("arch", ArchVar) + ,("abitag", AbiTagVar) + ,("abi", AbiVar) + ,("executablename", ExecutableNameVar) + ,("test-suite", TestSuiteNameVar) + ,("result", TestSuiteResultVar) + ,("benchmark", BenchmarkNameVar)] + +instance Show PathComponent where + show (Ordinary path) = path + show (Variable var) = '$':show var + showList = foldr (\x -> (shows x .)) id + +instance Read PathComponent where + -- for some reason we collapse multiple $ symbols here + readsPrec _ = lex0 + where lex0 [] = [] + lex0 ('$':'$':s') = lex0 ('$':s') + lex0 ('$':s') = case [ (Variable var, s'') + | (var, s'') <- reads s' ] of + [] -> lex1 "$" s' + ok -> ok + lex0 s' = lex1 [] s' + lex1 "" "" = [] + lex1 acc "" = [(Ordinary (reverse acc), "")] + lex1 acc ('$':'$':s) = lex1 acc ('$':s) + lex1 acc ('$':s) = [(Ordinary (reverse acc), '$':s)] + lex1 acc (c:s) = lex1 (c:acc) s + readList [] = [([],"")] + readList s = [ (component:components, s'') + | (component, s') <- reads s + , (components, s'') <- readList s' ] + +instance Show PathTemplate where + show (PathTemplate template) = show (show template) + +instance Read PathTemplate where + readsPrec p s = [ (PathTemplate template, s') + | (path, s') <- readsPrec p s + , (template, "") <- 
reads path ] + +-- --------------------------------------------------------------------------- +-- Internal utilities + +getWindowsProgramFilesDir :: IO FilePath +getWindowsProgramFilesDir = do +#if mingw32_HOST_OS + m <- shGetFolderPath csidl_PROGRAM_FILES +#else + let m = Nothing +#endif + return (fromMaybe "C:\\Program Files" m) + +#if mingw32_HOST_OS +shGetFolderPath :: CInt -> IO (Maybe FilePath) +shGetFolderPath n = + allocaArray long_path_size $ \pPath -> do + r <- c_SHGetFolderPath nullPtr n nullPtr 0 pPath + if (r /= 0) + then return Nothing + else do s <- peekCWString pPath; return (Just s) + where + long_path_size = 1024 -- MAX_PATH is 260, this should be plenty + +csidl_PROGRAM_FILES :: CInt +csidl_PROGRAM_FILES = 0x0026 +-- csidl_PROGRAM_FILES_COMMON :: CInt +-- csidl_PROGRAM_FILES_COMMON = 0x002b + +#ifdef x86_64_HOST_ARCH +#define CALLCONV ccall +#else +#define CALLCONV stdcall +#endif + +foreign import CALLCONV unsafe "shlobj.h SHGetFolderPathW" + c_SHGetFolderPath :: Ptr () + -> CInt + -> Ptr () + -> CInt + -> CWString + -> IO CInt +#endif diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Install.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Install.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Install.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Install.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,190 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.Install +-- Copyright : Isaac Jones 2003-2004 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This is the entry point into installing a built package. Performs the +-- \"@.\/setup install@\" and \"@.\/setup copy@\" actions. It moves files into +-- place based on the prefix argument. 
It does the generic bits and then calls +-- compiler-specific functions to do the rest. + +module Distribution.Simple.Install ( + install, + ) where + +import Distribution.PackageDescription ( + PackageDescription(..), BuildInfo(..), Library(..), + hasLibs, withLib, hasExes, withExe ) +import Distribution.Package (Package(..)) +import Distribution.Simple.LocalBuildInfo ( + LocalBuildInfo(..), InstallDirs(..), absoluteInstallDirs, + substPathTemplate, withLibLBI) +import Distribution.Simple.BuildPaths (haddockName, haddockPref) +import Distribution.Simple.Utils + ( createDirectoryIfMissingVerbose + , installDirectoryContents, installOrdinaryFile, isInSearchPath + , die, info, notice, warn, matchDirFileGlob ) +import Distribution.Simple.Compiler + ( CompilerFlavor(..), compilerFlavor ) +import Distribution.Simple.Setup (CopyFlags(..), fromFlag) + +import qualified Distribution.Simple.GHC as GHC +import qualified Distribution.Simple.GHCJS as GHCJS +import qualified Distribution.Simple.JHC as JHC +import qualified Distribution.Simple.LHC as LHC +import qualified Distribution.Simple.UHC as UHC +import qualified Distribution.Simple.HaskellSuite as HaskellSuite + +import Control.Monad (when, unless) +import System.Directory + ( doesDirectoryExist, doesFileExist ) +import System.FilePath + ( takeFileName, takeDirectory, (), isAbsolute ) + +import Distribution.Verbosity +import Distribution.Text + ( display ) + +-- |Perform the \"@.\/setup install@\" and \"@.\/setup copy@\" +-- actions. Move files into place based on the prefix argument. 
+ +install :: PackageDescription -- ^information from the .cabal file + -> LocalBuildInfo -- ^information from the configure step + -> CopyFlags -- ^flags sent to copy or install + -> IO () +install pkg_descr lbi flags = do + let distPref = fromFlag (copyDistPref flags) + verbosity = fromFlag (copyVerbosity flags) + copydest = fromFlag (copyDest flags) + installDirs@(InstallDirs { + bindir = binPref, + libdir = libPref, +-- dynlibdir = dynlibPref, --see TODO below + datadir = dataPref, + docdir = docPref, + htmldir = htmlPref, + haddockdir = interfacePref, + includedir = incPref}) + = absoluteInstallDirs pkg_descr lbi copydest + + --TODO: decide if we need the user to be able to control the libdir + -- for shared libs independently of the one for static libs. If so + -- it should also have a flag in the command line UI + -- For the moment use dynlibdir = libdir + dynlibPref = libPref + progPrefixPref = substPathTemplate (packageId pkg_descr) lbi (progPrefix lbi) + progSuffixPref = substPathTemplate (packageId pkg_descr) lbi (progSuffix lbi) + + docExists <- doesDirectoryExist $ haddockPref distPref pkg_descr + info verbosity ("directory " ++ haddockPref distPref pkg_descr ++ + " does exist: " ++ show docExists) + + installDataFiles verbosity pkg_descr dataPref + + when docExists $ do + createDirectoryIfMissingVerbose verbosity True htmlPref + installDirectoryContents verbosity + (haddockPref distPref pkg_descr) htmlPref + -- setPermissionsRecursive [Read] htmlPref + -- The haddock interface file actually already got installed + -- in the recursive copy, but now we install it where we actually + -- want it to be (normally the same place). We could remove the + -- copy in htmlPref first. 
+ let haddockInterfaceFileSrc = haddockPref distPref pkg_descr + haddockName pkg_descr + haddockInterfaceFileDest = interfacePref haddockName pkg_descr + -- We only generate the haddock interface file for libs, So if the + -- package consists only of executables there will not be one: + exists <- doesFileExist haddockInterfaceFileSrc + when exists $ do + createDirectoryIfMissingVerbose verbosity True interfacePref + installOrdinaryFile verbosity haddockInterfaceFileSrc + haddockInterfaceFileDest + + let lfiles = licenseFiles pkg_descr + unless (null lfiles) $ do + createDirectoryIfMissingVerbose verbosity True docPref + sequence_ + [ installOrdinaryFile verbosity lfile (docPref takeFileName lfile) + | lfile <- lfiles ] + + let buildPref = buildDir lbi + when (hasLibs pkg_descr) $ + notice verbosity ("Installing library in " ++ libPref) + when (hasExes pkg_descr) $ do + notice verbosity ("Installing executable(s) in " ++ binPref) + inPath <- isInSearchPath binPref + when (not inPath) $ + warn verbosity ("The directory " ++ binPref + ++ " is not in the system search path.") + + -- install include files for all compilers - they may be needed to compile + -- haskell files (using the CPP extension) + when (hasLibs pkg_descr) $ installIncludeFiles verbosity pkg_descr incPref + + case compilerFlavor (compiler lbi) of + GHC -> do withLibLBI pkg_descr lbi $ + GHC.installLib verbosity lbi libPref dynlibPref buildPref pkg_descr + withExe pkg_descr $ + GHC.installExe verbosity lbi installDirs buildPref (progPrefixPref, progSuffixPref) pkg_descr + GHCJS-> do withLibLBI pkg_descr lbi $ + GHCJS.installLib verbosity lbi libPref dynlibPref buildPref pkg_descr + withExe pkg_descr $ + GHCJS.installExe verbosity lbi installDirs buildPref (progPrefixPref, progSuffixPref) pkg_descr + LHC -> do withLibLBI pkg_descr lbi $ + LHC.installLib verbosity lbi libPref dynlibPref buildPref pkg_descr + withExe pkg_descr $ + LHC.installExe verbosity lbi installDirs buildPref (progPrefixPref, 
progSuffixPref) pkg_descr + JHC -> do withLib pkg_descr $ + JHC.installLib verbosity libPref buildPref pkg_descr + withExe pkg_descr $ + JHC.installExe verbosity binPref buildPref (progPrefixPref, progSuffixPref) pkg_descr + UHC -> do withLib pkg_descr $ UHC.installLib verbosity lbi libPref dynlibPref buildPref pkg_descr + HaskellSuite {} -> + withLib pkg_descr $ + HaskellSuite.installLib verbosity lbi libPref dynlibPref buildPref pkg_descr + _ -> die $ "installing with " + ++ display (compilerFlavor (compiler lbi)) + ++ " is not implemented" + -- register step should be performed by caller. + +-- | Install the files listed in data-files +-- +installDataFiles :: Verbosity -> PackageDescription -> FilePath -> IO () +installDataFiles verbosity pkg_descr destDataDir = + flip mapM_ (dataFiles pkg_descr) $ \ file -> do + let srcDataDir = dataDir pkg_descr + files <- matchDirFileGlob srcDataDir file + let dir = takeDirectory file + createDirectoryIfMissingVerbose verbosity True (destDataDir dir) + sequence_ [ installOrdinaryFile verbosity (srcDataDir file') + (destDataDir file') + | file' <- files ] + +-- | Install the files listed in install-includes +-- +installIncludeFiles :: Verbosity -> PackageDescription -> FilePath -> IO () +installIncludeFiles verbosity + PackageDescription { library = Just lib } destIncludeDir = do + + incs <- mapM (findInc relincdirs) (installIncludes lbi) + sequence_ + [ do createDirectoryIfMissingVerbose verbosity True destDir + installOrdinaryFile verbosity srcFile destFile + | (relFile, srcFile) <- incs + , let destFile = destIncludeDir relFile + destDir = takeDirectory destFile ] + where + relincdirs = "." 
: filter (not.isAbsolute) (includeDirs lbi) + lbi = libBuildInfo lib + + findInc [] file = die ("can't find include file " ++ file) + findInc (dir:dirs) file = do + let path = dir file + exists <- doesFileExist path + if exists then return (file, path) else findInc dirs file +installIncludeFiles _ _ _ = die "installIncludeFiles: Can't happen?" diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/JHC.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/JHC.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/JHC.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/JHC.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,196 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.JHC +-- Copyright : Isaac Jones 2003-2006 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This module contains most of the JHC-specific code for configuring, building +-- and installing packages. + +module Distribution.Simple.JHC ( + configure, getInstalledPackages, + buildLib, buildExe, + installLib, installExe + ) where + +import Distribution.PackageDescription as PD + ( PackageDescription(..), BuildInfo(..), Executable(..) + , Library(..), libModules, hcOptions, usedExtensions ) +import Distribution.InstalledPackageInfo + ( emptyInstalledPackageInfo, ) +import qualified Distribution.InstalledPackageInfo as InstalledPackageInfo +import Distribution.Simple.PackageIndex (InstalledPackageIndex) +import qualified Distribution.Simple.PackageIndex as PackageIndex +import Distribution.Simple.LocalBuildInfo + ( LocalBuildInfo(..), ComponentLocalBuildInfo(..) ) +import Distribution.Simple.BuildPaths + ( autogenModulesDir, exeExtension ) +import Distribution.Simple.Compiler + ( CompilerFlavor(..), CompilerId(..), Compiler(..), AbiTag(..) 
+ , PackageDBStack, Flag, languageToFlags, extensionsToFlags ) +import Language.Haskell.Extension + ( Language(Haskell98), Extension(..), KnownExtension(..)) +import Distribution.Simple.Program + ( ConfiguredProgram(..), jhcProgram, ProgramConfiguration + , userMaybeSpecifyPath, requireProgramVersion, lookupProgram + , rawSystemProgram, rawSystemProgramStdoutConf ) +import Distribution.Version + ( Version(..), orLaterVersion ) +import Distribution.Package + ( Package(..), InstalledPackageId(InstalledPackageId), + pkgName, pkgVersion, ) +import Distribution.Simple.Utils + ( createDirectoryIfMissingVerbose, writeFileAtomic + , installOrdinaryFile, installExecutableFile + , intercalate ) +import System.FilePath ( () ) +import Distribution.Verbosity +import Distribution.Text + ( Text(parse), display ) +import Distribution.Compat.ReadP + ( readP_to_S, string, skipSpaces ) +import Distribution.System ( Platform ) + +import Data.List ( nub ) +import Data.Char ( isSpace ) +import qualified Data.Map as M ( empty ) +import Data.Maybe ( fromMaybe ) + +import qualified Data.ByteString.Lazy.Char8 as BS.Char8 + + +-- ----------------------------------------------------------------------------- +-- Configuring + +configure :: Verbosity -> Maybe FilePath -> Maybe FilePath + -> ProgramConfiguration -> IO (Compiler, Maybe Platform, ProgramConfiguration) +configure verbosity hcPath _hcPkgPath conf = do + + (jhcProg, _, conf') <- requireProgramVersion verbosity + jhcProgram (orLaterVersion (Version [0,7,2] [])) + (userMaybeSpecifyPath "jhc" hcPath conf) + + let Just version = programVersion jhcProg + comp = Compiler { + compilerId = CompilerId JHC version, + compilerAbiTag = NoAbiTag, + compilerCompat = [], + compilerLanguages = jhcLanguages, + compilerExtensions = jhcLanguageExtensions, + compilerProperties = M.empty + } + compPlatform = Nothing + return (comp, compPlatform, conf') + +jhcLanguages :: [(Language, Flag)] +jhcLanguages = [(Haskell98, "")] + +-- | The flags for the 
supported extensions +jhcLanguageExtensions :: [(Extension, Flag)] +jhcLanguageExtensions = + [(EnableExtension TypeSynonymInstances , "") + ,(DisableExtension TypeSynonymInstances , "") + ,(EnableExtension ForeignFunctionInterface , "") + ,(DisableExtension ForeignFunctionInterface , "") + ,(EnableExtension ImplicitPrelude , "") -- Wrong + ,(DisableExtension ImplicitPrelude , "--noprelude") + ,(EnableExtension CPP , "-fcpp") + ,(DisableExtension CPP , "-fno-cpp") + ] + +getInstalledPackages :: Verbosity -> PackageDBStack -> ProgramConfiguration + -> IO InstalledPackageIndex +getInstalledPackages verbosity _packageDBs conf = do + -- jhc --list-libraries lists all available libraries. + -- How shall I find out, whether they are global or local + -- without checking all files and locations? + str <- rawSystemProgramStdoutConf verbosity jhcProgram conf ["--list-libraries"] + let pCheck :: [(a, String)] -> [a] + pCheck rs = [ r | (r,s) <- rs, all isSpace s ] + let parseLine ln = + pCheck (readP_to_S + (skipSpaces >> string "Name:" >> skipSpaces >> parse) ln) + return $ + PackageIndex.fromList $ + map (\p -> emptyInstalledPackageInfo { + InstalledPackageInfo.installedPackageId = + InstalledPackageId (display p), + InstalledPackageInfo.sourcePackageId = p + }) $ + concatMap parseLine $ + lines str + +-- ----------------------------------------------------------------------------- +-- Building + +-- | Building a package for JHC. +-- Currently C source files are not supported. +buildLib :: Verbosity -> PackageDescription -> LocalBuildInfo + -> Library -> ComponentLocalBuildInfo -> IO () +buildLib verbosity pkg_descr lbi lib clbi = do + let Just jhcProg = lookupProgram jhcProgram (withPrograms lbi) + let libBi = libBuildInfo lib + let args = constructJHCCmdLine lbi libBi clbi (buildDir lbi) verbosity + let pkgid = display (packageId pkg_descr) + pfile = buildDir lbi "jhc-pkg.conf" + hlfile= buildDir lbi (pkgid ++ ".hl") + writeFileAtomic pfile . 
BS.Char8.pack $ jhcPkgConf pkg_descr + rawSystemProgram verbosity jhcProg $ + ["--build-hl="++pfile, "-o", hlfile] ++ + args ++ map display (libModules lib) + +-- | Building an executable for JHC. +-- Currently C source files are not supported. +buildExe :: Verbosity -> PackageDescription -> LocalBuildInfo + -> Executable -> ComponentLocalBuildInfo -> IO () +buildExe verbosity _pkg_descr lbi exe clbi = do + let Just jhcProg = lookupProgram jhcProgram (withPrograms lbi) + let exeBi = buildInfo exe + let out = buildDir lbi exeName exe + let args = constructJHCCmdLine lbi exeBi clbi (buildDir lbi) verbosity + rawSystemProgram verbosity jhcProg (["-o",out] ++ args ++ [modulePath exe]) + +constructJHCCmdLine :: LocalBuildInfo -> BuildInfo -> ComponentLocalBuildInfo + -> FilePath -> Verbosity -> [String] +constructJHCCmdLine lbi bi clbi _odir verbosity = + (if verbosity >= deafening then ["-v"] else []) + ++ hcOptions JHC bi + ++ languageToFlags (compiler lbi) (defaultLanguage bi) + ++ extensionsToFlags (compiler lbi) (usedExtensions bi) + ++ ["--noauto","-i-"] + ++ concat [["-i", l] | l <- nub (hsSourceDirs bi)] + ++ ["-i", autogenModulesDir lbi] + ++ ["-optc" ++ opt | opt <- PD.ccOptions bi] + -- It would be better if JHC would accept package names with versions, + -- but JHC-0.7.2 doesn't accept this. + -- Thus, we have to strip the version with 'pkgName'. + ++ (concat [ ["-p", display (pkgName pkgid)] + | (_, pkgid) <- componentPackageDeps clbi ]) + +jhcPkgConf :: PackageDescription -> String +jhcPkgConf pd = + let sline name sel = name ++ ": "++sel pd + lib = fromMaybe (error "no library available") . library + comma = intercalate "," . map display + in unlines [sline "name" (display . pkgName . packageId) + ,sline "version" (display . pkgVersion . packageId) + ,sline "exposed-modules" (comma . PD.exposedModules . lib) + ,sline "hidden-modules" (comma . otherModules . libBuildInfo . 
lib) + ] + +installLib :: Verbosity -> FilePath -> FilePath -> PackageDescription -> Library -> IO () +installLib verb dest build_dir pkg_descr _ = do + let p = display (packageId pkg_descr)++".hl" + createDirectoryIfMissingVerbose verb True dest + installOrdinaryFile verb (build_dir p) (dest p) + +installExe :: Verbosity -> FilePath -> FilePath -> (FilePath,FilePath) -> PackageDescription -> Executable -> IO () +installExe verb dest build_dir (progprefix,progsuffix) _ exe = do + let exe_name = exeName exe + src = exe_name exeExtension + out = (progprefix ++ exe_name ++ progsuffix) exeExtension + createDirectoryIfMissingVerbose verb True dest + installExecutableFile verb (build_dir src) (dest out) diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/LHC.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/LHC.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/LHC.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/LHC.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,803 @@ +{-# LANGUAGE CPP #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.LHC +-- Copyright : Isaac Jones 2003-2007 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This is a fairly large module. It contains most of the GHC-specific code for +-- configuring, building and installing packages. It also exports a function +-- for finding out what packages are already installed. Configuring involves +-- finding the @ghc@ and @ghc-pkg@ programs, finding what language extensions +-- this version of ghc supports and returning a 'Compiler' value. +-- +-- 'getInstalledPackages' involves calling the @ghc-pkg@ program to find out +-- what packages are installed. +-- +-- Building is somewhat complex as there is quite a bit of information to take +-- into account. 
We have to build libs and programs, possibly for profiling and +-- shared libs. We have to support building libraries that will be usable by +-- GHCi and also ghc's @-split-objs@ feature. We have to compile any C files +-- using ghc. Linking, especially for @split-objs@ is remarkably complex, +-- partly because there tend to be 1,000's of @.o@ files and this can often be +-- more than we can pass to the @ld@ or @ar@ programs in one go. +-- +-- Installing for libs and exes involves finding the right files and copying +-- them to the right places. One of the more tricky things about this module is +-- remembering the layout of files in the build directory (which is not +-- explicitly documented) and thus what search dirs are used for various kinds +-- of files. + +module Distribution.Simple.LHC ( + configure, getInstalledPackages, + buildLib, buildExe, + installLib, installExe, + registerPackage, + hcPkgInfo, + ghcOptions, + ghcVerbosityOptions + ) where + +import Distribution.PackageDescription as PD + ( PackageDescription(..), BuildInfo(..), Executable(..) + , Library(..), libModules, hcOptions, hcProfOptions, hcSharedOptions + , usedExtensions, allExtensions ) +import Distribution.InstalledPackageInfo + ( InstalledPackageInfo + , parseInstalledPackageInfo ) +import qualified Distribution.InstalledPackageInfo as InstalledPackageInfo + ( InstalledPackageInfo_(..) ) +import Distribution.Simple.PackageIndex +import qualified Distribution.Simple.PackageIndex as PackageIndex +import Distribution.ParseUtils ( ParseResult(..) ) +import Distribution.Simple.LocalBuildInfo + ( LocalBuildInfo(..), ComponentLocalBuildInfo(..), + LibraryName(..) ) +import Distribution.Simple.InstallDirs +import Distribution.Simple.BuildPaths +import Distribution.Simple.Utils +import Distribution.Package + ( Package(..) 
) +import qualified Distribution.ModuleName as ModuleName +import Distribution.Simple.Program + ( Program(..), ConfiguredProgram(..), ProgramConfiguration + , ProgramSearchPath, ProgramLocation(..) + , rawSystemProgram, rawSystemProgramConf + , rawSystemProgramStdout, rawSystemProgramStdoutConf + , requireProgramVersion + , userMaybeSpecifyPath, programPath, lookupProgram, addKnownProgram + , arProgram, ldProgram + , gccProgram, stripProgram + , lhcProgram, lhcPkgProgram ) +import qualified Distribution.Simple.Program.HcPkg as HcPkg +import Distribution.Simple.Compiler + ( CompilerFlavor(..), CompilerId(..), Compiler(..), compilerVersion + , OptimisationLevel(..), PackageDB(..), PackageDBStack, AbiTag(..) + , Flag, languageToFlags, extensionsToFlags ) +import Distribution.Version + ( Version(..), orLaterVersion ) +import Distribution.System + ( OS(..), buildOS ) +import Distribution.Verbosity +import Distribution.Text + ( display, simpleParse ) +import Language.Haskell.Extension + ( Language(Haskell98), Extension(..), KnownExtension(..) ) + +import Control.Monad ( unless, when ) +import Data.List +import qualified Data.Map as M ( empty ) +import Data.Maybe ( catMaybes ) +#if __GLASGOW_HASKELL__ < 710 +import Data.Monoid ( Monoid(..) 
) +#endif +import System.Directory ( removeFile, renameFile, + getDirectoryContents, doesFileExist, + getTemporaryDirectory ) +import System.FilePath ( (), (<.>), takeExtension, + takeDirectory, replaceExtension ) +import System.IO (hClose, hPutStrLn) +import Distribution.Compat.Exception (catchExit, catchIO) +import Distribution.System ( Platform ) + +-- ----------------------------------------------------------------------------- +-- Configuring + +configure :: Verbosity -> Maybe FilePath -> Maybe FilePath + -> ProgramConfiguration -> IO (Compiler, Maybe Platform, ProgramConfiguration) +configure verbosity hcPath hcPkgPath conf = do + + (lhcProg, lhcVersion, conf') <- + requireProgramVersion verbosity lhcProgram + (orLaterVersion (Version [0,7] [])) + (userMaybeSpecifyPath "lhc" hcPath conf) + + (lhcPkgProg, lhcPkgVersion, conf'') <- + requireProgramVersion verbosity lhcPkgProgram + (orLaterVersion (Version [0,7] [])) + (userMaybeSpecifyPath "lhc-pkg" hcPkgPath conf') + + when (lhcVersion /= lhcPkgVersion) $ die $ + "Version mismatch between lhc and lhc-pkg: " + ++ programPath lhcProg ++ " is version " ++ display lhcVersion ++ " " + ++ programPath lhcPkgProg ++ " is version " ++ display lhcPkgVersion + + languages <- getLanguages verbosity lhcProg + extensions <- getExtensions verbosity lhcProg + + let comp = Compiler { + compilerId = CompilerId LHC lhcVersion, + compilerAbiTag = NoAbiTag, + compilerCompat = [], + compilerLanguages = languages, + compilerExtensions = extensions, + compilerProperties = M.empty + } + conf''' = configureToolchain lhcProg conf'' -- configure gcc and ld + compPlatform = Nothing + return (comp, compPlatform, conf''') + +-- | Adjust the way we find and configure gcc and ld +-- +configureToolchain :: ConfiguredProgram -> ProgramConfiguration + -> ProgramConfiguration +configureToolchain lhcProg = + addKnownProgram gccProgram { + programFindLocation = findProg gccProgram (baseDir "gcc.exe"), + programPostConf = configureGcc + } + . 
addKnownProgram ldProgram { + programFindLocation = findProg ldProgram (libDir "ld.exe"), + programPostConf = configureLd + } + where + compilerDir = takeDirectory (programPath lhcProg) + baseDir = takeDirectory compilerDir + libDir = baseDir "gcc-lib" + includeDir = baseDir "include" "mingw" + isWindows = case buildOS of Windows -> True; _ -> False + + -- on Windows finding and configuring ghc's gcc and ld is a bit special + findProg :: Program -> FilePath + -> Verbosity -> ProgramSearchPath -> IO (Maybe FilePath) + findProg prog location | isWindows = \verbosity searchpath -> do + exists <- doesFileExist location + if exists then return (Just location) + else do warn verbosity ("Couldn't find " ++ programName prog ++ " where I expected it. Trying the search path.") + programFindLocation prog verbosity searchpath + | otherwise = programFindLocation prog + + configureGcc :: Verbosity -> ConfiguredProgram -> IO ConfiguredProgram + configureGcc + | isWindows = \_ gccProg -> case programLocation gccProg of + -- if it's found on system then it means we're using the result + -- of programFindLocation above rather than a user-supplied path + -- that means we should add this extra flag to tell ghc's gcc + -- where it lives and thus where gcc can find its various files: + FoundOnSystem {} -> return gccProg { + programDefaultArgs = ["-B" ++ libDir, + "-I" ++ includeDir] + } + UserSpecified {} -> return gccProg + | otherwise = \_ gccProg -> return gccProg + + -- we need to find out if ld supports the -x flag + configureLd :: Verbosity -> ConfiguredProgram -> IO ConfiguredProgram + configureLd verbosity ldProg = do + tempDir <- getTemporaryDirectory + ldx <- withTempFile tempDir ".c" $ \testcfile testchnd -> + withTempFile tempDir ".o" $ \testofile testohnd -> do + hPutStrLn testchnd "int foo() { return 0; }" + hClose testchnd; hClose testohnd + rawSystemProgram verbosity lhcProg ["-c", testcfile, + "-o", testofile] + withTempFile tempDir ".o" $ \testofile' testohnd' -> + do 
+ hClose testohnd' + _ <- rawSystemProgramStdout verbosity ldProg + ["-x", "-r", testofile, "-o", testofile'] + return True + `catchIO` (\_ -> return False) + `catchExit` (\_ -> return False) + if ldx + then return ldProg { programDefaultArgs = ["-x"] } + else return ldProg + +getLanguages :: Verbosity -> ConfiguredProgram -> IO [(Language, Flag)] +getLanguages _ _ = return [(Haskell98, "")] +--FIXME: does lhc support -XHaskell98 flag? from what version? + +getExtensions :: Verbosity -> ConfiguredProgram -> IO [(Extension, Flag)] +getExtensions verbosity lhcProg = do + exts <- rawSystemStdout verbosity (programPath lhcProg) + ["--supported-languages"] + -- GHC has the annoying habit of inverting some of the extensions + -- so we have to try parsing ("No" ++ ghcExtensionName) first + let readExtension str = do + ext <- simpleParse ("No" ++ str) + case ext of + UnknownExtension _ -> simpleParse str + _ -> return ext + return $ [ (ext, "-X" ++ display ext) + | Just ext <- map readExtension (lines exts) ] + +getInstalledPackages :: Verbosity -> PackageDBStack -> ProgramConfiguration + -> IO InstalledPackageIndex +getInstalledPackages verbosity packagedbs conf = do + checkPackageDbStack packagedbs + pkgss <- getInstalledPackages' lhcPkg verbosity packagedbs conf + let indexes = [ PackageIndex.fromList (map (substTopDir topDir) pkgs) + | (_, pkgs) <- pkgss ] + return $! (mconcat indexes) + + where + -- On Windows, various fields have $topdir/foo rather than full + -- paths. 
We need to substitute the right value in so that when + -- we, for example, call gcc, we have proper paths to give it + Just ghcProg = lookupProgram lhcProgram conf + Just lhcPkg = lookupProgram lhcPkgProgram conf + compilerDir = takeDirectory (programPath ghcProg) + topDir = takeDirectory compilerDir + +checkPackageDbStack :: PackageDBStack -> IO () +checkPackageDbStack (GlobalPackageDB:rest) + | GlobalPackageDB `notElem` rest = return () +checkPackageDbStack _ = + die $ "GHC.getInstalledPackages: the global package db must be " + ++ "specified first and cannot be specified multiple times" + +-- | Get the packages from specific PackageDBs, not cumulative. +-- +getInstalledPackages' :: ConfiguredProgram -> Verbosity + -> [PackageDB] -> ProgramConfiguration + -> IO [(PackageDB, [InstalledPackageInfo])] +getInstalledPackages' lhcPkg verbosity packagedbs conf + = + sequence + [ do str <- rawSystemProgramStdoutConf verbosity lhcPkgProgram conf + ["dump", packageDbGhcPkgFlag packagedb] + `catchExit` \_ -> die $ "ghc-pkg dump failed" + case parsePackages str of + Left ok -> return (packagedb, ok) + _ -> die "failed to parse output of 'ghc-pkg dump'" + | packagedb <- packagedbs ] + + where + parsePackages str = + let parsed = map parseInstalledPackageInfo (splitPkgs str) + in case [ msg | ParseFailed msg <- parsed ] of + [] -> Left [ pkg | ParseOk _ pkg <- parsed ] + msgs -> Right msgs + + splitPkgs :: String -> [String] + splitPkgs = map unlines . splitWith ("---" ==) . 
lines + where + splitWith :: (a -> Bool) -> [a] -> [[a]] + splitWith p xs = ys : case zs of + [] -> [] + _:ws -> splitWith p ws + where (ys,zs) = break p xs + + packageDbGhcPkgFlag GlobalPackageDB = "--global" + packageDbGhcPkgFlag UserPackageDB = "--user" + packageDbGhcPkgFlag (SpecificPackageDB path) = "--" ++ packageDbFlag ++ "=" ++ path + + packageDbFlag + | programVersion lhcPkg < Just (Version [7,5] []) + = "package-conf" + | otherwise + = "package-db" + + +substTopDir :: FilePath -> InstalledPackageInfo -> InstalledPackageInfo +substTopDir topDir ipo + = ipo { + InstalledPackageInfo.importDirs + = map f (InstalledPackageInfo.importDirs ipo), + InstalledPackageInfo.libraryDirs + = map f (InstalledPackageInfo.libraryDirs ipo), + InstalledPackageInfo.includeDirs + = map f (InstalledPackageInfo.includeDirs ipo), + InstalledPackageInfo.frameworkDirs + = map f (InstalledPackageInfo.frameworkDirs ipo), + InstalledPackageInfo.haddockInterfaces + = map f (InstalledPackageInfo.haddockInterfaces ipo), + InstalledPackageInfo.haddockHTMLs + = map f (InstalledPackageInfo.haddockHTMLs ipo) + } + where f ('$':'t':'o':'p':'d':'i':'r':rest) = topDir ++ rest + f x = x + +-- ----------------------------------------------------------------------------- +-- Building + +-- | Build a library with LHC. 
+-- +buildLib :: Verbosity -> PackageDescription -> LocalBuildInfo + -> Library -> ComponentLocalBuildInfo -> IO () +buildLib verbosity pkg_descr lbi lib clbi = do + libName <- case componentLibraries clbi of + [libName] -> return libName + [] -> die "No library name found when building library" + _ -> die "Multiple library names found when building library" + + let pref = buildDir lbi + pkgid = packageId pkg_descr + runGhcProg = rawSystemProgramConf verbosity lhcProgram (withPrograms lbi) + ifVanillaLib forceVanilla = when (forceVanilla || withVanillaLib lbi) + ifProfLib = when (withProfLib lbi) + ifSharedLib = when (withSharedLib lbi) + ifGHCiLib = when (withGHCiLib lbi && withVanillaLib lbi) + + libBi <- hackThreadedFlag verbosity + (compiler lbi) (withProfLib lbi) (libBuildInfo lib) + + let libTargetDir = pref + forceVanillaLib = EnableExtension TemplateHaskell `elem` allExtensions libBi + -- TH always needs vanilla libs, even when building for profiling + + createDirectoryIfMissingVerbose verbosity True libTargetDir + -- TODO: do we need to put hs-boot files into place for mutually recursive modules? + let ghcArgs = + ["-package-name", display pkgid ] + ++ constructGHCCmdLine lbi libBi clbi libTargetDir verbosity + ++ map display (libModules lib) + lhcWrap x = ["--build-library", "--ghc-opts=" ++ unwords x] + ghcArgsProf = ghcArgs + ++ ["-prof", + "-hisuf", "p_hi", + "-osuf", "p_o" + ] + ++ hcProfOptions GHC libBi + ghcArgsShared = ghcArgs + ++ ["-dynamic", + "-hisuf", "dyn_hi", + "-osuf", "dyn_o", "-fPIC" + ] + ++ hcSharedOptions GHC libBi + unless (null (libModules lib)) $ + do ifVanillaLib forceVanillaLib (runGhcProg $ lhcWrap ghcArgs) + ifProfLib (runGhcProg $ lhcWrap ghcArgsProf) + ifSharedLib (runGhcProg $ lhcWrap ghcArgsShared) + + -- build any C sources + unless (null (cSources libBi)) $ do + info verbosity "Building C Sources..." 
+ sequence_ [do let (odir,args) = constructCcCmdLine lbi libBi clbi pref + filename verbosity + createDirectoryIfMissingVerbose verbosity True odir + runGhcProg args + ifSharedLib (runGhcProg (args ++ ["-fPIC", "-osuf dyn_o"])) + | filename <- cSources libBi] + + -- link: + info verbosity "Linking..." + let cObjs = map (`replaceExtension` objExtension) (cSources libBi) + cSharedObjs = map (`replaceExtension` ("dyn_" ++ objExtension)) (cSources libBi) + cid = compilerId (compiler lbi) + vanillaLibFilePath = libTargetDir mkLibName libName + profileLibFilePath = libTargetDir mkProfLibName libName + sharedLibFilePath = libTargetDir mkSharedLibName cid libName + ghciLibFilePath = libTargetDir mkGHCiLibName libName + + stubObjs <- fmap catMaybes $ sequence + [ findFileWithExtension [objExtension] [libTargetDir] + (ModuleName.toFilePath x ++"_stub") + | x <- libModules lib ] + stubProfObjs <- fmap catMaybes $ sequence + [ findFileWithExtension ["p_" ++ objExtension] [libTargetDir] + (ModuleName.toFilePath x ++"_stub") + | x <- libModules lib ] + stubSharedObjs <- fmap catMaybes $ sequence + [ findFileWithExtension ["dyn_" ++ objExtension] [libTargetDir] + (ModuleName.toFilePath x ++"_stub") + | x <- libModules lib ] + + hObjs <- getHaskellObjects lib lbi + pref objExtension True + hProfObjs <- + if (withProfLib lbi) + then getHaskellObjects lib lbi + pref ("p_" ++ objExtension) True + else return [] + hSharedObjs <- + if (withSharedLib lbi) + then getHaskellObjects lib lbi + pref ("dyn_" ++ objExtension) False + else return [] + + unless (null hObjs && null cObjs && null stubObjs) $ do + -- first remove library files if they exists + sequence_ + [ removeFile libFilePath `catchIO` \_ -> return () + | libFilePath <- [vanillaLibFilePath, profileLibFilePath + ,sharedLibFilePath, ghciLibFilePath] ] + + let arVerbosity | verbosity >= deafening = "v" + | verbosity >= normal = "" + | otherwise = "c" + arArgs = ["q"++ arVerbosity] + ++ [vanillaLibFilePath] + arObjArgs = + hObjs + 
++ map (pref ) cObjs + ++ stubObjs + arProfArgs = ["q"++ arVerbosity] + ++ [profileLibFilePath] + arProfObjArgs = + hProfObjs + ++ map (pref ) cObjs + ++ stubProfObjs + ldArgs = ["-r"] + ++ ["-o", ghciLibFilePath <.> "tmp"] + ldObjArgs = + hObjs + ++ map (pref ) cObjs + ++ stubObjs + ghcSharedObjArgs = + hSharedObjs + ++ map (pref ) cSharedObjs + ++ stubSharedObjs + -- After the relocation lib is created we invoke ghc -shared + -- with the dependencies spelled out as -package arguments + -- and ghc invokes the linker with the proper library paths + ghcSharedLinkArgs = + [ "-no-auto-link-packages", + "-shared", + "-dynamic", + "-o", sharedLibFilePath ] + ++ ghcSharedObjArgs + ++ ["-package-name", display pkgid ] + ++ ghcPackageFlags lbi clbi + ++ ["-l"++extraLib | extraLib <- extraLibs libBi] + ++ ["-L"++extraLibDir | extraLibDir <- extraLibDirs libBi] + + runLd ldLibName args = do + exists <- doesFileExist ldLibName + -- This method is called iteratively by xargs. The + -- output goes to .tmp, and any existing file + -- named is included when linking. The + -- output is renamed to . + rawSystemProgramConf verbosity ldProgram (withPrograms lbi) + (args ++ if exists then [ldLibName] else []) + renameFile (ldLibName <.> "tmp") ldLibName + + runAr = rawSystemProgramConf verbosity arProgram (withPrograms lbi) + + --TODO: discover this at configure time or runtime on Unix + -- The value is 32k on Windows and POSIX specifies a minimum of 4k + -- but all sensible Unixes use more than 4k. + -- we could use getSysVar ArgumentLimit but that's in the Unix lib + maxCommandLineSize = 30 * 1024 + + ifVanillaLib False $ xargs maxCommandLineSize + runAr arArgs arObjArgs + + ifProfLib $ xargs maxCommandLineSize + runAr arProfArgs arProfObjArgs + + ifGHCiLib $ xargs maxCommandLineSize + (runLd ghciLibFilePath) ldArgs ldObjArgs + + ifSharedLib $ runGhcProg ghcSharedLinkArgs + + +-- | Build an executable with LHC. 
+-- +buildExe :: Verbosity -> PackageDescription -> LocalBuildInfo + -> Executable -> ComponentLocalBuildInfo -> IO () +buildExe verbosity _pkg_descr lbi + exe@Executable { exeName = exeName', modulePath = modPath } clbi = do + let pref = buildDir lbi + runGhcProg = rawSystemProgramConf verbosity lhcProgram (withPrograms lbi) + + exeBi <- hackThreadedFlag verbosity + (compiler lbi) (withProfExe lbi) (buildInfo exe) + + -- exeNameReal, the name that GHC really uses (with .exe on Windows) + let exeNameReal = exeName' <.> + (if null $ takeExtension exeName' then exeExtension else "") + + let targetDir = pref exeName' + let exeDir = targetDir (exeName' ++ "-tmp") + createDirectoryIfMissingVerbose verbosity True targetDir + createDirectoryIfMissingVerbose verbosity True exeDir + -- TODO: do we need to put hs-boot files into place for mutually recursive modules? + -- FIX: what about exeName.hi-boot? + + -- build executables + unless (null (cSources exeBi)) $ do + info verbosity "Building C Sources." 
+ sequence_ [do let (odir,args) = constructCcCmdLine lbi exeBi clbi + exeDir filename verbosity + createDirectoryIfMissingVerbose verbosity True odir + runGhcProg args + | filename <- cSources exeBi] + + srcMainFile <- findFile (exeDir : hsSourceDirs exeBi) modPath + + let cObjs = map (`replaceExtension` objExtension) (cSources exeBi) + let lhcWrap x = ("--ghc-opts\"":x) ++ ["\""] + let binArgs linkExe profExe = + (if linkExe + then ["-o", targetDir exeNameReal] + else ["-c"]) + ++ constructGHCCmdLine lbi exeBi clbi exeDir verbosity + ++ [exeDir x | x <- cObjs] + ++ [srcMainFile] + ++ ["-optl" ++ opt | opt <- PD.ldOptions exeBi] + ++ ["-l"++lib | lib <- extraLibs exeBi] + ++ ["-L"++libDir | libDir <- extraLibDirs exeBi] + ++ concat [["-framework", f] | f <- PD.frameworks exeBi] + ++ if profExe + then ["-prof", + "-hisuf", "p_hi", + "-osuf", "p_o" + ] ++ hcProfOptions GHC exeBi + else [] + + -- For building exe's for profiling that use TH we actually + -- have to build twice, once without profiling and the again + -- with profiling. This is because the code that TH needs to + -- run at compile time needs to be the vanilla ABI so it can + -- be loaded up and run by the compiler. + when (withProfExe lbi && EnableExtension TemplateHaskell `elem` allExtensions exeBi) + (runGhcProg $ lhcWrap (binArgs False False)) + + runGhcProg (binArgs True (withProfExe lbi)) + +-- | Filter the "-threaded" flag when profiling as it does not +-- work with ghc-6.8 and older. +hackThreadedFlag :: Verbosity -> Compiler -> Bool -> BuildInfo -> IO BuildInfo +hackThreadedFlag verbosity comp prof bi + | not mustFilterThreaded = return bi + | otherwise = do + warn verbosity $ "The ghc flag '-threaded' is not compatible with " + ++ "profiling in ghc-6.8 and older. It will be disabled." 
+ return bi { options = filterHcOptions (/= "-threaded") (options bi) } + where + mustFilterThreaded = prof && compilerVersion comp < Version [6, 10] [] + && "-threaded" `elem` hcOptions GHC bi + filterHcOptions p hcoptss = + [ (hc, if hc == GHC then filter p opts else opts) + | (hc, opts) <- hcoptss ] + +-- when using -split-objs, we need to search for object files in the +-- Module_split directory for each module. +getHaskellObjects :: Library -> LocalBuildInfo + -> FilePath -> String -> Bool -> IO [FilePath] +getHaskellObjects lib lbi pref wanted_obj_ext allow_split_objs + | splitObjs lbi && allow_split_objs = do + let dirs = [ pref (ModuleName.toFilePath x ++ "_split") + | x <- libModules lib ] + objss <- mapM getDirectoryContents dirs + let objs = [ dir obj + | (objs',dir) <- zip objss dirs, obj <- objs', + let obj_ext = takeExtension obj, + '.':wanted_obj_ext == obj_ext ] + return objs + | otherwise = + return [ pref ModuleName.toFilePath x <.> wanted_obj_ext + | x <- libModules lib ] + + +constructGHCCmdLine + :: LocalBuildInfo + -> BuildInfo + -> ComponentLocalBuildInfo + -> FilePath + -> Verbosity + -> [String] +constructGHCCmdLine lbi bi clbi odir verbosity = + ["--make"] + ++ ghcVerbosityOptions verbosity + -- Unsupported extensions have already been checked by configure + ++ ghcOptions lbi bi clbi odir + +ghcVerbosityOptions :: Verbosity -> [String] +ghcVerbosityOptions verbosity + | verbosity >= deafening = ["-v"] + | verbosity >= normal = [] + | otherwise = ["-w", "-v0"] + +ghcOptions :: LocalBuildInfo -> BuildInfo -> ComponentLocalBuildInfo + -> FilePath -> [String] +ghcOptions lbi bi clbi odir + = ["-hide-all-packages"] + ++ ghcPackageDbOptions lbi + ++ (if splitObjs lbi then ["-split-objs"] else []) + ++ ["-i"] + ++ ["-i" ++ odir] + ++ ["-i" ++ l | l <- nub (hsSourceDirs bi)] + ++ ["-i" ++ autogenModulesDir lbi] + ++ ["-I" ++ autogenModulesDir lbi] + ++ ["-I" ++ odir] + ++ ["-I" ++ dir | dir <- PD.includeDirs bi] + ++ ["-optP" ++ opt | opt <- 
cppOptions bi] + ++ [ "-optP-include", "-optP"++ (autogenModulesDir lbi cppHeaderName) ] + ++ [ "-#include \"" ++ inc ++ "\"" | inc <- PD.includes bi ] + ++ [ "-odir", odir, "-hidir", odir ] + ++ (if compilerVersion c >= Version [6,8] [] + then ["-stubdir", odir] else []) + ++ ghcPackageFlags lbi clbi + ++ (case withOptimization lbi of + NoOptimisation -> [] + NormalOptimisation -> ["-O"] + MaximumOptimisation -> ["-O2"]) + ++ hcOptions GHC bi + ++ languageToFlags c (defaultLanguage bi) + ++ extensionsToFlags c (usedExtensions bi) + where c = compiler lbi + +ghcPackageFlags :: LocalBuildInfo -> ComponentLocalBuildInfo -> [String] +ghcPackageFlags lbi clbi + | ghcVer >= Version [6,11] [] + = concat [ ["-package-id", display ipkgid] + | (ipkgid, _) <- componentPackageDeps clbi ] + + | otherwise = concat [ ["-package", display pkgid] + | (_, pkgid) <- componentPackageDeps clbi ] + where + ghcVer = compilerVersion (compiler lbi) + +ghcPackageDbOptions :: LocalBuildInfo -> [String] +ghcPackageDbOptions lbi = case dbstack of + (GlobalPackageDB:UserPackageDB:dbs) -> concatMap specific dbs + (GlobalPackageDB:dbs) -> ("-no-user-" ++ packageDbFlag) + : concatMap specific dbs + _ -> ierror + where + specific (SpecificPackageDB db) = [ '-':packageDbFlag, db ] + specific _ = ierror + ierror = error ("internal error: unexpected package db stack: " ++ show dbstack) + + dbstack = withPackageDB lbi + packageDbFlag + | compilerVersion (compiler lbi) < Version [7,5] [] + = "package-conf" + | otherwise + = "package-db" + +constructCcCmdLine :: LocalBuildInfo -> BuildInfo -> ComponentLocalBuildInfo + -> FilePath -> FilePath -> Verbosity -> (FilePath,[String]) +constructCcCmdLine lbi bi clbi pref filename verbosity + = let odir | compilerVersion (compiler lbi) >= Version [6,4,1] [] = pref + | otherwise = pref takeDirectory filename + -- ghc 6.4.1 fixed a bug in -odir handling + -- for C compilations. 
+ in + (odir, + ghcCcOptions lbi bi clbi odir + ++ (if verbosity >= deafening then ["-v"] else []) + ++ ["-c",filename]) + + +ghcCcOptions :: LocalBuildInfo -> BuildInfo -> ComponentLocalBuildInfo + -> FilePath -> [String] +ghcCcOptions lbi bi clbi odir + = ["-I" ++ dir | dir <- PD.includeDirs bi] + ++ ghcPackageDbOptions lbi + ++ ghcPackageFlags lbi clbi + ++ ["-optc" ++ opt | opt <- PD.ccOptions bi] + ++ (case withOptimization lbi of + NoOptimisation -> [] + _ -> ["-optc-O2"]) + ++ ["-odir", odir] + +mkGHCiLibName :: LibraryName -> String +mkGHCiLibName (LibraryName lib) = lib <.> "o" + +-- ----------------------------------------------------------------------------- +-- Installing + +-- |Install executables for GHC. +installExe :: Verbosity + -> LocalBuildInfo + -> InstallDirs FilePath -- ^Where to copy the files to + -> FilePath -- ^Build location + -> (FilePath, FilePath) -- ^Executable (prefix,suffix) + -> PackageDescription + -> Executable + -> IO () +installExe verbosity lbi installDirs buildPref (progprefix, progsuffix) _pkg exe = do + let binDir = bindir installDirs + createDirectoryIfMissingVerbose verbosity True binDir + let exeFileName = exeName exe <.> exeExtension + fixedExeBaseName = progprefix ++ exeName exe ++ progsuffix + installBinary dest = do + installExecutableFile verbosity + (buildPref exeName exe exeFileName) + (dest <.> exeExtension) + stripExe verbosity lbi exeFileName (dest <.> exeExtension) + installBinary (binDir fixedExeBaseName) + +stripExe :: Verbosity -> LocalBuildInfo -> FilePath -> FilePath -> IO () +stripExe verbosity lbi name path = when (stripExes lbi) $ + case lookupProgram stripProgram (withPrograms lbi) of + Just strip -> rawSystemProgram verbosity strip args + Nothing -> unless (buildOS == Windows) $ + -- Don't bother warning on windows, we don't expect them to + -- have the strip program anyway. 
+ warn verbosity $ "Unable to strip executable '" ++ name + ++ "' (missing the 'strip' program)" + where + args = path : case buildOS of + OSX -> ["-x"] -- By default, stripping the ghc binary on at least + -- some OS X installations causes: + -- HSbase-3.0.o: unknown symbol `_environ'" + -- The -x flag fixes that. + _ -> [] + +-- |Install for ghc, .hi, .a and, if --with-ghci given, .o +installLib :: Verbosity + -> LocalBuildInfo + -> FilePath -- ^install location + -> FilePath -- ^install location for dynamic libraries + -> FilePath -- ^Build location + -> PackageDescription + -> Library + -> ComponentLocalBuildInfo + -> IO () +installLib verbosity lbi targetDir dynlibTargetDir builtDir _pkg lib clbi = do + -- copy .hi files over: + let copy src dst n = do + createDirectoryIfMissingVerbose verbosity True dst + installOrdinaryFile verbosity (src n) (dst n) + copyModuleFiles ext = + findModuleFiles [builtDir] [ext] (libModules lib) + >>= installOrdinaryFiles verbosity targetDir + ifVanilla $ copyModuleFiles "hi" + ifProf $ copyModuleFiles "p_hi" + hcrFiles <- findModuleFiles (builtDir : hsSourceDirs (libBuildInfo lib)) ["hcr"] (libModules lib) + flip mapM_ hcrFiles $ \(srcBase, srcFile) -> runLhc ["--install-library", srcBase srcFile] + + -- copy the built library files over: + ifVanilla $ mapM_ (copy builtDir targetDir) vanillaLibNames + ifProf $ mapM_ (copy builtDir targetDir) profileLibNames + ifGHCi $ mapM_ (copy builtDir targetDir) ghciLibNames + ifShared $ mapM_ (copy builtDir dynlibTargetDir) sharedLibNames + + where + cid = compilerId (compiler lbi) + libNames = componentLibraries clbi + vanillaLibNames = map mkLibName libNames + profileLibNames = map mkProfLibName libNames + ghciLibNames = map mkGHCiLibName libNames + sharedLibNames = map (mkSharedLibName cid) libNames + + hasLib = not $ null (libModules lib) + && null (cSources (libBuildInfo lib)) + ifVanilla = when (hasLib && withVanillaLib lbi) + ifProf = when (hasLib && withProfLib lbi) + ifGHCi = when 
(hasLib && withGHCiLib lbi) + ifShared = when (hasLib && withSharedLib lbi) + + runLhc = rawSystemProgramConf verbosity lhcProgram (withPrograms lbi) + +-- ----------------------------------------------------------------------------- +-- Registering + +registerPackage + :: Verbosity + -> InstalledPackageInfo + -> PackageDescription + -> LocalBuildInfo + -> Bool + -> PackageDBStack + -> IO () +registerPackage verbosity installedPkgInfo _pkg lbi _inplace packageDbs = + HcPkg.reregister (hcPkgInfo $ withPrograms lbi) verbosity packageDbs + (Right installedPkgInfo) + +hcPkgInfo :: ProgramConfiguration -> HcPkg.HcPkgInfo +hcPkgInfo conf = HcPkg.HcPkgInfo { HcPkg.hcPkgProgram = lhcPkgProg + , HcPkg.noPkgDbStack = False + , HcPkg.noVerboseFlag = False + , HcPkg.flagPackageConf = False + , HcPkg.useSingleFileDb = True + } + where + Just lhcPkgProg = lookupProgram lhcPkgProgram conf diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/LocalBuildInfo.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/LocalBuildInfo.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/LocalBuildInfo.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/LocalBuildInfo.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,504 @@ +{-# LANGUAGE DeriveGeneric #-} + +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.LocalBuildInfo +-- Copyright : Isaac Jones 2003-2004 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- Once a package has been configured we have resolved conditionals and +-- dependencies, configured the compiler and other needed external programs. +-- The 'LocalBuildInfo' is used to hold all this information. 
It holds the +-- install dirs, the compiler, the exact package dependencies, the configured +-- programs, the package database to use and a bunch of miscellaneous configure +-- flags. It gets saved and reloaded from a file (@dist\/setup-config@). It gets +-- passed in to very many subsequent build actions. + +module Distribution.Simple.LocalBuildInfo ( + LocalBuildInfo(..), + externalPackageDeps, + inplacePackageId, + + -- * Buildable package components + Component(..), + ComponentName(..), + showComponentName, + ComponentLocalBuildInfo(..), + LibraryName(..), + foldComponent, + componentName, + componentBuildInfo, + componentEnabled, + componentDisabledReason, + ComponentDisabledReason(..), + pkgComponents, + pkgEnabledComponents, + lookupComponent, + getComponent, + getComponentLocalBuildInfo, + allComponentsInBuildOrder, + componentsInBuildOrder, + checkComponentsCyclic, + depLibraryPaths, + + withAllComponentsInBuildOrder, + withComponentsInBuildOrder, + withComponentsLBI, + withLibLBI, + withExeLBI, + withTestLBI, + + -- * Installation directories + module Distribution.Simple.InstallDirs, + absoluteInstallDirs, prefixRelativeInstallDirs, + substPathTemplate + ) where + + +import Distribution.Simple.InstallDirs hiding (absoluteInstallDirs, + prefixRelativeInstallDirs, + substPathTemplate, ) +import qualified Distribution.Simple.InstallDirs as InstallDirs +import Distribution.Simple.Program (ProgramConfiguration) +import Distribution.InstalledPackageInfo (InstalledPackageInfo) +import Distribution.PackageDescription + ( PackageDescription(..), withLib, Library(libBuildInfo), withExe + , Executable(exeName, buildInfo), withTest, TestSuite(..) + , BuildInfo(buildable), Benchmark(..), ModuleRenaming(..) 
) +import qualified Distribution.InstalledPackageInfo as Installed +import Distribution.Package + ( PackageId, Package(..), InstalledPackageId(..), PackageKey + , PackageName ) +import Distribution.Simple.Compiler + ( Compiler, compilerInfo, PackageDBStack, DebugInfoLevel + , OptimisationLevel ) +import Distribution.Simple.PackageIndex + ( InstalledPackageIndex, allPackages ) +import Distribution.ModuleName ( ModuleName ) +import Distribution.Simple.Setup + ( ConfigFlags ) +import Distribution.Simple.Utils + ( shortRelativePath ) +import Distribution.Text + ( display ) +import Distribution.System + ( Platform (..) ) + +import Data.Array ((!)) +import Distribution.Compat.Binary (Binary) +import Data.Graph +import Data.List (nub, find, stripPrefix) +import Data.Maybe +import Data.Tree (flatten) +import GHC.Generics (Generic) +import Data.Map (Map) + +import System.Directory (doesDirectoryExist, canonicalizePath) + +-- | Data cached after configuration step. See also +-- 'Distribution.Simple.Setup.ConfigFlags'. +data LocalBuildInfo = LocalBuildInfo { + configFlags :: ConfigFlags, + -- ^ Options passed to the configuration step. + -- Needed to re-run configuration when .cabal is out of date + extraConfigArgs :: [String], + -- ^ Extra args on the command line for the configuration step. + -- Needed to re-run configuration when .cabal is out of date + installDirTemplates :: InstallDirTemplates, + -- ^ The installation directories for the various different + -- kinds of files + --TODO: inplaceDirTemplates :: InstallDirs FilePath + compiler :: Compiler, + -- ^ The compiler we're building with + hostPlatform :: Platform, + -- ^ The platform we're building for + buildDir :: FilePath, + -- ^ Where to build the package. 
+ componentsConfigs :: [(ComponentName, ComponentLocalBuildInfo, [ComponentName])], + -- ^ All the components to build, ordered by topological sort, and with their dependencies + -- over the intrapackage dependency graph + installedPkgs :: InstalledPackageIndex, + -- ^ All the info about the installed packages that the + -- current package depends on (directly or indirectly). + pkgDescrFile :: Maybe FilePath, + -- ^ the filename containing the .cabal file, if available + localPkgDescr :: PackageDescription, + -- ^ The resolved package description, that does not contain + -- any conditionals. + pkgKey :: PackageKey, + -- ^ The package key for the current build, calculated from + -- the package ID and the dependency graph. + instantiatedWith :: [(ModuleName, (InstalledPackageInfo, ModuleName))], + withPrograms :: ProgramConfiguration, -- ^Location and args for all programs + withPackageDB :: PackageDBStack, -- ^What package database to use, global\/user + withVanillaLib:: Bool, -- ^Whether to build normal libs. + withProfLib :: Bool, -- ^Whether to build profiling versions of libs. + withSharedLib :: Bool, -- ^Whether to build shared versions of libs. + withDynExe :: Bool, -- ^Whether to link executables dynamically + withProfExe :: Bool, -- ^Whether to build executables for profiling. + withOptimization :: OptimisationLevel, -- ^Whether to build with optimization (if available). + withDebugInfo :: DebugInfoLevel, -- ^Whether to emit debug info (if available). + withGHCiLib :: Bool, -- ^Whether to build libs suitable for use with GHCi. 
+ splitObjs :: Bool, -- ^Use -split-objs with GHC, if available + stripExes :: Bool, -- ^Whether to strip executables during install + stripLibs :: Bool, -- ^Whether to strip libraries during install + progPrefix :: PathTemplate, -- ^Prefix to be prepended to installed executables + progSuffix :: PathTemplate, -- ^Suffix to be appended to installed executables + relocatable :: Bool -- ^Whether to build a relocatable package + } deriving (Generic, Read, Show) + +instance Binary LocalBuildInfo + +-- | External package dependencies for the package as a whole. This is the +-- union of the individual 'componentPackageDeps', less any internal deps. +externalPackageDeps :: LocalBuildInfo -> [(InstalledPackageId, PackageId)] +externalPackageDeps lbi = + -- TODO: what about non-buildable components? + nub [ (ipkgid, pkgid) + | (_,clbi,_) <- componentsConfigs lbi + , (ipkgid, pkgid) <- componentPackageDeps clbi + , not (internal pkgid) ] + where + -- True if this dependency is an internal one (depends on the library + -- defined in the same package). + internal pkgid = pkgid == packageId (localPkgDescr lbi) + +-- | The installed package Id we use for local packages registered in the local +-- package db. This is what is used for intra-package deps between components. 
+-- +inplacePackageId :: PackageId -> InstalledPackageId +inplacePackageId pkgid = InstalledPackageId (display pkgid ++ "-inplace") + +-- ----------------------------------------------------------------------------- +-- Buildable components + +data Component = CLib Library + | CExe Executable + | CTest TestSuite + | CBench Benchmark + deriving (Show, Eq, Read) + +data ComponentName = CLibName -- currently only a single lib + | CExeName String + | CTestName String + | CBenchName String + deriving (Eq, Generic, Ord, Read, Show) + +instance Binary ComponentName + +showComponentName :: ComponentName -> String +showComponentName CLibName = "library" +showComponentName (CExeName name) = "executable '" ++ name ++ "'" +showComponentName (CTestName name) = "test suite '" ++ name ++ "'" +showComponentName (CBenchName name) = "benchmark '" ++ name ++ "'" + +data ComponentLocalBuildInfo + = LibComponentLocalBuildInfo { + -- | Resolved internal and external package dependencies for this component. + -- The 'BuildInfo' specifies a set of build dependencies that must be + -- satisfied in terms of version ranges. This field fixes those dependencies + -- to the specific versions available on this machine for this compiler. 
+ componentPackageDeps :: [(InstalledPackageId, PackageId)], + componentExposedModules :: [Installed.ExposedModule], + componentPackageRenaming :: Map PackageName ModuleRenaming, + componentLibraries :: [LibraryName] + } + | ExeComponentLocalBuildInfo { + componentPackageDeps :: [(InstalledPackageId, PackageId)], + componentPackageRenaming :: Map PackageName ModuleRenaming + } + | TestComponentLocalBuildInfo { + componentPackageDeps :: [(InstalledPackageId, PackageId)], + componentPackageRenaming :: Map PackageName ModuleRenaming + } + | BenchComponentLocalBuildInfo { + componentPackageDeps :: [(InstalledPackageId, PackageId)], + componentPackageRenaming :: Map PackageName ModuleRenaming + } + deriving (Generic, Read, Show) + +instance Binary ComponentLocalBuildInfo + +foldComponent :: (Library -> a) + -> (Executable -> a) + -> (TestSuite -> a) + -> (Benchmark -> a) + -> Component + -> a +foldComponent f _ _ _ (CLib lib) = f lib +foldComponent _ f _ _ (CExe exe) = f exe +foldComponent _ _ f _ (CTest tst) = f tst +foldComponent _ _ _ f (CBench bch) = f bch + +data LibraryName = LibraryName String + deriving (Generic, Read, Show) + +instance Binary LibraryName + +componentBuildInfo :: Component -> BuildInfo +componentBuildInfo = + foldComponent libBuildInfo buildInfo testBuildInfo benchmarkBuildInfo + +componentName :: Component -> ComponentName +componentName = + foldComponent (const CLibName) + (CExeName . exeName) + (CTestName . testName) + (CBenchName . benchmarkName) + +-- | All the components in the package (libs, exes, or test suites). +-- +pkgComponents :: PackageDescription -> [Component] +pkgComponents pkg = + [ CLib lib | Just lib <- [library pkg] ] + ++ [ CExe exe | exe <- executables pkg ] + ++ [ CTest tst | tst <- testSuites pkg ] + ++ [ CBench bm | bm <- benchmarks pkg ] + +-- | All the components in the package that are buildable and enabled. +-- Thus this excludes non-buildable components and test suites or benchmarks +-- that have been disabled. 
+-- +pkgEnabledComponents :: PackageDescription -> [Component] +pkgEnabledComponents = filter componentEnabled . pkgComponents + +componentEnabled :: Component -> Bool +componentEnabled = isNothing . componentDisabledReason + +data ComponentDisabledReason = DisabledComponent + | DisabledAllTests + | DisabledAllBenchmarks + +componentDisabledReason :: Component -> Maybe ComponentDisabledReason +componentDisabledReason (CLib lib) + | not (buildable (libBuildInfo lib)) = Just DisabledComponent +componentDisabledReason (CExe exe) + | not (buildable (buildInfo exe)) = Just DisabledComponent +componentDisabledReason (CTest tst) + | not (buildable (testBuildInfo tst)) = Just DisabledComponent + | not (testEnabled tst) = Just DisabledAllTests +componentDisabledReason (CBench bm) + | not (buildable (benchmarkBuildInfo bm)) = Just DisabledComponent + | not (benchmarkEnabled bm) = Just DisabledAllBenchmarks +componentDisabledReason _ = Nothing + +lookupComponent :: PackageDescription -> ComponentName -> Maybe Component +lookupComponent pkg CLibName = + fmap CLib $ library pkg +lookupComponent pkg (CExeName name) = + fmap CExe $ find ((name ==) . exeName) (executables pkg) +lookupComponent pkg (CTestName name) = + fmap CTest $ find ((name ==) . testName) (testSuites pkg) +lookupComponent pkg (CBenchName name) = + fmap CBench $ find ((name ==) . 
benchmarkName) (benchmarks pkg) + +getComponent :: PackageDescription -> ComponentName -> Component +getComponent pkg cname = + case lookupComponent pkg cname of + Just cpnt -> cpnt + Nothing -> missingComponent + where + missingComponent = + error $ "internal error: the package description contains no " + ++ "component corresponding to " ++ show cname + + +getComponentLocalBuildInfo :: LocalBuildInfo -> ComponentName -> ComponentLocalBuildInfo +getComponentLocalBuildInfo lbi cname = + case [ clbi + | (cname', clbi, _) <- componentsConfigs lbi + , cname == cname' ] of + [clbi] -> clbi + _ -> missingComponent + where + missingComponent = + error $ "internal error: there is no configuration data " + ++ "for component " ++ show cname + + +-- |If the package description has a library section, call the given +-- function with the library build info as argument. Extended version of +-- 'withLib' that also gives corresponding build info. +withLibLBI :: PackageDescription -> LocalBuildInfo + -> (Library -> ComponentLocalBuildInfo -> IO ()) -> IO () +withLibLBI pkg_descr lbi f = + withLib pkg_descr $ \lib -> + f lib (getComponentLocalBuildInfo lbi CLibName) + +-- | Perform the action on each buildable 'Executable' in the package +-- description. Extended version of 'withExe' that also gives corresponding +-- build info. 
+withExeLBI :: PackageDescription -> LocalBuildInfo + -> (Executable -> ComponentLocalBuildInfo -> IO ()) -> IO () +withExeLBI pkg_descr lbi f = + withExe pkg_descr $ \exe -> + f exe (getComponentLocalBuildInfo lbi (CExeName (exeName exe))) + +withTestLBI :: PackageDescription -> LocalBuildInfo + -> (TestSuite -> ComponentLocalBuildInfo -> IO ()) -> IO () +withTestLBI pkg_descr lbi f = + withTest pkg_descr $ \test -> + f test (getComponentLocalBuildInfo lbi (CTestName (testName test))) + +{-# DEPRECATED withComponentsLBI "Use withAllComponentsInBuildOrder" #-} +withComponentsLBI :: PackageDescription -> LocalBuildInfo + -> (Component -> ComponentLocalBuildInfo -> IO ()) + -> IO () +withComponentsLBI = withAllComponentsInBuildOrder + +-- | Perform the action on each buildable 'Library' or 'Executable' (Component) +-- in the PackageDescription, subject to the build order specified by the +-- 'compBuildOrder' field of the given 'LocalBuildInfo' +withAllComponentsInBuildOrder :: PackageDescription -> LocalBuildInfo + -> (Component -> ComponentLocalBuildInfo -> IO ()) + -> IO () +withAllComponentsInBuildOrder pkg lbi f = + sequence_ + [ f (getComponent pkg cname) clbi + | (cname, clbi) <- allComponentsInBuildOrder lbi ] + +withComponentsInBuildOrder :: PackageDescription -> LocalBuildInfo + -> [ComponentName] + -> (Component -> ComponentLocalBuildInfo -> IO ()) + -> IO () +withComponentsInBuildOrder pkg lbi cnames f = + sequence_ + [ f (getComponent pkg cname') clbi + | (cname', clbi) <- componentsInBuildOrder lbi cnames ] + +allComponentsInBuildOrder :: LocalBuildInfo + -> [(ComponentName, ComponentLocalBuildInfo)] +allComponentsInBuildOrder lbi = + componentsInBuildOrder lbi + [ cname | (cname, _, _) <- componentsConfigs lbi ] + +componentsInBuildOrder :: LocalBuildInfo -> [ComponentName] + -> [(ComponentName, ComponentLocalBuildInfo)] +componentsInBuildOrder lbi cnames = + map ((\(clbi,cname,_) -> (cname,clbi)) . vertexToNode) + . postOrder graph + . 
map (\cname -> fromMaybe (noSuchComp cname) (keyToVertex cname)) + $ cnames + where + (graph, vertexToNode, keyToVertex) = + graphFromEdges (map (\(a,b,c) -> (b,a,c)) (componentsConfigs lbi)) + + noSuchComp cname = error $ "internal error: componentsInBuildOrder: " + ++ "no such component: " ++ show cname + + postOrder :: Graph -> [Vertex] -> [Vertex] + postOrder g vs = postorderF (dfs g vs) [] + + postorderF :: Forest a -> [a] -> [a] + postorderF ts = foldr (.) id $ map postorderT ts + + postorderT :: Tree a -> [a] -> [a] + postorderT (Node a ts) = postorderF ts . (a :) + +checkComponentsCyclic :: Ord key => [(node, key, [key])] + -> Maybe [(node, key, [key])] +checkComponentsCyclic es = + let (graph, vertexToNode, _) = graphFromEdges es + cycles = [ flatten c | c <- scc graph, isCycle c ] + isCycle (Node v []) = selfCyclic v + isCycle _ = True + selfCyclic v = v `elem` graph ! v + in case cycles of + [] -> Nothing + (c:_) -> Just (map vertexToNode c) + +-- | Determine the directories containing the dynamic libraries of the +-- transitive dependencies of the component we are building. +-- +-- When wanted, and possible, returns paths relative to the installDirs 'prefix' +depLibraryPaths :: Bool -- ^ Building for inplace? 
+ -> Bool -- ^ Generate prefix-relative library paths + -> LocalBuildInfo + -> ComponentLocalBuildInfo -- ^ Component that is being built + -> IO [FilePath] +depLibraryPaths inplace relative lbi clbi = do + let pkgDescr = localPkgDescr lbi + installDirs = absoluteInstallDirs pkgDescr lbi NoCopyDest + executable = case clbi of + ExeComponentLocalBuildInfo {} -> True + _ -> False + relDir | executable = bindir installDirs + | otherwise = libdir installDirs + + let hasInternalDeps = not $ null + $ [ pkgid + | (_,pkgid) <- componentPackageDeps clbi + , internal pkgid + ] + + let ipkgs = allPackages (installedPkgs lbi) + allDepLibDirs = concatMap Installed.libraryDirs ipkgs + internalLib + | inplace = buildDir lbi + | otherwise = libdir installDirs + allDepLibDirs' = if hasInternalDeps + then internalLib : allDepLibDirs + else allDepLibDirs + allDepLibDirsC <- mapM canonicalizePathNoFail allDepLibDirs' + + let p = prefix installDirs + prefixRelative l = isJust (stripPrefix p l) + libPaths + | relative && + prefixRelative relDir = map (\l -> + if prefixRelative l + then shortRelativePath relDir l + else l + ) allDepLibDirsC + | otherwise = allDepLibDirsC + + return libPaths + where + internal pkgid = pkgid == packageId (localPkgDescr lbi) + -- 'canonicalizePath' fails on UNIX when the directory does not exists. + -- So just don't canonicalize when it doesn't exist. 
+ canonicalizePathNoFail p = do + exists <- doesDirectoryExist p + if exists + then canonicalizePath p + else return p + + +-- ----------------------------------------------------------------------------- +-- Wrappers for a couple functions from InstallDirs + +-- |See 'InstallDirs.absoluteInstallDirs' +absoluteInstallDirs :: PackageDescription -> LocalBuildInfo -> CopyDest + -> InstallDirs FilePath +absoluteInstallDirs pkg lbi copydest = + InstallDirs.absoluteInstallDirs + (packageId pkg) + (pkgKey lbi) + (compilerInfo (compiler lbi)) + copydest + (hostPlatform lbi) + (installDirTemplates lbi) + +-- |See 'InstallDirs.prefixRelativeInstallDirs' +prefixRelativeInstallDirs :: PackageId -> LocalBuildInfo + -> InstallDirs (Maybe FilePath) +prefixRelativeInstallDirs pkg_descr lbi = + InstallDirs.prefixRelativeInstallDirs + (packageId pkg_descr) + (pkgKey lbi) + (compilerInfo (compiler lbi)) + (hostPlatform lbi) + (installDirTemplates lbi) + +substPathTemplate :: PackageId -> LocalBuildInfo + -> PathTemplate -> FilePath +substPathTemplate pkgid lbi = fromPathTemplate + . 
( InstallDirs.substPathTemplate env ) + where env = initialPathTemplateEnv + pkgid + (pkgKey lbi) + (compilerInfo (compiler lbi)) + (hostPlatform lbi) diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/PackageIndex.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/PackageIndex.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/PackageIndex.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/PackageIndex.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,695 @@ +{-# LANGUAGE CPP #-} +{-# LANGUAGE DeriveGeneric #-} + +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.PackageIndex +-- Copyright : (c) David Himmelstrup 2005, +-- Bjorn Bringert 2007, +-- Duncan Coutts 2008-2009 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- An index of packages. +-- +module Distribution.Simple.PackageIndex ( + -- * Package index data type + InstalledPackageIndex, + PackageIndex, + FakeMap, + + -- * Creating an index + fromList, + + -- * Updates + merge, + + insert, + + deleteInstalledPackageId, + deleteSourcePackageId, + deletePackageName, +-- deleteDependency, + + -- * Queries + + -- ** Precise lookups + lookupInstalledPackageId, + lookupSourcePackageId, + lookupPackageId, + lookupPackageName, + lookupDependency, + + -- ** Case-insensitive searches + searchByName, + SearchResult(..), + searchByNameSubstring, + + -- ** Bulk queries + allPackages, + allPackagesByName, + allPackagesBySourcePackageId, + + -- ** Special queries + brokenPackages, + dependencyClosure, + reverseDependencyClosure, + topologicalOrder, + reverseTopologicalOrder, + dependencyInconsistencies, + dependencyCycles, + dependencyGraph, + moduleNameIndex, + + -- ** Variants of special queries supporting fake map + fakeLookupInstalledPackageId, + brokenPackages', + dependencyClosure', + 
reverseDependencyClosure', + dependencyInconsistencies', + dependencyCycles', + dependencyGraph', + ) where + +import Control.Exception (assert) +import Data.Array ((!)) +import qualified Data.Array as Array +import Distribution.Compat.Binary (Binary) +import qualified Data.Graph as Graph +import Data.List as List + ( null, foldl', sort + , groupBy, sortBy, find, isInfixOf, nubBy, deleteBy, deleteFirstsBy ) +#if __GLASGOW_HASKELL__ < 710 +import Data.Monoid (Monoid(..)) +#endif +import Data.Map (Map) +import qualified Data.Map as Map +import Data.Maybe (isNothing, fromMaybe) +import qualified Data.Tree as Tree +import GHC.Generics (Generic) +import Prelude hiding (lookup) + +import Distribution.Package + ( PackageName(..), PackageId + , Package(..), packageName, packageVersion + , Dependency(Dependency)--, --PackageFixedDeps(..) + , InstalledPackageId(..), PackageInstalled(..) ) +import Distribution.ModuleName + ( ModuleName ) +import Distribution.InstalledPackageInfo + ( InstalledPackageInfo ) +import qualified Distribution.InstalledPackageInfo as IPI +import Distribution.Version + ( Version, withinRange ) +import Distribution.Simple.Utils (lowercase, comparing, equating) + +-- Note [FakeMap] +----------------- +-- We'd like to use the PackageIndex defined in this module for +-- cabal-install's InstallPlan. However, at the moment, this +-- data structure is indexed by InstalledPackageId, which we don't +-- know until after we've compiled a package (whereas InstallPlan +-- needs to store not-compiled packages in the index.) Eventually, +-- an InstalledPackageId will be calculatable prior to actually +-- building the package (making it something of a misnomer), but +-- at the moment, the "fake installed package ID map" is a workaround +-- to solve this problem while reusing PackageIndex. 
The basic idea +-- is that, since we don't know what an InstalledPackageId is +-- beforehand, we just fake up one based on the package ID (it only +-- needs to be unique for the particular install plan), and fill +-- it out with the actual generated InstalledPackageId after the +-- package is successfully compiled. +-- +-- However, there is a problem: in the index there may be +-- references using the old package ID, which are now dangling if +-- we update the InstalledPackageId. We could map over the entire +-- index to update these pointers as well (a costly operation), but +-- instead, we've chosen to parametrize a variety of important functions +-- by a FakeMap, which records what a fake installed package ID was +-- actually resolved to post-compilation. If we do a lookup, we first +-- check and see if it's a fake ID in the FakeMap. +-- +-- It's a bit grungy, but we expect this to only be temporary anyway. +-- (Another possible workaround would have been to *not* update +-- the installed package ID, but I decided this would be hard to +-- understand.) + +-- | Map from fake installed package IDs to real ones. See Note [FakeMap] +type FakeMap = Map InstalledPackageId InstalledPackageId + +-- | The collection of information about packages from one or more 'PackageDB's. +-- These packages generally should have an instance of 'PackageInstalled' +-- +-- Packages are uniquely identified in by their 'InstalledPackageId', they can +-- also be efficiently looked up by package name or by name and version. +-- +data PackageIndex a = PackageIndex + -- The primary index. Each InstalledPackageInfo record is uniquely identified + -- by its InstalledPackageId. + -- + !(Map InstalledPackageId a) + + -- This auxiliary index maps package names (case-sensitively) to all the + -- versions and instances of that package. This allows us to find all + -- versions satisfying a dependency. + -- + -- It is a three-level index. 
The first level is the package name, + -- the second is the package version and the final level is instances + -- of the same package version. These are unique by InstalledPackageId + -- and are kept in preference order. + -- + -- FIXME: Clarify what "preference order" means. Check that this invariant is + -- preserved. See #1463 for discussion. + !(Map PackageName (Map Version [a])) + + deriving (Generic, Show, Read) + +instance Binary a => Binary (PackageIndex a) + +-- | The default package index which contains 'InstalledPackageInfo'. Normally +-- use this. +type InstalledPackageIndex = PackageIndex InstalledPackageInfo + +instance PackageInstalled a => Monoid (PackageIndex a) where + mempty = PackageIndex Map.empty Map.empty + mappend = merge + --save one mappend with empty in the common case: + mconcat [] = mempty + mconcat xs = foldr1 mappend xs + +invariant :: PackageInstalled a => PackageIndex a -> Bool +invariant (PackageIndex pids pnames) = + map installedPackageId (Map.elems pids) + == sort + [ assert pinstOk (installedPackageId pinst) + | (pname, pvers) <- Map.toList pnames + , let pversOk = not (Map.null pvers) + , (pver, pinsts) <- assert pversOk $ Map.toList pvers + , let pinsts' = sortBy (comparing installedPackageId) pinsts + pinstsOk = all (\g -> length g == 1) + (groupBy (equating installedPackageId) pinsts') + , pinst <- assert pinstsOk $ pinsts' + , let pinstOk = packageName pinst == pname + && packageVersion pinst == pver + ] + + +-- +-- * Internal helpers +-- + +mkPackageIndex :: PackageInstalled a + => Map InstalledPackageId a + -> Map PackageName (Map Version [a]) + -> PackageIndex a +mkPackageIndex pids pnames = assert (invariant index) index + where index = PackageIndex pids pnames + + +-- +-- * Construction +-- + +-- | Build an index out of a bunch of packages. +-- +-- If there are duplicates by 'InstalledPackageId' then later ones mask earlier +-- ones. 
+-- +fromList :: PackageInstalled a => [a] -> PackageIndex a +fromList pkgs = mkPackageIndex pids pnames + where + pids = Map.fromList [ (installedPackageId pkg, pkg) | pkg <- pkgs ] + pnames = + Map.fromList + [ (packageName (head pkgsN), pvers) + | pkgsN <- groupBy (equating packageName) + . sortBy (comparing packageId) + $ pkgs + , let pvers = + Map.fromList + [ (packageVersion (head pkgsNV), + nubBy (equating installedPackageId) (reverse pkgsNV)) + | pkgsNV <- groupBy (equating packageVersion) pkgsN + ] + ] + +-- +-- * Updates +-- + +-- | Merge two indexes. +-- +-- Packages from the second mask packages from the first if they have the exact +-- same 'InstalledPackageId'. +-- +-- For packages with the same source 'PackageId', packages from the second are +-- \"preferred\" over those from the first. Being preferred means they are top +-- result when we do a lookup by source 'PackageId'. This is the mechanism we +-- use to prefer user packages over global packages. +-- +merge :: PackageInstalled a => PackageIndex a -> PackageIndex a -> PackageIndex a +merge (PackageIndex pids1 pnames1) (PackageIndex pids2 pnames2) = + mkPackageIndex (Map.unionWith (\_ y -> y) pids1 pids2) + (Map.unionWith (Map.unionWith mergeBuckets) pnames1 pnames2) + where + -- Packages in the second list mask those in the first, however preferred + -- packages go first in the list. + mergeBuckets xs ys = ys ++ (xs \\ ys) + (\\) = deleteFirstsBy (equating installedPackageId) + + +-- | Inserts a single package into the index. +-- +-- This is equivalent to (but slightly quicker than) using 'mappend' or +-- 'merge' with a singleton index. 
+-- +insert :: PackageInstalled a => a -> PackageIndex a -> PackageIndex a +insert pkg (PackageIndex pids pnames) = + mkPackageIndex pids' pnames' + + where + pids' = Map.insert (installedPackageId pkg) pkg pids + pnames' = insertPackageName pnames + insertPackageName = + Map.insertWith' (\_ -> insertPackageVersion) + (packageName pkg) + (Map.singleton (packageVersion pkg) [pkg]) + + insertPackageVersion = + Map.insertWith' (\_ -> insertPackageInstance) + (packageVersion pkg) [pkg] + + insertPackageInstance pkgs = + pkg : deleteBy (equating installedPackageId) pkg pkgs + + +-- | Removes a single installed package from the index. +-- +deleteInstalledPackageId :: PackageInstalled a => InstalledPackageId -> PackageIndex a -> PackageIndex a +deleteInstalledPackageId ipkgid original@(PackageIndex pids pnames) = + case Map.updateLookupWithKey (\_ _ -> Nothing) ipkgid pids of + (Nothing, _) -> original + (Just spkgid, pids') -> mkPackageIndex pids' + (deletePkgName spkgid pnames) + + where + deletePkgName spkgid = + Map.update (deletePkgVersion spkgid) (packageName spkgid) + + deletePkgVersion spkgid = + (\m -> if Map.null m then Nothing else Just m) + . Map.update deletePkgInstance (packageVersion spkgid) + + deletePkgInstance = + (\xs -> if List.null xs then Nothing else Just xs) + . List.deleteBy (\_ pkg -> installedPackageId pkg == ipkgid) undefined + + +-- | Removes all packages with this source 'PackageId' from the index. +-- +deleteSourcePackageId :: PackageInstalled a => PackageId -> PackageIndex a -> PackageIndex a +deleteSourcePackageId pkgid original@(PackageIndex pids pnames) = + case Map.lookup (packageName pkgid) pnames of + Nothing -> original + Just pvers -> case Map.lookup (packageVersion pkgid) pvers of + Nothing -> original + Just pkgs -> mkPackageIndex + (foldl' (flip (Map.delete . 
installedPackageId)) pids pkgs) + (deletePkgName pnames) + where + deletePkgName = + Map.update deletePkgVersion (packageName pkgid) + + deletePkgVersion = + (\m -> if Map.null m then Nothing else Just m) + . Map.delete (packageVersion pkgid) + + +-- | Removes all packages with this (case-sensitive) name from the index. +-- +deletePackageName :: PackageInstalled a => PackageName -> PackageIndex a -> PackageIndex a +deletePackageName name original@(PackageIndex pids pnames) = + case Map.lookup name pnames of + Nothing -> original + Just pvers -> mkPackageIndex + (foldl' (flip (Map.delete . installedPackageId)) pids + (concat (Map.elems pvers))) + (Map.delete name pnames) + +{- +-- | Removes all packages satisfying this dependency from the index. +-- +deleteDependency :: Dependency -> PackageIndex -> PackageIndex +deleteDependency (Dependency name verstionRange) = + delete' name (\pkg -> packageVersion pkg `withinRange` verstionRange) +-} + +-- +-- * Bulk queries +-- + +-- | Get all the packages from the index. +-- +allPackages :: PackageIndex a -> [a] +allPackages (PackageIndex pids _) = Map.elems pids + +-- | Get all the packages from the index. +-- +-- They are grouped by package name (case-sensitively). +-- +allPackagesByName :: PackageIndex a -> [(PackageName, [a])] +allPackagesByName (PackageIndex _ pnames) = + [ (pkgname, concat (Map.elems pvers)) + | (pkgname, pvers) <- Map.toList pnames ] + +-- | Get all the packages from the index. +-- +-- They are grouped by source package id (package name and version). +-- +allPackagesBySourcePackageId :: PackageInstalled a => PackageIndex a -> [(PackageId, [a])] +allPackagesBySourcePackageId (PackageIndex _ pnames) = + [ (packageId ipkg, ipkgs) + | pvers <- Map.elems pnames + , ipkgs@(ipkg:_) <- Map.elems pvers ] + +-- +-- * Lookups +-- + +-- | Does a lookup by source package id (name & version). +-- +-- Since multiple package DBs mask each other by 'InstalledPackageId', +-- then we get back at most one package. 
+-- +lookupInstalledPackageId :: PackageInstalled a => PackageIndex a -> InstalledPackageId + -> Maybe a +lookupInstalledPackageId (PackageIndex pids _) pid = Map.lookup pid pids + + +-- | Does a lookup by source package id (name & version). +-- +-- There can be multiple installed packages with the same source 'PackageId' +-- but different 'InstalledPackageId'. They are returned in order of +-- preference, with the most preferred first. +-- +lookupSourcePackageId :: PackageInstalled a => PackageIndex a -> PackageId -> [a] +lookupSourcePackageId (PackageIndex _ pnames) pkgid = + case Map.lookup (packageName pkgid) pnames of + Nothing -> [] + Just pvers -> case Map.lookup (packageVersion pkgid) pvers of + Nothing -> [] + Just pkgs -> pkgs -- in preference order + +-- | Convenient alias of 'lookupSourcePackageId', but assuming only +-- one package per package ID. +lookupPackageId :: PackageInstalled a => PackageIndex a -> PackageId -> Maybe a +lookupPackageId index pkgid = case lookupSourcePackageId index pkgid of + [] -> Nothing + [pkg] -> Just pkg + _ -> error "Distribution.Simple.PackageIndex: multiple matches found" + +-- | Does a lookup by source package name. +-- +lookupPackageName :: PackageInstalled a => PackageIndex a -> PackageName + -> [(Version, [a])] +lookupPackageName (PackageIndex _ pnames) name = + case Map.lookup name pnames of + Nothing -> [] + Just pvers -> Map.toList pvers + + +-- | Does a lookup by source package name and a range of versions. +-- +-- We get back any number of versions of the specified package name, all +-- satisfying the version range constraint. 
+-- +lookupDependency :: PackageInstalled a => PackageIndex a -> Dependency + -> [(Version, [a])] +lookupDependency (PackageIndex _ pnames) (Dependency name versionRange) = + case Map.lookup name pnames of + Nothing -> [] + Just pvers -> [ entry + | entry@(ver, _) <- Map.toList pvers + , ver `withinRange` versionRange ] + +-- +-- * Case insensitive name lookups +-- + +-- | Does a case-insensitive search by package name. +-- +-- If there is only one package that compares case-insensitively to this name +-- then the search is unambiguous and we get back all versions of that package. +-- If several match case-insensitively but one matches exactly then it is also +-- unambiguous. +-- +-- If however several match case-insensitively and none match exactly then we +-- have an ambiguous result, and we get back all the versions of all the +-- packages. The list of ambiguous results is split by exact package name. So +-- it is a non-empty list of non-empty lists. +-- +searchByName :: PackageInstalled a => PackageIndex a -> String -> SearchResult [a] +searchByName (PackageIndex _ pnames) name = + case [ pkgs | pkgs@(PackageName name',_) <- Map.toList pnames + , lowercase name' == lname ] of + [] -> None + [(_,pvers)] -> Unambiguous (concat (Map.elems pvers)) + pkgss -> case find ((PackageName name==) . fst) pkgss of + Just (_,pvers) -> Unambiguous (concat (Map.elems pvers)) + Nothing -> Ambiguous (map (concat . Map.elems . snd) pkgss) + where lname = lowercase name + +data SearchResult a = None | Unambiguous a | Ambiguous [a] + +-- | Does a case-insensitive substring search by package name. +-- +-- That is, all packages that contain the given string in their name. 
+-- +searchByNameSubstring :: PackageInstalled a => PackageIndex a -> String -> [a] +searchByNameSubstring (PackageIndex _ pnames) searchterm = + [ pkg + | (PackageName name, pvers) <- Map.toList pnames + , lsearchterm `isInfixOf` lowercase name + , pkgs <- Map.elems pvers + , pkg <- pkgs ] + where lsearchterm = lowercase searchterm + + +-- +-- * Special queries +-- + +-- None of the stuff below depends on the internal representation of the index. +-- + +-- | Find if there are any cycles in the dependency graph. If there are no +-- cycles the result is @[]@. +-- +-- This actually computes the strongly connected components. So it gives us a +-- list of groups of packages where within each group they all depend on each +-- other, directly or indirectly. +-- +dependencyCycles :: PackageInstalled a => PackageIndex a -> [[a]] +dependencyCycles = dependencyCycles' Map.empty + +-- | Variant of 'dependencyCycles' which accepts a 'FakeMap'. See Note [FakeMap]. +dependencyCycles' :: PackageInstalled a => FakeMap -> PackageIndex a -> [[a]] +dependencyCycles' fakeMap index = + [ vs | Graph.CyclicSCC vs <- Graph.stronglyConnComp adjacencyList ] + where + adjacencyList = [ (pkg, installedPackageId pkg, fakeInstalledDepends fakeMap pkg) + | pkg <- allPackages index ] + + +-- | All packages that have immediate dependencies that are not in the index. +-- +-- Returns such packages along with the dependencies that they're missing. +-- +brokenPackages :: PackageInstalled a => PackageIndex a -> [(a, [InstalledPackageId])] +brokenPackages = brokenPackages' Map.empty + +-- | Variant of 'brokenPackages' which accepts a 'FakeMap'. See Note [FakeMap]. 
+brokenPackages' :: PackageInstalled a => FakeMap -> PackageIndex a -> [(a, [InstalledPackageId])] +brokenPackages' fakeMap index = + [ (pkg, missing) + | pkg <- allPackages index + , let missing = [ pkg' | pkg' <- installedDepends pkg + , isNothing (fakeLookupInstalledPackageId fakeMap index pkg') ] + , not (null missing) ] + +-- | Variant of 'lookupInstalledPackageId' which accepts a 'FakeMap'. See Note [FakeMap]. +fakeLookupInstalledPackageId :: PackageInstalled a => FakeMap -> PackageIndex a -> InstalledPackageId -> Maybe a +fakeLookupInstalledPackageId fakeMap index pkg = lookupInstalledPackageId index (Map.findWithDefault pkg pkg fakeMap) + +-- | Tries to take the transitive closure of the package dependencies. +-- +-- If the transitive closure is complete then it returns that subset of the +-- index. Otherwise it returns the broken packages as in 'brokenPackages'. +-- +-- * Note that if the result is @Right []@ it is because at least one of +-- the original given 'PackageId's do not occur in the index. +-- +dependencyClosure :: PackageInstalled a => PackageIndex a + -> [InstalledPackageId] + -> Either (PackageIndex a) + [(a, [InstalledPackageId])] +dependencyClosure = dependencyClosure' Map.empty + +-- | Variant of 'dependencyClosure' which accepts a 'FakeMap'. See Note [FakeMap]. 
+dependencyClosure' :: PackageInstalled a => FakeMap + -> PackageIndex a + -> [InstalledPackageId] + -> Either (PackageIndex a) + [(a, [InstalledPackageId])] +dependencyClosure' fakeMap index pkgids0 = case closure mempty [] pkgids0 of + (completed, []) -> Left completed + (completed, _) -> Right (brokenPackages completed) + where + closure completed failed [] = (completed, failed) + closure completed failed (pkgid:pkgids) = case fakeLookupInstalledPackageId fakeMap index pkgid of + Nothing -> closure completed (pkgid:failed) pkgids + Just pkg -> case fakeLookupInstalledPackageId fakeMap completed (installedPackageId pkg) of + Just _ -> closure completed failed pkgids + Nothing -> closure completed' failed pkgids' + where completed' = insert pkg completed + pkgids' = installedDepends pkg ++ pkgids + +-- | Takes the transitive closure of the packages reverse dependencies. +-- +-- * The given 'PackageId's must be in the index. +-- +reverseDependencyClosure :: PackageInstalled a => PackageIndex a + -> [InstalledPackageId] + -> [a] +reverseDependencyClosure = reverseDependencyClosure' Map.empty + +-- | Variant of 'reverseDependencyClosure' which accepts a 'FakeMap'. See Note [FakeMap]. +reverseDependencyClosure' :: PackageInstalled a => FakeMap + -> PackageIndex a + -> [InstalledPackageId] + -> [a] +reverseDependencyClosure' fakeMap index = + map vertexToPkg + . concatMap Tree.flatten + . Graph.dfs reverseDepGraph + . map (fromMaybe noSuchPkgId . pkgIdToVertex) + + where + (depGraph, vertexToPkg, pkgIdToVertex) = dependencyGraph' fakeMap index + reverseDepGraph = Graph.transposeG depGraph + noSuchPkgId = error "reverseDependencyClosure: package is not in the graph" + +topologicalOrder :: PackageInstalled a => PackageIndex a -> [a] +topologicalOrder index = map toPkgId + . Graph.topSort + $ graph + where (graph, toPkgId, _) = dependencyGraph index + +reverseTopologicalOrder :: PackageInstalled a => PackageIndex a -> [a] +reverseTopologicalOrder index = map toPkgId + . 
Graph.topSort + . Graph.transposeG + $ graph + where (graph, toPkgId, _) = dependencyGraph index + +-- | Builds a graph of the package dependencies. +-- +-- Dependencies on other packages that are not in the index are discarded. +-- You can check if there are any such dependencies with 'brokenPackages'. +-- +dependencyGraph :: PackageInstalled a => PackageIndex a + -> (Graph.Graph, + Graph.Vertex -> a, + InstalledPackageId -> Maybe Graph.Vertex) +dependencyGraph = dependencyGraph' Map.empty + +-- | Variant of 'dependencyGraph' which accepts a 'FakeMap'. See Note [FakeMap]. +dependencyGraph' :: PackageInstalled a => FakeMap + -> PackageIndex a + -> (Graph.Graph, + Graph.Vertex -> a, + InstalledPackageId -> Maybe Graph.Vertex) +dependencyGraph' fakeMap index = (graph, vertex_to_pkg, id_to_vertex) + where + graph = Array.listArray bounds + [ [ v | Just v <- map id_to_vertex (installedDepends pkg) ] + | pkg <- pkgs ] + + pkgs = sortBy (comparing packageId) (allPackages index) + vertices = zip (map installedPackageId pkgs) [0..] + vertex_map = Map.fromList vertices + id_to_vertex pid = Map.lookup (Map.findWithDefault pid pid fakeMap) vertex_map + + vertex_to_pkg vertex = pkgTable ! vertex + + pkgTable = Array.listArray bounds pkgs + topBound = length pkgs - 1 + bounds = (0, topBound) + +-- | Given a package index where we assume we want to use all the packages +-- (use 'dependencyClosure' if you need to get such a index subset) find out +-- if the dependencies within it use consistent versions of each package. +-- Return all cases where multiple packages depend on different versions of +-- some other package. +-- +-- Each element in the result is a package name along with the packages that +-- depend on it and the versions they require. These are guaranteed to be +-- distinct. 
+-- +dependencyInconsistencies :: PackageInstalled a => PackageIndex a + -> [(PackageName, [(PackageId, Version)])] +dependencyInconsistencies = dependencyInconsistencies' Map.empty + +-- | Variant of 'dependencyInconsistencies' which accepts a 'FakeMap'. See Note [FakeMap]. +dependencyInconsistencies' :: PackageInstalled a => FakeMap -> PackageIndex a + -> [(PackageName, [(PackageId, Version)])] +dependencyInconsistencies' fakeMap index = + [ (name, [ (pid,packageVersion dep) | (dep,pids) <- uses, pid <- pids]) + | (name, ipid_map) <- Map.toList inverseIndex + , let uses = Map.elems ipid_map + , reallyIsInconsistent (map fst uses) ] + + where -- for each PackageName, + -- for each package with that name, + -- the InstalledPackageInfo and the package Ids of packages + -- that depend on it. + inverseIndex = Map.fromListWith (Map.unionWith (\(a,b) (_,b') -> (a,b++b'))) + [ (packageName dep, + Map.fromList [(ipid,(dep,[packageId pkg]))]) + | pkg <- allPackages index + , ipid <- fakeInstalledDepends fakeMap pkg + , Just dep <- [fakeLookupInstalledPackageId fakeMap index ipid] + ] + + reallyIsInconsistent :: PackageInstalled a => [a] -> Bool + reallyIsInconsistent [] = False + reallyIsInconsistent [_p] = False + reallyIsInconsistent [p1, p2] = + let pid1 = installedPackageId p1 + pid2 = installedPackageId p2 + in Map.findWithDefault pid1 pid1 fakeMap `notElem` fakeInstalledDepends fakeMap p2 + && Map.findWithDefault pid2 pid2 fakeMap `notElem` fakeInstalledDepends fakeMap p1 + reallyIsInconsistent _ = True + +-- | Variant of 'installedDepends' which accepts a 'FakeMap'. See Note [FakeMap]. +fakeInstalledDepends :: PackageInstalled a => FakeMap -> a -> [InstalledPackageId] +fakeInstalledDepends fakeMap = map (\pid -> Map.findWithDefault pid pid fakeMap) . installedDepends + +-- | A rough approximation of GHC's module finder, takes a 'InstalledPackageIndex' and +-- turns it into a map from module names to their source packages. 
It's used to +-- initialize the @build-deps@ field in @cabal init@. +moduleNameIndex :: InstalledPackageIndex -> Map ModuleName [InstalledPackageInfo] +moduleNameIndex index = + Map.fromListWith (++) $ do + pkg <- allPackages index + IPI.ExposedModule m reexport _ <- IPI.exposedModules pkg + case reexport of + Nothing -> return (m, [pkg]) + Just (IPI.OriginalModule _ m') | m == m' -> [] + | otherwise -> return (m', [pkg]) + -- The heuristic is this: we want to prefer the original package + -- which originally exported a module. However, if a reexport + -- also *renamed* the module (m /= m'), then we have to use the + -- downstream package, since the upstream package has the wrong + -- module name! diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/PreProcess/Unlit.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/PreProcess/Unlit.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/PreProcess/Unlit.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/PreProcess/Unlit.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,165 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.PreProcess.Unlit +-- Copyright : ... +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- Remove the \"literal\" markups from a Haskell source file, including +-- \"@>@\", \"@\\begin{code}@\", \"@\\end{code}@\", and \"@#@\" + +-- This version is interesting because instead of striping comment lines, it +-- turns them into "-- " style comments. This allows using haddock markup +-- in literate scripts without having to use "> --" prefix. 
+ +module Distribution.Simple.PreProcess.Unlit (unlit,plain) where + +import Data.Char +import Data.List + +data Classified = BirdTrack String | Blank String | Ordinary String + | Line !Int String | CPP String + | BeginCode | EndCode + -- output only: + | Error String | Comment String + +-- | No unliteration. +plain :: String -> String -> String +plain _ hs = hs + +classify :: String -> Classified +classify ('>':s) = BirdTrack s +classify ('#':s) = case tokens s of + (line:file:_) | all isDigit line + && length file >= 2 + && head file == '"' + && last file == '"' + -> Line (read line) (tail (init file)) + _ -> CPP s + where tokens = unfoldr $ \str -> case lex str of + (t@(_:_), str'):_ -> Just (t, str') + _ -> Nothing +classify ('\\':s) + | "begin{code}" `isPrefixOf` s = BeginCode + | "end{code}" `isPrefixOf` s = EndCode +classify s | all isSpace s = Blank s +classify s = Ordinary s + +-- So the weird exception for comment indenting is to make things work with +-- haddock, see classifyAndCheckForBirdTracks below. +unclassify :: Bool -> Classified -> String +unclassify _ (BirdTrack s) = ' ':s +unclassify _ (Blank s) = s +unclassify _ (Ordinary s) = s +unclassify _ (Line n file) = "# " ++ show n ++ " " ++ show file +unclassify _ (CPP s) = '#':s +unclassify True (Comment "") = " --" +unclassify True (Comment s) = " -- " ++ s +unclassify False (Comment "") = "--" +unclassify False (Comment s) = "-- " ++ s +unclassify _ _ = internalError + +-- | 'unlit' takes a filename (for error reports), and transforms the +-- given string, to eliminate the literate comments from the program text. +unlit :: FilePath -> String -> Either String String +unlit file input = + let (usesBirdTracks, classified) = classifyAndCheckForBirdTracks + . inlines + $ input + in either (Left . unlines . map (unclassify usesBirdTracks)) + Right + . checkErrors + . 
reclassify + $ classified + + where + -- So haddock requires comments and code to align, since it treats comments + -- as following the layout rule. This is a pain for us since bird track + -- style literate code typically gets indented by two since ">" is replaced + -- by " " and people usually use one additional space of indent ie + -- "> then the code". On the other hand we cannot just go and indent all + -- the comments by two since that does not work for latex style literate + -- code. So the hacky solution we use here is that if we see any bird track + -- style code then we'll indent all comments by two, otherwise by none. + -- Of course this will not work for mixed latex/bird track .lhs files but + -- nobody does that, it's silly and specifically recommended against in the + -- H98 unlit spec. + -- + classifyAndCheckForBirdTracks = + flip mapAccumL False $ \seenBirdTrack line -> + let classification = classify line + in (seenBirdTrack || isBirdTrack classification, classification) + + isBirdTrack (BirdTrack _) = True + isBirdTrack _ = False + + checkErrors ls = case [ e | Error e <- ls ] of + [] -> Left ls + (message:_) -> Right (f ++ ":" ++ show n ++ ": " ++ message) + where (f, n) = errorPos file 1 ls + errorPos f n [] = (f, n) + errorPos f n (Error _:_) = (f, n) + errorPos _ _ (Line n' f':ls) = errorPos f' n' ls + errorPos f n (_ :ls) = errorPos f (n+1) ls + +-- Here we model a state machine, with each state represented by +-- a local function. We only have four states (well, five, +-- if you count the error state), but the rules +-- to transition between then are not so simple. +-- Would it be simpler to have more states? +-- +-- Each state represents the type of line that was last read +-- i.e. are we in a comment section, or a latex-code section, +-- or a bird-code section, etc? 
+reclassify :: [Classified] -> [Classified] +reclassify = blank -- begin in blank state + where + latex [] = [] + latex (EndCode :ls) = Blank "" : comment ls + latex (BeginCode :_ ) = [Error "\\begin{code} in code section"] + latex (BirdTrack l:ls) = Ordinary ('>':l) : latex ls + latex ( l:ls) = l : latex ls + + blank [] = [] + blank (EndCode :_ ) = [Error "\\end{code} without \\begin{code}"] + blank (BeginCode :ls) = Blank "" : latex ls + blank (BirdTrack l:ls) = BirdTrack l : bird ls + blank (Ordinary l:ls) = Comment l : comment ls + blank ( l:ls) = l : blank ls + + bird [] = [] + bird (EndCode :_ ) = [Error "\\end{code} without \\begin{code}"] + bird (BeginCode :ls) = Blank "" : latex ls + bird (Blank l :ls) = Blank l : blank ls + bird (Ordinary _:_ ) = [Error "program line before comment line"] + bird ( l:ls) = l : bird ls + + comment [] = [] + comment (EndCode :_ ) = [Error "\\end{code} without \\begin{code}"] + comment (BeginCode :ls) = Blank "" : latex ls + comment (CPP l :ls) = CPP l : comment ls + comment (BirdTrack _:_ ) = [Error "comment line before program line"] + -- a blank line and another ordinary line following a comment + -- will be treated as continuing the comment. Otherwise it's + -- then end of the comment, with a blank line. + comment (Blank l:ls@(Ordinary _:_)) = Comment l : comment ls + comment (Blank l:ls) = Blank l : blank ls + comment (Line n f :ls) = Line n f : comment ls + comment (Ordinary l:ls) = Comment l : comment ls + comment (Comment _: _) = internalError + comment (Error _: _) = internalError + +-- Re-implementation of 'lines', for better efficiency (but decreased laziness). +-- Also, importantly, accepts non-standard DOS and Mac line ending characters. 
+inlines :: String -> [String] +inlines xs = lines' xs id + where + lines' [] acc = [acc []] + lines' ('\^M':'\n':s) acc = acc [] : lines' s id -- DOS + lines' ('\^M':s) acc = acc [] : lines' s id -- MacOS + lines' ('\n':s) acc = acc [] : lines' s id -- Unix + lines' (c:s) acc = lines' s (acc . (c:)) + +internalError :: a +internalError = error "unlit: internal error" diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/PreProcess.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/PreProcess.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/PreProcess.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/PreProcess.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,620 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.PreProcess +-- Copyright : (c) 2003-2005, Isaac Jones, Malcolm Wallace +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This defines a 'PreProcessor' abstraction which represents a pre-processor +-- that can transform one kind of file into another. There is also a +-- 'PPSuffixHandler' which is a combination of a file extension and a function +-- for configuring a 'PreProcessor'. It defines a bunch of known built-in +-- preprocessors like @cpp@, @cpphs@, @c2hs@, @hsc2hs@, @happy@, @alex@ etc and +-- lists them in 'knownSuffixHandlers'. On top of this it provides a function +-- for actually preprocessing some sources given a bunch of known suffix +-- handlers. This module is not as good as it could be, it could really do with +-- a rewrite to address some of the problems we have with pre-processors. 
+ +module Distribution.Simple.PreProcess (preprocessComponent, knownSuffixHandlers, + ppSuffixes, PPSuffixHandler, PreProcessor(..), + mkSimplePreProcessor, runSimplePreProcessor, + ppCpp, ppCpp', ppGreenCard, ppC2hs, ppHsc2hs, + ppHappy, ppAlex, ppUnlit, platformDefines + ) + where + + +import Control.Monad +import Distribution.Simple.PreProcess.Unlit (unlit) +import Distribution.Package + ( Package(..), PackageName(..) ) +import qualified Distribution.ModuleName as ModuleName +import Distribution.PackageDescription as PD + ( PackageDescription(..), BuildInfo(..) + , Executable(..) + , Library(..), libModules + , TestSuite(..), testModules + , TestSuiteInterface(..) + , Benchmark(..), benchmarkModules, BenchmarkInterface(..) ) +import qualified Distribution.InstalledPackageInfo as Installed + ( InstalledPackageInfo_(..) ) +import qualified Distribution.Simple.PackageIndex as PackageIndex +import Distribution.Simple.CCompiler + ( cSourceExtensions ) +import Distribution.Simple.Compiler + ( CompilerFlavor(..) + , compilerFlavor, compilerCompatVersion, compilerVersion ) +import Distribution.Simple.LocalBuildInfo + ( LocalBuildInfo(..), Component(..) ) +import Distribution.Simple.BuildPaths (autogenModulesDir,cppHeaderName) +import Distribution.Simple.Utils + ( createDirectoryIfMissingVerbose, withUTF8FileContents, writeUTF8File + , die, setupMessage, intercalate, copyFileVerbose, moreRecentFile + , findFileWithExtension, findFileWithExtension' ) +import Distribution.Simple.Program + ( Program(..), ConfiguredProgram(..), programPath + , requireProgram, requireProgramVersion + , rawSystemProgramConf, rawSystemProgram + , greencardProgram, cpphsProgram, hsc2hsProgram, c2hsProgram + , happyProgram, alexProgram, ghcProgram, ghcjsProgram, gccProgram ) +import Distribution.Simple.Test.LibV09 + ( writeSimpleTestStub, stubFilePath, stubName ) +import Distribution.System + ( OS(..), buildOS, Arch(..), Platform(..) 
) +import Distribution.Text +import Distribution.Version + ( Version(..), anyVersion, orLaterVersion ) +import Distribution.Verbosity + +import Data.Maybe (fromMaybe) +import Data.List (nub) +import System.Directory (doesFileExist) +import System.Info (os, arch) +import System.FilePath (splitExtension, dropExtensions, (), (<.>), + takeDirectory, normalise, replaceExtension) + +-- |The interface to a preprocessor, which may be implemented using an +-- external program, but need not be. The arguments are the name of +-- the input file, the name of the output file and a verbosity level. +-- Here is a simple example that merely prepends a comment to the given +-- source file: +-- +-- > ppTestHandler :: PreProcessor +-- > ppTestHandler = +-- > PreProcessor { +-- > platformIndependent = True, +-- > runPreProcessor = mkSimplePreProcessor $ \inFile outFile verbosity -> +-- > do info verbosity (inFile++" has been preprocessed to "++outFile) +-- > stuff <- readFile inFile +-- > writeFile outFile ("-- preprocessed as a test\n\n" ++ stuff) +-- > return ExitSuccess +-- +-- We split the input and output file names into a base directory and the +-- rest of the file name. The input base dir is the path in the list of search +-- dirs that this file was found in. The output base dir is the build dir where +-- all the generated source files are put. +-- +-- The reason for splitting it up this way is that some pre-processors don't +-- simply generate one output .hs file from one input file but have +-- dependencies on other generated files (notably c2hs, where building one +-- .hs file may require reading other .chi files, and then compiling the .hs +-- file may require reading a generated .h file). In these cases the generated +-- files need to embed relative path names to each other (eg the generated .hs +-- file mentions the .h file in the FFI imports). 
This path must be relative to +-- the base directory where the generated files are located, it cannot be +-- relative to the top level of the build tree because the compilers do not +-- look for .h files relative to there, ie we do not use \"-I .\", instead we +-- use \"-I dist\/build\" (or whatever dist dir has been set by the user) +-- +-- Most pre-processors do not care of course, so mkSimplePreProcessor and +-- runSimplePreProcessor functions handle the simple case. +-- +data PreProcessor = PreProcessor { + + -- Is the output of the pre-processor platform independent? eg happy output + -- is portable haskell but c2hs's output is platform dependent. + -- This matters since only platform independent generated code can be + -- inlcuded into a source tarball. + platformIndependent :: Bool, + + -- TODO: deal with pre-processors that have implementaion dependent output + -- eg alex and happy have --ghc flags. However we can't really inlcude + -- ghc-specific code into supposedly portable source tarballs. + + runPreProcessor :: (FilePath, FilePath) -- Location of the source file relative to a base dir + -> (FilePath, FilePath) -- Output file name, relative to an output base dir + -> Verbosity -- verbosity + -> IO () -- Should exit if the preprocessor fails + } + +mkSimplePreProcessor :: (FilePath -> FilePath -> Verbosity -> IO ()) + -> (FilePath, FilePath) + -> (FilePath, FilePath) -> Verbosity -> IO () +mkSimplePreProcessor simplePP + (inBaseDir, inRelativeFile) + (outBaseDir, outRelativeFile) verbosity = simplePP inFile outFile verbosity + where inFile = normalise (inBaseDir inRelativeFile) + outFile = normalise (outBaseDir outRelativeFile) + +runSimplePreProcessor :: PreProcessor -> FilePath -> FilePath -> Verbosity + -> IO () +runSimplePreProcessor pp inFile outFile verbosity = + runPreProcessor pp (".", inFile) (".", outFile) verbosity + +-- |A preprocessor for turning non-Haskell files with the given extension +-- into plain Haskell source files. 
+type PPSuffixHandler + = (String, BuildInfo -> LocalBuildInfo -> PreProcessor) + +-- | Apply preprocessors to the sources from 'hsSourceDirs' for a given +-- component (lib, exe, or test suite). +preprocessComponent :: PackageDescription + -> Component + -> LocalBuildInfo + -> Bool + -> Verbosity + -> [PPSuffixHandler] + -> IO () +preprocessComponent pd comp lbi isSrcDist verbosity handlers = case comp of + (CLib lib@Library{ libBuildInfo = bi }) -> do + let dirs = hsSourceDirs bi ++ [autogenModulesDir lbi] + setupMessage verbosity "Preprocessing library" (packageId pd) + forM_ (map ModuleName.toFilePath $ libModules lib) $ + pre dirs (buildDir lbi) (localHandlers bi) + (CExe exe@Executable { buildInfo = bi, exeName = nm }) -> do + let exeDir = buildDir lbi nm nm ++ "-tmp" + dirs = hsSourceDirs bi ++ [autogenModulesDir lbi] + setupMessage verbosity ("Preprocessing executable '" ++ nm ++ "' for") (packageId pd) + forM_ (map ModuleName.toFilePath $ otherModules bi) $ + pre dirs exeDir (localHandlers bi) + pre (hsSourceDirs bi) exeDir (localHandlers bi) $ + dropExtensions (modulePath exe) + CTest test@TestSuite{ testName = nm } -> do + setupMessage verbosity ("Preprocessing test suite '" ++ nm ++ "' for") (packageId pd) + case testInterface test of + TestSuiteExeV10 _ f -> + preProcessTest test f $ buildDir lbi testName test + testName test ++ "-tmp" + TestSuiteLibV09 _ _ -> do + let testDir = buildDir lbi stubName test + stubName test ++ "-tmp" + writeSimpleTestStub test testDir + preProcessTest test (stubFilePath test) testDir + TestSuiteUnsupported tt -> die $ "No support for preprocessing test " + ++ "suite type " ++ display tt + CBench bm@Benchmark{ benchmarkName = nm } -> do + setupMessage verbosity ("Preprocessing benchmark '" ++ nm ++ "' for") (packageId pd) + case benchmarkInterface bm of + BenchmarkExeV10 _ f -> + preProcessBench bm f $ buildDir lbi benchmarkName bm + benchmarkName bm ++ "-tmp" + BenchmarkUnsupported tt -> die $ "No support for 
preprocessing benchmark " + ++ "type " ++ display tt + where + builtinHaskellSuffixes = ["hs", "lhs", "hsig", "lhsig"] + builtinCSuffixes = cSourceExtensions + builtinSuffixes = builtinHaskellSuffixes ++ builtinCSuffixes + localHandlers bi = [(ext, h bi lbi) | (ext, h) <- handlers] + pre dirs dir lhndlrs fp = + preprocessFile dirs dir isSrcDist fp verbosity builtinSuffixes lhndlrs + preProcessTest test = preProcessComponent (testBuildInfo test) + (testModules test) + preProcessBench bm = preProcessComponent (benchmarkBuildInfo bm) + (benchmarkModules bm) + preProcessComponent bi modules exePath dir = do + let biHandlers = localHandlers bi + sourceDirs = hsSourceDirs bi ++ [ autogenModulesDir lbi ] + sequence_ [ preprocessFile sourceDirs dir isSrcDist + (ModuleName.toFilePath modu) verbosity builtinSuffixes + biHandlers + | modu <- modules ] + preprocessFile (dir : (hsSourceDirs bi)) dir isSrcDist + (dropExtensions $ exePath) verbosity + builtinSuffixes biHandlers + +--TODO: try to list all the modules that could not be found +-- not just the first one. It's annoying and slow due to the need +-- to reconfigure after editing the .cabal file each time. + +-- |Find the first extension of the file that exists, and preprocess it +-- if required. +preprocessFile + :: [FilePath] -- ^source directories + -> FilePath -- ^build directory + -> Bool -- ^preprocess for sdist + -> FilePath -- ^module file name + -> Verbosity -- ^verbosity + -> [String] -- ^builtin suffixes + -> [(String, PreProcessor)] -- ^possible preprocessors + -> IO () +preprocessFile searchLoc buildLoc forSDist baseFile verbosity builtinSuffixes handlers = do + -- look for files in the various source dirs with this module name + -- and a file extension of a known preprocessor + psrcFiles <- findFileWithExtension' (map fst handlers) searchLoc baseFile + case psrcFiles of + -- no preprocessor file exists, look for an ordinary source file + -- just to make sure one actually exists at all for this module. 
+ -- Note: by looking in the target/output build dir too, we allow + -- source files to appear magically in the target build dir without + -- any corresponding "real" source file. This lets custom Setup.hs + -- files generate source modules directly into the build dir without + -- the rest of the build system being aware of it (somewhat dodgy) + Nothing -> do + bsrcFiles <- findFileWithExtension builtinSuffixes (buildLoc : searchLoc) baseFile + case bsrcFiles of + Nothing -> die $ "can't find source for " ++ baseFile + ++ " in " ++ intercalate ", " searchLoc + _ -> return () + -- found a pre-processable file in one of the source dirs + Just (psrcLoc, psrcRelFile) -> do + let (srcStem, ext) = splitExtension psrcRelFile + psrcFile = psrcLoc psrcRelFile + pp = fromMaybe (error "Internal error in preProcess module: Just expected") + (lookup (tailNotNull ext) handlers) + -- Preprocessing files for 'sdist' is different from preprocessing + -- for 'build'. When preprocessing for sdist we preprocess to + -- avoid that the user has to have the preprocessors available. + -- ATM, we don't have a way to specify which files are to be + -- preprocessed and which not, so for sdist we only process + -- platform independent files and put them into the 'buildLoc' + -- (which we assume is set to the temp. directory that will become + -- the tarball). + --TODO: eliminate sdist variant, just supply different handlers + when (not forSDist || forSDist && platformIndependent pp) $ do + -- look for existing pre-processed source file in the dest dir to + -- see if we really have to re-run the preprocessor. 
+ ppsrcFiles <- findFileWithExtension builtinSuffixes [buildLoc] baseFile + recomp <- case ppsrcFiles of + Nothing -> return True + Just ppsrcFile -> + psrcFile `moreRecentFile` ppsrcFile + when recomp $ do + let destDir = buildLoc dirName srcStem + createDirectoryIfMissingVerbose verbosity True destDir + runPreProcessorWithHsBootHack pp + (psrcLoc, psrcRelFile) + (buildLoc, srcStem <.> "hs") + + where + dirName = takeDirectory + tailNotNull [] = [] + tailNotNull x = tail x + + -- FIXME: This is a somewhat nasty hack. GHC requires that hs-boot files + -- be in the same place as the hs files, so if we put the hs file in dist/ + -- then we need to copy the hs-boot file there too. This should probably be + -- done another way. Possibly we should also be looking for .lhs-boot + -- files, but I think that preprocessors only produce .hs files. + runPreProcessorWithHsBootHack pp + (inBaseDir, inRelativeFile) + (outBaseDir, outRelativeFile) = do + runPreProcessor pp + (inBaseDir, inRelativeFile) + (outBaseDir, outRelativeFile) verbosity + + exists <- doesFileExist inBoot + when exists $ copyFileVerbose verbosity inBoot outBoot + + where + inBoot = replaceExtension inFile "hs-boot" + outBoot = replaceExtension outFile "hs-boot" + + inFile = normalise (inBaseDir inRelativeFile) + outFile = normalise (outBaseDir outRelativeFile) + +-- ------------------------------------------------------------ +-- * known preprocessors +-- ------------------------------------------------------------ + +ppGreenCard :: BuildInfo -> LocalBuildInfo -> PreProcessor +ppGreenCard _ lbi + = PreProcessor { + platformIndependent = False, + runPreProcessor = mkSimplePreProcessor $ \inFile outFile verbosity -> + rawSystemProgramConf verbosity greencardProgram (withPrograms lbi) + (["-tffi", "-o" ++ outFile, inFile]) + } + +-- This one is useful for preprocessors that can't handle literate source. +-- We also need a way to chain preprocessors. 
+ppUnlit :: PreProcessor +ppUnlit = + PreProcessor { + platformIndependent = True, + runPreProcessor = mkSimplePreProcessor $ \inFile outFile _verbosity -> + withUTF8FileContents inFile $ \contents -> + either (writeUTF8File outFile) die (unlit inFile contents) + } + +ppCpp :: BuildInfo -> LocalBuildInfo -> PreProcessor +ppCpp = ppCpp' [] + +ppCpp' :: [String] -> BuildInfo -> LocalBuildInfo -> PreProcessor +ppCpp' extraArgs bi lbi = + case compilerFlavor (compiler lbi) of + GHC -> ppGhcCpp ghcProgram (>= Version [6,6] []) args bi lbi + GHCJS -> ppGhcCpp ghcjsProgram (const True) args bi lbi + _ -> ppCpphs args bi lbi + where cppArgs = getCppOptions bi lbi + args = cppArgs ++ extraArgs + +ppGhcCpp :: Program -> (Version -> Bool) + -> [String] -> BuildInfo -> LocalBuildInfo -> PreProcessor +ppGhcCpp program xHs extraArgs _bi lbi = + PreProcessor { + platformIndependent = False, + runPreProcessor = mkSimplePreProcessor $ \inFile outFile verbosity -> do + (prog, version, _) <- requireProgramVersion verbosity + program anyVersion (withPrograms lbi) + rawSystemProgram verbosity prog $ + ["-E", "-cpp"] + -- This is a bit of an ugly hack. We're going to + -- unlit the file ourselves later on if appropriate, + -- so we need GHC not to unlit it now or it'll get + -- double-unlitted. In the future we might switch to + -- using cpphs --unlit instead. 
+ ++ (if xHs version then ["-x", "hs"] else []) + ++ [ "-optP-include", "-optP"++ (autogenModulesDir lbi cppHeaderName) ] + ++ ["-o", outFile, inFile] + ++ extraArgs + } + +ppCpphs :: [String] -> BuildInfo -> LocalBuildInfo -> PreProcessor +ppCpphs extraArgs _bi lbi = + PreProcessor { + platformIndependent = False, + runPreProcessor = mkSimplePreProcessor $ \inFile outFile verbosity -> do + (cpphsProg, cpphsVersion, _) <- requireProgramVersion verbosity + cpphsProgram anyVersion (withPrograms lbi) + rawSystemProgram verbosity cpphsProg $ + ("-O" ++ outFile) : inFile + : "--noline" : "--strip" + : (if cpphsVersion >= Version [1,6] [] + then ["--include="++ (autogenModulesDir lbi cppHeaderName)] + else []) + ++ extraArgs + } + +ppHsc2hs :: BuildInfo -> LocalBuildInfo -> PreProcessor +ppHsc2hs bi lbi = + PreProcessor { + platformIndependent = False, + runPreProcessor = mkSimplePreProcessor $ \inFile outFile verbosity -> do + (gccProg, _) <- requireProgram verbosity gccProgram (withPrograms lbi) + rawSystemProgramConf verbosity hsc2hsProgram (withPrograms lbi) $ + [ "--cc=" ++ programPath gccProg + , "--ld=" ++ programPath gccProg ] + + -- Additional gcc options + ++ [ "--cflag=" ++ opt | opt <- programDefaultArgs gccProg + ++ programOverrideArgs gccProg ] + ++ [ "--lflag=" ++ opt | opt <- programDefaultArgs gccProg + ++ programOverrideArgs gccProg ] + + -- OSX frameworks: + ++ [ what ++ "=-F" ++ opt + | isOSX + , opt <- nub (concatMap Installed.frameworkDirs pkgs) + , what <- ["--cflag", "--lflag"] ] + ++ [ "--lflag=" ++ arg + | isOSX + , opt <- PD.frameworks bi ++ concatMap Installed.frameworks pkgs + , arg <- ["-framework", opt] ] + + -- Note that on ELF systems, wherever we use -L, we must also use -R + -- because presumably that -L dir is not on the normal path for the + -- system's dynamic linker. This is needed because hsc2hs works by + -- compiling a C program and then running it. 
+ + ++ [ "--cflag=" ++ opt | opt <- platformDefines lbi ] + + -- Options from the current package: + ++ [ "--cflag=-I" ++ dir | dir <- PD.includeDirs bi ] + ++ [ "--cflag=" ++ opt | opt <- PD.ccOptions bi + ++ PD.cppOptions bi ] + ++ [ "--cflag=" ++ opt | opt <- + [ "-I" ++ autogenModulesDir lbi, + "-include", autogenModulesDir lbi cppHeaderName ] ] + ++ [ "--lflag=-L" ++ opt | opt <- PD.extraLibDirs bi ] + ++ [ "--lflag=-Wl,-R," ++ opt | isELF + , opt <- PD.extraLibDirs bi ] + ++ [ "--lflag=-l" ++ opt | opt <- PD.extraLibs bi ] + ++ [ "--lflag=" ++ opt | opt <- PD.ldOptions bi ] + + -- Options from dependent packages + ++ [ "--cflag=" ++ opt + | pkg <- pkgs + , opt <- [ "-I" ++ opt | opt <- Installed.includeDirs pkg ] + ++ [ opt | opt <- Installed.ccOptions pkg ] ] + ++ [ "--lflag=" ++ opt + | pkg <- pkgs + , opt <- [ "-L" ++ opt | opt <- Installed.libraryDirs pkg ] + ++ [ "-Wl,-R," ++ opt | isELF + , opt <- Installed.libraryDirs pkg ] + ++ [ "-l" ++ opt | opt <- Installed.extraLibraries pkg ] + ++ [ opt | opt <- Installed.ldOptions pkg ] ] + ++ ["-o", outFile, inFile] + } + where + pkgs = PackageIndex.topologicalOrder (packageHacks (installedPkgs lbi)) + isOSX = case buildOS of OSX -> True; _ -> False + isELF = case buildOS of OSX -> False; Windows -> False; _ -> True; + packageHacks = case compilerFlavor (compiler lbi) of + GHC -> hackRtsPackage + GHCJS -> hackRtsPackage + _ -> id + -- We don't link in the actual Haskell libraries of our dependencies, so + -- the -u flags in the ldOptions of the rts package mean linking fails on + -- OS X (it's ld is a tad stricter than gnu ld). Thus we remove the + -- ldOptions for GHC's rts package: + hackRtsPackage index = + case PackageIndex.lookupPackageName index (PackageName "rts") of + [(_, [rts])] + -> PackageIndex.insert rts { Installed.ldOptions = [] } index + _ -> error "No (or multiple) ghc rts package is registered!!" 
+ + +ppC2hs :: BuildInfo -> LocalBuildInfo -> PreProcessor +ppC2hs bi lbi = + PreProcessor { + platformIndependent = False, + runPreProcessor = \(inBaseDir, inRelativeFile) + (outBaseDir, outRelativeFile) verbosity -> do + (c2hsProg, _, _) <- requireProgramVersion verbosity + c2hsProgram (orLaterVersion (Version [0,15] [])) + (withPrograms lbi) + (gccProg, _) <- requireProgram verbosity gccProgram (withPrograms lbi) + rawSystemProgram verbosity c2hsProg $ + + -- Options from the current package: + [ "--cpp=" ++ programPath gccProg, "--cppopts=-E" ] + ++ [ "--cppopts=" ++ opt | opt <- getCppOptions bi lbi ] + ++ [ "--include=" ++ outBaseDir ] + + -- Options from dependent packages + ++ [ "--cppopts=" ++ opt + | pkg <- pkgs + , opt <- [ "-I" ++ opt | opt <- Installed.includeDirs pkg ] + ++ [ opt | opt@('-':c:_) <- Installed.ccOptions pkg + , c `elem` "DIU" ] ] + --TODO: install .chi files for packages, so we can --include + -- those dirs here, for the dependencies + + -- input and output files + ++ [ "--output-dir=" ++ outBaseDir + , "--output=" ++ outRelativeFile + , inBaseDir inRelativeFile ] + } + where + pkgs = PackageIndex.topologicalOrder (installedPkgs lbi) + +--TODO: perhaps use this with hsc2hs too +--TODO: remove cc-options from cpphs for cabal-version: >= 1.10 +getCppOptions :: BuildInfo -> LocalBuildInfo -> [String] +getCppOptions bi lbi + = platformDefines lbi + ++ cppOptions bi + ++ ["-I" ++ dir | dir <- PD.includeDirs bi] + ++ [opt | opt@('-':c:_) <- PD.ccOptions bi, c `elem` "DIU"] + +platformDefines :: LocalBuildInfo -> [String] +platformDefines lbi = + case compilerFlavor comp of + GHC -> + ["-D__GLASGOW_HASKELL__=" ++ versionInt version] ++ + ["-D" ++ os ++ "_BUILD_OS=1"] ++ + ["-D" ++ arch ++ "_BUILD_ARCH=1"] ++ + map (\os' -> "-D" ++ os' ++ "_HOST_OS=1") osStr ++ + map (\arch' -> "-D" ++ arch' ++ "_HOST_ARCH=1") archStr + GHCJS -> + compatGlasgowHaskell ++ + ["-D__GHCJS__=" ++ versionInt version] ++ + ["-D" ++ os ++ "_BUILD_OS=1"] ++ + ["-D" ++ 
arch ++ "_BUILD_ARCH=1"] ++ + map (\os' -> "-D" ++ os' ++ "_HOST_OS=1") osStr ++ + map (\arch' -> "-D" ++ arch' ++ "_HOST_ARCH=1") archStr + JHC -> ["-D__JHC__=" ++ versionInt version] + HaskellSuite {} -> + ["-D__HASKELL_SUITE__"] ++ + map (\os' -> "-D" ++ os' ++ "_HOST_OS=1") osStr ++ + map (\arch' -> "-D" ++ arch' ++ "_HOST_ARCH=1") archStr + _ -> [] + where + comp = compiler lbi + Platform hostArch hostOS = hostPlatform lbi + version = compilerVersion comp + compatGlasgowHaskell = + maybe [] (\v -> ["-D__GLASGOW_HASKELL__=" ++ versionInt v]) + (compilerCompatVersion GHC comp) + -- TODO: move this into the compiler abstraction + -- FIXME: this forces GHC's crazy 4.8.2 -> 408 convention on all + -- the other compilers. Check if that's really what they want. + versionInt :: Version -> String + versionInt (Version { versionBranch = [] }) = "1" + versionInt (Version { versionBranch = [n] }) = show n + versionInt (Version { versionBranch = n1:n2:_ }) + = -- 6.8.x -> 608 + -- 6.10.x -> 610 + let s1 = show n1 + s2 = show n2 + middle = case s2 of + _ : _ : _ -> "" + _ -> "0" + in s1 ++ middle ++ s2 + osStr = case hostOS of + Linux -> ["linux"] + Windows -> ["mingw32"] + OSX -> ["darwin"] + FreeBSD -> ["freebsd"] + OpenBSD -> ["openbsd"] + NetBSD -> ["netbsd"] + DragonFly -> ["dragonfly"] + Solaris -> ["solaris2"] + AIX -> ["aix"] + HPUX -> ["hpux"] + IRIX -> ["irix"] + HaLVM -> [] + IOS -> ["ios"] + Ghcjs -> ["ghcjs"] + OtherOS _ -> [] + archStr = case hostArch of + I386 -> ["i386"] + X86_64 -> ["x86_64"] + PPC -> ["powerpc"] + PPC64 -> ["powerpc64"] + Sparc -> ["sparc"] + Arm -> ["arm"] + Mips -> ["mips"] + SH -> [] + IA64 -> ["ia64"] + S390 -> ["s390"] + Alpha -> ["alpha"] + Hppa -> ["hppa"] + Rs6000 -> ["rs6000"] + M68k -> ["m68k"] + Vax -> ["vax"] + JavaScript -> ["javascript"] + OtherArch _ -> [] + +ppHappy :: BuildInfo -> LocalBuildInfo -> PreProcessor +ppHappy _ lbi = pp { platformIndependent = True } + where pp = standardPP lbi happyProgram (hcFlags hc) + hc = 
compilerFlavor (compiler lbi) + hcFlags GHC = ["-agc"] + hcFlags GHCJS = ["-agc"] + hcFlags _ = [] + +ppAlex :: BuildInfo -> LocalBuildInfo -> PreProcessor +ppAlex _ lbi = pp { platformIndependent = True } + where pp = standardPP lbi alexProgram (hcFlags hc) + hc = compilerFlavor (compiler lbi) + hcFlags GHC = ["-g"] + hcFlags GHCJS = ["-g"] + hcFlags _ = [] + +standardPP :: LocalBuildInfo -> Program -> [String] -> PreProcessor +standardPP lbi prog args = + PreProcessor { + platformIndependent = False, + runPreProcessor = mkSimplePreProcessor $ \inFile outFile verbosity -> + rawSystemProgramConf verbosity prog (withPrograms lbi) + (args ++ ["-o", outFile, inFile]) + } + +-- |Convenience function; get the suffixes of these preprocessors. +ppSuffixes :: [ PPSuffixHandler ] -> [String] +ppSuffixes = map fst + +-- |Standard preprocessors: GreenCard, c2hs, hsc2hs, happy, alex and cpphs. +knownSuffixHandlers :: [ PPSuffixHandler ] +knownSuffixHandlers = + [ ("gc", ppGreenCard) + , ("chs", ppC2hs) + , ("hsc", ppHsc2hs) + , ("x", ppAlex) + , ("y", ppHappy) + , ("ly", ppHappy) + , ("cpphs", ppCpp) + ] diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Ar.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Ar.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Ar.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Ar.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,169 @@ +{-# LANGUAGE OverloadedStrings #-} + +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.Program.Ar +-- Copyright : Duncan Coutts 2009 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This module provides an library interface to the @ar@ program. 
+ +module Distribution.Simple.Program.Ar ( + createArLibArchive, + multiStageProgramInvocation + ) where + +import Control.Monad (when, unless) +import qualified Data.ByteString as BS +import qualified Data.ByteString.Char8 as BS8 +import Data.Char (isSpace) +import Distribution.Compat.CopyFile (filesEqual) +import Distribution.Simple.LocalBuildInfo (LocalBuildInfo(..)) +import Distribution.Simple.Program + ( arProgram, requireProgram ) +import Distribution.Simple.Program.Run + ( programInvocation, multiStageProgramInvocation + , runProgramInvocation ) +import qualified Distribution.Simple.Program.Strip as Strip + ( stripLib ) +import Distribution.Simple.Utils + ( dieWithLocation, withTempDirectory ) +import Distribution.System + ( Arch(..), OS(..), Platform(..) ) +import Distribution.Verbosity + ( Verbosity, deafening, verbose ) +import System.Directory (doesFileExist, renameFile) +import System.FilePath ((), splitFileName) +import System.IO + ( Handle, IOMode(ReadWriteMode), SeekMode(AbsoluteSeek) + , hFileSize, hSeek, withBinaryFile ) + +-- | Call @ar@ to create a library archive from a bunch of object files. +-- +createArLibArchive :: Verbosity -> LocalBuildInfo + -> FilePath -> [FilePath] -> IO () +createArLibArchive verbosity lbi targetPath files = do + (ar, _) <- requireProgram verbosity arProgram progConf + + let (targetDir, targetName) = splitFileName targetPath + withTempDirectory verbosity targetDir "objs" $ \ tmpDir -> do + let tmpPath = tmpDir targetName + + -- The args to use with "ar" are actually rather subtle and system-dependent. + -- In particular we have the following issues: + -- + -- -- On OS X, "ar q" does not make an archive index. Archives with no + -- index cannot be used. + -- + -- -- GNU "ar r" will not let us add duplicate objects, only "ar q" lets us + -- do that. We have duplicates because of modules like "A.M" and "B.M" + -- both make an object file "M.o" and ar does not consider the directory. 
+ -- + -- Our solution is to use "ar r" in the simple case when one call is enough. + -- When we need to call ar multiple times we use "ar q" and for the last + -- call on OSX we use "ar qs" so that it'll make the index. + + let simpleArgs = case hostOS of + OSX -> ["-r", "-s"] + _ -> ["-r"] + + initialArgs = ["-q"] + finalArgs = case hostOS of + OSX -> ["-q", "-s"] + _ -> ["-q"] + + extraArgs = verbosityOpts verbosity ++ [tmpPath] + + simple = programInvocation ar (simpleArgs ++ extraArgs) + initial = programInvocation ar (initialArgs ++ extraArgs) + middle = initial + final = programInvocation ar (finalArgs ++ extraArgs) + + sequence_ + [ runProgramInvocation verbosity inv + | inv <- multiStageProgramInvocation + simple (initial, middle, final) files ] + + when stripLib $ Strip.stripLib verbosity platform progConf tmpPath + unless (hostArch == Arm) $ -- See #1537 + wipeMetadata tmpPath + equal <- filesEqual tmpPath targetPath + unless equal $ renameFile tmpPath targetPath + + where + progConf = withPrograms lbi + stripLib = stripLibs lbi + platform@(Platform hostArch hostOS) = hostPlatform lbi + verbosityOpts v | v >= deafening = ["-v"] + | v >= verbose = [] + | otherwise = ["-c"] + +-- | @ar@ by default includes various metadata for each object file in their +-- respective headers, so the output can differ for the same inputs, making +-- it difficult to avoid re-linking. GNU @ar@(1) has a deterministic mode +-- (@-D@) flag that always writes zero for the mtime, UID and GID, and 0644 +-- for the file mode. However detecting whether @-D@ is supported seems +-- rather harder than just re-implementing this feature. 
+wipeMetadata :: FilePath -> IO () +wipeMetadata path = do + -- Check for existence first (ReadWriteMode would create one otherwise) + exists <- doesFileExist path + unless exists $ wipeError "Temporary file disappeared" + withBinaryFile path ReadWriteMode $ \ h -> hFileSize h >>= wipeArchive h + + where + wipeError msg = dieWithLocation path Nothing $ + "Distribution.Simple.Program.Ar.wipeMetadata: " ++ msg + archLF = "!<arch>\x0a" -- global magic, 8 bytes + x60LF = "\x60\x0a" -- header magic, 2 bytes + metadata = BS.concat + [ "0 " -- mtime, 12 bytes + , "0 " -- UID, 6 bytes + , "0 " -- GID, 6 bytes + , "0644 " -- mode, 8 bytes + ] + headerSize :: Int + headerSize = 60 + + -- http://en.wikipedia.org/wiki/Ar_(Unix)#File_format_details + wipeArchive :: Handle -> Integer -> IO () + wipeArchive h archiveSize = do + global <- BS.hGet h (BS.length archLF) + unless (global == archLF) $ wipeError "Bad global header" + wipeHeader (toInteger $ BS.length archLF) + + where + wipeHeader :: Integer -> IO () + wipeHeader offset = case compare offset archiveSize of + EQ -> return () + GT -> wipeError (atOffset "Archive truncated") + LT -> do + header <- BS.hGet h headerSize + unless (BS.length header == headerSize) $ + wipeError (atOffset "Short header") + let magic = BS.drop 58 header + unless (magic == x60LF) . wipeError . 
atOffset $ + "Bad magic " ++ show magic ++ " in header" + + let name = BS.take 16 header + let size = BS.take 10 $ BS.drop 48 header + objSize <- case reads (BS8.unpack size) of + [(n, s)] | all isSpace s -> return n + _ -> wipeError (atOffset "Bad file size in header") + + let replacement = BS.concat [ name, metadata, size, magic ] + unless (BS.length replacement == headerSize) $ + wipeError (atOffset "Something has gone terribly wrong") + hSeek h AbsoluteSeek offset + BS.hPut h replacement + + let nextHeader = offset + toInteger headerSize + + -- Odd objects are padded with an extra '\x0a' + if odd objSize then objSize + 1 else objSize + hSeek h AbsoluteSeek nextHeader + wipeHeader nextHeader + + where + atOffset msg = msg ++ " at offset " ++ show offset diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Builtin.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Builtin.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Builtin.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Builtin.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,362 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.Program.Builtin +-- Copyright : Isaac Jones 2006, Duncan Coutts 2007-2009 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- The module defines all the known built-in 'Program's. +-- +-- Where possible we try to find their version numbers. 
+-- +module Distribution.Simple.Program.Builtin ( + + -- * The collection of unconfigured and configured programs + builtinPrograms, + + -- * Programs that Cabal knows about + ghcProgram, + ghcPkgProgram, + ghcjsProgram, + ghcjsPkgProgram, + lhcProgram, + lhcPkgProgram, + hmakeProgram, + jhcProgram, + haskellSuiteProgram, + haskellSuitePkgProgram, + uhcProgram, + gccProgram, + arProgram, + stripProgram, + happyProgram, + alexProgram, + hsc2hsProgram, + c2hsProgram, + cpphsProgram, + hscolourProgram, + haddockProgram, + greencardProgram, + ldProgram, + tarProgram, + cppProgram, + pkgConfigProgram, + hpcProgram, + ) where + +import Distribution.Simple.Program.Find + ( findProgramOnSearchPath ) +import Distribution.Simple.Program.Run + ( getProgramInvocationOutput, programInvocation ) +import Distribution.Simple.Program.Types + ( Program(..), ConfiguredProgram(..), simpleProgram ) +import Distribution.Simple.Utils + ( findProgramVersion ) +import Distribution.Compat.Exception + ( catchIO ) +import Distribution.Verbosity + ( lessVerbose ) +import Distribution.Version + ( Version(..), withinRange, earlierVersion, laterVersion + , intersectVersionRanges ) +import Data.Char + ( isDigit ) + +import Data.List + ( isInfixOf ) +import qualified Data.Map as Map + +-- ------------------------------------------------------------ +-- * Known programs +-- ------------------------------------------------------------ + +-- | The default list of programs. +-- These programs are typically used internally to Cabal. 
+builtinPrograms :: [Program] +builtinPrograms = + [ + -- compilers and related progs + ghcProgram + , ghcPkgProgram + , ghcjsProgram + , ghcjsPkgProgram + , haskellSuiteProgram + , haskellSuitePkgProgram + , hmakeProgram + , jhcProgram + , lhcProgram + , lhcPkgProgram + , uhcProgram + , hpcProgram + -- preprocessors + , hscolourProgram + , haddockProgram + , happyProgram + , alexProgram + , hsc2hsProgram + , c2hsProgram + , cpphsProgram + , greencardProgram + -- platform toolchain + , gccProgram + , arProgram + , stripProgram + , ldProgram + , tarProgram + -- configuration tools + , pkgConfigProgram + ] + +ghcProgram :: Program +ghcProgram = (simpleProgram "ghc") { + programFindVersion = findProgramVersion "--numeric-version" id, + + -- Workaround for https://ghc.haskell.org/trac/ghc/ticket/8825 + -- (spurious warning on non-english locales) + programPostConf = \_verbosity ghcProg -> + do let ghcProg' = ghcProg { + programOverrideEnv = ("LANGUAGE", Just "en") + : programOverrideEnv ghcProg + } + -- Only the 7.8 branch seems to be affected. Fixed in 7.8.4. 
+ affectedVersionRange = intersectVersionRanges + (laterVersion $ Version [7,8,0] []) + (earlierVersion $ Version [7,8,4] []) + return $ maybe ghcProg + (\v -> if withinRange v affectedVersionRange + then ghcProg' else ghcProg) + (programVersion ghcProg) + } + +ghcPkgProgram :: Program +ghcPkgProgram = (simpleProgram "ghc-pkg") { + programFindVersion = findProgramVersion "--version" $ \str -> + -- Invoking "ghc-pkg --version" gives a string like + -- "GHC package manager version 6.4.1" + case words str of + (_:_:_:_:ver:_) -> ver + _ -> "" + } + +ghcjsProgram :: Program +ghcjsProgram = (simpleProgram "ghcjs") { + programFindVersion = findProgramVersion "--numeric-ghcjs-version" id + } + +-- note: version is the version number of the GHC version that ghcjs-pkg was built with +ghcjsPkgProgram :: Program +ghcjsPkgProgram = (simpleProgram "ghcjs-pkg") { + programFindVersion = findProgramVersion "--version" $ \str -> + -- Invoking "ghcjs-pkg --version" gives a string like + -- "GHCJS package manager version 6.4.1" + case words str of + (_:_:_:_:ver:_) -> ver + _ -> "" + } + +lhcProgram :: Program +lhcProgram = (simpleProgram "lhc") { + programFindVersion = findProgramVersion "--numeric-version" id + } + +lhcPkgProgram :: Program +lhcPkgProgram = (simpleProgram "lhc-pkg") { + programFindVersion = findProgramVersion "--version" $ \str -> + -- Invoking "lhc-pkg --version" gives a string like + -- "LHC package manager version 0.7" + case words str of + (_:_:_:_:ver:_) -> ver + _ -> "" + } + +hmakeProgram :: Program +hmakeProgram = (simpleProgram "hmake") { + programFindVersion = findProgramVersion "--version" $ \str -> + -- Invoking "hmake --version" gives a string line + -- "/usr/local/bin/hmake: 3.13 (2006-11-01)" + case words str of + (_:ver:_) -> ver + _ -> "" + } + +jhcProgram :: Program +jhcProgram = (simpleProgram "jhc") { + programFindVersion = findProgramVersion "--version" $ \str -> + -- invoking "jhc --version" gives a string like + -- "jhc 0.3.20080208 
(wubgipkamcep-2) + -- compiled by ghc-6.8 on a x86_64 running linux" + case words str of + (_:ver:_) -> ver + _ -> "" + } + +uhcProgram :: Program +uhcProgram = (simpleProgram "uhc") { + programFindVersion = findProgramVersion "--version-dotted" id + } + +hpcProgram :: Program +hpcProgram = (simpleProgram "hpc") + { + programFindVersion = findProgramVersion "version" $ \str -> + case words str of + (_ : _ : _ : ver : _) -> ver + _ -> "" + } + +-- This represents a haskell-suite compiler. Of course, the compiler +-- itself probably is not called "haskell-suite", so this is not a real +-- program. (But we don't know statically the name of the actual compiler, +-- so this is the best we can do.) +-- +-- Having this Program value serves two purposes: +-- +-- 1. We can accept options for the compiler in the form of +-- +-- --haskell-suite-option(s)=... +-- +-- 2. We can find a program later using this static id (with +-- requireProgram). +-- +-- The path to the real compiler is found and recorded in the ProgramDb +-- during the configure phase. +haskellSuiteProgram :: Program +haskellSuiteProgram = (simpleProgram "haskell-suite") { + -- pretend that the program exists, otherwise it won't be in the + -- "configured" state + programFindLocation = + \_verbosity _searchPath -> return $ Just "haskell-suite-dummy-location" + } + +-- This represent a haskell-suite package manager. See the comments for +-- haskellSuiteProgram. +haskellSuitePkgProgram :: Program +haskellSuitePkgProgram = (simpleProgram "haskell-suite-pkg") { + programFindLocation = + \_verbosity _searchPath -> return $ Just "haskell-suite-pkg-dummy-location" + } + + +happyProgram :: Program +happyProgram = (simpleProgram "happy") { + programFindVersion = findProgramVersion "--version" $ \str -> + -- Invoking "happy --version" gives a string like + -- "Happy Version 1.16 Copyright (c) ...." 
+ case words str of + (_:_:ver:_) -> ver + _ -> "" + } + +alexProgram :: Program +alexProgram = (simpleProgram "alex") { + programFindVersion = findProgramVersion "--version" $ \str -> + -- Invoking "alex --version" gives a string like + -- "Alex version 2.1.0, (c) 2003 Chris Dornan and Simon Marlow" + case words str of + (_:_:ver:_) -> takeWhile (\x -> isDigit x || x == '.') ver + _ -> "" + } + +gccProgram :: Program +gccProgram = (simpleProgram "gcc") { + programFindVersion = findProgramVersion "-dumpversion" id + } + +arProgram :: Program +arProgram = simpleProgram "ar" + +stripProgram :: Program +stripProgram = (simpleProgram "strip") { + programFindVersion = \verbosity -> + findProgramVersion "--version" selectVersion (lessVerbose verbosity) + } + where + selectVersion str = + -- Invoking "strip --version" gives very inconsistent + -- results. We look for the first word that starts with a + -- number, and try parsing out the first two components of + -- it. Non-GNU 'strip' doesn't appear to have a version flag. + let numeric "" = False + numeric (x:_) = isDigit x + in case dropWhile (not . numeric) (words str) of + (ver:_) -> + -- take the first two version components + let isDot = (== '.') + (major, rest) = break isDot ver + minor = takeWhile (not . isDot) (dropWhile isDot rest) + in major ++ "." 
++ minor + _ -> "" + +hsc2hsProgram :: Program +hsc2hsProgram = (simpleProgram "hsc2hs") { + programFindVersion = + findProgramVersion "--version" $ \str -> + -- Invoking "hsc2hs --version" gives a string like "hsc2hs version 0.66" + case words str of + (_:_:ver:_) -> ver + _ -> "" + } + +c2hsProgram :: Program +c2hsProgram = (simpleProgram "c2hs") { + programFindVersion = findProgramVersion "--numeric-version" id + } + +cpphsProgram :: Program +cpphsProgram = (simpleProgram "cpphs") { + programFindVersion = findProgramVersion "--version" $ \str -> + -- Invoking "cpphs --version" gives a string like "cpphs 1.3" + case words str of + (_:ver:_) -> ver + _ -> "" + } + +hscolourProgram :: Program +hscolourProgram = (simpleProgram "hscolour") { + programFindLocation = \v p -> findProgramOnSearchPath v p "HsColour", + programFindVersion = findProgramVersion "-version" $ \str -> + -- Invoking "HsColour -version" gives a string like "HsColour 1.7" + case words str of + (_:ver:_) -> ver + _ -> "" + } + +haddockProgram :: Program +haddockProgram = (simpleProgram "haddock") { + programFindVersion = findProgramVersion "--version" $ \str -> + -- Invoking "haddock --version" gives a string like + -- "Haddock version 0.8, (c) Simon Marlow 2006" + case words str of + (_:_:ver:_) -> takeWhile (`elem` ('.':['0'..'9'])) ver + _ -> "" + } + +greencardProgram :: Program +greencardProgram = simpleProgram "greencard" + +ldProgram :: Program +ldProgram = simpleProgram "ld" + +tarProgram :: Program +tarProgram = (simpleProgram "tar") { + -- See #1901. Some versions of 'tar' (OpenBSD, NetBSD, ...) don't support the + -- '--format' option. + programPostConf = \verbosity tarProg -> do + tarHelpOutput <- getProgramInvocationOutput + verbosity (programInvocation tarProg ["--help"]) + -- Some versions of tar don't support '--help'. 
+ `catchIO` (\_ -> return "") + let k = "Supports --format" + v = if ("--format" `isInfixOf` tarHelpOutput) then "YES" else "NO" + m = Map.insert k v (programProperties tarProg) + return $ tarProg { programProperties = m } + } + +cppProgram :: Program +cppProgram = simpleProgram "cpp" + +pkgConfigProgram :: Program +pkgConfigProgram = (simpleProgram "pkg-config") { + programFindVersion = findProgramVersion "--version" id + } diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Db.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Db.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Db.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Db.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,471 @@ +{-# LANGUAGE CPP #-} +{-# LANGUAGE DeriveGeneric #-} + +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.Program.Db +-- Copyright : Isaac Jones 2006, Duncan Coutts 2007-2009 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This provides a 'ProgramDb' type which holds configured and not-yet +-- configured programs. It is the parameter to lots of actions elsewhere in +-- Cabal that need to look up and run programs. If we had a Cabal monad, +-- the 'ProgramDb' would probably be a reader or state component of it. +-- +-- One nice thing about using it is that any program that is +-- registered with Cabal will get some \"configure\" and \".cabal\" +-- helpers like --with-foo-args --foo-path= and extra-foo-args. +-- +-- There's also a hook for adding programs in a Setup.lhs script. See +-- hookedPrograms in 'Distribution.Simple.UserHooks'. This gives a +-- hook user the ability to get the above flags and such so that they +-- don't have to write all the PATH logic inside Setup.lhs. 
+ +module Distribution.Simple.Program.Db ( + -- * The collection of configured programs we can run + ProgramDb, + emptyProgramDb, + defaultProgramDb, + restoreProgramDb, + + -- ** Query and manipulate the program db + addKnownProgram, + addKnownPrograms, + lookupKnownProgram, + knownPrograms, + getProgramSearchPath, + setProgramSearchPath, + modifyProgramSearchPath, + userSpecifyPath, + userSpecifyPaths, + userMaybeSpecifyPath, + userSpecifyArgs, + userSpecifyArgss, + userSpecifiedArgs, + lookupProgram, + updateProgram, + configuredPrograms, + + -- ** Query and manipulate the program db + configureProgram, + configureAllKnownPrograms, + lookupProgramVersion, + reconfigurePrograms, + requireProgram, + requireProgramVersion, + + ) where + +import Distribution.Simple.Program.Types + ( Program(..), ProgArg, ConfiguredProgram(..), ProgramLocation(..) ) +import Distribution.Simple.Program.Find + ( ProgramSearchPath, defaultProgramSearchPath + , findProgramOnSearchPath, programSearchPathAsPATHVar ) +import Distribution.Simple.Program.Builtin + ( builtinPrograms ) +import Distribution.Simple.Utils + ( die, doesExecutableExist ) +import Distribution.Version + ( Version, VersionRange, isAnyVersion, withinRange ) +import Distribution.Text + ( display ) +import Distribution.Verbosity + ( Verbosity ) + +import Distribution.Compat.Binary (Binary(..)) +#if __GLASGOW_HASKELL__ < 710 +import Data.Functor ((<$>)) +#endif +import Data.List + ( foldl' ) +import Data.Maybe + ( catMaybes ) +import qualified Data.Map as Map +import Control.Monad + ( join, foldM ) + +-- ------------------------------------------------------------ +-- * Programs database +-- ------------------------------------------------------------ + +-- | The configuration is a collection of information about programs. It +-- contains information both about configured programs and also about programs +-- that we are yet to configure. 
+-- +-- The idea is that we start from a collection of unconfigured programs and one +-- by one we try to configure them at which point we move them into the +-- configured collection. For unconfigured programs we record not just the +-- 'Program' but also any user-provided arguments and location for the program. +data ProgramDb = ProgramDb { + unconfiguredProgs :: UnconfiguredProgs, + progSearchPath :: ProgramSearchPath, + configuredProgs :: ConfiguredProgs + } + +type UnconfiguredProgram = (Program, Maybe FilePath, [ProgArg]) +type UnconfiguredProgs = Map.Map String UnconfiguredProgram +type ConfiguredProgs = Map.Map String ConfiguredProgram + + +emptyProgramDb :: ProgramDb +emptyProgramDb = ProgramDb Map.empty defaultProgramSearchPath Map.empty + +defaultProgramDb :: ProgramDb +defaultProgramDb = restoreProgramDb builtinPrograms emptyProgramDb + + +-- internal helpers: +updateUnconfiguredProgs :: (UnconfiguredProgs -> UnconfiguredProgs) + -> ProgramDb -> ProgramDb +updateUnconfiguredProgs update conf = + conf { unconfiguredProgs = update (unconfiguredProgs conf) } + +updateConfiguredProgs :: (ConfiguredProgs -> ConfiguredProgs) + -> ProgramDb -> ProgramDb +updateConfiguredProgs update conf = + conf { configuredProgs = update (configuredProgs conf) } + + +-- Read & Show instances are based on listToFM +-- Note that we only serialise the configured part of the database, this is +-- because we don't need the unconfigured part after the configure stage, and +-- additionally because we cannot read/show 'Program' as it contains functions. +instance Show ProgramDb where + show = show . Map.toAscList . configuredProgs + +instance Read ProgramDb where + readsPrec p s = + [ (emptyProgramDb { configuredProgs = Map.fromList s' }, r) + | (s', r) <- readsPrec p s ] + +instance Binary ProgramDb where + put = put . configuredProgs + get = do + progs <- get + return $! 
emptyProgramDb { configuredProgs = progs } + + +-- | The Read\/Show instance does not preserve all the unconfigured 'Programs' +-- because 'Program' is not in Read\/Show because it contains functions. So to +-- fully restore a deserialised 'ProgramDb' use this function to add +-- back all the known 'Program's. +-- +-- * It does not add the default programs, but you probably want them, use +-- 'builtinPrograms' in addition to any extra you might need. +-- +restoreProgramDb :: [Program] -> ProgramDb -> ProgramDb +restoreProgramDb = addKnownPrograms + + +-- ------------------------------- +-- Managing unconfigured programs + +-- | Add a known program that we may configure later +-- +addKnownProgram :: Program -> ProgramDb -> ProgramDb +addKnownProgram prog = updateUnconfiguredProgs $ + Map.insertWith combine (programName prog) (prog, Nothing, []) + where combine _ (_, path, args) = (prog, path, args) + + +addKnownPrograms :: [Program] -> ProgramDb -> ProgramDb +addKnownPrograms progs conf = foldl' (flip addKnownProgram) conf progs + + +lookupKnownProgram :: String -> ProgramDb -> Maybe Program +lookupKnownProgram name = + fmap (\(p,_,_)->p) . Map.lookup name . unconfiguredProgs + + +knownPrograms :: ProgramDb -> [(Program, Maybe ConfiguredProgram)] +knownPrograms conf = + [ (p,p') | (p,_,_) <- Map.elems (unconfiguredProgs conf) + , let p' = Map.lookup (programName p) (configuredProgs conf) ] + +-- | Get the current 'ProgramSearchPath' used by the 'ProgramDb'. +-- This is the default list of locations where programs are looked for when +-- configuring them. This can be overridden for specific programs (with +-- 'userSpecifyPath'), and specific known programs can modify or ignore this +-- search path in their own configuration code. +-- +getProgramSearchPath :: ProgramDb -> ProgramSearchPath +getProgramSearchPath = progSearchPath + +-- | Change the current 'ProgramSearchPath' used by the 'ProgramDb'. 
+-- This will affect programs that are configured from here on, so you +-- should usually set it before configuring any programs. +-- +setProgramSearchPath :: ProgramSearchPath -> ProgramDb -> ProgramDb +setProgramSearchPath searchpath db = db { progSearchPath = searchpath } + +-- | Modify the current 'ProgramSearchPath' used by the 'ProgramDb'. +-- This will affect programs that are configured from here on, so you +-- should usually modify it before configuring any programs. +-- +modifyProgramSearchPath :: (ProgramSearchPath -> ProgramSearchPath) + -> ProgramDb + -> ProgramDb +modifyProgramSearchPath f db = + setProgramSearchPath (f $ getProgramSearchPath db) db + +-- |User-specify this path. Basically override any path information +-- for this program in the configuration. If it's not a known +-- program ignore it. +-- +userSpecifyPath :: String -- ^Program name + -> FilePath -- ^user-specified path to the program + -> ProgramDb -> ProgramDb +userSpecifyPath name path = updateUnconfiguredProgs $ + flip Map.update name $ \(prog, _, args) -> Just (prog, Just path, args) + + +userMaybeSpecifyPath :: String -> Maybe FilePath + -> ProgramDb -> ProgramDb +userMaybeSpecifyPath _ Nothing conf = conf +userMaybeSpecifyPath name (Just path) conf = userSpecifyPath name path conf + + +-- |User-specify the arguments for this program. Basically override +-- any args information for this program in the configuration. If it's +-- not a known program, ignore it.. +userSpecifyArgs :: String -- ^Program name + -> [ProgArg] -- ^user-specified args + -> ProgramDb + -> ProgramDb +userSpecifyArgs name args' = + updateUnconfiguredProgs + (flip Map.update name $ + \(prog, path, args) -> Just (prog, path, args ++ args')) + . updateConfiguredProgs + (flip Map.update name $ + \prog -> Just prog { programOverrideArgs = programOverrideArgs prog + ++ args' }) + + +-- | Like 'userSpecifyPath' but for a list of progs and their paths. 
+-- +userSpecifyPaths :: [(String, FilePath)] + -> ProgramDb + -> ProgramDb +userSpecifyPaths paths conf = + foldl' (\conf' (prog, path) -> userSpecifyPath prog path conf') conf paths + + +-- | Like 'userSpecifyPath' but for a list of progs and their args. +-- +userSpecifyArgss :: [(String, [ProgArg])] + -> ProgramDb + -> ProgramDb +userSpecifyArgss argss conf = + foldl' (\conf' (prog, args) -> userSpecifyArgs prog args conf') conf argss + + +-- | Get the path that has been previously specified for a program, if any. +-- +userSpecifiedPath :: Program -> ProgramDb -> Maybe FilePath +userSpecifiedPath prog = + join . fmap (\(_,p,_)->p) . Map.lookup (programName prog) . unconfiguredProgs + + +-- | Get any extra args that have been previously specified for a program. +-- +userSpecifiedArgs :: Program -> ProgramDb -> [ProgArg] +userSpecifiedArgs prog = + maybe [] (\(_,_,as)->as) . Map.lookup (programName prog) . unconfiguredProgs + + +-- ----------------------------- +-- Managing configured programs + +-- | Try to find a configured program +lookupProgram :: Program -> ProgramDb -> Maybe ConfiguredProgram +lookupProgram prog = Map.lookup (programName prog) . configuredProgs + + +-- | Update a configured program in the database. +updateProgram :: ConfiguredProgram -> ProgramDb + -> ProgramDb +updateProgram prog = updateConfiguredProgs $ + Map.insert (programId prog) prog + + +-- | List all configured programs. +configuredPrograms :: ProgramDb -> [ConfiguredProgram] +configuredPrograms = Map.elems . configuredProgs + +-- --------------------------- +-- Configuring known programs + +-- | Try to configure a specific program. If the program is already included in +-- the collection of unconfigured programs then we use any user-supplied +-- location and arguments. If the program gets configured successfully it gets +-- added to the configured collection. +-- +-- Note that it is not a failure if the program cannot be configured. 
It's only +-- a failure if the user supplied a location and the program could not be found +-- at that location. +-- +-- The reason for it not being a failure at this stage is that we don't know up +-- front all the programs we will need, so we try to configure them all. +-- To verify that a program was actually successfully configured use +-- 'requireProgram'. +-- +configureProgram :: Verbosity + -> Program + -> ProgramDb + -> IO ProgramDb +configureProgram verbosity prog conf = do + let name = programName prog + maybeLocation <- case userSpecifiedPath prog conf of + Nothing -> programFindLocation prog verbosity (progSearchPath conf) + >>= return . fmap FoundOnSystem + Just path -> do + absolute <- doesExecutableExist path + if absolute + then return (Just (UserSpecified path)) + else findProgramOnSearchPath verbosity (progSearchPath conf) path + >>= maybe (die notFound) (return . Just . UserSpecified) + where notFound = "Cannot find the program '" ++ name + ++ "'. User-specified path '" + ++ path ++ "' does not refer to an executable and " + ++ "the program is not on the system path." + case maybeLocation of + Nothing -> return conf + Just location -> do + version <- programFindVersion prog verbosity (locationPath location) + newPath <- programSearchPathAsPATHVar (progSearchPath conf) + let configuredProg = ConfiguredProgram { + programId = name, + programVersion = version, + programDefaultArgs = [], + programOverrideArgs = userSpecifiedArgs prog conf, + programOverrideEnv = [("PATH", Just newPath)], + programProperties = Map.empty, + programLocation = location + } + configuredProg' <- programPostConf prog verbosity configuredProg + return (updateConfiguredProgs (Map.insert name configuredProg') conf) + + +-- | Configure a bunch of programs using 'configureProgram'. Just a 'foldM'. 
+-- +configurePrograms :: Verbosity + -> [Program] + -> ProgramDb + -> IO ProgramDb +configurePrograms verbosity progs conf = + foldM (flip (configureProgram verbosity)) conf progs + + +-- | Try to configure all the known programs that have not yet been configured. +-- +configureAllKnownPrograms :: Verbosity + -> ProgramDb + -> IO ProgramDb +configureAllKnownPrograms verbosity conf = + configurePrograms verbosity + [ prog | (prog,_,_) <- Map.elems notYetConfigured ] conf + where + notYetConfigured = unconfiguredProgs conf + `Map.difference` configuredProgs conf + + +-- | reconfigure a bunch of programs given new user-specified args. It takes +-- the same inputs as 'userSpecifyPath' and 'userSpecifyArgs' and for all progs +-- with a new path it calls 'configureProgram'. +-- +reconfigurePrograms :: Verbosity + -> [(String, FilePath)] + -> [(String, [ProgArg])] + -> ProgramDb + -> IO ProgramDb +reconfigurePrograms verbosity paths argss conf = do + configurePrograms verbosity progs + . userSpecifyPaths paths + . userSpecifyArgss argss + $ conf + + where + progs = catMaybes [ lookupKnownProgram name conf | (name,_) <- paths ] + + +-- | Check that a program is configured and available to be run. +-- +-- It raises an exception if the program could not be configured, otherwise +-- it returns the configured program. +-- +requireProgram :: Verbosity -> Program -> ProgramDb + -> IO (ConfiguredProgram, ProgramDb) +requireProgram verbosity prog conf = do + + -- If it's not already been configured, try to configure it now + conf' <- case lookupProgram prog conf of + Nothing -> configureProgram verbosity prog conf + Just _ -> return conf + + case lookupProgram prog conf' of + Nothing -> die notFound + Just configuredProg -> return (configuredProg, conf') + + where notFound = "The program '" ++ programName prog + ++ "' is required but it could not be found." + + +-- | Check that a program is configured and available to be run. 
+-- +-- Additionally check that the program version number is suitable and return +-- it. For example you could require 'AnyVersion' or @'orLaterVersion' +-- ('Version' [1,0] [])@ +-- +-- It returns the configured program, its version number and a possibly updated +-- 'ProgramDb'. If the program could not be configured or the version is +-- unsuitable, it returns an error value. +-- +lookupProgramVersion + :: Verbosity -> Program -> VersionRange -> ProgramDb + -> IO (Either String (ConfiguredProgram, Version, ProgramDb)) +lookupProgramVersion verbosity prog range programDb = do + + -- If it's not already been configured, try to configure it now + programDb' <- case lookupProgram prog programDb of + Nothing -> configureProgram verbosity prog programDb + Just _ -> return programDb + + case lookupProgram prog programDb' of + Nothing -> return $! Left notFound + Just configuredProg@ConfiguredProgram { programLocation = location } -> + case programVersion configuredProg of + Just version + | withinRange version range -> + return $! Right (configuredProg, version ,programDb') + | otherwise -> + return $! Left (badVersion version location) + Nothing -> + return $! Left (noVersion location) + + where notFound = "The program '" + ++ programName prog ++ "'" ++ versionRequirement + ++ " is required but it could not be found." + badVersion v l = "The program '" + ++ programName prog ++ "'" ++ versionRequirement + ++ " is required but the version found at " + ++ locationPath l ++ " is version " ++ display v + noVersion l = "The program '" + ++ programName prog ++ "'" ++ versionRequirement + ++ " is required but the version of " + ++ locationPath l ++ " could not be determined." + versionRequirement + | isAnyVersion range = "" + | otherwise = " version " ++ display range + +-- | Like 'lookupProgramVersion', but raises an exception in case of error +-- instead of returning 'Left errMsg'. 
+-- +requireProgramVersion :: Verbosity -> Program -> VersionRange + -> ProgramDb + -> IO (ConfiguredProgram, Version, ProgramDb) +requireProgramVersion verbosity prog range programDb = + join $ either die return <$> + lookupProgramVersion verbosity prog range programDb diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Find.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Find.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Find.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Find.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,126 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.Program.Types +-- Copyright : Duncan Coutts 2013 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- A somewhat extended notion of the normal program search path concept. +-- +-- Usually when finding executables we just want to look in the usual places +-- using the OS's usual method for doing so. In Haskell the normal OS-specific +-- method is captured by 'findExecutable'. On all common OSs that makes use of +-- a @PATH@ environment variable, (though on Windows it is not just the @PATH@). +-- +-- However it is sometimes useful to be able to look in additional locations +-- without having to change the process-global @PATH@ environment variable. +-- So we need an extension of the usual 'findExecutable' that can look in +-- additional locations, either before, after or instead of the normal OS +-- locations. 
+-- +module Distribution.Simple.Program.Find ( + -- * Program search path + ProgramSearchPath, + ProgramSearchPathEntry(..), + defaultProgramSearchPath, + findProgramOnSearchPath, + programSearchPathAsPATHVar, + ) where + +import Distribution.Verbosity + ( Verbosity ) +import Distribution.Simple.Utils + ( debug, doesExecutableExist ) +import Distribution.System + ( OS(..), buildOS ) +import System.Directory + ( findExecutable ) +import Distribution.Compat.Environment + ( getEnvironment ) +import System.FilePath + ( (), (<.>), splitSearchPath, searchPathSeparator ) +import Data.List + ( intercalate ) + + +-- | A search path to use when locating executables. This is analogous +-- to the unix @$PATH@ or win32 @%PATH%@ but with the ability to use +-- the system default method for finding executables ('findExecutable' which +-- on unix is simply looking on the @$PATH@ but on win32 is a bit more +-- complicated). +-- +-- The default to use is @[ProgSearchPathDefault]@ but you can add extra dirs +-- either before, after or instead of the default, e.g. here we add an extra +-- dir to search after the usual ones. +-- +-- > ['ProgramSearchPathDefault', 'ProgramSearchPathDir' dir] +-- +type ProgramSearchPath = [ProgramSearchPathEntry] +data ProgramSearchPathEntry = + ProgramSearchPathDir FilePath -- ^ A specific dir + | ProgramSearchPathDefault -- ^ The system default + +defaultProgramSearchPath :: ProgramSearchPath +defaultProgramSearchPath = [ProgramSearchPathDefault] + +findProgramOnSearchPath :: Verbosity -> ProgramSearchPath + -> FilePath -> IO (Maybe FilePath) +findProgramOnSearchPath verbosity searchpath prog = do + debug verbosity $ "Searching for " ++ prog ++ " in path." 
+ res <- tryPathElems searchpath + case res of + Nothing -> debug verbosity ("Cannot find " ++ prog ++ " on the path") + Just path -> debug verbosity ("Found " ++ prog ++ " at "++ path) + return res + where + tryPathElems [] = return Nothing + tryPathElems (pe:pes) = do + res <- tryPathElem pe + case res of + Nothing -> tryPathElems pes + Just _ -> return res + + tryPathElem (ProgramSearchPathDir dir) = + findFirstExe [ dir prog <.> ext | ext <- extensions ] + where + -- Possible improvement: on Windows, read the list of extensions from + -- the PATHEXT environment variable. By default PATHEXT is ".com; .exe; + -- .bat; .cmd". + extensions = case buildOS of + Windows -> ["", "exe"] + Ghcjs -> ["", "exe"] + _ -> [""] + + tryPathElem ProgramSearchPathDefault = do + -- 'findExecutable' doesn't check that the path really refers to an + -- executable on Windows (at least with GHC < 7.8). See + -- https://ghc.haskell.org/trac/ghc/ticket/2184 + mExe <- findExecutable prog + case mExe of + Just exe -> do + exeExists <- doesExecutableExist exe + if exeExists + then return mExe + else return Nothing + _ -> return mExe + + findFirstExe [] = return Nothing + findFirstExe (f:fs) = do + isExe <- doesExecutableExist f + if isExe + then return (Just f) + else findFirstExe fs + +-- | Interpret a 'ProgramSearchPath' to construct a new @$PATH@ env var. +-- Note that this is close but not perfect because on Windows the search +-- algorithm looks at more than just the @%PATH%@. 
+programSearchPathAsPATHVar :: ProgramSearchPath -> IO String +programSearchPathAsPATHVar searchpath = do + ess <- mapM getEntries searchpath + return (intercalate [searchPathSeparator] (concat ess)) + where + getEntries (ProgramSearchPathDir dir) = return [dir] + getEntries ProgramSearchPathDefault = do + env <- getEnvironment + return (maybe [] splitSearchPath (lookup "PATH" env)) diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/GHC.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/GHC.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/GHC.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/GHC.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,562 @@ +{-# LANGUAGE CPP #-} +module Distribution.Simple.Program.GHC ( + GhcOptions(..), + GhcMode(..), + GhcOptimisation(..), + GhcDynLinkMode(..), + + ghcInvocation, + renderGhcOptions, + + runGHC, + + ) where + +import Distribution.Simple.GHC.ImplInfo ( getImplInfo, GhcImplInfo(..) ) +import Distribution.Package +import Distribution.PackageDescription hiding (Flag) +import Distribution.ModuleName +import Distribution.Simple.Compiler hiding (Flag) +import Distribution.Simple.Setup ( Flag(..), flagToMaybe, fromFlagOrDefault, + flagToList ) +import Distribution.Simple.Program.Types +import Distribution.Simple.Program.Run +import Distribution.Text +import Distribution.Verbosity +import Distribution.Utils.NubList ( NubListR, fromNubListR ) +import Language.Haskell.Extension ( Language(..), Extension(..) ) + +import qualified Data.Map as M +#if __GLASGOW_HASKELL__ < 710 +import Data.Monoid +#endif +import Data.List ( intercalate ) + +-- | A structured set of GHC options/flags +-- +data GhcOptions = GhcOptions { + + -- | The major mode for the ghc invocation. + ghcOptMode :: Flag GhcMode, + + -- | Any extra options to pass directly to ghc. 
These go at the end and hence + -- override other stuff. + ghcOptExtra :: NubListR String, + + -- | Extra default flags to pass directly to ghc. These go at the beginning + -- and so can be overridden by other stuff. + ghcOptExtraDefault :: NubListR String, + + ----------------------- + -- Inputs and outputs + + -- | The main input files; could be .hs, .hi, .c, .o, depending on mode. + ghcOptInputFiles :: NubListR FilePath, + + -- | The names of input Haskell modules, mainly for @--make@ mode. + ghcOptInputModules :: NubListR ModuleName, + + -- | Location for output file; the @ghc -o@ flag. + ghcOptOutputFile :: Flag FilePath, + + -- | Location for dynamic output file in 'GhcStaticAndDynamic' mode; + -- the @ghc -dyno@ flag. + ghcOptOutputDynFile :: Flag FilePath, + + -- | Start with an empty search path for Haskell source files; + -- the @ghc -i@ flag (@-i@ on it's own with no path argument). + ghcOptSourcePathClear :: Flag Bool, + + -- | Search path for Haskell source files; the @ghc -i@ flag. + ghcOptSourcePath :: NubListR FilePath, + + ------------- + -- Packages + + -- | The package key the modules will belong to; the @ghc -this-package-key@ + -- flag. + ghcOptPackageKey :: Flag PackageKey, + + -- | GHC package databases to use, the @ghc -package-conf@ flag. + ghcOptPackageDBs :: PackageDBStack, + + -- | The GHC packages to use. For compatability with old and new ghc, this + -- requires both the short and long form of the package id; + -- the @ghc -package@ or @ghc -package-id@ flags. + ghcOptPackages :: + NubListR (InstalledPackageId, PackageId, ModuleRenaming), + + -- | Start with a clean package set; the @ghc -hide-all-packages@ flag + ghcOptHideAllPackages :: Flag Bool, + + -- | Don't automatically link in Haskell98 etc; the @ghc + -- -no-auto-link-packages@ flag. 
+ ghcOptNoAutoLinkPackages :: Flag Bool, + + -- | What packages are implementing the signatures + ghcOptSigOf :: [(ModuleName, (PackageKey, ModuleName))], + + ----------------- + -- Linker stuff + + -- | Names of libraries to link in; the @ghc -l@ flag. + ghcOptLinkLibs :: NubListR FilePath, + + -- | Search path for libraries to link in; the @ghc -L@ flag. + ghcOptLinkLibPath :: NubListR FilePath, + + -- | Options to pass through to the linker; the @ghc -optl@ flag. + ghcOptLinkOptions :: NubListR String, + + -- | OSX only: frameworks to link in; the @ghc -framework@ flag. + ghcOptLinkFrameworks :: NubListR String, + + -- | Don't do the link step, useful in make mode; the @ghc -no-link@ flag. + ghcOptNoLink :: Flag Bool, + + -- | Don't link in the normal RTS @main@ entry point; the @ghc -no-hs-main@ + -- flag. + ghcOptLinkNoHsMain :: Flag Bool, + + -------------------- + -- C and CPP stuff + + -- | Options to pass through to the C compiler; the @ghc -optc@ flag. + ghcOptCcOptions :: NubListR String, + + -- | Options to pass through to CPP; the @ghc -optP@ flag. + ghcOptCppOptions :: NubListR String, + + -- | Search path for CPP includes like header files; the @ghc -I@ flag. + ghcOptCppIncludePath :: NubListR FilePath, + + -- | Extra header files to include at CPP stage; the @ghc -optP-include@ flag. + ghcOptCppIncludes :: NubListR FilePath, + + -- | Extra header files to include for old-style FFI; the @ghc -#include@ flag. + ghcOptFfiIncludes :: NubListR FilePath, + + ---------------------------- + -- Language and extensions + + -- | The base language; the @ghc -XHaskell98@ or @-XHaskell2010@ flag. + ghcOptLanguage :: Flag Language, + + -- | The language extensions; the @ghc -X@ flag. + ghcOptExtensions :: NubListR Extension, + + -- | A GHC version-dependent mapping of extensions to flags. This must be + -- set to be able to make use of the 'ghcOptExtensions'. 
+ ghcOptExtensionMap :: M.Map Extension String, + + ---------------- + -- Compilation + + -- | What optimisation level to use; the @ghc -O@ flag. + ghcOptOptimisation :: Flag GhcOptimisation, + + -- | Emit debug info; the @ghc -g@ flag. + ghcOptDebugInfo :: Flag Bool, + + -- | Compile in profiling mode; the @ghc -prof@ flag. + ghcOptProfilingMode :: Flag Bool, + + -- | Use the \"split object files\" feature; the @ghc -split-objs@ flag. + ghcOptSplitObjs :: Flag Bool, + + -- | Run N jobs simultaneously (if possible). + ghcOptNumJobs :: Flag (Maybe Int), + + -- | Enable coverage analysis; the @ghc -fhpc -hpcdir@ flags. + ghcOptHPCDir :: Flag FilePath, + + ---------------- + -- GHCi + + -- | Extra GHCi startup scripts; the @-ghci-script@ flag + ghcOptGHCiScripts :: NubListR FilePath, + + ------------------------ + -- Redirecting outputs + + ghcOptHiSuffix :: Flag String, + ghcOptObjSuffix :: Flag String, + ghcOptDynHiSuffix :: Flag String, -- ^ only in 'GhcStaticAndDynamic' mode + ghcOptDynObjSuffix :: Flag String, -- ^ only in 'GhcStaticAndDynamic' mode + ghcOptHiDir :: Flag FilePath, + ghcOptObjDir :: Flag FilePath, + ghcOptOutputDir :: Flag FilePath, + ghcOptStubDir :: Flag FilePath, + + -------------------- + -- Dynamic linking + + ghcOptDynLinkMode :: Flag GhcDynLinkMode, + ghcOptShared :: Flag Bool, + ghcOptFPic :: Flag Bool, + ghcOptDylibName :: Flag String, + ghcOptRPaths :: NubListR FilePath, + + --------------- + -- Misc flags + + -- | Get GHC to be quiet or verbose with what it's doing; the @ghc -v@ flag. + ghcOptVerbosity :: Flag Verbosity, + + -- | Let GHC know that it is Cabal that's calling it. + -- Modifies some of the GHC error messages. 
+ ghcOptCabal :: Flag Bool + +} deriving Show + + +data GhcMode = GhcModeCompile -- ^ @ghc -c@ + | GhcModeLink -- ^ @ghc@ + | GhcModeMake -- ^ @ghc --make@ + | GhcModeInteractive -- ^ @ghci@ \/ @ghc --interactive@ + | GhcModeAbiHash -- ^ @ghc --abi-hash@ +-- | GhcModeDepAnalysis -- ^ @ghc -M@ +-- | GhcModeEvaluate -- ^ @ghc -e@ + deriving (Show, Eq) + +data GhcOptimisation = GhcNoOptimisation -- ^ @-O0@ + | GhcNormalOptimisation -- ^ @-O@ + | GhcMaximumOptimisation -- ^ @-O2@ + | GhcSpecialOptimisation String -- ^ e.g. @-Odph@ + deriving (Show, Eq) + +data GhcDynLinkMode = GhcStaticOnly -- ^ @-static@ + | GhcDynamicOnly -- ^ @-dynamic@ + | GhcStaticAndDynamic -- ^ @-static -dynamic-too@ + deriving (Show, Eq) + + +runGHC :: Verbosity -> ConfiguredProgram -> Compiler -> GhcOptions -> IO () +runGHC verbosity ghcProg comp opts = do + runProgramInvocation verbosity (ghcInvocation ghcProg comp opts) + + +ghcInvocation :: ConfiguredProgram -> Compiler -> GhcOptions -> ProgramInvocation +ghcInvocation prog comp opts = + programInvocation prog (renderGhcOptions comp opts) + +renderGhcOptions :: Compiler -> GhcOptions -> [String] +renderGhcOptions comp opts + | compilerFlavor comp `notElem` [GHC, GHCJS] = + error $ "Distribution.Simple.Program.GHC.renderGhcOptions: " + ++ "compiler flavor must be 'GHC' or 'GHCJS'!" 
+ | otherwise = + concat + [ case flagToMaybe (ghcOptMode opts) of + Nothing -> [] + Just GhcModeCompile -> ["-c"] + Just GhcModeLink -> [] + Just GhcModeMake -> ["--make"] + Just GhcModeInteractive -> ["--interactive"] + Just GhcModeAbiHash -> ["--abi-hash"] +-- Just GhcModeDepAnalysis -> ["-M"] +-- Just GhcModeEvaluate -> ["-e", expr] + + , flags ghcOptExtraDefault + + , [ "-no-link" | flagBool ghcOptNoLink ] + + --------------- + -- Misc flags + + , maybe [] verbosityOpts (flagToMaybe (ghcOptVerbosity opts)) + + , [ "-fbuilding-cabal-package" | flagBool ghcOptCabal + , flagBuildingCabalPkg implInfo ] + + ---------------- + -- Compilation + + , case flagToMaybe (ghcOptOptimisation opts) of + Nothing -> [] + Just GhcNoOptimisation -> ["-O0"] + Just GhcNormalOptimisation -> ["-O"] + Just GhcMaximumOptimisation -> ["-O2"] + Just (GhcSpecialOptimisation s) -> ["-O" ++ s] -- eg -Odph + + , [ "-g" | flagDebugInfo implInfo && flagBool ghcOptDebugInfo ] + + , [ "-prof" | flagBool ghcOptProfilingMode ] + + , [ "-split-objs" | flagBool ghcOptSplitObjs ] + + , case flagToMaybe (ghcOptHPCDir opts) of + Nothing -> [] + Just hpcdir -> ["-fhpc", "-hpcdir", hpcdir] + + , if parmakeSupported comp + then case ghcOptNumJobs opts of + NoFlag -> [] + Flag n -> ["-j" ++ maybe "" show n] + else [] + + -------------------- + -- Dynamic linking + + , [ "-shared" | flagBool ghcOptShared ] + , case flagToMaybe (ghcOptDynLinkMode opts) of + Nothing -> [] + Just GhcStaticOnly -> ["-static"] + Just GhcDynamicOnly -> ["-dynamic"] + Just GhcStaticAndDynamic -> ["-static", "-dynamic-too"] + , [ "-fPIC" | flagBool ghcOptFPic ] + + , concat [ ["-dylib-install-name", libname] | libname <- flag ghcOptDylibName ] + + ------------------------ + -- Redirecting outputs + + , concat [ ["-osuf", suf] | suf <- flag ghcOptObjSuffix ] + , concat [ ["-hisuf", suf] | suf <- flag ghcOptHiSuffix ] + , concat [ ["-dynosuf", suf] | suf <- flag ghcOptDynObjSuffix ] + , concat [ ["-dynhisuf",suf] | suf <- flag 
ghcOptDynHiSuffix ] + , concat [ ["-outputdir", dir] | dir <- flag ghcOptOutputDir + , flagOutputDir implInfo ] + , concat [ ["-odir", dir] | dir <- flag ghcOptObjDir ] + , concat [ ["-hidir", dir] | dir <- flag ghcOptHiDir ] + , concat [ ["-stubdir", dir] | dir <- flag ghcOptStubDir + , flagStubdir implInfo ] + + ----------------------- + -- Source search path + + , [ "-i" | flagBool ghcOptSourcePathClear ] + , [ "-i" ++ dir | dir <- flags ghcOptSourcePath ] + + -------------------- + -- C and CPP stuff + + , [ "-I" ++ dir | dir <- flags ghcOptCppIncludePath ] + , [ "-optP" ++ opt | opt <- flags ghcOptCppOptions ] + , concat [ [ "-optP-include", "-optP" ++ inc] + | inc <- flags ghcOptCppIncludes ] + , [ "-#include \"" ++ inc ++ "\"" + | inc <- flags ghcOptFfiIncludes, flagFfiIncludes implInfo ] + , [ "-optc" ++ opt | opt <- flags ghcOptCcOptions ] + + ----------------- + -- Linker stuff + + , [ "-optl" ++ opt | opt <- flags ghcOptLinkOptions ] + , ["-l" ++ lib | lib <- flags ghcOptLinkLibs ] + , ["-L" ++ dir | dir <- flags ghcOptLinkLibPath ] + , concat [ ["-framework", fmwk] | fmwk <- flags ghcOptLinkFrameworks ] + , [ "-no-hs-main" | flagBool ghcOptLinkNoHsMain ] + , [ "-dynload deploy" | not (null (flags ghcOptRPaths)) ] + , concat [ [ "-optl-Wl,-rpath," ++ dir] + | dir <- flags ghcOptRPaths ] + + ------------- + -- Packages + + , concat [ [if packageKeySupported comp + then "-this-package-key" + else "-package-name", display pkgid] + | pkgid <- flag ghcOptPackageKey ] + + , [ "-hide-all-packages" | flagBool ghcOptHideAllPackages ] + , [ "-no-auto-link-packages" | flagBool ghcOptNoAutoLinkPackages ] + + , packageDbArgs implInfo (ghcOptPackageDBs opts) + + , if null (ghcOptSigOf opts) + then [] + else "-sig-of" + : intercalate "," (map (\(n,(p,m)) -> display n ++ " is " + ++ display p ++ ":" + ++ display m) + (ghcOptSigOf opts)) + : [] + + , concat $ if flagPackageId implInfo + then let space "" = "" + space xs = ' ' : xs + in [ ["-package-id", display ipkgid ++ 
space (display rns)] + | (ipkgid,_,rns) <- flags ghcOptPackages ] + else [ ["-package", display pkgid] + | (_,pkgid,_) <- flags ghcOptPackages ] + + ---------------------------- + -- Language and extensions + + , if supportsHaskell2010 implInfo + then [ "-X" ++ display lang | lang <- flag ghcOptLanguage ] + else [] + + , [ case M.lookup ext (ghcOptExtensionMap opts) of + Just arg -> arg + Nothing -> error $ "Distribution.Simple.Program.GHC.renderGhcOptions: " + ++ display ext ++ " not present in ghcOptExtensionMap." + | ext <- flags ghcOptExtensions ] + + ---------------- + -- GHCi + + , concat [ [ "-ghci-script", script ] | script <- flags ghcOptGHCiScripts + , flagGhciScript implInfo ] + + --------------- + -- Inputs + + , [ display modu | modu <- flags ghcOptInputModules ] + , flags ghcOptInputFiles + + , concat [ [ "-o", out] | out <- flag ghcOptOutputFile ] + , concat [ [ "-dyno", out] | out <- flag ghcOptOutputDynFile ] + + --------------- + -- Extra + + , flags ghcOptExtra + + ] + + + where + implInfo = getImplInfo comp + flag flg = flagToList (flg opts) + flags flg = fromNubListR . 
flg $ opts + flagBool flg = fromFlagOrDefault False (flg opts) + +verbosityOpts :: Verbosity -> [String] +verbosityOpts verbosity + | verbosity >= deafening = ["-v"] + | verbosity >= normal = [] + | otherwise = ["-w", "-v0"] + + +packageDbArgs :: GhcImplInfo -> PackageDBStack -> [String] +packageDbArgs implInfo dbstack = case dbstack of + (GlobalPackageDB:UserPackageDB:dbs) -> concatMap specific dbs + (GlobalPackageDB:dbs) -> ("-no-user-" ++ packageDbFlag) + : concatMap specific dbs + _ -> ierror + where + specific (SpecificPackageDB db) = [ '-':packageDbFlag , db ] + specific _ = ierror + ierror = error $ "internal error: unexpected package db stack: " + ++ show dbstack + packageDbFlag + | flagPackageConf implInfo + = "package-conf" + | otherwise + = "package-db" + + +-- ----------------------------------------------------------------------------- +-- Boilerplate Monoid instance for GhcOptions + +instance Monoid GhcOptions where + mempty = GhcOptions { + ghcOptMode = mempty, + ghcOptExtra = mempty, + ghcOptExtraDefault = mempty, + ghcOptInputFiles = mempty, + ghcOptInputModules = mempty, + ghcOptOutputFile = mempty, + ghcOptOutputDynFile = mempty, + ghcOptSourcePathClear = mempty, + ghcOptSourcePath = mempty, + ghcOptPackageKey = mempty, + ghcOptPackageDBs = mempty, + ghcOptPackages = mempty, + ghcOptHideAllPackages = mempty, + ghcOptNoAutoLinkPackages = mempty, + ghcOptSigOf = mempty, + ghcOptLinkLibs = mempty, + ghcOptLinkLibPath = mempty, + ghcOptLinkOptions = mempty, + ghcOptLinkFrameworks = mempty, + ghcOptNoLink = mempty, + ghcOptLinkNoHsMain = mempty, + ghcOptCcOptions = mempty, + ghcOptCppOptions = mempty, + ghcOptCppIncludePath = mempty, + ghcOptCppIncludes = mempty, + ghcOptFfiIncludes = mempty, + ghcOptLanguage = mempty, + ghcOptExtensions = mempty, + ghcOptExtensionMap = mempty, + ghcOptOptimisation = mempty, + ghcOptDebugInfo = mempty, + ghcOptProfilingMode = mempty, + ghcOptSplitObjs = mempty, + ghcOptNumJobs = mempty, + ghcOptHPCDir = mempty, + 
ghcOptGHCiScripts = mempty, + ghcOptHiSuffix = mempty, + ghcOptObjSuffix = mempty, + ghcOptDynHiSuffix = mempty, + ghcOptDynObjSuffix = mempty, + ghcOptHiDir = mempty, + ghcOptObjDir = mempty, + ghcOptOutputDir = mempty, + ghcOptStubDir = mempty, + ghcOptDynLinkMode = mempty, + ghcOptShared = mempty, + ghcOptFPic = mempty, + ghcOptDylibName = mempty, + ghcOptRPaths = mempty, + ghcOptVerbosity = mempty, + ghcOptCabal = mempty + } + mappend a b = GhcOptions { + ghcOptMode = combine ghcOptMode, + ghcOptExtra = combine ghcOptExtra, + ghcOptExtraDefault = combine ghcOptExtraDefault, + ghcOptInputFiles = combine ghcOptInputFiles, + ghcOptInputModules = combine ghcOptInputModules, + ghcOptOutputFile = combine ghcOptOutputFile, + ghcOptOutputDynFile = combine ghcOptOutputDynFile, + ghcOptSourcePathClear = combine ghcOptSourcePathClear, + ghcOptSourcePath = combine ghcOptSourcePath, + ghcOptPackageKey = combine ghcOptPackageKey, + ghcOptPackageDBs = combine ghcOptPackageDBs, + ghcOptPackages = combine ghcOptPackages, + ghcOptHideAllPackages = combine ghcOptHideAllPackages, + ghcOptNoAutoLinkPackages = combine ghcOptNoAutoLinkPackages, + ghcOptSigOf = combine ghcOptSigOf, + ghcOptLinkLibs = combine ghcOptLinkLibs, + ghcOptLinkLibPath = combine ghcOptLinkLibPath, + ghcOptLinkOptions = combine ghcOptLinkOptions, + ghcOptLinkFrameworks = combine ghcOptLinkFrameworks, + ghcOptNoLink = combine ghcOptNoLink, + ghcOptLinkNoHsMain = combine ghcOptLinkNoHsMain, + ghcOptCcOptions = combine ghcOptCcOptions, + ghcOptCppOptions = combine ghcOptCppOptions, + ghcOptCppIncludePath = combine ghcOptCppIncludePath, + ghcOptCppIncludes = combine ghcOptCppIncludes, + ghcOptFfiIncludes = combine ghcOptFfiIncludes, + ghcOptLanguage = combine ghcOptLanguage, + ghcOptExtensions = combine ghcOptExtensions, + ghcOptExtensionMap = combine ghcOptExtensionMap, + ghcOptOptimisation = combine ghcOptOptimisation, + ghcOptDebugInfo = combine ghcOptDebugInfo, + ghcOptProfilingMode = combine 
ghcOptProfilingMode, + ghcOptSplitObjs = combine ghcOptSplitObjs, + ghcOptNumJobs = combine ghcOptNumJobs, + ghcOptHPCDir = combine ghcOptHPCDir, + ghcOptGHCiScripts = combine ghcOptGHCiScripts, + ghcOptHiSuffix = combine ghcOptHiSuffix, + ghcOptObjSuffix = combine ghcOptObjSuffix, + ghcOptDynHiSuffix = combine ghcOptDynHiSuffix, + ghcOptDynObjSuffix = combine ghcOptDynObjSuffix, + ghcOptHiDir = combine ghcOptHiDir, + ghcOptObjDir = combine ghcOptObjDir, + ghcOptOutputDir = combine ghcOptOutputDir, + ghcOptStubDir = combine ghcOptStubDir, + ghcOptDynLinkMode = combine ghcOptDynLinkMode, + ghcOptShared = combine ghcOptShared, + ghcOptFPic = combine ghcOptFPic, + ghcOptDylibName = combine ghcOptDylibName, + ghcOptRPaths = combine ghcOptRPaths, + ghcOptVerbosity = combine ghcOptVerbosity, + ghcOptCabal = combine ghcOptCabal + } + where + combine field = field a `mappend` field b diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/HcPkg.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/HcPkg.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/HcPkg.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/HcPkg.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,396 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.Program.HcPkg +-- Copyright : Duncan Coutts 2009, 2013 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This module provides an library interface to the @hc-pkg@ program. +-- Currently only GHC, GHCJS and LHC have hc-pkg programs. 
+ +module Distribution.Simple.Program.HcPkg ( + HcPkgInfo(..), + + init, + invoke, + register, + reregister, + unregister, + expose, + hide, + dump, + list, + + -- * Program invocations + initInvocation, + registerInvocation, + reregisterInvocation, + unregisterInvocation, + exposeInvocation, + hideInvocation, + dumpInvocation, + listInvocation, + ) where + +import Prelude hiding (init) +import Distribution.Package + ( PackageId, InstalledPackageId(..) ) +import Distribution.InstalledPackageInfo + ( InstalledPackageInfo, InstalledPackageInfo_(..) + , showInstalledPackageInfo + , emptyInstalledPackageInfo, fieldsInstalledPackageInfo ) +import Distribution.ParseUtils +import Distribution.Simple.Compiler + ( PackageDB(..), PackageDBStack ) +import Distribution.Simple.Program.Types + ( ConfiguredProgram(programId) ) +import Distribution.Simple.Program.Run + ( ProgramInvocation(..), IOEncoding(..), programInvocation + , runProgramInvocation, getProgramInvocationOutput ) +import Distribution.Text + ( display, simpleParse ) +import Distribution.Simple.Utils + ( die ) +import Distribution.Verbosity + ( Verbosity, deafening, silent ) +import Distribution.Compat.Exception + ( catchExit ) + +import Data.Char + ( isSpace ) +import Data.List + ( stripPrefix ) +import System.FilePath as FilePath + ( (), splitPath, splitDirectories, joinPath, isPathSeparator ) +import qualified System.FilePath.Posix as FilePath.Posix + +-- | Information about the features and capabilities of an @hc-pkg@ +-- program. +-- +data HcPkgInfo = HcPkgInfo + { hcPkgProgram :: ConfiguredProgram + , noPkgDbStack :: Bool -- ^ no package DB stack supported + , noVerboseFlag :: Bool -- ^ hc-pkg does not support verbosity flags + , flagPackageConf :: Bool -- ^ use package-conf option instead of package-db + , useSingleFileDb :: Bool -- ^ requires single file package database + } + +-- | Call @hc-pkg@ to initialise a package database at the location {path}. 
+-- +-- > hc-pkg init {path} +-- +init :: HcPkgInfo -> Verbosity -> FilePath -> IO () +init hpi verbosity path = + runProgramInvocation verbosity (initInvocation hpi verbosity path) + +-- | Run @hc-pkg@ using a given package DB stack, directly forwarding the +-- provided command-line arguments to it. +invoke :: HcPkgInfo -> Verbosity -> PackageDBStack -> [String] -> IO () +invoke hpi verbosity dbStack extraArgs = + runProgramInvocation verbosity invocation + where + args = packageDbStackOpts hpi dbStack ++ extraArgs + invocation = programInvocation (hcPkgProgram hpi) args + +-- | Call @hc-pkg@ to register a package. +-- +-- > hc-pkg register {filename | -} [--user | --global | --package-db] +-- +register :: HcPkgInfo -> Verbosity -> PackageDBStack + -> Either FilePath + InstalledPackageInfo + -> IO () +register hpi verbosity packagedb pkgFile = + runProgramInvocation verbosity + (registerInvocation hpi verbosity packagedb pkgFile) + + +-- | Call @hc-pkg@ to re-register a package. +-- +-- > hc-pkg register {filename | -} [--user | --global | --package-db] +-- +reregister :: HcPkgInfo -> Verbosity -> PackageDBStack + -> Either FilePath + InstalledPackageInfo + -> IO () +reregister hpi verbosity packagedb pkgFile = + runProgramInvocation verbosity + (reregisterInvocation hpi verbosity packagedb pkgFile) + + +-- | Call @hc-pkg@ to unregister a package +-- +-- > hc-pkg unregister [pkgid] [--user | --global | --package-db] +-- +unregister :: HcPkgInfo -> Verbosity -> PackageDB -> PackageId -> IO () +unregister hpi verbosity packagedb pkgid = + runProgramInvocation verbosity + (unregisterInvocation hpi verbosity packagedb pkgid) + + +-- | Call @hc-pkg@ to expose a package. 
+-- +-- > hc-pkg expose [pkgid] [--user | --global | --package-db] +-- +expose :: HcPkgInfo -> Verbosity -> PackageDB -> PackageId -> IO () +expose hpi verbosity packagedb pkgid = + runProgramInvocation verbosity + (exposeInvocation hpi verbosity packagedb pkgid) + + +-- | Call @hc-pkg@ to hide a package. +-- +-- > hc-pkg hide [pkgid] [--user | --global | --package-db] +-- +hide :: HcPkgInfo -> Verbosity -> PackageDB -> PackageId -> IO () +hide hpi verbosity packagedb pkgid = + runProgramInvocation verbosity + (hideInvocation hpi verbosity packagedb pkgid) + + +-- | Call @hc-pkg@ to get all the details of all the packages in the given +-- package database. +-- +dump :: HcPkgInfo -> Verbosity -> PackageDB -> IO [InstalledPackageInfo] +dump hpi verbosity packagedb = do + + output <- getProgramInvocationOutput verbosity + (dumpInvocation hpi verbosity packagedb) + `catchExit` \_ -> die $ programId (hcPkgProgram hpi) ++ " dump failed" + + case parsePackages output of + Left ok -> return ok + _ -> die $ "failed to parse output of '" + ++ programId (hcPkgProgram hpi) ++ " dump'" + + where + parsePackages str = + let parsed = map parseInstalledPackageInfo' (splitPkgs str) + in case [ msg | ParseFailed msg <- parsed ] of + [] -> Left [ setInstalledPackageId + . maybe id mungePackagePaths (pkgRoot pkg) + $ pkg + | ParseOk _ pkg <- parsed ] + msgs -> Right msgs + + parseInstalledPackageInfo' = + parseFieldsFlat fieldsInstalledPackageInfo emptyInstalledPackageInfo + + --TODO: this could be a lot faster. We're doing normaliseLineEndings twice + -- and converting back and forth with lines/unlines. + splitPkgs :: String -> [String] + splitPkgs = checkEmpty . map unlines . splitWith ("---" ==) . lines + where + -- Handle the case of there being no packages at all. 
+ checkEmpty [s] | all isSpace s = [] + checkEmpty ss = ss + + splitWith :: (a -> Bool) -> [a] -> [[a]] + splitWith p xs = ys : case zs of + [] -> [] + _:ws -> splitWith p ws + where (ys,zs) = break p xs + +mungePackagePaths :: FilePath -> InstalledPackageInfo -> InstalledPackageInfo +-- Perform path/URL variable substitution as per the Cabal ${pkgroot} spec +-- (http://www.haskell.org/pipermail/libraries/2009-May/011772.html) +-- Paths/URLs can be relative to ${pkgroot} or ${pkgrooturl}. +-- The "pkgroot" is the directory containing the package database. +mungePackagePaths pkgroot pkginfo = + pkginfo { + importDirs = mungePaths (importDirs pkginfo), + includeDirs = mungePaths (includeDirs pkginfo), + libraryDirs = mungePaths (libraryDirs pkginfo), + frameworkDirs = mungePaths (frameworkDirs pkginfo), + haddockInterfaces = mungePaths (haddockInterfaces pkginfo), + haddockHTMLs = mungeUrls (haddockHTMLs pkginfo) + } + where + mungePaths = map mungePath + mungeUrls = map mungeUrl + + mungePath p = case stripVarPrefix "${pkgroot}" p of + Just p' -> pkgroot p' + Nothing -> p + + mungeUrl p = case stripVarPrefix "${pkgrooturl}" p of + Just p' -> toUrlPath pkgroot p' + Nothing -> p + + toUrlPath r p = "file:///" + -- URLs always use posix style '/' separators: + ++ FilePath.Posix.joinPath (r : FilePath.splitDirectories p) + + stripVarPrefix var p = + case splitPath p of + (root:path') -> case stripPrefix var root of + Just [sep] | isPathSeparator sep -> Just (joinPath path') + _ -> Nothing + _ -> Nothing + + +-- Older installed package info files did not have the installedPackageId +-- field, so if it is missing then we fill it as the source package ID. 
+setInstalledPackageId :: InstalledPackageInfo -> InstalledPackageInfo +setInstalledPackageId pkginfo@InstalledPackageInfo { + installedPackageId = InstalledPackageId "", + sourcePackageId = pkgid + } + = pkginfo { + --TODO use a proper named function for the conversion + -- from source package id to installed package id + installedPackageId = InstalledPackageId (display pkgid) + } +setInstalledPackageId pkginfo = pkginfo + + +-- | Call @hc-pkg@ to get the source package Id of all the packages in the +-- given package database. +-- +-- This is much less information than with 'dump', but also rather quicker. +-- Note in particular that it does not include the 'InstalledPackageId', just +-- the source 'PackageId' which is not necessarily unique in any package db. +-- +list :: HcPkgInfo -> Verbosity -> PackageDB + -> IO [PackageId] +list hpi verbosity packagedb = do + + output <- getProgramInvocationOutput verbosity + (listInvocation hpi verbosity packagedb) + `catchExit` \_ -> die $ programId (hcPkgProgram hpi) ++ " list failed" + + case parsePackageIds output of + Just ok -> return ok + _ -> die $ "failed to parse output of '" + ++ programId (hcPkgProgram hpi) ++ " list'" + + where + parsePackageIds = sequence . map simpleParse . 
words + +-------------------------- +-- The program invocations +-- + +initInvocation :: HcPkgInfo -> Verbosity -> FilePath -> ProgramInvocation +initInvocation hpi verbosity path = + programInvocation (hcPkgProgram hpi) args + where + args = ["init", path] + ++ verbosityOpts hpi verbosity + +registerInvocation, reregisterInvocation + :: HcPkgInfo -> Verbosity -> PackageDBStack + -> Either FilePath InstalledPackageInfo + -> ProgramInvocation +registerInvocation = registerInvocation' "register" +reregisterInvocation = registerInvocation' "update" + + +registerInvocation' :: String -> HcPkgInfo -> Verbosity -> PackageDBStack + -> Either FilePath InstalledPackageInfo + -> ProgramInvocation +registerInvocation' cmdname hpi verbosity packagedbs (Left pkgFile) = + programInvocation (hcPkgProgram hpi) args + where + args = [cmdname, pkgFile] + ++ (if noPkgDbStack hpi + then [packageDbOpts hpi (last packagedbs)] + else packageDbStackOpts hpi packagedbs) + ++ verbosityOpts hpi verbosity + +registerInvocation' cmdname hpi verbosity packagedbs (Right pkgInfo) = + (programInvocation (hcPkgProgram hpi) args) { + progInvokeInput = Just (showInstalledPackageInfo pkgInfo), + progInvokeInputEncoding = IOEncodingUTF8 + } + where + args = [cmdname, "-"] + ++ (if noPkgDbStack hpi + then [packageDbOpts hpi (last packagedbs)] + else packageDbStackOpts hpi packagedbs) + ++ verbosityOpts hpi verbosity + + +unregisterInvocation :: HcPkgInfo -> Verbosity -> PackageDB -> PackageId + -> ProgramInvocation +unregisterInvocation hpi verbosity packagedb pkgid = + programInvocation (hcPkgProgram hpi) $ + ["unregister", packageDbOpts hpi packagedb, display pkgid] + ++ verbosityOpts hpi verbosity + + +exposeInvocation :: HcPkgInfo -> Verbosity -> PackageDB -> PackageId + -> ProgramInvocation +exposeInvocation hpi verbosity packagedb pkgid = + programInvocation (hcPkgProgram hpi) $ + ["expose", packageDbOpts hpi packagedb, display pkgid] + ++ verbosityOpts hpi verbosity + + +hideInvocation :: 
HcPkgInfo -> Verbosity -> PackageDB -> PackageId + -> ProgramInvocation +hideInvocation hpi verbosity packagedb pkgid = + programInvocation (hcPkgProgram hpi) $ + ["hide", packageDbOpts hpi packagedb, display pkgid] + ++ verbosityOpts hpi verbosity + + +dumpInvocation :: HcPkgInfo -> Verbosity -> PackageDB -> ProgramInvocation +dumpInvocation hpi _verbosity packagedb = + (programInvocation (hcPkgProgram hpi) args) { + progInvokeOutputEncoding = IOEncodingUTF8 + } + where + args = ["dump", packageDbOpts hpi packagedb] + ++ verbosityOpts hpi silent + -- We use verbosity level 'silent' because it is important that we + -- do not contaminate the output with info/debug messages. + +listInvocation :: HcPkgInfo -> Verbosity -> PackageDB -> ProgramInvocation +listInvocation hpi _verbosity packagedb = + (programInvocation (hcPkgProgram hpi) args) { + progInvokeOutputEncoding = IOEncodingUTF8 + } + where + args = ["list", "--simple-output", packageDbOpts hpi packagedb] + ++ verbosityOpts hpi silent + -- We use verbosity level 'silent' because it is important that we + -- do not contaminate the output with info/debug messages. 
+ + +packageDbStackOpts :: HcPkgInfo -> PackageDBStack -> [String] +packageDbStackOpts hpi dbstack = case dbstack of + (GlobalPackageDB:UserPackageDB:dbs) -> "--global" + : "--user" + : map specific dbs + (GlobalPackageDB:dbs) -> "--global" + : ("--no-user-" ++ packageDbFlag hpi) + : map specific dbs + _ -> ierror + where + specific (SpecificPackageDB db) = "--" ++ packageDbFlag hpi ++ "=" ++ db + specific _ = ierror + ierror :: a + ierror = error ("internal error: unexpected package db stack: " ++ show dbstack) + +packageDbFlag :: HcPkgInfo -> String +packageDbFlag hpi + | flagPackageConf hpi + = "package-conf" + | otherwise + = "package-db" + +packageDbOpts :: HcPkgInfo -> PackageDB -> String +packageDbOpts _ GlobalPackageDB = "--global" +packageDbOpts _ UserPackageDB = "--user" +packageDbOpts hpi (SpecificPackageDB db) = "--" ++ packageDbFlag hpi ++ "=" ++ db + +verbosityOpts :: HcPkgInfo -> Verbosity -> [String] +verbosityOpts hpi v + | noVerboseFlag hpi + = [] + | v >= deafening = ["-v2"] + | v == silent = ["-v0"] + | otherwise = [] + diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Hpc.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Hpc.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Hpc.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Hpc.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,100 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.Program.Hpc +-- Copyright : Thomas Tuegel 2011 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This module provides an library interface to the @hpc@ program. 
+ +module Distribution.Simple.Program.Hpc + ( markup + , union + ) where + +import Distribution.ModuleName ( ModuleName ) +import Distribution.Simple.Program.Run + ( ProgramInvocation, programInvocation, runProgramInvocation ) +import Distribution.Simple.Program.Types ( ConfiguredProgram(..) ) +import Distribution.Text ( display ) +import Distribution.Simple.Utils ( warn ) +import Distribution.Verbosity ( Verbosity ) +import Distribution.Version ( Version(..), orLaterVersion, withinRange ) + +-- | Invoke hpc with the given parameters. +-- +-- Prior to HPC version 0.7 (packaged with GHC 7.8), hpc did not handle +-- multiple .mix paths correctly, so we print a warning, and only pass it the +-- first path in the list. This means that e.g. test suites that import their +-- library as a dependency can still work, but those that include the library +-- modules directly (in other-modules) don't. +markup :: ConfiguredProgram + -> Version + -> Verbosity + -> FilePath -- ^ Path to .tix file + -> [FilePath] -- ^ Paths to .mix file directories + -> FilePath -- ^ Path where html output should be located + -> [ModuleName] -- ^ List of modules to exclude from report + -> IO () +markup hpc hpcVer verbosity tixFile hpcDirs destDir excluded = do + hpcDirs' <- if withinRange hpcVer (orLaterVersion version07) + then return hpcDirs + else do + warn verbosity $ "Your version of HPC (" ++ display hpcVer + ++ ") does not properly handle multiple search paths. " + ++ "Coverage report generation may fail unexpectedly. These " + ++ "issues are addressed in version 0.7 or later (GHC 7.8 or " + ++ "later)." 
+ ++ if null droppedDirs + then "" + else " The following search paths have been abandoned: " + ++ show droppedDirs + return passedDirs + + runProgramInvocation verbosity + (markupInvocation hpc tixFile hpcDirs' destDir excluded) + where + version07 = Version [0, 7] [] + (passedDirs, droppedDirs) = splitAt 1 hpcDirs + +markupInvocation :: ConfiguredProgram + -> FilePath -- ^ Path to .tix file + -> [FilePath] -- ^ Paths to .mix file directories + -> FilePath -- ^ Path where html output should be + -- located + -> [ModuleName] -- ^ List of modules to exclude from + -- report + -> ProgramInvocation +markupInvocation hpc tixFile hpcDirs destDir excluded = + let args = [ "markup", tixFile + , "--destdir=" ++ destDir + ] + ++ map ("--hpcdir=" ++) hpcDirs + ++ ["--exclude=" ++ display moduleName + | moduleName <- excluded ] + in programInvocation hpc args + +union :: ConfiguredProgram + -> Verbosity + -> [FilePath] -- ^ Paths to .tix files + -> FilePath -- ^ Path to resultant .tix file + -> [ModuleName] -- ^ List of modules to exclude from union + -> IO () +union hpc verbosity tixFiles outFile excluded = + runProgramInvocation verbosity + (unionInvocation hpc tixFiles outFile excluded) + +unionInvocation :: ConfiguredProgram + -> [FilePath] -- ^ Paths to .tix files + -> FilePath -- ^ Path to resultant .tix file + -> [ModuleName] -- ^ List of modules to exclude from union + -> ProgramInvocation +unionInvocation hpc tixFiles outFile excluded = + programInvocation hpc $ concat + [ ["sum", "--union"] + , tixFiles + , ["--output=" ++ outFile] + , ["--exclude=" ++ display moduleName + | moduleName <- excluded ] + ] diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Ld.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Ld.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Ld.hs 1970-01-01 00:00:00.000000000 +0000 +++ 
cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Ld.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,62 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.Program.Ld +-- Copyright : Duncan Coutts 2009 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This module provides an library interface to the @ld@ linker program. + +module Distribution.Simple.Program.Ld ( + combineObjectFiles, + ) where + +import Distribution.Simple.Program.Types + ( ConfiguredProgram(..) ) +import Distribution.Simple.Program.Run + ( programInvocation, multiStageProgramInvocation + , runProgramInvocation ) +import Distribution.Verbosity + ( Verbosity ) + +import System.Directory + ( renameFile ) +import System.FilePath + ( (<.>) ) + +-- | Call @ld -r@ to link a bunch of object files together. +-- +combineObjectFiles :: Verbosity -> ConfiguredProgram + -> FilePath -> [FilePath] -> IO () +combineObjectFiles verbosity ld target files = + + -- Unlike "ar", the "ld" tool is not designed to be used with xargs. That is, + -- if we have more object files than fit on a single command line then we + -- have a slight problem. What we have to do is link files in batches into + -- a temp object file and then include that one in the next batch. 
+ + let simpleArgs = ["-r", "-o", target] + + initialArgs = ["-r", "-o", target] + middleArgs = ["-r", "-o", target, tmpfile] + finalArgs = middleArgs + + simple = programInvocation ld simpleArgs + initial = programInvocation ld initialArgs + middle = programInvocation ld middleArgs + final = programInvocation ld finalArgs + + invocations = multiStageProgramInvocation + simple (initial, middle, final) files + + in run invocations + + where + tmpfile = target <.> "tmp" -- perhaps should use a proper temp file + + run [] = return () + run [inv] = runProgramInvocation verbosity inv + run (inv:invs) = do runProgramInvocation verbosity inv + renameFile target tmpfile + run invs diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Run.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Run.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Run.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Run.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,257 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.Program.Run +-- Copyright : Duncan Coutts 2009 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This module provides a data type for program invocations and functions to +-- run them. 
+ +module Distribution.Simple.Program.Run ( + ProgramInvocation(..), + IOEncoding(..), + emptyProgramInvocation, + simpleProgramInvocation, + programInvocation, + multiStageProgramInvocation, + + runProgramInvocation, + getProgramInvocationOutput, + + getEffectiveEnvironment, + ) where + +import Distribution.Simple.Program.Types + ( ConfiguredProgram(..), programPath ) +import Distribution.Simple.Utils + ( die, rawSystemExit, rawSystemIOWithEnv, rawSystemStdInOut + , toUTF8, fromUTF8, normaliseLineEndings ) +import Distribution.Verbosity + ( Verbosity ) + +import Data.List + ( foldl', unfoldr ) +import qualified Data.Map as Map +import Control.Monad + ( when ) +import System.Exit + ( ExitCode(..), exitWith ) +import Distribution.Compat.Environment + ( getEnvironment ) + +-- | Represents a specific invocation of a specific program. +-- +-- This is used as an intermediate type between deciding how to call a program +-- and actually doing it. This provides the opportunity to the caller to +-- adjust how the program will be called. These invocations can either be run +-- directly or turned into shell or batch scripts. 
+-- +data ProgramInvocation = ProgramInvocation { + progInvokePath :: FilePath, + progInvokeArgs :: [String], + progInvokeEnv :: [(String, Maybe String)], + progInvokeCwd :: Maybe FilePath, + progInvokeInput :: Maybe String, + progInvokeInputEncoding :: IOEncoding, + progInvokeOutputEncoding :: IOEncoding + } + +data IOEncoding = IOEncodingText -- locale mode text + | IOEncodingUTF8 -- always utf8 + +emptyProgramInvocation :: ProgramInvocation +emptyProgramInvocation = + ProgramInvocation { + progInvokePath = "", + progInvokeArgs = [], + progInvokeEnv = [], + progInvokeCwd = Nothing, + progInvokeInput = Nothing, + progInvokeInputEncoding = IOEncodingText, + progInvokeOutputEncoding = IOEncodingText + } + +simpleProgramInvocation :: FilePath -> [String] -> ProgramInvocation +simpleProgramInvocation path args = + emptyProgramInvocation { + progInvokePath = path, + progInvokeArgs = args + } + +programInvocation :: ConfiguredProgram -> [String] -> ProgramInvocation +programInvocation prog args = + emptyProgramInvocation { + progInvokePath = programPath prog, + progInvokeArgs = programDefaultArgs prog + ++ args + ++ programOverrideArgs prog, + progInvokeEnv = programOverrideEnv prog + } + + +runProgramInvocation :: Verbosity -> ProgramInvocation -> IO () +runProgramInvocation verbosity + ProgramInvocation { + progInvokePath = path, + progInvokeArgs = args, + progInvokeEnv = [], + progInvokeCwd = Nothing, + progInvokeInput = Nothing + } = + rawSystemExit verbosity path args + +runProgramInvocation verbosity + ProgramInvocation { + progInvokePath = path, + progInvokeArgs = args, + progInvokeEnv = envOverrides, + progInvokeCwd = mcwd, + progInvokeInput = Nothing + } = do + menv <- getEffectiveEnvironment envOverrides + exitCode <- rawSystemIOWithEnv verbosity + path args + mcwd menv + Nothing Nothing Nothing + when (exitCode /= ExitSuccess) $ + exitWith exitCode + +runProgramInvocation verbosity + ProgramInvocation { + progInvokePath = path, + progInvokeArgs = args, + 
progInvokeEnv = envOverrides, + progInvokeCwd = mcwd, + progInvokeInput = Just inputStr, + progInvokeInputEncoding = encoding + } = do + menv <- getEffectiveEnvironment envOverrides + (_, errors, exitCode) <- rawSystemStdInOut verbosity + path args + mcwd menv + (Just input) True + when (exitCode /= ExitSuccess) $ + die $ "'" ++ path ++ "' exited with an error:\n" ++ errors + where + input = case encoding of + IOEncodingText -> (inputStr, False) + IOEncodingUTF8 -> (toUTF8 inputStr, True) -- use binary mode for + -- utf8 + + +getProgramInvocationOutput :: Verbosity -> ProgramInvocation -> IO String +getProgramInvocationOutput verbosity + ProgramInvocation { + progInvokePath = path, + progInvokeArgs = args, + progInvokeEnv = envOverrides, + progInvokeCwd = mcwd, + progInvokeInput = minputStr, + progInvokeOutputEncoding = encoding + } = do + let utf8 = case encoding of IOEncodingUTF8 -> True; _ -> False + decode | utf8 = fromUTF8 . normaliseLineEndings + | otherwise = id + menv <- getEffectiveEnvironment envOverrides + (output, errors, exitCode) <- rawSystemStdInOut verbosity + path args + mcwd menv + input utf8 + when (exitCode /= ExitSuccess) $ + die $ "'" ++ path ++ "' exited with an error:\n" ++ errors + return (decode output) + where + input = + case minputStr of + Nothing -> Nothing + Just inputStr -> Just $ + case encoding of + IOEncodingText -> (inputStr, False) + IOEncodingUTF8 -> (toUTF8 inputStr, True) -- use binary mode for utf8 + + +-- | Return the current environment extended with the given overrides. +-- +getEffectiveEnvironment :: [(String, Maybe String)] + -> IO (Maybe [(String, String)]) +getEffectiveEnvironment [] = return Nothing +getEffectiveEnvironment overrides = + fmap (Just . Map.toList . apply overrides . Map.fromList) getEnvironment + where + apply os env = foldl' (flip update) env os + update (var, Nothing) = Map.delete var + update (var, Just val) = Map.insert var val + +-- | Like the unix xargs program. 
Useful for when we've got very long command +-- lines that might overflow an OS limit on command line length and so you +-- need to invoke a command multiple times to get all the args in. +-- +-- It takes four template invocations corresponding to the simple, initial, +-- middle and last invocations. If the number of args given is small enough +-- that we can get away with just a single invocation then the simple one is +-- used: +-- +-- > $ simple args +-- +-- If the number of args given means that we need to use multiple invocations +-- then the templates for the initial, middle and last invocations are used: +-- +-- > $ initial args_0 +-- > $ middle args_1 +-- > $ middle args_2 +-- > ... +-- > $ final args_n +-- +multiStageProgramInvocation + :: ProgramInvocation + -> (ProgramInvocation, ProgramInvocation, ProgramInvocation) + -> [String] + -> [ProgramInvocation] +multiStageProgramInvocation simple (initial, middle, final) args = + + let argSize inv = length (progInvokePath inv) + + foldl' (\s a -> length a + 1 + s) 1 (progInvokeArgs inv) + fixedArgSize = maximum (map argSize [simple, initial, middle, final]) + chunkSize = maxCommandLineSize - fixedArgSize + + in case splitChunks chunkSize args of + [] -> [ simple ] + + [c] -> [ simple `appendArgs` c ] + + [c,c'] -> [ initial `appendArgs` c ] + ++ [ final `appendArgs` c'] + + (c:cs) -> [ initial `appendArgs` c ] + ++ [ middle `appendArgs` c'| c' <- init cs ] + ++ [ final `appendArgs` c'| let c' = last cs ] + + where + inv `appendArgs` as = inv { progInvokeArgs = progInvokeArgs inv ++ as } + + splitChunks len = unfoldr $ \s -> + if null s then Nothing + else Just (chunk len s) + + chunk len (s:_) | length s >= len = error toolong + chunk len ss = chunk' [] len ss + + chunk' acc _ [] = (reverse acc,[]) + chunk' acc len (s:ss) + | len' < len = chunk' (s:acc) (len-len'-1) ss + | otherwise = (reverse acc, s:ss) + where len' = length s + + toolong = "multiStageProgramInvocation: a single program arg is larger " + ++ 
"than the maximum command line length!" + + +--FIXME: discover this at configure time or runtime on unix +-- The value is 32k on Windows and posix specifies a minimum of 4k +-- but all sensible unixes use more than 4k. +-- we could use getSysVar ArgumentLimit but that's in the unix lib +-- +maxCommandLineSize :: Int +maxCommandLineSize = 30 * 1024 diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Script.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Script.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Script.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Script.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,110 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.Program.Script +-- Copyright : Duncan Coutts 2009 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This module provides an library interface to the @hc-pkg@ program. +-- Currently only GHC and LHC have hc-pkg programs. + +module Distribution.Simple.Program.Script ( + + invocationAsSystemScript, + invocationAsShellScript, + invocationAsBatchFile, + ) where + +import Distribution.Simple.Program.Run + ( ProgramInvocation(..) ) +import Distribution.System + ( OS(..) ) + +import Data.Maybe + ( maybeToList ) + +-- | Generate a system script, either POSIX shell script or Windows batch file +-- as appropriate for the given system. +-- +invocationAsSystemScript :: OS -> ProgramInvocation -> String +invocationAsSystemScript Windows = invocationAsBatchFile +invocationAsSystemScript _ = invocationAsShellScript + + +-- | Generate a POSIX shell script that invokes a program. 
+-- +invocationAsShellScript :: ProgramInvocation -> String +invocationAsShellScript + ProgramInvocation { + progInvokePath = path, + progInvokeArgs = args, + progInvokeEnv = envExtra, + progInvokeCwd = mcwd, + progInvokeInput = minput + } = unlines $ + [ "#!/bin/sh" ] + ++ concatMap setEnv envExtra + ++ [ "cd " ++ quote cwd | cwd <- maybeToList mcwd ] + ++ [ (case minput of + Nothing -> "" + Just input -> "echo " ++ quote input ++ " | ") + ++ unwords (map quote $ path : args) ++ " \"$@\""] + + where + setEnv (var, Nothing) = ["unset " ++ var, "export " ++ var] + setEnv (var, Just val) = ["export " ++ var ++ "=" ++ quote val] + + quote :: String -> String + quote s = "'" ++ escape s ++ "'" + + escape [] = [] + escape ('\'':cs) = "'\\''" ++ escape cs + escape (c :cs) = c : escape cs + + +-- | Generate a Windows batch file that invokes a program. +-- +invocationAsBatchFile :: ProgramInvocation -> String +invocationAsBatchFile + ProgramInvocation { + progInvokePath = path, + progInvokeArgs = args, + progInvokeEnv = envExtra, + progInvokeCwd = mcwd, + progInvokeInput = minput + } = unlines $ + [ "@echo off" ] + ++ map setEnv envExtra + ++ [ "cd \"" ++ cwd ++ "\"" | cwd <- maybeToList mcwd ] + ++ case minput of + Nothing -> + [ path ++ concatMap (' ':) args ] + + Just input -> + [ "(" ] + ++ [ "echo " ++ escape line | line <- lines input ] + ++ [ ") | " + ++ "\"" ++ path ++ "\"" + ++ concatMap (\arg -> ' ':quote arg) args ] + + where + setEnv (var, Nothing) = "set " ++ var ++ "=" + setEnv (var, Just val) = "set " ++ var ++ "=" ++ escape val + + quote :: String -> String + quote s = "\"" ++ escapeQ s ++ "\"" + + escapeQ [] = [] + escapeQ ('"':cs) = "\"\"\"" ++ escapeQ cs + escapeQ (c :cs) = c : escapeQ cs + + escape [] = [] + escape ('|':cs) = "^|" ++ escape cs + escape ('<':cs) = "^<" ++ escape cs + escape ('>':cs) = "^>" ++ escape cs + escape ('&':cs) = "^&" ++ escape cs + escape ('(':cs) = "^(" ++ escape cs + escape (')':cs) = "^)" ++ escape cs + escape ('^':cs) = 
"^^" ++ escape cs + escape (c :cs) = c : escape cs diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Strip.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Strip.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Strip.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Strip.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,72 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.Program.Strip +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This module provides an library interface to the @strip@ program. + +module Distribution.Simple.Program.Strip (stripLib, stripExe) + where + +import Distribution.Simple.Program (ProgramConfiguration, lookupProgram + , programVersion, rawSystemProgram + , stripProgram) +import Distribution.Simple.Utils (warn) +import Distribution.System (Arch(..), Platform(..), OS (..), buildOS) +import Distribution.Verbosity (Verbosity) +import Distribution.Version (orLaterVersion, withinRange) + +import Control.Monad (unless) +import Data.Version (Version(..)) +import System.FilePath (takeBaseName) + +runStrip :: Verbosity -> ProgramConfiguration -> FilePath -> [String] -> IO () +runStrip verbosity progConf path args = + case lookupProgram stripProgram progConf of + Just strip -> rawSystemProgram verbosity strip (path:args) + Nothing -> unless (buildOS == Windows) $ + -- Don't bother warning on windows, we don't expect them to + -- have the strip program anyway. 
+ warn verbosity $ "Unable to strip executable or library '" + ++ (takeBaseName path) + ++ "' (missing the 'strip' program)" + +stripExe :: Verbosity -> Platform -> ProgramConfiguration -> FilePath -> IO () +stripExe verbosity (Platform _arch os) conf path = + runStrip verbosity conf path args + where + args = case os of + OSX -> ["-x"] -- By default, stripping the ghc binary on at least + -- some OS X installations causes: + -- HSbase-3.0.o: unknown symbol `_environ'" + -- The -x flag fixes that. + _ -> [] + +stripLib :: Verbosity -> Platform -> ProgramConfiguration -> FilePath -> IO () +stripLib verbosity (Platform arch os) conf path = do + case os of + OSX -> -- '--strip-unneeded' is not supported on OS X, iOS or + -- Solaris. See #1630. + return () + IOS -> return () + Solaris -> return () + Windows -> -- Stripping triggers a bug in 'strip.exe' for + -- libraries with lots identically named modules. See + -- #1784. + return() + Linux | arch == I386 -> + -- Versions of 'strip' on 32-bit Linux older than 2.18 are + -- broken. See #2339. 
+ let okVersion = orLaterVersion (Version [2,18] []) + in case programVersion =<< lookupProgram stripProgram conf of + Just v | withinRange v okVersion -> + runStrip verbosity conf path args + _ -> warn verbosity $ "Unable to strip library '" + ++ (takeBaseName path) + ++ "' (version of 'strip' too old; " + ++ "requires >= 2.18 on 32-bit Linux)" + _ -> runStrip verbosity conf path args + where + args = ["--strip-unneeded"] diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Types.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Types.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Types.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Program/Types.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,164 @@ +{-# LANGUAGE DeriveGeneric #-} + +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.Program.Types +-- Copyright : Isaac Jones 2006, Duncan Coutts 2007-2009 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This provides an abstraction which deals with configuring and running +-- programs. A 'Program' is a static notion of a known program. A +-- 'ConfiguredProgram' is a 'Program' that has been found on the current +-- machine and is ready to be run (possibly with some user-supplied default +-- args). Configuring a program involves finding its location and if necessary +-- finding its version. There's reasonable default behavior for trying to find +-- \"foo\" in PATH, being able to override its location, etc. 
+-- +module Distribution.Simple.Program.Types ( + -- * Program and functions for constructing them + Program(..), + ProgramSearchPath, + ProgramSearchPathEntry(..), + simpleProgram, + + -- * Configured program and related functions + ConfiguredProgram(..), + programPath, + suppressOverrideArgs, + ProgArg, + ProgramLocation(..), + simpleConfiguredProgram, + ) where + +import Distribution.Simple.Program.Find + ( ProgramSearchPath, ProgramSearchPathEntry(..) + , findProgramOnSearchPath ) +import Distribution.Version + ( Version ) +import Distribution.Verbosity + ( Verbosity ) + +import Distribution.Compat.Binary (Binary) +import qualified Data.Map as Map +import GHC.Generics (Generic) + +-- | Represents a program which can be configured. +-- +-- Note: rather than constructing this directly, start with 'simpleProgram' and +-- override any extra fields. +-- +data Program = Program { + -- | The simple name of the program, eg. ghc + programName :: String, + + -- | A function to search for the program if its location was not + -- specified by the user. Usually this will just be a call to + -- 'findProgramOnSearchPath'. + -- + -- It is supplied with the prevailing search path which will typically + -- just be used as-is, but can be extended or ignored as needed. + programFindLocation :: Verbosity -> ProgramSearchPath + -> IO (Maybe FilePath), + + -- | Try to find the version of the program. For many programs this is + -- not possible or is not necessary so it's OK to return Nothing. + programFindVersion :: Verbosity -> FilePath -> IO (Maybe Version), + + -- | A function to do any additional configuration after we have + -- located the program (and perhaps identified its version). For example + -- it could add args, or environment vars. + programPostConf :: Verbosity -> ConfiguredProgram -> IO ConfiguredProgram + } + +type ProgArg = String + +-- | Represents a program which has been configured and is thus ready to be run. 
+-- +-- These are usually made by configuring a 'Program', but if you have to +-- construct one directly then start with 'simpleConfiguredProgram' and +-- override any extra fields. +-- +data ConfiguredProgram = ConfiguredProgram { + -- | Just the name again + programId :: String, + + -- | The version of this program, if it is known. + programVersion :: Maybe Version, + + -- | Default command-line args for this program. + -- These flags will appear first on the command line, so they can be + -- overridden by subsequent flags. + programDefaultArgs :: [String], + + -- | Override command-line args for this program. + -- These flags will appear last on the command line, so they override + -- all earlier flags. + programOverrideArgs :: [String], + + -- | Override environment variables for this program. + -- These env vars will extend\/override the prevailing environment of + -- the current to form the environment for the new process. + programOverrideEnv :: [(String, Maybe String)], + + -- | A key-value map listing various properties of the program, useful + -- for feature detection. Populated during the configuration step, key + -- names depend on the specific program. + programProperties :: Map.Map String String, + + -- | Location of the program. eg. @\/usr\/bin\/ghc-6.4@ + programLocation :: ProgramLocation + } deriving (Eq, Generic, Read, Show) + +instance Binary ConfiguredProgram + +-- | Where a program was found. Also tells us whether it's specified by user or +-- not. This includes not just the path, but the program as well. +data ProgramLocation + = UserSpecified { locationPath :: FilePath } + -- ^The user gave the path to this program, + -- eg. --ghc-path=\/usr\/bin\/ghc-6.6 + | FoundOnSystem { locationPath :: FilePath } + -- ^The program was found automatically. + deriving (Eq, Generic, Read, Show) + +instance Binary ProgramLocation + +-- | The full path of a configured program. +programPath :: ConfiguredProgram -> FilePath +programPath = locationPath . 
programLocation + +-- | Suppress any extra arguments added by the user. +suppressOverrideArgs :: ConfiguredProgram -> ConfiguredProgram +suppressOverrideArgs prog = prog { programOverrideArgs = [] } + +-- | Make a simple named program. +-- +-- By default we'll just search for it in the path and not try to find the +-- version name. You can override these behaviours if necessary, eg: +-- +-- > simpleProgram "foo" { programFindLocation = ... , programFindVersion ... } +-- +simpleProgram :: String -> Program +simpleProgram name = Program { + programName = name, + programFindLocation = \v p -> findProgramOnSearchPath v p name, + programFindVersion = \_ _ -> return Nothing, + programPostConf = \_ p -> return p + } + +-- | Make a simple 'ConfiguredProgram'. +-- +-- > simpleConfiguredProgram "foo" (FoundOnSystem path) +-- +simpleConfiguredProgram :: String -> ProgramLocation -> ConfiguredProgram +simpleConfiguredProgram name loc = ConfiguredProgram { + programId = name, + programVersion = Nothing, + programDefaultArgs = [], + programOverrideArgs = [], + programOverrideEnv = [], + programProperties = Map.empty, + programLocation = loc + } diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Program.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Program.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Program.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Program.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,222 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.Program +-- Copyright : Isaac Jones 2006, Duncan Coutts 2007-2009 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This provides an abstraction which deals with configuring and running +-- programs. A 'Program' is a static notion of a known program. 
A +-- 'ConfiguredProgram' is a 'Program' that has been found on the current +-- machine and is ready to be run (possibly with some user-supplied default +-- args). Configuring a program involves finding its location and if necessary +-- finding its version. There is also a 'ProgramConfiguration' type which holds +-- configured and not-yet configured programs. It is the parameter to lots of +-- actions elsewhere in Cabal that need to look up and run programs. If we had +-- a Cabal monad, the 'ProgramConfiguration' would probably be a reader or +-- state component of it. +-- +-- The module also defines all the known built-in 'Program's and the +-- 'defaultProgramConfiguration' which contains them all. +-- +-- One nice thing about using it is that any program that is +-- registered with Cabal will get some \"configure\" and \".cabal\" +-- helpers like --with-foo-args --foo-path= and extra-foo-args. +-- +-- There's also good default behavior for trying to find \"foo\" in +-- PATH, being able to override its location, etc. +-- +-- There's also a hook for adding programs in a Setup.lhs script. See +-- hookedPrograms in 'Distribution.Simple.UserHooks'. This gives a +-- hook user the ability to get the above flags and such so that they +-- don't have to write all the PATH logic inside Setup.lhs. + +module Distribution.Simple.Program ( + -- * Program and functions for constructing them + Program(..) + , ProgramSearchPath + , ProgramSearchPathEntry(..) + , simpleProgram + , findProgramLocation + , findProgramVersion + + -- * Configured program and related functions + , ConfiguredProgram(..) + , programPath + , ProgArg + , ProgramLocation(..) + , runProgram + , getProgramOutput + , suppressOverrideArgs + + -- * Program invocations + , ProgramInvocation(..) 
+ , emptyProgramInvocation + , simpleProgramInvocation + , programInvocation + , runProgramInvocation + , getProgramInvocationOutput + + -- * The collection of unconfigured and configured programs + , builtinPrograms + + -- * The collection of configured programs we can run + , ProgramConfiguration + , emptyProgramConfiguration + , defaultProgramConfiguration + , restoreProgramConfiguration + , addKnownProgram + , addKnownPrograms + , lookupKnownProgram + , knownPrograms + , getProgramSearchPath + , setProgramSearchPath + , userSpecifyPath + , userSpecifyPaths + , userMaybeSpecifyPath + , userSpecifyArgs + , userSpecifyArgss + , userSpecifiedArgs + , lookupProgram + , lookupProgramVersion + , updateProgram + , configureProgram + , configureAllKnownPrograms + , reconfigurePrograms + , requireProgram + , requireProgramVersion + , runDbProgram + , getDbProgramOutput + + -- * Programs that Cabal knows about + , ghcProgram + , ghcPkgProgram + , ghcjsProgram + , ghcjsPkgProgram + , lhcProgram + , lhcPkgProgram + , hmakeProgram + , jhcProgram + , uhcProgram + , gccProgram + , arProgram + , stripProgram + , happyProgram + , alexProgram + , hsc2hsProgram + , c2hsProgram + , cpphsProgram + , hscolourProgram + , haddockProgram + , greencardProgram + , ldProgram + , tarProgram + , cppProgram + , pkgConfigProgram + , hpcProgram + + -- * deprecated + , rawSystemProgram + , rawSystemProgramStdout + , rawSystemProgramConf + , rawSystemProgramStdoutConf + , findProgramOnPath + + ) where + +import Distribution.Simple.Program.Types +import Distribution.Simple.Program.Run +import Distribution.Simple.Program.Db +import Distribution.Simple.Program.Builtin + +import Distribution.Simple.Utils + ( die, findProgramLocation, findProgramVersion ) +import Distribution.Verbosity + ( Verbosity ) + + +-- | Runs the given configured program. 
+-- +runProgram :: Verbosity -- ^Verbosity + -> ConfiguredProgram -- ^The program to run + -> [ProgArg] -- ^Any /extra/ arguments to add + -> IO () +runProgram verbosity prog args = + runProgramInvocation verbosity (programInvocation prog args) + + +-- | Runs the given configured program and gets the output. +-- +getProgramOutput :: Verbosity -- ^Verbosity + -> ConfiguredProgram -- ^The program to run + -> [ProgArg] -- ^Any /extra/ arguments to add + -> IO String +getProgramOutput verbosity prog args = + getProgramInvocationOutput verbosity (programInvocation prog args) + + +-- | Looks up the given program in the program database and runs it. +-- +runDbProgram :: Verbosity -- ^verbosity + -> Program -- ^The program to run + -> ProgramDb -- ^look up the program here + -> [ProgArg] -- ^Any /extra/ arguments to add + -> IO () +runDbProgram verbosity prog programDb args = + case lookupProgram prog programDb of + Nothing -> die notFound + Just configuredProg -> runProgram verbosity configuredProg args + where + notFound = "The program '" ++ programName prog + ++ "' is required but it could not be found" + +-- | Looks up the given program in the program database and runs it. 
+-- +getDbProgramOutput :: Verbosity -- ^verbosity + -> Program -- ^The program to run + -> ProgramDb -- ^look up the program here + -> [ProgArg] -- ^Any /extra/ arguments to add + -> IO String +getDbProgramOutput verbosity prog programDb args = + case lookupProgram prog programDb of + Nothing -> die notFound + Just configuredProg -> getProgramOutput verbosity configuredProg args + where + notFound = "The program '" ++ programName prog + ++ "' is required but it could not be found" + + +--------------------- +-- Deprecated aliases +-- + +rawSystemProgram :: Verbosity -> ConfiguredProgram + -> [ProgArg] -> IO () +rawSystemProgram = runProgram + +rawSystemProgramStdout :: Verbosity -> ConfiguredProgram + -> [ProgArg] -> IO String +rawSystemProgramStdout = getProgramOutput + +rawSystemProgramConf :: Verbosity -> Program -> ProgramConfiguration + -> [ProgArg] -> IO () +rawSystemProgramConf = runDbProgram + +rawSystemProgramStdoutConf :: Verbosity -> Program -> ProgramConfiguration + -> [ProgArg] -> IO String +rawSystemProgramStdoutConf = getDbProgramOutput + +type ProgramConfiguration = ProgramDb + +emptyProgramConfiguration, defaultProgramConfiguration :: ProgramConfiguration +emptyProgramConfiguration = emptyProgramDb +defaultProgramConfiguration = defaultProgramDb + +restoreProgramConfiguration :: [Program] -> ProgramConfiguration + -> ProgramConfiguration +restoreProgramConfiguration = restoreProgramDb + +{-# DEPRECATED findProgramOnPath "use findProgramLocation instead" #-} +findProgramOnPath :: String -> Verbosity -> IO (Maybe FilePath) +findProgramOnPath = flip findProgramLocation diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Register.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Register.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Register.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Register.hs 2016-06-02 
07:15:38.000000000 +0000 @@ -0,0 +1,464 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.Register +-- Copyright : Isaac Jones 2003-2004 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This module deals with registering and unregistering packages. There are a +-- couple ways it can do this, one is to do it directly. Another is to generate +-- a script that can be run later to do it. The idea here being that the user +-- is shielded from the details of what command to use for package registration +-- for a particular compiler. In practice this aspect was not especially +-- popular so we also provide a way to simply generate the package registration +-- file which then must be manually passed to @ghc-pkg@. It is possible to +-- generate registration information for where the package is to be installed, +-- or alternatively to register the package in place in the build tree. The +-- latter is occasionally handy, and will become more important when we try to +-- build multi-package systems. +-- +-- This module does not delegate anything to the per-compiler modules but just +-- mixes it all in in this module, which is rather unsatisfactory. The script +-- generation and the unregister feature are not well used or tested. + +module Distribution.Simple.Register ( + register, + unregister, + + initPackageDB, + invokeHcPkg, + registerPackage, + generateRegistrationInfo, + inplaceInstalledPackageInfo, + absoluteInstalledPackageInfo, + generalInstalledPackageInfo, + ) where + +import Distribution.Simple.LocalBuildInfo + ( LocalBuildInfo(..), ComponentLocalBuildInfo(..) + , ComponentName(..), getComponentLocalBuildInfo + , LibraryName(..) 
+ , InstallDirs(..), absoluteInstallDirs ) +import Distribution.Simple.BuildPaths (haddockName) + +import qualified Distribution.Simple.GHC as GHC +import qualified Distribution.Simple.GHCJS as GHCJS +import qualified Distribution.Simple.LHC as LHC +import qualified Distribution.Simple.UHC as UHC +import qualified Distribution.Simple.HaskellSuite as HaskellSuite + +import Distribution.Simple.Compiler + ( compilerVersion, Compiler, CompilerFlavor(..), compilerFlavor + , PackageDB, PackageDBStack, absolutePackageDBPaths + , registrationPackageDB ) +import Distribution.Simple.Program + ( ProgramConfiguration, runProgramInvocation ) +import Distribution.Simple.Program.Script + ( invocationAsSystemScript ) +import Distribution.Simple.Program.HcPkg (HcPkgInfo) +import qualified Distribution.Simple.Program.HcPkg as HcPkg +import Distribution.Simple.Setup + ( RegisterFlags(..), CopyDest(..) + , fromFlag, fromFlagOrDefault, flagToMaybe ) +import Distribution.PackageDescription + ( PackageDescription(..), Library(..), BuildInfo(..), libModules ) +import Distribution.Package + ( Package(..), packageName, InstalledPackageId(..) ) +import Distribution.InstalledPackageInfo + ( InstalledPackageInfo, InstalledPackageInfo_(InstalledPackageInfo) + , showInstalledPackageInfo ) +import qualified Distribution.InstalledPackageInfo as IPI +import Distribution.Simple.Utils + ( writeUTF8File, writeFileAtomic, setFileExecutable + , die, notice, setupMessage, shortRelativePath ) +import Distribution.System + ( OS(..), buildOS ) +import Distribution.Text + ( display ) +import Distribution.Version ( Version(..) 
) +import Distribution.Verbosity as Verbosity + ( Verbosity, normal ) + +import System.FilePath ((), (<.>), isAbsolute) +import System.Directory + ( getCurrentDirectory ) + +import Control.Monad (when) +import Data.Maybe + ( isJust, fromMaybe, maybeToList ) +import Data.List + ( partition, nub ) +import qualified Data.ByteString.Lazy.Char8 as BS.Char8 + +-- ----------------------------------------------------------------------------- +-- Registration + +register :: PackageDescription -> LocalBuildInfo + -> RegisterFlags -- ^Install in the user's database?; verbose + -> IO () +register pkg@PackageDescription { library = Just lib } lbi regFlags + = do + let clbi = getComponentLocalBuildInfo lbi CLibName + + absPackageDBs <- absolutePackageDBPaths packageDbs + installedPkgInfo <- generateRegistrationInfo + verbosity pkg lib lbi clbi inplace reloc distPref + (registrationPackageDB absPackageDBs) + + when (fromFlag (regPrintId regFlags)) $ do + putStrLn (display (IPI.installedPackageId installedPkgInfo)) + + -- Three different modes: + case () of + _ | modeGenerateRegFile -> writeRegistrationFile installedPkgInfo + | modeGenerateRegScript -> writeRegisterScript installedPkgInfo + | otherwise -> registerPackage verbosity + installedPkgInfo pkg lbi inplace packageDbs + + where + modeGenerateRegFile = isJust (flagToMaybe (regGenPkgConf regFlags)) + regFile = fromMaybe (display (packageId pkg) <.> "conf") + (fromFlag (regGenPkgConf regFlags)) + + modeGenerateRegScript = fromFlag (regGenScript regFlags) + + inplace = fromFlag (regInPlace regFlags) + reloc = relocatable lbi + -- FIXME: there's really no guarantee this will work. + -- registering into a totally different db stack can + -- fail if dependencies cannot be satisfied. 
+ packageDbs = nub $ withPackageDB lbi + ++ maybeToList (flagToMaybe (regPackageDB regFlags)) + distPref = fromFlag (regDistPref regFlags) + verbosity = fromFlag (regVerbosity regFlags) + + writeRegistrationFile installedPkgInfo = do + notice verbosity ("Creating package registration file: " ++ regFile) + writeUTF8File regFile (showInstalledPackageInfo installedPkgInfo) + + writeRegisterScript installedPkgInfo = + case compilerFlavor (compiler lbi) of + JHC -> notice verbosity "Registration scripts not needed for jhc" + UHC -> notice verbosity "Registration scripts not needed for uhc" + _ -> withHcPkg + "Registration scripts are not implemented for this compiler" + (compiler lbi) (withPrograms lbi) + (writeHcPkgRegisterScript verbosity installedPkgInfo packageDbs) + +register _ _ regFlags = notice verbosity "No package to register" + where + verbosity = fromFlag (regVerbosity regFlags) + + +generateRegistrationInfo :: Verbosity + -> PackageDescription + -> Library + -> LocalBuildInfo + -> ComponentLocalBuildInfo + -> Bool + -> Bool + -> FilePath + -> PackageDB + -> IO InstalledPackageInfo +generateRegistrationInfo verbosity pkg lib lbi clbi inplace reloc distPref packageDb = do + --TODO: eliminate pwd! + pwd <- getCurrentDirectory + + --TODO: the method of setting the InstalledPackageId is compiler specific + -- this aspect should be delegated to a per-compiler helper. 
+ let comp = compiler lbi + ipid <- + case compilerFlavor comp of + GHC | compilerVersion comp >= Version [6,11] [] -> do + s <- GHC.libAbiHash verbosity pkg lbi lib clbi + return (InstalledPackageId (display (packageId pkg) ++ '-':s)) + GHCJS -> do + s <- GHCJS.libAbiHash verbosity pkg lbi lib clbi + return (InstalledPackageId (display (packageId pkg) ++ '-':s)) + _other -> do + return (InstalledPackageId (display (packageId pkg))) + + installedPkgInfo <- + if inplace + then return (inplaceInstalledPackageInfo pwd distPref + pkg ipid lib lbi clbi) + else if reloc + then relocRegistrationInfo verbosity + pkg lib lbi clbi ipid packageDb + else return (absoluteInstalledPackageInfo + pkg ipid lib lbi clbi) + + + return installedPkgInfo{ IPI.installedPackageId = ipid } + +relocRegistrationInfo :: Verbosity + -> PackageDescription + -> Library + -> LocalBuildInfo + -> ComponentLocalBuildInfo + -> InstalledPackageId + -> PackageDB + -> IO InstalledPackageInfo +relocRegistrationInfo verbosity pkg lib lbi clbi ipid packageDb = + case (compilerFlavor (compiler lbi)) of + GHC -> do fs <- GHC.pkgRoot verbosity lbi packageDb + return (relocatableInstalledPackageInfo + pkg ipid lib lbi clbi fs) + _ -> die "Distribution.Simple.Register.relocRegistrationInfo: \ + \not implemented for this compiler" + +-- | Create an empty package DB at the specified location. +initPackageDB :: Verbosity -> Compiler -> ProgramConfiguration -> FilePath + -> IO () +initPackageDB verbosity comp conf dbPath = + case compilerFlavor comp of + HaskellSuite {} -> HaskellSuite.initPackageDB verbosity conf dbPath + _ -> withHcPkg "Distribution.Simple.Register.initPackageDB: \ + \not implemented for this compiler" comp conf + (\hpi -> HcPkg.init hpi verbosity dbPath) + +-- | Run @hc-pkg@ using a given package DB stack, directly forwarding the +-- provided command-line arguments to it. 
+invokeHcPkg :: Verbosity -> Compiler -> ProgramConfiguration -> PackageDBStack + -> [String] -> IO () +invokeHcPkg verbosity comp conf dbStack extraArgs = + withHcPkg "invokeHcPkg" comp conf + (\hpi -> HcPkg.invoke hpi verbosity dbStack extraArgs) + +withHcPkg :: String -> Compiler -> ProgramConfiguration + -> (HcPkgInfo -> IO a) -> IO a +withHcPkg name comp conf f = + case compilerFlavor comp of + GHC -> f (GHC.hcPkgInfo conf) + GHCJS -> f (GHCJS.hcPkgInfo conf) + LHC -> f (LHC.hcPkgInfo conf) + _ -> die ("Distribution.Simple.Register." ++ name ++ ":\ + \not implemented for this compiler") + +registerPackage :: Verbosity + -> InstalledPackageInfo + -> PackageDescription + -> LocalBuildInfo + -> Bool + -> PackageDBStack + -> IO () +registerPackage verbosity installedPkgInfo pkg lbi inplace packageDbs = do + let msg = if inplace + then "In-place registering" + else "Registering" + setupMessage verbosity msg (packageId pkg) + case compilerFlavor (compiler lbi) of + GHC -> GHC.registerPackage verbosity installedPkgInfo pkg lbi inplace packageDbs + GHCJS -> GHCJS.registerPackage verbosity installedPkgInfo pkg lbi inplace packageDbs + LHC -> LHC.registerPackage verbosity installedPkgInfo pkg lbi inplace packageDbs + UHC -> UHC.registerPackage verbosity installedPkgInfo pkg lbi inplace packageDbs + JHC -> notice verbosity "Registering for jhc (nothing to do)" + HaskellSuite {} -> + HaskellSuite.registerPackage verbosity installedPkgInfo pkg lbi inplace packageDbs + _ -> die "Registering is not implemented for this compiler" + +writeHcPkgRegisterScript :: Verbosity + -> InstalledPackageInfo + -> PackageDBStack + -> HcPkgInfo + -> IO () +writeHcPkgRegisterScript verbosity installedPkgInfo packageDbs hpi = do + let invocation = HcPkg.reregisterInvocation hpi Verbosity.normal + packageDbs (Right installedPkgInfo) + regScript = invocationAsSystemScript buildOS invocation + + notice verbosity ("Creating package registration script: " ++ regScriptFileName) + writeUTF8File 
regScriptFileName regScript + setFileExecutable regScriptFileName + +regScriptFileName :: FilePath +regScriptFileName = case buildOS of + Windows -> "register.bat" + _ -> "register.sh" + + +-- ----------------------------------------------------------------------------- +-- Making the InstalledPackageInfo + +-- | Construct 'InstalledPackageInfo' for a library in a package, given a set +-- of installation directories. +-- +generalInstalledPackageInfo + :: ([FilePath] -> [FilePath]) -- ^ Translate relative include dir paths to + -- absolute paths. + -> PackageDescription + -> InstalledPackageId + -> Library + -> LocalBuildInfo + -> ComponentLocalBuildInfo + -> InstallDirs FilePath + -> InstalledPackageInfo +generalInstalledPackageInfo adjustRelIncDirs pkg ipid lib lbi clbi installDirs = + InstalledPackageInfo { + IPI.installedPackageId = ipid, + IPI.sourcePackageId = packageId pkg, + IPI.packageKey = pkgKey lbi, + IPI.license = license pkg, + IPI.copyright = copyright pkg, + IPI.maintainer = maintainer pkg, + IPI.author = author pkg, + IPI.stability = stability pkg, + IPI.homepage = homepage pkg, + IPI.pkgUrl = pkgUrl pkg, + IPI.synopsis = synopsis pkg, + IPI.description = description pkg, + IPI.category = category pkg, + IPI.exposed = libExposed lib, + IPI.exposedModules = map fixupSelf (componentExposedModules clbi), + IPI.hiddenModules = otherModules bi, + IPI.instantiatedWith = map (\(k,(p,n)) -> + (k,IPI.OriginalModule (IPI.installedPackageId p) n)) + (instantiatedWith lbi), + IPI.trusted = IPI.trusted IPI.emptyInstalledPackageInfo, + IPI.importDirs = [ libdir installDirs | hasModules ], + -- Note. the libsubdir and datasubdir templates have already been expanded + -- into libdir and datadir. 
+ IPI.libraryDirs = if hasLibrary + then libdir installDirs : extraLibDirs bi + else extraLibDirs bi, + IPI.dataDir = datadir installDirs, + IPI.hsLibraries = [ libname + | LibraryName libname <- componentLibraries clbi + , hasLibrary ], + IPI.extraLibraries = extraLibs bi, + IPI.extraGHCiLibraries = extraGHCiLibs bi, + IPI.includeDirs = absinc ++ adjustRelIncDirs relinc, + IPI.includes = includes bi, + IPI.depends = map fst (componentPackageDeps clbi), + IPI.ccOptions = [], -- Note. NOT ccOptions bi! + -- We don't want cc-options to be propagated + -- to C compilations in other packages. + IPI.ldOptions = ldOptions bi, + IPI.frameworkDirs = [], + IPI.frameworks = frameworks bi, + IPI.haddockInterfaces = [haddockdir installDirs haddockName pkg], + IPI.haddockHTMLs = [htmldir installDirs], + IPI.pkgRoot = Nothing + } + where + bi = libBuildInfo lib + (absinc, relinc) = partition isAbsolute (includeDirs bi) + hasModules = not $ null (libModules lib) + hasLibrary = hasModules || not (null (cSources bi)) + || (not (null (jsSources bi)) && + compilerFlavor (compiler lbi) == GHCJS) + + -- Since we currently don't decide the InstalledPackageId of our package + -- until just before we register, we didn't have one for the re-exports + -- of modules defined within this package, so we used an empty one that + -- we fill in here now that we know what it is. It's a bit of a hack, + -- we ought really to decide the InstalledPackageId ahead of time. + fixupSelf (IPI.ExposedModule n o o') = + IPI.ExposedModule n (fmap fixupOriginalModule o) + (fmap fixupOriginalModule o') + fixupOriginalModule (IPI.OriginalModule i m) = IPI.OriginalModule (fixupIpid i) m + fixupIpid (InstalledPackageId []) = ipid + fixupIpid x = x + +-- | Construct 'InstalledPackageInfo' for a library that is in place in the +-- build tree. +-- +-- This function knows about the layout of in place packages. 
+-- +inplaceInstalledPackageInfo :: FilePath -- ^ top of the build tree + -> FilePath -- ^ location of the dist tree + -> PackageDescription + -> InstalledPackageId + -> Library + -> LocalBuildInfo + -> ComponentLocalBuildInfo + -> InstalledPackageInfo +inplaceInstalledPackageInfo inplaceDir distPref pkg ipid lib lbi clbi = + generalInstalledPackageInfo adjustRelativeIncludeDirs + pkg ipid lib lbi clbi installDirs + where + adjustRelativeIncludeDirs = map (inplaceDir ) + installDirs = + (absoluteInstallDirs pkg lbi NoCopyDest) { + libdir = inplaceDir buildDir lbi, + datadir = inplaceDir dataDir pkg, + docdir = inplaceDocdir, + htmldir = inplaceHtmldir, + haddockdir = inplaceHtmldir + } + inplaceDocdir = inplaceDir distPref "doc" + inplaceHtmldir = inplaceDocdir "html" display (packageName pkg) + + +-- | Construct 'InstalledPackageInfo' for the final install location of a +-- library package. +-- +-- This function knows about the layout of installed packages. +-- +absoluteInstalledPackageInfo :: PackageDescription + -> InstalledPackageId + -> Library + -> LocalBuildInfo + -> ComponentLocalBuildInfo + -> InstalledPackageInfo +absoluteInstalledPackageInfo pkg ipid lib lbi clbi = + generalInstalledPackageInfo adjustReativeIncludeDirs + pkg ipid lib lbi clbi installDirs + where + -- For installed packages we install all include files into one dir, + -- whereas in the build tree they may live in multiple local dirs. 
+ adjustReativeIncludeDirs _ + | null (installIncludes bi) = [] + | otherwise = [includedir installDirs] + bi = libBuildInfo lib + installDirs = absoluteInstallDirs pkg lbi NoCopyDest + + +relocatableInstalledPackageInfo :: PackageDescription + -> InstalledPackageId + -> Library + -> LocalBuildInfo + -> ComponentLocalBuildInfo + -> FilePath + -> InstalledPackageInfo +relocatableInstalledPackageInfo pkg ipid lib lbi clbi pkgroot = + generalInstalledPackageInfo adjustReativeIncludeDirs + pkg ipid lib lbi clbi installDirs + where + -- For installed packages we install all include files into one dir, + -- whereas in the build tree they may live in multiple local dirs. + adjustReativeIncludeDirs _ + | null (installIncludes bi) = [] + | otherwise = [includedir installDirs] + bi = libBuildInfo lib + + installDirs = fmap (("${pkgroot}" ) . shortRelativePath pkgroot) + $ absoluteInstallDirs pkg lbi NoCopyDest + +-- ----------------------------------------------------------------------------- +-- Unregistration + +unregister :: PackageDescription -> LocalBuildInfo -> RegisterFlags -> IO () +unregister pkg lbi regFlags = do + let pkgid = packageId pkg + genScript = fromFlag (regGenScript regFlags) + verbosity = fromFlag (regVerbosity regFlags) + packageDb = fromFlagOrDefault (registrationPackageDB (withPackageDB lbi)) + (regPackageDB regFlags) + unreg hpi = + let invocation = HcPkg.unregisterInvocation + hpi Verbosity.normal packageDb pkgid + in if genScript + then writeFileAtomic unregScriptFileName + (BS.Char8.pack $ invocationAsSystemScript buildOS invocation) + else runProgramInvocation verbosity invocation + setupMessage verbosity "Unregistering" pkgid + withHcPkg "unregistering is only implemented for GHC and GHCJS" + (compiler lbi) (withPrograms lbi) unreg + +unregScriptFileName :: FilePath +unregScriptFileName = case buildOS of + Windows -> "unregister.bat" + _ -> "unregister.sh" diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Setup.hs 
cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Setup.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Setup.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Setup.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,2223 @@ +{-# LANGUAGE CPP #-} +{-# LANGUAGE DeriveGeneric #-} + +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.Setup +-- Copyright : Isaac Jones 2003-2004 +-- Duncan Coutts 2007 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This is a big module, but not very complicated. The code is very regular +-- and repetitive. It defines the command line interface for all the Cabal +-- commands. For each command (like @configure@, @build@ etc) it defines a type +-- that holds all the flags, the default set of flags and a 'CommandUI' that +-- maps command line flags to and from the corresponding flags type. +-- +-- All the flags types are instances of 'Monoid', see +-- +-- for an explanation. +-- +-- The types defined here get used in the front end and especially in +-- @cabal-install@ which has to do quite a bit of manipulating sets of command +-- line flags. +-- +-- This is actually relatively nice, it works quite well. The main change it +-- needs is to unify it with the code for managing sets of fields that can be +-- read and written from files. This would allow us to save configure flags in +-- config files. 
+ +{-# LANGUAGE CPP #-} + +module Distribution.Simple.Setup ( + + GlobalFlags(..), emptyGlobalFlags, defaultGlobalFlags, globalCommand, + ConfigFlags(..), emptyConfigFlags, defaultConfigFlags, configureCommand, + configAbsolutePaths, readPackageDbList, showPackageDbList, + CopyFlags(..), emptyCopyFlags, defaultCopyFlags, copyCommand, + InstallFlags(..), emptyInstallFlags, defaultInstallFlags, installCommand, + HaddockFlags(..), emptyHaddockFlags, defaultHaddockFlags, haddockCommand, + HscolourFlags(..), emptyHscolourFlags, defaultHscolourFlags, hscolourCommand, + BuildFlags(..), emptyBuildFlags, defaultBuildFlags, buildCommand, + buildVerbose, + ReplFlags(..), defaultReplFlags, replCommand, + CleanFlags(..), emptyCleanFlags, defaultCleanFlags, cleanCommand, + RegisterFlags(..), emptyRegisterFlags, defaultRegisterFlags, registerCommand, + unregisterCommand, + SDistFlags(..), emptySDistFlags, defaultSDistFlags, sdistCommand, + TestFlags(..), emptyTestFlags, defaultTestFlags, testCommand, + TestShowDetails(..), + BenchmarkFlags(..), emptyBenchmarkFlags, + defaultBenchmarkFlags, benchmarkCommand, + CopyDest(..), + configureArgs, configureOptions, configureCCompiler, configureLinker, + buildOptions, haddockOptions, installDirsOptions, + programConfigurationOptions, programConfigurationPaths', + + defaultDistPref, + + Flag(..), + toFlag, + fromFlag, + fromFlagOrDefault, + flagToMaybe, + flagToList, + boolOpt, boolOpt', trueArg, falseArg, optionVerbosity, optionNumJobs ) where + +import Distribution.Compiler () +import Distribution.ReadE +import Distribution.Text + ( Text(..), display ) +import qualified Distribution.Compat.ReadP as Parse +import qualified Text.PrettyPrint as Disp +import Distribution.ModuleName +import Distribution.Package ( Dependency(..) 
+ , PackageName + , InstalledPackageId ) +import Distribution.PackageDescription + ( FlagName(..), FlagAssignment ) +import Distribution.Simple.Command hiding (boolOpt, boolOpt') +import qualified Distribution.Simple.Command as Command +import Distribution.Simple.Compiler + ( CompilerFlavor(..), defaultCompilerFlavor, PackageDB(..) + , DebugInfoLevel(..), flagToDebugInfoLevel + , OptimisationLevel(..), flagToOptimisationLevel + , absolutePackageDBPath ) +import Distribution.Simple.Utils + ( wrapText, wrapLine, lowercase, intercalate ) +import Distribution.Simple.Program (Program(..), ProgramConfiguration, + requireProgram, + programInvocation, progInvokePath, progInvokeArgs, + knownPrograms, + addKnownProgram, emptyProgramConfiguration, + haddockProgram, ghcProgram, gccProgram, ldProgram) +import Distribution.Simple.InstallDirs + ( InstallDirs(..), CopyDest(..), + PathTemplate, toPathTemplate, fromPathTemplate ) +import Distribution.Verbosity +import Distribution.Utils.NubList + +import Control.Monad (liftM) +import Distribution.Compat.Binary (Binary) +import Data.List ( sort ) +import Data.Char ( isSpace, isAlpha ) +#if __GLASGOW_HASKELL__ < 710 +import Data.Monoid ( Monoid(..) ) +#endif +import GHC.Generics (Generic) + +-- FIXME Not sure where this should live +defaultDistPref :: FilePath +defaultDistPref = "dist" + +-- ------------------------------------------------------------ +-- * Flag type +-- ------------------------------------------------------------ + +-- | All flags are monoids, they come in two flavours: +-- +-- 1. list flags eg +-- +-- > --ghc-option=foo --ghc-option=bar +-- +-- gives us all the values ["foo", "bar"] +-- +-- 2. singular value flags, eg: +-- +-- > --enable-foo --disable-foo +-- +-- gives us Just False +-- So this Flag type is for the latter singular kind of flag. +-- Its monoid instance gives us the behaviour where it starts out as +-- 'NoFlag' and later flags override earlier ones. 
+-- +data Flag a = Flag a | NoFlag deriving (Eq, Generic, Show, Read) + +instance Binary a => Binary (Flag a) + +instance Functor Flag where + fmap f (Flag x) = Flag (f x) + fmap _ NoFlag = NoFlag + +instance Monoid (Flag a) where + mempty = NoFlag + _ `mappend` f@(Flag _) = f + f `mappend` NoFlag = f + +instance Bounded a => Bounded (Flag a) where + minBound = toFlag minBound + maxBound = toFlag maxBound + +instance Enum a => Enum (Flag a) where + fromEnum = fromEnum . fromFlag + toEnum = toFlag . toEnum + enumFrom (Flag a) = map toFlag . enumFrom $ a + enumFrom _ = [] + enumFromThen (Flag a) (Flag b) = toFlag `map` enumFromThen a b + enumFromThen _ _ = [] + enumFromTo (Flag a) (Flag b) = toFlag `map` enumFromTo a b + enumFromTo _ _ = [] + enumFromThenTo (Flag a) (Flag b) (Flag c) = toFlag `map` enumFromThenTo a b c + enumFromThenTo _ _ _ = [] + +toFlag :: a -> Flag a +toFlag = Flag + +fromFlag :: Flag a -> a +fromFlag (Flag x) = x +fromFlag NoFlag = error "fromFlag NoFlag. Use fromFlagOrDefault" + +fromFlagOrDefault :: a -> Flag a -> a +fromFlagOrDefault _ (Flag x) = x +fromFlagOrDefault def NoFlag = def + +flagToMaybe :: Flag a -> Maybe a +flagToMaybe (Flag x) = Just x +flagToMaybe NoFlag = Nothing + +flagToList :: Flag a -> [a] +flagToList (Flag x) = [x] +flagToList NoFlag = [] + +allFlags :: [Flag Bool] -> Flag Bool +allFlags flags = if all (\f -> fromFlagOrDefault False f) flags + then Flag True + else NoFlag + +-- ------------------------------------------------------------ +-- * Global flags +-- ------------------------------------------------------------ + +-- In fact since individual flags types are monoids and these are just sets of +-- flags then they are also monoids pointwise. This turns out to be really +-- useful. The mempty is the set of empty flags and mappend allows us to +-- override specific flags. For example we can start with default flags and +-- override with the ones we get from a file or the command line, or both. 
+ +-- | Flags that apply at the top level, not to any sub-command. +data GlobalFlags = GlobalFlags { + globalVersion :: Flag Bool, + globalNumericVersion :: Flag Bool + } + +defaultGlobalFlags :: GlobalFlags +defaultGlobalFlags = GlobalFlags { + globalVersion = Flag False, + globalNumericVersion = Flag False + } + +globalCommand :: [Command action] -> CommandUI GlobalFlags +globalCommand commands = CommandUI + { commandName = "" + , commandSynopsis = "" + , commandUsage = \pname -> + "This Setup program uses the Haskell Cabal Infrastructure.\n" + ++ "See http://www.haskell.org/cabal/ for more information.\n" + ++ "\n" + ++ "Usage: " ++ pname ++ " [GLOBAL FLAGS] [COMMAND [FLAGS]]\n" + , commandDescription = Just $ \pname -> + let + commands' = commands ++ [commandAddAction helpCommandUI undefined] + cmdDescs = getNormalCommandDescriptions commands' + maxlen = maximum $ [length name | (name, _) <- cmdDescs] + align str = str ++ replicate (maxlen - length str) ' ' + in + "Commands:\n" + ++ unlines [ " " ++ align name ++ " " ++ description + | (name, description) <- cmdDescs ] + ++ "\n" + ++ "For more information about a command use\n" + ++ " " ++ pname ++ " COMMAND --help\n\n" + ++ "Typical steps for installing Cabal packages:\n" + ++ concat [ " " ++ pname ++ " " ++ x ++ "\n" + | x <- ["configure", "build", "install"]] + , commandNotes = Nothing + , commandDefaultFlags = defaultGlobalFlags + , commandOptions = \_ -> + [option ['V'] ["version"] + "Print version information" + globalVersion (\v flags -> flags { globalVersion = v }) + trueArg + ,option [] ["numeric-version"] + "Print just the version number" + globalNumericVersion (\v flags -> flags { globalNumericVersion = v }) + trueArg + ] + } + +emptyGlobalFlags :: GlobalFlags +emptyGlobalFlags = mempty + +instance Monoid GlobalFlags where + mempty = GlobalFlags { + globalVersion = mempty, + globalNumericVersion = mempty + } + mappend a b = GlobalFlags { + globalVersion = combine globalVersion, + globalNumericVersion 
= combine globalNumericVersion + } + where combine field = field a `mappend` field b + +-- ------------------------------------------------------------ +-- * Config flags +-- ------------------------------------------------------------ + +-- | Flags to @configure@ command. +-- +-- IMPORTANT: every time a new flag is added, 'D.C.Setup.filterConfigureFlags' +-- should be updated. +data ConfigFlags = ConfigFlags { + --FIXME: the configPrograms is only here to pass info through to configure + -- because the type of configure is constrained by the UserHooks. + -- when we change UserHooks next we should pass the initial + -- ProgramConfiguration directly and not via ConfigFlags + configPrograms :: ProgramConfiguration, -- ^All programs that cabal may + -- run + + configProgramPaths :: [(String, FilePath)], -- ^user specified programs paths + configProgramArgs :: [(String, [String])], -- ^user specified programs args + configProgramPathExtra :: NubList FilePath, -- ^Extend the $PATH + configHcFlavor :: Flag CompilerFlavor, -- ^The \"flavor\" of the + -- compiler, such as GHC or + -- JHC. + configHcPath :: Flag FilePath, -- ^given compiler location + configHcPkg :: Flag FilePath, -- ^given hc-pkg location + configVanillaLib :: Flag Bool, -- ^Enable vanilla library + configProfLib :: Flag Bool, -- ^Enable profiling in the library + configSharedLib :: Flag Bool, -- ^Build shared library + configDynExe :: Flag Bool, -- ^Enable dynamic linking of the + -- executables. + configProfExe :: Flag Bool, -- ^Enable profiling in the + -- executables. + configConfigureArgs :: [String], -- ^Extra arguments to @configure@ + configOptimization :: Flag OptimisationLevel, -- ^Enable optimization. + configProgPrefix :: Flag PathTemplate, -- ^Installed executable prefix. + configProgSuffix :: Flag PathTemplate, -- ^Installed executable suffix. 
+ configInstallDirs :: InstallDirs (Flag PathTemplate), -- ^Installation + -- paths + configScratchDir :: Flag FilePath, + configExtraLibDirs :: [FilePath], -- ^ path to search for extra libraries + configExtraIncludeDirs :: [FilePath], -- ^ path to search for header files + + configDistPref :: Flag FilePath, -- ^"dist" prefix + configVerbosity :: Flag Verbosity, -- ^verbosity level + configUserInstall :: Flag Bool, -- ^The --user\/--global flag + configPackageDBs :: [Maybe PackageDB], -- ^Which package DBs to use + configGHCiLib :: Flag Bool, -- ^Enable compiling library for GHCi + configSplitObjs :: Flag Bool, -- ^Enable -split-objs with GHC + configStripExes :: Flag Bool, -- ^Enable executable stripping + configStripLibs :: Flag Bool, -- ^Enable library stripping + configConstraints :: [Dependency], -- ^Additional constraints for + -- dependencies. + configDependencies :: [(PackageName, InstalledPackageId)], + configInstantiateWith :: [(ModuleName, (InstalledPackageId, ModuleName))], + -- ^The packages depended on. + configConfigurationsFlags :: FlagAssignment, + configTests :: Flag Bool, -- ^Enable test suite compilation + configBenchmarks :: Flag Bool, -- ^Enable benchmark compilation + configCoverage :: Flag Bool, -- ^Enable program coverage + configLibCoverage :: Flag Bool, -- ^Enable program coverage (deprecated) + configExactConfiguration :: Flag Bool, + -- ^All direct dependencies and flags are provided on the command line by + -- the user via the '--dependency' and '--flags' options. + configFlagError :: Flag String, + -- ^Halt and show an error message indicating an error in flag assignment + configRelocatable :: Flag Bool, -- ^ Enable relocatable package built + configDebugInfo :: Flag DebugInfoLevel -- ^ Emit debug info. 
+ } + deriving (Generic, Read, Show) + +instance Binary ConfigFlags + +configAbsolutePaths :: ConfigFlags -> IO ConfigFlags +configAbsolutePaths f = + (\v -> f { configPackageDBs = v }) + `liftM` mapM (maybe (return Nothing) (liftM Just . absolutePackageDBPath)) + (configPackageDBs f) + +defaultConfigFlags :: ProgramConfiguration -> ConfigFlags +defaultConfigFlags progConf = emptyConfigFlags { + configPrograms = progConf, + configHcFlavor = maybe NoFlag Flag defaultCompilerFlavor, + configVanillaLib = Flag True, + configProfLib = NoFlag, + configSharedLib = NoFlag, + configDynExe = Flag False, + configProfExe = NoFlag, + configOptimization = Flag NormalOptimisation, + configProgPrefix = Flag (toPathTemplate ""), + configProgSuffix = Flag (toPathTemplate ""), + configDistPref = Flag defaultDistPref, + configVerbosity = Flag normal, + configUserInstall = Flag False, --TODO: reverse this +#if defined(mingw32_HOST_OS) + -- See #1589. + configGHCiLib = Flag True, +#else + configGHCiLib = NoFlag, +#endif + configSplitObjs = Flag False, -- takes longer, so turn off by default + configStripExes = Flag True, + configStripLibs = Flag True, + configTests = Flag False, + configBenchmarks = Flag False, + configCoverage = Flag False, + configLibCoverage = NoFlag, + configExactConfiguration = Flag False, + configFlagError = NoFlag, + configRelocatable = Flag False, + configDebugInfo = Flag NoDebugInfo + } + +configureCommand :: ProgramConfiguration -> CommandUI ConfigFlags +configureCommand progConf = CommandUI + { commandName = "configure" + , commandSynopsis = "Prepare to build the package." 
+ , commandDescription = Just $ \_ -> wrapText $ + "Configure how the package is built by setting " + ++ "package (and other) flags.\n" + ++ "\n" + ++ "The configuration affects several other commands, " + ++ "including build, test, bench, run, repl.\n" + , commandNotes = Just (\_ -> programFlagsDescription progConf) + , commandUsage = \pname -> + "Usage: " ++ pname ++ " configure [FLAGS]\n" + , commandDefaultFlags = defaultConfigFlags progConf + , commandOptions = \showOrParseArgs -> + configureOptions showOrParseArgs + ++ programConfigurationPaths progConf showOrParseArgs + configProgramPaths (\v fs -> fs { configProgramPaths = v }) + ++ programConfigurationOption progConf showOrParseArgs + configProgramArgs (\v fs -> fs { configProgramArgs = v }) + ++ programConfigurationOptions progConf showOrParseArgs + configProgramArgs (\v fs -> fs { configProgramArgs = v }) + } + +configureOptions :: ShowOrParseArgs -> [OptionField ConfigFlags] +configureOptions showOrParseArgs = + [optionVerbosity configVerbosity + (\v flags -> flags { configVerbosity = v }) + ,optionDistPref + configDistPref (\d flags -> flags { configDistPref = d }) + showOrParseArgs + + ,option [] ["compiler"] "compiler" + configHcFlavor (\v flags -> flags { configHcFlavor = v }) + (choiceOpt [ (Flag GHC, ("g", ["ghc"]), "compile with GHC") + , (Flag GHCJS, ([] , ["ghcjs"]), "compile with GHCJS") + , (Flag JHC, ([] , ["jhc"]), "compile with JHC") + , (Flag LHC, ([] , ["lhc"]), "compile with LHC") + , (Flag UHC, ([] , ["uhc"]), "compile with UHC") + -- "haskell-suite" compiler id string will be replaced + -- by a more specific one during the configure stage + , (Flag (HaskellSuite "haskell-suite"), ([] , ["haskell-suite"]), + "compile with a haskell-suite compiler")]) + + ,option "w" ["with-compiler"] + "give the path to a particular compiler" + configHcPath (\v flags -> flags { configHcPath = v }) + (reqArgFlag "PATH") + + ,option "" ["with-hc-pkg"] + "give the path to the package tool" + configHcPkg 
(\v flags -> flags { configHcPkg = v }) + (reqArgFlag "PATH") + ] + ++ map liftInstallDirs installDirsOptions + ++ [option "" ["program-prefix"] + "prefix to be applied to installed executables" + configProgPrefix + (\v flags -> flags { configProgPrefix = v }) + (reqPathTemplateArgFlag "PREFIX") + + ,option "" ["program-suffix"] + "suffix to be applied to installed executables" + configProgSuffix (\v flags -> flags { configProgSuffix = v } ) + (reqPathTemplateArgFlag "SUFFIX") + + ,option "" ["library-vanilla"] + "Vanilla libraries" + configVanillaLib (\v flags -> flags { configVanillaLib = v }) + (boolOpt [] []) + + ,option "p" ["library-profiling"] + "Library profiling" + configProfLib (\v flags -> flags { configProfLib = v }) + (boolOpt "p" []) + + ,option "" ["shared"] + "Shared library" + configSharedLib (\v flags -> flags { configSharedLib = v }) + (boolOpt [] []) + + ,option "" ["executable-dynamic"] + "Executable dynamic linking" + configDynExe (\v flags -> flags { configDynExe = v }) + (boolOpt [] []) + + ,option "" ["profiling"] + "Executable profiling (requires library profiling)" + -- HACK: See #2409. Thankfully, this is 1.22-specific. + (\flags -> + case (configProfLib flags, configProfExe flags) of + (Flag a, Flag b) + | (a == b) + && ("cabalConfProf", "/TRUE") `elem` configProgramPaths flags + -> configProfExe flags + _ -> NoFlag) + (\v flags -> flags + { configProfLib = v, configProfExe = v + , configProgramPaths = ("cabalConfProf", "/TRUE") + : configProgramPaths flags }) + (boolOpt [] []) + + ,option "" ["executable-profiling"] + "Executable profiling (DEPRECATED)" + configProfExe (\v flags -> flags { configProfExe = v }) + (boolOpt [] []) + + ,multiOption "optimization" + configOptimization (\v flags -> flags { configOptimization = v }) + [optArg' "n" (Flag . 
flagToOptimisationLevel) + (\f -> case f of + Flag NoOptimisation -> [] + Flag NormalOptimisation -> [Nothing] + Flag MaximumOptimisation -> [Just "2"] + _ -> []) + "O" ["enable-optimization","enable-optimisation"] + "Build with optimization (n is 0--2, default is 1)", + noArg (Flag NoOptimisation) [] + ["disable-optimization","disable-optimisation"] + "Build without optimization" + ] + + ,multiOption "debug-info" + configDebugInfo (\v flags -> flags { configDebugInfo = v }) + [optArg' "n" (Flag . flagToDebugInfoLevel) + (\f -> case f of + Flag NoDebugInfo -> [] + Flag MinimalDebugInfo -> [Just "1"] + Flag NormalDebugInfo -> [Nothing] + Flag MaximalDebugInfo -> [Just "3"] + _ -> []) + "" ["enable-debug-info"] + "Emit debug info (n is 0--3, default is 0)", + noArg (Flag NoDebugInfo) [] + ["disable-debug-info"] + "Don't emit debug info" + ] + + ,option "" ["library-for-ghci"] + "compile library for use with GHCi" + configGHCiLib (\v flags -> flags { configGHCiLib = v }) + (boolOpt [] []) + + ,option "" ["split-objs"] + "split library into smaller objects to reduce binary sizes (GHC 6.6+)" + configSplitObjs (\v flags -> flags { configSplitObjs = v }) + (boolOpt [] []) + + ,option "" ["executable-stripping"] + "strip executables upon installation to reduce binary sizes" + configStripExes (\v flags -> flags { configStripExes = v }) + (boolOpt [] []) + + ,option "" ["library-stripping"] + "strip libraries upon installation to reduce binary sizes" + configStripLibs (\v flags -> flags { configStripLibs = v }) + (boolOpt [] []) + + ,option "" ["configure-option"] + "Extra option for configure" + configConfigureArgs (\v flags -> flags { configConfigureArgs = v }) + (reqArg' "OPT" (\x -> [x]) id) + + ,option "" ["user-install"] + "doing a per-user installation" + configUserInstall (\v flags -> flags { configUserInstall = v }) + (boolOpt' ([],["user"]) ([], ["global"])) + + ,option "" ["package-db"] + "Use a given package database (to satisfy dependencies and register in). 
May be a specific file, 'global', 'user' or 'clear'." + configPackageDBs (\v flags -> flags { configPackageDBs = v }) + (reqArg' "DB" readPackageDbList showPackageDbList) + + ,option "f" ["flags"] + "Force values for the given flags in Cabal conditionals in the .cabal file. E.g., --flags=\"debug -usebytestrings\" forces the flag \"debug\" to true and \"usebytestrings\" to false." + configConfigurationsFlags (\v flags -> flags { configConfigurationsFlags = v }) + (reqArg' "FLAGS" readFlagList showFlagList) + + ,option "" ["extra-include-dirs"] + "A list of directories to search for header files" + configExtraIncludeDirs (\v flags -> flags {configExtraIncludeDirs = v}) + (reqArg' "PATH" (\x -> [x]) id) + + ,option "" ["extra-lib-dirs"] + "A list of directories to search for external libraries" + configExtraLibDirs (\v flags -> flags {configExtraLibDirs = v}) + (reqArg' "PATH" (\x -> [x]) id) + + ,option "" ["extra-prog-path"] + "A list of directories to search for required programs (in addition to the normal search locations)" + configProgramPathExtra (\v flags -> flags {configProgramPathExtra = v}) + (reqArg' "PATH" (\x -> toNubList [x]) fromNubList) + + ,option "" ["constraint"] + "A list of additional constraints on the dependencies." + configConstraints (\v flags -> flags { configConstraints = v}) + (reqArg "DEPENDENCY" + (readP_to_E (const "dependency expected") ((\x -> [x]) `fmap` parse)) + (map (\x -> display x))) + + ,option "" ["dependency"] + "A list of exact dependencies. E.g., --dependency=\"void=void-0.5.8-177d5cdf20962d0581fe2e4932a6c309\"" + configDependencies (\v flags -> flags { configDependencies = v}) + (reqArg "NAME=ID" + (readP_to_E (const "dependency expected") ((\x -> [x]) `fmap` parseDependency)) + (map (\x -> display (fst x) ++ "=" ++ display (snd x)))) + + ,option "" ["instantiate-with"] + "A mapping of signature names to concrete module instantiations. 
E.g., --instantiate-with=\"Map=Data.Map.Strict@containers-0.5.5.1-inplace\"" + configInstantiateWith (\v flags -> flags { configInstantiateWith = v }) + (reqArg "NAME=PKG:MOD" + (readP_to_E (const "signature mapping expected") ((\x -> [x]) `fmap` parseHoleMapEntry)) + (map (\(n,(p,m)) -> display n ++ "=" ++ display m ++ "@" ++ display p))) + + ,option "" ["tests"] + "dependency checking and compilation for test suites listed in the package description file." + configTests (\v flags -> flags { configTests = v }) + (boolOpt [] []) + + ,option "" ["coverage"] + "build package with Haskell Program Coverage. (GHC only)" + configCoverage (\v flags -> flags { configCoverage = v }) + (boolOpt [] []) + + ,option "" ["library-coverage"] + "build package with Haskell Program Coverage. (GHC only) (DEPRECATED)" + configLibCoverage (\v flags -> flags { configLibCoverage = v }) + (boolOpt [] []) + + ,option "" ["exact-configuration"] + "All direct dependencies and flags are provided on the command line." + configExactConfiguration + (\v flags -> flags { configExactConfiguration = v }) + trueArg + + ,option "" ["benchmarks"] + "dependency checking and compilation for benchmarks listed in the package description file." + configBenchmarks (\v flags -> flags { configBenchmarks = v }) + (boolOpt [] []) + + ,option "" ["relocatable"] + "building a package that is relocatable. (GHC only)" + configRelocatable (\v flags -> flags { configRelocatable = v}) + (boolOpt [] []) + ] + where + readFlagList :: String -> FlagAssignment + readFlagList = map tagWithValue . 
words + where tagWithValue ('-':fname) = (FlagName (lowercase fname), False) + tagWithValue fname = (FlagName (lowercase fname), True) + + showFlagList :: FlagAssignment -> [String] + showFlagList fs = [ if not set then '-':fname else fname + | (FlagName fname, set) <- fs] + + liftInstallDirs = + liftOption configInstallDirs (\v flags -> flags { configInstallDirs = v }) + + reqPathTemplateArgFlag title _sf _lf d get set = + reqArgFlag title _sf _lf d + (fmap fromPathTemplate . get) (set . fmap toPathTemplate) + +readPackageDbList :: String -> [Maybe PackageDB] +readPackageDbList "clear" = [Nothing] +readPackageDbList "global" = [Just GlobalPackageDB] +readPackageDbList "user" = [Just UserPackageDB] +readPackageDbList other = [Just (SpecificPackageDB other)] + +showPackageDbList :: [Maybe PackageDB] -> [String] +showPackageDbList = map showPackageDb + where + showPackageDb Nothing = "clear" + showPackageDb (Just GlobalPackageDB) = "global" + showPackageDb (Just UserPackageDB) = "user" + showPackageDb (Just (SpecificPackageDB db)) = db + + +parseDependency :: Parse.ReadP r (PackageName, InstalledPackageId) +parseDependency = do + x <- parse + _ <- Parse.char '=' + y <- parse + return (x, y) + +parseHoleMapEntry :: Parse.ReadP r (ModuleName, (InstalledPackageId, ModuleName)) +parseHoleMapEntry = do + x <- parse + _ <- Parse.char '=' + y <- parse + _ <- Parse.char '@' + z <- parse + return (x, (z, y)) + +installDirsOptions :: [OptionField (InstallDirs (Flag PathTemplate))] +installDirsOptions = + [ option "" ["prefix"] + "bake this prefix in preparation of installation" + prefix (\v flags -> flags { prefix = v }) + installDirArg + + , option "" ["bindir"] + "installation directory for executables" + bindir (\v flags -> flags { bindir = v }) + installDirArg + + , option "" ["libdir"] + "installation directory for libraries" + libdir (\v flags -> flags { libdir = v }) + installDirArg + + , option "" ["libsubdir"] + "subdirectory of libdir in which libs are installed" + 
libsubdir (\v flags -> flags { libsubdir = v }) + installDirArg + + , option "" ["libexecdir"] + "installation directory for program executables" + libexecdir (\v flags -> flags { libexecdir = v }) + installDirArg + + , option "" ["datadir"] + "installation directory for read-only data" + datadir (\v flags -> flags { datadir = v }) + installDirArg + + , option "" ["datasubdir"] + "subdirectory of datadir in which data files are installed" + datasubdir (\v flags -> flags { datasubdir = v }) + installDirArg + + , option "" ["docdir"] + "installation directory for documentation" + docdir (\v flags -> flags { docdir = v }) + installDirArg + + , option "" ["htmldir"] + "installation directory for HTML documentation" + htmldir (\v flags -> flags { htmldir = v }) + installDirArg + + , option "" ["haddockdir"] + "installation directory for haddock interfaces" + haddockdir (\v flags -> flags { haddockdir = v }) + installDirArg + + , option "" ["sysconfdir"] + "installation directory for configuration files" + sysconfdir (\v flags -> flags { sysconfdir = v }) + installDirArg + ] + where + installDirArg _sf _lf d get set = + reqArgFlag "DIR" _sf _lf d + (fmap fromPathTemplate . get) (set . 
fmap toPathTemplate) + +emptyConfigFlags :: ConfigFlags +emptyConfigFlags = mempty + +instance Monoid ConfigFlags where + mempty = ConfigFlags { + configPrograms = error "FIXME: remove configPrograms", + configProgramPaths = mempty, + configProgramArgs = mempty, + configProgramPathExtra = mempty, + configHcFlavor = mempty, + configHcPath = mempty, + configHcPkg = mempty, + configVanillaLib = mempty, + configProfLib = mempty, + configSharedLib = mempty, + configDynExe = mempty, + configProfExe = mempty, + configConfigureArgs = mempty, + configOptimization = mempty, + configProgPrefix = mempty, + configProgSuffix = mempty, + configInstallDirs = mempty, + configScratchDir = mempty, + configDistPref = mempty, + configVerbosity = mempty, + configUserInstall = mempty, + configPackageDBs = mempty, + configGHCiLib = mempty, + configSplitObjs = mempty, + configStripExes = mempty, + configStripLibs = mempty, + configExtraLibDirs = mempty, + configConstraints = mempty, + configDependencies = mempty, + configInstantiateWith = mempty, + configExtraIncludeDirs = mempty, + configConfigurationsFlags = mempty, + configTests = mempty, + configCoverage = mempty, + configLibCoverage = mempty, + configExactConfiguration = mempty, + configBenchmarks = mempty, + configFlagError = mempty, + configRelocatable = mempty, + configDebugInfo = mempty + } + mappend a b = ConfigFlags { + configPrograms = configPrograms b, + configProgramPaths = combine configProgramPaths, + configProgramArgs = combine configProgramArgs, + configProgramPathExtra = combine configProgramPathExtra, + configHcFlavor = combine configHcFlavor, + configHcPath = combine configHcPath, + configHcPkg = combine configHcPkg, + configVanillaLib = combine configVanillaLib, + configProfLib = combine configProfLib, + configSharedLib = combine configSharedLib, + configDynExe = combine configDynExe, + configProfExe = combine configProfExe, + configConfigureArgs = combine configConfigureArgs, + configOptimization = combine 
configOptimization, + configProgPrefix = combine configProgPrefix, + configProgSuffix = combine configProgSuffix, + configInstallDirs = combine configInstallDirs, + configScratchDir = combine configScratchDir, + configDistPref = combine configDistPref, + configVerbosity = combine configVerbosity, + configUserInstall = combine configUserInstall, + configPackageDBs = combine configPackageDBs, + configGHCiLib = combine configGHCiLib, + configSplitObjs = combine configSplitObjs, + configStripExes = combine configStripExes, + configStripLibs = combine configStripLibs, + configExtraLibDirs = combine configExtraLibDirs, + configConstraints = combine configConstraints, + configDependencies = combine configDependencies, + configInstantiateWith = combine configInstantiateWith, + configExtraIncludeDirs = combine configExtraIncludeDirs, + configConfigurationsFlags = combine configConfigurationsFlags, + configTests = combine configTests, + configCoverage = combine configCoverage, + configLibCoverage = combine configLibCoverage, + configExactConfiguration = combine configExactConfiguration, + configBenchmarks = combine configBenchmarks, + configFlagError = combine configFlagError, + configRelocatable = combine configRelocatable, + configDebugInfo = combine configDebugInfo + } + where combine field = field a `mappend` field b + +-- ------------------------------------------------------------ +-- * Copy flags +-- ------------------------------------------------------------ + +-- | Flags to @copy@: (destdir, copy-prefix (backwards compat), verbosity) +data CopyFlags = CopyFlags { + copyDest :: Flag CopyDest, + copyDistPref :: Flag FilePath, + copyVerbosity :: Flag Verbosity + } + deriving Show + +defaultCopyFlags :: CopyFlags +defaultCopyFlags = CopyFlags { + copyDest = Flag NoCopyDest, + copyDistPref = Flag defaultDistPref, + copyVerbosity = Flag normal + } + +copyCommand :: CommandUI CopyFlags +copyCommand = CommandUI + { commandName = "copy" + , commandSynopsis = "Copy the files 
into the install locations." + , commandDescription = Just $ \_ -> wrapText $ + "Does not call register, and allows a prefix at install time. " + ++ "Without the --destdir flag, configure determines location.\n" + , commandNotes = Nothing + , commandUsage = \pname -> + "Usage: " ++ pname ++ " copy [FLAGS]\n" + , commandDefaultFlags = defaultCopyFlags + , commandOptions = \showOrParseArgs -> + [optionVerbosity copyVerbosity (\v flags -> flags { copyVerbosity = v }) + + ,optionDistPref + copyDistPref (\d flags -> flags { copyDistPref = d }) + showOrParseArgs + + ,option "" ["destdir"] + "directory to copy files to, prepended to installation directories" + copyDest (\v flags -> flags { copyDest = v }) + (reqArg "DIR" (succeedReadE (Flag . CopyTo)) + (\f -> case f of Flag (CopyTo p) -> [p]; _ -> [])) + ] + } + +emptyCopyFlags :: CopyFlags +emptyCopyFlags = mempty + +instance Monoid CopyFlags where + mempty = CopyFlags { + copyDest = mempty, + copyDistPref = mempty, + copyVerbosity = mempty + } + mappend a b = CopyFlags { + copyDest = combine copyDest, + copyDistPref = combine copyDistPref, + copyVerbosity = combine copyVerbosity + } + where combine field = field a `mappend` field b + +-- ------------------------------------------------------------ +-- * Install flags +-- ------------------------------------------------------------ + +-- | Flags to @install@: (package db, verbosity) +data InstallFlags = InstallFlags { + installPackageDB :: Flag PackageDB, + installDistPref :: Flag FilePath, + installUseWrapper :: Flag Bool, + installInPlace :: Flag Bool, + installVerbosity :: Flag Verbosity + } + deriving Show + +defaultInstallFlags :: InstallFlags +defaultInstallFlags = InstallFlags { + installPackageDB = NoFlag, + installDistPref = Flag defaultDistPref, + installUseWrapper = Flag False, + installInPlace = Flag False, + installVerbosity = Flag normal + } + +installCommand :: CommandUI InstallFlags +installCommand = CommandUI + { commandName = "install" + , 
commandSynopsis = + "Copy the files into the install locations. Run register." + , commandDescription = Just $ \_ -> wrapText $ + "Unlike the copy command, install calls the register command." + ++ "If you want to install into a location that is not what was" + ++ "specified in the configure step, use the copy command.\n" + , commandNotes = Nothing + , commandUsage = \pname -> + "Usage: " ++ pname ++ " install [FLAGS]\n" + , commandDefaultFlags = defaultInstallFlags + , commandOptions = \showOrParseArgs -> + [optionVerbosity installVerbosity (\v flags -> flags { installVerbosity = v }) + ,optionDistPref + installDistPref (\d flags -> flags { installDistPref = d }) + showOrParseArgs + + ,option "" ["inplace"] + "install the package in the install subdirectory of the dist prefix, so it can be used without being installed" + installInPlace (\v flags -> flags { installInPlace = v }) + trueArg + + ,option "" ["shell-wrappers"] + "using shell script wrappers around executables" + installUseWrapper (\v flags -> flags { installUseWrapper = v }) + (boolOpt [] []) + + ,option "" ["package-db"] "" + installPackageDB (\v flags -> flags { installPackageDB = v }) + (choiceOpt [ (Flag UserPackageDB, ([],["user"]), + "upon configuration register this package in the user's local package database") + , (Flag GlobalPackageDB, ([],["global"]), + "(default) upon configuration register this package in the system-wide package database")]) + ] + } + +emptyInstallFlags :: InstallFlags +emptyInstallFlags = mempty + +instance Monoid InstallFlags where + mempty = InstallFlags{ + installPackageDB = mempty, + installDistPref = mempty, + installUseWrapper = mempty, + installInPlace = mempty, + installVerbosity = mempty + } + mappend a b = InstallFlags{ + installPackageDB = combine installPackageDB, + installDistPref = combine installDistPref, + installUseWrapper = combine installUseWrapper, + installInPlace = combine installInPlace, + installVerbosity = combine installVerbosity + } + where 
combine field = field a `mappend` field b + +-- ------------------------------------------------------------ +-- * SDist flags +-- ------------------------------------------------------------ + +-- | Flags to @sdist@: (snapshot, verbosity) +data SDistFlags = SDistFlags { + sDistSnapshot :: Flag Bool, + sDistDirectory :: Flag FilePath, + sDistDistPref :: Flag FilePath, + sDistListSources :: Flag FilePath, + sDistVerbosity :: Flag Verbosity + } + deriving Show + +defaultSDistFlags :: SDistFlags +defaultSDistFlags = SDistFlags { + sDistSnapshot = Flag False, + sDistDirectory = mempty, + sDistDistPref = Flag defaultDistPref, + sDistListSources = mempty, + sDistVerbosity = Flag normal + } + +sdistCommand :: CommandUI SDistFlags +sdistCommand = CommandUI + { commandName = "sdist" + , commandSynopsis = + "Generate a source distribution file (.tar.gz)." + , commandDescription = Nothing + , commandNotes = Nothing + , commandUsage = \pname -> + "Usage: " ++ pname ++ " sdist [FLAGS]\n" + , commandDefaultFlags = defaultSDistFlags + , commandOptions = \showOrParseArgs -> + [optionVerbosity sDistVerbosity (\v flags -> flags { sDistVerbosity = v }) + ,optionDistPref + sDistDistPref (\d flags -> flags { sDistDistPref = d }) + showOrParseArgs + + ,option "" ["list-sources"] + "Just write a list of the package's sources to a file" + sDistListSources (\v flags -> flags { sDistListSources = v }) + (reqArgFlag "FILE") + + ,option "" ["snapshot"] + "Produce a snapshot source distribution" + sDistSnapshot (\v flags -> flags { sDistSnapshot = v }) + trueArg + + ,option "" ["output-directory"] + ("Generate a source distribution in the given directory, " + ++ "without creating a tarball") + sDistDirectory (\v flags -> flags { sDistDirectory = v }) + (reqArgFlag "DIR") + ] + } + +emptySDistFlags :: SDistFlags +emptySDistFlags = mempty + +instance Monoid SDistFlags where + mempty = SDistFlags { + sDistSnapshot = mempty, + sDistDirectory = mempty, + sDistDistPref = mempty, + sDistListSources = 
mempty, + sDistVerbosity = mempty + } + mappend a b = SDistFlags { + sDistSnapshot = combine sDistSnapshot, + sDistDirectory = combine sDistDirectory, + sDistDistPref = combine sDistDistPref, + sDistListSources = combine sDistListSources, + sDistVerbosity = combine sDistVerbosity + } + where combine field = field a `mappend` field b + +-- ------------------------------------------------------------ +-- * Register flags +-- ------------------------------------------------------------ + +-- | Flags to @register@ and @unregister@: (user package, gen-script, +-- in-place, verbosity) +data RegisterFlags = RegisterFlags { + regPackageDB :: Flag PackageDB, + regGenScript :: Flag Bool, + regGenPkgConf :: Flag (Maybe FilePath), + regInPlace :: Flag Bool, + regDistPref :: Flag FilePath, + regPrintId :: Flag Bool, + regVerbosity :: Flag Verbosity + } + deriving Show + +defaultRegisterFlags :: RegisterFlags +defaultRegisterFlags = RegisterFlags { + regPackageDB = NoFlag, + regGenScript = Flag False, + regGenPkgConf = NoFlag, + regInPlace = Flag False, + regDistPref = Flag defaultDistPref, + regPrintId = Flag False, + regVerbosity = Flag normal + } + +registerCommand :: CommandUI RegisterFlags +registerCommand = CommandUI + { commandName = "register" + , commandSynopsis = + "Register this package with the compiler." 
+ , commandDescription = Nothing + , commandNotes = Nothing + , commandUsage = \pname -> + "Usage: " ++ pname ++ " register [FLAGS]\n" + , commandDefaultFlags = defaultRegisterFlags + , commandOptions = \showOrParseArgs -> + [optionVerbosity regVerbosity (\v flags -> flags { regVerbosity = v }) + ,optionDistPref + regDistPref (\d flags -> flags { regDistPref = d }) + showOrParseArgs + + ,option "" ["packageDB"] "" + regPackageDB (\v flags -> flags { regPackageDB = v }) + (choiceOpt [ (Flag UserPackageDB, ([],["user"]), + "upon registration, register this package in the user's local package database") + , (Flag GlobalPackageDB, ([],["global"]), + "(default)upon registration, register this package in the system-wide package database")]) + + ,option "" ["inplace"] + "register the package in the build location, so it can be used without being installed" + regInPlace (\v flags -> flags { regInPlace = v }) + trueArg + + ,option "" ["gen-script"] + "instead of registering, generate a script to register later" + regGenScript (\v flags -> flags { regGenScript = v }) + trueArg + + ,option "" ["gen-pkg-config"] + "instead of registering, generate a package registration file" + regGenPkgConf (\v flags -> flags { regGenPkgConf = v }) + (optArg' "PKG" Flag flagToList) + + ,option "" ["print-ipid"] + "print the installed package ID calculated for this package" + regPrintId (\v flags -> flags { regPrintId = v }) + trueArg + ] + } + +unregisterCommand :: CommandUI RegisterFlags +unregisterCommand = CommandUI + { commandName = "unregister" + , commandSynopsis = + "Unregister this package with the compiler." 
+ , commandDescription = Nothing + , commandNotes = Nothing + , commandUsage = \pname -> + "Usage: " ++ pname ++ " unregister [FLAGS]\n" + , commandDefaultFlags = defaultRegisterFlags + , commandOptions = \showOrParseArgs -> + [optionVerbosity regVerbosity (\v flags -> flags { regVerbosity = v }) + ,optionDistPref + regDistPref (\d flags -> flags { regDistPref = d }) + showOrParseArgs + + ,option "" ["user"] "" + regPackageDB (\v flags -> flags { regPackageDB = v }) + (choiceOpt [ (Flag UserPackageDB, ([],["user"]), + "unregister this package in the user's local package database") + , (Flag GlobalPackageDB, ([],["global"]), + "(default) unregister this package in the system-wide package database")]) + + ,option "" ["gen-script"] + "Instead of performing the unregister command, generate a script to unregister later" + regGenScript (\v flags -> flags { regGenScript = v }) + trueArg + ] + } + +emptyRegisterFlags :: RegisterFlags +emptyRegisterFlags = mempty + +instance Monoid RegisterFlags where + mempty = RegisterFlags { + regPackageDB = mempty, + regGenScript = mempty, + regGenPkgConf = mempty, + regInPlace = mempty, + regPrintId = mempty, + regDistPref = mempty, + regVerbosity = mempty + } + mappend a b = RegisterFlags { + regPackageDB = combine regPackageDB, + regGenScript = combine regGenScript, + regGenPkgConf = combine regGenPkgConf, + regInPlace = combine regInPlace, + regPrintId = combine regPrintId, + regDistPref = combine regDistPref, + regVerbosity = combine regVerbosity + } + where combine field = field a `mappend` field b + +-- ------------------------------------------------------------ +-- * HsColour flags +-- ------------------------------------------------------------ + +data HscolourFlags = HscolourFlags { + hscolourCSS :: Flag FilePath, + hscolourExecutables :: Flag Bool, + hscolourTestSuites :: Flag Bool, + hscolourBenchmarks :: Flag Bool, + hscolourDistPref :: Flag FilePath, + hscolourVerbosity :: Flag Verbosity + } + deriving Show + 
+emptyHscolourFlags :: HscolourFlags +emptyHscolourFlags = mempty + +defaultHscolourFlags :: HscolourFlags +defaultHscolourFlags = HscolourFlags { + hscolourCSS = NoFlag, + hscolourExecutables = Flag False, + hscolourTestSuites = Flag False, + hscolourBenchmarks = Flag False, + hscolourDistPref = Flag defaultDistPref, + hscolourVerbosity = Flag normal + } + +instance Monoid HscolourFlags where + mempty = HscolourFlags { + hscolourCSS = mempty, + hscolourExecutables = mempty, + hscolourTestSuites = mempty, + hscolourBenchmarks = mempty, + hscolourDistPref = mempty, + hscolourVerbosity = mempty + } + mappend a b = HscolourFlags { + hscolourCSS = combine hscolourCSS, + hscolourExecutables = combine hscolourExecutables, + hscolourTestSuites = combine hscolourTestSuites, + hscolourBenchmarks = combine hscolourBenchmarks, + hscolourDistPref = combine hscolourDistPref, + hscolourVerbosity = combine hscolourVerbosity + } + where combine field = field a `mappend` field b + +hscolourCommand :: CommandUI HscolourFlags +hscolourCommand = CommandUI + { commandName = "hscolour" + , commandSynopsis = + "Generate HsColour colourised code, in HTML format." 
+ , commandDescription = Just (\_ -> "Requires the hscolour program.\n") + , commandNotes = Nothing + , commandUsage = \pname -> + "Usage: " ++ pname ++ " hscolour [FLAGS]\n" + , commandDefaultFlags = defaultHscolourFlags + , commandOptions = \showOrParseArgs -> + [optionVerbosity hscolourVerbosity + (\v flags -> flags { hscolourVerbosity = v }) + ,optionDistPref + hscolourDistPref (\d flags -> flags { hscolourDistPref = d }) + showOrParseArgs + + ,option "" ["executables"] + "Run hscolour for Executables targets" + hscolourExecutables (\v flags -> flags { hscolourExecutables = v }) + trueArg + + ,option "" ["tests"] + "Run hscolour for Test Suite targets" + hscolourTestSuites (\v flags -> flags { hscolourTestSuites = v }) + trueArg + + ,option "" ["benchmarks"] + "Run hscolour for Benchmark targets" + hscolourBenchmarks (\v flags -> flags { hscolourBenchmarks = v }) + trueArg + + ,option "" ["all"] + "Run hscolour for all targets" + (\f -> allFlags [ hscolourExecutables f + , hscolourTestSuites f + , hscolourBenchmarks f]) + (\v flags -> flags { hscolourExecutables = v + , hscolourTestSuites = v + , hscolourBenchmarks = v }) + trueArg + + ,option "" ["css"] + "Use a cascading style sheet" + hscolourCSS (\v flags -> flags { hscolourCSS = v }) + (reqArgFlag "PATH") + ] + } + +-- ------------------------------------------------------------ +-- * Haddock flags +-- ------------------------------------------------------------ + +data HaddockFlags = HaddockFlags { + haddockProgramPaths :: [(String, FilePath)], + haddockProgramArgs :: [(String, [String])], + haddockHoogle :: Flag Bool, + haddockHtml :: Flag Bool, + haddockHtmlLocation :: Flag String, + haddockExecutables :: Flag Bool, + haddockTestSuites :: Flag Bool, + haddockBenchmarks :: Flag Bool, + haddockInternal :: Flag Bool, + haddockCss :: Flag FilePath, + haddockHscolour :: Flag Bool, + haddockHscolourCss :: Flag FilePath, + haddockContents :: Flag PathTemplate, + haddockDistPref :: Flag FilePath, + 
haddockKeepTempFiles:: Flag Bool, + haddockVerbosity :: Flag Verbosity + } + deriving Show + +defaultHaddockFlags :: HaddockFlags +defaultHaddockFlags = HaddockFlags { + haddockProgramPaths = mempty, + haddockProgramArgs = [], + haddockHoogle = Flag False, + haddockHtml = Flag False, + haddockHtmlLocation = NoFlag, + haddockExecutables = Flag False, + haddockTestSuites = Flag False, + haddockBenchmarks = Flag False, + haddockInternal = Flag False, + haddockCss = NoFlag, + haddockHscolour = Flag False, + haddockHscolourCss = NoFlag, + haddockContents = NoFlag, + haddockDistPref = Flag defaultDistPref, + haddockKeepTempFiles= Flag False, + haddockVerbosity = Flag normal + } + +haddockCommand :: CommandUI HaddockFlags +haddockCommand = CommandUI + { commandName = "haddock" + , commandSynopsis = "Generate Haddock HTML documentation." + , commandDescription = Just $ \_ -> + "Requires the program haddock, version 2.x.\n" + , commandNotes = Nothing + , commandUsage = \pname -> + "Usage: " ++ pname ++ " haddock [FLAGS]\n" + , commandDefaultFlags = defaultHaddockFlags + , commandOptions = \showOrParseArgs -> + haddockOptions showOrParseArgs + ++ programConfigurationPaths progConf ParseArgs + haddockProgramPaths (\v flags -> flags { haddockProgramPaths = v}) + ++ programConfigurationOption progConf showOrParseArgs + haddockProgramArgs (\v fs -> fs { haddockProgramArgs = v }) + ++ programConfigurationOptions progConf ParseArgs + haddockProgramArgs (\v flags -> flags { haddockProgramArgs = v}) + } + where + progConf = addKnownProgram haddockProgram + $ addKnownProgram ghcProgram + $ emptyProgramConfiguration + +haddockOptions :: ShowOrParseArgs -> [OptionField HaddockFlags] +haddockOptions showOrParseArgs = + [optionVerbosity haddockVerbosity + (\v flags -> flags { haddockVerbosity = v }) + ,optionDistPref + haddockDistPref (\d flags -> flags { haddockDistPref = d }) + showOrParseArgs + + ,option "" ["keep-temp-files"] + "Keep temporary files" + haddockKeepTempFiles (\b flags 
-> flags { haddockKeepTempFiles = b }) + trueArg + + ,option "" ["hoogle"] + "Generate a hoogle database" + haddockHoogle (\v flags -> flags { haddockHoogle = v }) + trueArg + + ,option "" ["html"] + "Generate HTML documentation (the default)" + haddockHtml (\v flags -> flags { haddockHtml = v }) + trueArg + + ,option "" ["html-location"] + "Location of HTML documentation for pre-requisite packages" + haddockHtmlLocation (\v flags -> flags { haddockHtmlLocation = v }) + (reqArgFlag "URL") + + ,option "" ["executables"] + "Run haddock for Executables targets" + haddockExecutables (\v flags -> flags { haddockExecutables = v }) + trueArg + + ,option "" ["tests"] + "Run haddock for Test Suite targets" + haddockTestSuites (\v flags -> flags { haddockTestSuites = v }) + trueArg + + ,option "" ["benchmarks"] + "Run haddock for Benchmark targets" + haddockBenchmarks (\v flags -> flags { haddockBenchmarks = v }) + trueArg + + ,option "" ["all"] + "Run haddock for all targets" + (\f -> allFlags [ haddockExecutables f + , haddockTestSuites f + , haddockBenchmarks f]) + (\v flags -> flags { haddockExecutables = v + , haddockTestSuites = v + , haddockBenchmarks = v }) + trueArg + + ,option "" ["internal"] + "Run haddock for internal modules and include all symbols" + haddockInternal (\v flags -> flags { haddockInternal = v }) + trueArg + + ,option "" ["css"] + "Use PATH as the haddock stylesheet" + haddockCss (\v flags -> flags { haddockCss = v }) + (reqArgFlag "PATH") + + ,option "" ["hyperlink-source","hyperlink-sources"] + "Hyperlink the documentation to the source code (using HsColour)" + haddockHscolour (\v flags -> flags { haddockHscolour = v }) + trueArg + + ,option "" ["hscolour-css"] + "Use PATH as the HsColour stylesheet" + haddockHscolourCss (\v flags -> flags { haddockHscolourCss = v }) + (reqArgFlag "PATH") + + ,option "" ["contents-location"] + "Bake URL in as the location for the contents page" + haddockContents (\v flags -> flags { haddockContents = v }) + 
(reqArg' "URL" + (toFlag . toPathTemplate) + (flagToList . fmap fromPathTemplate)) + ] + +emptyHaddockFlags :: HaddockFlags +emptyHaddockFlags = mempty + +instance Monoid HaddockFlags where + mempty = HaddockFlags { + haddockProgramPaths = mempty, + haddockProgramArgs = mempty, + haddockHoogle = mempty, + haddockHtml = mempty, + haddockHtmlLocation = mempty, + haddockExecutables = mempty, + haddockTestSuites = mempty, + haddockBenchmarks = mempty, + haddockInternal = mempty, + haddockCss = mempty, + haddockHscolour = mempty, + haddockHscolourCss = mempty, + haddockContents = mempty, + haddockDistPref = mempty, + haddockKeepTempFiles= mempty, + haddockVerbosity = mempty + } + mappend a b = HaddockFlags { + haddockProgramPaths = combine haddockProgramPaths, + haddockProgramArgs = combine haddockProgramArgs, + haddockHoogle = combine haddockHoogle, + haddockHtml = combine haddockHoogle, + haddockHtmlLocation = combine haddockHtmlLocation, + haddockExecutables = combine haddockExecutables, + haddockTestSuites = combine haddockTestSuites, + haddockBenchmarks = combine haddockBenchmarks, + haddockInternal = combine haddockInternal, + haddockCss = combine haddockCss, + haddockHscolour = combine haddockHscolour, + haddockHscolourCss = combine haddockHscolourCss, + haddockContents = combine haddockContents, + haddockDistPref = combine haddockDistPref, + haddockKeepTempFiles= combine haddockKeepTempFiles, + haddockVerbosity = combine haddockVerbosity + } + where combine field = field a `mappend` field b + +-- ------------------------------------------------------------ +-- * Clean flags +-- ------------------------------------------------------------ + +data CleanFlags = CleanFlags { + cleanSaveConf :: Flag Bool, + cleanDistPref :: Flag FilePath, + cleanVerbosity :: Flag Verbosity + } + deriving Show + +defaultCleanFlags :: CleanFlags +defaultCleanFlags = CleanFlags { + cleanSaveConf = Flag False, + cleanDistPref = Flag defaultDistPref, + cleanVerbosity = Flag normal + } + 
+cleanCommand :: CommandUI CleanFlags +cleanCommand = CommandUI + { commandName = "clean" + , commandSynopsis = "Clean up after a build." + , commandDescription = Just $ \_ -> + "Removes .hi, .o, preprocessed sources, etc.\n" + , commandNotes = Nothing + , commandUsage = \pname -> + "Usage: " ++ pname ++ " clean [FLAGS]\n" + , commandDefaultFlags = defaultCleanFlags + , commandOptions = \showOrParseArgs -> + [optionVerbosity cleanVerbosity (\v flags -> flags { cleanVerbosity = v }) + ,optionDistPref + cleanDistPref (\d flags -> flags { cleanDistPref = d }) + showOrParseArgs + + ,option "s" ["save-configure"] + "Do not remove the configuration file (dist/setup-config) during cleaning. Saves need to reconfigure." + cleanSaveConf (\v flags -> flags { cleanSaveConf = v }) + trueArg + ] + } + +emptyCleanFlags :: CleanFlags +emptyCleanFlags = mempty + +instance Monoid CleanFlags where + mempty = CleanFlags { + cleanSaveConf = mempty, + cleanDistPref = mempty, + cleanVerbosity = mempty + } + mappend a b = CleanFlags { + cleanSaveConf = combine cleanSaveConf, + cleanDistPref = combine cleanDistPref, + cleanVerbosity = combine cleanVerbosity + } + where combine field = field a `mappend` field b + +-- ------------------------------------------------------------ +-- * Build flags +-- ------------------------------------------------------------ + +data BuildFlags = BuildFlags { + buildProgramPaths :: [(String, FilePath)], + buildProgramArgs :: [(String, [String])], + buildDistPref :: Flag FilePath, + buildVerbosity :: Flag Verbosity, + buildNumJobs :: Flag (Maybe Int), + -- TODO: this one should not be here, it's just that the silly + -- UserHooks stop us from passing extra info in other ways + buildArgs :: [String] + } + deriving Show + +{-# DEPRECATED buildVerbose "Use buildVerbosity instead" #-} +buildVerbose :: BuildFlags -> Verbosity +buildVerbose = fromFlagOrDefault normal . 
buildVerbosity + +defaultBuildFlags :: BuildFlags +defaultBuildFlags = BuildFlags { + buildProgramPaths = mempty, + buildProgramArgs = [], + buildDistPref = Flag defaultDistPref, + buildVerbosity = Flag normal, + buildNumJobs = mempty, + buildArgs = [] + } + +buildCommand :: ProgramConfiguration -> CommandUI BuildFlags +buildCommand progConf = CommandUI + { commandName = "build" + , commandSynopsis = "Compile all/specific components." + , commandDescription = Just $ \_ -> wrapText $ + "Components encompass executables, tests, and benchmarks.\n" + ++ "\n" + ++ "Affected by configuration options, see `configure`.\n" + , commandNotes = Just $ \pname -> + "Examples:\n" + ++ " " ++ pname ++ " build " + ++ " All the components in the package\n" + ++ " " ++ pname ++ " build foo " + ++ " A component (i.e. lib, exe, test suite)\n\n" + ++ programFlagsDescription progConf +--TODO: re-enable once we have support for module/file targets +-- ++ " " ++ pname ++ " build Foo.Bar " +-- ++ " A module\n" +-- ++ " " ++ pname ++ " build Foo/Bar.hs" +-- ++ " A file\n\n" +-- ++ "If a target is ambiguous it can be qualified with the component " +-- ++ "name, e.g.\n" +-- ++ " " ++ pname ++ " build foo:Foo.Bar\n" +-- ++ " " ++ pname ++ " build testsuite1:Foo/Bar.hs\n" + , commandUsage = usageAlternatives "build" $ + [ "[FLAGS]" + , "COMPONENTS [FLAGS]" + ] + , commandDefaultFlags = defaultBuildFlags + , commandOptions = \showOrParseArgs -> + [ optionVerbosity + buildVerbosity (\v flags -> flags { buildVerbosity = v }) + + , optionDistPref + buildDistPref (\d flags -> flags { buildDistPref = d }) showOrParseArgs + ] + ++ buildOptions progConf showOrParseArgs + } + +buildOptions :: ProgramConfiguration -> ShowOrParseArgs + -> [OptionField BuildFlags] +buildOptions progConf showOrParseArgs = + [ optionNumJobs + buildNumJobs (\v flags -> flags { buildNumJobs = v }) + ] + + ++ programConfigurationPaths progConf showOrParseArgs + buildProgramPaths (\v flags -> flags { buildProgramPaths = v}) + + 
++ programConfigurationOption progConf showOrParseArgs + buildProgramArgs (\v fs -> fs { buildProgramArgs = v }) + + ++ programConfigurationOptions progConf showOrParseArgs + buildProgramArgs (\v flags -> flags { buildProgramArgs = v}) + +emptyBuildFlags :: BuildFlags +emptyBuildFlags = mempty + +instance Monoid BuildFlags where + mempty = BuildFlags { + buildProgramPaths = mempty, + buildProgramArgs = mempty, + buildVerbosity = mempty, + buildDistPref = mempty, + buildNumJobs = mempty, + buildArgs = mempty + } + mappend a b = BuildFlags { + buildProgramPaths = combine buildProgramPaths, + buildProgramArgs = combine buildProgramArgs, + buildVerbosity = combine buildVerbosity, + buildDistPref = combine buildDistPref, + buildNumJobs = combine buildNumJobs, + buildArgs = combine buildArgs + } + where combine field = field a `mappend` field b + +-- ------------------------------------------------------------ +-- * REPL Flags +-- ------------------------------------------------------------ + +data ReplFlags = ReplFlags { + replProgramPaths :: [(String, FilePath)], + replProgramArgs :: [(String, [String])], + replDistPref :: Flag FilePath, + replVerbosity :: Flag Verbosity, + replReload :: Flag Bool + } + deriving Show + +defaultReplFlags :: ReplFlags +defaultReplFlags = ReplFlags { + replProgramPaths = mempty, + replProgramArgs = [], + replDistPref = Flag defaultDistPref, + replVerbosity = Flag normal, + replReload = Flag False + } + +instance Monoid ReplFlags where + mempty = ReplFlags { + replProgramPaths = mempty, + replProgramArgs = mempty, + replVerbosity = mempty, + replDistPref = mempty, + replReload = mempty + } + mappend a b = ReplFlags { + replProgramPaths = combine replProgramPaths, + replProgramArgs = combine replProgramArgs, + replVerbosity = combine replVerbosity, + replDistPref = combine replDistPref, + replReload = combine replReload + } + where combine field = field a `mappend` field b + +replCommand :: ProgramConfiguration -> CommandUI ReplFlags 
+replCommand progConf = CommandUI + { commandName = "repl" + , commandSynopsis = + "Open an interpreter session for the given component." + , commandDescription = Just $ \pname -> wrapText $ + "If the current directory contains no package, ignores COMPONENT " + ++ "parameters and opens an interactive interpreter session; if a " + ++ "sandbox is present, its package database will be used.\n" + ++ "\n" + ++ "Otherwise, (re)configures with the given or default flags, and " + ++ "loads the interpreter with the relevant modules. For executables, " + ++ "tests and benchmarks, loads the main module (and its " + ++ "dependencies); for libraries all exposed/other modules.\n" + ++ "\n" + ++ "The default component is the library itself, or the executable " + ++ "if that is the only component.\n" + ++ "\n" + ++ "Support for loading specific modules is planned but not " + ++ "implemented yet. For certain scenarios, `" ++ pname + ++ " exec -- ghci :l Foo` may be used instead. Note that `exec` will " + ++ "not (re)configure and you will have to specify the location of " + ++ "other modules, if required.\n" + + , commandNotes = Just $ \pname -> + "Examples:\n" + ++ " " ++ pname ++ " repl " + ++ " The first component in the package\n" + ++ " " ++ pname ++ " repl foo " + ++ " A named component (i.e. 
lib, exe, test suite)\n" + ++ " " ++ pname ++ " repl --ghc-options=\"-lstdc++\"" + ++ " Specifying flags for interpreter\n" +--TODO: re-enable once we have support for module/file targets +-- ++ " " ++ pname ++ " repl Foo.Bar " +-- ++ " A module\n" +-- ++ " " ++ pname ++ " repl Foo/Bar.hs" +-- ++ " A file\n\n" +-- ++ "If a target is ambiguous it can be qualified with the component " +-- ++ "name, e.g.\n" +-- ++ " " ++ pname ++ " repl foo:Foo.Bar\n" +-- ++ " " ++ pname ++ " repl testsuite1:Foo/Bar.hs\n" + , commandUsage = \pname -> "Usage: " ++ pname ++ " repl [COMPONENT] [FLAGS]\n" + , commandDefaultFlags = defaultReplFlags + , commandOptions = \showOrParseArgs -> + optionVerbosity replVerbosity (\v flags -> flags { replVerbosity = v }) + : optionDistPref + replDistPref (\d flags -> flags { replDistPref = d }) + showOrParseArgs + + : programConfigurationPaths progConf showOrParseArgs + replProgramPaths (\v flags -> flags { replProgramPaths = v}) + + ++ programConfigurationOption progConf showOrParseArgs + replProgramArgs (\v flags -> flags { replProgramArgs = v}) + + ++ programConfigurationOptions progConf showOrParseArgs + replProgramArgs (\v flags -> flags { replProgramArgs = v}) + + ++ case showOrParseArgs of + ParseArgs -> + [ option "" ["reload"] + "Used from within an interpreter to update files." + replReload (\v flags -> flags { replReload = v }) + trueArg + ] + _ -> [] + } + +-- ------------------------------------------------------------ +-- * Test flags +-- ------------------------------------------------------------ + +data TestShowDetails = Never | Failures | Always | Streaming + deriving (Eq, Ord, Enum, Bounded, Show) + +knownTestShowDetails :: [TestShowDetails] +knownTestShowDetails = [minBound..maxBound] + +instance Text TestShowDetails where + disp = Disp.text . lowercase . show + + parse = maybe Parse.pfail return . 
classify =<< ident + where + ident = Parse.munch1 (\c -> isAlpha c || c == '_' || c == '-') + classify str = lookup (lowercase str) enumMap + enumMap :: [(String, TestShowDetails)] + enumMap = [ (display x, x) + | x <- knownTestShowDetails ] + +--TODO: do we need this instance? +instance Monoid TestShowDetails where + mempty = Never + mappend a b = if a < b then b else a + +data TestFlags = TestFlags { + testDistPref :: Flag FilePath, + testVerbosity :: Flag Verbosity, + testHumanLog :: Flag PathTemplate, + testMachineLog :: Flag PathTemplate, + testShowDetails :: Flag TestShowDetails, + testKeepTix :: Flag Bool, + -- TODO: think about if/how options are passed to test exes + testOptions :: [PathTemplate] + } + +defaultTestFlags :: TestFlags +defaultTestFlags = TestFlags { + testDistPref = Flag defaultDistPref, + testVerbosity = Flag normal, + testHumanLog = toFlag $ toPathTemplate $ "$pkgid-$test-suite.log", + testMachineLog = toFlag $ toPathTemplate $ "$pkgid.log", + testShowDetails = toFlag Failures, + testKeepTix = toFlag False, + testOptions = [] + } + +testCommand :: CommandUI TestFlags +testCommand = CommandUI + { commandName = "test" + , commandSynopsis = + "Run all/specific tests in the test suite." + , commandDescription = Just $ \pname -> wrapText $ + "If necessary (re)configures with `--enable-tests` flag and builds" + ++ " the test suite.\n" + ++ "\n" + ++ "Remember that the tests' dependencies must be installed if there" + ++ " are additional ones; e.g. 
with `" ++ pname + ++ " install --only-dependencies --enable-tests`.\n" + ++ "\n" + ++ "By defining UserHooks in a custom Setup.hs, the package can" + ++ " define actions to be executed before and after running tests.\n" + , commandNotes = Nothing + , commandUsage = usageAlternatives "test" + [ "[FLAGS]" + , "TESTCOMPONENTS [FLAGS]" + ] + , commandDefaultFlags = defaultTestFlags + , commandOptions = \showOrParseArgs -> + [ optionVerbosity testVerbosity (\v flags -> flags { testVerbosity = v }) + , optionDistPref + testDistPref (\d flags -> flags { testDistPref = d }) + showOrParseArgs + , option [] ["log"] + ("Log all test suite results to file (name template can use " + ++ "$pkgid, $compiler, $os, $arch, $test-suite, $result)") + testHumanLog (\v flags -> flags { testHumanLog = v }) + (reqArg' "TEMPLATE" + (toFlag . toPathTemplate) + (flagToList . fmap fromPathTemplate)) + , option [] ["machine-log"] + ("Produce a machine-readable log file (name template can use " + ++ "$pkgid, $compiler, $os, $arch, $result)") + testMachineLog (\v flags -> flags { testMachineLog = v }) + (reqArg' "TEMPLATE" + (toFlag . toPathTemplate) + (flagToList . fmap fromPathTemplate)) + , option [] ["show-details"] + ("'always': always show results of individual test cases. " + ++ "'never': never show results of individual test cases. " + ++ "'failures': show results of failing test cases. " + ++ "'streaming': show results of test cases in real time.") + testShowDetails (\v flags -> flags { testShowDetails = v }) + (reqArg "FILTER" + (readP_to_E (\_ -> "--show-details flag expects one of " + ++ intercalate ", " + (map display knownTestShowDetails)) + (fmap toFlag parse)) + (flagToList . 
fmap display)) + , option [] ["keep-tix-files"] + "keep .tix files for HPC between test runs" + testKeepTix (\v flags -> flags { testKeepTix = v}) + trueArg + , option [] ["test-options"] + ("give extra options to test executables " + ++ "(name templates can use $pkgid, $compiler, " + ++ "$os, $arch, $test-suite)") + testOptions (\v flags -> flags { testOptions = v }) + (reqArg' "TEMPLATES" (map toPathTemplate . splitArgs) + (const [])) + , option [] ["test-option"] + ("give extra option to test executables " + ++ "(no need to quote options containing spaces, " + ++ "name template can use $pkgid, $compiler, " + ++ "$os, $arch, $test-suite)") + testOptions (\v flags -> flags { testOptions = v }) + (reqArg' "TEMPLATE" (\x -> [toPathTemplate x]) + (map fromPathTemplate)) + ] + } + +emptyTestFlags :: TestFlags +emptyTestFlags = mempty + +instance Monoid TestFlags where + mempty = TestFlags { + testDistPref = mempty, + testVerbosity = mempty, + testHumanLog = mempty, + testMachineLog = mempty, + testShowDetails = mempty, + testKeepTix = mempty, + testOptions = mempty + } + mappend a b = TestFlags { + testDistPref = combine testDistPref, + testVerbosity = combine testVerbosity, + testHumanLog = combine testHumanLog, + testMachineLog = combine testMachineLog, + testShowDetails = combine testShowDetails, + testKeepTix = combine testKeepTix, + testOptions = combine testOptions + } + where combine field = field a `mappend` field b + +-- ------------------------------------------------------------ +-- * Benchmark flags +-- ------------------------------------------------------------ + +data BenchmarkFlags = BenchmarkFlags { + benchmarkDistPref :: Flag FilePath, + benchmarkVerbosity :: Flag Verbosity, + benchmarkOptions :: [PathTemplate] + } + +defaultBenchmarkFlags :: BenchmarkFlags +defaultBenchmarkFlags = BenchmarkFlags { + benchmarkDistPref = Flag defaultDistPref, + benchmarkVerbosity = Flag normal, + benchmarkOptions = [] + } + +benchmarkCommand :: CommandUI 
BenchmarkFlags +benchmarkCommand = CommandUI + { commandName = "bench" + , commandSynopsis = + "Run all/specific benchmarks." + , commandDescription = Just $ \pname -> wrapText $ + "If necessary (re)configures with `--enable-benchmarks` flag and" + ++ " builds the benchmarks.\n" + ++ "\n" + ++ "Remember that the benchmarks' dependencies must be installed if" + ++ " there are additional ones; e.g. with `" ++ pname + ++ " install --only-dependencies --enable-benchmarks`.\n" + ++ "\n" + ++ "By defining UserHooks in a custom Setup.hs, the package can" + ++ " define actions to be executed before and after running" + ++ " benchmarks.\n" + , commandNotes = Nothing + , commandUsage = usageAlternatives "bench" + [ "[FLAGS]" + , "BENCHCOMPONENTS [FLAGS]" + ] + , commandDefaultFlags = defaultBenchmarkFlags + , commandOptions = \showOrParseArgs -> + [ optionVerbosity benchmarkVerbosity + (\v flags -> flags { benchmarkVerbosity = v }) + , optionDistPref + benchmarkDistPref (\d flags -> flags { benchmarkDistPref = d }) + showOrParseArgs + , option [] ["benchmark-options"] + ("give extra options to benchmark executables " + ++ "(name templates can use $pkgid, $compiler, " + ++ "$os, $arch, $benchmark)") + benchmarkOptions (\v flags -> flags { benchmarkOptions = v }) + (reqArg' "TEMPLATES" (map toPathTemplate . 
splitArgs) + (const [])) + , option [] ["benchmark-option"] + ("give extra option to benchmark executables " + ++ "(no need to quote options containing spaces, " + ++ "name template can use $pkgid, $compiler, " + ++ "$os, $arch, $benchmark)") + benchmarkOptions (\v flags -> flags { benchmarkOptions = v }) + (reqArg' "TEMPLATE" (\x -> [toPathTemplate x]) + (map fromPathTemplate)) + ] + } + +emptyBenchmarkFlags :: BenchmarkFlags +emptyBenchmarkFlags = mempty + +instance Monoid BenchmarkFlags where + mempty = BenchmarkFlags { + benchmarkDistPref = mempty, + benchmarkVerbosity = mempty, + benchmarkOptions = mempty + } + mappend a b = BenchmarkFlags { + benchmarkDistPref = combine benchmarkDistPref, + benchmarkVerbosity = combine benchmarkVerbosity, + benchmarkOptions = combine benchmarkOptions + } + where combine field = field a `mappend` field b + +-- ------------------------------------------------------------ +-- * Shared options utils +-- ------------------------------------------------------------ + +programFlagsDescription :: ProgramConfiguration -> String +programFlagsDescription progConf = + "The flags --with-PROG and --PROG-option(s) can be used with" + ++ " the following programs:" + ++ (concatMap (\line -> "\n " ++ unwords line) . wrapLine 77 . sort) + [ programName prog | (prog, _) <- knownPrograms progConf ] + ++ "\n" + +-- | For each known program @PROG@ in 'progConf', produce a @with-PROG@ +-- 'OptionField'. +programConfigurationPaths + :: ProgramConfiguration + -> ShowOrParseArgs + -> (flags -> [(String, FilePath)]) + -> ([(String, FilePath)] -> (flags -> flags)) + -> [OptionField flags] +programConfigurationPaths progConf showOrParseArgs get set = + programConfigurationPaths' ("with-" ++) progConf showOrParseArgs get set + +-- | Like 'programConfigurationPaths', but allows to customise the option name. 
+programConfigurationPaths' + :: (String -> String) + -> ProgramConfiguration + -> ShowOrParseArgs + -> (flags -> [(String, FilePath)]) + -> ([(String, FilePath)] -> (flags -> flags)) + -> [OptionField flags] +programConfigurationPaths' mkName progConf showOrParseArgs get set = + case showOrParseArgs of + -- we don't want a verbose help text list so we just show a generic one: + ShowArgs -> [withProgramPath "PROG"] + ParseArgs -> map (withProgramPath . programName . fst) + (knownPrograms progConf) + where + withProgramPath prog = + option "" [mkName prog] + ("give the path to " ++ prog) + get set + (reqArg' "PATH" (\path -> [(prog, path)]) + (\progPaths -> [ path | (prog', path) <- progPaths, prog==prog' ])) + +-- | For each known program @PROG@ in 'progConf', produce a @PROG-option@ +-- 'OptionField'. +programConfigurationOption + :: ProgramConfiguration + -> ShowOrParseArgs + -> (flags -> [(String, [String])]) + -> ([(String, [String])] -> (flags -> flags)) + -> [OptionField flags] +programConfigurationOption progConf showOrParseArgs get set = + case showOrParseArgs of + -- we don't want a verbose help text list so we just show a generic one: + ShowArgs -> [programOption "PROG"] + ParseArgs -> map (programOption . programName . fst) + (knownPrograms progConf) + where + programOption prog = + option "" [prog ++ "-option"] + ("give an extra option to " ++ prog ++ + " (no need to quote options containing spaces)") + get set + (reqArg' "OPT" (\arg -> [(prog, [arg])]) + (\progArgs -> concat [ args + | (prog', args) <- progArgs, prog==prog' ])) + +-- | For each known program @PROG@ in 'progConf', produce a @PROG-options@ +-- 'OptionField'. 
+programConfigurationOptions + :: ProgramConfiguration + -> ShowOrParseArgs + -> (flags -> [(String, [String])]) + -> ([(String, [String])] -> (flags -> flags)) + -> [OptionField flags] +programConfigurationOptions progConf showOrParseArgs get set = + case showOrParseArgs of + -- we don't want a verbose help text list so we just show a generic one: + ShowArgs -> [programOptions "PROG"] + ParseArgs -> map (programOptions . programName . fst) + (knownPrograms progConf) + where + programOptions prog = + option "" [prog ++ "-options"] + ("give extra options to " ++ prog) + get set + (reqArg' "OPTS" (\args -> [(prog, splitArgs args)]) (const [])) + +-- ------------------------------------------------------------ +-- * GetOpt Utils +-- ------------------------------------------------------------ + +boolOpt :: SFlags -> SFlags + -> MkOptDescr (a -> Flag Bool) (Flag Bool -> a -> a) a +boolOpt = Command.boolOpt flagToMaybe Flag + +boolOpt' :: OptFlags -> OptFlags + -> MkOptDescr (a -> Flag Bool) (Flag Bool -> a -> a) a +boolOpt' = Command.boolOpt' flagToMaybe Flag + +trueArg, falseArg :: MkOptDescr (a -> Flag Bool) (Flag Bool -> a -> a) a +trueArg sfT lfT = boolOpt' (sfT, lfT) ([], []) sfT lfT +falseArg sfF lfF = boolOpt' ([], []) (sfF, lfF) sfF lfF + +reqArgFlag :: ArgPlaceHolder -> SFlags -> LFlags -> Description -> + (b -> Flag String) -> (Flag String -> b -> b) -> OptDescr b +reqArgFlag ad = reqArg ad (succeedReadE Flag) flagToList + +optionDistPref :: (flags -> Flag FilePath) + -> (Flag FilePath -> flags -> flags) + -> ShowOrParseArgs + -> OptionField flags +optionDistPref get set = \showOrParseArgs -> + option "" (distPrefFlagName showOrParseArgs) + ( "The directory where Cabal puts generated build files " + ++ "(default " ++ defaultDistPref ++ ")") + get set + (reqArgFlag "DIR") + where + distPrefFlagName ShowArgs = ["builddir"] + distPrefFlagName ParseArgs = ["builddir", "distdir", "distpref"] + +optionVerbosity :: (flags -> Flag Verbosity) + -> (Flag Verbosity -> 
flags -> flags) + -> OptionField flags +optionVerbosity get set = + option "v" ["verbose"] + "Control verbosity (n is 0--3, default verbosity level is 1)" + get set + (optArg "n" (fmap Flag flagToVerbosity) + (Flag verbose) -- default Value if no n is given + (fmap (Just . showForCabal) . flagToList)) + +optionNumJobs :: (flags -> Flag (Maybe Int)) + -> (Flag (Maybe Int) -> flags -> flags) + -> OptionField flags +optionNumJobs get set = + option "j" ["jobs"] + "Run NUM jobs simultaneously (or '$ncpus' if no NUM is given)." + get set + (optArg "NUM" (fmap Flag numJobsParser) + (Flag Nothing) + (map (Just . maybe "$ncpus" show) . flagToList)) + where + numJobsParser :: ReadE (Maybe Int) + numJobsParser = ReadE $ \s -> + case s of + "$ncpus" -> Right Nothing + _ -> case reads s of + [(n, "")] + | n < 1 -> Left "The number of jobs should be 1 or more." + | otherwise -> Right (Just n) + _ -> Left "The jobs value should be a number or '$ncpus'" + +-- ------------------------------------------------------------ +-- * Other Utils +-- ------------------------------------------------------------ + +-- | Arguments to pass to a @configure@ script, e.g. generated by +-- @autoconf@. +configureArgs :: Bool -> ConfigFlags -> [String] +configureArgs bcHack flags + = hc_flag + ++ optFlag "with-hc-pkg" configHcPkg + ++ optFlag' "prefix" prefix + ++ optFlag' "bindir" bindir + ++ optFlag' "libdir" libdir + ++ optFlag' "libexecdir" libexecdir + ++ optFlag' "datadir" datadir + ++ optFlag' "sysconfdir" sysconfdir + ++ configConfigureArgs flags + where + hc_flag = case (configHcFlavor flags, configHcPath flags) of + (_, Flag hc_path) -> [hc_flag_name ++ hc_path] + (Flag hc, NoFlag) -> [hc_flag_name ++ display hc] + (NoFlag,NoFlag) -> [] + hc_flag_name + --TODO kill off thic bc hack when defaultUserHooks is removed. 
+ | bcHack = "--with-hc=" + | otherwise = "--with-compiler=" + optFlag name config_field = case config_field flags of + Flag p -> ["--" ++ name ++ "=" ++ p] + NoFlag -> [] + optFlag' name config_field = optFlag name (fmap fromPathTemplate + . config_field + . configInstallDirs) + +configureCCompiler :: Verbosity -> ProgramConfiguration + -> IO (FilePath, [String]) +configureCCompiler verbosity lbi = configureProg verbosity lbi gccProgram + +configureLinker :: Verbosity -> ProgramConfiguration -> IO (FilePath, [String]) +configureLinker verbosity lbi = configureProg verbosity lbi ldProgram + +configureProg :: Verbosity -> ProgramConfiguration -> Program + -> IO (FilePath, [String]) +configureProg verbosity programConfig prog = do + (p, _) <- requireProgram verbosity prog programConfig + let pInv = programInvocation p [] + return (progInvokePath pInv, progInvokeArgs pInv) + +-- | Helper function to split a string into a list of arguments. +-- It's supposed to handle quoted things sensibly, eg: +-- +-- > splitArgs "--foo=\"C:\Program Files\Bar\" --baz" +-- > = ["--foo=C:\Program Files\Bar", "--baz"] +-- +splitArgs :: String -> [String] +splitArgs = space [] + where + space :: String -> String -> [String] + space w [] = word w [] + space w ( c :s) + | isSpace c = word w (space [] s) + space w ('"':s) = string w s + space w s = nonstring w s + + string :: String -> String -> [String] + string w [] = word w [] + string w ('"':s) = space w s + string w ( c :s) = string (c:w) s + + nonstring :: String -> String -> [String] + nonstring w [] = word w [] + nonstring w ('"':s) = string w s + nonstring w ( c :s) = space (c:w) s + + word [] s = s + word w s = reverse w : s + +-- The test cases kinda have to be rewritten from the ground up... 
:/ +--hunitTests :: [Test] +--hunitTests = +-- let m = [("ghc", GHC), ("nhc98", NHC), ("hugs", Hugs)] +-- (flags, commands', unkFlags, ers) +-- = getOpt Permute options ["configure", "foobar", "--prefix=/foo", "--ghc", "--nhc98", "--hugs", "--with-compiler=/comp", "--unknown1", "--unknown2", "--install-prefix=/foo", "--user", "--global"] +-- in [TestLabel "very basic option parsing" $ TestList [ +-- "getOpt flags" ~: "failed" ~: +-- [Prefix "/foo", GhcFlag, NhcFlag, HugsFlag, +-- WithCompiler "/comp", InstPrefix "/foo", UserFlag, GlobalFlag] +-- ~=? flags, +-- "getOpt commands" ~: "failed" ~: ["configure", "foobar"] ~=? commands', +-- "getOpt unknown opts" ~: "failed" ~: +-- ["--unknown1", "--unknown2"] ~=? unkFlags, +-- "getOpt errors" ~: "failed" ~: [] ~=? ers], +-- +-- TestLabel "test location of various compilers" $ TestList +-- ["configure parsing for prefix and compiler flag" ~: "failed" ~: +-- (Right (ConfigCmd (Just comp, Nothing, Just "/usr/local"), [])) +-- ~=? (parseArgs ["--prefix=/usr/local", "--"++name, "configure"]) +-- | (name, comp) <- m], +-- +-- TestLabel "find the package tool" $ TestList +-- ["configure parsing for prefix comp flag, withcompiler" ~: "failed" ~: +-- (Right (ConfigCmd (Just comp, Just "/foo/comp", Just "/usr/local"), [])) +-- ~=? (parseArgs ["--prefix=/usr/local", "--"++name, +-- "--with-compiler=/foo/comp", "configure"]) +-- | (name, comp) <- m], +-- +-- TestLabel "simpler commands" $ TestList +-- [flag ~: "failed" ~: (Right (flagCmd, [])) ~=? (parseArgs [flag]) +-- | (flag, flagCmd) <- [("build", BuildCmd), +-- ("install", InstallCmd Nothing False), +-- ("sdist", SDistCmd), +-- ("register", RegisterCmd False)] +-- ] +-- ] + +{- Testing ideas: + * IO to look for hugs and hugs-pkg (which hugs, etc) + * quickCheck to test permutations of arguments + * what other options can we over-ride with a command-line flag? 
+-} diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/SrcDist.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/SrcDist.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/SrcDist.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/SrcDist.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,490 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.SrcDist +-- Copyright : Simon Marlow 2004 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This handles the @sdist@ command. The module exports an 'sdist' action but +-- also some of the phases that make it up so that other tools can use just the +-- bits they need. In particular the preparation of the tree of files to go +-- into the source tarball is separated from actually building the source +-- tarball. +-- +-- The 'createArchive' action uses the external @tar@ program and assumes that +-- it accepts the @-z@ flag. Neither of these assumptions are valid on Windows. +-- The 'sdist' action now also does some distribution QA checks. + +-- NOTE: FIX: we don't have a great way of testing this module, since +-- we can't easily look inside a tarball once its created. + +module Distribution.Simple.SrcDist ( + -- * The top level action + sdist, + + -- ** Parts of 'sdist' + printPackageProblems, + prepareTree, + createArchive, + + -- ** Snapshots + prepareSnapshotTree, + snapshotPackage, + snapshotVersion, + dateToSnapshotNumber, + + -- * Extracting the source files + listPackageSources + + ) where + +import Distribution.PackageDescription + ( PackageDescription(..), BuildInfo(..), Executable(..), Library(..) + , TestSuite(..), TestSuiteInterface(..), Benchmark(..) + , BenchmarkInterface(..) 
) +import Distribution.PackageDescription.Check + ( PackageCheck(..), checkConfiguredPackage, checkPackageFiles ) +import Distribution.Package + ( PackageIdentifier(pkgVersion), Package(..), packageVersion ) +import Distribution.ModuleName (ModuleName) +import qualified Distribution.ModuleName as ModuleName +import Distribution.Version + ( Version(versionBranch) ) +import Distribution.Simple.Utils + ( createDirectoryIfMissingVerbose, withUTF8FileContents, writeUTF8File + , installOrdinaryFiles, installMaybeExecutableFiles + , findFile, findFileWithExtension, matchFileGlob + , withTempDirectory, defaultPackageDesc + , die, warn, notice, info, setupMessage ) +import Distribution.Simple.Setup ( Flag(..), SDistFlags(..) + , fromFlag, flagToMaybe) +import Distribution.Simple.PreProcess ( PPSuffixHandler, ppSuffixes + , preprocessComponent ) +import Distribution.Simple.LocalBuildInfo + ( LocalBuildInfo(..), withAllComponentsInBuildOrder ) +import Distribution.Simple.BuildPaths ( autogenModuleName ) +import Distribution.Simple.Program ( defaultProgramConfiguration, requireProgram, + runProgram, programProperties, tarProgram ) +import Distribution.Text + ( display ) + +import Control.Monad(when, unless, forM) +import Data.Char (toLower) +import Data.List (partition, isPrefixOf) +import qualified Data.Map as Map +import Data.Maybe (isNothing, catMaybes) +import Data.Time (UTCTime, getCurrentTime, toGregorian, utctDay) +import System.Directory ( doesFileExist ) +import System.IO (IOMode(WriteMode), hPutStrLn, withFile) +import Distribution.Verbosity (Verbosity) +import System.FilePath + ( (), (<.>), dropExtension, isAbsolute ) + +-- |Create a source distribution. 
+sdist :: PackageDescription -- ^information from the tarball + -> Maybe LocalBuildInfo -- ^Information from configure + -> SDistFlags -- ^verbosity & snapshot + -> (FilePath -> FilePath) -- ^build prefix (temp dir) + -> [PPSuffixHandler] -- ^ extra preprocessors (includes suffixes) + -> IO () +sdist pkg mb_lbi flags mkTmpDir pps = + + -- When given --list-sources, just output the list of sources to a file. + case (sDistListSources flags) of + Flag path -> withFile path WriteMode $ \outHandle -> do + (ordinary, maybeExecutable) <- listPackageSources verbosity pkg pps + mapM_ (hPutStrLn outHandle) ordinary + mapM_ (hPutStrLn outHandle) maybeExecutable + notice verbosity $ "List of package sources written to file '" + ++ path ++ "'" + NoFlag -> do + -- do some QA + printPackageProblems verbosity pkg + + when (isNothing mb_lbi) $ + warn verbosity "Cannot run preprocessors. Run 'configure' command first." + + date <- getCurrentTime + let pkg' | snapshot = snapshotPackage date pkg + | otherwise = pkg + + case flagToMaybe (sDistDirectory flags) of + Just targetDir -> do + generateSourceDir targetDir pkg' + info verbosity $ "Source directory created: " ++ targetDir + + Nothing -> do + createDirectoryIfMissingVerbose verbosity True tmpTargetDir + withTempDirectory verbosity tmpTargetDir "sdist." 
$ \tmpDir -> do + let targetDir = tmpDir tarBallName pkg' + generateSourceDir targetDir pkg' + targzFile <- createArchive verbosity pkg' mb_lbi tmpDir targetPref + notice verbosity $ "Source tarball created: " ++ targzFile + + where + generateSourceDir targetDir pkg' = do + + setupMessage verbosity "Building source dist for" (packageId pkg') + prepareTree verbosity pkg' mb_lbi targetDir pps + when snapshot $ + overwriteSnapshotPackageDesc verbosity pkg' targetDir + + verbosity = fromFlag (sDistVerbosity flags) + snapshot = fromFlag (sDistSnapshot flags) + + distPref = fromFlag $ sDistDistPref flags + targetPref = distPref + tmpTargetDir = mkTmpDir distPref + +-- | List all source files of a package. Returns a tuple of lists: first +-- component is a list of ordinary files, second one is a list of those files +-- that may be executable. +listPackageSources :: Verbosity -- ^ verbosity + -> PackageDescription -- ^ info from the cabal file + -> [PPSuffixHandler] -- ^ extra preprocessors (include + -- suffixes) + -> IO ([FilePath], [FilePath]) +listPackageSources verbosity pkg_descr0 pps = do + -- Call helpers that actually do all work. + ordinary <- listPackageSourcesOrdinary verbosity pkg_descr pps + maybeExecutable <- listPackageSourcesMaybeExecutable pkg_descr + return (ordinary, maybeExecutable) + where + pkg_descr = filterAutogenModule pkg_descr0 + +-- | List those source files that may be executable (e.g. the configure script). +listPackageSourcesMaybeExecutable :: PackageDescription -> IO [FilePath] +listPackageSourcesMaybeExecutable pkg_descr = + -- Extra source files. + fmap concat . forM (extraSrcFiles pkg_descr) $ \fpath -> matchFileGlob fpath + +-- | List those source files that should be copied with ordinary permissions. +listPackageSourcesOrdinary :: Verbosity + -> PackageDescription + -> [PPSuffixHandler] + -> IO [FilePath] +listPackageSourcesOrdinary verbosity pkg_descr pps = + fmap concat . sequence $ + [ + -- Library sources. 
+ withLib $ \Library { exposedModules = modules, libBuildInfo = libBi } -> + allSourcesBuildInfo libBi pps modules + + -- Executables sources. + , fmap concat + . withExe $ \Executable { modulePath = mainPath, buildInfo = exeBi } -> do + biSrcs <- allSourcesBuildInfo exeBi pps [] + mainSrc <- findMainExeFile exeBi pps mainPath + return (mainSrc:biSrcs) + + -- Test suites sources. + , fmap concat + . withTest $ \t -> do + let bi = testBuildInfo t + case testInterface t of + TestSuiteExeV10 _ mainPath -> do + biSrcs <- allSourcesBuildInfo bi pps [] + srcMainFile <- do + ppFile <- findFileWithExtension (ppSuffixes pps) + (hsSourceDirs bi) (dropExtension mainPath) + case ppFile of + Nothing -> findFile (hsSourceDirs bi) mainPath + Just pp -> return pp + return (srcMainFile:biSrcs) + TestSuiteLibV09 _ m -> + allSourcesBuildInfo bi pps [m] + TestSuiteUnsupported tp -> die $ "Unsupported test suite type: " + ++ show tp + + -- Benchmarks sources. + , fmap concat + . withBenchmark $ \bm -> do + let bi = benchmarkBuildInfo bm + case benchmarkInterface bm of + BenchmarkExeV10 _ mainPath -> do + biSrcs <- allSourcesBuildInfo bi pps [] + srcMainFile <- do + ppFile <- findFileWithExtension (ppSuffixes pps) + (hsSourceDirs bi) (dropExtension mainPath) + case ppFile of + Nothing -> findFile (hsSourceDirs bi) mainPath + Just pp -> return pp + return (srcMainFile:biSrcs) + BenchmarkUnsupported tp -> die $ "Unsupported benchmark type: " + ++ show tp + + -- Data files. + , fmap concat + . forM (dataFiles pkg_descr) $ \filename -> + matchFileGlob (dataDir pkg_descr filename) + + -- Extra doc files. + , fmap concat + . forM (extraDocFiles pkg_descr) $ \ filename -> + matchFileGlob filename + + -- License file(s). + , return (licenseFiles pkg_descr) + + -- Install-include files. + , withLib $ \ l -> do + let lbi = libBuildInfo l + relincdirs = "." : filter (not.isAbsolute) (includeDirs lbi) + mapM (fmap snd . 
findIncludeFile relincdirs) (installIncludes lbi) + + -- Setup script, if it exists. + , fmap (maybe [] (\f -> [f])) $ findSetupFile "" + + -- The .cabal file itself. + , fmap (\d -> [d]) (defaultPackageDesc verbosity) + + ] + where + -- We have to deal with all libs and executables, so we have local + -- versions of these functions that ignore the 'buildable' attribute: + withLib action = maybe (return []) action (library pkg_descr) + withExe action = mapM action (executables pkg_descr) + withTest action = mapM action (testSuites pkg_descr) + withBenchmark action = mapM action (benchmarks pkg_descr) + + +-- |Prepare a directory tree of source files. +prepareTree :: Verbosity -- ^verbosity + -> PackageDescription -- ^info from the cabal file + -> Maybe LocalBuildInfo + -> FilePath -- ^source tree to populate + -> [PPSuffixHandler] -- ^extra preprocessors (includes suffixes) + -> IO () +prepareTree verbosity pkg_descr0 mb_lbi targetDir pps = do + -- If the package was configured then we can run platform-independent + -- pre-processors and include those generated files. + case mb_lbi of + Just lbi | not (null pps) -> do + let lbi' = lbi{ buildDir = targetDir buildDir lbi } + withAllComponentsInBuildOrder pkg_descr lbi' $ \c _ -> + preprocessComponent pkg_descr c lbi' True verbosity pps + _ -> return () + + (ordinary, mExecutable) <- listPackageSources verbosity pkg_descr0 pps + installOrdinaryFiles verbosity targetDir (zip (repeat []) ordinary) + installMaybeExecutableFiles verbosity targetDir (zip (repeat []) mExecutable) + maybeCreateDefaultSetupScript targetDir + + where + pkg_descr = filterAutogenModule pkg_descr0 + +-- | Find the setup script file, if it exists. 
+findSetupFile :: FilePath -> IO (Maybe FilePath) +findSetupFile targetDir = do + hsExists <- doesFileExist setupHs + lhsExists <- doesFileExist setupLhs + if hsExists + then return (Just setupHs) + else if lhsExists + then return (Just setupLhs) + else return Nothing + where + setupHs = targetDir "Setup.hs" + setupLhs = targetDir "Setup.lhs" + +-- | Create a default setup script in the target directory, if it doesn't exist. +maybeCreateDefaultSetupScript :: FilePath -> IO () +maybeCreateDefaultSetupScript targetDir = do + mSetupFile <- findSetupFile targetDir + case mSetupFile of + Just _setupFile -> return () + Nothing -> do + writeUTF8File (targetDir "Setup.hs") $ unlines [ + "import Distribution.Simple", + "main = defaultMain"] + +-- | Find the main executable file. +findMainExeFile :: BuildInfo -> [PPSuffixHandler] -> FilePath -> IO FilePath +findMainExeFile exeBi pps mainPath = do + ppFile <- findFileWithExtension (ppSuffixes pps) (hsSourceDirs exeBi) + (dropExtension mainPath) + case ppFile of + Nothing -> findFile (hsSourceDirs exeBi) mainPath + Just pp -> return pp + +-- | Given a list of include paths, try to find the include file named +-- @f@. Return the name of the file and the full path, or exit with error if +-- there's no such file. +findIncludeFile :: [FilePath] -> String -> IO (String, FilePath) +findIncludeFile [] f = die ("can't find include file " ++ f) +findIncludeFile (d:ds) f = do + let path = (d f) + b <- doesFileExist path + if b then return (f,path) else findIncludeFile ds f + +-- | Remove the auto-generated module ('Paths_*') from 'exposed-modules' and +-- 'other-modules'. 
+filterAutogenModule :: PackageDescription -> PackageDescription +filterAutogenModule pkg_descr0 = mapLib filterAutogenModuleLib $ + mapAllBuildInfo filterAutogenModuleBI pkg_descr0 + where + mapLib f pkg = pkg { library = fmap f (library pkg) } + filterAutogenModuleLib lib = lib { + exposedModules = filter (/=autogenModule) (exposedModules lib) + } + filterAutogenModuleBI bi = bi { + otherModules = filter (/=autogenModule) (otherModules bi) + } + autogenModule = autogenModuleName pkg_descr0 + +-- | Prepare a directory tree of source files for a snapshot version. +-- It is expected that the appropriate snapshot version has already been set +-- in the package description, eg using 'snapshotPackage' or 'snapshotVersion'. +-- +prepareSnapshotTree :: Verbosity -- ^verbosity + -> PackageDescription -- ^info from the cabal file + -> Maybe LocalBuildInfo + -> FilePath -- ^source tree to populate + -> [PPSuffixHandler] -- ^extra preprocessors (includes + -- suffixes) + -> IO () +prepareSnapshotTree verbosity pkg mb_lbi targetDir pps = do + prepareTree verbosity pkg mb_lbi targetDir pps + overwriteSnapshotPackageDesc verbosity pkg targetDir + +overwriteSnapshotPackageDesc :: Verbosity -- ^verbosity + -> PackageDescription -- ^info from the cabal file + -> FilePath -- ^source tree + -> IO () +overwriteSnapshotPackageDesc verbosity pkg targetDir = do + -- We could just writePackageDescription targetDescFile pkg_descr, + -- but that would lose comments and formatting. + descFile <- defaultPackageDesc verbosity + withUTF8FileContents descFile $ + writeUTF8File (targetDir descFile) + . unlines . map (replaceVersion (packageVersion pkg)) . lines + + where + replaceVersion :: Version -> String -> String + replaceVersion version line + | "version:" `isPrefixOf` map toLower line + = "version: " ++ display version + | otherwise = line + +-- | Modifies a 'PackageDescription' by appending a snapshot number +-- corresponding to the given date. 
+-- +snapshotPackage :: UTCTime -> PackageDescription -> PackageDescription +snapshotPackage date pkg = + pkg { + package = pkgid { pkgVersion = snapshotVersion date (pkgVersion pkgid) } + } + where pkgid = packageId pkg + +-- | Modifies a 'Version' by appending a snapshot number corresponding +-- to the given date. +-- +snapshotVersion :: UTCTime -> Version -> Version +snapshotVersion date version = version { + versionBranch = versionBranch version + ++ [dateToSnapshotNumber date] + } + +-- | Given a date produce a corresponding integer representation. +-- For example given a date @18/03/2008@ produce the number @20080318@. +-- +dateToSnapshotNumber :: UTCTime -> Int +dateToSnapshotNumber date = case toGregorian (utctDay date) of + (year, month, day) -> + fromIntegral year * 10000 + + month * 100 + + day + +-- | Callback type for use by sdistWith. +type CreateArchiveFun = Verbosity -- ^verbosity + -> PackageDescription -- ^info from cabal file + -> Maybe LocalBuildInfo -- ^info from configure + -> FilePath -- ^source tree to archive + -> FilePath -- ^name of archive to create + -> IO FilePath + +-- | Create an archive from a tree of source files, and clean up the tree. +createArchive :: CreateArchiveFun +createArchive verbosity pkg_descr mb_lbi tmpDir targetPref = do + let tarBallFilePath = targetPref tarBallName pkg_descr <.> "tar.gz" + + (tarProg, _) <- requireProgram verbosity tarProgram + (maybe defaultProgramConfiguration withPrograms mb_lbi) + let formatOptSupported = maybe False (== "YES") $ + Map.lookup "Supports --format" + (programProperties tarProg) + runProgram verbosity tarProg $ + -- Hmm: I could well be skating on thinner ice here by using the -C option + -- (=> seems to be supported at least by GNU and *BSD tar) [The + -- prev. solution used pipes and sub-command sequences to set up the paths + -- correctly, which is problematic in a Windows setting.] 
+ ["-czf", tarBallFilePath, "-C", tmpDir] + ++ (if formatOptSupported then ["--format", "ustar"] else []) + ++ [tarBallName pkg_descr] + return tarBallFilePath + +-- | Given a buildinfo, return the names of all source files. +allSourcesBuildInfo :: BuildInfo + -> [PPSuffixHandler] -- ^ Extra preprocessors + -> [ModuleName] -- ^ Exposed modules + -> IO [FilePath] +allSourcesBuildInfo bi pps modules = do + let searchDirs = hsSourceDirs bi + sources <- sequence + [ let file = ModuleName.toFilePath module_ + in findFileWithExtension suffixes searchDirs file + >>= maybe (notFound module_) return + | module_ <- modules ++ otherModules bi ] + bootFiles <- sequence + [ let file = ModuleName.toFilePath module_ + fileExts = ["hs-boot", "lhs-boot"] + in findFileWithExtension fileExts (hsSourceDirs bi) file + | module_ <- modules ++ otherModules bi ] + + return $ sources ++ catMaybes bootFiles ++ cSources bi ++ jsSources bi + + where + suffixes = ppSuffixes pps ++ ["hs", "lhs"] + notFound m = die $ "Error: Could not find module: " ++ display m + ++ " with any suffix: " ++ show suffixes + + +printPackageProblems :: Verbosity -> PackageDescription -> IO () +printPackageProblems verbosity pkg_descr = do + ioChecks <- checkPackageFiles pkg_descr "." + let pureChecks = checkConfiguredPackage pkg_descr + isDistError (PackageDistSuspicious _) = False + isDistError _ = True + (errors, warnings) = partition isDistError (pureChecks ++ ioChecks) + unless (null errors) $ + notice verbosity $ "Distribution quality errors:\n" + ++ unlines (map explanation errors) + unless (null warnings) $ + notice verbosity $ "Distribution quality warnings:\n" + ++ unlines (map explanation warnings) + unless (null errors) $ + notice verbosity + "Note: the public hackage server would reject this package." + +------------------------------------------------------------ + +-- | The name of the tarball without extension +-- +tarBallName :: PackageDescription -> String +tarBallName = display . 
packageId + +mapAllBuildInfo :: (BuildInfo -> BuildInfo) + -> (PackageDescription -> PackageDescription) +mapAllBuildInfo f pkg = pkg { + library = fmap mapLibBi (library pkg), + executables = fmap mapExeBi (executables pkg), + testSuites = fmap mapTestBi (testSuites pkg), + benchmarks = fmap mapBenchBi (benchmarks pkg) + } + where + mapLibBi lib = lib { libBuildInfo = f (libBuildInfo lib) } + mapExeBi exe = exe { buildInfo = f (buildInfo exe) } + mapTestBi t = t { testBuildInfo = f (testBuildInfo t) } + mapBenchBi bm = bm { benchmarkBuildInfo = f (benchmarkBuildInfo bm) } diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Test/ExeV10.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Test/ExeV10.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Test/ExeV10.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Test/ExeV10.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,168 @@ +module Distribution.Simple.Test.ExeV10 + ( runTest + ) where + +import Distribution.Compat.CreatePipe ( createPipe ) +import Distribution.Compat.Environment ( getEnvironment ) +import qualified Distribution.PackageDescription as PD +import Distribution.Simple.Build.PathsModule ( pkgPathEnvVar ) +import Distribution.Simple.BuildPaths ( exeExtension ) +import Distribution.Simple.Compiler ( compilerInfo ) +import Distribution.Simple.Hpc ( guessWay, markupTest, tixDir, tixFilePath ) +import Distribution.Simple.InstallDirs + ( fromPathTemplate, initialPathTemplateEnv, PathTemplateVariable(..) + , substPathTemplate , toPathTemplate, PathTemplate ) +import qualified Distribution.Simple.LocalBuildInfo as LBI +import Distribution.Simple.Setup + ( TestFlags(..), TestShowDetails(..), fromFlag, configCoverage ) +import Distribution.Simple.Test.Log +import Distribution.Simple.Utils + ( die, notice, rawSystemIOWithEnv, addLibraryPath ) +import Distribution.System ( Platform (..) 
) +import Distribution.TestSuite +import Distribution.Text +import Distribution.Verbosity ( normal ) + +import Control.Concurrent (forkIO) +import Control.Monad ( unless, void, when ) +import System.Directory + ( createDirectoryIfMissing, doesDirectoryExist, doesFileExist + , getCurrentDirectory, removeDirectoryRecursive ) +import System.Exit ( ExitCode(..) ) +import System.FilePath ( (), (<.>) ) +import System.IO ( hGetContents, hPutStr, stdout ) + +runTest :: PD.PackageDescription + -> LBI.LocalBuildInfo + -> TestFlags + -> PD.TestSuite + -> IO TestSuiteLog +runTest pkg_descr lbi flags suite = do + let isCoverageEnabled = fromFlag $ configCoverage $ LBI.configFlags lbi + way = guessWay lbi + tixDir_ = tixDir distPref way $ PD.testName suite + + pwd <- getCurrentDirectory + existingEnv <- getEnvironment + + let cmd = LBI.buildDir lbi PD.testName suite + PD.testName suite <.> exeExtension + -- Check that the test executable exists. + exists <- doesFileExist cmd + unless exists $ die $ "Error: Could not find test program \"" ++ cmd + ++ "\". Did you build the package first?" + + -- Remove old .tix files if appropriate. + unless (fromFlag $ testKeepTix flags) $ do + exists' <- doesDirectoryExist tixDir_ + when exists' $ removeDirectoryRecursive tixDir_ + + -- Create directory for HPC files. 
+ createDirectoryIfMissing True tixDir_ + + -- Write summary notices indicating start of test suite + notice verbosity $ summarizeSuiteStart $ PD.testName suite + + (rOut, wOut) <- createPipe + + -- Read test executable's output lazily (returns immediately) + logText <- hGetContents rOut + -- Force the IO manager to drain the test output pipe + void $ forkIO $ length logText `seq` return () + + -- '--show-details=streaming': print the log output in another thread + when (details == Streaming) $ void $ forkIO $ hPutStr stdout logText + + -- Run the test executable + let opts = map (testOption pkg_descr lbi suite) + (testOptions flags) + dataDirPath = pwd PD.dataDir pkg_descr + tixFile = pwd tixFilePath distPref way (PD.testName suite) + pkgPathEnv = (pkgPathEnvVar pkg_descr "datadir", dataDirPath) + : existingEnv + shellEnv = [("HPCTIXFILE", tixFile) | isCoverageEnabled] ++ pkgPathEnv + + -- Add (DY)LD_LIBRARY_PATH if needed + shellEnv' <- if LBI.withDynExe lbi + then do let (Platform _ os) = LBI.hostPlatform lbi + clbi = LBI.getComponentLocalBuildInfo lbi + (LBI.CTestName (PD.testName suite)) + paths <- LBI.depLibraryPaths True False lbi clbi + return (addLibraryPath os paths shellEnv) + else return shellEnv + + exit <- rawSystemIOWithEnv verbosity cmd opts Nothing (Just shellEnv') + -- these handles are automatically closed + Nothing (Just wOut) (Just wOut) + + -- Generate TestSuiteLog from executable exit code and a machine- + -- readable test log. 
+ let suiteLog = buildLog exit + + -- Write summary notice to log file indicating start of test suite + appendFile (logFile suiteLog) $ summarizeSuiteStart $ PD.testName suite + + -- Append contents of temporary log file to the final human- + -- readable log file + appendFile (logFile suiteLog) logText + + -- Write end-of-suite summary notice to log file + appendFile (logFile suiteLog) $ summarizeSuiteFinish suiteLog + + -- Show the contents of the human-readable log file on the terminal + -- if there is a failure and/or detailed output is requested + let whenPrinting = when $ + (details > Never) + && (not (suitePassed $ testLogs suiteLog) || details == Always) + -- verbosity overrides show-details + && verbosity >= normal + -- if streaming, we already printed the log + && details /= Streaming + whenPrinting $ putStr $ unlines $ lines logText + + -- Write summary notice to terminal indicating end of test suite + notice verbosity $ summarizeSuiteFinish suiteLog + + when isCoverageEnabled $ + markupTest verbosity lbi distPref (display $ PD.package pkg_descr) suite + + return suiteLog + where + distPref = fromFlag $ testDistPref flags + verbosity = fromFlag $ testVerbosity flags + details = fromFlag $ testShowDetails flags + testLogDir = distPref "test" + + buildLog exit = + let r = case exit of + ExitSuccess -> Pass + ExitFailure c -> Fail $ "exit code: " ++ show c + n = PD.testName suite + l = TestLog + { testName = n + , testOptionsReturned = [] + , testResult = r + } + in TestSuiteLog + { testSuiteName = n + , testLogs = l + , logFile = + testLogDir + testSuiteLogPath (fromFlag $ testHumanLog flags) + pkg_descr lbi n l + } + +-- TODO: This is abusing the notion of a 'PathTemplate'. The result isn't +-- necessarily a path. 
+testOption :: PD.PackageDescription + -> LBI.LocalBuildInfo + -> PD.TestSuite + -> PathTemplate + -> String +testOption pkg_descr lbi suite template = + fromPathTemplate $ substPathTemplate env template + where + env = initialPathTemplateEnv + (PD.package pkg_descr) (LBI.pkgKey lbi) + (compilerInfo $ LBI.compiler lbi) (LBI.hostPlatform lbi) ++ + [(TestSuiteNameVar, toPathTemplate $ PD.testName suite)] diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Test/LibV09.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Test/LibV09.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Test/LibV09.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Test/LibV09.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,258 @@ +module Distribution.Simple.Test.LibV09 + ( runTest + -- Test stub + , simpleTestStub + , stubFilePath, stubMain, stubName, stubWriteLog + , writeSimpleTestStub + ) where + +import Distribution.Compat.CreatePipe ( createPipe ) +import Distribution.Compat.Environment ( getEnvironment ) +import Distribution.Compat.TempFile ( openTempFile ) +import Distribution.ModuleName ( ModuleName ) +import qualified Distribution.PackageDescription as PD +import Distribution.Simple.Build.PathsModule ( pkgPathEnvVar ) +import Distribution.Simple.BuildPaths ( exeExtension ) +import Distribution.Simple.Compiler ( compilerInfo ) +import Distribution.Simple.Hpc ( guessWay, markupTest, tixDir, tixFilePath ) +import Distribution.Simple.InstallDirs + ( fromPathTemplate, initialPathTemplateEnv, PathTemplateVariable(..) 
+ , substPathTemplate , toPathTemplate, PathTemplate ) +import qualified Distribution.Simple.LocalBuildInfo as LBI +import Distribution.Simple.Setup + ( TestFlags(..), TestShowDetails(..), fromFlag, configCoverage ) +import Distribution.Simple.Test.Log +import Distribution.Simple.Utils + ( die, notice, rawSystemIOWithEnv, addLibraryPath ) +import Distribution.System ( Platform (..) ) +import Distribution.TestSuite +import Distribution.Text +import Distribution.Verbosity ( normal ) + +import Control.Exception ( bracket ) +import Control.Monad ( when, unless ) +import Data.Maybe ( mapMaybe ) +import System.Directory + ( createDirectoryIfMissing, doesDirectoryExist, doesFileExist + , getCurrentDirectory, removeDirectoryRecursive, removeFile + , setCurrentDirectory ) +import System.Exit ( ExitCode(..), exitWith ) +import System.FilePath ( (), (<.>) ) +import System.IO ( hClose, hGetContents, hPutStr ) + +runTest :: PD.PackageDescription + -> LBI.LocalBuildInfo + -> TestFlags + -> PD.TestSuite + -> IO TestSuiteLog +runTest pkg_descr lbi flags suite = do + let isCoverageEnabled = fromFlag $ configCoverage $ LBI.configFlags lbi + way = guessWay lbi + + pwd <- getCurrentDirectory + existingEnv <- getEnvironment + + let cmd = LBI.buildDir lbi stubName suite + stubName suite <.> exeExtension + -- Check that the test executable exists. + exists <- doesFileExist cmd + unless exists $ die $ "Error: Could not find test program \"" ++ cmd + ++ "\". Did you build the package first?" + + -- Remove old .tix files if appropriate. + unless (fromFlag $ testKeepTix flags) $ do + let tDir = tixDir distPref way $ PD.testName suite + exists' <- doesDirectoryExist tDir + when exists' $ removeDirectoryRecursive tDir + + -- Create directory for HPC files. 
+ createDirectoryIfMissing True $ tixDir distPref way $ PD.testName suite + + -- Write summary notices indicating start of test suite + notice verbosity $ summarizeSuiteStart $ PD.testName suite + + suiteLog <- bracket openCabalTemp deleteIfExists $ \tempLog -> do + + (rIn, wIn) <- createPipe + (rOut, wOut) <- createPipe + + -- Prepare standard input for test executable + --appendFile tempInput $ show (tempInput, PD.testName suite) + hPutStr wIn $ show (tempLog, PD.testName suite) + hClose wIn + + -- Run test executable + _ <- do let opts = map (testOption pkg_descr lbi suite) $ testOptions flags + dataDirPath = pwd PD.dataDir pkg_descr + tixFile = pwd tixFilePath distPref way (PD.testName suite) + pkgPathEnv = (pkgPathEnvVar pkg_descr "datadir", dataDirPath) + : existingEnv + shellEnv = [("HPCTIXFILE", tixFile) | isCoverageEnabled] + ++ pkgPathEnv + -- Add (DY)LD_LIBRARY_PATH if needed + shellEnv' <- if LBI.withDynExe lbi + then do + let (Platform _ os) = LBI.hostPlatform lbi + clbi = LBI.getComponentLocalBuildInfo + lbi + (LBI.CTestName + (PD.testName suite)) + paths <- LBI.depLibraryPaths + True False lbi clbi + return (addLibraryPath os paths shellEnv) + else return shellEnv + rawSystemIOWithEnv verbosity cmd opts Nothing (Just shellEnv') + -- these handles are closed automatically + (Just rIn) (Just wOut) (Just wOut) + + -- Generate final log file name + let finalLogName l = testLogDir + testSuiteLogPath + (fromFlag $ testHumanLog flags) pkg_descr lbi + (testSuiteName l) (testLogs l) + -- Generate TestSuiteLog from executable exit code and a machine- + -- readable test log + suiteLog <- fmap ((\l -> l { logFile = finalLogName l }) . 
read) + $ readFile tempLog + + -- Write summary notice to log file indicating start of test suite + appendFile (logFile suiteLog) $ summarizeSuiteStart $ PD.testName suite + + -- Append contents of temporary log file to the final human- + -- readable log file + logText <- hGetContents rOut + appendFile (logFile suiteLog) logText + + -- Write end-of-suite summary notice to log file + appendFile (logFile suiteLog) $ summarizeSuiteFinish suiteLog + + -- Show the contents of the human-readable log file on the terminal + -- if there is a failure and/or detailed output is requested + let details = fromFlag $ testShowDetails flags + whenPrinting = when $ (details > Never) + && (not (suitePassed $ testLogs suiteLog) || details == Always) + && verbosity >= normal + whenPrinting $ putStr $ unlines $ lines logText + + return suiteLog + + -- Write summary notice to terminal indicating end of test suite + notice verbosity $ summarizeSuiteFinish suiteLog + + when isCoverageEnabled $ + markupTest verbosity lbi distPref (display $ PD.package pkg_descr) suite + + return suiteLog + where + deleteIfExists file = do + exists <- doesFileExist file + when exists $ removeFile file + + testLogDir = distPref "test" + openCabalTemp = do + (f, h) <- openTempFile testLogDir $ "cabal-test-" <.> "log" + hClose h >> return f + + distPref = fromFlag $ testDistPref flags + verbosity = fromFlag $ testVerbosity flags + +-- TODO: This is abusing the notion of a 'PathTemplate'. The result isn't +-- necessarily a path. 
+testOption :: PD.PackageDescription + -> LBI.LocalBuildInfo + -> PD.TestSuite + -> PathTemplate + -> String +testOption pkg_descr lbi suite template = + fromPathTemplate $ substPathTemplate env template + where + env = initialPathTemplateEnv + (PD.package pkg_descr) (LBI.pkgKey lbi) + (compilerInfo $ LBI.compiler lbi) (LBI.hostPlatform lbi) ++ + [(TestSuiteNameVar, toPathTemplate $ PD.testName suite)] + +-- Test stub ---------- + +-- | The name of the stub executable associated with a library 'TestSuite'. +stubName :: PD.TestSuite -> FilePath +stubName t = PD.testName t ++ "Stub" + +-- | The filename of the source file for the stub executable associated with a +-- library 'TestSuite'. +stubFilePath :: PD.TestSuite -> FilePath +stubFilePath t = stubName t <.> "hs" + +-- | Write the source file for a library 'TestSuite' stub executable. +writeSimpleTestStub :: PD.TestSuite -- ^ library 'TestSuite' for which a stub + -- is being created + -> FilePath -- ^ path to directory where stub source + -- should be located + -> IO () +writeSimpleTestStub t dir = do + createDirectoryIfMissing True dir + let filename = dir stubFilePath t + PD.TestSuiteLibV09 _ m = PD.testInterface t + writeFile filename $ simpleTestStub m + +-- | Source code for library test suite stub executable +simpleTestStub :: ModuleName -> String +simpleTestStub m = unlines + [ "module Main ( main ) where" + , "import Distribution.Simple.Test.LibV09 ( stubMain )" + , "import " ++ show (disp m) ++ " ( tests )" + , "main :: IO ()" + , "main = stubMain tests" + ] + +-- | Main function for test stubs. Once, it was written directly into the stub, +-- but minimizing the amount of code actually in the stub maximizes the number +-- of detectable errors when Cabal is compiled. 
+stubMain :: IO [Test] -> IO () +stubMain tests = do + (f, n) <- fmap read getContents + dir <- getCurrentDirectory + results <- tests >>= stubRunTests + setCurrentDirectory dir + stubWriteLog f n results + +-- | The test runner used in library "TestSuite" stub executables. Runs a list +-- of 'Test's. An executable calling this function is meant to be invoked as +-- the child of a Cabal process during @.\/setup test@. A 'TestSuiteLog', +-- provided by Cabal, is read from the standard input; it supplies the name of +-- the test suite and the location of the machine-readable test suite log file. +-- Human-readable log information is written to the standard output for capture +-- by the calling Cabal process. +stubRunTests :: [Test] -> IO TestLogs +stubRunTests tests = do + logs <- mapM stubRunTests' tests + return $ GroupLogs "Default" logs + where + stubRunTests' (Test t) = do + l <- run t >>= finish + summarizeTest normal Always l + return l + where + finish (Finished result) = + return TestLog + { testName = name t + , testOptionsReturned = defaultOptions t + , testResult = result + } + finish (Progress _ next) = next >>= finish + stubRunTests' g@(Group {}) = do + logs <- mapM stubRunTests' $ groupTests g + return $ GroupLogs (groupName g) logs + stubRunTests' (ExtraOptions _ t) = stubRunTests' t + maybeDefaultOption opt = + maybe Nothing (\d -> Just (optionName opt, d)) $ optionDefault opt + defaultOptions testInst = mapMaybe maybeDefaultOption $ options testInst + +-- | From a test stub, write the 'TestSuiteLog' to temporary file for the calling +-- Cabal process to read. 
+stubWriteLog :: FilePath -> String -> TestLogs -> IO () +stubWriteLog f n logs = do + let testLog = TestSuiteLog { testSuiteName = n, testLogs = logs, logFile = f } + writeFile (logFile testLog) $ show testLog + when (suiteError logs) $ exitWith $ ExitFailure 2 + when (suiteFailed logs) $ exitWith $ ExitFailure 1 + exitWith ExitSuccess diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Test/Log.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Test/Log.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Test/Log.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Test/Log.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,161 @@ +module Distribution.Simple.Test.Log + ( PackageLog(..) + , TestLogs(..) + , TestSuiteLog(..) + , countTestResults + , localPackageLog + , summarizePackage + , summarizeSuiteFinish, summarizeSuiteStart + , summarizeTest + , suiteError, suiteFailed, suitePassed + , testSuiteLogPath + ) where + +import Distribution.Package ( PackageId ) +import qualified Distribution.PackageDescription as PD +import Distribution.Simple.Compiler ( Compiler(..), compilerInfo, CompilerId ) +import Distribution.Simple.InstallDirs + ( fromPathTemplate, initialPathTemplateEnv, PathTemplateVariable(..) + , substPathTemplate , toPathTemplate, PathTemplate ) +import qualified Distribution.Simple.LocalBuildInfo as LBI +import Distribution.Simple.Setup ( TestShowDetails(..) ) +import Distribution.Simple.Utils ( notice ) +import Distribution.System ( Platform ) +import Distribution.TestSuite ( Options, Result(..) ) +import Distribution.Verbosity ( Verbosity ) + +import Control.Monad ( when ) +import Data.Char ( toUpper ) + +-- | Logs all test results for a package, broken down first by test suite and +-- then by test case. 
+data PackageLog = PackageLog + { package :: PackageId + , compiler :: CompilerId + , platform :: Platform + , testSuites :: [TestSuiteLog] + } + deriving (Read, Show, Eq) + +-- | A 'PackageLog' with package and platform information specified. +localPackageLog :: PD.PackageDescription -> LBI.LocalBuildInfo -> PackageLog +localPackageLog pkg_descr lbi = PackageLog + { package = PD.package pkg_descr + , compiler = compilerId $ LBI.compiler lbi + , platform = LBI.hostPlatform lbi + , testSuites = [] + } + +-- | Logs test suite results, itemized by test case. +data TestSuiteLog = TestSuiteLog + { testSuiteName :: String + , testLogs :: TestLogs + , logFile :: FilePath -- path to human-readable log file + } + deriving (Read, Show, Eq) + +data TestLogs + = TestLog + { testName :: String + , testOptionsReturned :: Options + , testResult :: Result + } + | GroupLogs String [TestLogs] + deriving (Read, Show, Eq) + +-- | Count the number of pass, fail, and error test results in a 'TestLogs' +-- tree. +countTestResults :: TestLogs + -> (Int, Int, Int) -- ^ Passes, fails, and errors, + -- respectively. +countTestResults = go (0, 0, 0) + where + go (p, f, e) (TestLog { testResult = r }) = + case r of + Pass -> (p + 1, f, e) + Fail _ -> (p, f + 1, e) + Error _ -> (p, f, e + 1) + go (p, f, e) (GroupLogs _ ts) = foldl go (p, f, e) ts + +-- | From a 'TestSuiteLog', determine if the test suite passed. +suitePassed :: TestLogs -> Bool +suitePassed l = + case countTestResults l of + (_, 0, 0) -> True + _ -> False + +-- | From a 'TestSuiteLog', determine if the test suite failed. +suiteFailed :: TestLogs -> Bool +suiteFailed l = + case countTestResults l of + (_, 0, _) -> False + _ -> True + +-- | From a 'TestSuiteLog', determine if the test suite encountered errors. 
+suiteError :: TestLogs -> Bool +suiteError l = + case countTestResults l of + (_, _, 0) -> False + _ -> True + +resultString :: TestLogs -> String +resultString l | suiteError l = "error" + | suiteFailed l = "fail" + | otherwise = "pass" + +testSuiteLogPath :: PathTemplate + -> PD.PackageDescription + -> LBI.LocalBuildInfo + -> String -- ^ test suite name + -> TestLogs -- ^ test suite results + -> FilePath +testSuiteLogPath template pkg_descr lbi name result = + fromPathTemplate $ substPathTemplate env template + where + env = initialPathTemplateEnv + (PD.package pkg_descr) (LBI.pkgKey lbi) + (compilerInfo $ LBI.compiler lbi) (LBI.hostPlatform lbi) + ++ [ (TestSuiteNameVar, toPathTemplate name) + , (TestSuiteResultVar, toPathTemplate $ resultString result) + ] + +-- | Print a summary to the console after all test suites have been run +-- indicating the number of successful test suites and cases. Returns 'True' if +-- all test suites passed and 'False' otherwise. +summarizePackage :: Verbosity -> PackageLog -> IO Bool +summarizePackage verbosity packageLog = do + let counts = map (countTestResults . testLogs) $ testSuites packageLog + (passed, failed, errors) = foldl1 addTriple counts + totalCases = passed + failed + errors + passedSuites = length + $ filter (suitePassed . testLogs) + $ testSuites packageLog + totalSuites = length $ testSuites packageLog + notice verbosity $ show passedSuites ++ " of " ++ show totalSuites + ++ " test suites (" ++ show passed ++ " of " + ++ show totalCases ++ " test cases) passed." + return $! passedSuites == totalSuites + where + addTriple (p1, f1, e1) (p2, f2, e2) = (p1 + p2, f1 + f2, e1 + e2) + +-- | Print a summary of a single test case's result to the console, supressing +-- output for certain verbosity or test filter levels. 
+summarizeTest :: Verbosity -> TestShowDetails -> TestLogs -> IO () +summarizeTest _ _ (GroupLogs {}) = return () +summarizeTest verbosity details t = + when shouldPrint $ notice verbosity $ "Test case " ++ testName t + ++ ": " ++ show (testResult t) + where shouldPrint = (details > Never) && (notPassed || details == Always) + notPassed = testResult t /= Pass + +-- | Print a summary of the test suite's results on the console, suppressing +-- output for certain verbosity or test filter levels. +summarizeSuiteFinish :: TestSuiteLog -> String +summarizeSuiteFinish testLog = unlines + [ "Test suite " ++ testSuiteName testLog ++ ": " ++ resStr + , "Test suite logged to: " ++ logFile testLog + ] + where resStr = map toUpper (resultString $ testLogs testLog) + +summarizeSuiteStart :: String -> String +summarizeSuiteStart n = "Test suite " ++ n ++ ": RUNNING...\n" diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Test.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Test.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Test.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Test.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,136 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.Test +-- Copyright : Thomas Tuegel 2010 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This is the entry point into testing a built package. It performs the +-- \"@.\/setup test@\" action. It runs test suites designated in the package +-- description and reports on the results. + +module Distribution.Simple.Test + ( test + ) where + +import qualified Distribution.PackageDescription as PD + ( PackageDescription(..), BuildInfo(buildable) + , TestSuite(..) 
+ , TestSuiteInterface(..), testType, hasTests ) +import Distribution.Simple.Compiler ( compilerInfo ) +import Distribution.Simple.Hpc ( markupPackage ) +import Distribution.Simple.InstallDirs + ( fromPathTemplate, initialPathTemplateEnv, substPathTemplate + , PathTemplate ) +import qualified Distribution.Simple.LocalBuildInfo as LBI + ( LocalBuildInfo(..) ) +import Distribution.Simple.Setup ( TestFlags(..), fromFlag, configCoverage ) +import Distribution.Simple.UserHooks ( Args ) +import qualified Distribution.Simple.Test.ExeV10 as ExeV10 +import qualified Distribution.Simple.Test.LibV09 as LibV09 +import Distribution.Simple.Test.Log +import Distribution.Simple.Utils ( die, notice ) +import Distribution.TestSuite ( Result(..) ) +import Distribution.Text + +import Control.Monad ( when, unless, filterM ) +import System.Directory + ( createDirectoryIfMissing, doesFileExist, getDirectoryContents + , removeFile ) +import System.Exit ( ExitCode(..), exitFailure, exitWith ) +import System.FilePath ( () ) + +-- |Perform the \"@.\/setup test@\" action. 
+test :: Args -- ^positional command-line arguments + -> PD.PackageDescription -- ^information from the .cabal file + -> LBI.LocalBuildInfo -- ^information from the configure step + -> TestFlags -- ^flags sent to test + -> IO () +test args pkg_descr lbi flags = do + let verbosity = fromFlag $ testVerbosity flags + machineTemplate = fromFlag $ testMachineLog flags + distPref = fromFlag $ testDistPref flags + testLogDir = distPref "test" + testNames = args + pkgTests = PD.testSuites pkg_descr + enabledTests = [ t | t <- pkgTests + , PD.testEnabled t + , PD.buildable (PD.testBuildInfo t) ] + + doTest :: (PD.TestSuite, Maybe TestSuiteLog) -> IO TestSuiteLog + doTest (suite, _) = + case PD.testInterface suite of + PD.TestSuiteExeV10 _ _ -> + ExeV10.runTest pkg_descr lbi flags suite + + PD.TestSuiteLibV09 _ _ -> + LibV09.runTest pkg_descr lbi flags suite + + _ -> return TestSuiteLog + { testSuiteName = PD.testName suite + , testLogs = TestLog + { testName = PD.testName suite + , testOptionsReturned = [] + , testResult = + Error $ "No support for running test suite type: " + ++ show (disp $ PD.testType suite) + } + , logFile = "" + } + + when (not $ PD.hasTests pkg_descr) $ do + notice verbosity "Package has no test suites." + exitWith ExitSuccess + + when (PD.hasTests pkg_descr && null enabledTests) $ + die $ "No test suites enabled. Did you remember to configure with " + ++ "\'--enable-tests\'?" + + testsToRun <- case testNames of + [] -> return $ zip enabledTests $ repeat Nothing + names -> flip mapM names $ \tName -> + let testMap = zip enabledNames enabledTests + enabledNames = map PD.testName enabledTests + allNames = map PD.testName pkgTests + in case lookup tName testMap of + Just t -> return (t, Nothing) + _ | tName `elem` allNames -> + die $ "Package configured with test suite " + ++ tName ++ " disabled." + | otherwise -> die $ "no such test: " ++ tName + + createDirectoryIfMissing True testLogDir + + -- Delete ordinary files from test log directory. 
+ getDirectoryContents testLogDir + >>= filterM doesFileExist . map (testLogDir ) + >>= mapM_ removeFile + + let totalSuites = length testsToRun + notice verbosity $ "Running " ++ show totalSuites ++ " test suites..." + suites <- mapM doTest testsToRun + let packageLog = (localPackageLog pkg_descr lbi) { testSuites = suites } + packageLogFile = () testLogDir + $ packageLogPath machineTemplate pkg_descr lbi + allOk <- summarizePackage verbosity packageLog + writeFile packageLogFile $ show packageLog + + let isCoverageEnabled = fromFlag $ configCoverage $ LBI.configFlags lbi + when isCoverageEnabled $ + markupPackage verbosity lbi distPref (display $ PD.package pkg_descr) $ + map fst testsToRun + + unless allOk exitFailure + +packageLogPath :: PathTemplate + -> PD.PackageDescription + -> LBI.LocalBuildInfo + -> FilePath +packageLogPath template pkg_descr lbi = + fromPathTemplate $ substPathTemplate env template + where + env = initialPathTemplateEnv + (PD.package pkg_descr) (LBI.pkgKey lbi) + (compilerInfo $ LBI.compiler lbi) (LBI.hostPlatform lbi) diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/UHC.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/UHC.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/UHC.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/UHC.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,276 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.UHC +-- Copyright : Andres Loeh 2009 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This module contains most of the UHC-specific code for configuring, building +-- and installing packages. 
+-- +-- Thanks to the authors of the other implementation-specific files, in +-- particular to Isaac Jones, Duncan Coutts and Henning Thielemann, for +-- inspiration on how to design this module. + +module Distribution.Simple.UHC ( + configure, getInstalledPackages, + buildLib, buildExe, installLib, registerPackage + ) where + +import Control.Monad +import Data.List +import qualified Data.Map as M ( empty ) +import Distribution.Compat.ReadP +import Distribution.InstalledPackageInfo +import Distribution.Package hiding (installedPackageId) +import Distribution.PackageDescription +import Distribution.Simple.BuildPaths +import Distribution.Simple.Compiler as C +import Distribution.Simple.LocalBuildInfo +import Distribution.Simple.PackageIndex +import Distribution.Simple.Program +import Distribution.Simple.Utils +import Distribution.Text +import Distribution.Verbosity +import Distribution.Version +import Language.Haskell.Extension +import System.Directory +import System.FilePath +import Distribution.System ( Platform ) + +-- ----------------------------------------------------------------------------- +-- Configuring + +configure :: Verbosity -> Maybe FilePath -> Maybe FilePath + -> ProgramConfiguration -> IO (Compiler, Maybe Platform, ProgramConfiguration) +configure verbosity hcPath _hcPkgPath conf = do + + (_uhcProg, uhcVersion, conf') <- + requireProgramVersion verbosity uhcProgram + (orLaterVersion (Version [1,0,2] [])) + (userMaybeSpecifyPath "uhc" hcPath conf) + + let comp = Compiler { + compilerId = CompilerId UHC uhcVersion, + compilerAbiTag = C.NoAbiTag, + compilerCompat = [], + compilerLanguages = uhcLanguages, + compilerExtensions = uhcLanguageExtensions, + compilerProperties = M.empty + } + compPlatform = Nothing + return (comp, compPlatform, conf') + +uhcLanguages :: [(Language, C.Flag)] +uhcLanguages = [(Haskell98, "")] + +-- | The flags for the supported extensions. 
+uhcLanguageExtensions :: [(Extension, C.Flag)] +uhcLanguageExtensions = + let doFlag (f, (enable, disable)) = [(EnableExtension f, enable), + (DisableExtension f, disable)] + alwaysOn = ("", ""{- wrong -}) + in concatMap doFlag + [(CPP, ("--cpp", ""{- wrong -})), + (PolymorphicComponents, alwaysOn), + (ExistentialQuantification, alwaysOn), + (ForeignFunctionInterface, alwaysOn), + (UndecidableInstances, alwaysOn), + (MultiParamTypeClasses, alwaysOn), + (Rank2Types, alwaysOn), + (PatternSignatures, alwaysOn), + (EmptyDataDecls, alwaysOn), + (ImplicitPrelude, ("", "--no-prelude"{- wrong -})), + (TypeOperators, alwaysOn), + (OverlappingInstances, alwaysOn), + (FlexibleInstances, alwaysOn)] + +getInstalledPackages :: Verbosity -> Compiler -> PackageDBStack -> ProgramConfiguration + -> IO InstalledPackageIndex +getInstalledPackages verbosity comp packagedbs conf = do + let compilerid = compilerId comp + systemPkgDir <- rawSystemProgramStdoutConf verbosity uhcProgram conf ["--meta-pkgdir-system"] + userPkgDir <- getUserPackageDir + let pkgDirs = nub (concatMap (packageDbPaths userPkgDir systemPkgDir) packagedbs) + -- putStrLn $ "pkgdirs: " ++ show pkgDirs + -- call to "lines" necessary, because pkgdir contains an extra newline at the end + pkgs <- liftM (map addBuiltinVersions . concat) . + mapM (\ d -> getDirectoryContents d >>= filterM (isPkgDir (display compilerid) d)) . 
+ concatMap lines $ pkgDirs + -- putStrLn $ "pkgs: " ++ show pkgs + let iPkgs = + map mkInstalledPackageInfo $ + concatMap parsePackage $ + pkgs + -- putStrLn $ "installed pkgs: " ++ show iPkgs + return (fromList iPkgs) + +getUserPackageDir :: IO FilePath +getUserPackageDir = + do + homeDir <- getHomeDirectory + return $ homeDir ".cabal" "lib" -- TODO: determine in some other way + +packageDbPaths :: FilePath -> FilePath -> PackageDB -> [FilePath] +packageDbPaths user system db = + case db of + GlobalPackageDB -> [ system ] + UserPackageDB -> [ user ] + SpecificPackageDB path -> [ path ] + +-- | Hack to add version numbers to UHC-built-in packages. This should sooner or +-- later be fixed on the UHC side. +addBuiltinVersions :: String -> String +{- +addBuiltinVersions "uhcbase" = "uhcbase-1.0" +addBuiltinVersions "base" = "base-3.0" +addBuiltinVersions "array" = "array-0.2" +-} +addBuiltinVersions xs = xs + +-- | Name of the installed package config file. +installedPkgConfig :: String +installedPkgConfig = "installed-pkg-config" + +-- | Check if a certain dir contains a valid package. Currently, we are +-- looking only for the presence of an installed package configuration. +-- TODO: Actually make use of the information provided in the file. +isPkgDir :: String -> String -> String -> IO Bool +isPkgDir _ _ ('.' : _) = return False -- ignore files starting with a . +isPkgDir c dir xs = do + let candidate = dir uhcPackageDir xs c + -- putStrLn $ "trying: " ++ candidate + doesFileExist (candidate installedPkgConfig) + +parsePackage :: String -> [PackageId] +parsePackage x = map fst (filter (\ (_,y) -> null y) (readP_to_S parse x)) + +-- | Create a trivial package info from a directory name. 
+mkInstalledPackageInfo :: PackageId -> InstalledPackageInfo +mkInstalledPackageInfo p = emptyInstalledPackageInfo + { installedPackageId = InstalledPackageId (display p), + sourcePackageId = p } + + +-- ----------------------------------------------------------------------------- +-- Building + +buildLib :: Verbosity -> PackageDescription -> LocalBuildInfo + -> Library -> ComponentLocalBuildInfo -> IO () +buildLib verbosity pkg_descr lbi lib clbi = do + + systemPkgDir <- rawSystemProgramStdoutConf verbosity uhcProgram (withPrograms lbi) ["--meta-pkgdir-system"] + userPkgDir <- getUserPackageDir + let runUhcProg = rawSystemProgramConf verbosity uhcProgram (withPrograms lbi) + let uhcArgs = -- set package name + ["--pkg-build=" ++ display (packageId pkg_descr)] + -- common flags lib/exe + ++ constructUHCCmdLine userPkgDir systemPkgDir + lbi (libBuildInfo lib) clbi + (buildDir lbi) verbosity + -- source files + -- suboptimal: UHC does not understand module names, so + -- we replace periods by path separators + ++ map (map (\ c -> if c == '.' 
then pathSeparator else c)) + (map display (libModules lib)) + + runUhcProg uhcArgs + + return () + +buildExe :: Verbosity -> PackageDescription -> LocalBuildInfo + -> Executable -> ComponentLocalBuildInfo -> IO () +buildExe verbosity _pkg_descr lbi exe clbi = do + systemPkgDir <- rawSystemProgramStdoutConf verbosity uhcProgram (withPrograms lbi) ["--meta-pkgdir-system"] + userPkgDir <- getUserPackageDir + let runUhcProg = rawSystemProgramConf verbosity uhcProgram (withPrograms lbi) + let uhcArgs = -- common flags lib/exe + constructUHCCmdLine userPkgDir systemPkgDir + lbi (buildInfo exe) clbi + (buildDir lbi) verbosity + -- output file + ++ ["--output", buildDir lbi exeName exe] + -- main source module + ++ [modulePath exe] + runUhcProg uhcArgs + +constructUHCCmdLine :: FilePath -> FilePath + -> LocalBuildInfo -> BuildInfo -> ComponentLocalBuildInfo + -> FilePath -> Verbosity -> [String] +constructUHCCmdLine user system lbi bi clbi odir verbosity = + -- verbosity + (if verbosity >= deafening then ["-v4"] + else if verbosity >= normal then [] + else ["-v0"]) + ++ hcOptions UHC bi + -- flags for language extensions + ++ languageToFlags (compiler lbi) (defaultLanguage bi) + ++ extensionsToFlags (compiler lbi) (usedExtensions bi) + -- packages + ++ ["--hide-all-packages"] + ++ uhcPackageDbOptions user system (withPackageDB lbi) + ++ ["--package=uhcbase"] + ++ ["--package=" ++ display (pkgName pkgid) | (_, pkgid) <- componentPackageDeps clbi ] + -- search paths + ++ ["-i" ++ odir] + ++ ["-i" ++ l | l <- nub (hsSourceDirs bi)] + ++ ["-i" ++ autogenModulesDir lbi] + -- cpp options + ++ ["--optP=" ++ opt | opt <- cppOptions bi] + -- output path + ++ ["--odir=" ++ odir] + -- optimization + ++ (case withOptimization lbi of + NoOptimisation -> ["-O0"] + NormalOptimisation -> ["-O1"] + MaximumOptimisation -> ["-O2"]) + +uhcPackageDbOptions :: FilePath -> FilePath -> PackageDBStack -> [String] +uhcPackageDbOptions user system db = map (\ x -> "--pkg-searchpath=" ++ x) + 
(concatMap (packageDbPaths user system) db) + +-- ----------------------------------------------------------------------------- +-- Installation + +installLib :: Verbosity -> LocalBuildInfo + -> FilePath -> FilePath -> FilePath + -> PackageDescription -> Library -> IO () +installLib verbosity _lbi targetDir _dynlibTargetDir builtDir pkg _library = do + -- putStrLn $ "dest: " ++ targetDir + -- putStrLn $ "built: " ++ builtDir + installDirectoryContents verbosity (builtDir display (packageId pkg)) targetDir + +-- currently hard-coded UHC code generator and variant to use +uhcTarget, uhcTargetVariant :: String +uhcTarget = "bc" +uhcTargetVariant = "plain" + +-- root directory for a package in UHC +uhcPackageDir :: String -> String -> FilePath +uhcPackageSubDir :: String -> FilePath +uhcPackageDir pkgid compilerid = pkgid uhcPackageSubDir compilerid +uhcPackageSubDir compilerid = compilerid uhcTarget uhcTargetVariant + +-- ----------------------------------------------------------------------------- +-- Registering + +registerPackage + :: Verbosity + -> InstalledPackageInfo + -> PackageDescription + -> LocalBuildInfo + -> Bool + -> PackageDBStack + -> IO () +registerPackage verbosity installedPkgInfo pkg lbi inplace _packageDbs = do + let installDirs = absoluteInstallDirs pkg lbi NoCopyDest + pkgdir | inplace = buildDir lbi uhcPackageDir (display pkgid) (display compilerid) + | otherwise = libdir installDirs uhcPackageSubDir (display compilerid) + createDirectoryIfMissingVerbose verbosity True pkgdir + writeUTF8File (pkgdir installedPkgConfig) + (showInstalledPackageInfo installedPkgInfo) + where + pkgid = packageId pkg + compilerid = compilerId (compiler lbi) diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/UserHooks.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/UserHooks.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/UserHooks.hs 1970-01-01 00:00:00.000000000 +0000 +++ 
cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/UserHooks.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,211 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.UserHooks +-- Copyright : Isaac Jones 2003-2005 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This defines the API that @Setup.hs@ scripts can use to customise the way +-- the build works. This module just defines the 'UserHooks' type. The +-- predefined sets of hooks that implement the @Simple@, @Make@ and @Configure@ +-- build systems are defined in "Distribution.Simple". The 'UserHooks' is a big +-- record of functions. There are 3 for each action, a pre, post and the action +-- itself. There are few other miscellaneous hooks, ones to extend the set of +-- programs and preprocessors and one to override the function used to read the +-- @.cabal@ file. +-- +-- This hooks type is widely agreed to not be the right solution. Partly this +-- is because changes to it usually break custom @Setup.hs@ files and yet many +-- internal code changes do require changes to the hooks. For example we cannot +-- pass any extra parameters to most of the functions that implement the +-- various phases because it would involve changing the types of the +-- corresponding hook. At some point it will have to be replaced. 
+ +module Distribution.Simple.UserHooks ( + UserHooks(..), Args, + emptyUserHooks, + ) where + +import Distribution.PackageDescription + (PackageDescription, GenericPackageDescription, + HookedBuildInfo, emptyHookedBuildInfo) +import Distribution.Simple.Program (Program) +import Distribution.Simple.Command (noExtraFlags) +import Distribution.Simple.PreProcess (PPSuffixHandler) +import Distribution.Simple.Setup + (ConfigFlags, BuildFlags, ReplFlags, CleanFlags, CopyFlags, + InstallFlags, SDistFlags, RegisterFlags, HscolourFlags, + HaddockFlags, TestFlags, BenchmarkFlags) +import Distribution.Simple.LocalBuildInfo (LocalBuildInfo) + +type Args = [String] + +-- | Hooks allow authors to add specific functionality before and after a +-- command is run, and also to specify additional preprocessors. +-- +-- * WARNING: The hooks interface is under rather constant flux as we try to +-- understand users needs. Setup files that depend on this interface may +-- break in future releases. +data UserHooks = UserHooks { + + -- | Used for @.\/setup test@ + runTests :: Args -> Bool -> PackageDescription -> LocalBuildInfo -> IO (), + -- | Read the description file + readDesc :: IO (Maybe GenericPackageDescription), + -- | Custom preprocessors in addition to and overriding 'knownSuffixHandlers'. + hookedPreProcessors :: [ PPSuffixHandler ], + -- | These programs are detected at configure time. Arguments for them are + -- added to the configure command. + hookedPrograms :: [Program], + + -- |Hook to run before configure command + preConf :: Args -> ConfigFlags -> IO HookedBuildInfo, + -- |Over-ride this hook to get different behavior during configure. + confHook :: (GenericPackageDescription, HookedBuildInfo) + -> ConfigFlags -> IO LocalBuildInfo, + -- |Hook to run after configure command + postConf :: Args -> ConfigFlags -> PackageDescription -> LocalBuildInfo -> IO (), + + -- |Hook to run before build command. Second arg indicates verbosity level. 
+ preBuild :: Args -> BuildFlags -> IO HookedBuildInfo, + + -- |Over-ride this hook to get different behavior during build. + buildHook :: PackageDescription -> LocalBuildInfo -> UserHooks -> BuildFlags -> IO (), + -- |Hook to run after build command. Second arg indicates verbosity level. + postBuild :: Args -> BuildFlags -> PackageDescription -> LocalBuildInfo -> IO (), + + -- |Hook to run before repl command. Second arg indicates verbosity level. + preRepl :: Args -> ReplFlags -> IO HookedBuildInfo, + -- |Over-ride this hook to get different behavior during interpretation. + replHook :: PackageDescription -> LocalBuildInfo -> UserHooks -> ReplFlags -> [String] -> IO (), + -- |Hook to run after repl command. Second arg indicates verbosity level. + postRepl :: Args -> ReplFlags -> PackageDescription -> LocalBuildInfo -> IO (), + + -- |Hook to run before clean command. Second arg indicates verbosity level. + preClean :: Args -> CleanFlags -> IO HookedBuildInfo, + -- |Over-ride this hook to get different behavior during clean. + cleanHook :: PackageDescription -> () -> UserHooks -> CleanFlags -> IO (), + -- |Hook to run after clean command. Second arg indicates verbosity level. + postClean :: Args -> CleanFlags -> PackageDescription -> () -> IO (), + + -- |Hook to run before copy command + preCopy :: Args -> CopyFlags -> IO HookedBuildInfo, + -- |Over-ride this hook to get different behavior during copy. + copyHook :: PackageDescription -> LocalBuildInfo -> UserHooks -> CopyFlags -> IO (), + -- |Hook to run after copy command + postCopy :: Args -> CopyFlags -> PackageDescription -> LocalBuildInfo -> IO (), + + -- |Hook to run before install command + preInst :: Args -> InstallFlags -> IO HookedBuildInfo, + + -- |Over-ride this hook to get different behavior during install. + instHook :: PackageDescription -> LocalBuildInfo -> UserHooks -> InstallFlags -> IO (), + -- |Hook to run after install command. 
postInst should be run + -- on the target, not on the build machine. + postInst :: Args -> InstallFlags -> PackageDescription -> LocalBuildInfo -> IO (), + + -- |Hook to run before sdist command. Second arg indicates verbosity level. + preSDist :: Args -> SDistFlags -> IO HookedBuildInfo, + -- |Over-ride this hook to get different behavior during sdist. + sDistHook :: PackageDescription -> Maybe LocalBuildInfo -> UserHooks -> SDistFlags -> IO (), + -- |Hook to run after sdist command. Second arg indicates verbosity level. + postSDist :: Args -> SDistFlags -> PackageDescription -> Maybe LocalBuildInfo -> IO (), + + -- |Hook to run before register command + preReg :: Args -> RegisterFlags -> IO HookedBuildInfo, + -- |Over-ride this hook to get different behavior during registration. + regHook :: PackageDescription -> LocalBuildInfo -> UserHooks -> RegisterFlags -> IO (), + -- |Hook to run after register command + postReg :: Args -> RegisterFlags -> PackageDescription -> LocalBuildInfo -> IO (), + + -- |Hook to run before unregister command + preUnreg :: Args -> RegisterFlags -> IO HookedBuildInfo, + -- |Over-ride this hook to get different behavior during unregistration. + unregHook :: PackageDescription -> LocalBuildInfo -> UserHooks -> RegisterFlags -> IO (), + -- |Hook to run after unregister command + postUnreg :: Args -> RegisterFlags -> PackageDescription -> LocalBuildInfo -> IO (), + + -- |Hook to run before hscolour command. Second arg indicates verbosity level. + preHscolour :: Args -> HscolourFlags -> IO HookedBuildInfo, + -- |Over-ride this hook to get different behavior during hscolour. + hscolourHook :: PackageDescription -> LocalBuildInfo -> UserHooks -> HscolourFlags -> IO (), + -- |Hook to run after hscolour command. Second arg indicates verbosity level. + postHscolour :: Args -> HscolourFlags -> PackageDescription -> LocalBuildInfo -> IO (), + + -- |Hook to run before haddock command. Second arg indicates verbosity level. 
+ preHaddock :: Args -> HaddockFlags -> IO HookedBuildInfo, + -- |Over-ride this hook to get different behavior during haddock. + haddockHook :: PackageDescription -> LocalBuildInfo -> UserHooks -> HaddockFlags -> IO (), + -- |Hook to run after haddock command. Second arg indicates verbosity level. + postHaddock :: Args -> HaddockFlags -> PackageDescription -> LocalBuildInfo -> IO (), + + -- |Hook to run before test command. + preTest :: Args -> TestFlags -> IO HookedBuildInfo, + -- |Over-ride this hook to get different behavior during test. + testHook :: Args -> PackageDescription -> LocalBuildInfo -> UserHooks -> TestFlags -> IO (), + -- |Hook to run after test command. + postTest :: Args -> TestFlags -> PackageDescription -> LocalBuildInfo -> IO (), + + -- |Hook to run before bench command. + preBench :: Args -> BenchmarkFlags -> IO HookedBuildInfo, + -- |Over-ride this hook to get different behavior during bench. + benchHook :: Args -> PackageDescription -> LocalBuildInfo -> UserHooks -> BenchmarkFlags -> IO (), + -- |Hook to run after bench command. + postBench :: Args -> BenchmarkFlags -> PackageDescription -> LocalBuildInfo -> IO () + } + +{-# DEPRECATED runTests "Please use the new testing interface instead!" #-} + +-- |Empty 'UserHooks' which do nothing. +emptyUserHooks :: UserHooks +emptyUserHooks + = UserHooks { + runTests = ru, + readDesc = return Nothing, + hookedPreProcessors = [], + hookedPrograms = [], + preConf = rn, + confHook = (\_ _ -> return (error "No local build info generated during configure. 
Over-ride empty configure hook.")), + postConf = ru, + preBuild = rn', + buildHook = ru, + postBuild = ru, + preRepl = \_ _ -> return emptyHookedBuildInfo, + replHook = \_ _ _ _ _ -> return (), + postRepl = ru, + preClean = rn, + cleanHook = ru, + postClean = ru, + preCopy = rn, + copyHook = ru, + postCopy = ru, + preInst = rn, + instHook = ru, + postInst = ru, + preSDist = rn, + sDistHook = ru, + postSDist = ru, + preReg = rn, + regHook = ru, + postReg = ru, + preUnreg = rn, + unregHook = ru, + postUnreg = ru, + preHscolour = rn, + hscolourHook = ru, + postHscolour = ru, + preHaddock = rn, + haddockHook = ru, + postHaddock = ru, + preTest = rn', + testHook = \_ -> ru, + postTest = ru, + preBench = rn', + benchHook = \_ -> ru, + postBench = ru + } + where rn args _ = noExtraFlags args >> return emptyHookedBuildInfo + rn' _ _ = return emptyHookedBuildInfo + ru _ _ _ _ = return () diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Utils.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Utils.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple/Utils.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple/Utils.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,1370 @@ +{-# LANGUAGE CPP, ForeignFunctionInterface #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple.Utils +-- Copyright : Isaac Jones, Simon Marlow 2003-2004 +-- License : BSD3 +-- portions Copyright (c) 2007, Galois Inc. +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- A large and somewhat miscellaneous collection of utility functions used +-- throughout the rest of the Cabal lib and in other tools that use the Cabal +-- lib like @cabal-install@. It has a very simple set of logging actions. 
It +-- has low level functions for running programs, a bunch of wrappers for +-- various directory and file functions that do extra logging. + +module Distribution.Simple.Utils ( + cabalVersion, + + -- * logging and errors + die, + dieWithLocation, + topHandler, topHandlerWith, + warn, notice, setupMessage, info, debug, + debugNoWrap, chattyTry, + printRawCommandAndArgs, printRawCommandAndArgsAndEnv, + + -- * running programs + rawSystemExit, + rawSystemExitCode, + rawSystemExitWithEnv, + rawSystemStdout, + rawSystemStdInOut, + rawSystemIOWithEnv, + maybeExit, + xargs, + findProgramLocation, + findProgramVersion, + + -- * copying files + smartCopySources, + createDirectoryIfMissingVerbose, + copyFileVerbose, + copyDirectoryRecursiveVerbose, + copyFiles, + copyFileTo, + + -- * installing files + installOrdinaryFile, + installExecutableFile, + installMaybeExecutableFile, + installOrdinaryFiles, + installExecutableFiles, + installMaybeExecutableFiles, + installDirectoryContents, + copyDirectoryRecursive, + + -- * File permissions + doesExecutableExist, + setFileOrdinary, + setFileExecutable, + + -- * file names + currentDir, + shortRelativePath, + + -- * finding files + findFile, + findFirstFile, + findFileWithExtension, + findFileWithExtension', + findModuleFile, + findModuleFiles, + getDirectoryContentsRecursive, + + -- * environment variables + isInSearchPath, + addLibraryPath, + + -- * simple file globbing + matchFileGlob, + matchDirFileGlob, + parseFileGlob, + FileGlob(..), + + -- * modification time + moreRecentFile, + existsAndIsMoreRecentThan, + + -- * temp files and dirs + TempFileOptions(..), defaultTempFileOptions, + withTempFile, withTempFileEx, + withTempDirectory, withTempDirectoryEx, + + -- * .cabal and .buildinfo files + defaultPackageDesc, + findPackageDesc, + tryFindPackageDesc, + defaultHookedPackageDesc, + findHookedPackageDesc, + + -- * reading and writing files safely + withFileContents, + writeFileAtomic, + rewriteFile, + + -- * Unicode + 
fromUTF8, + toUTF8, + readUTF8File, + withUTF8FileContents, + writeUTF8File, + normaliseLineEndings, + + -- * generic utils + dropWhileEndLE, + takeWhileEndLE, + equating, + comparing, + isInfixOf, + intercalate, + lowercase, + listUnion, + listUnionRight, + ordNub, + ordNubRight, + wrapText, + wrapLine, + ) where + +import Control.Monad + ( join, when, unless, filterM ) +import Control.Concurrent.MVar + ( newEmptyMVar, putMVar, takeMVar ) +import Data.List + ( nub, unfoldr, isPrefixOf, tails, intercalate ) +import Data.Char as Char + ( isDigit, toLower, chr, ord ) +import Data.Bits + ( Bits((.|.), (.&.), shiftL, shiftR) ) +import qualified Data.ByteString.Lazy as BS +import qualified Data.ByteString.Lazy.Char8 as BS.Char8 +import qualified Data.Set as Set + +import System.Directory + ( Permissions(executable), getDirectoryContents, getPermissions + , doesDirectoryExist, doesFileExist, removeFile, findExecutable + , getModificationTime ) +import System.Environment + ( getProgName ) +import System.Exit + ( exitWith, ExitCode(..) 
) +import System.FilePath + ( normalise, (), (<.>) + , getSearchPath, joinPath, takeDirectory, splitFileName + , splitExtension, splitExtensions, splitDirectories + , searchPathSeparator ) +import System.Directory + ( createDirectory, renameFile, removeDirectoryRecursive ) +import System.IO + ( Handle, openFile, openBinaryFile, openBinaryTempFileWithDefaultPermissions + , IOMode(ReadMode), hSetBinaryMode + , hGetContents, stderr, stdout, hPutStr, hFlush, hClose ) +import System.IO.Error as IO.Error + ( isDoesNotExistError, isAlreadyExistsError + , ioeSetFileName, ioeGetFileName, ioeGetErrorString ) +import System.IO.Error + ( ioeSetLocation, ioeGetLocation ) +import System.IO.Unsafe + ( unsafeInterleaveIO ) +import qualified Control.Exception as Exception + +import Distribution.Text + ( display, simpleParse ) +import Distribution.Package + ( PackageIdentifier ) +import Distribution.ModuleName (ModuleName) +import qualified Distribution.ModuleName as ModuleName +import Distribution.System + ( OS (..) 
) +import Distribution.Version + (Version(..)) + +import Control.Exception (IOException, evaluate, throwIO) +import Control.Concurrent (forkIO) +import qualified System.Process as Process + ( CreateProcess(..), StdStream(..), proc) +import System.Process + ( createProcess, rawSystem, runInteractiveProcess + , showCommandForUser, waitForProcess) +import Distribution.Compat.CopyFile + ( copyFile, copyOrdinaryFile, copyExecutableFile + , setFileOrdinary, setFileExecutable, setDirOrdinary ) +import Distribution.Compat.TempFile + ( openTempFile, createTempDirectory ) +import Distribution.Compat.Exception + ( tryIO, catchIO, catchExit ) +import Distribution.Verbosity + +#ifdef VERSION_base +import qualified Paths_Cabal (version) +#endif + +-- We only get our own version number when we're building with ourselves +cabalVersion :: Version +#if defined(VERSION_base) +cabalVersion = Paths_Cabal.version +#elif defined(CABAL_VERSION) +cabalVersion = Version [CABAL_VERSION] [] +#else +cabalVersion = Version [1,9999] [] --used when bootstrapping +#endif + +-- ---------------------------------------------------------------------------- +-- Exception and logging utils + +dieWithLocation :: FilePath -> Maybe Int -> String -> IO a +dieWithLocation filename lineno msg = + ioError . setLocation lineno + . 
flip ioeSetFileName (normalise filename) + $ userError msg + where + setLocation Nothing err = err + setLocation (Just n) err = ioeSetLocation err (show n) + +die :: String -> IO a +die msg = ioError (userError msg) + +topHandlerWith :: (Exception.IOException -> IO a) -> IO a -> IO a +topHandlerWith cont prog = catchIO prog handle + where + handle ioe = do + hFlush stdout + pname <- getProgName + hPutStr stderr (mesage pname) + cont ioe + where + mesage pname = wrapText (pname ++ ": " ++ file ++ detail) + file = case ioeGetFileName ioe of + Nothing -> "" + Just path -> path ++ location ++ ": " + location = case ioeGetLocation ioe of + l@(n:_) | Char.isDigit n -> ':' : l + _ -> "" + detail = ioeGetErrorString ioe + +topHandler :: IO a -> IO a +topHandler prog = topHandlerWith (const $ exitWith (ExitFailure 1)) prog + +-- | Non fatal conditions that may be indicative of an error or problem. +-- +-- We display these at the 'normal' verbosity level. +-- +warn :: Verbosity -> String -> IO () +warn verbosity msg = + when (verbosity >= normal) $ do + hFlush stdout + hPutStr stderr (wrapText ("Warning: " ++ msg)) + +-- | Useful status messages. +-- +-- We display these at the 'normal' verbosity level. +-- +-- This is for the ordinary helpful status messages that users see. Just +-- enough information to know that things are working but not floods of detail. +-- +notice :: Verbosity -> String -> IO () +notice verbosity msg = + when (verbosity >= normal) $ + putStr (wrapText msg) + +setupMessage :: Verbosity -> String -> PackageIdentifier -> IO () +setupMessage verbosity msg pkgid = + notice verbosity (msg ++ ' ': display pkgid ++ "...") + +-- | More detail on the operation of some action. 
+-- +-- We display these messages when the verbosity level is 'verbose' +-- +info :: Verbosity -> String -> IO () +info verbosity msg = + when (verbosity >= verbose) $ + putStr (wrapText msg) + +-- | Detailed internal debugging information +-- +-- We display these messages when the verbosity level is 'deafening' +-- +debug :: Verbosity -> String -> IO () +debug verbosity msg = + when (verbosity >= deafening) $ do + putStr (wrapText msg) + hFlush stdout + +-- | A variant of 'debug' that doesn't perform the automatic line +-- wrapping. Produces better output in some cases. +debugNoWrap :: Verbosity -> String -> IO () +debugNoWrap verbosity msg = + when (verbosity >= deafening) $ do + putStrLn msg + hFlush stdout + +-- | Perform an IO action, catching any IO exceptions and printing an error +-- if one occurs. +chattyTry :: String -- ^ a description of the action we were attempting + -> IO () -- ^ the action itself + -> IO () +chattyTry desc action = + catchIO action $ \exception -> + putStrLn $ "Error while " ++ desc ++ ": " ++ show exception + +-- ----------------------------------------------------------------------------- +-- Helper functions + +-- | Wraps text to the default line width. Existing newlines are preserved. +wrapText :: String -> String +wrapText = unlines + . map (intercalate "\n" + . map unwords + . wrapLine 79 + . words) + . lines + +-- | Wraps a list of words to a list of lines of words of a particular width. 
+wrapLine :: Int -> [String] -> [[String]] +wrapLine width = wrap 0 [] + where wrap :: Int -> [String] -> [String] -> [[String]] + wrap 0 [] (w:ws) + | length w + 1 > width + = wrap (length w) [w] ws + wrap col line (w:ws) + | col + length w + 1 > width + = reverse line : wrap 0 [] (w:ws) + wrap col line (w:ws) + = let col' = col + length w + 1 + in wrap col' (w:line) ws + wrap _ [] [] = [] + wrap _ line [] = [reverse line] + +-- ----------------------------------------------------------------------------- +-- rawSystem variants +maybeExit :: IO ExitCode -> IO () +maybeExit cmd = do + res <- cmd + unless (res == ExitSuccess) $ exitWith res + +printRawCommandAndArgs :: Verbosity -> FilePath -> [String] -> IO () +printRawCommandAndArgs verbosity path args = + printRawCommandAndArgsAndEnv verbosity path args Nothing + +printRawCommandAndArgsAndEnv :: Verbosity + -> FilePath + -> [String] + -> Maybe [(String, String)] + -> IO () +printRawCommandAndArgsAndEnv verbosity path args menv + | verbosity >= deafening = do + maybe (return ()) (putStrLn . ("Environment: " ++) . 
show) menv + print (path, args) + | verbosity >= verbose = putStrLn $ showCommandForUser path args + | otherwise = return () + + +-- Exit with the same exit code if the subcommand fails +rawSystemExit :: Verbosity -> FilePath -> [String] -> IO () +rawSystemExit verbosity path args = do + printRawCommandAndArgs verbosity path args + hFlush stdout + exitcode <- rawSystem path args + unless (exitcode == ExitSuccess) $ do + debug verbosity $ path ++ " returned " ++ show exitcode + exitWith exitcode + +rawSystemExitCode :: Verbosity -> FilePath -> [String] -> IO ExitCode +rawSystemExitCode verbosity path args = do + printRawCommandAndArgs verbosity path args + hFlush stdout + exitcode <- rawSystem path args + unless (exitcode == ExitSuccess) $ do + debug verbosity $ path ++ " returned " ++ show exitcode + return exitcode + +rawSystemExitWithEnv :: Verbosity + -> FilePath + -> [String] + -> [(String, String)] + -> IO () +rawSystemExitWithEnv verbosity path args env = do + printRawCommandAndArgsAndEnv verbosity path args (Just env) + hFlush stdout + (_,_,_,ph) <- createProcess $ + (Process.proc path args) { Process.env = (Just env) +#ifdef MIN_VERSION_process +#if MIN_VERSION_process(1,2,0) +-- delegate_ctlc has been added in process 1.2, and we still want to be able to +-- bootstrap GHC on systems not having that version + , Process.delegate_ctlc = True +#endif +#endif + } + exitcode <- waitForProcess ph + unless (exitcode == ExitSuccess) $ do + debug verbosity $ path ++ " returned " ++ show exitcode + exitWith exitcode + +-- Closes the passed in handles before returning. 
+rawSystemIOWithEnv :: Verbosity + -> FilePath + -> [String] + -> Maybe FilePath -- ^ New working dir or inherit + -> Maybe [(String, String)] -- ^ New environment or inherit + -> Maybe Handle -- ^ stdin + -> Maybe Handle -- ^ stdout + -> Maybe Handle -- ^ stderr + -> IO ExitCode +rawSystemIOWithEnv verbosity path args mcwd menv inp out err = do + printRawCommandAndArgsAndEnv verbosity path args menv + hFlush stdout + (_,_,_,ph) <- createProcess $ + (Process.proc path args) { Process.cwd = mcwd + , Process.env = menv + , Process.std_in = mbToStd inp + , Process.std_out = mbToStd out + , Process.std_err = mbToStd err +#ifdef MIN_VERSION_process +#if MIN_VERSION_process(1,2,0) +-- delegate_ctlc has been added in process 1.2, and we still want to be able to +-- bootstrap GHC on systems not having that version + , Process.delegate_ctlc = True +#endif +#endif + } + exitcode <- waitForProcess ph + unless (exitcode == ExitSuccess) $ do + debug verbosity $ path ++ " returned " ++ show exitcode + return exitcode + where + mbToStd :: Maybe Handle -> Process.StdStream + mbToStd = maybe Process.Inherit Process.UseHandle + +-- | Run a command and return its output. +-- +-- The output is assumed to be text in the locale encoding. +-- +rawSystemStdout :: Verbosity -> FilePath -> [String] -> IO String +rawSystemStdout verbosity path args = do + (output, errors, exitCode) <- rawSystemStdInOut verbosity path args + Nothing Nothing + Nothing False + when (exitCode /= ExitSuccess) $ + die errors + return output + +-- | Run a command and return its output, errors and exit status. Optionally +-- also supply some input. Also provides control over whether the binary/text +-- mode of the input and output. 
+-- +rawSystemStdInOut :: Verbosity + -> FilePath -- ^ Program location + -> [String] -- ^ Arguments + -> Maybe FilePath -- ^ New working dir or inherit + -> Maybe [(String, String)] -- ^ New environment or inherit + -> Maybe (String, Bool) -- ^ input text and binary mode + -> Bool -- ^ output in binary mode + -> IO (String, String, ExitCode) -- ^ output, errors, exit +rawSystemStdInOut verbosity path args mcwd menv input outputBinary = do + printRawCommandAndArgs verbosity path args + + Exception.bracket + (runInteractiveProcess path args mcwd menv) + (\(inh,outh,errh,_) -> hClose inh >> hClose outh >> hClose errh) + $ \(inh,outh,errh,pid) -> do + + -- output mode depends on what the caller wants + hSetBinaryMode outh outputBinary + -- but the errors are always assumed to be text (in the current locale) + hSetBinaryMode errh False + + -- fork off a couple threads to pull on the stderr and stdout + -- so if the process writes to stderr we do not block. + + err <- hGetContents errh + out <- hGetContents outh + + mv <- newEmptyMVar + let force str = (evaluate (length str) >> return ()) + `Exception.finally` putMVar mv () + --TODO: handle exceptions like text decoding. 
+ _ <- forkIO $ force out + _ <- forkIO $ force err + + -- push all the input, if any + case input of + Nothing -> return () + Just (inputStr, inputBinary) -> do + -- input mode depends on what the caller wants + hSetBinaryMode inh inputBinary + hPutStr inh inputStr + hClose inh + --TODO: this probably fails if the process refuses to consume + -- or if it closes stdin (eg if it exits) + + -- wait for both to finish, in either order + takeMVar mv + takeMVar mv + + -- wait for the program to terminate + exitcode <- waitForProcess pid + unless (exitcode == ExitSuccess) $ + debug verbosity $ path ++ " returned " ++ show exitcode + ++ if null err then "" else + " with error message:\n" ++ err + ++ case input of + Nothing -> "" + Just ("", _) -> "" + Just (inp, _) -> "\nstdin input:\n" ++ inp + + return (out, err, exitcode) + + +-- | Look for a program on the path. +findProgramLocation :: Verbosity -> FilePath -> IO (Maybe FilePath) +findProgramLocation verbosity prog = do + debug verbosity $ "searching for " ++ prog ++ " in path." + res <- findExecutable prog + case res of + Nothing -> debug verbosity ("Cannot find " ++ prog ++ " on the path") + Just path -> debug verbosity ("found " ++ prog ++ " at "++ path) + return res + + +-- | Look for a program and try to find it's version number. It can accept +-- either an absolute path or the name of a program binary, in which case we +-- will look for the program on the path. 
+-- +findProgramVersion :: String -- ^ version args + -> (String -> String) -- ^ function to select version + -- number from program output + -> Verbosity + -> FilePath -- ^ location + -> IO (Maybe Version) +findProgramVersion versionArg selectVersion verbosity path = do + str <- rawSystemStdout verbosity path [versionArg] + `catchIO` (\_ -> return "") + `catchExit` (\_ -> return "") + let version :: Maybe Version + version = simpleParse (selectVersion str) + case version of + Nothing -> warn verbosity $ "cannot determine version of " ++ path + ++ " :\n" ++ show str + Just v -> debug verbosity $ path ++ " is version " ++ display v + return version + + +-- | Like the Unix xargs program. Useful for when we've got very long command +-- lines that might overflow an OS limit on command line length and so you +-- need to invoke a command multiple times to get all the args in. +-- +-- Use it with either of the rawSystem variants above. For example: +-- +-- > xargs (32*1024) (rawSystemExit verbosity) prog fixedArgs bigArgs +-- +xargs :: Int -> ([String] -> IO ()) + -> [String] -> [String] -> IO () +xargs maxSize rawSystemFun fixedArgs bigArgs = + let fixedArgSize = sum (map length fixedArgs) + length fixedArgs + chunkSize = maxSize - fixedArgSize + in mapM_ (rawSystemFun . (fixedArgs ++)) (chunks chunkSize bigArgs) + + where chunks len = unfoldr $ \s -> + if null s then Nothing + else Just (chunk [] len s) + + chunk acc _ [] = (reverse acc,[]) + chunk acc len (s:ss) + | len' < len = chunk (s:acc) (len-len'-1) ss + | otherwise = (reverse acc, s:ss) + where len' = length s + +-- ------------------------------------------------------------ +-- * File Utilities +-- ------------------------------------------------------------ + +---------------- +-- Finding files + +-- | Find a file by looking in a search path. The file path must match exactly. 
+-- +findFile :: [FilePath] -- ^search locations + -> FilePath -- ^File Name + -> IO FilePath +findFile searchPath fileName = + findFirstFile id + [ path fileName + | path <- nub searchPath] + >>= maybe (die $ fileName ++ " doesn't exist") return + +-- | Find a file by looking in a search path with one of a list of possible +-- file extensions. The file base name should be given and it will be tried +-- with each of the extensions in each element of the search path. +-- +findFileWithExtension :: [String] + -> [FilePath] + -> FilePath + -> IO (Maybe FilePath) +findFileWithExtension extensions searchPath baseName = + findFirstFile id + [ path baseName <.> ext + | path <- nub searchPath + , ext <- nub extensions ] + +-- | Like 'findFileWithExtension' but returns which element of the search path +-- the file was found in, and the file path relative to that base directory. +-- +findFileWithExtension' :: [String] + -> [FilePath] + -> FilePath + -> IO (Maybe (FilePath, FilePath)) +findFileWithExtension' extensions searchPath baseName = + findFirstFile (uncurry ()) + [ (path, baseName <.> ext) + | path <- nub searchPath + , ext <- nub extensions ] + +findFirstFile :: (a -> FilePath) -> [a] -> IO (Maybe a) +findFirstFile file = findFirst + where findFirst [] = return Nothing + findFirst (x:xs) = do exists <- doesFileExist (file x) + if exists + then return (Just x) + else findFirst xs + +-- | Finds the files corresponding to a list of Haskell module names. +-- +-- As 'findModuleFile' but for a list of module names. +-- +findModuleFiles :: [FilePath] -- ^ build prefix (location of objects) + -> [String] -- ^ search suffixes + -> [ModuleName] -- ^ modules + -> IO [(FilePath, FilePath)] +findModuleFiles searchPath extensions moduleNames = + mapM (findModuleFile searchPath extensions) moduleNames + +-- | Find the file corresponding to a Haskell module name. +-- +-- This is similar to 'findFileWithExtension'' but specialised to a module +-- name. 
The function fails if the file corresponding to the module is missing. +-- +findModuleFile :: [FilePath] -- ^ build prefix (location of objects) + -> [String] -- ^ search suffixes + -> ModuleName -- ^ module + -> IO (FilePath, FilePath) +findModuleFile searchPath extensions moduleName = + maybe notFound return + =<< findFileWithExtension' extensions searchPath + (ModuleName.toFilePath moduleName) + where + notFound = die $ "Error: Could not find module: " ++ display moduleName + ++ " with any suffix: " ++ show extensions + ++ " in the search path: " ++ show searchPath + +-- | List all the files in a directory and all subdirectories. +-- +-- The order places files in sub-directories after all the files in their +-- parent directories. The list is generated lazily so is not well defined if +-- the source directory structure changes before the list is used. +-- +getDirectoryContentsRecursive :: FilePath -> IO [FilePath] +getDirectoryContentsRecursive topdir = recurseDirectories [""] + where + recurseDirectories :: [FilePath] -> IO [FilePath] + recurseDirectories [] = return [] + recurseDirectories (dir:dirs) = unsafeInterleaveIO $ do + (files, dirs') <- collect [] [] =<< getDirectoryContents (topdir dir) + files' <- recurseDirectories (dirs' ++ dirs) + return (files ++ files') + + where + collect files dirs' [] = return (reverse files + ,reverse dirs') + collect files dirs' (entry:entries) | ignore entry + = collect files dirs' entries + collect files dirs' (entry:entries) = do + let dirEntry = dir entry + isDirectory <- doesDirectoryExist (topdir dirEntry) + if isDirectory + then collect files (dirEntry:dirs') entries + else collect (dirEntry:files) dirs' entries + + ignore ['.'] = True + ignore ['.', '.'] = True + ignore _ = False + +------------------------ +-- Environment variables + +-- | Is this directory in the system search path? 
+isInSearchPath :: FilePath -> IO Bool +isInSearchPath path = fmap (elem path) getSearchPath + +addLibraryPath :: OS + -> [FilePath] + -> [(String,String)] + -> [(String,String)] +addLibraryPath os paths = addEnv + where + pathsString = intercalate [searchPathSeparator] paths + ldPath = case os of + OSX -> "DYLD_LIBRARY_PATH" + _ -> "LD_LIBRARY_PATH" + + addEnv [] = [(ldPath,pathsString)] + addEnv ((key,value):xs) + | key == ldPath = + if null value + then (key,pathsString):xs + else (key,value ++ (searchPathSeparator:pathsString)):xs + | otherwise = (key,value):addEnv xs + +---------------- +-- File globbing + +data FileGlob + -- | No glob at all, just an ordinary file + = NoGlob FilePath + + -- | dir prefix and extension, like @\"foo\/bar\/\*.baz\"@ corresponds to + -- @FileGlob \"foo\/bar\" \".baz\"@ + | FileGlob FilePath String + +parseFileGlob :: FilePath -> Maybe FileGlob +parseFileGlob filepath = case splitExtensions filepath of + (filepath', ext) -> case splitFileName filepath' of + (dir, "*") | '*' `elem` dir + || '*' `elem` ext + || null ext -> Nothing + | null dir -> Just (FileGlob "." ext) + | otherwise -> Just (FileGlob dir ext) + _ | '*' `elem` filepath -> Nothing + | otherwise -> Just (NoGlob filepath) + +matchFileGlob :: FilePath -> IO [FilePath] +matchFileGlob = matchDirFileGlob "." + +matchDirFileGlob :: FilePath -> FilePath -> IO [FilePath] +matchDirFileGlob dir filepath = case parseFileGlob filepath of + Nothing -> die $ "invalid file glob '" ++ filepath + ++ "'. Wildcards '*' are only allowed in place of the file" + ++ " name, not in the directory name or file extension." + ++ " If a wildcard is used it must be with an file extension." 
+ Just (NoGlob filepath') -> return [filepath'] + Just (FileGlob dir' ext) -> do + files <- getDirectoryContents (dir dir') + case [ dir' file + | file <- files + , let (name, ext') = splitExtensions file + , not (null name) && ext' == ext ] of + [] -> die $ "filepath wildcard '" ++ filepath + ++ "' does not match any files." + matches -> return matches + +-------------------- +-- Modification time + +-- | Compare the modification times of two files to see if the first is newer +-- than the second. The first file must exist but the second need not. +-- The expected use case is when the second file is generated using the first. +-- In this use case, if the result is True then the second file is out of date. +-- +moreRecentFile :: FilePath -> FilePath -> IO Bool +moreRecentFile a b = do + exists <- doesFileExist b + if not exists + then return True + else do tb <- getModificationTime b + ta <- getModificationTime a + return (ta > tb) + +-- | Like 'moreRecentFile', but also checks that the first file exists. +existsAndIsMoreRecentThan :: FilePath -> FilePath -> IO Bool +existsAndIsMoreRecentThan a b = do + exists <- doesFileExist a + if not exists + then return False + else a `moreRecentFile` b + +---------------------------------------- +-- Copying and installing files and dirs + +-- | Same as 'createDirectoryIfMissing' but logs at higher verbosity levels. +-- +createDirectoryIfMissingVerbose :: Verbosity + -> Bool -- ^ Create its parents too? + -> FilePath + -> IO () +createDirectoryIfMissingVerbose verbosity create_parents path0 + | create_parents = createDirs (parents path0) + | otherwise = createDirs (take 1 (parents path0)) + where + parents = reverse . scanl1 () . splitDirectories . 
normalise + + createDirs [] = return () + createDirs (dir:[]) = createDir dir throwIO + createDirs (dir:dirs) = + createDir dir $ \_ -> do + createDirs dirs + createDir dir throwIO + + createDir :: FilePath -> (IOException -> IO ()) -> IO () + createDir dir notExistHandler = do + r <- tryIO $ createDirectoryVerbose verbosity dir + case (r :: Either IOException ()) of + Right () -> return () + Left e + | isDoesNotExistError e -> notExistHandler e + -- createDirectory (and indeed POSIX mkdir) does not distinguish + -- between a dir already existing and a file already existing. So we + -- check for it here. Unfortunately there is a slight race condition + -- here, but we think it is benign. It could report an exception in + -- the case that the dir did exist but another process deletes the + -- directory and creates a file in its place before we can check + -- that the directory did indeed exist. + | isAlreadyExistsError e -> (do + isDir <- doesDirectoryExist dir + if isDir then return () + else throwIO e + ) `catchIO` ((\_ -> return ()) :: IOException -> IO ()) + | otherwise -> throwIO e + +createDirectoryVerbose :: Verbosity -> FilePath -> IO () +createDirectoryVerbose verbosity dir = do + info verbosity $ "creating " ++ dir + createDirectory dir + setDirOrdinary dir + +-- | Copies a file without copying file permissions. The target file is created +-- with default permissions. Any existing target file is replaced. +-- +-- At higher verbosity levels it logs an info message. +-- +copyFileVerbose :: Verbosity -> FilePath -> FilePath -> IO () +copyFileVerbose verbosity src dest = do + info verbosity ("copy " ++ src ++ " to " ++ dest) + copyFile src dest + +-- | Install an ordinary file. This is like a file copy but the permissions +-- are set appropriately for an installed file. On Unix it is \"-rw-r--r--\" +-- while on Windows it uses the default permissions for the target directory. 
+-- +installOrdinaryFile :: Verbosity -> FilePath -> FilePath -> IO () +installOrdinaryFile verbosity src dest = do + info verbosity ("Installing " ++ src ++ " to " ++ dest) + copyOrdinaryFile src dest + +-- | Install an executable file. This is like a file copy but the permissions +-- are set appropriately for an installed file. On Unix it is \"-rwxr-xr-x\" +-- while on Windows it uses the default permissions for the target directory. +-- +installExecutableFile :: Verbosity -> FilePath -> FilePath -> IO () +installExecutableFile verbosity src dest = do + info verbosity ("Installing executable " ++ src ++ " to " ++ dest) + copyExecutableFile src dest + +-- | Install a file that may or not be executable, preserving permissions. +installMaybeExecutableFile :: Verbosity -> FilePath -> FilePath -> IO () +installMaybeExecutableFile verbosity src dest = do + perms <- getPermissions src + if (executable perms) --only checks user x bit + then installExecutableFile verbosity src dest + else installOrdinaryFile verbosity src dest + +-- | Given a relative path to a file, copy it to the given directory, preserving +-- the relative path and creating the parent directories if needed. +copyFileTo :: Verbosity -> FilePath -> FilePath -> IO () +copyFileTo verbosity dir file = do + let targetFile = dir file + createDirectoryIfMissingVerbose verbosity True (takeDirectory targetFile) + installOrdinaryFile verbosity file targetFile + +-- | Common implementation of 'copyFiles', 'installOrdinaryFiles', +-- 'installExecutableFiles' and 'installMaybeExecutableFiles'. +copyFilesWith :: (Verbosity -> FilePath -> FilePath -> IO ()) + -> Verbosity -> FilePath -> [(FilePath, FilePath)] -> IO () +copyFilesWith doCopy verbosity targetDir srcFiles = do + + -- Create parent directories for everything + let dirs = map (targetDir ) . nub . map (takeDirectory . 
snd) $ srcFiles + mapM_ (createDirectoryIfMissingVerbose verbosity True) dirs + + -- Copy all the files + sequence_ [ let src = srcBase srcFile + dest = targetDir srcFile + in doCopy verbosity src dest + | (srcBase, srcFile) <- srcFiles ] + +-- | Copies a bunch of files to a target directory, preserving the directory +-- structure in the target location. The target directories are created if they +-- do not exist. +-- +-- The files are identified by a pair of base directory and a path relative to +-- that base. It is only the relative part that is preserved in the +-- destination. +-- +-- For example: +-- +-- > copyFiles normal "dist/src" +-- > [("", "src/Foo.hs"), ("dist/build/", "src/Bar.hs")] +-- +-- This would copy \"src\/Foo.hs\" to \"dist\/src\/src\/Foo.hs\" and +-- copy \"dist\/build\/src\/Bar.hs\" to \"dist\/src\/src\/Bar.hs\". +-- +-- This operation is not atomic. Any IO failure during the copy (including any +-- missing source files) leaves the target in an unknown state so it is best to +-- use it with a freshly created directory so that it can be simply deleted if +-- anything goes wrong. +-- +copyFiles :: Verbosity -> FilePath -> [(FilePath, FilePath)] -> IO () +copyFiles = copyFilesWith copyFileVerbose + +-- | This is like 'copyFiles' but uses 'installOrdinaryFile'. +-- +installOrdinaryFiles :: Verbosity -> FilePath -> [(FilePath, FilePath)] -> IO () +installOrdinaryFiles = copyFilesWith installOrdinaryFile + +-- | This is like 'copyFiles' but uses 'installExecutableFile'. +-- +installExecutableFiles :: Verbosity -> FilePath -> [(FilePath, FilePath)] + -> IO () +installExecutableFiles = copyFilesWith installExecutableFile + +-- | This is like 'copyFiles' but uses 'installMaybeExecutableFile'. 
+-- +installMaybeExecutableFiles :: Verbosity -> FilePath -> [(FilePath, FilePath)] + -> IO () +installMaybeExecutableFiles = copyFilesWith installMaybeExecutableFile + +-- | This installs all the files in a directory to a target location, +-- preserving the directory layout. All the files are assumed to be ordinary +-- rather than executable files. +-- +installDirectoryContents :: Verbosity -> FilePath -> FilePath -> IO () +installDirectoryContents verbosity srcDir destDir = do + info verbosity ("copy directory '" ++ srcDir ++ "' to '" ++ destDir ++ "'.") + srcFiles <- getDirectoryContentsRecursive srcDir + installOrdinaryFiles verbosity destDir [ (srcDir, f) | f <- srcFiles ] + +-- | Recursively copy the contents of one directory to another path. +copyDirectoryRecursive :: Verbosity -> FilePath -> FilePath -> IO () +copyDirectoryRecursive verbosity srcDir destDir = do + info verbosity ("copy directory '" ++ srcDir ++ "' to '" ++ destDir ++ "'.") + srcFiles <- getDirectoryContentsRecursive srcDir + copyFilesWith (const copyFile) verbosity destDir [ (srcDir, f) | f <- srcFiles ] + +------------------- +-- File permissions + +-- | Like 'doesFileExist', but also checks that the file is executable. 
+doesExecutableExist :: FilePath -> IO Bool +doesExecutableExist f = do + exists <- doesFileExist f + if exists + then do perms <- getPermissions f + return (executable perms) + else return False + +--------------------------------- +-- Deprecated file copy functions + +{-# DEPRECATED smartCopySources + "Use findModuleFiles and copyFiles or installOrdinaryFiles" #-} +smartCopySources :: Verbosity -> [FilePath] -> FilePath + -> [ModuleName] -> [String] -> IO () +smartCopySources verbosity searchPath targetDir moduleNames extensions = + findModuleFiles searchPath extensions moduleNames + >>= copyFiles verbosity targetDir + +{-# DEPRECATED copyDirectoryRecursiveVerbose + "You probably want installDirectoryContents instead" #-} +copyDirectoryRecursiveVerbose :: Verbosity -> FilePath -> FilePath -> IO () +copyDirectoryRecursiveVerbose verbosity srcDir destDir = do + info verbosity ("copy directory '" ++ srcDir ++ "' to '" ++ destDir ++ "'.") + srcFiles <- getDirectoryContentsRecursive srcDir + copyFiles verbosity destDir [ (srcDir, f) | f <- srcFiles ] + +--------------------------- +-- Temporary files and dirs + +-- | Advanced options for 'withTempFile' and 'withTempDirectory'. +data TempFileOptions = TempFileOptions { + optKeepTempFiles :: Bool -- ^ Keep temporary files? + } + +defaultTempFileOptions :: TempFileOptions +defaultTempFileOptions = TempFileOptions { optKeepTempFiles = False } + +-- | Use a temporary filename that doesn't already exist. +-- +withTempFile :: FilePath -- ^ Temp dir to create the file in + -> String -- ^ File name template. See 'openTempFile'. + -> (FilePath -> Handle -> IO a) -> IO a +withTempFile tmpDir template action = + withTempFileEx defaultTempFileOptions tmpDir template action + +-- | A version of 'withTempFile' that additionally takes a 'TempFileOptions' +-- argument. +withTempFileEx :: TempFileOptions + -> FilePath -- ^ Temp dir to create the file in + -> String -- ^ File name template. See 'openTempFile'. 
+ -> (FilePath -> Handle -> IO a) -> IO a +withTempFileEx opts tmpDir template action = + Exception.bracket + (openTempFile tmpDir template) + (\(name, handle) -> do hClose handle + unless (optKeepTempFiles opts) $ removeFile name) + (uncurry action) + +-- | Create and use a temporary directory. +-- +-- Creates a new temporary directory inside the given directory, making use +-- of the template. The temp directory is deleted after use. For example: +-- +-- > withTempDirectory verbosity "src" "sdist." $ \tmpDir -> do ... +-- +-- The @tmpDir@ will be a new subdirectory of the given directory, e.g. +-- @src/sdist.342@. +-- +withTempDirectory :: Verbosity + -> FilePath -> String -> (FilePath -> IO a) -> IO a +withTempDirectory verbosity targetDir template = + withTempDirectoryEx verbosity defaultTempFileOptions targetDir template + +-- | A version of 'withTempDirectory' that additionally takes a +-- 'TempFileOptions' argument. +withTempDirectoryEx :: Verbosity + -> TempFileOptions + -> FilePath -> String -> (FilePath -> IO a) -> IO a +withTempDirectoryEx _verbosity opts targetDir template = + Exception.bracket + (createTempDirectory targetDir template) + (unless (optKeepTempFiles opts) . removeDirectoryRecursive) + +----------------------------------- +-- Safely reading and writing files + +-- | Gets the contents of a file, but guarantee that it gets closed. +-- +-- The file is read lazily but if it is not fully consumed by the action then +-- the remaining input is truncated and the file is closed. +-- +withFileContents :: FilePath -> (String -> IO a) -> IO a +withFileContents name action = + Exception.bracket (openFile name ReadMode) hClose + (\hnd -> hGetContents hnd >>= action) + +-- | Writes a file atomically. +-- +-- The file is either written successfully or an IO exception is raised and +-- the original file is left unchanged. +-- +-- On windows it is not possible to delete a file that is open by a process. 
+-- This case will give an IO exception but the atomic property is not affected. +-- +writeFileAtomic :: FilePath -> BS.ByteString -> IO () +writeFileAtomic targetPath content = do + let (targetDir, targetFile) = splitFileName targetPath + Exception.bracketOnError + (openBinaryTempFileWithDefaultPermissions targetDir $ targetFile <.> "tmp") + (\(tmpPath, handle) -> hClose handle >> removeFile tmpPath) + (\(tmpPath, handle) -> do + BS.hPut handle content + hClose handle + renameFile tmpPath targetPath) + +-- | Write a file but only if it would have new content. If we would be writing +-- the same as the existing content then leave the file as is so that we do not +-- update the file's modification time. +-- +-- NB: the file is assumed to be ASCII-encoded. +rewriteFile :: FilePath -> String -> IO () +rewriteFile path newContent = + flip catchIO mightNotExist $ do + existingContent <- readFile path + _ <- evaluate (length existingContent) + unless (existingContent == newContent) $ + writeFileAtomic path (BS.Char8.pack newContent) + where + mightNotExist e | isDoesNotExistError e = writeFileAtomic path + (BS.Char8.pack newContent) + | otherwise = ioError e + +-- | The path name that represents the current directory. +-- In Unix, it's @\".\"@, but this is system-specific. +-- (E.g. AmigaOS uses the empty string @\"\"@ for the current directory.) +currentDir :: FilePath +currentDir = "." 
+ +shortRelativePath :: FilePath -> FilePath -> FilePath +shortRelativePath from to = + case dropCommonPrefix (splitDirectories from) (splitDirectories to) of + (stuff, path) -> joinPath (map (const "..") stuff ++ path) + where + dropCommonPrefix :: Eq a => [a] -> [a] -> ([a],[a]) + dropCommonPrefix (x:xs) (y:ys) + | x == y = dropCommonPrefix xs ys + dropCommonPrefix xs ys = (xs,ys) + +-- ------------------------------------------------------------ +-- * Finding the description file +-- ------------------------------------------------------------ + +-- |Package description file (/pkgname/@.cabal@) +defaultPackageDesc :: Verbosity -> IO FilePath +defaultPackageDesc _verbosity = tryFindPackageDesc currentDir + +-- |Find a package description file in the given directory. Looks for +-- @.cabal@ files. +findPackageDesc :: FilePath -- ^Where to look + -> IO (Either String FilePath) -- ^.cabal +findPackageDesc dir + = do files <- getDirectoryContents dir + -- to make sure we do not mistake a ~/.cabal/ dir for a .cabal + -- file we filter to exclude dirs and null base file names: + cabalFiles <- filterM doesFileExist + [ dir file + | file <- files + , let (name, ext) = splitExtension file + , not (null name) && ext == ".cabal" ] + case cabalFiles of + [] -> return (Left noDesc) + [cabalFile] -> return (Right cabalFile) + multiple -> return (Left $ multiDesc multiple) + + where + noDesc :: String + noDesc = "No cabal file found.\n" + ++ "Please create a package description file .cabal" + + multiDesc :: [String] -> String + multiDesc l = "Multiple cabal files found.\n" + ++ "Please use only one of: " + ++ intercalate ", " l + +-- |Like 'findPackageDesc', but calls 'die' in case of error. +tryFindPackageDesc :: FilePath -> IO FilePath +tryFindPackageDesc dir = join . 
fmap (either die return) $ findPackageDesc dir + +-- |Optional auxiliary package information file (/pkgname/@.buildinfo@) +defaultHookedPackageDesc :: IO (Maybe FilePath) +defaultHookedPackageDesc = findHookedPackageDesc currentDir + +-- |Find auxiliary package information in the given directory. +-- Looks for @.buildinfo@ files. +findHookedPackageDesc + :: FilePath -- ^Directory to search + -> IO (Maybe FilePath) -- ^/dir/@\/@/pkgname/@.buildinfo@, if present +findHookedPackageDesc dir = do + files <- getDirectoryContents dir + buildInfoFiles <- filterM doesFileExist + [ dir file + | file <- files + , let (name, ext) = splitExtension file + , not (null name) && ext == buildInfoExt ] + case buildInfoFiles of + [] -> return Nothing + [f] -> return (Just f) + _ -> die ("Multiple files with extension " ++ buildInfoExt) + +buildInfoExt :: String +buildInfoExt = ".buildinfo" + +-- ------------------------------------------------------------ +-- * Unicode stuff +-- ------------------------------------------------------------ + +-- This is a modification of the UTF8 code from gtk2hs and the +-- utf8-string package. + +fromUTF8 :: String -> String +fromUTF8 [] = [] +fromUTF8 (c:cs) + | c <= '\x7F' = c : fromUTF8 cs + | c <= '\xBF' = replacementChar : fromUTF8 cs + | c <= '\xDF' = twoBytes c cs + | c <= '\xEF' = moreBytes 3 0x800 cs (ord c .&. 0xF) + | c <= '\xF7' = moreBytes 4 0x10000 cs (ord c .&. 0x7) + | c <= '\xFB' = moreBytes 5 0x200000 cs (ord c .&. 0x3) + | c <= '\xFD' = moreBytes 6 0x4000000 cs (ord c .&. 0x1) + | otherwise = replacementChar : fromUTF8 cs + where + twoBytes c0 (c1:cs') + | ord c1 .&. 0xC0 == 0x80 + = let d = ((ord c0 .&. 0x1F) `shiftL` 6) + .|. (ord c1 .&. 
0x3F) + in if d >= 0x80 + then chr d : fromUTF8 cs' + else replacementChar : fromUTF8 cs' + twoBytes _ cs' = replacementChar : fromUTF8 cs' + + moreBytes :: Int -> Int -> [Char] -> Int -> [Char] + moreBytes 1 overlong cs' acc + | overlong <= acc && acc <= 0x10FFFF + && (acc < 0xD800 || 0xDFFF < acc) + && (acc < 0xFFFE || 0xFFFF < acc) + = chr acc : fromUTF8 cs' + + | otherwise + = replacementChar : fromUTF8 cs' + + moreBytes byteCount overlong (cn:cs') acc + | ord cn .&. 0xC0 == 0x80 + = moreBytes (byteCount-1) overlong cs' + ((acc `shiftL` 6) .|. ord cn .&. 0x3F) + + moreBytes _ _ cs' _ + = replacementChar : fromUTF8 cs' + + replacementChar = '\xfffd' + +toUTF8 :: String -> String +toUTF8 [] = [] +toUTF8 (c:cs) + | c <= '\x07F' = c + : toUTF8 cs + | c <= '\x7FF' = chr (0xC0 .|. (w `shiftR` 6)) + : chr (0x80 .|. (w .&. 0x3F)) + : toUTF8 cs + | c <= '\xFFFF'= chr (0xE0 .|. (w `shiftR` 12)) + : chr (0x80 .|. ((w `shiftR` 6) .&. 0x3F)) + : chr (0x80 .|. (w .&. 0x3F)) + : toUTF8 cs + | otherwise = chr (0xf0 .|. (w `shiftR` 18)) + : chr (0x80 .|. ((w `shiftR` 12) .&. 0x3F)) + : chr (0x80 .|. ((w `shiftR` 6) .&. 0x3F)) + : chr (0x80 .|. (w .&. 0x3F)) + : toUTF8 cs + where w = ord c + +-- | Ignore a Unicode byte order mark (BOM) at the beginning of the input +-- +ignoreBOM :: String -> String +ignoreBOM ('\xFEFF':string) = string +ignoreBOM string = string + +-- | Reads a UTF8 encoded text file as a Unicode String +-- +-- Reads lazily using ordinary 'readFile'. +-- +readUTF8File :: FilePath -> IO String +readUTF8File f = fmap (ignoreBOM . fromUTF8) + . hGetContents =<< openBinaryFile f ReadMode + +-- | Reads a UTF8 encoded text file as a Unicode String +-- +-- Same behaviour as 'withFileContents'. +-- +withUTF8FileContents :: FilePath -> (String -> IO a) -> IO a +withUTF8FileContents name action = + Exception.bracket + (openBinaryFile name ReadMode) + hClose + (\hnd -> hGetContents hnd >>= action . ignoreBOM . 
fromUTF8) + +-- | Writes a Unicode String as a UTF8 encoded text file. +-- +-- Uses 'writeFileAtomic', so provides the same guarantees. +-- +writeUTF8File :: FilePath -> String -> IO () +writeUTF8File path = writeFileAtomic path . BS.Char8.pack . toUTF8 + +-- | Fix different systems silly line ending conventions +normaliseLineEndings :: String -> String +normaliseLineEndings [] = [] +normaliseLineEndings ('\r':'\n':s) = '\n' : normaliseLineEndings s -- windows +normaliseLineEndings ('\r':s) = '\n' : normaliseLineEndings s -- old OS X +normaliseLineEndings ( c :s) = c : normaliseLineEndings s + +-- ------------------------------------------------------------ +-- * Common utils +-- ------------------------------------------------------------ + +-- | @dropWhileEndLE p@ is equivalent to @reverse . dropWhile p . reverse@, but +-- quite a bit faster. The difference between "Data.List.dropWhileEnd" and this +-- version is that the one in "Data.List" is strict in elements, but spine-lazy, +-- while this one is spine-strict but lazy in elements. That's what @LE@ stands +-- for - "lazy in elements". +-- +-- Example: +-- +-- @ +-- > tail $ Data.List.dropWhileEnd (<3) [undefined, 5, 4, 3, 2, 1] +-- *** Exception: Prelude.undefined +-- > tail $ dropWhileEndLE (<3) [undefined, 5, 4, 3, 2, 1] +-- [5,4,3] +-- > take 3 $ Data.List.dropWhileEnd (<3) [5, 4, 3, 2, 1, undefined] +-- [5,4,3] +-- > take 3 $ dropWhileEndLE (<3) [5, 4, 3, 2, 1, undefined] +-- *** Exception: Prelude.undefined +-- @ +dropWhileEndLE :: (a -> Bool) -> [a] -> [a] +dropWhileEndLE p = foldr (\x r -> if null r && p x then [] else x:r) [] + +-- | @takeWhileEndLE p@ is equivalent to @reverse . takeWhile p . reverse@, but +-- is usually faster (as well as being easier to read). +takeWhileEndLE :: (a -> Bool) -> [a] -> [a] +takeWhileEndLE p = fst . 
foldr go ([], False) + where + go x (rest, done) + | not done && p x = (x:rest, False) + | otherwise = (rest, True) + +-- | Like "Data.List.nub", but has @O(n log n)@ complexity instead of +-- @O(n^2)@. Code for 'ordNub' and 'listUnion' taken from Niklas Hambüchen's +-- package. +ordNub :: (Ord a) => [a] -> [a] +ordNub l = go Set.empty l + where + go _ [] = [] + go s (x:xs) = if x `Set.member` s then go s xs + else x : go (Set.insert x s) xs + +-- | Like "Data.List.union", but has @O(n log n)@ complexity instead of +-- @O(n^2)@. +listUnion :: (Ord a) => [a] -> [a] -> [a] +listUnion a b = a ++ ordNub (filter (`Set.notMember` aSet) b) + where + aSet = Set.fromList a + +-- | A right-biased version of 'ordNub'. +-- +-- Example: +-- +-- @ +-- > ordNub [1,2,1] +-- [1,2] +-- > ordNubRight [1,2,1] +-- [2,1] +-- @ +ordNubRight :: (Ord a) => [a] -> [a] +ordNubRight = fst . foldr go ([], Set.empty) + where + go x p@(l, s) = if x `Set.member` s then p + else (x:l, Set.insert x s) + +-- | A right-biased version of 'listUnion'. 
+-- +-- Example: +-- +-- @ +-- > listUnion [1,2,3,4,3] [2,1,1] +-- [1,2,3,4,3] +-- > listUnionRight [1,2,3,4,3] [2,1,1] +-- [4,3,2,1,1] +-- @ +listUnionRight :: (Ord a) => [a] -> [a] -> [a] +listUnionRight a b = ordNubRight (filter (`Set.notMember` bSet) a) ++ b + where + bSet = Set.fromList b + +equating :: Eq a => (b -> a) -> b -> b -> Bool +equating p x y = p x == p y + +comparing :: Ord a => (b -> a) -> b -> b -> Ordering +comparing p x y = p x `compare` p y + +isInfixOf :: String -> String -> Bool +isInfixOf needle haystack = any (isPrefixOf needle) (tails haystack) + +lowercase :: String -> String +lowercase = map Char.toLower diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Simple.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Simple.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,700 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Simple +-- Copyright : Isaac Jones 2003-2005 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This is the command line front end to the Simple build system. When given +-- the parsed command-line args and package information, is able to perform +-- basic commands like configure, build, install, register, etc. +-- +-- This module exports the main functions that Setup.hs scripts use. It +-- re-exports the 'UserHooks' type, the standard entry points like +-- 'defaultMain' and 'defaultMainWithHooks' and the predefined sets of +-- 'UserHooks' that custom @Setup.hs@ scripts can extend to add their own +-- behaviour. +-- +-- This module isn't called \"Simple\" because it's simple. Far from +-- it. It's called \"Simple\" because it does complicated things to +-- simple software. 
+-- +-- The original idea was that there could be different build systems that all +-- presented the same compatible command line interfaces. There is still a +-- "Distribution.Make" system but in practice no packages use it. + +{- +Work around this warning: +libraries/Cabal/Distribution/Simple.hs:78:0: + Warning: In the use of `runTests' + (imported from Distribution.Simple.UserHooks): + Deprecated: "Please use the new testing interface instead!" +-} +{-# OPTIONS_GHC -fno-warn-deprecations #-} + +module Distribution.Simple ( + module Distribution.Package, + module Distribution.Version, + module Distribution.License, + module Distribution.Simple.Compiler, + module Language.Haskell.Extension, + -- * Simple interface + defaultMain, defaultMainNoRead, defaultMainArgs, + -- * Customization + UserHooks(..), Args, + defaultMainWithHooks, defaultMainWithHooksArgs, + -- ** Standard sets of hooks + simpleUserHooks, + autoconfUserHooks, + defaultUserHooks, emptyUserHooks, + -- ** Utils + defaultHookedPackageDesc + ) where + +-- local +import Distribution.Simple.Compiler hiding (Flag) +import Distribution.Simple.UserHooks +import Distribution.Package --must not specify imports, since we're exporting module. +import Distribution.PackageDescription + ( PackageDescription(..), GenericPackageDescription, Executable(..) 
+ , updatePackageDescription, hasLibs + , HookedBuildInfo, emptyHookedBuildInfo ) +import Distribution.PackageDescription.Parse + ( readPackageDescription, readHookedBuildInfo ) +import Distribution.PackageDescription.Configuration + ( flattenPackageDescription ) +import Distribution.Simple.Program + ( defaultProgramConfiguration, addKnownPrograms, builtinPrograms + , restoreProgramConfiguration, reconfigurePrograms ) +import Distribution.Simple.PreProcess (knownSuffixHandlers, PPSuffixHandler) +import Distribution.Simple.Setup +import Distribution.Simple.Command + +import Distribution.Simple.Build ( build, repl ) +import Distribution.Simple.SrcDist ( sdist ) +import Distribution.Simple.Register + ( register, unregister ) + +import Distribution.Simple.Configure + ( getPersistBuildConfig, maybeGetPersistBuildConfig + , writePersistBuildConfig, checkPersistBuildConfigOutdated + , configure, checkForeignDeps ) + +import Distribution.Simple.LocalBuildInfo ( LocalBuildInfo(..) ) +import Distribution.Simple.Bench (bench) +import Distribution.Simple.BuildPaths ( srcPref) +import Distribution.Simple.Test (test) +import Distribution.Simple.Install (install) +import Distribution.Simple.Haddock (haddock, hscolour) +import Distribution.Simple.Utils + (die, notice, info, warn, setupMessage, chattyTry, + defaultPackageDesc, defaultHookedPackageDesc, + rawSystemExitWithEnv, cabalVersion, topHandler ) +import Distribution.System + ( OS(..), buildOS ) +import Distribution.Verbosity +import Language.Haskell.Extension +import Distribution.Version +import Distribution.License +import Distribution.Text + ( display ) + +-- Base +import System.Environment(getArgs, getProgName) +import System.Directory(removeFile, doesFileExist, + doesDirectoryExist, removeDirectoryRecursive) +import System.Exit (exitWith,ExitCode(..)) +import System.IO.Error (isDoesNotExistError) +import Control.Exception (throwIO) +import Distribution.Compat.Environment (getEnvironment) +import 
Distribution.Compat.Exception (catchIO) + +import Control.Monad (when) +import Data.List (intercalate, unionBy, nub, (\\)) + +-- | A simple implementation of @main@ for a Cabal setup script. +-- It reads the package description file using IO, and performs the +-- action specified on the command line. +defaultMain :: IO () +defaultMain = getArgs >>= defaultMainHelper simpleUserHooks + +-- | A version of 'defaultMain' that is passed the command line +-- arguments, rather than getting them from the environment. +defaultMainArgs :: [String] -> IO () +defaultMainArgs = defaultMainHelper simpleUserHooks + +-- | A customizable version of 'defaultMain'. +defaultMainWithHooks :: UserHooks -> IO () +defaultMainWithHooks hooks = getArgs >>= defaultMainHelper hooks + +-- | A customizable version of 'defaultMain' that also takes the command +-- line arguments. +defaultMainWithHooksArgs :: UserHooks -> [String] -> IO () +defaultMainWithHooksArgs = defaultMainHelper + +-- | Like 'defaultMain', but accepts the package description as input +-- rather than using IO to read it. +defaultMainNoRead :: GenericPackageDescription -> IO () +defaultMainNoRead pkg_descr = + getArgs >>= + defaultMainHelper simpleUserHooks { readDesc = return (Just pkg_descr) } + +defaultMainHelper :: UserHooks -> Args -> IO () +defaultMainHelper hooks args = topHandler $ + case commandsRun (globalCommand commands) commands args of + CommandHelp help -> printHelp help + CommandList opts -> printOptionsList opts + CommandErrors errs -> printErrors errs + CommandReadyToGo (flags, commandParse) -> + case commandParse of + _ | fromFlag (globalVersion flags) -> printVersion + | fromFlag (globalNumericVersion flags) -> printNumericVersion + CommandHelp help -> printHelp help + CommandList opts -> printOptionsList opts + CommandErrors errs -> printErrors errs + CommandReadyToGo action -> action + + where + printHelp help = getProgName >>= putStr . help + printOptionsList = putStr . 
unlines + printErrors errs = do + putStr (intercalate "\n" errs) + exitWith (ExitFailure 1) + printNumericVersion = putStrLn $ display cabalVersion + printVersion = putStrLn $ "Cabal library version " + ++ display cabalVersion + + progs = addKnownPrograms (hookedPrograms hooks) defaultProgramConfiguration + commands = + [configureCommand progs `commandAddAction` \fs as -> + configureAction hooks fs as >> return () + ,buildCommand progs `commandAddAction` buildAction hooks + ,replCommand progs `commandAddAction` replAction hooks + ,installCommand `commandAddAction` installAction hooks + ,copyCommand `commandAddAction` copyAction hooks + ,haddockCommand `commandAddAction` haddockAction hooks + ,cleanCommand `commandAddAction` cleanAction hooks + ,sdistCommand `commandAddAction` sdistAction hooks + ,hscolourCommand `commandAddAction` hscolourAction hooks + ,registerCommand `commandAddAction` registerAction hooks + ,unregisterCommand `commandAddAction` unregisterAction hooks + ,testCommand `commandAddAction` testAction hooks + ,benchmarkCommand `commandAddAction` benchAction hooks + ] + +-- | Combine the preprocessors in the given hooks with the +-- preprocessors built into cabal. 
+allSuffixHandlers :: UserHooks + -> [PPSuffixHandler] +allSuffixHandlers hooks + = overridesPP (hookedPreProcessors hooks) knownSuffixHandlers + where + overridesPP :: [PPSuffixHandler] -> [PPSuffixHandler] -> [PPSuffixHandler] + overridesPP = unionBy (\x y -> fst x == fst y) + +configureAction :: UserHooks -> ConfigFlags -> Args -> IO LocalBuildInfo +configureAction hooks flags args = do + let distPref = fromFlag $ configDistPref flags + pbi <- preConf hooks args flags + + (mb_pd_file, pkg_descr0) <- confPkgDescr + + -- get_pkg_descr (configVerbosity flags') + --let pkg_descr = updatePackageDescription pbi pkg_descr0 + let epkg_descr = (pkg_descr0, pbi) + + --(warns, ers) <- sanityCheckPackage pkg_descr + --errorOut (configVerbosity flags') warns ers + + localbuildinfo0 <- confHook hooks epkg_descr flags + + -- remember the .cabal filename if we know it + -- and all the extra command line args + let localbuildinfo = localbuildinfo0 { + pkgDescrFile = mb_pd_file, + extraConfigArgs = args + } + writePersistBuildConfig distPref localbuildinfo + + let pkg_descr = localPkgDescr localbuildinfo + postConf hooks args flags pkg_descr localbuildinfo + return localbuildinfo + where + verbosity = fromFlag (configVerbosity flags) + confPkgDescr :: IO (Maybe FilePath, GenericPackageDescription) + confPkgDescr = do + mdescr <- readDesc hooks + case mdescr of + Just descr -> return (Nothing, descr) + Nothing -> do + pdfile <- defaultPackageDesc verbosity + descr <- readPackageDescription verbosity pdfile + return (Just pdfile, descr) + +buildAction :: UserHooks -> BuildFlags -> Args -> IO () +buildAction hooks flags args = do + let distPref = fromFlag $ buildDistPref flags + verbosity = fromFlag $ buildVerbosity flags + + lbi <- getBuildConfig hooks verbosity distPref + progs <- reconfigurePrograms verbosity + (buildProgramPaths flags) + (buildProgramArgs flags) + (withPrograms lbi) + + hookedAction preBuild buildHook postBuild + (return lbi { withPrograms = progs }) + hooks 
flags { buildArgs = args } args + +replAction :: UserHooks -> ReplFlags -> Args -> IO () +replAction hooks flags args = do + let distPref = fromFlag $ replDistPref flags + verbosity = fromFlag $ replVerbosity flags + + lbi <- getBuildConfig hooks verbosity distPref + progs <- reconfigurePrograms verbosity + (replProgramPaths flags) + (replProgramArgs flags) + (withPrograms lbi) + + pbi <- preRepl hooks args flags + let lbi' = lbi { withPrograms = progs } + pkg_descr0 = localPkgDescr lbi' + pkg_descr = updatePackageDescription pbi pkg_descr0 + replHook hooks pkg_descr lbi' hooks flags args + postRepl hooks args flags pkg_descr lbi' + +hscolourAction :: UserHooks -> HscolourFlags -> Args -> IO () +hscolourAction hooks flags args + = do let distPref = fromFlag $ hscolourDistPref flags + verbosity = fromFlag $ hscolourVerbosity flags + hookedAction preHscolour hscolourHook postHscolour + (getBuildConfig hooks verbosity distPref) + hooks flags args + +haddockAction :: UserHooks -> HaddockFlags -> Args -> IO () +haddockAction hooks flags args = do + let distPref = fromFlag $ haddockDistPref flags + verbosity = fromFlag $ haddockVerbosity flags + + lbi <- getBuildConfig hooks verbosity distPref + progs <- reconfigurePrograms verbosity + (haddockProgramPaths flags) + (haddockProgramArgs flags) + (withPrograms lbi) + + hookedAction preHaddock haddockHook postHaddock + (return lbi { withPrograms = progs }) + hooks flags args + +cleanAction :: UserHooks -> CleanFlags -> Args -> IO () +cleanAction hooks flags args = do + pbi <- preClean hooks args flags + + pdfile <- defaultPackageDesc verbosity + ppd <- readPackageDescription verbosity pdfile + let pkg_descr0 = flattenPackageDescription ppd + -- We don't sanity check for clean as an error + -- here would prevent cleaning: + --sanityCheckHookedBuildInfo pkg_descr0 pbi + let pkg_descr = updatePackageDescription pbi pkg_descr0 + + cleanHook hooks pkg_descr () hooks flags + postClean hooks args flags pkg_descr () + where 
verbosity = fromFlag (cleanVerbosity flags) + +copyAction :: UserHooks -> CopyFlags -> Args -> IO () +copyAction hooks flags args + = do let distPref = fromFlag $ copyDistPref flags + verbosity = fromFlag $ copyVerbosity flags + hookedAction preCopy copyHook postCopy + (getBuildConfig hooks verbosity distPref) + hooks flags args + +installAction :: UserHooks -> InstallFlags -> Args -> IO () +installAction hooks flags args + = do let distPref = fromFlag $ installDistPref flags + verbosity = fromFlag $ installVerbosity flags + hookedAction preInst instHook postInst + (getBuildConfig hooks verbosity distPref) + hooks flags args + +sdistAction :: UserHooks -> SDistFlags -> Args -> IO () +sdistAction hooks flags args = do + let distPref = fromFlag $ sDistDistPref flags + pbi <- preSDist hooks args flags + + mlbi <- maybeGetPersistBuildConfig distPref + pdfile <- defaultPackageDesc verbosity + ppd <- readPackageDescription verbosity pdfile + let pkg_descr0 = flattenPackageDescription ppd + sanityCheckHookedBuildInfo pkg_descr0 pbi + let pkg_descr = updatePackageDescription pbi pkg_descr0 + + sDistHook hooks pkg_descr mlbi hooks flags + postSDist hooks args flags pkg_descr mlbi + where verbosity = fromFlag (sDistVerbosity flags) + +testAction :: UserHooks -> TestFlags -> Args -> IO () +testAction hooks flags args = do + let distPref = fromFlag $ testDistPref flags + verbosity = fromFlag $ testVerbosity flags + localBuildInfo <- getBuildConfig hooks verbosity distPref + let pkg_descr = localPkgDescr localBuildInfo + -- It is safe to do 'runTests' before the new test handler because the + -- default action is a no-op and if the package uses the old test interface + -- the new handler will find no tests. 
+ runTests hooks args False pkg_descr localBuildInfo + hookedActionWithArgs preTest testHook postTest + (getBuildConfig hooks verbosity distPref) + hooks flags args + +benchAction :: UserHooks -> BenchmarkFlags -> Args -> IO () +benchAction hooks flags args = do + let distPref = fromFlag $ benchmarkDistPref flags + verbosity = fromFlag $ benchmarkVerbosity flags + hookedActionWithArgs preBench benchHook postBench + (getBuildConfig hooks verbosity distPref) + hooks flags args + +registerAction :: UserHooks -> RegisterFlags -> Args -> IO () +registerAction hooks flags args + = do let distPref = fromFlag $ regDistPref flags + verbosity = fromFlag $ regVerbosity flags + hookedAction preReg regHook postReg + (getBuildConfig hooks verbosity distPref) + hooks flags args + +unregisterAction :: UserHooks -> RegisterFlags -> Args -> IO () +unregisterAction hooks flags args + = do let distPref = fromFlag $ regDistPref flags + verbosity = fromFlag $ regVerbosity flags + hookedAction preUnreg unregHook postUnreg + (getBuildConfig hooks verbosity distPref) + hooks flags args + +hookedAction :: (UserHooks -> Args -> flags -> IO HookedBuildInfo) + -> (UserHooks -> PackageDescription -> LocalBuildInfo + -> UserHooks -> flags -> IO ()) + -> (UserHooks -> Args -> flags -> PackageDescription + -> LocalBuildInfo -> IO ()) + -> IO LocalBuildInfo + -> UserHooks -> flags -> Args -> IO () +hookedAction pre_hook cmd_hook = + hookedActionWithArgs pre_hook (\h _ pd lbi uh flags -> cmd_hook h pd lbi uh flags) + +hookedActionWithArgs :: (UserHooks -> Args -> flags -> IO HookedBuildInfo) + -> (UserHooks -> Args -> PackageDescription -> LocalBuildInfo + -> UserHooks -> flags -> IO ()) + -> (UserHooks -> Args -> flags -> PackageDescription + -> LocalBuildInfo -> IO ()) + -> IO LocalBuildInfo + -> UserHooks -> flags -> Args -> IO () +hookedActionWithArgs pre_hook cmd_hook post_hook get_build_config hooks flags args = do + pbi <- pre_hook hooks args flags + localbuildinfo <- get_build_config + let 
pkg_descr0 = localPkgDescr localbuildinfo + --pkg_descr0 <- get_pkg_descr (get_verbose flags) + sanityCheckHookedBuildInfo pkg_descr0 pbi + let pkg_descr = updatePackageDescription pbi pkg_descr0 + -- TODO: should we write the modified package descr back to the + -- localbuildinfo? + cmd_hook hooks args pkg_descr localbuildinfo hooks flags + post_hook hooks args flags pkg_descr localbuildinfo + +sanityCheckHookedBuildInfo :: PackageDescription -> HookedBuildInfo -> IO () +sanityCheckHookedBuildInfo PackageDescription { library = Nothing } (Just _,_) + = die $ "The buildinfo contains info for a library, " + ++ "but the package does not have a library." + +sanityCheckHookedBuildInfo pkg_descr (_, hookExes) + | not (null nonExistant) + = die $ "The buildinfo contains info for an executable called '" + ++ head nonExistant ++ "' but the package does not have a " + ++ "executable with that name." + where + pkgExeNames = nub (map exeName (executables pkg_descr)) + hookExeNames = nub (map fst hookExes) + nonExistant = hookExeNames \\ pkgExeNames + +sanityCheckHookedBuildInfo _ _ = return () + + +getBuildConfig :: UserHooks -> Verbosity -> FilePath -> IO LocalBuildInfo +getBuildConfig hooks verbosity distPref = do + lbi_wo_programs <- getPersistBuildConfig distPref + -- Restore info about unconfigured programs, since it is not serialized + let lbi = lbi_wo_programs { + withPrograms = restoreProgramConfiguration + (builtinPrograms ++ hookedPrograms hooks) + (withPrograms lbi_wo_programs) + } + + case pkgDescrFile lbi of + Nothing -> return lbi + Just pkg_descr_file -> do + outdated <- checkPersistBuildConfigOutdated distPref pkg_descr_file + if outdated + then reconfigure pkg_descr_file lbi + else return lbi + + where + reconfigure :: FilePath -> LocalBuildInfo -> IO LocalBuildInfo + reconfigure pkg_descr_file lbi = do + notice verbosity $ pkg_descr_file ++ " has been changed. " + ++ "Re-configuring with most recently used options. 
" + ++ "If this fails, please run configure manually.\n" + let cFlags = configFlags lbi + let cFlags' = cFlags { + -- Since the list of unconfigured programs is not serialized, + -- restore it to the same value as normally used at the beginning + -- of a configure run: + configPrograms = restoreProgramConfiguration + (builtinPrograms ++ hookedPrograms hooks) + (configPrograms cFlags), + + -- Use the current, not saved verbosity level: + configVerbosity = Flag verbosity + } + configureAction hooks cFlags' (extraConfigArgs lbi) + + +-- -------------------------------------------------------------------------- +-- Cleaning + +clean :: PackageDescription -> CleanFlags -> IO () +clean pkg_descr flags = do + let distPref = fromFlag $ cleanDistPref flags + notice verbosity "cleaning..." + + maybeConfig <- if fromFlag (cleanSaveConf flags) + then maybeGetPersistBuildConfig distPref + else return Nothing + + -- remove the whole dist/ directory rather than tracking exactly what files + -- we created in there. 
+ chattyTry "removing dist/" $ do + exists <- doesDirectoryExist distPref + when exists (removeDirectoryRecursive distPref) + + -- Any extra files the user wants to remove + mapM_ removeFileOrDirectory (extraTmpFiles pkg_descr) + + -- If the user wanted to save the config, write it back + maybe (return ()) (writePersistBuildConfig distPref) maybeConfig + + where + removeFileOrDirectory :: FilePath -> IO () + removeFileOrDirectory fname = do + isDir <- doesDirectoryExist fname + isFile <- doesFileExist fname + if isDir then removeDirectoryRecursive fname + else when isFile $ removeFile fname + verbosity = fromFlag (cleanVerbosity flags) + +-- -------------------------------------------------------------------------- +-- Default hooks + +-- | Hooks that correspond to a plain instantiation of the +-- \"simple\" build system +simpleUserHooks :: UserHooks +simpleUserHooks = + emptyUserHooks { + confHook = configure, + postConf = finalChecks, + buildHook = defaultBuildHook, + replHook = defaultReplHook, + copyHook = \desc lbi _ f -> install desc lbi f, -- has correct 'copy' behavior with params + testHook = defaultTestHook, + benchHook = defaultBenchHook, + instHook = defaultInstallHook, + sDistHook = \p l h f -> sdist p l f srcPref (allSuffixHandlers h), + cleanHook = \p _ _ f -> clean p f, + hscolourHook = \p l h f -> hscolour p l (allSuffixHandlers h) f, + haddockHook = \p l h f -> haddock p l (allSuffixHandlers h) f, + regHook = defaultRegHook, + unregHook = \p l _ f -> unregister p l f + } + where + finalChecks _args flags pkg_descr lbi = + checkForeignDeps pkg_descr lbi (lessVerbose verbosity) + where + verbosity = fromFlag (configVerbosity flags) + +-- | Basic autoconf 'UserHooks': +-- +-- * 'postConf' runs @.\/configure@, if present. +-- +-- * the pre-hooks 'preBuild', 'preClean', 'preCopy', 'preInst', +-- 'preReg' and 'preUnreg' read additional build information from +-- /package/@.buildinfo@, if present. 
+-- +-- Thus @configure@ can use local system information to generate +-- /package/@.buildinfo@ and possibly other files. + +{-# DEPRECATED defaultUserHooks + "Use simpleUserHooks or autoconfUserHooks, unless you need Cabal-1.2\n compatibility in which case you must stick with defaultUserHooks" #-} +defaultUserHooks :: UserHooks +defaultUserHooks = autoconfUserHooks { + confHook = \pkg flags -> do + let verbosity = fromFlag (configVerbosity flags) + warn verbosity + "defaultUserHooks in Setup script is deprecated." + confHook autoconfUserHooks pkg flags, + postConf = oldCompatPostConf + } + -- This is the annoying old version that only runs configure if it exists. + -- It's here for compatibility with existing Setup.hs scripts. See: + -- https://github.com/haskell/cabal/issues/158 + where oldCompatPostConf args flags pkg_descr lbi + = do let verbosity = fromFlag (configVerbosity flags) + noExtraFlags args + confExists <- doesFileExist "configure" + when confExists $ + runConfigureScript verbosity + backwardsCompatHack flags lbi + + pbi <- getHookedBuildInfo verbosity + sanityCheckHookedBuildInfo pkg_descr pbi + let pkg_descr' = updatePackageDescription pbi pkg_descr + postConf simpleUserHooks args flags pkg_descr' lbi + + backwardsCompatHack = True + +autoconfUserHooks :: UserHooks +autoconfUserHooks + = simpleUserHooks + { + postConf = defaultPostConf, + preBuild = \_ flags -> + -- not using 'readHook' here because 'build' takes + -- extra args + getHookedBuildInfo $ fromFlag $ buildVerbosity flags, + preClean = readHook cleanVerbosity, + preCopy = readHook copyVerbosity, + preInst = readHook installVerbosity, + preHscolour = readHook hscolourVerbosity, + preHaddock = readHook haddockVerbosity, + preReg = readHook regVerbosity, + preUnreg = readHook regVerbosity + } + where defaultPostConf :: Args -> ConfigFlags -> PackageDescription -> LocalBuildInfo -> IO () + defaultPostConf args flags pkg_descr lbi + = do let verbosity = fromFlag (configVerbosity flags) + 
noExtraFlags args + confExists <- doesFileExist "configure" + if confExists + then runConfigureScript verbosity + backwardsCompatHack flags lbi + else die "configure script not found." + + pbi <- getHookedBuildInfo verbosity + sanityCheckHookedBuildInfo pkg_descr pbi + let pkg_descr' = updatePackageDescription pbi pkg_descr + postConf simpleUserHooks args flags pkg_descr' lbi + + backwardsCompatHack = False + + readHook :: (a -> Flag Verbosity) -> Args -> a -> IO HookedBuildInfo + readHook get_verbosity a flags = do + noExtraFlags a + getHookedBuildInfo verbosity + where + verbosity = fromFlag (get_verbosity flags) + +runConfigureScript :: Verbosity -> Bool -> ConfigFlags -> LocalBuildInfo + -> IO () +runConfigureScript verbosity backwardsCompatHack flags lbi = do + + env <- getEnvironment + let programConfig = withPrograms lbi + (ccProg, ccFlags) <- configureCCompiler verbosity programConfig + -- The C compiler's compilation and linker flags (e.g. + -- "C compiler flags" and "Gcc Linker flags" from GHC) have already + -- been merged into ccFlags, so we set both CFLAGS and LDFLAGS + -- to ccFlags + -- We don't try and tell configure which ld to use, as we don't have + -- a way to pass its flags too + let env' = appendToEnvironment ("CFLAGS", unwords ccFlags) + env + args' = args ++ ["--with-gcc=" ++ ccProg] + handleNoWindowsSH $ + rawSystemExitWithEnv verbosity "sh" args' env' + + where + args = "./configure" : configureArgs backwardsCompatHack flags + + appendToEnvironment (key, val) [] = [(key, val)] + appendToEnvironment (key, val) (kv@(k, v) : rest) + | key == k = (key, v ++ " " ++ val) : rest + | otherwise = kv : appendToEnvironment (key, val) rest + + handleNoWindowsSH action + | buildOS /= Windows + = action + + | otherwise + = action + `catchIO` \ioe -> if isDoesNotExistError ioe + then die notFoundMsg + else throwIO ioe + + notFoundMsg = "The package has a './configure' script. 
This requires a " + ++ "Unix compatibility toolchain such as MinGW+MSYS or Cygwin." + +getHookedBuildInfo :: Verbosity -> IO HookedBuildInfo +getHookedBuildInfo verbosity = do + maybe_infoFile <- defaultHookedPackageDesc + case maybe_infoFile of + Nothing -> return emptyHookedBuildInfo + Just infoFile -> do + info verbosity $ "Reading parameters from " ++ infoFile + readHookedBuildInfo verbosity infoFile + +defaultTestHook :: Args -> PackageDescription -> LocalBuildInfo + -> UserHooks -> TestFlags -> IO () +defaultTestHook args pkg_descr localbuildinfo _ flags = + test args pkg_descr localbuildinfo flags + +defaultBenchHook :: Args -> PackageDescription -> LocalBuildInfo + -> UserHooks -> BenchmarkFlags -> IO () +defaultBenchHook args pkg_descr localbuildinfo _ flags = + bench args pkg_descr localbuildinfo flags + +defaultInstallHook :: PackageDescription -> LocalBuildInfo + -> UserHooks -> InstallFlags -> IO () +defaultInstallHook pkg_descr localbuildinfo _ flags = do + let copyFlags = defaultCopyFlags { + copyDistPref = installDistPref flags, + copyDest = toFlag NoCopyDest, + copyVerbosity = installVerbosity flags + } + install pkg_descr localbuildinfo copyFlags + let registerFlags = defaultRegisterFlags { + regDistPref = installDistPref flags, + regInPlace = installInPlace flags, + regPackageDB = installPackageDB flags, + regVerbosity = installVerbosity flags + } + when (hasLibs pkg_descr) $ register pkg_descr localbuildinfo registerFlags + +defaultBuildHook :: PackageDescription -> LocalBuildInfo + -> UserHooks -> BuildFlags -> IO () +defaultBuildHook pkg_descr localbuildinfo hooks flags = + build pkg_descr localbuildinfo flags (allSuffixHandlers hooks) + +defaultReplHook :: PackageDescription -> LocalBuildInfo + -> UserHooks -> ReplFlags -> [String] -> IO () +defaultReplHook pkg_descr localbuildinfo hooks flags args = + repl pkg_descr localbuildinfo flags (allSuffixHandlers hooks) args + +defaultRegHook :: PackageDescription -> LocalBuildInfo + -> UserHooks -> 
RegisterFlags -> IO () +defaultRegHook pkg_descr localbuildinfo _ flags = + if hasLibs pkg_descr + then register pkg_descr localbuildinfo flags + else setupMessage verbosity + "Package contains no library to register:" (packageId pkg_descr) + where verbosity = fromFlag (regVerbosity flags) diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/System.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/System.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/System.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/System.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,215 @@ +{-# LANGUAGE DeriveDataTypeable #-} +{-# LANGUAGE DeriveGeneric #-} + +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.System +-- Copyright : Duncan Coutts 2007-2008 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- Cabal often needs to do slightly different things on specific platforms. You +-- probably know about the 'System.Info.os' however using that is very +-- inconvenient because it is a string and different Haskell implementations +-- do not agree on using the same strings for the same platforms! (In +-- particular see the controversy over \"windows\" vs \"ming32\"). So to make it +-- more consistent and easy to use we have an 'OS' enumeration. 
+-- +module Distribution.System ( + -- * Operating System + OS(..), + buildOS, + + -- * Machine Architecture + Arch(..), + buildArch, + + -- * Platform is a pair of arch and OS + Platform(..), + buildPlatform, + platformFromTriple + ) where + +import qualified System.Info (os, arch) +import qualified Data.Char as Char (toLower, isAlphaNum) + +import Distribution.Compat.Binary (Binary) +import Data.Data (Data) +import Data.Typeable (Typeable) +import Data.Maybe (fromMaybe, listToMaybe) +import Distribution.Text (Text(..), display) +import qualified Distribution.Compat.ReadP as Parse +import GHC.Generics (Generic) +import qualified Text.PrettyPrint as Disp +import Text.PrettyPrint ((<>)) + +-- | How strict to be when classifying strings into the 'OS' and 'Arch' enums. +-- +-- The reason we have multiple ways to do the classification is because there +-- are two situations where we need to do it. +-- +-- For parsing OS and arch names in .cabal files we really want everyone to be +-- referring to the same or or arch by the same name. Variety is not a virtue +-- in this case. We don't mind about case though. +-- +-- For the System.Info.os\/arch different Haskell implementations use different +-- names for the same or\/arch. Also they tend to distinguish versions of an +-- OS\/arch which we just don't care about. +-- +-- The 'Compat' classification allows us to recognise aliases that are already +-- in common use but it allows us to distinguish them from the canonical name +-- which enables us to warn about such deprecated aliases. 
+-- +data ClassificationStrictness = Permissive | Compat | Strict + +-- ------------------------------------------------------------ +-- * Operating System +-- ------------------------------------------------------------ + +data OS = Linux | Windows | OSX -- tier 1 desktop OSs + | FreeBSD | OpenBSD | NetBSD -- other free Unix OSs + | DragonFly + | Solaris | AIX | HPUX | IRIX -- ageing Unix OSs + | HaLVM -- bare metal / VMs / hypervisors + | IOS -- iOS + | Ghcjs + | OtherOS String + deriving (Eq, Generic, Ord, Show, Read, Typeable, Data) + +instance Binary OS + +--TODO: decide how to handle Android and iOS. +-- They are like Linux and OSX but with some differences. +-- Should they be separate from Linux/OS X, or a subtype? +-- e.g. should we have os(linux) && os(android) true simultaneously? + +knownOSs :: [OS] +knownOSs = [Linux, Windows, OSX + ,FreeBSD, OpenBSD, NetBSD, DragonFly + ,Solaris, AIX, HPUX, IRIX + ,HaLVM + ,IOS + ,Ghcjs] + +osAliases :: ClassificationStrictness -> OS -> [String] +osAliases Permissive Windows = ["mingw32", "win32", "cygwin32"] +osAliases Compat Windows = ["mingw32", "win32"] +osAliases _ OSX = ["darwin"] +osAliases Permissive FreeBSD = ["kfreebsdgnu"] +osAliases Compat FreeBSD = ["kfreebsdgnu"] +osAliases Permissive Solaris = ["solaris2"] +osAliases Compat Solaris = ["solaris2"] +osAliases _ _ = [] + +instance Text OS where + disp (OtherOS name) = Disp.text name + disp other = Disp.text (lowercase (show other)) + + parse = fmap (classifyOS Compat) ident + +classifyOS :: ClassificationStrictness -> String -> OS +classifyOS strictness s = + fromMaybe (OtherOS s) $ lookup (lowercase s) osMap + where + osMap = [ (name, os) + | os <- knownOSs + , name <- display os : osAliases strictness os ] + +buildOS :: OS +buildOS = classifyOS Permissive System.Info.os + +-- ------------------------------------------------------------ +-- * Machine Architecture +-- ------------------------------------------------------------ + +data Arch = I386 | X86_64 
| PPC | PPC64 | Sparc + | Arm | Mips | SH + | IA64 | S390 + | Alpha | Hppa | Rs6000 + | M68k | Vax + | JavaScript + | OtherArch String + deriving (Eq, Generic, Ord, Show, Read, Typeable, Data) + +instance Binary Arch + +knownArches :: [Arch] +knownArches = [I386, X86_64, PPC, PPC64, Sparc + ,Arm, Mips, SH + ,IA64, S390 + ,Alpha, Hppa, Rs6000 + ,M68k, Vax + ,JavaScript] + +archAliases :: ClassificationStrictness -> Arch -> [String] +archAliases Strict _ = [] +archAliases Compat _ = [] +archAliases _ PPC = ["powerpc"] +archAliases _ PPC64 = ["powerpc64"] +archAliases _ Sparc = ["sparc64", "sun4"] +archAliases _ Mips = ["mipsel", "mipseb"] +archAliases _ Arm = ["armeb", "armel"] +archAliases _ _ = [] + +instance Text Arch where + disp (OtherArch name) = Disp.text name + disp other = Disp.text (lowercase (show other)) + + parse = fmap (classifyArch Strict) ident + +classifyArch :: ClassificationStrictness -> String -> Arch +classifyArch strictness s = + fromMaybe (OtherArch s) $ lookup (lowercase s) archMap + where + archMap = [ (name, arch) + | arch <- knownArches + , name <- display arch : archAliases strictness arch ] + +buildArch :: Arch +buildArch = classifyArch Permissive System.Info.arch + +-- ------------------------------------------------------------ +-- * Platform +-- ------------------------------------------------------------ + +data Platform = Platform Arch OS + deriving (Eq, Generic, Ord, Show, Read, Typeable, Data) + +instance Binary Platform + +instance Text Platform where + disp (Platform arch os) = disp arch <> Disp.char '-' <> disp os + parse = do + arch <- parse + _ <- Parse.char '-' + os <- parse + return (Platform arch os) + +-- | The platform Cabal was compiled on. In most cases, +-- @LocalBuildInfo.hostPlatform@ should be used instead (the platform we're +-- targeting). 
+buildPlatform :: Platform +buildPlatform = Platform buildArch buildOS + +-- Utils: + +ident :: Parse.ReadP r String +ident = Parse.munch1 (\c -> Char.isAlphaNum c || c == '_' || c == '-') + --TODO: probably should disallow starting with a number + +lowercase :: String -> String +lowercase = map Char.toLower + +platformFromTriple :: String -> Maybe Platform +platformFromTriple triple = + fmap fst (listToMaybe $ Parse.readP_to_S parseTriple triple) + where parseWord = Parse.munch1 (\c -> Char.isAlphaNum c || c == '_') + parseTriple = do + arch <- fmap (classifyArch Strict) parseWord + _ <- Parse.char '-' + _ <- parseWord -- Skip vendor + _ <- Parse.char '-' + os <- fmap (classifyOS Compat) ident -- OS may have hyphens, like + -- 'nto-qnx' + return $ Platform arch os diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/TestSuite.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/TestSuite.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/TestSuite.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/TestSuite.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,96 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.TestSuite +-- Copyright : Thomas Tuegel 2010 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This module defines the detailed test suite interface which makes it +-- possible to expose individual tests to Cabal or other test agents. + +module Distribution.TestSuite + ( TestInstance(..) + , OptionDescr(..) + , OptionType(..) + , Test(..) + , Options + , Progress(..) + , Result(..) + , testGroup + ) where + +data TestInstance = TestInstance + { run :: IO Progress -- ^ Perform the test. + , name :: String -- ^ A name for the test, unique within a + -- test suite. + , tags :: [String] -- ^ Users can select groups of tests by + -- their tags. 
+ , options :: [OptionDescr] -- ^ Descriptions of the options recognized + -- by this test. + , setOption :: String -> String -> Either String TestInstance + -- ^ Try to set the named option to the given value. Returns an error + -- message if the option is not supported or the value could not be + -- correctly parsed; otherwise, a 'TestInstance' with the option set to + -- the given value is returned. + } + +data OptionDescr = OptionDescr + { optionName :: String + , optionDescription :: String -- ^ A human-readable description of the + -- option to guide the user setting it. + , optionType :: OptionType + , optionDefault :: Maybe String + } + deriving (Eq, Read, Show) + +data OptionType + = OptionFile + { optionFileMustExist :: Bool + , optionFileIsDir :: Bool + , optionFileExtensions :: [String] + } + | OptionString + { optionStringMultiline :: Bool + } + | OptionNumber + { optionNumberIsInt :: Bool + , optionNumberBounds :: (Maybe String, Maybe String) + } + | OptionBool + | OptionEnum [String] + | OptionSet [String] + | OptionRngSeed + deriving (Eq, Read, Show) + +data Test + = Test TestInstance + | Group + { groupName :: String + , concurrently :: Bool + -- ^ If true, then children of this group may be run in parallel. + -- Note that this setting is not inherited by children. In + -- particular, consider a group F with "concurrently = False" that + -- has some children, including a group T with "concurrently = + -- True". The children of group T may be run concurrently with each + -- other, as long as none are run at the same time as any of the + -- direct children of group F. + , groupTests :: [Test] + } + | ExtraOptions [OptionDescr] Test + +type Options = [(String, String)] + +data Progress = Finished Result + | Progress String (IO Progress) + +data Result = Pass + | Fail String + | Error String + deriving (Eq, Read, Show) + +-- | Create a named group of tests, which are assumed to be safe to run in +-- parallel. 
+testGroup :: String -> [Test] -> Test +testGroup n ts = Group { groupName = n, concurrently = True, groupTests = ts } diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Text.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Text.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Text.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Text.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,68 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Text +-- Copyright : Duncan Coutts 2007 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- This defines a 'Text' class which is a bit like the 'Read' and 'Show' +-- classes. The difference is that is uses a modern pretty printer and parser +-- system and the format is not expected to be Haskell concrete syntax but +-- rather the external human readable representation used by Cabal. +-- +module Distribution.Text ( + Text(..), + display, + simpleParse, + ) where + +import qualified Distribution.Compat.ReadP as Parse +import qualified Text.PrettyPrint as Disp + +import Data.Version (Version(Version)) +import qualified Data.Char as Char (isDigit, isAlphaNum, isSpace) + +class Text a where + disp :: a -> Disp.Doc + parse :: Parse.ReadP r a + +display :: Text a => a -> String +display = Disp.renderStyle style . disp + where style = Disp.Style { + Disp.mode = Disp.PageMode, + Disp.lineLength = 79, + Disp.ribbonsPerLine = 1.0 + } + +simpleParse :: Text a => String -> Maybe a +simpleParse str = case [ p | (p, s) <- Parse.readP_to_S parse str + , all Char.isSpace s ] of + [] -> Nothing + (p:_) -> Just p + +-- ----------------------------------------------------------------------------- +-- Instances for types from the base package + +instance Text Bool where + disp = Disp.text . 
show + parse = Parse.choice [ (Parse.string "True" Parse.+++ + Parse.string "true") >> return True + , (Parse.string "False" Parse.+++ + Parse.string "false") >> return False ] + +instance Text Version where + disp (Version branch _tags) -- Death to version tags!! + = Disp.hcat (Disp.punctuate (Disp.char '.') (map Disp.int branch)) + + parse = do + branch <- Parse.sepBy1 digits (Parse.char '.') + tags <- Parse.many (Parse.char '-' >> Parse.munch1 Char.isAlphaNum) + return (Version branch tags) --TODO: should we ignore the tags? + where + digits = do + first <- Parse.satisfy Char.isDigit + if first == '0' + then return 0 + else do rest <- Parse.munch Char.isDigit + return (read (first : rest)) diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Utils/NubList.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Utils/NubList.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Utils/NubList.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Utils/NubList.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,100 @@ +{-# LANGUAGE CPP #-} +module Distribution.Utils.NubList + ( NubList -- opaque + , toNubList -- smart construtor + , fromNubList + , overNubList + + , NubListR + , toNubListR + , fromNubListR + , overNubListR + ) where + +import Distribution.Compat.Binary +#if __GLASGOW_HASKELL__ < 710 +import Data.Monoid +#endif + +import Distribution.Simple.Utils (ordNub, listUnion, ordNubRight, listUnionRight) + +import qualified Text.Read as R + +-- | NubList : A de-duplicated list that maintains the original order. +newtype NubList a = + NubList { fromNubList :: [a] } + deriving Eq + +-- NubList assumes that nub retains the list order while removing duplicate +-- elements (keeping the first occurence). Documentation for "Data.List.nub" +-- does not specifically state that ordering is maintained so we will add a test +-- for that to the test suite. 
+ +-- | Smart constructor for the NubList type. +toNubList :: Ord a => [a] -> NubList a +toNubList list = NubList $ ordNub list + +-- | Lift a function over lists to a function over NubLists. +overNubList :: Ord a => ([a] -> [a]) -> NubList a -> NubList a +overNubList f (NubList list) = toNubList . f $ list + +-- | Monoid operations on NubLists. +-- For a valid Monoid instance we need to satistfy the required monoid laws; +-- identity, associativity and closure. +-- +-- Identity : by inspection: +-- mempty `mappend` NubList xs == NubList xs `mappend` mempty +-- +-- Associativity : by inspection: +-- (NubList xs `mappend` NubList ys) `mappend` NubList zs +-- == NubList xs `mappend` (NubList ys `mappend` NubList zs) +-- +-- Closure : appending two lists of type a and removing duplicates obviously +-- does not change the type. + +instance Ord a => Monoid (NubList a) where + mempty = NubList [] + mappend (NubList xs) (NubList ys) = NubList $ xs `listUnion` ys + +instance Show a => Show (NubList a) where + show (NubList list) = show list + +instance (Ord a, Read a) => Read (NubList a) where + readPrec = readNubList toNubList + +-- | Helper used by NubList/NubListR's Read instances. +readNubList :: (Ord a, Read a) => ([a] -> l a) -> R.ReadPrec (l a) +readNubList toList = R.parens . R.prec 10 $ fmap toList R.readPrec + +-- | Binary instance for 'NubList a' is the same as for '[a]'. For 'put', we +-- just pull off constructor and put the list. For 'get', we get the list and +-- make a 'NubList' out of it using 'toNubList'. +instance (Ord a, Binary a) => Binary (NubList a) where + put (NubList l) = put l + get = fmap toNubList get + +-- | NubListR : A right-biased version of 'NubList'. That is @toNubListR +-- ["-XNoFoo", "-XFoo", "-XNoFoo"]@ will result in @["-XFoo", "-XNoFoo"]@, +-- unlike the normal 'NubList', which is left-biased. Built on top of +-- 'ordNubRight' and 'listUnionRight'. 
+newtype NubListR a = + NubListR { fromNubListR :: [a] } + deriving Eq + +-- | Smart constructor for the NubListR type. +toNubListR :: Ord a => [a] -> NubListR a +toNubListR list = NubListR $ ordNubRight list + +-- | Lift a function over lists to a function over NubListRs. +overNubListR :: Ord a => ([a] -> [a]) -> NubListR a -> NubListR a +overNubListR f (NubListR list) = toNubListR . f $ list + +instance Ord a => Monoid (NubListR a) where + mempty = NubListR [] + mappend (NubListR xs) (NubListR ys) = NubListR $ xs `listUnionRight` ys + +instance Show a => Show (NubListR a) where + show (NubListR list) = show list + +instance (Ord a, Read a) => Read (NubListR a) where + readPrec = readNubList toNubListR diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Verbosity.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Verbosity.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Verbosity.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Verbosity.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,89 @@ +{-# LANGUAGE DeriveGeneric #-} + +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Verbosity +-- Copyright : Ian Lynagh 2007 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- A simple 'Verbosity' type with associated utilities. There are 4 standard +-- verbosity levels from 'silent', 'normal', 'verbose' up to 'deafening'. This +-- is used for deciding what logging messages to print. + +-- Verbosity for Cabal functions. 
+ +module Distribution.Verbosity ( + -- * Verbosity + Verbosity, + silent, normal, verbose, deafening, + moreVerbose, lessVerbose, + intToVerbosity, flagToVerbosity, + showForCabal, showForGHC + ) where + +import Distribution.Compat.Binary (Binary) +import Data.List (elemIndex) +import Distribution.ReadE +import GHC.Generics + +data Verbosity = Silent | Normal | Verbose | Deafening + deriving (Generic, Show, Read, Eq, Ord, Enum, Bounded) + +instance Binary Verbosity + +-- We shouldn't print /anything/ unless an error occurs in silent mode +silent :: Verbosity +silent = Silent + +-- Print stuff we want to see by default +normal :: Verbosity +normal = Normal + +-- Be more verbose about what's going on +verbose :: Verbosity +verbose = Verbose + +-- Not only are we verbose ourselves (perhaps even noisier than when +-- being "verbose"), but we tell everything we run to be verbose too +deafening :: Verbosity +deafening = Deafening + +moreVerbose :: Verbosity -> Verbosity +moreVerbose Silent = Silent --silent should stay silent +moreVerbose Normal = Verbose +moreVerbose Verbose = Deafening +moreVerbose Deafening = Deafening + +lessVerbose :: Verbosity -> Verbosity +lessVerbose Deafening = Deafening +lessVerbose Verbose = Normal +lessVerbose Normal = Silent +lessVerbose Silent = Silent + +intToVerbosity :: Int -> Maybe Verbosity +intToVerbosity 0 = Just Silent +intToVerbosity 1 = Just Normal +intToVerbosity 2 = Just Verbose +intToVerbosity 3 = Just Deafening +intToVerbosity _ = Nothing + +flagToVerbosity :: ReadE Verbosity +flagToVerbosity = ReadE $ \s -> + case reads s of + [(i, "")] -> + case intToVerbosity i of + Just v -> Right v + Nothing -> Left ("Bad verbosity: " ++ show i ++ + ". 
Valid values are 0..3") + _ -> Left ("Can't parse verbosity " ++ s) + +showForCabal, showForGHC :: Verbosity -> String + +showForCabal v = maybe (error "unknown verbosity") show $ + elemIndex v [silent,normal,verbose,deafening] +showForGHC v = maybe (error "unknown verbosity") show $ + elemIndex v [silent,normal,__,verbose,deafening] + where __ = silent -- this will be always ignored by elemIndex diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Version.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Version.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Distribution/Version.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Distribution/Version.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,777 @@ +{-# LANGUAGE CPP, DeriveDataTypeable #-} +{-# LANGUAGE DeriveGeneric #-} +#if __GLASGOW_HASKELL__ < 707 +{-# LANGUAGE StandaloneDeriving #-} +#endif + +-- Hack approach to support bootstrapping +-- Assume binary <0.8 when MIN_VERSION_binary macro is not available. +-- Starting with GHC>=8.0, compiler will hopefully provide this macros too. +-- https://ghc.haskell.org/trac/ghc/ticket/10970 +-- +-- Otherwise, one can specify -DMIN_VERSION_binary_0_8_0=1, when bootstrapping +-- with binary >=0.8.0.0 +#ifdef MIN_VERSION_binary +#define MIN_VERSION_binary_0_8_0 MIN_VERSION_binary(0,8,0) +#else +#ifndef MIN_VERSION_binary_0_8_0 +#define MIN_VERSION_binary_0_8_0 0 +#endif +#endif + +#if !MIN_VERSION_binary_0_8_0 +{-# OPTIONS_GHC -fno-warn-orphans #-} +#endif + +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Version +-- Copyright : Isaac Jones, Simon Marlow 2003-2004 +-- Duncan Coutts 2008 +-- License : BSD3 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- Exports the 'Version' type along with a parser and pretty printer. A version +-- is something like @\"1.3.3\"@. 
It also defines the 'VersionRange' data +-- types. Version ranges are like @\">= 1.2 && < 2\"@. + +module Distribution.Version ( + -- * Package versions + Version(..), + + -- * Version ranges + VersionRange(..), + + -- ** Constructing + anyVersion, noVersion, + thisVersion, notThisVersion, + laterVersion, earlierVersion, + orLaterVersion, orEarlierVersion, + unionVersionRanges, intersectVersionRanges, + withinVersion, + betweenVersionsInclusive, + + -- ** Inspection + withinRange, + isAnyVersion, + isNoVersion, + isSpecificVersion, + simplifyVersionRange, + foldVersionRange, + foldVersionRange', + + -- ** Modification + removeUpperBound, + + -- * Version intervals view + asVersionIntervals, + VersionInterval, + LowerBound(..), + UpperBound(..), + Bound(..), + + -- ** 'VersionIntervals' abstract type + -- | The 'VersionIntervals' type and the accompanying functions are exposed + -- primarily for completeness and testing purposes. In practice + -- 'asVersionIntervals' is the main function to use to + -- view a 'VersionRange' as a bunch of 'VersionInterval's. + -- + VersionIntervals, + toVersionIntervals, + fromVersionIntervals, + withinIntervals, + versionIntervals, + mkVersionIntervals, + unionVersionIntervals, + intersectVersionIntervals, + + ) where + +import Distribution.Compat.Binary ( Binary(..) ) +import Data.Data ( Data ) +import Data.Typeable ( Typeable ) +import Data.Version ( Version(..) ) +import GHC.Generics ( Generic ) + +import Distribution.Text ( Text(..) ) +import qualified Distribution.Compat.ReadP as Parse +import Distribution.Compat.ReadP ((+++)) +import qualified Text.PrettyPrint as Disp +import Text.PrettyPrint ((<>), (<+>)) +import qualified Data.Char as Char (isDigit) +import Control.Exception (assert) + +-- ----------------------------------------------------------------------------- +-- Version ranges + +-- Todo: maybe move this to Distribution.Package.Version? +-- (package-specific versioning scheme). 
+ +data VersionRange + = AnyVersion + | ThisVersion Version -- = version + | LaterVersion Version -- > version (NB. not >=) + | EarlierVersion Version -- < version + | WildcardVersion Version -- == ver.* (same as >= ver && < ver+1) + | UnionVersionRanges VersionRange VersionRange + | IntersectVersionRanges VersionRange VersionRange + | VersionRangeParens VersionRange -- just '(exp)' parentheses syntax + deriving (Data, Eq, Generic, Read, Show, Typeable) + +instance Binary VersionRange + +#if __GLASGOW_HASKELL__ < 707 +-- starting with ghc-7.7/base-4.7 this instance is provided in "Data.Data" +deriving instance Data Version +#endif + +#if !(MIN_VERSION_binary_0_8_0) +-- Deriving this instance from Generic gives trouble on GHC 7.2 because the +-- Generic instance has to be standalone-derived. So, we hand-roll our own. +-- We can't use a generic Binary instance on later versions because we must +-- maintain compatibility between compiler versions. +instance Binary Version where + get = do + br <- get + tags <- get + return $ Version br tags + put (Version br tags) = put br >> put tags +#endif + +{-# DEPRECATED AnyVersion "Use 'anyVersion', 'foldVersionRange' or 'asVersionIntervals'" #-} +{-# DEPRECATED ThisVersion "use 'thisVersion', 'foldVersionRange' or 'asVersionIntervals'" #-} +{-# DEPRECATED LaterVersion "use 'laterVersion', 'foldVersionRange' or 'asVersionIntervals'" #-} +{-# DEPRECATED EarlierVersion "use 'earlierVersion', 'foldVersionRange' or 'asVersionIntervals'" #-} +{-# DEPRECATED WildcardVersion "use 'anyVersion', 'foldVersionRange' or 'asVersionIntervals'" #-} +{-# DEPRECATED UnionVersionRanges "use 'unionVersionRanges', 'foldVersionRange' or 'asVersionIntervals'" #-} +{-# DEPRECATED IntersectVersionRanges "use 'intersectVersionRanges', 'foldVersionRange' or 'asVersionIntervals'" #-} + +-- | The version range @-any@. That is, a version range containing all +-- versions. 
+-- +-- > withinRange v anyVersion = True +-- +anyVersion :: VersionRange +anyVersion = AnyVersion + +-- | The empty version range, that is a version range containing no versions. +-- +-- This can be constructed using any unsatisfiable version range expression, +-- for example @> 1 && < 1@. +-- +-- > withinRange v noVersion = False +-- +noVersion :: VersionRange +noVersion = IntersectVersionRanges (LaterVersion v) (EarlierVersion v) + where v = Version [1] [] + +-- | The version range @== v@ +-- +-- > withinRange v' (thisVersion v) = v' == v +-- +thisVersion :: Version -> VersionRange +thisVersion = ThisVersion + +-- | The version range @< v || > v@ +-- +-- > withinRange v' (notThisVersion v) = v' /= v +-- +notThisVersion :: Version -> VersionRange +notThisVersion v = UnionVersionRanges (EarlierVersion v) (LaterVersion v) + +-- | The version range @> v@ +-- +-- > withinRange v' (laterVersion v) = v' > v +-- +laterVersion :: Version -> VersionRange +laterVersion = LaterVersion + +-- | The version range @>= v@ +-- +-- > withinRange v' (orLaterVersion v) = v' >= v +-- +orLaterVersion :: Version -> VersionRange +orLaterVersion v = UnionVersionRanges (ThisVersion v) (LaterVersion v) + +-- | The version range @< v@ +-- +-- > withinRange v' (earlierVersion v) = v' < v +-- +earlierVersion :: Version -> VersionRange +earlierVersion = EarlierVersion + +-- | The version range @<= v@ +-- +-- > withinRange v' (orEarlierVersion v) = v' <= v +-- +orEarlierVersion :: Version -> VersionRange +orEarlierVersion v = UnionVersionRanges (ThisVersion v) (EarlierVersion v) + +-- | The version range @vr1 || vr2@ +-- +-- > withinRange v' (unionVersionRanges vr1 vr2) +-- > = withinRange v' vr1 || withinRange v' vr2 +-- +unionVersionRanges :: VersionRange -> VersionRange -> VersionRange +unionVersionRanges = UnionVersionRanges + +-- | The version range @vr1 && vr2@ +-- +-- > withinRange v' (intersectVersionRanges vr1 vr2) +-- > = withinRange v' vr1 && withinRange v' vr2 +-- 
+intersectVersionRanges :: VersionRange -> VersionRange -> VersionRange +intersectVersionRanges = IntersectVersionRanges + +-- | The version range @== v.*@. +-- +-- For example, for version @1.2@, the version range @== 1.2.*@ is the same as +-- @>= 1.2 && < 1.3@ +-- +-- > withinRange v' (laterVersion v) = v' >= v && v' < upper v +-- > where +-- > upper (Version lower t) = Version (init lower ++ [last lower + 1]) t +-- +withinVersion :: Version -> VersionRange +withinVersion = WildcardVersion + +-- | The version range @>= v1 && <= v2@. +-- +-- In practice this is not very useful because we normally use inclusive lower +-- bounds and exclusive upper bounds. +-- +-- > withinRange v' (laterVersion v) = v' > v +-- +betweenVersionsInclusive :: Version -> Version -> VersionRange +betweenVersionsInclusive v1 v2 = + IntersectVersionRanges (orLaterVersion v1) (orEarlierVersion v2) + +{-# DEPRECATED betweenVersionsInclusive + "In practice this is not very useful because we normally use inclusive lower bounds and exclusive upper bounds" #-} + +-- | Given a version range, remove the highest upper bound. Example: @(>= 1 && < +-- 3) || (>= 4 && < 5)@ is converted to @(>= 1 && < 3) || (>= 4)@. +removeUpperBound :: VersionRange -> VersionRange +removeUpperBound = fromVersionIntervals . relaxLastInterval . toVersionIntervals + where + relaxLastInterval (VersionIntervals intervals) = + VersionIntervals (relaxLastInterval' intervals) + + relaxLastInterval' [] = [] + relaxLastInterval' [(l,_)] = [(l, NoUpperBound)] + relaxLastInterval' (i:is) = i : relaxLastInterval' is + +-- | Fold over the basic syntactic structure of a 'VersionRange'. +-- +-- This provides a syntactic view of the expression defining the version range. +-- The syntactic sugar @\">= v\"@, @\"<= v\"@ and @\"== v.*\"@ is presented +-- in terms of the other basic syntax. +-- +-- For a semantic view use 'asVersionIntervals'. 
+-- +foldVersionRange :: a -- ^ @\"-any\"@ version + -> (Version -> a) -- ^ @\"== v\"@ + -> (Version -> a) -- ^ @\"> v\"@ + -> (Version -> a) -- ^ @\"< v\"@ + -> (a -> a -> a) -- ^ @\"_ || _\"@ union + -> (a -> a -> a) -- ^ @\"_ && _\"@ intersection + -> VersionRange -> a +foldVersionRange anyv this later earlier union intersect = fold + where + fold AnyVersion = anyv + fold (ThisVersion v) = this v + fold (LaterVersion v) = later v + fold (EarlierVersion v) = earlier v + fold (WildcardVersion v) = fold (wildcard v) + fold (UnionVersionRanges v1 v2) = union (fold v1) (fold v2) + fold (IntersectVersionRanges v1 v2) = intersect (fold v1) (fold v2) + fold (VersionRangeParens v) = fold v + + wildcard v = intersectVersionRanges + (orLaterVersion v) + (earlierVersion (wildcardUpperBound v)) + +-- | An extended variant of 'foldVersionRange' that also provides a view of +-- in which the syntactic sugar @\">= v\"@, @\"<= v\"@ and @\"== v.*\"@ is presented +-- explicitly rather than in terms of the other basic syntax. +-- +foldVersionRange' :: a -- ^ @\"-any\"@ version + -> (Version -> a) -- ^ @\"== v\"@ + -> (Version -> a) -- ^ @\"> v\"@ + -> (Version -> a) -- ^ @\"< v\"@ + -> (Version -> a) -- ^ @\">= v\"@ + -> (Version -> a) -- ^ @\"<= v\"@ + -> (Version -> Version -> a) -- ^ @\"== v.*\"@ wildcard. The + -- function is passed the + -- inclusive lower bound and the + -- exclusive upper bounds of the + -- range defined by the wildcard. 
+ -> (a -> a -> a) -- ^ @\"_ || _\"@ union + -> (a -> a -> a) -- ^ @\"_ && _\"@ intersection + -> (a -> a) -- ^ @\"(_)\"@ parentheses + -> VersionRange -> a +foldVersionRange' anyv this later earlier orLater orEarlier + wildcard union intersect parens = fold + where + fold AnyVersion = anyv + fold (ThisVersion v) = this v + fold (LaterVersion v) = later v + fold (EarlierVersion v) = earlier v + + fold (UnionVersionRanges (ThisVersion v) + (LaterVersion v')) | v==v' = orLater v + fold (UnionVersionRanges (LaterVersion v) + (ThisVersion v')) | v==v' = orLater v + fold (UnionVersionRanges (ThisVersion v) + (EarlierVersion v')) | v==v' = orEarlier v + fold (UnionVersionRanges (EarlierVersion v) + (ThisVersion v')) | v==v' = orEarlier v + + fold (WildcardVersion v) = wildcard v (wildcardUpperBound v) + fold (UnionVersionRanges v1 v2) = union (fold v1) (fold v2) + fold (IntersectVersionRanges v1 v2) = intersect (fold v1) (fold v2) + fold (VersionRangeParens v) = parens (fold v) + + +-- | Does this version fall within the given range? +-- +-- This is the evaluation function for the 'VersionRange' type. +-- +withinRange :: Version -> VersionRange -> Bool +withinRange v = foldVersionRange + True + (\v' -> versionBranch v == versionBranch v') + (\v' -> versionBranch v > versionBranch v') + (\v' -> versionBranch v < versionBranch v') + (||) + (&&) + +-- | View a 'VersionRange' as a union of intervals. +-- +-- This provides a canonical view of the semantics of a 'VersionRange' as +-- opposed to the syntax of the expression used to define it. For the syntactic +-- view use 'foldVersionRange'. +-- +-- Each interval is non-empty. The sequence is in increasing order and no +-- intervals overlap or touch. Therefore only the first and last can be +-- unbounded. The sequence can be empty if the range is empty +-- (e.g. a range expression like @< 1 && > 2@). +-- +-- Other checks are trivial to implement using this view. 
For example: +-- +-- > isNoVersion vr | [] <- asVersionIntervals vr = True +-- > | otherwise = False +-- +-- > isSpecificVersion vr +-- > | [(LowerBound v InclusiveBound +-- > ,UpperBound v' InclusiveBound)] <- asVersionIntervals vr +-- > , v == v' = Just v +-- > | otherwise = Nothing +-- +asVersionIntervals :: VersionRange -> [VersionInterval] +asVersionIntervals = versionIntervals . toVersionIntervals + +-- | Does this 'VersionRange' place any restriction on the 'Version' or is it +-- in fact equivalent to 'AnyVersion'. +-- +-- Note this is a semantic check, not simply a syntactic check. So for example +-- the following is @True@ (for all @v@). +-- +-- > isAnyVersion (EarlierVersion v `UnionVersionRanges` orLaterVersion v) +-- +isAnyVersion :: VersionRange -> Bool +isAnyVersion vr = case asVersionIntervals vr of + [(LowerBound v InclusiveBound, NoUpperBound)] | isVersion0 v -> True + _ -> False + +-- | This is the converse of 'isAnyVersion'. It check if the version range is +-- empty, if there is no possible version that satisfies the version range. +-- +-- For example this is @True@ (for all @v@): +-- +-- > isNoVersion (EarlierVersion v `IntersectVersionRanges` LaterVersion v) +-- +isNoVersion :: VersionRange -> Bool +isNoVersion vr = case asVersionIntervals vr of + [] -> True + _ -> False + +-- | Is this version range in fact just a specific version? +-- +-- For example the version range @\">= 3 && <= 3\"@ contains only the version +-- @3@. +-- +isSpecificVersion :: VersionRange -> Maybe Version +isSpecificVersion vr = case asVersionIntervals vr of + [(LowerBound v InclusiveBound + ,UpperBound v' InclusiveBound)] + | v == v' -> Just v + _ -> Nothing + +-- | Simplify a 'VersionRange' expression. For non-empty version ranges +-- this produces a canonical form. Empty or inconsistent version ranges +-- are left as-is because that provides more information. +-- +-- If you need a canonical form use +-- @fromVersionIntervals . 
toVersionIntervals@ +-- +-- It satisfies the following properties: +-- +-- > withinRange v (simplifyVersionRange r) = withinRange v r +-- +-- > withinRange v r = withinRange v r' +-- > ==> simplifyVersionRange r = simplifyVersionRange r' +-- > || isNoVersion r +-- > || isNoVersion r' +-- +simplifyVersionRange :: VersionRange -> VersionRange +simplifyVersionRange vr + -- If the version range is inconsistent then we just return the + -- original since that has more information than ">1 && < 1", which + -- is the canonical inconsistent version range. + | null (versionIntervals vi) = vr + | otherwise = fromVersionIntervals vi + where + vi = toVersionIntervals vr + +---------------------------- +-- Wildcard range utilities +-- + +wildcardUpperBound :: Version -> Version +wildcardUpperBound (Version lowerBound ts) = Version upperBound ts + where + upperBound = init lowerBound ++ [last lowerBound + 1] + +isWildcardRange :: Version -> Version -> Bool +isWildcardRange (Version branch1 _) (Version branch2 _) = check branch1 branch2 + where check (n:[]) (m:[]) | n+1 == m = True + check (n:ns) (m:ms) | n == m = check ns ms + check _ _ = False + +------------------ +-- Intervals view +-- + +-- | A complementary representation of a 'VersionRange'. Instead of a boolean +-- version predicate it uses an increasing sequence of non-overlapping, +-- non-empty intervals. +-- +-- The key point is that this representation gives a canonical representation +-- for the semantics of 'VersionRange's. This makes it easier to check things +-- like whether a version range is empty, covers all versions, or requires a +-- certain minimum or maximum version. It also makes it easy to check equality +-- or containment. It also makes it easier to identify \'simple\' version +-- predicates for translation into foreign packaging systems that do not +-- support complex version range expressions. 
+-- +newtype VersionIntervals = VersionIntervals [VersionInterval] + deriving (Eq, Show) + +-- | Inspect the list of version intervals. +-- +versionIntervals :: VersionIntervals -> [VersionInterval] +versionIntervals (VersionIntervals is) = is + +type VersionInterval = (LowerBound, UpperBound) +data LowerBound = LowerBound Version !Bound deriving (Eq, Show) +data UpperBound = NoUpperBound | UpperBound Version !Bound deriving (Eq, Show) +data Bound = ExclusiveBound | InclusiveBound deriving (Eq, Show) + +minLowerBound :: LowerBound +minLowerBound = LowerBound (Version [0] []) InclusiveBound + +isVersion0 :: Version -> Bool +isVersion0 (Version [0] _) = True +isVersion0 _ = False + +instance Ord LowerBound where + LowerBound ver bound <= LowerBound ver' bound' = case compare ver ver' of + LT -> True + EQ -> not (bound == ExclusiveBound && bound' == InclusiveBound) + GT -> False + +instance Ord UpperBound where + _ <= NoUpperBound = True + NoUpperBound <= UpperBound _ _ = False + UpperBound ver bound <= UpperBound ver' bound' = case compare ver ver' of + LT -> True + EQ -> not (bound == InclusiveBound && bound' == ExclusiveBound) + GT -> False + +invariant :: VersionIntervals -> Bool +invariant (VersionIntervals intervals) = all validInterval intervals + && all doesNotTouch' adjacentIntervals + where + doesNotTouch' :: (VersionInterval, VersionInterval) -> Bool + doesNotTouch' ((_,u), (l',_)) = doesNotTouch u l' + + adjacentIntervals :: [(VersionInterval, VersionInterval)] + adjacentIntervals + | null intervals = [] + | otherwise = zip intervals (tail intervals) + +checkInvariant :: VersionIntervals -> VersionIntervals +checkInvariant is = assert (invariant is) is + +-- | Directly construct a 'VersionIntervals' from a list of intervals. +-- +-- Each interval must be non-empty. The sequence must be in increasing order +-- and no intervals may overlap or touch. If any of these conditions are not +-- satisfied the function returns @Nothing@. 
+-- +mkVersionIntervals :: [VersionInterval] -> Maybe VersionIntervals +mkVersionIntervals intervals + | invariant (VersionIntervals intervals) = Just (VersionIntervals intervals) + | otherwise = Nothing + +validVersion :: Version -> Bool +validVersion (Version [] _) = False +validVersion (Version vs _) = all (>=0) vs + +validInterval :: (LowerBound, UpperBound) -> Bool +validInterval i@(l, u) = validLower l && validUpper u && nonEmpty i + where + validLower (LowerBound v _) = validVersion v + validUpper NoUpperBound = True + validUpper (UpperBound v _) = validVersion v + +-- Check an interval is non-empty +-- +nonEmpty :: VersionInterval -> Bool +nonEmpty (_, NoUpperBound ) = True +nonEmpty (LowerBound l lb, UpperBound u ub) = + (l < u) || (l == u && lb == InclusiveBound && ub == InclusiveBound) + +-- Check an upper bound does not intersect, or even touch a lower bound: +-- +-- ---| or ---) but not ---] or ---) or ---] +-- |--- (--- (--- [--- [--- +-- +doesNotTouch :: UpperBound -> LowerBound -> Bool +doesNotTouch NoUpperBound _ = False +doesNotTouch (UpperBound u ub) (LowerBound l lb) = + u < l + || (u == l && ub == ExclusiveBound && lb == ExclusiveBound) + +-- | Check an upper bound does not intersect a lower bound: +-- +-- ---| or ---) or ---] or ---) but not ---] +-- |--- (--- (--- [--- [--- +-- +doesNotIntersect :: UpperBound -> LowerBound -> Bool +doesNotIntersect NoUpperBound _ = False +doesNotIntersect (UpperBound u ub) (LowerBound l lb) = + u < l + || (u == l && not (ub == InclusiveBound && lb == InclusiveBound)) + +-- | Test if a version falls within the version intervals. +-- +-- It exists mostly for completeness and testing. 
It satisfies the following +-- properties: +-- +-- > withinIntervals v (toVersionIntervals vr) = withinRange v vr +-- > withinIntervals v ivs = withinRange v (fromVersionIntervals ivs) +-- +withinIntervals :: Version -> VersionIntervals -> Bool +withinIntervals v (VersionIntervals intervals) = any withinInterval intervals + where + withinInterval (lowerBound, upperBound) = withinLower lowerBound + && withinUpper upperBound + withinLower (LowerBound v' ExclusiveBound) = v' < v + withinLower (LowerBound v' InclusiveBound) = v' <= v + + withinUpper NoUpperBound = True + withinUpper (UpperBound v' ExclusiveBound) = v' > v + withinUpper (UpperBound v' InclusiveBound) = v' >= v + +-- | Convert a 'VersionRange' to a sequence of version intervals. +-- +toVersionIntervals :: VersionRange -> VersionIntervals +toVersionIntervals = foldVersionRange + ( chkIvl (minLowerBound, NoUpperBound)) + (\v -> chkIvl (LowerBound v InclusiveBound, UpperBound v InclusiveBound)) + (\v -> chkIvl (LowerBound v ExclusiveBound, NoUpperBound)) + (\v -> if isVersion0 v then VersionIntervals [] else + chkIvl (minLowerBound, UpperBound v ExclusiveBound)) + unionVersionIntervals + intersectVersionIntervals + where + chkIvl interval = checkInvariant (VersionIntervals [interval]) + +-- | Convert a 'VersionIntervals' value back into a 'VersionRange' expression +-- representing the version intervals. 
+-- +fromVersionIntervals :: VersionIntervals -> VersionRange +fromVersionIntervals (VersionIntervals []) = noVersion +fromVersionIntervals (VersionIntervals intervals) = + foldr1 UnionVersionRanges [ interval l u | (l, u) <- intervals ] + + where + interval (LowerBound v InclusiveBound) + (UpperBound v' InclusiveBound) | v == v' + = ThisVersion v + interval (LowerBound v InclusiveBound) + (UpperBound v' ExclusiveBound) | isWildcardRange v v' + = WildcardVersion v + interval l u = lowerBound l `intersectVersionRanges'` upperBound u + + lowerBound (LowerBound v InclusiveBound) + | isVersion0 v = AnyVersion + | otherwise = orLaterVersion v + lowerBound (LowerBound v ExclusiveBound) = LaterVersion v + + upperBound NoUpperBound = AnyVersion + upperBound (UpperBound v InclusiveBound) = orEarlierVersion v + upperBound (UpperBound v ExclusiveBound) = EarlierVersion v + + intersectVersionRanges' vr AnyVersion = vr + intersectVersionRanges' AnyVersion vr = vr + intersectVersionRanges' vr vr' = IntersectVersionRanges vr vr' + +unionVersionIntervals :: VersionIntervals -> VersionIntervals + -> VersionIntervals +unionVersionIntervals (VersionIntervals is0) (VersionIntervals is'0) = + checkInvariant (VersionIntervals (union is0 is'0)) + where + union is [] = is + union [] is' = is' + union (i:is) (i':is') = case unionInterval i i' of + Left Nothing -> i : union is (i' :is') + Left (Just i'') -> union is (i'':is') + Right Nothing -> i' : union (i :is) is' + Right (Just i'') -> union (i'':is) is' + +unionInterval :: VersionInterval -> VersionInterval + -> Either (Maybe VersionInterval) (Maybe VersionInterval) +unionInterval (lower , upper ) (lower', upper') + + -- Non-intersecting intervals with the left interval ending first + | upper `doesNotTouch` lower' = Left Nothing + + -- Non-intersecting intervals with the right interval first + | upper' `doesNotTouch` lower = Right Nothing + + -- Complete or partial overlap, with the left interval ending first + | upper <= upper' = 
lowerBound `seq` + Left (Just (lowerBound, upper')) + + -- Complete or partial overlap, with the left interval ending first + | otherwise = lowerBound `seq` + Right (Just (lowerBound, upper)) + where + lowerBound = min lower lower' + +intersectVersionIntervals :: VersionIntervals -> VersionIntervals + -> VersionIntervals +intersectVersionIntervals (VersionIntervals is0) (VersionIntervals is'0) = + checkInvariant (VersionIntervals (intersect is0 is'0)) + where + intersect _ [] = [] + intersect [] _ = [] + intersect (i:is) (i':is') = case intersectInterval i i' of + Left Nothing -> intersect is (i':is') + Left (Just i'') -> i'' : intersect is (i':is') + Right Nothing -> intersect (i:is) is' + Right (Just i'') -> i'' : intersect (i:is) is' + +intersectInterval :: VersionInterval -> VersionInterval + -> Either (Maybe VersionInterval) (Maybe VersionInterval) +intersectInterval (lower , upper ) (lower', upper') + + -- Non-intersecting intervals with the left interval ending first + | upper `doesNotIntersect` lower' = Left Nothing + + -- Non-intersecting intervals with the right interval first + | upper' `doesNotIntersect` lower = Right Nothing + + -- Complete or partial overlap, with the left interval ending first + | upper <= upper' = lowerBound `seq` + Left (Just (lowerBound, upper)) + + -- Complete or partial overlap, with the right interval ending first + | otherwise = lowerBound `seq` + Right (Just (lowerBound, upper')) + where + lowerBound = max lower lower' + +------------------------------- +-- Parsing and pretty printing +-- + +instance Text VersionRange where + disp = fst + . 
foldVersionRange' -- precedence: + ( Disp.text "-any" , 0 :: Int) + (\v -> (Disp.text "==" <> disp v , 0)) + (\v -> (Disp.char '>' <> disp v , 0)) + (\v -> (Disp.char '<' <> disp v , 0)) + (\v -> (Disp.text ">=" <> disp v , 0)) + (\v -> (Disp.text "<=" <> disp v , 0)) + (\v _ -> (Disp.text "==" <> dispWild v , 0)) + (\(r1, p1) (r2, p2) -> (punct 2 p1 r1 <+> Disp.text "||" <+> punct 2 p2 r2 , 2)) + (\(r1, p1) (r2, p2) -> (punct 1 p1 r1 <+> Disp.text "&&" <+> punct 1 p2 r2 , 1)) + (\(r, p) -> (Disp.parens r, p)) + + where dispWild (Version b _) = + Disp.hcat (Disp.punctuate (Disp.char '.') (map Disp.int b)) + <> Disp.text ".*" + punct p p' | p < p' = Disp.parens + | otherwise = id + + parse = expr + where + expr = do Parse.skipSpaces + t <- term + Parse.skipSpaces + (do _ <- Parse.string "||" + Parse.skipSpaces + e <- expr + return (UnionVersionRanges t e) + +++ + return t) + term = do f <- factor + Parse.skipSpaces + (do _ <- Parse.string "&&" + Parse.skipSpaces + t <- term + return (IntersectVersionRanges f t) + +++ + return f) + factor = Parse.choice $ parens expr + : parseAnyVersion + : parseNoVersion + : parseWildcardRange + : map parseRangeOp rangeOps + parseAnyVersion = Parse.string "-any" >> return AnyVersion + parseNoVersion = Parse.string "-none" >> return noVersion + + parseWildcardRange = do + _ <- Parse.string "==" + Parse.skipSpaces + branch <- Parse.sepBy1 digits (Parse.char '.') + _ <- Parse.char '.' 
+ _ <- Parse.char '*' + return (WildcardVersion (Version branch [])) + + parens p = Parse.between (Parse.char '(' >> Parse.skipSpaces) + (Parse.char ')' >> Parse.skipSpaces) + (do a <- p + Parse.skipSpaces + return (VersionRangeParens a)) + + digits = do + first <- Parse.satisfy Char.isDigit + if first == '0' + then return 0 + else do rest <- Parse.munch Char.isDigit + return (read (first : rest)) + + parseRangeOp (s,f) = Parse.string s >> Parse.skipSpaces >> fmap f parse + rangeOps = [ ("<", EarlierVersion), + ("<=", orEarlierVersion), + (">", LaterVersion), + (">=", orLaterVersion), + ("==", ThisVersion) ] diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/doc/developing-packages.markdown cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/doc/developing-packages.markdown --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/doc/developing-packages.markdown 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/doc/developing-packages.markdown 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,2146 @@ +% Cabal User Guide: Developing Cabal packages + + +# Quickstart # + + +Lets assume we have created a project directory and already have a +Haskell module or two. + +Every project needs a name, we'll call this example "proglet". + +~~~~~~~~~~~ +$ cd proglet/ +$ ls +Proglet.hs +~~~~~~~~~~~ + +It is assumed that (apart from external dependencies) all the files that +make up a package live under a common project root directory. This +simple example has all the project files in one directory, but most +packages will use one or more subdirectories. + +To turn this into a Cabal package we need two extra files in the +project's root directory: + + * `proglet.cabal`: containing package metadata and build information. + + * `Setup.hs`: usually containing a few standardized lines of code, but + can be customized if necessary. + +We can create both files manually or we can use `cabal init` to create +them for us. 
+ +### Using "cabal init" ### + +The `cabal init` command is interactive. It asks us a number of +questions starting with the package name and version. + +~~~~~~~~~~ +$ cabal init +Package name [default "proglet"]? +Package version [default "0.1"]? +... +~~~~~~~~~~ + +It also asks questions about various other bits of package metadata. For +a package that you never intend to distribute to others, these fields can +be left blank. + +One of the important questions is whether the package contains a library +or an executable. Libraries are collections of Haskell modules that can +be re-used by other Haskell libraries and programs, while executables +are standalone programs. + +~~~~~~~~~~ +What does the package build: + 1) Library + 2) Executable +Your choice? +~~~~~~~~~~ + +For the moment these are the only choices. For more complex packages +(e.g. a library and multiple executables or test suites) the `.cabal` +file can be edited afterwards. + +Finally, `cabal init` creates the initial `proglet.cabal` and `Setup.hs` +files, and depending on your choice of license, a `LICENSE` file as well. + +~~~~~~~~~~ +Generating LICENSE... +Generating Setup.hs... +Generating proglet.cabal... + +You may want to edit the .cabal file and add a Description field. +~~~~~~~~~~ + +As this stage the `proglet.cabal` is not quite complete and before you +are able to build the package you will need to edit the file and add +some build information about the library or executable. + +### Editing the .cabal file ### + +Load up the `.cabal` file in a text editor. The first part of the +`.cabal` file has the package metadata and towards the end of the file +you will find the `executable` or `library` section. + +You will see that the fields that have yet to be filled in are commented +out. Cabal files use "`--`" Haskell-style comment syntax. (Note that +comments are only allowed on lines on their own. 
Trailing comments on +other lines are not allowed because they could be confused with program +options.) + +If you selected earlier to create a library package then your `.cabal` +file will have a section that looks like this: + +~~~~~~~~~~~~~~~~~ +library + exposed-modules: Proglet + -- other-modules: + -- build-depends: +~~~~~~~~~~~~~~~~~ + +Alternatively, if you selected an executable then there will be a +section like: + +~~~~~~~~~~~~~~~~~ +executable proglet + -- main-is: + -- other-modules: + -- build-depends: +~~~~~~~~~~~~~~~~~ + +The build information fields listed (but commented out) are just the few +most important and common fields. There are many others that are covered +later in this chapter. + +Most of the build information fields are the same between libraries and +executables. The difference is that libraries have a number of "exposed" +modules that make up the public interface of the library, while +executables have a file containing a `Main` module. + +The name of a library always matches the name of the package, so it is +not specified in the library section. Executables often follow the name +of the package too, but this is not required and the name is given +explicitly. + +### Modules included in the package ### + +For a library, `cabal init` looks in the project directory for files +that look like Haskell modules and adds all the modules to the +`exposed-modules` field. For modules that do not form part of your +package's public interface, you can move those modules to the +`other-modules` field. Either way, all modules in the library need to be +listed. + +For an executable, `cabal init` does not try to guess which file +contains your program's `Main` module. You will need to fill in the +`main-is` field with the file name of your program's `Main` module +(including `.hs` or `.lhs` extension). Other modules included in the +executable should be listed in the `other-modules` field. 
+ +### Modules imported from other packages ### + +While your library or executable may include a number of modules, it +almost certainly also imports a number of external modules from the +standard libraries or other pre-packaged libraries. (These other +libraries are of course just Cabal packages that contain a library.) + +You have to list all of the library packages that your library or +executable imports modules from. Or to put it another way: you have to +list all the other packages that your package depends on. + +For example, suppose the example `Proglet` module imports the module +`Data.Map`. The `Data.Map` module comes from the `containers` package, +so we must list it: + +~~~~~~~~~~~~~~~~~ +library + exposed-modules: Proglet + other-modules: + build-depends: containers, base == 4.* +~~~~~~~~~~~~~~~~~ + +In addition, almost every package also depends on the `base` library +package because it exports the standard `Prelude` module plus other +basic modules like `Data.List`. + +You will notice that we have listed `base == 4.*`. This gives a +constraint on the version of the base package that our package will work +with. The most common kinds of constraints are: + + * `pkgname >= n` + * `pkgname >= n && < m` + * `pkgname == n.*` + +The last is just shorthand, for example `base == 4.*` means exactly the +same thing as `base >= 4 && < 5`. + +### Building the package ### + +For simple packages that's it! We can now try configuring and building +the package: + +~~~~~~~~~~~~~~~~ +cabal configure +cabal build +~~~~~~~~~~~~~~~~ + +Assuming those two steps worked then you can also install the package: + +~~~~~~~~~~~~~~~~ +cabal install +~~~~~~~~~~~~~~~~ + +For libraries this makes them available for use in GHCi or to be used by +other packages. For executables it installs the program so that you can +run it (though you may first need to adjust your system's `$PATH`). 
+ +### Next steps ### + +What we have covered so far should be enough for very simple packages +that you use on your own system. + +The next few sections cover more details needed for more complex +packages and details needed for distributing packages to other people. + +The previous chapter covers building and installing packages -- your own +packages or ones developed by other people. + + +# Package concepts # + +Before diving into the details of writing packages it helps to +understand a bit about packages in the Haskell world and the particular +approach that Cabal takes. + +### The point of packages ### + +Packages are a mechanism for organising and distributing code. Packages +are particularly suited for "programming in the large", that is building +big systems by using and re-using code written by different people at +different times. + +People organise code into packages based on functionality and +dependencies. Social factors are also important: most packages have a +single author, or a relatively small team of authors. + +Packages are also used for distribution: the idea is that a package can +be created in one place and be moved to a different computer and be +usable in that different environment. There are a surprising number of +details that have to be got right for this to work, and a good package +system helps to simply this process and make it reliable. + +Packages come in two main flavours: libraries of reusable code, and +complete programs. Libraries present a code interface, an API, while +programs can be run directly. In the Haskell world, library packages +expose a set of Haskell modules as their public interface. Cabal +packages can contain a library or executables or both. + +Some programming languages have packages as a builtin language concept. +For example in Java, a package provides a local namespace for types and +other definitions. In the Haskell world, packages are not a part of the +language itself. 
Haskell programs consist of a number of modules, and +packages just provide a way to partition the modules into sets of +related functionality. Thus the choice of module names in Haskell is +still important, even when using packages. + +### Package names and versions ### + +All packages have a name, e.g. "HUnit". Package names are assumed to be +unique. Cabal package names can use letters, numbers and hyphens, but +not spaces. The namespace for Cabal packages is flat, not hierarchical. + +Packages also have a version, e.g "1.1". This matches the typical way in +which packages are developed. Strictly speaking, each version of a +package is independent, but usually they are very similar. Cabal package +versions follow the conventional numeric style, consisting of a sequence +of digits such as "1.0.1" or "2.0". There are a range of common +conventions for "versioning" packages, that is giving some meaning to +the version number in terms of changes in the package. Section [TODO] +has some tips on package versioning. + +The combination of package name and version is called the _package ID_ +and is written with a hyphen to separate the name and version, e.g. +"HUnit-1.1". + +For Cabal packages, the combination of the package name and version +_uniquely_ identifies each package. Or to put it another way: two +packages with the same name and version are considered to _be_ the same. + +Strictly speaking, the package ID only identifies each Cabal _source_ +package; the same Cabal source package can be configured and built in +different ways. There is a separate installed package ID that uniquely +identifies each installed package instance. Most of the time however, +users need not be aware of this detail. + +### Kinds of package: Cabal vs GHC vs system ### + +It can be slightly confusing at first because there are various +different notions of package floating around. Fortunately the details +are not very complicated. 
+ +Cabal packages +: Cabal packages are really source packages. That is they contain + Haskell (and sometimes C) source code. + + Cabal packages can be compiled to produce GHC packages. They can + also be translated into operating system packages. + +GHC packages +: This is GHC's view on packages. GHC only cares about library + packages, not executables. Library packages have to be registered + with GHC for them to be available in GHCi or to be used when + compiling other programs or packages. + + The low-level tool `ghc-pkg` is used to register GHC packages and to + get information on what packages are currently registered. + + You never need to make GHC packages manually. When you build and + install a Cabal package containing a library then it gets registered + with GHC automatically. + + Haskell implementations other than GHC have essentially the same + concept of registered packages. For the most part, Cabal hides the + slight differences. + +Operating system packages +: On operating systems like Linux and Mac OS X, the system has a + specific notion of a package and there are tools for installing and + managing packages. + + The Cabal package format is designed to allow Cabal packages to be + translated, mostly-automatically, into operating system packages. + They are usually translated 1:1, that is a single Cabal package + becomes a single system package. + + It is also possible to make Windows installers from Cabal packages, + though this is typically done for a program together with all of its + library dependencies, rather than packaging each library separately. + + +### Unit of distribution ### + +The Cabal package is the unit of distribution. What this means is that +each Cabal package can be distributed on its own in source or binary +form. Of course there may be dependencies between packages, but there is +usually a degree of flexibility in which versions of packages can work +together so distributing them independently makes sense. 
+ +It is perhaps easiest to see what being "the unit of distribution" +means by contrast to an alternative approach. Many projects are made up +of several interdependent packages and during development these might +all be kept under one common directory tree and be built and tested +together. When it comes to distribution however, rather than +distributing them all together in a single tarball, it is required that +they each be distributed independently in their own tarballs. + +Cabal's approach is to say that if you can specify a dependency on a +package then that package should be able to be distributed +independently. Or to put it the other way round, if you want to +distribute it as a single unit, then it should be a single package. + + +### Explicit dependencies and automatic package management ### + +Cabal takes the approach that all package dependencies are specified +explicitly and specified in a declarative way. The point is to enable +automatic package management. This means tools like `cabal` can resolve +dependencies and install a package plus all of its dependencies +automatically. Alternatively, it is possible to mechanically (or mostly +mechanically) translate Cabal packages into system packages and let the +system package manager install dependencies automatically. + +It is important to track dependencies accurately so that packages can +reliably be moved from one system to another system and still be +built there. Cabal is therefore relatively strict about specifying +dependencies. For example Cabal's default build system will not even let +code build if it tries to import a module from a package that isn't +listed in the `.cabal` file, even if that package is actually installed. +This helps to ensure that there are no "untracked dependencies" that +could cause the code to fail to build on some other system. 
+ +The explicit dependency approach is in contrast to the traditional +"./configure" approach where instead of specifying dependencies +declaratively, the `./configure` script checks if the dependencies are +present on the system. Some manual work is required to transform a +`./configure` based package into a Linux distribution package (or +similar). This conversion work is usually done by people other than the +package author(s). The practical effect of this is that only the most +popular packages will benefit from automatic package management. Instead, +Cabal forces the original author to specify the dependencies but the +advantage is that every package can benefit from automatic package +management. + +The "./configure" approach tends to encourage packages that adapt +themselves to the environment in which they are built, for example by +disabling optional features so that they can continue to work when a +particular dependency is not available. This approach makes sense in a +world where installing additional dependencies is a tiresome manual +process and so minimising dependencies is important. The automatic +package management view is that packages should just declare what they +need and the package manager will take responsibility for ensuring that +all the dependencies are installed. + +Sometimes of course optional features and optional dependencies do make +sense. Cabal packages can have optional features and varying +dependencies. These conditional dependencies are still specified in a +declarative way however and remain compatible with automatic package +management. The need to remain compatible with automatic package +management means that Cabal's conditional dependencies system is a bit +less flexible than with the "./configure" approach. 
+ +### Portability ### + +One of the purposes of Cabal is to make it easier to build packages on +different platforms (operating systems and CPU architectures), with +different compiler versions and indeed even with different Haskell +implementations. (Yes, there are Haskell implementations other than +GHC!) + +Cabal provides abstractions of features present in different Haskell +implementations and wherever possible it is best to take advantage of +these to increase portability. Where necessary however it is possible to +use specific features of specific implementations. + +For example a package author can list in the package's `.cabal` what +language extensions the code uses. This allows Cabal to figure out if +the language extension is supported by the Haskell implementation that +the user picks. Additionally, certain language extensions such as +Template Haskell require special handling from the build system and by +listing the extension it provides the build system with enough +information to do the right thing. + +Another similar example is linking with foreign libraries. Rather than +specifying GHC flags directly, the package author can list the libraries +that are needed and the build system will take care of using the right +flags for the compiler. Additionally this makes it easier for tools to +discover what system C libraries a package needs, which is useful for +tracking dependencies on system libraries (e.g. when translating into +Linux distribution packages). + +In fact both of these examples fall into the category of explicitly +specifying dependencies. Not all dependencies are other Cabal packages. +Foreign libraries are clearly another kind of dependency. It's also +possible to think of language extensions as dependencies: the package +depends on a Haskell implementation that supports all those extensions. + +Where compiler-specific options are needed however, there is an "escape +hatch" available. 
The developer can specify implementation-specific +options and more generally there is a configuration mechanism to +customise many aspects of how a package is built depending on the +Haskell implementation, the operating system, computer architecture and +user-specified configuration flags. + + +# Developing packages # + +The Cabal package is the unit of distribution. When installed, its +purpose is to make available: + + * One or more Haskell programs. + + * At most one library, exposing a number of Haskell modules. + +However having both a library and executables in a package does not work +very well; if the executables depend on the library, they must +explicitly list all the modules they directly or indirectly import from +that library. Fortunately, starting with Cabal 1.8.0.4, executables can +also declare the package that they are in as a dependency, and Cabal +will treat them as if they were in another package that depended on +the library. + +Internally, the package may consist of much more than a bunch of Haskell +modules: it may also have C source code and header files, source code +meant for preprocessing, documentation, test cases, auxiliary tools etc. + +A package is identified by a globally-unique _package name_, which +consists of one or more alphanumeric words separated by hyphens. To +avoid ambiguity, each of these words should contain at least one letter. +Chaos will result if two distinct packages with the same name are +installed on the same system. A particular version of the package is +distinguished by a _version number_, consisting of a sequence of one or +more integers separated by dots. These can be combined to form a single +text string called the _package ID_, using a hyphen to separate the name +from the version, e.g. "`HUnit-1.1`". + +Note: Packages are not part of the Haskell language; they simply +populate the hierarchical space of module names. 
In GHC 6.6 and later a +program may contain multiple modules with the same name if they come +from separate packages; in all other current Haskell systems packages +may not overlap in the modules they provide, including hidden modules. + + +## Creating a package ## + +Suppose you have a directory hierarchy containing the source files that +make up your package. You will need to add two more files to the root +directory of the package: + +_package_`.cabal` + +: a Unicode UTF-8 text file containing a package description. + For details of the syntax of this file, see the [section on package + descriptions](#package-descriptions). + +`Setup.hs` + +: a single-module Haskell program to perform various setup tasks (with + the interface described in the section on [building and installing + packages](installing-packages.html). This module should + import only modules that will be present in all Haskell + implementations, including modules of the Cabal library. The + content of this file is determined by the `build-type` setting in + the `.cabal` file. In most cases it will be trivial, calling on + the Cabal library to do most of the work. + +Once you have these, you can create a source bundle of this directory +for distribution. Building of the package is discussed in the section on +[building and installing packages](installing-packages.html). + +One of the purposes of Cabal is to make it easier to build a package +with different Haskell implementations. So it provides abstractions of +features present in different Haskell implementations and wherever +possible it is best to take advantage of these to increase portability. +Where necessary however it is possible to use specific features of +specific implementations. For example one of the pieces of information a +package author can put in the package's `.cabal` file is what language +extensions the code uses. 
This is far preferable to specifying flags for +a specific compiler as it allows Cabal to pick the right flags for the +Haskell implementation that the user picks. It also allows Cabal to +figure out if the language extension is even supported by the Haskell +implementation that the user picks. Where compiler-specific options are +needed however, there is an "escape hatch" available. The developer can +specify implementation-specific options and more generally there is a +configuration mechanism to customise many aspects of how a package is +built depending on the Haskell implementation, the Operating system, +computer architecture and user-specified configuration flags. + +~~~~~~~~~~~~~~~~ +name: Foo +version: 1.0 + +library + build-depends: base + exposed-modules: Foo + extensions: ForeignFunctionInterface + ghc-options: -Wall + if os(windows) + build-depends: Win32 +~~~~~~~~~~~~~~~~ + +#### Example: A package containing a simple library #### + +The HUnit package contains a file `HUnit.cabal` containing: + +~~~~~~~~~~~~~~~~ +name: HUnit +version: 1.1.1 +synopsis: A unit testing framework for Haskell +homepage: http://hunit.sourceforge.net/ +category: Testing +author: Dean Herington +license: BSD3 +license-file: LICENSE +cabal-version: >= 1.10 +build-type: Simple + +library + build-depends: base >= 2 && < 4 + exposed-modules: Test.HUnit.Base, Test.HUnit.Lang, + Test.HUnit.Terminal, Test.HUnit.Text, Test.HUnit + default-extensions: CPP +~~~~~~~~~~~~~~~~ + +and the following `Setup.hs`: + +~~~~~~~~~~~~~~~~ +import Distribution.Simple +main = defaultMain +~~~~~~~~~~~~~~~~ + +#### Example: A package containing executable programs #### + +~~~~~~~~~~~~~~~~ +name: TestPackage +version: 0.0 +synopsis: Small package with two programs +author: Angela Author +license: BSD3 +build-type: Simple +cabal-version: >= 1.2 + +executable program1 + build-depends: HUnit + main-is: Main.hs + hs-source-dirs: prog1 + +executable program2 + main-is: Main.hs + build-depends: HUnit + 
hs-source-dirs: prog2 + other-modules: Utils +~~~~~~~~~~~~~~~~ + +with `Setup.hs` the same as above. + +#### Example: A package containing a library and executable programs #### + +~~~~~~~~~~~~~~~~ +name: TestPackage +version: 0.0 +synopsis: Package with library and two programs +license: BSD3 +author: Angela Author +build-type: Simple +cabal-version: >= 1.2 + +library + build-depends: HUnit + exposed-modules: A, B, C + +executable program1 + main-is: Main.hs + hs-source-dirs: prog1 + other-modules: A, B + +executable program2 + main-is: Main.hs + hs-source-dirs: prog2 + other-modules: A, C, Utils +~~~~~~~~~~~~~~~~ + +with `Setup.hs` the same as above. Note that any library modules +required (directly or indirectly) by an executable must be listed again. + +The trivial setup script used in these examples uses the _simple build +infrastructure_ provided by the Cabal library (see +[Distribution.Simple][dist-simple]). The simplicity lies in its +interface rather than its implementation. It automatically handles +preprocessing with standard preprocessors, and builds packages for all +the Haskell implementations. + +The simple build infrastructure can also handle packages where building +is governed by system-dependent parameters, if you specify a little more +(see the section on [system-dependent +parameters](#system-dependent-parameters)). A few packages require [more +elaborate solutions](#more-complex-packages). + +## Package descriptions ## + +The package description file must have a name ending in "`.cabal`". It +must be a Unicode text file encoded using valid UTF-8. There must be +exactly one such file in the directory. The first part of the name is +usually the package name, and some of the tools that operate on Cabal +packages require this. + +In the package description file, lines whose first non-whitespace characters +are "`--`" are treated as comments and ignored. + +This file should contain a number of global property descriptions and +several sections. 
+ +* The [global properties](#package-properties) describe the package as a + whole, such as name, license, author, etc. + +* Optionally, a number of _configuration flags_ can be declared. These + can be used to enable or disable certain features of a package. (see + the section on [configurations](#configurations)). + +* The (optional) library section specifies the [library + properties](#library) and relevant [build + information](#build-information). + +* Following is an arbitrary number of executable sections + which describe an executable program and relevant [build + information](#build-information). + +Each section consists of a number of property descriptions +in the form of field/value pairs, with a syntax roughly like mail +message headers. + +* Case is not significant in field names, but is significant in field + values. + +* To continue a field value, indent the next line relative to the field + name. + +* Field names may be indented, but all field values in the same section + must use the same indentation. + +* Tabs are *not* allowed as indentation characters due to a missing + standard interpretation of tab width. + +* To get a blank line in a field value, use an indented "`.`" + +The syntax of the value depends on the field. Field types include: + +_token_, _filename_, _directory_ +: Either a sequence of one or more non-space non-comma characters, or + a quoted string in Haskell 98 lexical syntax. Unless otherwise + stated, relative filenames and directories are interpreted from the + package root directory. + +_freeform_, _URL_, _address_ +: An arbitrary, uninterpreted string. + +_identifier_ +: A letter followed by zero or more alphanumerics or underscores. + +_compiler_ +: A compiler flavor (one of: `GHC`, `JHC`, `UHC` or `LHC`) followed by a + version range. For example, `GHC ==6.10.3`, or `LHC >=0.6 && <0.8`. 
+ +### Modules and preprocessors ### + +Haskell module names listed in the `exposed-modules` and `other-modules` +fields may correspond to Haskell source files, i.e. with names ending in +"`.hs`" or "`.lhs`", or to inputs for various Haskell preprocessors. The +simple build infrastructure understands the extensions: + +* `.gc` ([greencard][]) +* `.chs` ([c2hs][]) +* `.hsc` (`hsc2hs`) +* `.y` and `.ly` ([happy][]) +* `.x` ([alex][]) +* `.cpphs` ([cpphs][]) + +When building, Cabal will automatically run the appropriate preprocessor +and compile the Haskell module it produces. + +Some fields take lists of values, which are optionally separated by commas, +except for the `build-depends` field, where the commas are mandatory. + +Some fields are marked as required. All others are optional, and unless +otherwise specified have empty default values. + +### Package properties ### + +These fields may occur in the first top-level properties section and +describe the package as a whole: + +`name:` _package-name_ (required) +: The unique name of the package, without the version number. + +`version:` _numbers_ (required) +: The package version number, usually consisting of a sequence of + natural numbers separated by dots. + +`cabal-version:` _>= x.y_ +: The version of the Cabal specification that this package description uses. + The Cabal specification does slowly evolve, introducing new features and + occasionally changing the meaning of existing features. By specifying + which version of the spec you are using it enables programs which process + the package description to know what syntax to expect and what each part + means. + + For historical reasons this is always expressed using _>=_ version range + syntax. No other kinds of version range make sense, in particular upper + bounds do not make sense. In future this field will specify just a version + number, rather than a version range. + + The version number you specify will affect both compatibility and + behaviour. 
Most tools (including the Cabal library and cabal program) + understand a range of versions of the Cabal specification. Older tools + will of course only work with older versions of the Cabal specification. + Most of the time, tools that are too old will recognise this fact and + produce a suitable error message. + + As for behaviour, new versions of the Cabal spec can change the meaning + of existing syntax. This means if you want to take advantage of the new + meaning or behaviour then you must specify the newer Cabal version. + Tools are expected to use the meaning and behaviour appropriate to the + version given in the package description. + + In particular, the syntax of package descriptions changed significantly + with Cabal version 1.2 and the `cabal-version` field is now required. + Files written in the old syntax are still recognized, so if you require + compatibility with very old Cabal versions then you may write your package + description file using the old syntax. Please consult the user's guide of + an older Cabal version for a description of that syntax. + +`build-type:` _identifier_ +: The type of build used by this package. Build types are the + constructors of the [BuildType][] type, defaulting to `Custom`. + + If the build type is anything other than `Custom`, then the + `Setup.hs` file *must* be exactly the standardized content + discussed below. This is because in these cases, `cabal` will + ignore the `Setup.hs` file completely, whereas other methods of + package management, such as `runhaskell Setup.hs [CMD]`, still + rely on the `Setup.hs` file. 
+ + For build type `Simple`, the contents of `Setup.hs` must be: + + ~~~~~~~~~~~~~~~~ + import Distribution.Simple + main = defaultMain + ~~~~~~~~~~~~~~~~ + + For build type `Configure` (see the section on [system-dependent + parameters](#system-dependent-parameters) below), the contents of + `Setup.hs` must be: + + ~~~~~~~~~~~~~~~~ + import Distribution.Simple + main = defaultMainWithHooks autoconfUserHooks + ~~~~~~~~~~~~~~~~ + + For build type `Make` (see the section on [more complex + packages](installing-packages.html#more-complex-packages) below), + the contents of `Setup.hs` must be: + + ~~~~~~~~~~~~~~~~ + import Distribution.Make + main = defaultMain + ~~~~~~~~~~~~~~~~ + + For build type `Custom`, the file `Setup.hs` can be customized, + and will be used both by `cabal` and other tools. + + For most packages, the build type `Simple` is sufficient. + +`license:` _identifier_ (default: `AllRightsReserved`) +: The type of license under which this package is distributed. + License names are the constants of the [License][dist-license] type. + +`license-file:` _filename_ or `license-files:` _filename list_ +: The name of a file(s) containing the precise copyright license for + this package. The license file(s) will be installed with the package. + + If you have multiple license files then use the `license-files` + field instead of (or in addition to) the `license-file` field. + +`copyright:` _freeform_ +: The content of a copyright notice, typically the name of the holder + of the copyright on the package and the year(s) from which copyright + is claimed. For example: `Copyright: (c) 2006-2007 Joe Bloggs` + +`author:` _freeform_ +: The original author of the package. + + Remember that `.cabal` files are Unicode, using the UTF-8 encoding. + +`maintainer:` _address_ +: The current maintainer or maintainers of the package. This is an e-mail address to which users should send bug + reports, feature requests and patches. 
+ +`stability:` _freeform_ +: The stability level of the package, e.g. `alpha`, `experimental`, `provisional`, + `stable`. + +`homepage:` _URL_ +: The package homepage. + +`bug-reports:` _URL_ +: The URL where users should direct bug reports. This would normally be either: + + * A `mailto:` URL, e.g. for a person or a mailing list. + + * An `http:` (or `https:`) URL for an online bug tracking system. + + For example Cabal itself uses a web-based bug tracking system + + ~~~~~~~~~~~~~~~~ + bug-reports: http://hackage.haskell.org/trac/hackage/ + ~~~~~~~~~~~~~~~~ + +`package-url:` _URL_ +: The location of a source bundle for the package. The distribution + should be a Cabal package. + +`synopsis:` _freeform_ +: A very short description of the package, for use in a table of + packages. This is your headline, so keep it short (one line) but as + informative as possible. Save space by not including the package + name or saying it's written in Haskell. + +`description:` _freeform_ +: Description of the package. This may be several paragraphs, and + should be aimed at a Haskell programmer who has never heard of your + package before. + + For library packages, this field is used as prologue text by [`setup + haddock`](installing-packages.html#setup-haddock), and thus may + contain the same markup as [haddock][] documentation comments. + +`category:` _freeform_ +: A classification category for future use by the package catalogue [Hackage]. These + categories have not yet been specified, but the upper levels of the + module hierarchy make a good start. + +`tested-with:` _compiler list_ +: A list of compilers and versions against which the package has been + tested (or at least built). + +`data-files:` _filename list_ +: A list of files to be installed for run-time use by the package. + This is useful for packages that use a large amount of static data, + such as tables of values or code templates. 
Cabal provides a way to + [find these files at + run-time](#accessing-data-files-from-package-code). + + A limited form of `*` wildcards in file names, for example + `data-files: images/*.png` matches all the `.png` files in the + `images` directory. + + The limitation is that `*` wildcards are only allowed in place of + the file name, not in the directory name or file extension. In + particular, wildcards do not include directories contents + recursively. Furthermore, if a wildcard is used it must be used with + an extension, so `data-files: data/*` is not allowed. When matching + a wildcard plus extension, a file's full extension must match + exactly, so `*.gz` matches `foo.gz` but not `foo.tar.gz`. A wildcard + that does not match any files is an error. + + The reason for providing only a very limited form of wildcard is to + concisely express the common case of a large number of related files + of the same file type without making it too easy to accidentally + include unwanted files. + +`data-dir:` _directory_ +: The directory where Cabal looks for data files to install, relative + to the source directory. By default, Cabal will look in the source + directory itself. + +`extra-source-files:` _filename list_ +: A list of additional files to be included in source distributions + built with [`setup sdist`](installing-packages.html#setup-sdist). As + with `data-files` it can use a limited form of `*` wildcards in file + names. + +`extra-doc-files:` _filename list_ +: A list of additional files to be included in source distributions, + and also copied to the html directory when Haddock documentation is + generated. As with `data-files` it can use a limited form of `*` + wildcards in file names. + +`extra-tmp-files:` _filename list_ +: A list of additional files or directories to be removed by [`setup + clean`](installing-packages.html#setup-clean). 
These would typically + be additional files created by additional hooks, such as the scheme + described in the section on [system-dependent + parameters](#system-dependent-parameters). + +### Library ### + +The library section should contain the following fields: + +`exposed-modules:` _identifier list_ (required if this package contains a library) +: A list of modules added by this package. + +`exposed:` _boolean_ (default: `True`) +: Some Haskell compilers (notably GHC) support the notion of packages + being "exposed" or "hidden" which means the modules they provide can + be easily imported without always having to specify which package + they come from. However this only works effectively if the modules + provided by all exposed packages do not overlap (otherwise a module + import would be ambiguous). + + Almost all new libraries use hierarchical module names that do not + clash, so it is very uncommon to have to use this field. However it + may be necessary to set `exposed: False` for some old libraries that + use a flat module namespace or where it is known that the exposed + modules would clash with other common modules. + +`reexported-modules:` _exportlist _ +: Supported only in GHC 7.10 and later. A list of modules to _reexport_ from + this package. The syntax of this field is `orig-pkg:Name as NewName` to + reexport module `Name` from `orig-pkg` with the new name `NewName`. We also + support abbreviated versions of the syntax: if you omit `as NewName`, + we'll reexport without renaming; if you omit `orig-pkg`, then we will + automatically figure out which package to reexport from, if it's + unambiguous. + + Reexported modules are useful for compatibility shims when a package has + been split into multiple packages, and they have the useful property that + if a package provides a module, and another package reexports it under + the same name, these are not considered a conflict (as would be the case + with a stub module.) 
They can also be used to resolve name conflicts. + +The library section may also contain build information fields (see the +section on [build information](#build-information)). + +#### Opening an interpreter session #### + +While developing a package, it is often useful to make its code available inside +an interpreter session. This can be done with the `repl` command: + +~~~~~~~~~~~~~~~~ +cabal repl +~~~~~~~~~~~~~~~~ + +The name comes from the acronym [REPL], which stands for +"read-eval-print-loop". By default `cabal repl` loads the first component in a +package. If the package contains several named components, the name can be given +as an argument to `repl`. The name can be also optionally prefixed with the +component's type for disambiguation purposes. Example: + +~~~~~~~~~~~~~~~~ +cabal repl foo +cabal repl exe:foo +cabal repl test:bar +cabal repl bench:baz +~~~~~~~~~~~~~~~~ + +#### Freezing dependency versions #### + +If a package is built in several different environments, such as a development +environment, a staging environment and a production environment, it may be +necessary or desirable to ensure that the same dependency versions are +selected in each environment. This can be done with the `freeze` command: + +~~~~~~~~~~~~~~~~ +cabal freeze +~~~~~~~~~~~~~~~~ + +The command writes the selected version for all dependencies to the +`cabal.config` file. All environments which share this file will use the +dependency versions specified in it. + +### Executables ### + +Executable sections (if present) describe executable programs contained +in the package and must have an argument after the section label, which +defines the name of the executable. This is a freeform argument but may +not contain spaces. + +The executable may be described using the following fields, as well as +build information fields (see the section on [build +information](#build-information)). 
+ +`main-is:` _filename_ (required) +: The name of the `.hs` or `.lhs` file containing the `Main` module. Note that it is the + `.hs` filename that must be listed, even if that file is generated + using a preprocessor. The source file must be relative to one of the + directories listed in `hs-source-dirs`. + +#### Running executables #### + +You can have Cabal build and run your executables by using the `run` command: + +~~~~~~~~~~~~~~~~ +$ cabal run EXECUTABLE [-- EXECUTABLE_FLAGS] +~~~~~~~~~~~~~~~~ + +This command will configure, build and run the executable `EXECUTABLE`. The +double dash separator is required to distinguish executable flags from `run`'s +own flags. If there is only one executable defined in the whole package, the +executable's name can be omitted. See the output of `cabal help run` for a list +of options you can pass to `cabal run`. + + +### Test suites ### + +Test suite sections (if present) describe package test suites and must have an +argument after the section label, which defines the name of the test suite. +This is a freeform argument, but may not contain spaces. It should be unique +among the names of the package's other test suites, the package's executables, +and the package itself. Using test suite sections requires at least Cabal +version 1.9.2. + +The test suite may be described using the following fields, as well as build +information fields (see the section on [build +information](#build-information)). + +`type:` _interface_ (required) +: The interface type and version of the test suite. Cabal supports two test + suite interfaces, called `exitcode-stdio-1.0` and `detailed-0.9`. Each of + these types may require or disallow other fields as described below. + +Test suites using the `exitcode-stdio-1.0` interface are executables +that indicate test failure with a non-zero exit code when run; they may provide +human-readable log information through the standard output and error channels. 
+This interface is provided primarily for compatibility with existing test +suites; it is preferred that new test suites be written for the `detailed-0.9` +interface. The `exitcode-stdio-1.0` type requires the `main-is` field. + +`main-is:` _filename_ (required: `exitcode-stdio-1.0`, disallowed: `detailed-0.9`) +: The name of the `.hs` or `.lhs` file containing the `Main` module. Note that it is the + `.hs` filename that must be listed, even if that file is generated + using a preprocessor. The source file must be relative to one of the + directories listed in `hs-source-dirs`. This field is analogous to the + `main-is` field of an executable section. + +Test suites using the `detailed-0.9` interface are modules exporting the symbol +`tests :: IO [Test]`. The `Test` type is exported by the module +`Distribution.TestSuite` provided by Cabal. For more details, see the example below. + +The `detailed-0.9` interface allows Cabal and other test agents to inspect a +test suite's results case by case, producing detailed human- and +machine-readable log files. The `detailed-0.9` interface requires the +`test-module` field. + +`test-module:` _identifier_ (required: `detailed-0.9`, disallowed: `exitcode-stdio-1.0`) +: The module exporting the `tests` symbol. + +#### Example: Package using `exitcode-stdio-1.0` interface #### + +The example package description and executable source file below demonstrate +the use of the `exitcode-stdio-1.0` interface. For brevity, the example package +does not include a library or any normal executables, but a real package would +be required to have at least one library or executable. 
+ +foo.cabal: + +~~~~~~~~~~~~~~~~ +Name: foo +Version: 1.0 +License: BSD3 +Cabal-Version: >= 1.9.2 +Build-Type: Simple + +Test-Suite test-foo + type: exitcode-stdio-1.0 + main-is: test-foo.hs + build-depends: base +~~~~~~~~~~~~~~~~ + +test-foo.hs: + +~~~~~~~~~~~~~~~~ +module Main where + +import System.Exit (exitFailure) + +main = do + putStrLn "This test always fails!" + exitFailure +~~~~~~~~~~~~~~~~ + +#### Example: Package using `detailed-0.9` interface #### + +The example package description and test module source file below demonstrate +the use of the `detailed-0.9` interface. For brevity, the example package does +not include a library or any normal executables, but a real package would be +required to have at least one library or executable. The test module below +also develops a simple implementation of the interface set by +`Distribution.TestSuite`, but in actual usage the implementation would be +provided by the library that provides the testing facility. + +bar.cabal: + +~~~~~~~~~~~~~~~~ +Name: bar +Version: 1.0 +License: BSD3 +Cabal-Version: >= 1.9.2 +Build-Type: Simple + +Test-Suite test-bar + type: detailed-0.9 + test-module: Bar + build-depends: base, Cabal >= 1.9.2 +~~~~~~~~~~~~~~~~ + +Bar.hs: + +~~~~~~~~~~~~~~~~ +module Bar ( tests ) where + +import Distribution.TestSuite + +tests :: IO [Test] +tests = return [ Test succeeds, Test fails ] + where + succeeds = TestInstance + { run = return $ Finished Pass + , name = "succeeds" + , tags = [] + , options = [] + , setOption = \_ _ -> Right succeeds + } + fails = TestInstance + { run = return $ Finished $ Fail "Always fails!" 
+ , name = "fails" + , tags = [] + , options = [] + , setOption = \_ _ -> Right fails + } +~~~~~~~~~~~~~~~~ + +#### Running test suites #### + +You can have Cabal run your test suites using its built-in test +runner: + +~~~~~~~~~~~~~~~~ +$ cabal configure --enable-tests +$ cabal build +$ cabal test +~~~~~~~~~~~~~~~~ + +See the output of `cabal help test` for a list of options you can pass +to `cabal test`. + +### Benchmarks ### + +Benchmark sections (if present) describe benchmarks contained in the package and +must have an argument after the section label, which defines the name of the +benchmark. This is a freeform argument, but may not contain spaces. It should +be unique among the names of the package's other benchmarks, the package's test +suites, the package's executables, and the package itself. Using benchmark +sections requires at least Cabal version 1.9.2. + +The benchmark may be described using the following fields, as well as build +information fields (see the section on [build information](#build-information)). + +`type:` _interface_ (required) +: The interface type and version of the benchmark. At the moment Cabal only + supports one benchmark interface, called `exitcode-stdio-1.0`. + +Benchmarks using the `exitcode-stdio-1.0` interface are executables that +indicate failure to run the benchmark with a non-zero exit code when run; they +may provide human-readable information through the standard output and error +channels. + +`main-is:` _filename_ (required: `exitcode-stdio-1.0`) +: The name of the `.hs` or `.lhs` file containing the `Main` module. Note that + it is the `.hs` filename that must be listed, even if that file is generated + using a preprocessor. The source file must be relative to one of the + directories listed in `hs-source-dirs`. This field is analogous to the + `main-is` field of an executable section. 
+ +#### Example: Package using `exitcode-stdio-1.0` interface #### + +The example package description and executable source file below demonstrate +the use of the `exitcode-stdio-1.0` interface. For brevity, the example package +does not include a library or any normal executables, but a real package would +be required to have at least one library or executable. + +foo.cabal: + +~~~~~~~~~~~~~~~~ +Name: foo +Version: 1.0 +License: BSD3 +Cabal-Version: >= 1.9.2 +Build-Type: Simple + +Benchmark bench-foo + type: exitcode-stdio-1.0 + main-is: bench-foo.hs + build-depends: base, time +~~~~~~~~~~~~~~~~ + +bench-foo.hs: + +~~~~~~~~~~~~~~~~ +{-# LANGUAGE BangPatterns #-} +module Main where + +import Data.Time.Clock + +fib 0 = 1 +fib 1 = 1 +fib n = fib (n-1) + fib (n-2) + +main = do + start <- getCurrentTime + let !r = fib 20 + end <- getCurrentTime + putStrLn $ "fib 20 took " ++ show (diffUTCTime end start) +~~~~~~~~~~~~~~~~ + +#### Running benchmarks #### + +You can have Cabal run your benchmark using its built-in benchmark runner: + +~~~~~~~~~~~~~~~~ +$ cabal configure --enable-benchmarks +$ cabal build +$ cabal bench +~~~~~~~~~~~~~~~~ + +See the output of `cabal help bench` for a list of options you can +pass to `cabal bench`. + +### Build information ### + +The following fields may be optionally present in a library or +executable section, and give information for the building of the +corresponding library or executable. See also the sections on +[system-dependent parameters](#system-dependent-parameters) and +[configurations](#configurations) for a way to supply system-dependent +values for these fields. + +`build-depends:` _package list_ +: A list of packages needed to build this one. Each package can be + annotated with a version constraint. + + Version constraints use the operators `==, >=, >, <, <=` and a + version number. Multiple constraints can be combined using `&&` or + `||`. If no version constraint is specified, any version is assumed + to be acceptable. 
For example: + + ~~~~~~~~~~~~~~~~ + library + build-depends: + base >= 2, + foo >= 1.2 && < 1.3, + bar + ~~~~~~~~~~~~~~~~ + + Dependencies like `foo >= 1.2 && < 1.3` turn out to be very common + because it is recommended practise for package versions to + correspond to API versions. As of Cabal 1.6, there is a special + syntax to support this use: + + ~~~~~~~~~~~~~~~~ + build-depends: foo ==1.2.* + ~~~~~~~~~~~~~~~~ + + It is only syntactic sugar. It is exactly equivalent to `foo >= 1.2 && < 1.3`. + + With Cabal 1.20 and GHC 7.10, `build-depends` also supports module + thinning and renaming, which allows you to selectively decide what + modules become visible from a package dependency. For example: + + ~~~~~~~~~~~~~~~~ + build-depends: containers (Data.Set, Data.IntMap as Map) + ~~~~~~~~~~~~~~~~ + + This results in only the modules `Data.Set` and `Map` being visible to + the user from containers, hiding all other modules. To add additional + names for modules without hiding the others, you can use the `with` + keyword: + + ~~~~~~~~~~~~~~~~ + build-depends: containers with (Data.IntMap as Map) + ~~~~~~~~~~~~~~~~ + + Note: Prior to Cabal 1.8, build-depends specified in each section + were global to all sections. This was unintentional, but some packages + were written to depend on it, so if you need your build-depends to + be local to each section, you must specify at least + `Cabal-Version: >= 1.8` in your `.cabal` file. + +`other-modules:` _identifier list_ +: A list of modules used by the component but not exposed to users. + For a library component, these would be hidden modules of the + library. For an executable, these would be auxiliary modules to be + linked with the file named in the `main-is` field. + + Note: Every module in the package *must* be listed in one of + `other-modules`, `exposed-modules` or `main-is` fields. + +`hs-source-dirs:` _directory list_ (default: "`.`") +: Root directories for the module hierarchy. 
+ + For backwards compatibility, the old variant `hs-source-dir` is also + recognized. + +`extensions:` _identifier list_ +: A list of Haskell extensions used by every module. Extension names + are the constructors of the [Extension][extension] type. These + determine corresponding compiler options. In particular, `CPP` specifies that + Haskell source files are to be preprocessed with a C preprocessor. + + Extensions used only by one module may be specified by placing a + `LANGUAGE` pragma in the source file affected, e.g.: + + ~~~~~~~~~~~~~~~~ + {-# LANGUAGE CPP, MultiParamTypeClasses #-} + ~~~~~~~~~~~~~~~~ + + Note: GHC versions prior to 6.6 do not support the `LANGUAGE` pragma. + +`build-tools:` _program list_ +: A list of programs, possibly annotated with versions, needed to + build this package, e.g. `c2hs >= 0.15, cpphs`. If no version + constraint is specified, any version is assumed to be acceptable. + +`buildable:` _boolean_ (default: `True`) +: Is the component buildable? Like some of the other fields below, + this field is more useful with the slightly more elaborate form of + the simple build infrastructure described in the section on + [system-dependent parameters](#system-dependent-parameters). + +`ghc-options:` _token list_ +: Additional options for GHC. You can often achieve the same effect + using the `extensions` field, which is preferred. + + Options required only by one module may be specified by placing an + `OPTIONS_GHC` pragma in the source file affected. + +`ghc-prof-options:` _token list_ +: Additional options for GHC when the package is built with profiling + enabled. + +`ghc-shared-options:` _token list_ +: Additional options for GHC when the package is built as shared library. + +`includes:` _filename list_ +: A list of header files to be included in any compilations via C. + This field applies to both header files that are already installed + on the system and to those coming with the package to be installed. 
+ These files typically contain function prototypes for foreign + imports used by the package. + +`install-includes:` _filename list_ +: A list of header files from this package to be installed into + `$libdir/includes` when the package is installed. Files listed in + `install-includes:` should be found relative to the top of the + source tree or relative to one of the directories listed in + `include-dirs`. + + `install-includes` is typically used to name header files that + contain prototypes for foreign imports used in Haskell code in this + package, for which the C implementations are also provided with the + package. Note that to include them when compiling the package + itself, they need to be listed in the `includes:` field as well. + +`include-dirs:` _directory list_ +: A list of directories to search for header files, when preprocessing + with `c2hs`, `hsc2hs`, `cpphs` or the C preprocessor, and + also when compiling via C. + +`c-sources:` _filename list_ +: A list of C source files to be compiled and linked with the Haskell files. + +`js-sources:` _filename list_ +: A list of JavaScript source files to be linked with the Haskell files + (only for JavaScript targets). + +`extra-libraries:` _token list_ +: A list of extra libraries to link with. + +`extra-ghci-libraries:` _token list_ +: A list of extra libraries to be used instead of 'extra-libraries' when + the package is loaded with GHCi. + +`extra-lib-dirs:` _directory list_ +: A list of directories to search for libraries. + +`cc-options:` _token list_ +: Command-line arguments to be passed to the C compiler. Since the + arguments are compiler-dependent, this field is more useful with the + setup described in the section on [system-dependent + parameters](#system-dependent-parameters). + +`cpp-options:` _token list_ +: Command-line arguments for pre-processing Haskell code. Applies to + haskell source and other pre-processed Haskell source like .hsc .chs. 
+ Does not apply to C code, that's what cc-options is for. + +`ld-options:` _token list_ +: Command-line arguments to be passed to the linker. Since the + arguments are compiler-dependent, this field is more useful with the + setup described in the section on [system-dependent + parameters](#system-dependent-parameters). + +`pkgconfig-depends:` _package list_ +: A list of [pkg-config][] packages, needed to build this package. + They can be annotated with versions, e.g. `gtk+-2.0 >= 2.10, cairo + >= 1.0`. If no version constraint is specified, any version is + assumed to be acceptable. Cabal uses `pkg-config` to find if the + packages are available on the system and to find the extra + compilation and linker options needed to use the packages. + + If you need to bind to a C library that supports `pkg-config` (use + `pkg-config --list-all` to find out if it is supported) then it is + much preferable to use this field rather than hard code options into + the other fields. + +`frameworks:` _token list_ +: On Darwin/MacOS X, a list of frameworks to link to. See Apple's + developer documentation for more details on frameworks. This entry + is ignored on all other platforms. + +### Configurations ### + +Library and executable sections may include conditional +blocks, which test for various system parameters and +configuration flags. The flags mechanism is rather generic, +but most of the time a flag represents a certain feature that +can be switched on or off by the package user. +Here is an example package description file using +configurations: + +#### Example: A package containing a library and executable programs #### + +~~~~~~~~~~~~~~~~ +Name: Test1 +Version: 0.0.1 +Cabal-Version: >= 1.2 +License: BSD3 +Author: Jane Doe +Synopsis: Test package to test configurations +Category: Example + +Flag Debug + Description: Enable debug support + Default: False + +Flag WebFrontend + Description: Include API for web frontend. 
+ -- Cabal checks if the configuration is possible, first + -- with this flag set to True and if not it tries with False + +Library + Build-Depends: base + Exposed-Modules: Testing.Test1 + Extensions: CPP + + if flag(debug) + GHC-Options: -DDEBUG + if !os(windows) + CC-Options: "-DDEBUG" + else + CC-Options: "-DNDEBUG" + + if flag(webfrontend) + Build-Depends: cgi > 0.42 + Other-Modules: Testing.WebStuff + +Executable test1 + Main-is: T1.hs + Other-Modules: Testing.Test1 + Build-Depends: base + + if flag(debug) + CC-Options: "-DDEBUG" + GHC-Options: -DDEBUG +~~~~~~~~~~~~~~~~ + +#### Layout #### + +Flags, conditionals, library and executable sections use layout to +indicate structure. This is very similar to the Haskell layout rule. +Entries in a section have to all be indented to the same level which +must be more than the section header. Tabs are not allowed to be used +for indentation. + +As an alternative to using layout you can also use explicit braces `{}`. +In this case the indentation of entries in a section does not matter, +though different fields within a block must be on different lines. Here +is a bit of the above example again, using braces: + +#### Example: Using explicit braces rather than indentation for layout #### + +~~~~~~~~~~~~~~~~ +Name: Test1 +Version: 0.0.1 +Cabal-Version: >= 1.2 +License: BSD3 +Author: Jane Doe +Synopsis: Test package to test configurations +Category: Example + +Flag Debug { + Description: Enable debug support + Default: False +} + +Library { + Build-Depends: base + Exposed-Modules: Testing.Test1 + Extensions: CPP + if flag(debug) { + GHC-Options: -DDEBUG + if !os(windows) { + CC-Options: "-DDEBUG" + } else { + CC-Options: "-DNDEBUG" + } + } +} +~~~~~~~~~~~~~~~~ + +#### Configuration Flags #### + +A flag section takes the flag name as an argument and may contain the +following fields. + +`description:` _freeform_ +: The description of this flag. + +`default:` _boolean_ (default: `True`) +: The default value of this flag. 
+ + Note that this value may be [overridden in several + ways](installing-packages.html#controlling-flag-assignments). The + rationale for having flags default to True is that users usually + want new features as soon as they are available. Flags representing + features that are not (yet) recommended for most users (such as + experimental features or debugging support) should therefore + explicitly override the default to False. + +`manual:` _boolean_ (default: `False`) +: By default, Cabal will first try to satisfy dependencies with the + default flag value and then, if that is not possible, with the + negated value. However, if the flag is manual, then the default + value (which can be overridden by commandline flags) will be used. + +#### Conditional Blocks #### + +Conditional blocks may appear anywhere inside a library or executable +section. They have to follow rather strict formatting rules. +Conditional blocks must always be of the shape + +~~~~~~~~~~~~~~~~ + `if `_condition_ + _property-descriptions-or-conditionals*_ +~~~~~~~~~~~~~~~~ + +or + +~~~~~~~~~~~~~~~~ + `if `_condition_ + _property-descriptions-or-conditionals*_ + `else` + _property-descriptions-or-conditionals*_ +~~~~~~~~~~~~~~~~ + +Note that the `if` and the condition have to be all on the same line. + +#### Conditions #### + +Conditions can be formed using boolean tests and the boolean operators +`||` (disjunction / logical "or"), `&&` (conjunction / logical "and"), +or `!` (negation / logical "not"). The unary `!` takes highest +precedence, `||` takes lowest. Precedence levels may be overridden +through the use of parentheses. For example, `os(darwin) && !arch(i386) +|| os(freebsd)` is equivalent to `(os(darwin) && !(arch(i386))) || +os(freebsd)`. + +The following tests are currently supported. + +`os(`_name_`)` +: Tests if the current operating system is _name_. The argument is + tested against `System.Info.os` on the target system. 
There is + unfortunately some disagreement between Haskell implementations + about the standard values of `System.Info.os`. Cabal canonicalises + it so that in particular `os(windows)` works on all implementations. + If the canonicalised os names match, this test evaluates to true, + otherwise false. The match is case-insensitive. + +`arch(`_name_`)` +: Tests if the current architecture is _name_. The argument is + matched against `System.Info.arch` on the target system. If the arch + names match, this test evaluates to true, otherwise false. The match + is case-insensitive. + +`impl(`_compiler_`)` +: Tests for the configured Haskell implementation. An optional version + constraint may be specified (for example `impl(ghc >= 6.6.1)`). If + the configured implementation is of the right type and matches the + version constraint, then this evaluates to true, otherwise false. + The match is case-insensitive. + +`flag(`_name_`)` +: Evaluates to the current assignment of the flag of the given name. + Flag names are case insensitive. Testing for flags that have not + been introduced with a flag section is an error. + +`true` +: Constant value true. + +`false` +: Constant value false. + +#### Resolution of Conditions and Flags #### + +If a package description specifies configuration flags, the package user can +[control these in several ways](installing-packages.html#controlling-flag-assignments). +If the user does not fix the value of a flag, Cabal will try to find a flag +assignment in the following way. + + * For each flag specified, it will assign its default value, evaluate + all conditions with this flag assignment, and check if all + dependencies can be satisfied. If this check succeeded, the package + will be configured with those flag assignments. + + * If dependencies were missing, the last flag (as by the order in + which the flags were introduced in the package description) is tried + with its alternative value and so on. 
This continues until either + an assignment is found where all dependencies can be satisfied, or + all possible flag assignments have been tried. + +To put it another way, Cabal does a complete backtracking search to find +a satisfiable package configuration. It is only the dependencies +specified in the `build-depends` field in conditional blocks that +determine if a particular flag assignment is satisfiable (`build-tools` +are not considered). The order of the declaration and the default value +of the flags determines the search order. Flags overridden on the +command line fix the assignment of that flag, so no backtracking will be +tried for that flag. + +If no suitable flag assignment could be found, the configuration phase +will fail and a list of missing dependencies will be printed. Note that +this resolution process is exponential in the worst case (i.e., in the +case where dependencies cannot be satisfied). There are some +optimizations applied internally, but the overall complexity remains +unchanged. + +### Meaning of field values when using conditionals ### + +During the configuration phase, a flag assignment is chosen, all +conditionals are evaluated, and the package description is combined into +a flat package description. If the same field is specified both inside a conditional +and outside, then the values are combined using the following rules. + + + * Boolean fields are combined using conjunction (logical "and"). + + * List fields are combined by appending the inner items to the outer + items, for example + + ~~~~~~~~~~~~~~~~ + Extensions: CPP + if impl(ghc) + Extensions: MultiParamTypeClasses + ~~~~~~~~~~~~~~~~ + + when compiled using GHC will be combined to + + ~~~~~~~~~~~~~~~~ + Extensions: CPP, MultiParamTypeClasses + ~~~~~~~~~~~~~~~~ + + Similarly, if two conditional sections appear at the same nesting + level, properties specified in the latter will come after properties + specified in the former. + + * All other fields must not be specified in ambiguous ways. 
For + example + + ~~~~~~~~~~~~~~~~ + Main-is: Main.hs + if flag(useothermain) + Main-is: OtherMain.hs + ~~~~~~~~~~~~~~~~ + + will lead to an error. Instead use + + ~~~~~~~~~~~~~~~~ + if flag(useothermain) + Main-is: OtherMain.hs + else + Main-is: Main.hs + ~~~~~~~~~~~~~~~~ + +### Source Repositories ### + +It is often useful to be able to specify a source revision control +repository for a package. Cabal lets you specify this information in +a relatively structured form which enables other tools to interpret and +make effective use of the information. For example the information +should be sufficient for an automatic tool to check out the sources. + +Cabal supports specifying different information for various common +source control systems. Obviously not all automated tools will support +all source control systems. + +Cabal supports specifying repositories for different use cases. By +declaring which case we mean automated tools can be more useful. There +are currently two kinds defined: + + * The `head` kind refers to the latest development branch of the + package. This may be used for example to track activity of a project + or as an indication to outside developers what sources to get for + making new contributions. + + * The `this` kind refers to the branch and tag of a repository that + contains the sources for this version or release of a package. For most + source control systems this involves specifying a tag, id or hash of + some form and perhaps a branch. The purpose is to be able to + reconstruct the sources corresponding to a particular package + version. This might be used to indicate what sources to get if + someone needs to fix a bug in an older branch that is no longer an + active head branch. + +You can specify one kind or the other or both. As an example here are +the repositories for the Cabal library. Note that the `this` kind of +repository specifies a tag. 
+ +~~~~~~~~~~~~~~~~ +source-repository head + type: darcs + location: http://darcs.haskell.org/cabal/ + +source-repository this + type: darcs + location: http://darcs.haskell.org/cabal-branches/cabal-1.6/ + tag: 1.6.1 +~~~~~~~~~~~~~~~~ + +The exact fields are as follows: + +`type:` _token_ +: The name of the source control system used for this repository. The + currently recognised types are: + + * `darcs` + * `git` + * `svn` + * `cvs` + * `mercurial` (or alias `hg`) + * `bazaar` (or alias `bzr`) + * `arch` + * `monotone` + + This field is required. + +`location:` _URL_ +: The location of the repository. The exact form of this field depends + on the repository type. For example: + + * for darcs: `http://code.haskell.org/foo/` + * for git: `git://github.com/foo/bar.git` + * for CVS: `anoncvs@cvs.foo.org:/cvs` + + This field is required. + +`module:` _token_ +: CVS requires a named module, as each CVS server can host multiple + named repositories. + + This field is required for the CVS repository type and should not + be used otherwise. + +`branch:` _token_ +: Many source control systems support the notion of a branch, as a + distinct concept from having repositories in separate locations. For + example CVS, SVN and git use branches while darcs uses different + locations for different branches. If you need to specify a branch to + identify your repository then specify it in this field. + + This field is optional. + +`tag:` _token_ +: A tag identifies a particular state of a source repository. The tag + can be used with a `this` repository kind to identify the state of + a repository corresponding to a particular package version or + release. The exact form of the tag depends on the repository type. + + This field is required for the `this` repository kind. + +`subdir:` _directory_ +: Some projects put the sources for multiple packages under a single + source repository. 
This field lets you specify the relative path + from the root of the repository to the top directory for the + package, i.e. the directory containing the package's `.cabal` file. + + This field is optional. It defaults to empty, which corresponds to the + root directory of the repository. + +### Downloading a package's source ### + +The `cabal get` command allows you to access a package's source code - either by +unpacking a tarball downloaded from Hackage (the default) or by checking out a +working copy from the package's source repository. + +~~~~~~~~~~~~~~~~ +$ cabal get [FLAGS] PACKAGES +~~~~~~~~~~~~~~~~ + +The `get` command supports the following options: + +`-d --destdir` _PATH_ +: Where to place the package source, defaults to (a subdirectory of) the + current directory. + +`-s --source-repository` _[head|this|...]_ +: Fork the package's source repository using the appropriate version control + system. The optional argument allows you to choose a specific repository kind. + + +## Accessing data files from package code ## + +The placement on the target system of files listed in the `data-files` +field varies between systems, and in some cases one can even move +packages around after installation (see [prefix +independence](installing-packages.html#prefix-independence)). To enable +packages to find these files in a portable way, Cabal generates a module +called `Paths_`_pkgname_ (with any hyphens in _pkgname_ replaced by +underscores) during building, so that it may be imported by modules of +the package. This module defines a function + +~~~~~~~~~~~~~~~ +getDataFileName :: FilePath -> IO FilePath +~~~~~~~~~~~~~~~ + +If the argument is a filename listed in the `data-files` field, the +result is the name of the corresponding file on the system on which the +program is running. + +Note: If you decide to import the `Paths_`_pkgname_ module then it +*must* be listed in the `other-modules` field just like any other module +in your package. 
+ +The `Paths_`_pkgname_ module is not platform independent so it does not +get included in the source tarballs generated by `sdist`. + +### Accessing the package version ### + +The aforementioned auto generated `Paths_`_pkgname_ module also +exports the constant `version ::` [Version][data-version] which is +defined as the version of your package as specified in the `version` +field. + +## System-dependent parameters ## + +For some packages, especially those interfacing with C libraries, +implementation details and the build procedure depend on the build +environment. The `build-type` `Configure` can be used to handle many +such situations. In this case, `Setup.hs` should be: + +~~~~~~~~~~~~~~~~ +import Distribution.Simple +main = defaultMainWithHooks autoconfUserHooks +~~~~~~~~~~~~~~~~ + +Most packages, however, would probably do better using the `Simple` +build type and [configurations](#configurations). + +The `build-type` `Configure` differs from `Simple` in two ways: + +* The package root directory must contain a shell script called + `configure`. The configure step will run the script. This `configure` + script may be produced by [autoconf][] or may be hand-written. The + `configure` script typically discovers information about the system + and records it for later steps, e.g. by generating system-dependent + header files for inclusion in C source files and preprocessed Haskell + source files. (Clearly this won't work for Windows without MSYS or + Cygwin: other ideas are needed.) + +* If the package root directory contains a file called + _package_`.buildinfo` after the configuration step, subsequent steps + will read it to obtain additional settings for [build + information](#build-information) fields, to be merged with the ones + given in the `.cabal` file. In particular, this file may be generated + by the `configure` script mentioned above, allowing these settings to + vary depending on the build environment. 
+ + The build information file should have the following structure: + + > _buildinfo_ + > + > `executable:` _name_ + > _buildinfo_ + > + > `executable:` _name_ + > _buildinfo_ + > ... + + where each _buildinfo_ consists of settings of fields listed in the + section on [build information](#build-information). The first one (if + present) relates to the library, while each of the others relate to + the named executable. (The names must match the package description, + but you don't have to have entries for all of them.) + +Neither of these files is required. If they are absent, this setup +script is equivalent to `defaultMain`. + +#### Example: Using autoconf #### + +This example is for people familiar with the [autoconf][] tools. + +In the X11 package, the file `configure.ac` contains: + +~~~~~~~~~~~~~~~~ +AC_INIT([Haskell X11 package], [1.1], [libraries@haskell.org], [X11]) + +# Safety check: Ensure that we are in the correct source directory. +AC_CONFIG_SRCDIR([X11.cabal]) + +# Header file to place defines in +AC_CONFIG_HEADERS([include/HsX11Config.h]) + +# Check for X11 include paths and libraries +AC_PATH_XTRA +AC_TRY_CPP([#include <X11/Xlib.h>],,[no_x=yes]) + +# Build the package if we found X11 stuff +if test "$no_x" = yes +then BUILD_PACKAGE_BOOL=False +else BUILD_PACKAGE_BOOL=True +fi +AC_SUBST([BUILD_PACKAGE_BOOL]) + +AC_CONFIG_FILES([X11.buildinfo]) +AC_OUTPUT +~~~~~~~~~~~~~~~~ + +Then the setup script will run the `configure` script, which checks for +the presence of the X11 libraries and substitutes for variables in the +file `X11.buildinfo.in`: + +~~~~~~~~~~~~~~~~ +buildable: @BUILD_PACKAGE_BOOL@ +cc-options: @X_CFLAGS@ +ld-options: @X_LIBS@ +~~~~~~~~~~~~~~~~ + +This generates a file `X11.buildinfo` supplying the parameters needed by +later stages: + +~~~~~~~~~~~~~~~~ +buildable: True +cc-options: -I/usr/X11R6/include +ld-options: -L/usr/X11R6/lib +~~~~~~~~~~~~~~~~ + +The `configure` script also generates a header file `include/HsX11Config.h` +containing C 
preprocessor defines recording the results of various tests. This +file may be included by C source files and preprocessed Haskell source files in +the package. + +Note: Packages using these features will also need to list additional files such +as `configure`, templates for `.buildinfo` files, files named only in +`.buildinfo` files, header files and so on in the `extra-source-files` field to +ensure that they are included in source distributions. They should also list +files and directories generated by `configure` in the `extra-tmp-files` field to +ensure that they are removed by `setup clean`. + +Quite often the files generated by `configure` need to be listed somewhere in +the package description (for example, in the `install-includes` field). However, +we usually don't want generated files to be included in the source tarball. The +solution is again provided by the `.buildinfo` file. In the above example, the +following line should be added to `X11.buildinfo`: + +~~~~~~~~~~~~~~~~ +install-includes: HsX11Config.h +~~~~~~~~~~~~~~~~ + +In this way, the generated `HsX11Config.h` file won't be included in the source +tarball in addition to `HsX11Config.h.in`, but it will be copied to the right +location during the install process. Packages that use custom `Setup.hs` scripts +can update the necessary fields programmatically instead of using the +`.buildinfo` file. + + +## Conditional compilation ## + +Sometimes you want to write code that works with more than one version +of a dependency. You can specify a range of versions for the dependency +in the `build-depends`, but how do you then write the code that can use +different versions of the API? + +Haskell lets you preprocess your code using the C preprocessor (either +the real C preprocessor, or `cpphs`). To enable this, add `extensions: +CPP` to your package description. 
When using CPP, Cabal provides some +pre-defined macros to let you test the version of dependent packages; +for example, suppose your package works with either version 3 or version +4 of the `base` package, you could select the available version in your +Haskell modules like this: + +~~~~~~~~~~~~~~~~ +#if MIN_VERSION_base(4,0,0) +... code that works with base-4 ... +#else +... code that works with base-3 ... +#endif +~~~~~~~~~~~~~~~~ + +In general, Cabal supplies a macro `MIN_VERSION_`_`package`_`_(A,B,C)` +for each package depended on via `build-depends`. This macro is true if +the actual version of the package in use is greater than or equal to +`A.B.C` (using the conventional ordering on version numbers, which is +lexicographic on the sequence, but numeric on each component, so for +example 1.2.0 is greater than 1.0.3). + +Since version 1.20, there is also the `MIN_TOOL_VERSION_`_`tool`_ family of +macros for conditioning on the version of build tools used to build the program +(e.g. `hsc2hs`). + +Cabal places the definitions of these macros into an +automatically-generated header file, which is included when +preprocessing Haskell source code by passing options to the C +preprocessor. + +Cabal also allows you to detect when the source code is being used for generating +documentation. The `__HADDOCK_VERSION__` macro is defined only when compiling +via [haddock][] instead of a normal Haskell compiler. The value of the +`__HADDOCK_VERSION__` macro is defined as `A*1000 + B*10 + C`, where `A.B.C` is +the Haddock version. This can be useful for working around bugs in Haddock or +generating prettier documentation in some special cases. + +## More complex packages ## + +For packages that don't fit the simple schemes described above, you have +a few options: + + * By using the `build-type` `Custom`, you can supply your own + `Setup.hs` file, and customize the simple build infrastructure + using _hooks_. 
These allow you to perform additional actions + before and after each command is run, and also to specify + additional preprocessors. A typical `Setup.hs` may look like this: + + ~~~~~~~~~~~~~~~~ + import Distribution.Simple + main = defaultMainWithHooks simpleUserHooks { postHaddock = posthaddock } + + posthaddock args flags desc info = .... + ~~~~~~~~~~~~~~~~ + + See `UserHooks` in [Distribution.Simple][dist-simple] for the + details, but note that this interface is experimental, and likely + to change in future releases. + + * You could delegate all the work to `make`, though this is unlikely + to be very portable. Cabal supports this with the `build-type` + `Make` and a trivial setup library [Distribution.Make][dist-make], + which simply parses the command line arguments and invokes `make`. + Here `Setup.hs` should look like this: + + ~~~~~~~~~~~~~~~~ + import Distribution.Make + main = defaultMain + ~~~~~~~~~~~~~~~~ + + The root directory of the package should contain a `configure` + script, and, after that has run, a `Makefile` with a default target + that builds the package, plus targets `install`, `register`, + `unregister`, `clean`, `dist` and `docs`. Some options to commands + are passed through as follows: + + * The `--with-hc-pkg`, `--prefix`, `--bindir`, `--libdir`, `--datadir`, + `--libexecdir` and `--sysconfdir` options to the `configure` command are + passed on to the `configure` script. In addition the value of the + `--with-compiler` option is passed in a `--with-hc` option and all + options specified with `--configure-option=` are passed on. + + * The `--destdir` option to the `copy` command becomes a setting + of a `destdir` variable on the invocation of `make copy`. 
The + supplied `Makefile` should provide a `copy` target, which will + probably look like this: + + ~~~~~~~~~~~~~~~~ + copy : + $(MAKE) install prefix=$(destdir)/$(prefix) \ + bindir=$(destdir)/$(bindir) \ + libdir=$(destdir)/$(libdir) \ + datadir=$(destdir)/$(datadir) \ + libexecdir=$(destdir)/$(libexecdir) \ + sysconfdir=$(destdir)/$(sysconfdir) \ + ~~~~~~~~~~~~~~~~ + + * Finally, with the `build-type` `Custom`, you can also write your + own setup script from scratch. It must conform to the interface + described in the section on [building and installing + packages](installing-packages.html), and you may use the Cabal + library for all or part of the work. One option is to copy the + source of `Distribution.Simple`, and alter it for your needs. Good + luck. + + + +[dist-simple]: ../release/cabal-latest/doc/API/Cabal/Distribution-Simple.html +[dist-make]: ../release/cabal-latest/doc/API/Cabal/Distribution-Make.html +[dist-license]: ../release/cabal-latest/doc/API/Cabal/Distribution-License.html#t:License +[extension]: ../release/cabal-latest/doc/API/Cabal/Language-Haskell-Extension.html#t:Extension +[BuildType]: ../release/cabal-latest/doc/API/Cabal/Distribution-PackageDescription.html#t:BuildType +[data-version]: http://hackage.haskell.org/packages/archive/base/latest/doc/html/Data-Version.html +[alex]: http://www.haskell.org/alex/ +[autoconf]: http://www.gnu.org/software/autoconf/ +[c2hs]: http://www.cse.unsw.edu.au/~chak/haskell/c2hs/ +[cpphs]: http://projects.haskell.org/cpphs/ +[greencard]: http://hackage.haskell.org/package/greencard +[haddock]: http://www.haskell.org/haddock/ +[HsColour]: http://www.cs.york.ac.uk/fp/darcs/hscolour/ +[happy]: http://www.haskell.org/happy/ +[Hackage]: http://hackage.haskell.org/ +[pkg-config]: http://www.freedesktop.org/wiki/Software/pkg-config/ +[REPL]: http://en.wikipedia.org/wiki/Read%E2%80%93eval%E2%80%93print_loop diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/doc/index.markdown 
cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/doc/index.markdown --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/doc/index.markdown 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/doc/index.markdown 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,200 @@ +% Cabal User Guide + +Cabal is the standard package system for [Haskell] software. It helps +people to configure, build and install Haskell software and to +distribute it easily to other users and developers. + +There is a command line tool called `cabal` for working with Cabal +packages. It helps with installing existing packages and also helps +people developing their own packages. It can be used to work with local +packages or to install packages from online package archives, including +automatically installing dependencies. By default it is configured to +use [Hackage] which is Haskell's central package archive that contains +thousands of libraries and applications in the Cabal package format. + +# Contents # + + * [Introduction](#introduction) + - [What's in a package](#whats-in-a-package) + - [A tool for working with packages](#a-tool-for-working-with-packages) + * [Building, installing and managing packages](installing-packages.html) + * [Creating packages](developing-packages.html) + * [Reporting bugs and deficiencies](misc.html#reporting-bugs-and-deficiencies) + * [Stability of Cabal interfaces](misc.html#stability-of-cabal-interfaces) + +# Introduction # + +Cabal is a package system for Haskell software. The point of a package +system is to enable software developers and users to easily distribute, +use and reuse software. A package system makes it easier for developers +to get their software into the hands of users. Equally importantly, it +makes it easier for software developers to be able to reuse software +components written by other developers. + +Packaging systems deal with packages and with Cabal we call them _Cabal +packages_. 
The Cabal package is the unit of distribution. Every Cabal +package has a name and a version number which are used to identify the +package, e.g. `filepath-1.0`. + +Cabal packages can depend on other Cabal packages. There are tools +to enable automated package management. This means it is possible for +developers and users to install a package plus all of the other Cabal +packages that it depends on. It also means that it is practical to make +very modular systems using lots of packages that reuse code written by +many developers. + +Cabal packages are source based and are typically (but not necessarily) +portable to many platforms and Haskell implementations. The Cabal +package format is designed to make it possible to translate into other +formats, including binary packages for various systems. + +When distributed, Cabal packages use the standard compressed tarball +format, with the file extension `.tar.gz`, e.g. `filepath-1.0.tar.gz`. + +Note that packages are not part of the Haskell language, rather they +are a feature provided by the combination of Cabal and GHC (and several +other Haskell implementations). + + +## A tool for working with packages ## + +There is a command line tool, called "`cabal`", that users and developers +can use to build and install Cabal packages. It can be used for both +local packages and for packages available remotely over the network. It +can automatically install Cabal packages plus any other Cabal packages +they depend on. + +Developers can use the tool with packages in local directories, e.g. + +~~~~~~~~~~~~~~~~ +cd foo/ +cabal install +~~~~~~~~~~~~~~~~ + +While working on a package in a local directory, developers can run the +individual steps to configure and build, and also generate documentation +and run test suites and benchmarks. + +It is also possible to install several local packages at once, e.g. 
+ +~~~~~~~~~~~~~~~~ +cabal install foo/ bar/ +~~~~~~~~~~~~~~~~ + +Developers and users can use the tool to install packages from remote +Cabal package archives. By default, the `cabal` tool is configured to +use the central Haskell package archive called [Hackage] but it +is possible to use it with any other suitable archive. + +~~~~~~~~~~~~~~~~ +cabal install xmonad +~~~~~~~~~~~~~~~~ + +This will install the `xmonad` package plus all of its dependencies. + +In addition to packages that have been published in an archive, +developers can install packages from local or remote tarball files, +for example + +~~~~~~~~~~~~~~~~ +cabal install foo-1.0.tar.gz +cabal install http://example.com/foo-1.0.tar.gz +~~~~~~~~~~~~~~~~ + +Cabal provides a number of ways for a user to customise how and where a +package is installed. They can decide where a package will be installed, +which Haskell implementation to use and whether to build optimised code +or build with the ability to profile code. It is not expected that users +will have to modify any of the information in the `.cabal` file. + +For full details, see the section on [building and installing +packages](installing-packages.html). + +Note that `cabal` is not the only tool for working with Cabal packages. +Due to the standardised format and a library for reading `.cabal` files, +there are several other special-purpose tools. + +## What's in a package ## + +A Cabal package consists of: + + * Haskell software, including libraries, executables and tests + * metadata about the package in a standard human and machine + readable format (the "`.cabal`" file) + * a standard interface to build the package (the "`Setup.hs`" file) + +The `.cabal` file contains information about the package, supplied by +the package author. In particular it lists the other Cabal packages +that the package depends on. 
+ +For full details on what goes in the `.cabal` and `Setup.hs` files, and +for all the other features provided by the build system, see the section +on [developing packages](developing-packages.html). + + +## Cabal featureset ## + +Cabal and its associated tools and websites cover: + + * a software build system + * software configuration + * packaging for distribution + * automated package management + * natively using the `cabal` command line tool; or + * by translation into native package formats such as RPM or deb + * web and local Cabal package archives + * central Hackage website with 1000's of Cabal packages + +Some parts of the system can be used without others. In particular the +built-in build system for simple packages is optional: it is possible +to use custom build systems. + +## Similar systems ## + +The Cabal system is roughly comparable with the system of Python Eggs, +Ruby Gems or Perl distributions. Each system has a notion of +distributable packages, and has tools to manage the process of +distributing and installing packages. + +Hackage is an online archive of Cabal packages. It is roughly comparable +to CPAN but with rather fewer packages (around 5,000 vs 28,000). + +Cabal is often compared with autoconf and automake and there is some +overlap in functionality. The most obvious similarity is that the +command line interface for actually configuring and building packages +follows the same steps and has many of the same configuration +parameters. + +~~~~~~~~~~ +./configure --prefix=... +make +make install +~~~~~~~~~~ + +compared to + +~~~~~~~~~~ +cabal configure --prefix=... +cabal build +cabal install +~~~~~~~~~~ + +Cabal's build system for simple packages is considerably less flexible +than make/automake, but has builtin knowledge of how to build Haskell +code and requires very little manual configuration. Cabal's simple build +system is also portable to Windows, without needing a Unix-like +environment such as cygwin/mingwin. 
+ +Compared to autoconf, Cabal takes a somewhat different approach to +package configuration. Cabal's approach is designed for automated +package management. Instead of having a configure script that tests for +whether dependencies are available, Cabal packages specify their +dependencies. There is some scope for optional and conditional +dependencies. By having package authors specify dependencies it makes it +possible for tools to install a package and all of its dependencies +automatically. It also makes it possible to translate (in a +mostly-automatic way) into another package format like RPM or deb +which also have automatic dependency resolution. + +[Haskell]: http://www.haskell.org/ +[Hackage]: http://hackage.haskell.org/ diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/doc/installing-packages.markdown cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/doc/installing-packages.markdown --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/doc/installing-packages.markdown 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/doc/installing-packages.markdown 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,1067 @@ +% Cabal User Guide + +# Building and installing packages # + +After you've unpacked a Cabal package, you can build it by moving into +the root directory of the package and running the `cabal` tool there: + +> `cabal [command] [option...]` + +The _command_ argument selects a particular step in the build/install process. 
+ +You can also get a summary of the command syntax with + +> `cabal help` + +Alternatively, you can also use the `Setup.hs` or `Setup.lhs` script: + +> `runhaskell Setup.hs [command] [option...]` + +For the summary of the command syntax, run: + +> `cabal help` + +or + +> `runhaskell Setup.hs --help` + +## Building and installing a system package ## + +~~~~~~~~~~~~~~~~ +runhaskell Setup.hs configure --ghc +runhaskell Setup.hs build +runhaskell Setup.hs install +~~~~~~~~~~~~~~~~ + +The first line readies the system to build the tool using GHC; for +example, it checks that GHC exists on the system. The second line +performs the actual building, while the last both copies the build +results to some permanent place and registers the package with GHC. + +## Building and installing a user package ## + +~~~~~~~~~~~~~~~~ +runhaskell Setup.hs configure --user +runhaskell Setup.hs build +runhaskell Setup.hs install +~~~~~~~~~~~~~~~~ + +The package is installed under the user's home directory and is +registered in the user's package database (`--user`). + +## Installing packages from Hackage ## + +The `cabal` tool also can download, configure, build and install a [Hackage] +package and all of its dependencies in a single step. To do this, run: + +~~~~~~~~~~~~~~~~ +cabal install [PACKAGE...] +~~~~~~~~~~~~~~~~ + +To browse the list of available packages, visit the [Hackage] web site. + +## Developing with sandboxes ## + +By default, any dependencies of the package are installed into the global or +user package databases (e.g. using `cabal install --only-dependencies`). If +you're building several different packages that have incompatible dependencies, +this can cause the build to fail. One way to avoid this problem is to build each +package in an isolated environment ("sandbox"), with a sandbox-local package +database. Because sandboxes are per-project, inconsistent dependencies can be +simply disallowed. 
+ +For more on sandboxes, see also +[this article](http://coldwa.st/e/blog/2013-08-20-Cabal-sandbox.html). + +### Sandboxes: basic usage ### + +To initialise a fresh sandbox in the current directory, run `cabal sandbox +init`. All subsequent commands (such as `build` and `install`) from this point +will use the sandbox. + +~~~~~~~~~~~~~~~ +$ cd /path/to/my/haskell/library +$ cabal sandbox init # Initialise the sandbox +$ cabal install --only-dependencies # Install dependencies into the sandbox +$ cabal build # Build your package inside the sandbox +~~~~~~~~~~~~~~~ + +It can be useful to make a source package available for installation in the +sandbox - for example, if your package depends on a patched or an unreleased +version of a library. This can be done with the `cabal sandbox add-source` +command - think of it as "local [Hackage]". If an add-source dependency is later +modified, it is reinstalled automatically. + +~~~~~~~~~~~~~~~ +$ cabal sandbox add-source /my/patched/library # Add a new add-source dependency +$ cabal install --dependencies-only # Install it into the sandbox +$ cabal build # Build the local package +$ $EDITOR /my/patched/library/Source.hs # Modify the add-source dependency +$ cabal build # Modified dependency is automatically reinstalled +~~~~~~~~~~~~~~~ + +Normally, the sandbox settings (such as optimisation level) are inherited from +the main Cabal config file (`$HOME/.cabal/config`). Sometimes, though, you need +to change some settings specifically for a single sandbox. You can do this by +creating a `cabal.config` file in the same directory with your +`cabal.sandbox.config` (which was created by `sandbox init`). This file has the +same syntax as the main Cabal config file. 
+ +~~~~~~~~~~~~~~~ +$ cat cabal.config +documentation: True +constraints: foo == 1.0, bar >= 2.0, baz +$ cabal build # Uses settings from the cabal.config file +~~~~~~~~~~~~~~~ + +When you have decided that you no longer want to build your package inside a +sandbox, just delete it: + +~~~~~~~~~~~~~~~ +$ cabal sandbox delete # Built-in command +$ rm -rf .cabal-sandbox cabal.sandbox.config # Alternative manual method +~~~~~~~~~~~~~~~ + +### Sandboxes: advanced usage ### + +The default behaviour of the `add-source` command is to track modifications done +to the added dependency and reinstall the sandbox copy of the package when +needed. Sometimes this is not desirable: in these cases you can use `add-source +--snapshot`, which disables the change tracking. In addition to `add-source`, +there are also `list-sources` and `delete-source` commands. + +Sometimes one wants to share a single sandbox between multiple packages. This +can be easily done with the `--sandbox` option: + +~~~~~~~~~~~~~~~ +$ mkdir -p /path/to/shared-sandbox +$ cd /path/to/shared-sandbox +$ cabal sandbox init --sandbox . +$ cd /path/to/package-a +$ cabal sandbox init --sandbox /path/to/shared-sandbox +$ cd /path/to/package-b +$ cabal sandbox init --sandbox /path/to/shared-sandbox +~~~~~~~~~~~~~~~ + +Note that `cabal sandbox init --sandbox .` puts all sandbox files into the +current directory. By default, `cabal sandbox init` initialises a new sandbox in +a newly-created subdirectory of the current working directory +(`./.cabal-sandbox`). 
+ +Using multiple different compiler versions simultaneously is also supported, via +the `-w` option: + +~~~~~~~~~~~~~~~ +$ cabal sandbox init +$ cabal install --only-dependencies -w /path/to/ghc-1 # Install dependencies for both compilers +$ cabal install --only-dependencies -w /path/to/ghc-2 +$ cabal configure -w /path/to/ghc-1 # Build with the first compiler +$ cabal build +$ cabal configure -w /path/to/ghc-2 # Build with the second compiler +$ cabal build +~~~~~~~~~~~~~~~ + +It can be occasionally useful to run the compiler-specific package manager tool +(e.g. `ghc-pkg`) on the sandbox package DB directly (for example, you may +need to unregister some packages). The `cabal sandbox hc-pkg` command is a +convenient wrapper that runs the compiler-specific package manager tool with the +arguments: + +~~~~~~~~~~~~~~~ +$ cabal -v sandbox hc-pkg list +Using a sandbox located at /path/to/.cabal-sandbox +'ghc-pkg' '--global' '--no-user-package-conf' + '--package-conf=/path/to/.cabal-sandbox/i386-linux-ghc-7.4.2-packages.conf.d' + 'list' +[...] +~~~~~~~~~~~~~~~ + +The `--require-sandbox` option makes all sandbox-aware commands +(`install`/`build`/etc.) exit with error if there is no sandbox present. This +makes it harder to accidentally modify the user package database. The option can +be also turned on via the per-user configuration file (`~/.cabal/config`) or the +per-project one (`$PROJECT_DIR/cabal.config`). The error can be squelched with +`--no-require-sandbox`. + +The option `--sandbox-config-file` allows you to specify the location of the +`cabal.sandbox.config` file (by default, `cabal` searches for it in the current +directory). This provides the same functionality as shared sandboxes, but +sometimes can be more convenient. 
Example: + +~~~~~~~~~~~~~~~ +$ mkdir my/sandbox +$ cd my/sandbox +$ cabal sandbox init +$ cd /path/to/my/project +$ cabal --sandbox-config-file=/path/to/my/sandbox/cabal.sandbox.config install +# Uses the sandbox located at /path/to/my/sandbox/.cabal-sandbox +$ cd ~ +$ cabal --sandbox-config-file=/path/to/my/sandbox/cabal.sandbox.config install +# Still uses the same sandbox +~~~~~~~~~~~~~~~ + +The sandbox config file can be also specified via the `CABAL_SANDBOX_CONFIG` +environment variable. + +Finally, the flag `--ignore-sandbox` lets you temporarily ignore an existing +sandbox: + +~~~~~~~~~~~~~~~ +$ mkdir my/sandbox +$ cd my/sandbox +$ cabal sandbox init +$ cabal --ignore-sandbox install text +# Installs 'text' in the user package database ('~/.cabal'). +~~~~~~~~~~~~~~~ + +## Creating a binary package ## + +When creating binary packages (e.g. for Red Hat or Debian) one needs to +create a tarball that can be sent to another system for unpacking in the +root directory: + +~~~~~~~~~~~~~~~~ +runhaskell Setup.hs configure --prefix=/usr +runhaskell Setup.hs build +runhaskell Setup.hs copy --destdir=/tmp/mypkg +tar -czf mypkg.tar.gz /tmp/mypkg/ +~~~~~~~~~~~~~~~~ + +If the package contains a library, you need two additional steps: + +~~~~~~~~~~~~~~~~ +runhaskell Setup.hs register --gen-script +runhaskell Setup.hs unregister --gen-script +~~~~~~~~~~~~~~~~ + +This creates shell scripts `register.sh` and `unregister.sh`, which must +also be sent to the target system. After unpacking there, the package +must be registered by running the `register.sh` script. The +`unregister.sh` script would be used in the uninstall procedure of the +package. Similar steps may be used for creating binary packages for +Windows. + + +The following options are understood by all commands: + +`--help`, `-h` or `-?` +: List the available options for the command. + +`--verbose=`_n_ or `-v`_n_ +: Set the verbosity level (0-3). The normal level is 1; a missing _n_ + defaults to 2. 
+ +The various commands and the additional options they support are +described below. In the simple build infrastructure, any other options +will be reported as errors. + +## setup configure ## + +Prepare to build the package. Typically, this step checks that the +target platform is capable of building the package, and discovers +platform-specific features that are needed during the build. + +The user may also adjust the behaviour of later stages using the options +listed in the following subsections. In the simple build +infrastructure, the values supplied via these options are recorded in a +private file read by later stages. + +If a user-supplied `configure` script is run (see the section on +[system-dependent +parameters](developing-packages.html#system-dependent-parameters) or on +[complex packages](developing-packages.html#more-complex-packages)), it +is passed the `--with-hc-pkg`, `--prefix`, `--bindir`, `--libdir`, +`--datadir`, `--libexecdir` and `--sysconfdir` options. In addition the +value of the `--with-compiler` option is passed in a `--with-hc` option +and all options specified with `--configure-option=` are passed on. + +### Programs used for building ### + +The following options govern the programs used to process the source +files of a package: + +`--ghc` or `-g`, `--jhc`, `--lhc`, `--uhc` +: Specify which Haskell implementation to use to build the package. + At most one of these flags may be given. If none is given, the + implementation under which the setup script was compiled or + interpreted is used. + +`--with-compiler=`_path_ or `-w`_path_ +: Specify the path to a particular compiler. If given, this must match + the implementation selected above. The default is to search for the + usual name of the selected implementation. + + This flag also sets the default value of the `--with-hc-pkg` option + to the package tool for this compiler. 
Check the output of `setup + configure -v` to ensure that it finds the right package tool (or use + `--with-hc-pkg` explicitly). + + +`--with-hc-pkg=`_path_ +: Specify the path to the package tool, e.g. `ghc-pkg`. The package + tool must be compatible with the compiler specified by + `--with-compiler`. If this option is omitted, the default value is + determined from the compiler selected. + +`--with-`_`prog`_`=`_path_ +: Specify the path to the program _prog_. Any program known to Cabal + can be used in place of _prog_. It can either be a full path or the + name of a program that can be found on the program search path. For + example: `--with-ghc=ghc-6.6.1` or + `--with-cpphs=/usr/local/bin/cpphs`. + The full list of accepted programs is not enumerated in this user guide. + Rather, run `cabal install --help` to view the list. + +`--`_`prog`_`-options=`_options_ +: Specify additional options to the program _prog_. Any program known + to Cabal can be used in place of _prog_. For example: + `--alex-options="--template=mytemplatedir/"`. The _options_ is split + into program options based on spaces. Any options containing embedded + spaces need to be quoted, for example + `--foo-options='--bar="C:\Program File\Bar"'`. As an alternative + that takes only one option at a time but avoids the need to quote, + use `--`_`prog`_`-option` instead. + +`--`_`prog`_`-option=`_option_ +: Specify a single additional option to the program _prog_. For + passing an option that contains embedded spaces, such as a file name + with embedded spaces, using this rather than `--`_`prog`_`-options` + means you do not need an additional level of quoting. Of course if + you are using a command shell you may still need to quote, for + example `--foo-options="--bar=C:\Program File\Bar"`. + +All of the options passed with either `--`_`prog`_`-options` or +`--`_`prog`_`-option` are passed in the order they were specified on the +configure command line. 
+ +### Installation paths ### + +The following options govern the location of installed files from a +package: + +`--prefix=`_dir_ +: The root of the installation. For example for a global install you + might use `/usr/local` on a Unix system, or `C:\Program Files` on a + Windows system. The other installation paths are usually + subdirectories of _prefix_, but they don't have to be. + + In the simple build system, _dir_ may contain the following path + variables: `$pkgid`, `$pkg`, `$version`, `$compiler`, `$os`, + `$arch`, `$abi`, `$abitag` + +`--bindir=`_dir_ +: Executables that the user might invoke are installed here. + + In the simple build system, _dir_ may contain the following path + variables: `$prefix`, `$pkgid`, `$pkg`, `$version`, `$compiler`, + `$os`, `$arch`, `$abi`, `$abitag` + +`--libdir=`_dir_ +: Object-code libraries are installed here. + + In the simple build system, _dir_ may contain the following path + variables: `$prefix`, `$bindir`, `$pkgid`, `$pkg`, `$version`, + `$compiler`, `$os`, `$arch`, `$abi`, `$abitag` + +`--libexecdir=`_dir_ +: Executables that are not expected to be invoked directly by the user + are installed here. + + In the simple build system, _dir_ may contain the following path + variables: `$prefix`, `$bindir`, `$libdir`, `$libsubdir`, `$pkgid`, + `$pkg`, `$version`, `$compiler`, `$os`, `$arch`, `$abi`, `$abitag` + +`--datadir=`_dir_ +: Architecture-independent data files are installed here. + + In the simple build system, _dir_ may contain the following path + variables: `$prefix`, `$bindir`, `$libdir`, `$libsubdir`, `$pkgid`, `$pkg`, + `$version`, `$compiler`, `$os`, `$arch`, `$abi`, `$abitag` + +`--sysconfdir=`_dir_ +: Installation directory for the configuration files. 
+ + In the simple build system, _dir_ may contain the following path variables: + `$prefix`, `$bindir`, `$libdir`, `$libsubdir`, `$pkgid`, `$pkg`, `$version`, + `$compiler`, `$os`, `$arch`, `$abi`, `$abitag` + +In addition the simple build system supports the following installation path options: + +`--libsubdir=`_dir_ +: A subdirectory of _libdir_ in which libraries are actually + installed. For example, in the simple build system on Unix, the + default _libdir_ is `/usr/local/lib`, and _libsubdir_ contains the + package identifier and compiler, e.g. `mypkg-0.2/ghc-6.4`, so + libraries would be installed in `/usr/local/lib/mypkg-0.2/ghc-6.4`. + + _dir_ may contain the following path variables: `$pkgid`, `$pkg`, + `$version`, `$compiler`, `$os`, `$arch`, `$abi`, `$abitag` + +`--datasubdir=`_dir_ +: A subdirectory of _datadir_ in which data files are actually + installed. + + _dir_ may contain the following path variables: `$pkgid`, `$pkg`, + `$version`, `$compiler`, `$os`, `$arch`, `$abi`, `$abitag` + +`--docdir=`_dir_ +: Documentation files are installed relative to this directory. + + _dir_ may contain the following path variables: `$prefix`, `$bindir`, + `$libdir`, `$libsubdir`, `$datadir`, `$datasubdir`, `$pkgid`, `$pkg`, + `$version`, `$compiler`, `$os`, `$arch`, `$abi`, `$abitag` + +`--htmldir=`_dir_ +: HTML documentation files are installed relative to this directory. + + _dir_ may contain the following path variables: `$prefix`, `$bindir`, + `$libdir`, `$libsubdir`, `$datadir`, `$datasubdir`, `$docdir`, `$pkgid`, + `$pkg`, `$version`, `$compiler`, `$os`, `$arch`, `$abi`, `$abitag` + +`--program-prefix=`_prefix_ +: Prepend _prefix_ to installed program names. + + _prefix_ may contain the following path variables: `$pkgid`, `$pkg`, + `$version`, `$compiler`, `$os`, `$arch`, `$abi`, `$abitag` + +`--program-suffix=`_suffix_ +: Append _suffix_ to installed program names. 
The most obvious use for + this is to append the program's version number to make it possible + to install several versions of a program at once: + `--program-suffix='$version'`. + + _suffix_ may contain the following path variables: `$pkgid`, `$pkg`, + `$version`, `$compiler`, `$os`, `$arch`, `$abi`, `$abitag` + +#### Path variables in the simple build system #### + +For the simple build system, there are a number of variables that can be +used when specifying installation paths. The defaults are also specified +in terms of these variables. A number of the variables are actually for +other paths, like `$prefix`. This allows paths to be specified relative +to each other rather than as absolute paths, which is important for +building relocatable packages (see [prefix +independence](#prefix-independence)). + +`$prefix` +: The path variable that stands for the root of the installation. For + an installation to be relocatable, all other installation paths must + be relative to the `$prefix` variable. + +`$bindir` +: The path variable that expands to the path given by the `--bindir` + configure option (or the default). + +`$libdir` +: As above but for `--libdir` + +`$libsubdir` +: As above but for `--libsubdir` + +`$datadir` +: As above but for `--datadir` + +`$datasubdir` +: As above but for `--datasubdir` + +`$docdir` +: As above but for `--docdir` + +`$pkgid` +: The name and version of the package, e.g. `mypkg-0.2` + +`$pkg` +: The name of the package, e.g. `mypkg` + +`$version` +: The version of the package, e.g. `0.2` + +`$compiler` +: The compiler being used to build the package, e.g. `ghc-6.6.1` + +`$os` +: The operating system of the computer being used to build the + package, e.g. `linux`, `windows`, `osx`, `freebsd` or `solaris` + +`$arch` +: The architecture of the computer being used to build the package, e.g. 
+ `i386`, `x86_64`, `ppc` or `sparc` + +`$abitag` +: An optional tag that a compiler can use for telling incompatible ABIs + on the same architecture apart. GHCJS encodes the underlying GHC version + in the ABI tag. + +`$abi` +: A shortcut for getting a path that completely identifies the platform in terms + of binary compatibility. Expands to the same value as `$arch-$os-$compiler-$abitag` + if the compiler uses an ABI tag, `$arch-$os-$compiler` if it doesn't. + +#### Paths in the simple build system #### + +For the simple build system, the following defaults apply: + +Option Windows Default Unix Default +------- ---------------- ------------- +`--prefix` (global) `C:\Program Files\Haskell` `/usr/local` +`--prefix` (per-user) `C:\Documents And Settings\user\Application Data\cabal` `$HOME/.cabal` +`--bindir` `$prefix\bin` `$prefix/bin` +`--libdir` `$prefix` `$prefix/lib` +`--libsubdir` (others) `$pkgid\$compiler` `$pkgid/$compiler` +`--libexecdir` `$prefix\$pkgid` `$prefix/libexec` +`--datadir` (executable) `$prefix` `$prefix/share` +`--datadir` (library) `C:\Program Files\Haskell` `$prefix/share` +`--datasubdir` `$pkgid` `$pkgid` +`--docdir` `$prefix\doc\$pkgid` `$datadir/doc/$pkgid` +`--sysconfdir` `$prefix\etc` `$prefix/etc` +`--htmldir` `$docdir\html` `$docdir/html` +`--program-prefix` (empty) (empty) +`--program-suffix` (empty) (empty) + + +#### Prefix-independence #### + +On Windows it is possible to obtain the pathname of the running program. This +means that we can construct an installable executable package that is +independent of its absolute install location. The executable can find its +auxiliary files by finding its own path and knowing the location of the other +files relative to `$bindir`. Prefix-independence is particularly useful: it +means the user can choose the install location (i.e. the value of `$prefix`) at +install-time, rather than having to bake the path into the binary when it is +built. 
+ +In order to achieve this, we require that for an executable on Windows, +all of `$bindir`, `$libdir`, `$datadir` and `$libexecdir` begin with +`$prefix`. If this is not the case then the compiled executable will +have baked-in all absolute paths. + +The application need do nothing special to achieve prefix-independence. +If it finds any files using `getDataFileName` and the [other functions +provided for the +purpose](developing-packages.html#accessing-data-files-from-package-code), +the files will be accessed relative to the location of the current +executable. + +A library cannot (currently) be prefix-independent, because it will be +linked into an executable whose file system location bears no relation +to the library package. + +### Controlling Flag Assignments ### + +Flag assignments (see the [resolution of conditions and +flags](developing-packages.html#resolution-of-conditions-and-flags)) can +be controlled with the following command line options. + +`-f` _flagname_ or `-f` `-`_flagname_ +: Force the specified flag to `true` or `false` (if preceded with a `-`). Later + specifications for the same flags will override earlier, i.e., + specifying `-fdebug -f-debug` is equivalent to `-f-debug` + +`--flags=`_flagspecs_ +: Same as `-f`, but allows specifying multiple flag assignments at + once. The parameter is a space-separated list of flag names (to + force a flag to `true`), optionally preceded by a `-` (to force a + flag to `false`). For example, `--flags="debug -feature1 feature2"` is + equivalent to `-fdebug -f-feature1 -ffeature2`. + +### Building Test Suites ### + +`--enable-tests` +: Build the test suites defined in the package description file during the + `build` stage. Check for dependencies required by the test suites. If the + package is configured with this option, it will be possible to run the test + suites with the `test` command after the package is built. 
+ +`--disable-tests` +: (default) Do not build any test suites during the `build` stage. + Do not check for dependencies required only by the test suites. It will not + be possible to invoke the `test` command without reconfiguring the package. + +`--enable-coverage` +: Build libraries and executables (including test suites) with Haskell + Program Coverage enabled. Running the test suites will automatically + generate coverage reports with HPC. + +`--disable-coverage` +: (default) Do not enable Haskell Program Coverage. + +### Miscellaneous options ### + +`--user` +: Does a per-user installation. This changes the [default installation + prefix](#paths-in-the-simple-build-system). It also allows + dependencies to be satisfied by the user's package database, in + addition to the global database. This also implies a default of + `--user` for any subsequent `install` command, as packages + registered in the global database should not depend on packages + registered in a user's database. + +`--global` +: (default) Does a global installation. In this case package + dependencies must be satisfied by the global package database. All + packages in the user's package database will be ignored. Typically + the final installation step will require administrative privileges. + +`--package-db=`_db_ +: Allows package dependencies to be satisfied from this additional + package database _db_ in addition to the global package database. + All packages in the user's package database will be ignored. The + interpretation of _db_ is implementation-specific. Typically it will + be a file or directory. Not all implementations support arbitrary + package databases. + +`--enable-optimization`[=_n_] or `-O`[_n_] +: (default) Build with optimization flags (if available). This is + appropriate for production use, taking more time to build faster + libraries and programs. + + The optional _n_ value is the optimisation level. Some compilers + support multiple optimisation levels. 
The range is 0 to 2. Level 0 + is equivalent to `--disable-optimization`, level 1 is the default if + no _n_ parameter is given. Level 2 is higher optimisation if the + compiler supports it. Level 2 is likely to lead to longer compile + times and bigger generated code. + +`--disable-optimization` +: Build without optimization. This is suited for development: building + will be quicker, but the resulting library or programs will be slower. + +`--enable-library-profiling` or `-p` +: Request that an additional version of the library with profiling + features enabled be built and installed (only for implementations + that support profiling). + +`--disable-library-profiling` +: (default) Do not generate an additional profiling version of the + library. + +`--enable-profiling` +: Any executables generated should have profiling enabled (only for + implementations that support profiling). For this to work, all + libraries used by these executables must also have been built with + profiling support. The library will be built with profiling enabled (if + supported) unless `--disable-library-profiling` is specified. + +`--disable-profiling` +: (default) Do not enable profiling in generated executables. + +`--enable-library-vanilla` +: (default) Build ordinary libraries (as opposed to profiling + libraries). This is independent of the `--enable-library-profiling` + option. If you enable both, you get both. + +`--disable-library-vanilla` +: Do not build ordinary libraries. This is useful in conjunction with + `--enable-library-profiling` to build only profiling libraries, + rather than profiling and ordinary libraries. + +`--enable-library-for-ghci` +: (default) Build libraries suitable for use with GHCi. + +`--disable-library-for-ghci` +: Not all platforms support GHCi and indeed on some platforms, trying + to build GHCi libs fails. In such cases this flag can be used as a + workaround. + +`--enable-split-objs` +: Use the GHC `-split-objs` feature when building the library. 
This + reduces the final size of the executables that use the library by + allowing them to link with only the bits that they use rather than + the entire library. The downside is that building the library takes + longer and uses considerably more memory. + +`--disable-split-objs` +: (default) Do not use the GHC `-split-objs` feature. This makes + building the library quicker but the final executables that use the + library will be larger. + +`--enable-executable-stripping` +: (default) When installing binary executable programs, run the + `strip` program on the binary. This can considerably reduce the size + of the executable binary file. It does this by removing debugging + information and symbols. While such extra information is useful for + debugging C programs with traditional debuggers it is rarely helpful + for debugging binaries produced by Haskell compilers. + + Not all Haskell implementations generate native binaries. For such + implementations this option has no effect. + +`--disable-executable-stripping` +: Do not strip binary executables during installation. You might want + to use this option if you need to debug a program using gdb, for + example if you want to debug the C parts of a program containing + both Haskell and C code. Another reason is if you are building a + package for a system which has a policy of managing the stripping + itself (such as some Linux distributions). + +`--enable-shared` +: Build shared library. This implies a separate compiler run to + generate position independent code as required on most platforms. + +`--disable-shared` +: (default) Do not build shared library. + +`--enable-executable-dynamic` +: Link executables dynamically. The executable's library dependencies should + be built as shared objects. This implies `--enable-shared` unless + `--disable-shared` is explicitly specified. + +`--disable-executable-dynamic` +: (default) Link executables statically. 
+ +`--configure-option=`_str_ +: An extra option to an external `configure` script, if one is used + (see the section on [system-dependent + parameters](developing-packages.html#system-dependent-parameters)). + There can be several of these options. + +`--extra-include-dirs`[=_dir_] +: An extra directory to search for C header files. You can use this + flag multiple times to get a list of directories. + + You might need to use this flag if you have standard system header + files in a non-standard location that is not mentioned in the + package's `.cabal` file. Using this option has the same effect as + appending the directory _dir_ to the `include-dirs` field in each + library and executable in the package's `.cabal` file. The advantage + of course is that you do not have to modify the package at all. + These extra directories will be used while building the package and + for libraries it is also saved in the package registration + information and used when compiling modules that use the library. + +`--extra-lib-dirs`[=_dir_] +: An extra directory to search for system library files. You can use + this flag multiple times to get a list of directories. + + You might need to use this flag if you have standard system + libraries in a non-standard location that is not mentioned in the + package's `.cabal` file. Using this option has the same effect as + appending the directory _dir_ to the `extra-lib-dirs` field in each + library and executable in the package's `.cabal` file. The advantage + of course is that you do not have to modify the package at all. + These extra directories will be used while building the package and + for libraries it is also saved in the package registration + information and used when compiling modules that use the library. + +`--allow-newer`[=_pkgs_] +: Selectively relax upper bounds in dependencies without editing the + package description. 
+ + If you want to install a package A that depends on B >= 1.0 && < 2.0, but + you have the version 2.0 of B installed, you can compile A against B 2.0 by + using `cabal install --allow-newer=B A`. This works for the whole package + index: if A also depends on C that in turn depends on B < 2.0, C's + dependency on B will be also relaxed. + + Example: + + ~~~~~~~~~~~~~~~~ + $ cd foo + $ cabal configure + Resolving dependencies... + cabal: Could not resolve dependencies: + [...] + $ cabal configure --allow-newer + Resolving dependencies... + Configuring foo... + ~~~~~~~~~~~~~~~~ + + Additional examples: + + ~~~~~~~~~~~~~~~~ + # Relax upper bounds in all dependencies. + $ cabal install --allow-newer foo + + # Relax upper bounds only in dependencies on bar, baz and quux. + $ cabal install --allow-newer=bar,baz,quux foo + + # Relax the upper bound on bar and force bar==2.1. + $ cabal install --allow-newer=bar --constraint="bar==2.1" foo + ~~~~~~~~~~~~~~~~ + + It's also possible to enable `--allow-newer` permanently by setting + `allow-newer: True` in the `~/.cabal/config` file. + +`--constraint=`_constraint_ +: Restrict solutions involving a package to a given version range. + For example, `cabal install --constraint="bar==2.1"` will only consider + install plans that do not use `bar` at all, or `bar` of version 2.1. + + As a special case, `cabal install --constraint="bar -none"` prevents `bar` + from being used at all (`-none` abbreviates `> 1 && < 1`); `cabal install + --constraint="bar installed"` prevents reinstallation of the `bar` package; + `cabal install --constraint="bar +foo -baz"` specifies that the flag `foo` + should be turned on and the `baz` flag should be turned off. + +## setup build ## + +Perform any preprocessing or compilation needed to make this package ready for installation. 
+ +This command takes the following options: + +--_prog_-options=_options_, --_prog_-option=_option_ +: These are mostly the same as the [options at the configure + step](#setup-configure). Unlike the options specified at the + configure step, any program options specified at the build step are + not persistent but are used for that invocation only. The options + specified at the build step are in addition to, not in replacement of, + any options specified at the configure step. + +## setup haddock ## + +Build the documentation for the package using [haddock][]. By default, +only the documentation for the exposed modules is generated (but see the +`--executables` and `--internal` flags below). + +This command takes the following options: + +`--hoogle` +: Generate a file `dist/doc/html/`_pkgid_`.txt`, which can be + converted by [Hoogle](http://www.haskell.org/hoogle/) into a + database for searching. This is equivalent to running [haddock][] + with the `--hoogle` flag. + +`--html-location=`_url_ +: Specify a template for the location of HTML documentation for + prerequisite packages. The substitutions ([see + listing](#paths-in-the-simple-build-system)) are applied to the + template to obtain a location for each package, which will be used + by hyperlinks in the generated documentation. For example, the + following command generates links pointing at [Hackage] pages: + + > setup haddock --html-location='http://hackage.haskell.org/packages/archive/$pkg/latest/doc/html' + + Here the argument is quoted to prevent substitution by the shell. If + this option is omitted, the location for each package is obtained + using the package tool (e.g. `ghc-pkg`). + +`--executables` +: Also run [haddock][] for the modules of all the executable programs. + By default [haddock][] is run only on the exported modules. + +`--internal` +: Run [haddock][] for all modules, including unexposed ones, and + make [haddock][] generate documentation for unexported symbols as + well. 
+ +`--css=`_path_ +: The argument _path_ denotes a CSS file, which is passed to + [haddock][] and used to set the style of the generated + documentation. This is only needed to override the default style + that [haddock][] uses. + +`--hyperlink-source` +: Generate [haddock][] documentation integrated with [HsColour][]. + First, [HsColour][] is run to generate colourised code. Then + [haddock][] is run to generate HTML documentation. Each entity + shown in the documentation is linked to its definition in the + colourised code. + +`--hscolour-css=`_path_ +: The argument _path_ denotes a CSS file, which is passed to [HsColour][] as in + + > runhaskell Setup.hs hscolour --css=_path_ + +## setup hscolour ## + +Produce colourised code in HTML format using [HsColour][]. Colourised +code for exported modules is put in `dist/doc/html/`_pkgid_`/src`. + +This command takes the following options: + +`--executables` +: Also run [HsColour][] on the sources of all executable programs. + Colourised code is put in `dist/doc/html/`_pkgid_/_executable_`/src`. + +`--css=`_path_ +: Use the given CSS file for the generated HTML files. The CSS file + defines the colours used to colourise code. Note that this copies + the given CSS file to the directory with the generated HTML files + (renamed to `hscolour.css`) rather than linking to it. + +## setup install ## + +Copy the files into the install locations and (for library packages) +register the package with the compiler, i.e. make the modules it +contains available to programs. + +The [install locations](#installation-paths) are determined by options +to `setup configure`. + +This command takes the following options: + +`--global` +: Register this package in the system-wide database. (This is the + default, unless the `--user` option was supplied to the `configure` + command.) + +`--user` +: Register this package in the user's local package database. 
(This is + the default if the `--user` option was supplied to the `configure` + command.) + +## setup copy ## + +Copy the files without registering them. This command is mainly of use +to those creating binary packages. + +This command takes the following option: + +`--destdir=`_path_ + +Specify the directory under which to place installed files. If this is +not given, then the root directory is assumed. + +## setup register ## + +Register this package with the compiler, i.e. make the modules it +contains available to programs. This only makes sense for library +packages. Note that the `install` command incorporates this action. The +main use of this separate command is in the post-installation step for a +binary package. + +This command takes the following options: + +`--global` +: Register this package in the system-wide database. (This is the default.) + + +`--user` +: Register this package in the user's local package database. + + +`--gen-script` +: Instead of registering the package, generate a script containing + commands to perform the registration. On Unix, this file is called + `register.sh`, on Windows, `register.bat`. This script might be + included in a binary bundle, to be run after the bundle is unpacked + on the target system. + +`--gen-pkg-config`[=_path_] +: Instead of registering the package, generate a package registration + file. This only applies to compilers that support package + registration files which at the moment is only GHC. The file should + be used with the compiler's mechanism for registering packages. This + option is mainly intended for packaging systems. If possible use the + `--gen-script` option instead since it is more portable across + Haskell implementations. The _path_ is + optional and can be used to specify a particular output file to + generate. Otherwise, by default the file is the package name and + version with a `.conf` extension. 
+ +`--inplace` +: Registers the package for use directly from the build tree, without + needing to install it. This can be useful for testing: there's no + need to install the package after modifying it, just recompile and + test. + + This flag does not create a build-tree-local package database. It + still registers the package in one of the user or global databases. + + However, there are some caveats. It only works with GHC + (currently). It only works if your package doesn't depend on having + any supplemental files installed --- plain Haskell libraries should + be fine. + +## setup unregister ## + +Deregister this package with the compiler. + +This command takes the following options: + +`--global` +: Deregister this package in the system-wide database. (This is the default.) + +`--user` +: Deregister this package in the user's local package database. + +`--gen-script` +: Instead of deregistering the package, generate a script containing + commands to perform the deregistration. On Unix, this file is + called `unregister.sh`, on Windows, `unregister.bat`. This script + might be included in a binary bundle, to be run on the target + system. + +## setup clean ## + +Remove any local files created during the `configure`, `build`, +`haddock`, `register` or `unregister` steps, and also any files and +directories listed in the `extra-tmp-files` field. + +This command takes the following options: + +`--save-configure` or `-s` +: Keeps the configuration information so it is not necessary to run + the configure step again before building. + +## setup test ## + +Run the test suites specified in the package description file. Aside from +the following flags, Cabal accepts the name of one or more test suites on the +command line after `test`. When supplied, Cabal will run only the named test +suites, otherwise, Cabal will run all test suites in the package. + +`--builddir=`_dir_ +: The directory where Cabal puts generated build files (default: `dist`). 
+ Test logs will be located in the `test` subdirectory. + +`--human-log=`_path_ +: The template used to name human-readable test logs; the path is relative + to `dist/test`. By default, logs are named according to the template + `$pkgid-$test-suite.log`, so that each test suite will be logged to its own + human-readable log file. Template variables allowed are: `$pkgid`, + `$compiler`, `$os`, `$arch`, `$abi`, `$abitag`, `$test-suite`, and `$result`. + +`--machine-log=`_path_ +: The path to the machine-readable log, relative to `dist/test`. The default + template is `$pkgid.log`. Template variables allowed are: `$pkgid`, + `$compiler`, `$os`, `$arch`, `$abi`, `$abitag` and `$result`. + +`--show-details=`_filter_ +: Determines if the results of individual test cases are shown on the + terminal. May be `always` (always show), `never` (never show), `failures` + (show only failed results), or `streaming` (show all results in real time). + +`--test-options=`_options_ +: Give extra options to the test executables. + +`--test-option=`_option_ +: give an extra option to the test executables. There is no need to quote + options containing spaces because a single option is assumed, so options + will not be split on spaces. + +## setup sdist ## + +Create a system- and compiler-independent source distribution in a file +_package_-_version_`.tar.gz` in the `dist` subdirectory, for +distribution to package builders. When unpacked, the commands listed in +this section will be available. + +The files placed in this distribution are the package description file, +the setup script, the sources of the modules named in the package +description file, and files named in the `license-file`, `main-is`, +`c-sources`, `js-sources`, `data-files`, `extra-source-files` and +`extra-doc-files` fields. + +This command takes the following option: + +`--snapshot` +: Append today's date (in "YYYYMMDD" format) to the version number for + the generated source package. 
The original package is unaffected. + + +[dist-simple]: ../release/cabal-latest/doc/API/Cabal/Distribution-Simple.html +[dist-make]: ../release/cabal-latest/doc/API/Cabal/Distribution-Make.html +[dist-license]: ../release/cabal-latest/doc/API/Cabal/Distribution-License.html#t:License +[extension]: ../release/cabal-latest/doc/API/Cabal/Language-Haskell-Extension.html#t:Extension +[BuildType]: ../release/cabal-latest/doc/API/Cabal/Distribution-PackageDescription.html#t:BuildType +[alex]: http://www.haskell.org/alex/ +[autoconf]: http://www.gnu.org/software/autoconf/ +[c2hs]: http://www.cse.unsw.edu.au/~chak/haskell/c2hs/ +[cpphs]: http://projects.haskell.org/cpphs/ +[greencard]: http://hackage.haskell.org/package/greencard +[haddock]: http://www.haskell.org/haddock/ +[HsColour]: http://www.cs.york.ac.uk/fp/darcs/hscolour/ +[happy]: http://www.haskell.org/happy/ +[Hackage]: http://hackage.haskell.org/ +[pkg-config]: http://www.freedesktop.org/wiki/Software/pkg-config/ diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/doc/misc.markdown cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/doc/misc.markdown --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/doc/misc.markdown 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/doc/misc.markdown 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,109 @@ +% Cabal User Guide + +# Reporting bugs and deficiencies # + +Please report any flaws or feature requests in the [bug tracker][]. + +For general discussion or queries email the libraries mailing list +. There is also a development mailing list +. + +[bug tracker]: https://github.com/haskell/cabal/issues + +# Stability of Cabal interfaces # + +The Cabal library and related infrastructure is still under active +development. New features are being added and limitations and bugs are +being fixed. This requires internal changes and often user visible +changes as well. 
We therefore cannot promise complete future-proof +stability, at least not without halting all development work. + +This section documents the aspects of the Cabal interface that we can +promise to keep stable and which bits are subject to change. + +## Cabal file format ## + +This is backwards compatible and mostly forwards compatible. New fields +can be added without breaking older versions of Cabal. Fields can be +deprecated without breaking older packages. + +## Command-line interface ## + +### Very Stable Command-line interfaces ### + +* `./setup configure` + * `--prefix` + * `--user` + * `--ghc`, `--uhc` + * `--verbose` + * `--prefix` + +* `./setup build` +* `./setup install` +* `./setup register` +* `./setup copy` + +### Stable Command-line interfaces ### + +### Unstable command-line ### + +## Functions and Types ## + +The Cabal library follows the [Package Versioning Policy][PVP]. This +means that within a stable major release, for example 1.2.x, there will +be no incompatible API changes. But minor versions increments, for +example 1.2.3, indicate compatible API additions. + +The Package Versioning Policy does not require any API guarantees +between major releases, for example between 1.2.x and 1.4.x. In practise +of course not everything changes between major releases. Some parts of +the API are more prone to change than others. The rest of this section +gives some informal advice on what level of API stability you can expect +between major releases. + +[PVP]: http://www.haskell.org/haskellwiki/Package_versioning_policy + +### Very Stable API ### + +* `defaultMain` + +* `defaultMainWithHooks defaultUserHooks` + + But regular `defaultMainWithHooks` isn't stable since `UserHooks` + changes. + +### Semi-stable API ### + +* `UserHooks` The hooks API will change in the future + +* `Distribution.*` is mostly declarative information about packages and + is somewhat stable. 
+ +### Unstable API ### + +Everything under `Distribution.Simple.*` has no stability guarantee. + +## Hackage ## + +The index format is a partly stable interface. It consists of a tar.gz +file that contains directories with `.cabal` files in. In future it may +contain more kinds of files so do not assume every file is a `.cabal` +file. Incompatible revisions to the format would involve bumping the +name of the index file, i.e., `00-index.tar.gz`, `01-index.tar.gz` etc. + + +[dist-simple]: ../release/cabal-latest/doc/API/Cabal/Distribution-Simple.html +[dist-make]: ../release/cabal-latest/doc/API/Cabal/Distribution-Make.html +[dist-license]: ../release/cabal-latest/doc/API/Cabal/Distribution-License.html#t:License +[extension]: ../release/cabal-latest/doc/API/Cabal/Language-Haskell-Extension.html#t:Extension +[BuildType]: ../release/cabal-latest/doc/API/Cabal/Distribution-PackageDescription.html#t:BuildType +[alex]: http://www.haskell.org/alex/ +[autoconf]: http://www.gnu.org/software/autoconf/ +[c2hs]: http://www.cse.unsw.edu.au/~chak/haskell/c2hs/ +[cpphs]: http://projects.haskell.org/cpphs/ +[greencard]: http://hackage.haskell.org/package/greencard +[haddock]: http://www.haskell.org/haddock/ +[HsColour]: http://www.cs.york.ac.uk/fp/darcs/hscolour/ +[happy]: http://www.haskell.org/happy/ +[Hackage]: http://hackage.haskell.org/ +[pkg-config]: http://www.freedesktop.org/wiki/Software/pkg-config/ diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Language/Haskell/Extension.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Language/Haskell/Extension.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Language/Haskell/Extension.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Language/Haskell/Extension.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,795 @@ +{-# LANGUAGE DeriveDataTypeable #-} +{-# LANGUAGE DeriveGeneric #-} + +----------------------------------------------------------------------------- +-- | +-- 
Module : Language.Haskell.Extension +-- Copyright : Isaac Jones 2003-2004 +-- License : BSD3 +-- +-- Maintainer : libraries@haskell.org +-- Portability : portable +-- +-- Haskell language dialects and extensions + +module Language.Haskell.Extension ( + Language(..), + knownLanguages, + + Extension(..), + KnownExtension(..), + knownExtensions, + deprecatedExtensions + ) where + +import Distribution.Text (Text(..)) +import qualified Distribution.Compat.ReadP as Parse +import qualified Text.PrettyPrint as Disp +import qualified Data.Char as Char (isAlphaNum) +import Data.Array (Array, accumArray, bounds, Ix(inRange), (!)) +import Distribution.Compat.Binary (Binary) +import Data.Data (Data) +import Data.Typeable (Typeable) +import GHC.Generics (Generic) + +-- ------------------------------------------------------------ +-- * Language +-- ------------------------------------------------------------ + +-- | This represents a Haskell language dialect. +-- +-- Language 'Extension's are interpreted relative to one of these base +-- languages. +-- +data Language = + + -- | The Haskell 98 language as defined by the Haskell 98 report. + -- + Haskell98 + + -- | The Haskell 2010 language as defined by the Haskell 2010 report. + -- + | Haskell2010 + + -- | An unknown language, identified by its name. 
+ | UnknownLanguage String + deriving (Generic, Show, Read, Eq, Typeable, Data) + +instance Binary Language + +knownLanguages :: [Language] +knownLanguages = [Haskell98, Haskell2010] + +instance Text Language where + disp (UnknownLanguage other) = Disp.text other + disp other = Disp.text (show other) + + parse = do + lang <- Parse.munch1 Char.isAlphaNum + return (classifyLanguage lang) + +classifyLanguage :: String -> Language +classifyLanguage = \str -> case lookup str langTable of + Just lang -> lang + Nothing -> UnknownLanguage str + where + langTable = [ (show lang, lang) + | lang <- knownLanguages ] + +-- ------------------------------------------------------------ +-- * Extension +-- ------------------------------------------------------------ + +-- Note: if you add a new 'KnownExtension': +-- +-- * also add it to the Distribution.Simple.X.languageExtensions lists +-- (where X is each compiler: GHC, JHC, LHC, UHC, HaskellSuite) +-- +-- | This represents language extensions beyond a base 'Language' definition +-- (such as 'Haskell98') that are supported by some implementations, usually +-- in some special mode. +-- +-- Where applicable, references are given to an implementation's +-- official documentation. + +data Extension = + -- | Enable a known extension + EnableExtension KnownExtension + + -- | Disable a known extension + | DisableExtension KnownExtension + + -- | An unknown extension, identified by the name of its @LANGUAGE@ + -- pragma. + | UnknownExtension String + + deriving (Generic, Show, Read, Eq, Ord, Typeable, Data) + +instance Binary Extension + +data KnownExtension = + + -- | Allow overlapping class instances, provided there is a unique + -- most specific instance for each use. + -- + -- * + OverlappingInstances + + -- | Ignore structural rules guaranteeing the termination of class + -- instance resolution. Termination is guaranteed by a fixed-depth + -- recursion stack, and compilation may fail if this depth is + -- exceeded. 
+ -- + -- * + | UndecidableInstances + + -- | Implies 'OverlappingInstances'. Allow the implementation to + -- choose an instance even when it is possible that further + -- instantiation of types will lead to a more specific instance + -- being applicable. + -- + -- * + | IncoherentInstances + + -- | /(deprecated)/ Allow recursive bindings in @do@ blocks, using the @rec@ + -- keyword. See also 'RecursiveDo'. + | DoRec + + -- | Allow recursive bindings using @mdo@, a variant of @do@. + -- @DoRec@ provides a different, preferred syntax. + -- + -- * + | RecursiveDo + + -- | Provide syntax for writing list comprehensions which iterate + -- over several lists together, like the 'zipWith' family of + -- functions. + -- + -- * + | ParallelListComp + + -- | Allow multiple parameters in a type class. + -- + -- * + | MultiParamTypeClasses + + -- | Enable the dreaded monomorphism restriction. + -- + -- * + | MonomorphismRestriction + + -- | Allow a specification attached to a multi-parameter type class + -- which indicates that some parameters are entirely determined by + -- others. The implementation will check that this property holds + -- for the declared instances, and will use this property to reduce + -- ambiguity in instance resolution. + -- + -- * + | FunctionalDependencies + + -- | Like 'RankNTypes' but does not allow a higher-rank type to + -- itself appear on the left of a function arrow. + -- + -- * + | Rank2Types + + -- | Allow a universally-quantified type to occur on the left of a + -- function arrow. + -- + -- * + | RankNTypes + + -- | Allow data constructors to have polymorphic arguments. Unlike + -- 'RankNTypes', does not allow this for ordinary functions. + -- + -- * + | PolymorphicComponents + + -- | Allow existentially-quantified data constructors. + -- + -- * + | ExistentialQuantification + + -- | Cause a type variable in a signature, which has an explicit + -- @forall@ quantifier, to scope over the definition of the + -- accompanying value declaration. 
+ -- + -- * + | ScopedTypeVariables + + -- | Deprecated, use 'ScopedTypeVariables' instead. + | PatternSignatures + + -- | Enable implicit function parameters with dynamic scope. + -- + -- * + | ImplicitParams + + -- | Relax some restrictions on the form of the context of a type + -- signature. + -- + -- * + | FlexibleContexts + + -- | Relax some restrictions on the form of the context of an + -- instance declaration. + -- + -- * + | FlexibleInstances + + -- | Allow data type declarations with no constructors. + -- + -- * + | EmptyDataDecls + + -- | Run the C preprocessor on Haskell source code. + -- + -- * + | CPP + + -- | Allow an explicit kind signature giving the kind of types over + -- which a type variable ranges. + -- + -- * + | KindSignatures + + -- | Enable a form of pattern which forces evaluation before an + -- attempted match, and a form of strict @let@/@where@ binding. + -- + -- * + | BangPatterns + + -- | Allow type synonyms in instance heads. + -- + -- * + | TypeSynonymInstances + + -- | Enable Template Haskell, a system for compile-time + -- metaprogramming. + -- + -- * + | TemplateHaskell + + -- | Enable the Foreign Function Interface. In GHC, implements the + -- standard Haskell 98 Foreign Function Interface Addendum, plus + -- some GHC-specific extensions. + -- + -- * + | ForeignFunctionInterface + + -- | Enable arrow notation. + -- + -- * + | Arrows + + -- | /(deprecated)/ Enable generic type classes, with default instances defined in + -- terms of the algebraic structure of a type. + -- + -- * + | Generics + + -- | Enable the implicit importing of the module "Prelude". When + -- disabled, when desugaring certain built-in syntax into ordinary + -- identifiers, use whatever is in scope rather than the "Prelude" + -- -- version. + -- + -- * + | ImplicitPrelude + + -- | Enable syntax for implicitly binding local names corresponding + -- to the field names of a record. Puns bind specific names, unlike + -- 'RecordWildCards'. 
+ -- + -- * + | NamedFieldPuns + + -- | Enable a form of guard which matches a pattern and binds + -- variables. + -- + -- * + | PatternGuards + + -- | Allow a type declared with @newtype@ to use @deriving@ for any + -- class with an instance for the underlying type. + -- + -- * + | GeneralizedNewtypeDeriving + + -- | Enable the \"Trex\" extensible records system. + -- + -- * + | ExtensibleRecords + + -- | Enable type synonyms which are transparent in some definitions + -- and opaque elsewhere, as a way of implementing abstract + -- datatypes. + -- + -- * + | RestrictedTypeSynonyms + + -- | Enable an alternate syntax for string literals, + -- with string templating. + -- + -- * + | HereDocuments + + -- | Allow the character @#@ as a postfix modifier on identifiers. + -- Also enables literal syntax for unboxed values. + -- + -- * + | MagicHash + + -- | Allow data types and type synonyms which are indexed by types, + -- i.e. ad-hoc polymorphism for types. + -- + -- * + | TypeFamilies + + -- | Allow a standalone declaration which invokes the type class + -- @deriving@ mechanism. + -- + -- * + | StandaloneDeriving + + -- | Allow certain Unicode characters to stand for certain ASCII + -- character sequences, e.g. keywords and punctuation. + -- + -- * + | UnicodeSyntax + + -- | Allow the use of unboxed types as foreign types, e.g. in + -- @foreign import@ and @foreign export@. + -- + -- * + | UnliftedFFITypes + + -- | Enable interruptible FFI. + -- + -- * + | InterruptibleFFI + + -- | Allow use of CAPI FFI calling convention (@foreign import capi@). + -- + -- * + | CApiFFI + + -- | Defer validity checking of types until after expanding type + -- synonyms, relaxing the constraints on how synonyms may be used. + -- + -- * + | LiberalTypeSynonyms + + -- | Allow the name of a type constructor, type class, or type + -- variable to be an infix operator. 
+ | TypeOperators + + -- | Enable syntax for implicitly binding local names corresponding + -- to the field names of a record. A wildcard binds all unmentioned + -- names, unlike 'NamedFieldPuns'. + -- + -- * + | RecordWildCards + + -- | Deprecated, use 'NamedFieldPuns' instead. + | RecordPuns + + -- | Allow a record field name to be disambiguated by the type of + -- the record it's in. + -- + -- * + | DisambiguateRecordFields + + -- | Enable traditional record syntax (as supported by Haskell 98) + -- + -- * + | TraditionalRecordSyntax + + -- | Enable overloading of string literals using a type class, much + -- like integer literals. + -- + -- * + | OverloadedStrings + + -- | Enable generalized algebraic data types, in which type + -- variables may be instantiated on a per-constructor basis. Implies + -- 'GADTSyntax'. + -- + -- * + | GADTs + + -- | Enable GADT syntax for declaring ordinary algebraic datatypes. + -- + -- * + | GADTSyntax + + -- | Make pattern bindings monomorphic. + -- + -- * + | MonoPatBinds + + -- | Relax the requirements on mutually-recursive polymorphic + -- functions. + -- + -- * + | RelaxedPolyRec + + -- | Allow default instantiation of polymorphic types in more + -- situations. + -- + -- * + | ExtendedDefaultRules + + -- | Enable unboxed tuples. + -- + -- * + | UnboxedTuples + + -- | Enable @deriving@ for classes 'Data.Typeable.Typeable' and + -- 'Data.Generics.Data'. + -- + -- * + | DeriveDataTypeable + + -- | Enable @deriving@ for 'GHC.Generics.Generic' and 'GHC.Generics.Generic1'. + -- + -- * + | DeriveGeneric + + -- | Enable support for default signatures. + -- + -- * + | DefaultSignatures + + -- | Allow type signatures to be specified in instance declarations. + -- + -- * + | InstanceSigs + + -- | Allow a class method's type to place additional constraints on + -- a class type variable. + -- + -- * + | ConstrainedClassMethods + + -- | Allow imports to be qualified by the package name the module is + -- intended to be imported from, e.g. 
+ -- + -- > import "network" Network.Socket + -- + -- * + | PackageImports + + -- | /(deprecated)/ Allow a type variable to be instantiated at a + -- polymorphic type. + -- + -- * + | ImpredicativeTypes + + -- | /(deprecated)/ Change the syntax for qualified infix operators. + -- + -- * + | NewQualifiedOperators + + -- | Relax the interpretation of left operator sections to allow + -- unary postfix operators. + -- + -- * + | PostfixOperators + + -- | Enable quasi-quotation, a mechanism for defining new concrete + -- syntax for expressions and patterns. + -- + -- * + | QuasiQuotes + + -- | Enable generalized list comprehensions, supporting operations + -- such as sorting and grouping. + -- + -- * + | TransformListComp + + -- | Enable monad comprehensions, which generalise the list + -- comprehension syntax to work for any monad. + -- + -- * + | MonadComprehensions + + -- | Enable view patterns, which match a value by applying a + -- function and matching on the result. + -- + -- * + | ViewPatterns + + -- | Allow concrete XML syntax to be used in expressions and patterns, + -- as per the Haskell Server Pages extension language: + -- . The ideas behind it are + -- discussed in the paper \"Haskell Server Pages through Dynamic Loading\" + -- by Niklas Broberg, from Haskell Workshop '05. + | XmlSyntax + + -- | Allow regular pattern matching over lists, as discussed in the + -- paper \"Regular Expression Patterns\" by Niklas Broberg, Andreas Farre + -- and Josef Svenningsson, from ICFP '04. + | RegularPatterns + + -- | Enable the use of tuple sections, e.g. @(, True)@ desugars into + -- @\x -> (x, True)@. + -- + -- * + | TupleSections + + -- | Allow GHC primops, written in C--, to be imported into a Haskell + -- file. + | GHCForeignImportPrim + + -- | Support for patterns of the form @n + k@, where @k@ is an + -- integer literal. + -- + -- * + | NPlusKPatterns + + -- | Improve the layout rule when @if@ expressions are used in a @do@ + -- block. 
+ | DoAndIfThenElse + + -- | Enable support for multi-way @if@-expressions. + -- + -- * + | MultiWayIf + + -- | Enable support lambda-@case@ expressions. + -- + -- * + | LambdaCase + + -- | Makes much of the Haskell sugar be desugared into calls to the + -- function with a particular name that is in scope. + -- + -- * + | RebindableSyntax + + -- | Make @forall@ a keyword in types, which can be used to give the + -- generalisation explicitly. + -- + -- * + | ExplicitForAll + + -- | Allow contexts to be put on datatypes, e.g. the @Eq a@ in + -- @data Eq a => Set a = NilSet | ConsSet a (Set a)@. + -- + -- * + | DatatypeContexts + + -- | Local (@let@ and @where@) bindings are monomorphic. + -- + -- * + | MonoLocalBinds + + -- | Enable @deriving@ for the 'Data.Functor.Functor' class. + -- + -- * + | DeriveFunctor + + -- | Enable @deriving@ for the 'Data.Traversable.Traversable' class. + -- + -- * + | DeriveTraversable + + -- | Enable @deriving@ for the 'Data.Foldable.Foldable' class. + -- + -- * + | DeriveFoldable + + -- | Enable non-decreasing indentation for @do@ blocks. + -- + -- * + | NondecreasingIndentation + + -- | Allow imports to be qualified with a safe keyword that requires + -- the imported module be trusted as according to the Safe Haskell + -- definition of trust. + -- + -- > import safe Network.Socket + -- + -- * + | SafeImports + + -- | Compile a module in the Safe, Safe Haskell mode -- a restricted + -- form of the Haskell language to ensure type safety. + -- + -- * + | Safe + + -- | Compile a module in the Trustworthy, Safe Haskell mode -- no + -- restrictions apply but the module is marked as trusted as long as + -- the package the module resides in is trusted. + -- + -- * + | Trustworthy + + -- | Compile a module in the Unsafe, Safe Haskell mode so that + -- modules compiled using Safe, Safe Haskell mode can't import it. 
+ -- + -- * + | Unsafe + + -- | Allow type class/implicit parameter/equality constraints to be + -- used as types with the special kind constraint. Also generalise + -- the @(ctxt => ty)@ syntax so that any type of kind constraint can + -- occur before the arrow. + -- + -- * + | ConstraintKinds + + -- | Enable kind polymorphism. + -- + -- * + | PolyKinds + + -- | Enable datatype promotion. + -- + -- * + | DataKinds + + -- | Enable parallel arrays syntax (@[:@, @:]@) for /Data Parallel Haskell/. + -- + -- * + | ParallelArrays + + -- | Enable explicit role annotations, like in (@type role Foo representational representational@). + -- + -- * + | RoleAnnotations + + -- | Enable overloading of list literals, arithmetic sequences and + -- list patterns using the 'IsList' type class. + -- + -- * + | OverloadedLists + + -- | Enable case expressions that have no alternatives. Also applies to lambda-case expressions if they are enabled. + -- + -- * + | EmptyCase + + -- | Triggers the generation of derived 'Typeable' instances for every + -- datatype and type class declaration. + -- + -- * + | AutoDeriveTypeable + + -- | Desugars negative literals directly (without using negate). + -- + -- * + | NegativeLiterals + + -- | Allow the use of binary integer literal syntax (e.g. @0b11001001@ to denote @201@). + -- + -- * + | BinaryLiterals + + -- | Allow the use of floating literal syntax for all instances of 'Num', including 'Int' and 'Integer'. + -- + -- * + | NumDecimals + + -- | Enable support for type classes with no type parameter. + -- + -- * + | NullaryTypeClasses + + -- | Enable explicit namespaces in module import/export lists. + -- + -- * + | ExplicitNamespaces + + -- | Allow the user to write ambiguous types, and the type inference engine to infer them. + -- + -- * + | AllowAmbiguousTypes + + -- | Enable @foreign import javascript@. + | JavaScriptFFI + + -- | Allow giving names to and abstracting over patterns. 
+ -- + -- * + | PatternSynonyms + + -- | Allow anonymous placeholders (underscore) inside type signatures. The + -- type inference engine will generate a message describing the type inferred + -- at the hole's location. + -- + -- * + | PartialTypeSignatures + + -- | Allow named placeholders written with a leading underscore inside type + -- signatures. Wildcards with the same name unify to the same type. + -- + -- * + | NamedWildCards + + -- | Enable @deriving@ for any class. + -- + -- * + | DeriveAnyClass + + deriving (Generic, Show, Read, Eq, Ord, Enum, Bounded, Typeable, Data) + +instance Binary KnownExtension + +{-# DEPRECATED knownExtensions + "KnownExtension is an instance of Enum and Bounded, use those instead." #-} +knownExtensions :: [KnownExtension] +knownExtensions = [minBound..maxBound] + +-- | Extensions that have been deprecated, possibly paired with another +-- extension that replaces it. +-- +deprecatedExtensions :: [(Extension, Maybe Extension)] +deprecatedExtensions = + [ (EnableExtension RecordPuns, Just (EnableExtension NamedFieldPuns)) + , (EnableExtension PatternSignatures, Just (EnableExtension ScopedTypeVariables)) + ] +-- NOTE: when adding deprecated extensions that have new alternatives +-- we must be careful to make sure that the deprecation messages are +-- valid. We must not recommend aliases that cannot be used with older +-- compilers, perhaps by adding support in Cabal to translate the new +-- name to the old one for older compilers. Otherwise we are in danger +-- of the scenario in ticket #689. 
+ +instance Text Extension where + disp (UnknownExtension other) = Disp.text other + disp (EnableExtension ke) = Disp.text (show ke) + disp (DisableExtension ke) = Disp.text ("No" ++ show ke) + + parse = do + extension <- Parse.munch1 Char.isAlphaNum + return (classifyExtension extension) + +instance Text KnownExtension where + disp ke = Disp.text (show ke) + + parse = do + extension <- Parse.munch1 Char.isAlphaNum + case classifyKnownExtension extension of + Just ke -> + return ke + Nothing -> + fail ("Can't parse " ++ show extension ++ " as KnownExtension") + +classifyExtension :: String -> Extension +classifyExtension string + = case classifyKnownExtension string of + Just ext -> EnableExtension ext + Nothing -> + case string of + 'N':'o':string' -> + case classifyKnownExtension string' of + Just ext -> DisableExtension ext + Nothing -> UnknownExtension string + _ -> UnknownExtension string + +-- | 'read' for 'KnownExtension's is really really slow so for the Text +-- instance +-- what we do is make a simple table indexed off the first letter in the +-- extension name. The extension names actually cover the range @'A'-'Z'@ +-- pretty densely and the biggest bucket is 7 so it's not too bad. We just do +-- a linear search within each bucket. +-- +-- This gives an order of magnitude improvement in parsing speed, and it'll +-- also allow us to do case insensitive matches in future if we prefer. +-- +classifyKnownExtension :: String -> Maybe KnownExtension +classifyKnownExtension "" = Nothing +classifyKnownExtension string@(c : _) + | inRange (bounds knownExtensionTable) c + = lookup string (knownExtensionTable ! c) + | otherwise = Nothing + +knownExtensionTable :: Array Char [(String, KnownExtension)] +knownExtensionTable = + accumArray (flip (:)) [] ('A', 'Z') + [ (head str, (str, extension)) + | extension <- [toEnum 0 ..] 
+ , let str = show extension ] diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/LICENSE cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/LICENSE --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/LICENSE 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,34 @@ +Copyright (c) 2003-2014, Isaac Jones, Simon Marlow, Martin Sjögren, + Bjorn Bringert, Krasimir Angelov, + Malcolm Wallace, Ross Patterson, Ian Lynagh, + Duncan Coutts, Thomas Schilling, + Johan Tibell, Mikhail Glushenkov +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of Isaac Jones nor the names of other + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/README.md cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/README.md --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/README.md 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/README.md 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,182 @@ +The Cabal library package +========================= + +See the [Cabal web site] for more information. + +If you also want the `cabal` command-line program, you need the +[cabal-install] package in addition to this library. + +[cabal-install]: ../cabal-install/README.md + +Installing the Cabal library +============================ + +If you already have the `cabal` program +--------------------------------------- + +In this case run: + + $ cabal install + +However, if you do not have an existing version of the `cabal` program, +you first must install the Cabal library. To avoid this bootstrapping +problem, you can install the Cabal library directly as described below. + + +Installing as a user (no root or administrator access) +------------------------------------------------------ + + ghc -threaded --make Setup + ./Setup configure --user + ./Setup build + ./Setup install + +Note the use of the `--user` flag at the configure step. + +Compiling 'Setup' rather than using `runghc Setup` is much faster and +works on Windows. For all packages other than Cabal itself, it is fine +to use `runghc`. 
+ +This will install into `$HOME/.cabal/` on Unix and into +`Documents and Settings\$User\Application Data\cabal\` on Windows. +If you want to install elsewhere, use the `--prefix=` flag at the +configure step. + + +Installing as root or Administrator +----------------------------------- + + ghc -threaded --make Setup + ./Setup configure + ./Setup build + sudo ./Setup install + +Compiling Setup rather than using `runghc Setup` is much faster and +works on Windows. For all packages other than Cabal itself, it is fine +to use `runghc`. + +This will install into `/usr/local` on Unix, and on Windows it will +install into `$ProgramFiles/Haskell`. If you want to install elsewhere, +use the `--prefix=` flag at the configure step. + + +Using older versions of GHC and Cabal +====================================== + +It is recommended that you leave any pre-existing version of Cabal +installed. In particular, it is *essential* you keep the version that +came with GHC itself, since other installed packages require it (for +instance, the "ghc" API package). + +Prior to GHC 6.4.2, however, GHC did not deal particularly well with +having multiple versions of packages installed at once. So if you are +using GHC 6.4.1 or older and you have an older version of Cabal +installed, you should probably remove it by running: + + $ ghc-pkg unregister Cabal + +or, if you had Cabal installed only for your user account, run: + + $ ghc-pkg unregister Cabal --user + +The `filepath` dependency +========================= + +Cabal uses the [filepath] package, so it must be installed first. +GHC version 6.6.1 and later come with `filepath`, however, earlier +versions do not by default. If you do not already have `filepath`, +you need to install it. You can use any existing version of Cabal to do +that. If you have neither Cabal nor `filepath`, it is slightly +harder but still possible. + +Unpack Cabal and `filepath` into separate directories. 
For example: + + tar -xzf filepath-1.1.0.0.tar.gz + tar -xzf Cabal-1.6.0.0.tar.gz + + # rename to make the following instructions simpler: + mv filepath-1.1.0.0/ filepath/ + mv Cabal-1.6.0.0/ Cabal/ + + cd Cabal + ghc -i../filepath -cpp --make Setup.hs -o ../filepath/setup + cd ../filepath/ + ./setup configure --user + ./setup build + ./setup install + +This installs `filepath` so that you can install Cabal with the normal +method. + +[filepath]: http://hackage.haskell.org/package/filepath + +More information +================ + +Please see the [Cabal web site] for the [user guide] and [API +documentation]. There is additional information available on the +[development wiki]. + +[user guide]: http://www.haskell.org/cabal/users-guide +[API documentation]: http://www.haskell.org/cabal/release/cabal-latest/doc/API/Cabal/Distribution-Simple.html +[development wiki]: https://github.com/haskell/cabal/wiki + + +Bugs +==== + +Please report bugs and feature requests to Cabal's [bug tracker]. + + +Your help +--------- + +To help Cabal's development, it is enormously helpful to know from +Cabal's users what their most pressing problems are with Cabal and +[Hackage]. You may have a favourite Cabal bug or limitation. Look at +Cabal's [bug tracker]. Ensure that the problem is reported there and +adequately described. Comment on the issue to report how much of a +problem the bug is for you. Subscribe to the issues's notifications to +discussed requirements and keep informed on progress. For feature +requests, it is helpful if there is a description of how you would +expect to interact with the new feature. 
+ +[Hackage]: http://hackage.haskell.org + + +Source code +=========== + +You can get the master development branch using: + + $ git clone https://github.com/haskell/cabal.git + + +Credits +======= + +Cabal developers (in alphabetical order): + +- Krasimir Angelov +- Bjorn Bringert +- Duncan Coutts +- Isaac Jones +- David Himmelstrup ("Lemmih") +- Simon Marlow +- Ross Patterson +- Thomas Schilling +- Martin Sjögren +- Malcolm Wallace +- and nearly 30 other people have contributed occasional patches + +Cabal specification authors: + +- Isaac Jones +- Simon Marlow +- Ross Patterson +- Simon Peyton Jones +- Malcolm Wallace + + +[bug tracker]: https://github.com/haskell/cabal/issues +[Cabal web site]: http://www.haskell.org/cabal/ diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Setup.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Setup.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/Setup.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/Setup.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,10 @@ +import Distribution.Simple +main :: IO () +main = defaultMain + +-- Although this looks like the Simple build type, it is in fact vital that +-- we use this Setup.hs because it'll get compiled against the local copy +-- of the Cabal lib, thus enabling Cabal to bootstrap itself without relying +-- on any previous installation. This also means we can use any new features +-- immediately because we never have to worry about building Cabal with an +-- older version of itself. 
diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/hackage/check.sh cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/hackage/check.sh --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/hackage/check.sh 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/hackage/check.sh 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,25 @@ +#!/bin/sh + +base_version=1.4.0.2 +test_version=1.5.6 + +for setup in archive/*/*/Setup.hs archive/*/*/Setup.lhs; do + + pkgname=$(basename ${setup}) + + if test $(wc -w < ${setup}) -gt 21; then + if ghc -package Cabal-${base_version} -S ${setup} -o /dev/null 2> /dev/null; then + + if ghc -package Cabal-${test_version} -S ${setup} -o /dev/null 2> /dev/null; then + echo "OK ${setup}" + else + echo "FAIL ${setup} does not compile with Cabal-${test_version}" + fi + else + echo "OK ${setup} (does not compile with Cabal-${base_version})" + fi + else + echo "trivial ${setup}" + fi + +done diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/hackage/download.sh cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/hackage/download.sh --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/hackage/download.sh 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/hackage/download.sh 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,19 @@ +#!/bin/sh + +if test ! -f archive/archive.tar; then + + wget http://hackage.haskell.org/cgi-bin/hackage-scripts/archive.tar + mkdir -p archive + mv archive.tar archive/ + tar -C archive -xf archive/archive.tar + +fi + +if test ! 
-f archive/00-index.tar.gz; then + + wget http://hackage.haskell.org/packages/archive/00-index.tar.gz + mkdir -p archive + mv 00-index.tar.gz archive/ + tar -C archive -xzf archive/00-index.tar.gz + +fi diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/hackage/unpack.sh cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/hackage/unpack.sh --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/hackage/unpack.sh 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/hackage/unpack.sh 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,16 @@ +#!/bin/sh + +for tarball in archive/*/*/*.tar.gz; do + + pkgdir=$(dirname ${tarball}) + pkgname=$(basename ${tarball} .tar.gz) + + if tar -tzf ${tarball} ${pkgname}/Setup.hs 2> /dev/null; then + tar -xzf ${tarball} ${pkgname}/Setup.hs -O > ${pkgdir}/Setup.hs + elif tar -tzf ${tarball} ${pkgname}/Setup.lhs 2> /dev/null; then + tar -xzf ${tarball} ${pkgname}/Setup.lhs -O > ${pkgdir}/Setup.lhs + else + echo "${pkgname} has no Setup.hs or .lhs at all!!?!" + fi + +done diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/misc/ghc-supported-languages.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/misc/ghc-supported-languages.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/misc/ghc-supported-languages.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/misc/ghc-supported-languages.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,97 @@ +-- | A test program to check that ghc has got all of its extensions registered +-- +module Main where + +import Language.Haskell.Extension +import Distribution.Text +import Distribution.Simple.Utils +import Distribution.Verbosity + +import Data.List ((\\)) +import Data.Maybe +import Control.Applicative +import Control.Monad +import System.Environment +import System.Exit + +-- | A list of GHC extensions that are deliberately not registered, +-- e.g. 
due to being experimental and not ready for public consumption +-- +exceptions = map readExtension [] + +checkProblems :: [Extension] -> [String] +checkProblems implemented = + + let unregistered = + [ ext | ext <- implemented -- extensions that ghc knows about + , not (registered ext) -- but that are not registered + , ext `notElem` exceptions ] -- except for the exceptions + + -- check if someone has forgotten to update the exceptions list... + + -- exceptions that are not implemented + badExceptions = exceptions \\ implemented + + -- exceptions that are now registered + badExceptions' = filter registered exceptions + + in catMaybes + [ check unregistered $ unlines + [ "The following extensions are known to GHC but are not in the " + , "extension registry in Language.Haskell.Extension." + , " " ++ intercalate "\n " (map display unregistered) + , "If these extensions are ready for public consumption then they " + , "should be registered. If they are still experimental and you " + , "think they are not ready to be registered then please add them " + , "to the exceptions list in this test program along with an " + , "explanation." + ] + , check badExceptions $ unlines + [ "Error in the extension exception list. The following extensions" + , "are listed as exceptions but are not even implemented by GHC:" + , " " ++ intercalate "\n " (map display badExceptions) + , "Please fix this test program by correcting the list of" + , "exceptions." + ] + , check badExceptions' $ unlines + [ "Error in the extension exception list. The following extensions" + , "are listed as exceptions to registration but they are in fact" + , "now registered in Language.Haskell.Extension:" + , " " ++ intercalate "\n " (map display badExceptions') + , "Please fix this test program by correcting the list of" + , "exceptions." 
+ ] + ] + where + registered (UnknownExtension _) = False + registered _ = True + + check [] _ = Nothing + check _ i = Just i + + +main = topHandler $ do + [ghcPath] <- getArgs + exts <- getExtensions ghcPath + let problems = checkProblems exts + putStrLn (intercalate "\n" problems) + if null problems + then exitSuccess + else exitFailure + +getExtensions :: FilePath -> IO [Extension] +getExtensions ghcPath = + map readExtension . lines + <$> rawSystemStdout normal ghcPath ["--supported-languages"] + +readExtension :: String -> Extension +readExtension str = handleNoParse $ do + -- GHC defines extensions in a positive way, Cabal defines them + -- relative to H98 so we try parsing ("No" ++ extName) first + ext <- simpleParse ("No" ++ str) + case ext of + UnknownExtension _ -> simpleParse str + _ -> return ext + where + handleNoParse :: Maybe Extension -> Extension + handleNoParse = fromMaybe (error $ "unparsable extension " ++ show str) diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkExeV10/benchmarks/bench-Foo.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkExeV10/benchmarks/bench-Foo.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkExeV10/benchmarks/bench-Foo.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkExeV10/benchmarks/bench-Foo.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,8 @@ +module Main where + +import Foo +import System.Exit + +main :: IO () +main | fooTest [] = exitSuccess + | otherwise = exitFailure diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkExeV10/Check.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkExeV10/Check.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkExeV10/Check.hs 1970-01-01 00:00:00.000000000 +0000 +++ 
cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkExeV10/Check.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,16 @@ +module PackageTests.BenchmarkExeV10.Check + ( checkBenchmark + ) where + +import PackageTests.PackageTester +import System.FilePath +import Test.HUnit + +dir :: FilePath +dir = "PackageTests" "BenchmarkExeV10" + +checkBenchmark :: FilePath -> Test +checkBenchmark ghcPath = TestCase $ do + let spec = PackageSpec dir Nothing ["--enable-benchmarks"] + buildResult <- cabal_build spec ghcPath + assertBuildSucceeded buildResult diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkExeV10/Foo.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkExeV10/Foo.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkExeV10/Foo.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkExeV10/Foo.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,4 @@ +module Foo where + +fooTest :: [String] -> Bool +fooTest _ = True diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkExeV10/my.cabal cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkExeV10/my.cabal --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkExeV10/my.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkExeV10/my.cabal 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,15 @@ +name: my +version: 0.1 +license: BSD3 +cabal-version: >= 1.9.2 +build-type: Simple + +library + exposed-modules: Foo + build-depends: base + +benchmark bench-Foo + type: exitcode-stdio-1.0 + hs-source-dirs: benchmarks + main-is: bench-Foo.hs + build-depends: base, my diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkOptions/BenchmarkOptions.cabal 
cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkOptions/BenchmarkOptions.cabal --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkOptions/BenchmarkOptions.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkOptions/BenchmarkOptions.cabal 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,20 @@ +name: BenchmarkOptions +version: 0.1 +license: BSD3 +author: Johan Tibell +stability: stable +category: PackageTests +build-type: Simple +cabal-version: >= 1.9.2 + +description: + Check that Cabal passes the correct test options to test suites. + +executable dummy + main-is: test-BenchmarkOptions.hs + build-depends: base + +benchmark test-BenchmarkOptions + main-is: test-BenchmarkOptions.hs + type: exitcode-stdio-1.0 + build-depends: base diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkOptions/Check.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkOptions/Check.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkOptions/Check.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkOptions/Check.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,26 @@ +module PackageTests.BenchmarkOptions.Check where + +import PackageTests.PackageTester +import System.FilePath +import Test.HUnit + +suite :: FilePath -> Test +suite ghcPath = TestCase $ do + let spec = PackageSpec + { directory = "PackageTests" "BenchmarkOptions" + , configOpts = ["--enable-benchmarks"] + , distPref = Nothing + } + _ <- cabal_build spec ghcPath + result <- cabal_bench spec ["--benchmark-options=1 2 3"] ghcPath + let message = "\"cabal bench\" did not pass the correct options to the " + ++ "benchmark executable with \"--benchmark-options\"" + assertEqual message True $ successful result + result' <- cabal_bench spec [ "--benchmark-option=1" 
+ , "--benchmark-option=2" + , "--benchmark-option=3" + ] + ghcPath + let message' = "\"cabal bench\" did not pass the correct options to the " + ++ "benchmark executable with \"--benchmark-option\"" + assertEqual message' True $ successful result' diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkOptions/test-BenchmarkOptions.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkOptions/test-BenchmarkOptions.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkOptions/test-BenchmarkOptions.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkOptions/test-BenchmarkOptions.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,11 @@ +module Main where + +import System.Environment ( getArgs ) +import System.Exit ( exitFailure, exitSuccess ) + +main :: IO () +main = do + args <- getArgs + if args == ["1", "2", "3"] + then exitSuccess + else putStrLn ("Got: " ++ show args) >> exitFailure diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkStanza/Check.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkStanza/Check.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkStanza/Check.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkStanza/Check.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,52 @@ +module PackageTests.BenchmarkStanza.Check where + +import Test.HUnit +import System.FilePath +import qualified Data.Map as Map +import PackageTests.PackageTester +import Distribution.Version +import Distribution.PackageDescription.Parse + ( readPackageDescription ) +import Distribution.PackageDescription.Configuration + ( finalizePackageDescription ) +import Distribution.Package + ( PackageName(..), Dependency(..) 
) +import Distribution.PackageDescription + ( PackageDescription(..), BuildInfo(..), Benchmark(..) + , BenchmarkInterface(..) + , emptyBuildInfo + , emptyBenchmark, defaultRenaming ) +import Distribution.Verbosity (silent) +import Distribution.System (buildPlatform) +import Distribution.Compiler + ( CompilerId(..), CompilerFlavor(..), unknownCompilerInfo, AbiTag(..) ) +import Distribution.Text + +suite :: FilePath -> Test +suite ghcPath = TestCase $ do + let dir = "PackageTests" "BenchmarkStanza" + pdFile = dir "my" <.> "cabal" + spec = PackageSpec { directory = dir, configOpts = [], distPref = Nothing } + result <- cabal_configure spec ghcPath + assertOutputDoesNotContain "unknown section type" result + genPD <- readPackageDescription silent pdFile + let compiler = unknownCompilerInfo (CompilerId GHC $ Version [6, 12, 2] []) NoAbiTag + anticipatedBenchmark = emptyBenchmark + { benchmarkName = "dummy" + , benchmarkInterface = BenchmarkExeV10 (Version [1,0] []) "dummy.hs" + , benchmarkBuildInfo = emptyBuildInfo + { targetBuildDepends = + [ Dependency (PackageName "base") anyVersion ] + , targetBuildRenaming = + Map.singleton (PackageName "base") defaultRenaming + , hsSourceDirs = ["."] + } + , benchmarkEnabled = False + } + case finalizePackageDescription [] (const True) buildPlatform compiler [] genPD of + Left xs -> let depMessage = "should not have missing dependencies:\n" ++ + (unlines $ map (show . 
disp) xs) + in assertEqual depMessage True False + Right (f, _) -> let gotBenchmark = head $ benchmarks f + in assertEqual "parsed benchmark stanza does not match anticipated" + gotBenchmark anticipatedBenchmark diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkStanza/my.cabal cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkStanza/my.cabal --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkStanza/my.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BenchmarkStanza/my.cabal 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,19 @@ +name: BenchmarkStanza +version: 0.1 +license: BSD3 +author: Johan Tibell +stability: stable +category: PackageTests +build-type: Simple + +description: + Check that Cabal recognizes the benchmark stanza defined below. + +Library + exposed-modules: MyLibrary + build-depends: base + +benchmark dummy + main-is: dummy.hs + type: exitcode-stdio-1.0 + build-depends: base \ No newline at end of file diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/GlobalBuildDepsNotAdditive1/Check.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/GlobalBuildDepsNotAdditive1/Check.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/GlobalBuildDepsNotAdditive1/Check.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/GlobalBuildDepsNotAdditive1/Check.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,22 @@ +module PackageTests.BuildDeps.GlobalBuildDepsNotAdditive1.Check where + +import Test.HUnit +import PackageTests.PackageTester +import System.FilePath +import Data.List +import Control.Exception +import Prelude hiding (catch) + + +suite :: FilePath -> Test +suite ghcPath = TestCase $ do + let spec = PackageSpec ("PackageTests" "BuildDeps" 
"GlobalBuildDepsNotAdditive1") [] + result <- cabal_build spec ghcPath + do + assertEqual "cabal build should fail - see test-log.txt" False (successful result) + let sb = "Could not find module `Prelude'" + assertBool ("cabal output should be "++show sb) $ + sb `isInfixOf` outputText result + `catch` \exc -> do + putStrLn $ "Cabal result was "++show result + throwIO (exc :: SomeException) diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/GlobalBuildDepsNotAdditive1/GlobalBuildDepsNotAdditive1.cabal cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/GlobalBuildDepsNotAdditive1/GlobalBuildDepsNotAdditive1.cabal --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/GlobalBuildDepsNotAdditive1/GlobalBuildDepsNotAdditive1.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/GlobalBuildDepsNotAdditive1/GlobalBuildDepsNotAdditive1.cabal 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,20 @@ +name: GlobalBuildDepsNotAdditive1 +version: 0.1 +license: BSD3 +cabal-version: >= 1.6 +author: Stephen Blackheath +stability: stable +category: PackageTests +build-type: Simple + +description: + If you specify 'base' in the global build dependencies, then define + a library without base, it fails to find 'base' for the library. 
+ +--------------------------------------- + +build-depends: base + +Library + exposed-modules: MyLibrary + build-depends: bytestring, old-time diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/GlobalBuildDepsNotAdditive1/MyLibrary.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/GlobalBuildDepsNotAdditive1/MyLibrary.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/GlobalBuildDepsNotAdditive1/MyLibrary.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/GlobalBuildDepsNotAdditive1/MyLibrary.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,10 @@ +module MyLibrary where + +import qualified Data.ByteString.Char8 as C +import System.Time + +myLibFunc :: IO () +myLibFunc = do + getClockTime + let text = "myLibFunc" + C.putStrLn $ C.pack text diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/GlobalBuildDepsNotAdditive2/Check.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/GlobalBuildDepsNotAdditive2/Check.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/GlobalBuildDepsNotAdditive2/Check.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/GlobalBuildDepsNotAdditive2/Check.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,22 @@ +module PackageTests.BuildDeps.GlobalBuildDepsNotAdditive2.Check where + +import Test.HUnit +import PackageTests.PackageTester +import System.FilePath +import Data.List +import Control.Exception +import Prelude hiding (catch) + + +suite :: FilePath -> Test +suite ghcPath = TestCase $ do + let spec = PackageSpec ("PackageTests" "BuildDeps" "GlobalBuildDepsNotAdditive2") [] + result <- cabal_build spec ghcPath + do + assertEqual "cabal build should fail - see test-log.txt" False (successful result) + let sb = "Could not 
find module `Prelude'" + assertBool ("cabal output should be "++show sb) $ + sb `isInfixOf` outputText result + `catch` \exc -> do + putStrLn $ "Cabal result was "++show result + throwIO (exc :: SomeException) diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/GlobalBuildDepsNotAdditive2/GlobalBuildDepsNotAdditive2.cabal cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/GlobalBuildDepsNotAdditive2/GlobalBuildDepsNotAdditive2.cabal --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/GlobalBuildDepsNotAdditive2/GlobalBuildDepsNotAdditive2.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/GlobalBuildDepsNotAdditive2/GlobalBuildDepsNotAdditive2.cabal 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,20 @@ +name: GlobalBuildDepsNotAdditive1 +version: 0.1 +license: BSD3 +cabal-version: >= 1.6 +author: Stephen Blackheath +stability: stable +category: PackageTests +build-type: Simple + +description: + If you specify 'base' in the global build dependencies, then define + an executable without base, it fails to find 'base' for the executable + +--------------------------------------- + +build-depends: base + +Executable lemon + main-is: lemon.hs + build-depends: bytestring, old-time diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/GlobalBuildDepsNotAdditive2/lemon.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/GlobalBuildDepsNotAdditive2/lemon.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/GlobalBuildDepsNotAdditive2/lemon.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/GlobalBuildDepsNotAdditive2/lemon.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,7 @@ +import qualified Data.ByteString.Char8 as C +import System.Time + +main = do + 
getClockTime + let text = "lemon" + C.putStrLn $ C.pack text diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary0/Check.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary0/Check.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary0/Check.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary0/Check.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,22 @@ +module PackageTests.BuildDeps.InternalLibrary0.Check where + +import Control.Monad +import Data.Version +import PackageTests.PackageTester +import System.FilePath +import Test.HUnit + + +suite :: Version -> FilePath -> Test +suite cabalVersion ghcPath = TestCase $ do + let spec = PackageSpec + { directory = "PackageTests" "BuildDeps" "InternalLibrary0" + , configOpts = [] + , distPref = Nothing + } + result <- cabal_build spec ghcPath + assertBuildFailed result + when (cabalVersion >= Version [1, 7] []) $ do + let sb = "library which is defined within the same package." + -- In 1.7 it should tell you how to enable the desired behaviour. 
+ assertOutputContains sb result diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary0/my.cabal cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary0/my.cabal --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary0/my.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary0/my.cabal 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,24 @@ +name: InternalLibrary0 +version: 0.1 +license: BSD3 +cabal-version: >= 1.6 +author: Stephen Blackheath +stability: stable +category: PackageTests +build-type: Simple + +description: + Check that with 'cabal-version:' containing versions less than 1.7, we do *not* + have the new behaviour to allow executables to refer to the library defined + in the same module. + +--------------------------------------- + +Library + exposed-modules: MyLibrary + build-depends: base, bytestring, old-time + +Executable lemon + main-is: lemon.hs + hs-source-dirs: programs + build-depends: base, bytestring, old-time, InternalLibrary0 diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary0/MyLibrary.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary0/MyLibrary.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary0/MyLibrary.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary0/MyLibrary.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,10 @@ +module MyLibrary where + +import qualified Data.ByteString.Char8 as C +import System.Time + +myLibFunc :: IO () +myLibFunc = do + getClockTime + let text = "myLibFunc" + C.putStrLn $ C.pack text diff -Nru 
cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary0/programs/lemon.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary0/programs/lemon.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary0/programs/lemon.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary0/programs/lemon.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,6 @@ +import System.Time +import MyLibrary + +main = do + getClockTime + myLibFunc diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary1/Check.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary1/Check.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary1/Check.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary1/Check.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,16 @@ +module PackageTests.BuildDeps.InternalLibrary1.Check where + +import PackageTests.PackageTester +import System.FilePath +import Test.HUnit + + +suite :: FilePath -> Test +suite ghcPath = TestCase $ do + let spec = PackageSpec + { directory = "PackageTests" "BuildDeps" "InternalLibrary1" + , configOpts = [] + , distPref = Nothing + } + result <- cabal_build spec ghcPath + assertBuildSucceeded result diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary1/my.cabal cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary1/my.cabal --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary1/my.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary1/my.cabal 
2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,23 @@ +name: InternalLibrary1 +version: 0.1 +license: BSD3 +cabal-version: >= 1.7.1 +author: Stephen Blackheath +stability: stable +category: PackageTests +build-type: Simple + +description: + Check for the new (in >= 1.7.1) ability to allow executables to refer to + the library defined in the same module. + +--------------------------------------- + +Library + exposed-modules: MyLibrary + build-depends: base, bytestring, old-time + +Executable lemon + main-is: lemon.hs + hs-source-dirs: programs + build-depends: base, bytestring, old-time, InternalLibrary1 diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary1/MyLibrary.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary1/MyLibrary.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary1/MyLibrary.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary1/MyLibrary.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,10 @@ +module MyLibrary where + +import qualified Data.ByteString.Char8 as C +import System.Time + +myLibFunc :: IO () +myLibFunc = do + getClockTime + let text = "myLibFunc" + C.putStrLn $ C.pack text diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary1/programs/lemon.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary1/programs/lemon.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary1/programs/lemon.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary1/programs/lemon.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,6 @@ +import System.Time +import MyLibrary + +main = do + getClockTime + myLibFunc diff -Nru 
cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary2/Check.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary2/Check.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary2/Check.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary2/Check.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,32 @@ +module PackageTests.BuildDeps.InternalLibrary2.Check where + +import qualified Data.ByteString.Char8 as C +import PackageTests.PackageTester +import System.FilePath +import Test.HUnit + + +suite :: FilePath -> FilePath -> Test +suite ghcPath ghcPkgPath = TestCase $ do + let spec = PackageSpec + { directory = "PackageTests" "BuildDeps" "InternalLibrary2" + , configOpts = [] + , distPref = Nothing + } + let specTI = PackageSpec + { directory = directory spec "to-install" + , configOpts = [] + , distPref = Nothing + } + + unregister "InternalLibrary2" ghcPkgPath + iResult <- cabal_install specTI ghcPath + assertInstallSucceeded iResult + bResult <- cabal_build spec ghcPath + assertBuildSucceeded bResult + unregister "InternalLibrary2" ghcPkgPath + + (_, _, output) <- run (Just $ directory spec) (directory spec "dist" "build" "lemon" "lemon") [] [] + C.appendFile (directory spec "test-log.txt") (C.pack $ "\ndist/build/lemon/lemon\n"++output) + assertEqual "executable should have linked with the internal library" "myLibFunc internal" (concat $ lines output) + diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary2/my.cabal cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary2/my.cabal --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary2/my.cabal 1970-01-01 00:00:00.000000000 +0000 +++ 
cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary2/my.cabal 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,23 @@ +name: InternalLibrary2 +version: 0.1 +license: BSD3 +cabal-version: >= 1.7.1 +author: Stephen Blackheath +stability: stable +category: PackageTests +build-type: Simple + +description: + This test is to make sure that the internal library is preferred by ghc to + an installed one of the same name and version. + +--------------------------------------- + +Library + exposed-modules: MyLibrary + build-depends: base, bytestring, old-time + +Executable lemon + main-is: lemon.hs + hs-source-dirs: programs + build-depends: base, bytestring, old-time, InternalLibrary2 diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary2/MyLibrary.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary2/MyLibrary.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary2/MyLibrary.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary2/MyLibrary.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,10 @@ +module MyLibrary where + +import qualified Data.ByteString.Char8 as C +import System.Time + +myLibFunc :: IO () +myLibFunc = do + getClockTime + let text = "myLibFunc internal" + C.putStrLn $ C.pack text diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary2/programs/lemon.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary2/programs/lemon.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary2/programs/lemon.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary2/programs/lemon.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 
+1,6 @@ +import System.Time +import MyLibrary + +main = do + getClockTime + myLibFunc diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary2/to-install/my.cabal cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary2/to-install/my.cabal --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary2/to-install/my.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary2/to-install/my.cabal 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,18 @@ +name: InternalLibrary2 +version: 0.1 +license: BSD3 +cabal-version: >= 1.6 +author: Stephen Blackheath +stability: stable +category: PackageTests +build-type: Simple + +description: + This test is to make sure that the internal library is preferred by ghc to + an installed one of the same name and version. + +--------------------------------------- + +Library + exposed-modules: MyLibrary + build-depends: base, bytestring, old-time diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary2/to-install/MyLibrary.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary2/to-install/MyLibrary.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary2/to-install/MyLibrary.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary2/to-install/MyLibrary.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,10 @@ +module MyLibrary where + +import qualified Data.ByteString.Char8 as C +import System.Time + +myLibFunc :: IO () +myLibFunc = do + getClockTime + let text = "myLibFunc installed" + C.putStrLn $ C.pack text diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary3/Check.hs 
cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary3/Check.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary3/Check.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary3/Check.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,32 @@ +module PackageTests.BuildDeps.InternalLibrary3.Check where + +import qualified Data.ByteString.Char8 as C +import PackageTests.PackageTester +import System.FilePath +import Test.HUnit + + +suite :: FilePath -> FilePath -> Test +suite ghcPath ghcPkgPath = TestCase $ do + let spec = PackageSpec + { directory = "PackageTests" "BuildDeps" "InternalLibrary3" + , configOpts = [] + , distPref = Nothing + } + let specTI = PackageSpec + { directory = directory spec "to-install" + , configOpts = [] + , distPref = Nothing + } + + unregister "InternalLibrary3" ghcPkgPath + iResult <- cabal_install specTI ghcPath + assertInstallSucceeded iResult + bResult <- cabal_build spec ghcPath + assertBuildSucceeded bResult + unregister "InternalLibrary3"ghcPkgPath + + (_, _, output) <- run (Just $ directory spec) (directory spec "dist" "build" "lemon" "lemon") [] [] + C.appendFile (directory spec "test-log.txt") (C.pack $ "\ndist/build/lemon/lemon\n"++output) + assertEqual "executable should have linked with the internal library" "myLibFunc internal" (concat $ lines output) + diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary3/my.cabal cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary3/my.cabal --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary3/my.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary3/my.cabal 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,23 @@ +name: 
InternalLibrary3 +version: 0.1 +license: BSD3 +cabal-version: >= 1.7.1 +author: Stephen Blackheath +stability: stable +category: PackageTests +build-type: Simple + +description: + This test is to make sure that the internal library is preferred by ghc to + an installed one of the same name, but a *newer* version. + +--------------------------------------- + +Library + exposed-modules: MyLibrary + build-depends: base, bytestring, old-time + +Executable lemon + main-is: lemon.hs + hs-source-dirs: programs + build-depends: base, bytestring, old-time, InternalLibrary3 diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary3/MyLibrary.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary3/MyLibrary.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary3/MyLibrary.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary3/MyLibrary.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,10 @@ +module MyLibrary where + +import qualified Data.ByteString.Char8 as C +import System.Time + +myLibFunc :: IO () +myLibFunc = do + getClockTime + let text = "myLibFunc internal" + C.putStrLn $ C.pack text diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary3/programs/lemon.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary3/programs/lemon.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary3/programs/lemon.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary3/programs/lemon.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,6 @@ +import System.Time +import MyLibrary + +main = do + getClockTime + myLibFunc diff -Nru 
cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary3/to-install/my.cabal cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary3/to-install/my.cabal --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary3/to-install/my.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary3/to-install/my.cabal 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,18 @@ +name: InternalLibrary3 +version: 0.2 +license: BSD3 +cabal-version: >= 1.6 +author: Stephen Blackheath +stability: stable +category: PackageTests +build-type: Simple + +description: + This test is to make sure that the internal library is preferred by ghc to + an installed one of the same name but a *newer* version. + +--------------------------------------- + +Library + exposed-modules: MyLibrary + build-depends: base, bytestring, old-time diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary3/to-install/MyLibrary.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary3/to-install/MyLibrary.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary3/to-install/MyLibrary.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary3/to-install/MyLibrary.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,10 @@ +module MyLibrary where + +import qualified Data.ByteString.Char8 as C +import System.Time + +myLibFunc :: IO () +myLibFunc = do + getClockTime + let text = "myLibFunc installed" + C.putStrLn $ C.pack text diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary4/Check.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary4/Check.hs --- 
cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary4/Check.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary4/Check.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,32 @@ +module PackageTests.BuildDeps.InternalLibrary4.Check where + +import qualified Data.ByteString.Char8 as C +import PackageTests.PackageTester +import System.FilePath +import Test.HUnit + + +suite :: FilePath -> FilePath -> Test +suite ghcPath ghcPkgPath = TestCase $ do + let spec = PackageSpec + { directory = "PackageTests" "BuildDeps" "InternalLibrary4" + , configOpts = [] + , distPref = Nothing + } + let specTI = PackageSpec + { directory = directory spec "to-install" + , configOpts = [] + , distPref = Nothing + } + + unregister "InternalLibrary4" ghcPkgPath + iResult <- cabal_install specTI ghcPath + assertInstallSucceeded iResult + bResult <- cabal_build spec ghcPath + assertBuildSucceeded bResult + unregister "InternalLibrary4" ghcPkgPath + + (_, _, output) <- run (Just $ directory spec) (directory spec "dist" "build" "lemon" "lemon") [] [] + C.appendFile (directory spec "test-log.txt") (C.pack $ "\ndist/build/lemon/lemon\n"++output) + assertEqual "executable should have linked with the installed library" "myLibFunc installed" (concat $ lines output) + diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary4/my.cabal cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary4/my.cabal --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary4/my.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary4/my.cabal 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,23 @@ +name: InternalLibrary4 +version: 0.1 +license: BSD3 +cabal-version: >= 1.7.1 +author: Stephen Blackheath 
+stability: stable +category: PackageTests +build-type: Simple + +description: + This test is to make sure that we can explicitly say we want InternalLibrary4-0.2 + and it will give us the *installed* version 0.2 instead of the internal 0.1. + +--------------------------------------- + +Library + exposed-modules: MyLibrary + build-depends: base, bytestring, old-time + +Executable lemon + main-is: lemon.hs + hs-source-dirs: programs + build-depends: base, bytestring, old-time, InternalLibrary4 >= 0.2 diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary4/MyLibrary.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary4/MyLibrary.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary4/MyLibrary.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary4/MyLibrary.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,10 @@ +module MyLibrary where + +import qualified Data.ByteString.Char8 as C +import System.Time + +myLibFunc :: IO () +myLibFunc = do + getClockTime + let text = "myLibFunc internal" + C.putStrLn $ C.pack text diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary4/programs/lemon.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary4/programs/lemon.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary4/programs/lemon.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary4/programs/lemon.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,6 @@ +import System.Time +import MyLibrary + +main = do + getClockTime + myLibFunc diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary4/to-install/my.cabal 
cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary4/to-install/my.cabal --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary4/to-install/my.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary4/to-install/my.cabal 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,18 @@ +name: InternalLibrary4 +version: 0.2 +license: BSD3 +cabal-version: >= 1.6 +author: Stephen Blackheath +stability: stable +category: PackageTests +build-type: Simple + +description: + This test is to make sure that the internal library is preferred by ghc to + an installed one of the same name but a *newer* version. + +--------------------------------------- + +Library + exposed-modules: MyLibrary + build-depends: base, bytestring, old-time diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary4/to-install/MyLibrary.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary4/to-install/MyLibrary.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary4/to-install/MyLibrary.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/InternalLibrary4/to-install/MyLibrary.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,10 @@ +module MyLibrary where + +import qualified Data.ByteString.Char8 as C +import System.Time + +myLibFunc :: IO () +myLibFunc = do + getClockTime + let text = "myLibFunc installed" + C.putStrLn $ C.pack text diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/SameDepsAllRound/Check.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/SameDepsAllRound/Check.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/SameDepsAllRound/Check.hs 1970-01-01 
00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/SameDepsAllRound/Check.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,21 @@ +module PackageTests.BuildDeps.SameDepsAllRound.Check where + +import Test.HUnit +import PackageTests.PackageTester +import System.FilePath +import qualified Control.Exception as E + + +suite :: FilePath -> Test +suite ghcPath = TestCase $ do + let spec = PackageSpec + { directory = "PackageTests" "BuildDeps" "SameDepsAllRound" + , configOpts = [] + , distPref = Nothing + } + result <- cabal_build spec ghcPath + do + assertEqual "cabal build should succeed - see test-log.txt" True (successful result) + `E.catch` \exc -> do + putStrLn $ "Cabal result was "++show result + E.throwIO (exc :: E.SomeException) diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/SameDepsAllRound/lemon.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/SameDepsAllRound/lemon.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/SameDepsAllRound/lemon.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/SameDepsAllRound/lemon.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,7 @@ +import qualified Data.ByteString.Char8 as C +import System.Time + +main = do + getClockTime + let text = "lemon" + C.putStrLn $ C.pack text diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/SameDepsAllRound/MyLibrary.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/SameDepsAllRound/MyLibrary.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/SameDepsAllRound/MyLibrary.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/SameDepsAllRound/MyLibrary.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,10 @@ +module MyLibrary 
where + +import qualified Data.ByteString.Char8 as C +import System.Time + +myLibFunc :: IO () +myLibFunc = do + getClockTime + let text = "myLibFunc" + C.putStrLn $ C.pack text diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/SameDepsAllRound/pineapple.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/SameDepsAllRound/pineapple.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/SameDepsAllRound/pineapple.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/SameDepsAllRound/pineapple.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,7 @@ +import qualified Data.ByteString.Char8 as C +import System.Time + +main = do + getClockTime + let text = "pineapple" + C.putStrLn $ C.pack text diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/SameDepsAllRound/SameDepsAllRound.cabal cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/SameDepsAllRound/SameDepsAllRound.cabal --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/SameDepsAllRound/SameDepsAllRound.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/SameDepsAllRound/SameDepsAllRound.cabal 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,31 @@ +name: SameDepsAllRound +version: 0.1 +license: BSD3 +cabal-version: >= 1.6 +author: Stephen Blackheath +stability: stable +synopsis: Same dependencies all round +category: PackageTests +build-type: Simple + +description: + Check for the "old build-dep behaviour" namely that we get the same + package dependencies on all build targets, even if different ones + were specified for different targets + . + Here all .hs files use the three packages mentioned, so this shows + that build-depends is not target-specific. 
This is the behaviour + we want when cabal-version contains versions less than 1.7. + +--------------------------------------- + +Library + exposed-modules: MyLibrary + build-depends: base, bytestring + +Executable lemon + main-is: lemon.hs + build-depends: old-time + +Executable pineapple + main-is: pineapple.hs diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps1/Check.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps1/Check.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps1/Check.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps1/Check.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,29 @@ +module PackageTests.BuildDeps.TargetSpecificDeps1.Check where + +import Test.HUnit +import PackageTests.PackageTester +import System.FilePath +import Data.List +import qualified Control.Exception as E +import Text.Regex.Posix + + +suite :: FilePath -> Test +suite ghcPath = TestCase $ do + let spec = PackageSpec + { directory = "PackageTests" "BuildDeps" "TargetSpecificDeps1" + , configOpts = [] + , distPref = Nothing + } + result <- cabal_build spec ghcPath + do + assertEqual "cabal build should fail - see test-log.txt" False (successful result) + assertBool "error should be in MyLibrary.hs" $ + "MyLibrary.hs:" `isInfixOf` outputText result + assertBool "error should be \"Could not find module `System.Time\"" $ + (intercalate " " $ lines $ outputText result) + =~ "Could not find module.*System.Time" + + `E.catch` \exc -> do + putStrLn $ "Cabal result was "++show result + E.throwIO (exc :: E.SomeException) diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps1/lemon.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps1/lemon.hs --- 
cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps1/lemon.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps1/lemon.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,7 @@ +import qualified Data.ByteString.Char8 as C +import System.Time + +main = do + getClockTime + let text = "lemon" + C.putStrLn $ C.pack text diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps1/my.cabal cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps1/my.cabal --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps1/my.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps1/my.cabal 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,22 @@ +name: TargetSpecificDeps1 +version: 0.1 +license: BSD3 +cabal-version: >= 1.7.1 +author: Stephen Blackheath +stability: stable +category: PackageTests +build-type: Simple + +description: + Check for the new build-dep behaviour, where build-depends are + handled specifically for each target + +--------------------------------------- + +Library + exposed-modules: MyLibrary + build-depends: base, bytestring + +Executable lemon + main-is: lemon.hs + build-depends: base, bytestring, old-time diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps1/MyLibrary.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps1/MyLibrary.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps1/MyLibrary.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps1/MyLibrary.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 
+1,10 @@ +module MyLibrary where + +import qualified Data.ByteString.Char8 as C +import System.Time + +myLibFunc :: IO () +myLibFunc = do + getClockTime + let text = "myLibFunc" + C.putStrLn $ C.pack text diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps2/Check.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps2/Check.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps2/Check.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps2/Check.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,21 @@ +module PackageTests.BuildDeps.TargetSpecificDeps2.Check where + +import Test.HUnit +import PackageTests.PackageTester +import System.FilePath +import qualified Control.Exception as E + + +suite :: FilePath -> Test +suite ghcPath = TestCase $ do + let spec = PackageSpec + { directory = "PackageTests" "BuildDeps" "TargetSpecificDeps2" + , configOpts = [] + , distPref = Nothing + } + result <- cabal_build spec ghcPath + do + assertEqual "cabal build should succeed - see test-log.txt" True (successful result) + `E.catch` \exc -> do + putStrLn $ "Cabal result was "++show result + E.throwIO (exc :: E.SomeException) diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps2/lemon.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps2/lemon.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps2/lemon.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps2/lemon.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,5 @@ +import qualified Data.ByteString.Char8 as C + +main = do + let text = "lemon" + C.putStrLn $ C.pack text diff -Nru 
cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps2/my.cabal cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps2/my.cabal --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps2/my.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps2/my.cabal 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,24 @@ +name: TargetSpecificDeps1 +version: 0.1 +license: BSD3 +cabal-version: >= 1.7.1 +author: Stephen Blackheath +stability: stable +category: PackageTests +build-type: Simple + +description: + Check for the new build-dep behaviour, where build-depends are + handled specifically for each target + This one is a control against TargetSpecificDeps1 - it is correct and should + succeed. + +--------------------------------------- + +Library + exposed-modules: MyLibrary + build-depends: base, bytestring, old-time + +Executable lemon + main-is: lemon.hs + build-depends: base, bytestring diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps2/MyLibrary.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps2/MyLibrary.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps2/MyLibrary.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps2/MyLibrary.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,10 @@ +module MyLibrary where + +import qualified Data.ByteString.Char8 as C +import System.Time + +myLibFunc :: IO () +myLibFunc = do + getClockTime + let text = "myLibFunc" + C.putStrLn $ C.pack text diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps3/Check.hs 
cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps3/Check.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps3/Check.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps3/Check.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,28 @@ +module PackageTests.BuildDeps.TargetSpecificDeps3.Check where + +import Test.HUnit +import PackageTests.PackageTester +import System.FilePath +import Data.List +import qualified Control.Exception as E +import Text.Regex.Posix + + +suite :: FilePath -> Test +suite ghcPath = TestCase $ do + let spec = PackageSpec + { directory = "PackageTests" "BuildDeps" "TargetSpecificDeps3" + , configOpts = [] + , distPref = Nothing + } + result <- cabal_build spec ghcPath + do + assertEqual "cabal build should fail - see test-log.txt" False (successful result) + assertBool "error should be in lemon.hs" $ + "lemon.hs:" `isInfixOf` outputText result + assertBool "error should be \"Could not find module `System.Time\"" $ + (intercalate " " $ lines $ outputText result) + =~ "Could not find module.*System.Time" + `E.catch` \exc -> do + putStrLn $ "Cabal result was "++show result + E.throwIO (exc :: E.SomeException) diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps3/lemon.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps3/lemon.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps3/lemon.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps3/lemon.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,7 @@ +import qualified Data.ByteString.Char8 as C +import System.Time + +main = do + getClockTime + let text = "lemon" + C.putStrLn $ C.pack text diff -Nru 
cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps3/my.cabal cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps3/my.cabal --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps3/my.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps3/my.cabal 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,22 @@ +name: test +version: 0.1 +license: BSD3 +cabal-version: >= 1.7.1 +author: Stephen Blackheath +stability: stable +category: PackageTests +build-type: Simple + +description: + Check for the new build-dep behaviour, where build-depends are + handled specifically for each target + +--------------------------------------- + +Library + exposed-modules: MyLibrary + build-depends: base, bytestring, old-time + +Executable lemon + main-is: lemon.hs + build-depends: base, bytestring diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps3/MyLibrary.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps3/MyLibrary.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps3/MyLibrary.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildDeps/TargetSpecificDeps3/MyLibrary.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,10 @@ +module MyLibrary where + +import qualified Data.ByteString.Char8 as C +import System.Time + +myLibFunc :: IO () +myLibFunc = do + getClockTime + let text = "myLibFunc" + C.putStrLn $ C.pack text diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildTestSuiteDetailedV09/Check.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildTestSuiteDetailedV09/Check.hs --- 
cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildTestSuiteDetailedV09/Check.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildTestSuiteDetailedV09/Check.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,18 @@ +module PackageTests.BuildTestSuiteDetailedV09.Check where + +import Test.HUnit +import System.FilePath (()) + +import PackageTests.PackageTester + +suite :: PackageSpec -> FilePath -> Test +suite inplaceSpec ghcPath = TestCase $ do + let dir = "PackageTests" "BuildTestSuiteDetailedV09" + spec = inplaceSpec + { directory = dir + , configOpts = "--enable-tests" : configOpts inplaceSpec + } + confResult <- cabal_configure spec ghcPath + assertEqual "configure failed!" (successful confResult) True + buildResult <- cabal_build spec ghcPath + assertEqual "build failed!" (successful buildResult) True diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildTestSuiteDetailedV09/Dummy.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildTestSuiteDetailedV09/Dummy.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildTestSuiteDetailedV09/Dummy.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildTestSuiteDetailedV09/Dummy.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,6 @@ +module Dummy where + +import Distribution.TestSuite (Test) + +tests :: IO [Test] +tests = return [] diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildTestSuiteDetailedV09/my.cabal cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildTestSuiteDetailedV09/my.cabal --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildTestSuiteDetailedV09/my.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/BuildTestSuiteDetailedV09/my.cabal 2016-06-02 07:15:38.000000000 
+0000 @@ -0,0 +1,19 @@ +name: BuildTestSuiteDetailedV09 +version: 0.1 +license: BSD3 +author: Thomas Tuegel +stability: stable +category: PackageTests +build-type: Simple + +description: + Check that Cabal can build test suites of type detailed-0.9. + +Library + exposed-modules: Dummy + build-depends: base, Cabal + +test-suite test-Dummy + type: detailed-0.9 + test-module: Dummy + build-depends: base, Cabal diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/CMain/Bar.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/CMain/Bar.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/CMain/Bar.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/CMain/Bar.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,7 @@ +{-# LANGUAGE ForeignFunctionInterface #-} +module Bar where + +bar :: IO () +bar = return () + +foreign export ccall bar :: IO () diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/CMain/Check.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/CMain/Check.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/CMain/Check.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/CMain/Check.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,20 @@ +module PackageTests.CMain.Check + ( checkBuild + ) where + +import Test.HUnit +import System.FilePath +import PackageTests.PackageTester + +dir :: FilePath +dir = "PackageTests" "CMain" + +checkBuild :: FilePath -> Test +checkBuild ghcPath = TestCase $ do + let spec = PackageSpec + { directory = dir + , distPref = Nothing + , configOpts = [] + } + buildResult <- cabal_build spec ghcPath + assertBuildSucceeded buildResult diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/CMain/foo.c cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/CMain/foo.c --- 
cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/CMain/foo.c 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/CMain/foo.c 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,13 @@ +#include +#include "HsFFI.h" + +#ifdef __GLASGOW_HASKELL__ +#include "Bar_stub.h" +#endif + +int main(int argc, char **argv) { + hs_init(&argc, &argv); + bar(); + printf("Hello world!"); + return 0; +} diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/CMain/my.cabal cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/CMain/my.cabal --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/CMain/my.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/CMain/my.cabal 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,10 @@ +name: my +version: 0.1 +license: BSD3 +cabal-version: >= 1.9.2 +build-type: Simple + +executable foo + main-is: foo.c + other-modules: Bar + build-depends: base diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/CMain/Setup.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/CMain/Setup.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/CMain/Setup.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/CMain/Setup.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,3 @@ +import Distribution.Simple +main = defaultMain + diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/DeterministicAr/Check.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/DeterministicAr/Check.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/DeterministicAr/Check.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/DeterministicAr/Check.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,141 @@ 
+{-# LANGUAGE CPP #-} +{-# LANGUAGE OverloadedStrings #-} + +module PackageTests.DeterministicAr.Check where + +import Control.Monad +import qualified Data.ByteString as BS +import qualified Data.ByteString.Char8 as BS8 +import Data.Char (isSpace) +import Data.List +#if __GLASGOW_HASKELL__ < 710 +import Data.Traversable +#endif +import PackageTests.PackageTester +import System.Exit +import System.FilePath +import System.IO +import Test.HUnit (Assertion, Test (TestCase), assertFailure) + +import Distribution.Compiler (CompilerFlavor(..), CompilerId(..)) +import Distribution.Package (packageKeyHash) +import Distribution.Version (Version(..)) +import Distribution.Simple.Compiler (compilerId) +import Distribution.Simple.Configure (getPersistBuildConfig) +import Distribution.Simple.LocalBuildInfo (LocalBuildInfo, compiler, pkgKey) + +-- Perhaps these should live in PackageTester. + +-- For a polymorphic @IO a@ rather than @Assertion = IO ()@. +assertFailure' :: String -> IO a +assertFailure' msg = assertFailure msg >> return {-unpossible!-}undefined + +ghcPkg_field :: String -> String -> FilePath -> IO [FilePath] +ghcPkg_field libraryName fieldName ghcPkgPath = do + (cmd, exitCode, raw) <- run Nothing ghcPkgPath [] + ["--user", "field", libraryName, fieldName] + let output = filter ('\r' /=) raw -- Windows + -- copypasta of PackageTester.requireSuccess + unless (exitCode == ExitSuccess) . 
assertFailure $ + "Command " ++ cmd ++ " failed.\n" ++ "output: " ++ output + + let prefix = fieldName ++ ": " + case traverse (stripPrefix prefix) (lines output) of + Nothing -> assertFailure' $ "Command " ++ cmd ++ " failed: expected " + ++ show prefix ++ " prefix on every line.\noutput: " ++ output + Just fields -> return fields + +ghcPkg_field1 :: String -> String -> FilePath -> IO FilePath +ghcPkg_field1 libraryName fieldName ghcPkgPath = do + fields <- ghcPkg_field libraryName fieldName ghcPkgPath + case fields of + [field] -> return field + _ -> assertFailure' $ "Command ghc-pkg field failed: " + ++ "output not a single line.\noutput: " ++ show fields + +------------------------------------------------------------------------ + +this :: String +this = "DeterministicAr" + +suite :: FilePath -> FilePath -> Test +suite ghcPath ghcPkgPath = TestCase $ do + let dir = "PackageTests" this + let spec = PackageSpec + { directory = dir + , configOpts = [] + , distPref = Nothing + } + + unregister this ghcPkgPath + iResult <- cabal_install spec ghcPath + assertInstallSucceeded iResult + + let distBuild = dir "dist" "build" + libdir <- ghcPkg_field1 this "library-dirs" ghcPkgPath + lbi <- getPersistBuildConfig (dir "dist") + mapM_ (checkMetadata lbi) [distBuild, libdir] + unregister this ghcPkgPath + +-- Almost a copypasta of Distribution.Simple.Program.Ar.wipeMetadata +checkMetadata :: LocalBuildInfo -> FilePath -> Assertion +checkMetadata lbi dir = withBinaryFile path ReadMode $ \ h -> do + hFileSize h >>= checkArchive h + where + path = dir "libHS" ++ this ++ "-0" + ++ (if ghc_7_10 then ("-" ++ packageKeyHash (pkgKey lbi)) else "") + ++ ".a" + + ghc_7_10 = case compilerId (compiler lbi) of + CompilerId GHC version | version >= Version [7, 10] [] -> True + _ -> False + + checkError msg = assertFailure' $ + "PackageTests.DeterministicAr.checkMetadata: " ++ msg ++ + " in " ++ path + archLF = "!\x0a" -- global magic, 8 bytes + x60LF = "\x60\x0a" -- header magic, 2 bytes 
+ metadata = BS.concat + [ "0 " -- mtime, 12 bytes + , "0 " -- UID, 6 bytes + , "0 " -- GID, 6 bytes + , "0644 " -- mode, 8 bytes + ] + headerSize = 60 + + -- http://en.wikipedia.org/wiki/Ar_(Unix)#File_format_details + checkArchive :: Handle -> Integer -> IO () + checkArchive h archiveSize = do + global <- BS.hGet h (BS.length archLF) + unless (global == archLF) $ checkError "Bad global header" + checkHeader (toInteger $ BS.length archLF) + + where + checkHeader :: Integer -> IO () + checkHeader offset = case compare offset archiveSize of + EQ -> return () + GT -> checkError (atOffset "Archive truncated") + LT -> do + header <- BS.hGet h headerSize + unless (BS.length header == headerSize) $ + checkError (atOffset "Short header") + let magic = BS.drop 58 header + unless (magic == x60LF) . checkError . atOffset $ + "Bad magic " ++ show magic ++ " in header" + + unless (metadata == BS.take 32 (BS.drop 16 header)) + . checkError . atOffset $ "Metadata has changed" + + let size = BS.take 10 $ BS.drop 48 header + objSize <- case reads (BS8.unpack size) of + [(n, s)] | all isSpace s -> return n + _ -> checkError (atOffset "Bad file size in header") + + let nextHeader = offset + toInteger headerSize + + -- Odd objects are padded with an extra '\x0a' + if odd objSize then objSize + 1 else objSize + hSeek h AbsoluteSeek nextHeader + checkHeader nextHeader + + where + atOffset msg = msg ++ " at offset " ++ show offset diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/DeterministicAr/Lib.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/DeterministicAr/Lib.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/DeterministicAr/Lib.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/DeterministicAr/Lib.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,5 @@ +module Lib where + +dummy :: IO () +dummy = return () + diff -Nru 
cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/DeterministicAr/my.cabal cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/DeterministicAr/my.cabal --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/DeterministicAr/my.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/DeterministicAr/my.cabal 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,17 @@ +name: DeterministicAr +version: 0 +license: BSD3 +cabal-version: >= 1.9.1 +author: Liyang HU +stability: stable +category: PackageTests +build-type: Simple + +description: + Ensure our GNU ar -D emulation (#1537) works as advertised: check that + all metadata in the resulting .a archive match the default. + +Library + exposed-modules: Lib + build-depends: base + diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/EmptyLib/Check.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/EmptyLib/Check.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/EmptyLib/Check.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/EmptyLib/Check.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,16 @@ +module PackageTests.EmptyLib.Check where + +import PackageTests.PackageTester +import System.FilePath +import Test.HUnit + +-- See https://github.com/haskell/cabal/issues/1241 +emptyLib :: FilePath -> Test +emptyLib ghcPath = TestCase $ do + let spec = PackageSpec + { directory = "PackageTests" "EmptyLib" "empty" + , configOpts = [] + , distPref = Nothing + } + result <- cabal_build spec ghcPath + assertBuildSucceeded result diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/EmptyLib/empty/empty.cabal cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/EmptyLib/empty/empty.cabal --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/EmptyLib/empty/empty.cabal 
1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/EmptyLib/empty/empty.cabal 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,6 @@ +name: emptyLib +Cabal-version: >= 1.2 +version: 1.0 +build-type: Simple + +Library diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/Haddock/Check.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/Haddock/Check.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/Haddock/Check.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/Haddock/Check.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,42 @@ +module PackageTests.Haddock.Check (suite) where + +import Control.Monad (unless, when) +import Data.List (isInfixOf) + +import System.FilePath (()) +import System.Directory (doesDirectoryExist, removeDirectoryRecursive) +import Test.HUnit (Assertion, Test (TestCase), assertFailure) + +import Distribution.Simple.Utils (withFileContents) +import PackageTests.PackageTester + (PackageSpec(..), assertHaddockSucceeded, cabal_haddock) + +this :: String +this = "Haddock" + +suite :: FilePath -> Test +suite ghcPath = TestCase $ do + let dir = "PackageTests" this + haddocksDir = dir "dist" "doc" "html" "Haddock" + spec = PackageSpec + { directory = dir + , configOpts = [] + , distPref = Nothing + } + + haddocksDirExists <- doesDirectoryExist haddocksDir + when haddocksDirExists (removeDirectoryRecursive haddocksDir) + hResult <- cabal_haddock spec [] ghcPath + assertHaddockSucceeded hResult + + let docFiles = map (haddocksDir ) + ["CPP.html", "Literate.html", "NoCPP.html", "Simple.html"] + mapM_ (assertFindInFile "For hiding needles.") docFiles + +assertFindInFile :: String -> FilePath -> Assertion +assertFindInFile needle path = + withFileContents path + (\contents -> + unless (needle `isInfixOf` contents) + (assertFailure ("expected: " ++ needle ++ "\n" ++ + " in file: " ++ 
path))) diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/Haddock/CPP.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/Haddock/CPP.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/Haddock/CPP.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/Haddock/CPP.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,9 @@ +{-# LANGUAGE CPP #-} + +module CPP where + +#define HIDING hiding +#define NEEDLES needles + +-- | For HIDING NEEDLES. +data Haystack = Haystack diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/Haddock/Literate.lhs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/Haddock/Literate.lhs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/Haddock/Literate.lhs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/Haddock/Literate.lhs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,4 @@ +> module Literate where + +> -- | For hiding needles. +> data Haystack = Haystack diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/Haddock/my.cabal cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/Haddock/my.cabal --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/Haddock/my.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/Haddock/my.cabal 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,16 @@ +name: Haddock +version: 0.1 +license: BSD3 +author: Iain Nicol +stability: stable +category: PackageTests +build-type: Simple +Cabal-version: >= 1.2 + +description: + Check that Cabal successfully invokes Haddock. 
+ +Library + exposed-modules: CPP, Literate, NoCPP, Simple + other-extensions: CPP + build-depends: base diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/Haddock/NoCPP.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/Haddock/NoCPP.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/Haddock/NoCPP.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/Haddock/NoCPP.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,8 @@ +module NoCPP (Haystack) where + +-- | For hiding needles. +data Haystack = Haystack + +-- | Causes a build failure if the CPP language extension is enabled. +stringGap = "Foo\ +\Bar" diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/Haddock/Simple.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/Haddock/Simple.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/Haddock/Simple.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/Haddock/Simple.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,4 @@ +module Simple where + +-- | For hiding needles. 
+data Haystack = Haystack diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/OrderFlags/Check.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/OrderFlags/Check.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/OrderFlags/Check.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/OrderFlags/Check.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,24 @@ +module PackageTests.OrderFlags.Check where + +import Test.HUnit +import PackageTests.PackageTester +import System.FilePath +import Control.Exception + +#if !MIN_VERSION_base(4,6,0) +import Prelude hiding (catch) +#endif + +suite :: FilePath -> Test +suite ghcPath = TestCase $ do + let spec = PackageSpec + { directory = "PackageTests" "OrderFlags" + , configOpts = [] + , distPref = Nothing + } + result <- cabal_build spec ghcPath + do + assertEqual "cabal build should succeed - see test-log.txt" True (successful result) + `catch` \exc -> do + putStrLn $ "Cabal result was "++show result + throwIO (exc :: SomeException) diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/OrderFlags/Foo.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/OrderFlags/Foo.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/OrderFlags/Foo.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/OrderFlags/Foo.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,8 @@ +module Foo where + +x :: IO Int +x = return 5 + +f :: IO Int +f = do x + return 3 diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/OrderFlags/my.cabal cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/OrderFlags/my.cabal --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/OrderFlags/my.cabal 1970-01-01 00:00:00.000000000 +0000 +++ 
cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/OrderFlags/my.cabal 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,20 @@ +name: OrderFlags +version: 0.1 +license: BSD3 +author: Oleksandr Manzyuk +stability: stable +category: PackageTests +build-type: Simple +cabal-version: >=1.9.2 + +description: + Check that Cabal correctly orders flags that are passed to GHC. + +library + exposed-modules: Foo + build-depends: base + + ghc-options: -Wall -Werror + + if impl(ghc >= 6.12.1) + ghc-options: -fno-warn-unused-do-bind diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/PackageTester.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/PackageTester.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/PackageTester.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/PackageTester.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,308 @@ +{-# LANGUAGE ScopedTypeVariables #-} + +-- You can set the following VERBOSE environment variable to control +-- the verbosity of the output generated by this module. +module PackageTests.PackageTester + ( PackageSpec(..) + , Success(..) + , Result(..) 
+ + -- * Running cabal commands + , cabal_configure + , cabal_build + , cabal_haddock + , cabal_test + , cabal_bench + , cabal_install + , unregister + , compileSetup + , run + + -- * Test helpers + , assertBuildSucceeded + , assertBuildFailed + , assertHaddockSucceeded + , assertTestSucceeded + , assertInstallSucceeded + , assertOutputContains + , assertOutputDoesNotContain + ) where + +import qualified Control.Exception.Extensible as E +import Control.Monad +import qualified Data.ByteString.Char8 as C +import Data.List +import Data.Maybe +import System.Directory (canonicalizePath, doesFileExist, getCurrentDirectory) +import System.Environment (getEnv) +import System.Exit (ExitCode(ExitSuccess)) +import System.FilePath +import System.IO (hIsEOF, hGetChar, hClose) +import System.IO.Error (isDoesNotExistError) +import System.Process (runProcess, waitForProcess) +import Test.HUnit (Assertion, assertFailure) + +import Distribution.Compat.CreatePipe (createPipe) +import Distribution.Simple.BuildPaths (exeExtension) +import Distribution.Simple.Program.Run (getEffectiveEnvironment) +import Distribution.Simple.Utils (printRawCommandAndArgsAndEnv) +import Distribution.ReadE (readEOrFail) +import Distribution.Verbosity (Verbosity, flagToVerbosity, normal) + +data PackageSpec = PackageSpec + { directory :: FilePath + , distPref :: Maybe FilePath + , configOpts :: [String] + } + +data Success = Failure + | ConfigureSuccess + | BuildSuccess + | HaddockSuccess + | InstallSuccess + | TestSuccess + | BenchSuccess + deriving (Eq, Show) + +data Result = Result + { successful :: Bool + , success :: Success + , outputText :: String + } deriving Show + +nullResult :: Result +nullResult = Result True Failure "" + +------------------------------------------------------------------------ +-- * Running cabal commands + +recordRun :: (String, ExitCode, String) -> Success -> Result -> Result +recordRun (cmd, exitCode, exeOutput) thisSucc res = + res { successful = successful res && exitCode 
== ExitSuccess + , success = if exitCode == ExitSuccess then thisSucc + else success res + , outputText = + (if null $ outputText res then "" else outputText res ++ "\n") ++ + cmd ++ "\n" ++ exeOutput + } + +cabal_configure :: PackageSpec -> FilePath -> IO Result +cabal_configure spec ghcPath = do + res <- doCabalConfigure spec ghcPath + record spec res + return res + +doCabalConfigure :: PackageSpec -> FilePath -> IO Result +doCabalConfigure spec ghcPath = do + cleanResult@(_, _, _) <- cabal spec [] ["clean"] ghcPath + requireSuccess cleanResult + res <- cabal spec [] + (["configure", "--user", "-w", ghcPath] ++ configOpts spec) + ghcPath + return $ recordRun res ConfigureSuccess nullResult + +doCabalBuild :: PackageSpec -> FilePath -> IO Result +doCabalBuild spec ghcPath = do + configResult <- doCabalConfigure spec ghcPath + if successful configResult + then do + res <- cabal spec [] ["build", "-v"] ghcPath + return $ recordRun res BuildSuccess configResult + else + return configResult + +cabal_build :: PackageSpec -> FilePath -> IO Result +cabal_build spec ghcPath = do + res <- doCabalBuild spec ghcPath + record spec res + return res + +cabal_haddock :: PackageSpec -> [String] -> FilePath -> IO Result +cabal_haddock spec extraArgs ghcPath = do + res <- doCabalHaddock spec extraArgs ghcPath + record spec res + return res + +doCabalHaddock :: PackageSpec -> [String] -> FilePath -> IO Result +doCabalHaddock spec extraArgs ghcPath = do + configResult <- doCabalConfigure spec ghcPath + if successful configResult + then do + res <- cabal spec [] ("haddock" : extraArgs) ghcPath + return $ recordRun res HaddockSuccess configResult + else + return configResult + +unregister :: String -> FilePath -> IO () +unregister libraryName ghcPkgPath = do + res@(_, _, output) <- run Nothing ghcPkgPath [] ["unregister", "--user", libraryName] + if "cannot find package" `isInfixOf` output + then return () + else requireSuccess res + +-- | Install this library in the user area 
+cabal_install :: PackageSpec -> FilePath -> IO Result +cabal_install spec ghcPath = do + buildResult <- doCabalBuild spec ghcPath + res <- if successful buildResult + then do + res <- cabal spec [] ["install"] ghcPath + return $ recordRun res InstallSuccess buildResult + else + return buildResult + record spec res + return res + +cabal_test :: PackageSpec -> [(String, Maybe String)] -> [String] -> FilePath -> IO Result +cabal_test spec envOverrides extraArgs ghcPath = do + res <- cabal spec envOverrides ("test" : extraArgs) ghcPath + let r = recordRun res TestSuccess nullResult + record spec r + return r + +cabal_bench :: PackageSpec -> [String] -> FilePath -> IO Result +cabal_bench spec extraArgs ghcPath = do + res <- cabal spec [] ("bench" : extraArgs) ghcPath + let r = recordRun res BenchSuccess nullResult + record spec r + return r + +compileSetup :: FilePath -> FilePath -> IO () +compileSetup packageDir ghcPath = do + wd <- getCurrentDirectory + r <- run (Just $ packageDir) ghcPath [] + [ "--make" +-- HPC causes trouble -- see #1012 +-- , "-fhpc" + , "-package-conf " ++ wd "../dist/package.conf.inplace" + , "Setup.hs" + ] + requireSuccess r + +-- | Returns the command that was issued, the return code, and the output text. +cabal :: PackageSpec -> [(String, Maybe String)] -> [String] -> FilePath -> IO (String, ExitCode, String) +cabal spec envOverrides cabalArgs_ ghcPath = do + let cabalArgs = case distPref spec of + Nothing -> cabalArgs_ + Just dist -> ("--builddir=" ++ dist) : cabalArgs_ + customSetup <- doesFileExist (directory spec "Setup.hs") + if customSetup + then do + compileSetup (directory spec) ghcPath + path <- canonicalizePath $ directory spec "Setup" + run (Just $ directory spec) path envOverrides cabalArgs + else do + -- Use shared Setup executable (only for Simple build types). 
+ path <- canonicalizePath "Setup" + run (Just $ directory spec) path envOverrides cabalArgs + +-- | Returns the command that was issued, the return code, and the output text +run :: Maybe FilePath -> String -> [(String, Maybe String)] -> [String] -> IO (String, ExitCode, String) +run cwd path envOverrides args = do + verbosity <- getVerbosity + -- path is relative to the current directory; canonicalizePath makes it + -- absolute, so that runProcess will find it even when changing directory. + path' <- do pathExists <- doesFileExist path + canonicalizePath (if pathExists then path else path <.> exeExtension) + menv <- getEffectiveEnvironment envOverrides + + printRawCommandAndArgsAndEnv verbosity path' args menv + (readh, writeh) <- createPipe + pid <- runProcess path' args cwd menv Nothing (Just writeh) (Just writeh) + + -- fork off a thread to start consuming the output + out <- suckH [] readh + hClose readh + + -- wait for the program to terminate + exitcode <- waitForProcess pid + let fullCmd = unwords (path' : args) + return ("\"" ++ fullCmd ++ "\" in " ++ fromMaybe "" cwd, exitcode, out) + where + suckH output h = do + eof <- hIsEOF h + if eof + then return (reverse output) + else do + c <- hGetChar h + suckH (c:output) h + + +requireSuccess :: (String, ExitCode, String) -> IO () +requireSuccess (cmd, exitCode, output) = + unless (exitCode == ExitSuccess) $ + assertFailure $ "Command " ++ cmd ++ " failed.\n" ++ + "output: " ++ output + +record :: PackageSpec -> Result -> IO () +record spec res = do + C.writeFile (directory spec "test-log.txt") (C.pack $ outputText res) + +------------------------------------------------------------------------ +-- * Test helpers + +assertBuildSucceeded :: Result -> Assertion +assertBuildSucceeded result = unless (successful result) $ + assertFailure $ + "expected: \'setup build\' should succeed\n" ++ + " output: " ++ outputText result + +assertBuildFailed :: Result -> Assertion +assertBuildFailed result = when (successful 
result) $ + assertFailure $ + "expected: \'setup build\' should fail\n" ++ + " output: " ++ outputText result + +assertHaddockSucceeded :: Result -> Assertion +assertHaddockSucceeded result = unless (successful result) $ + assertFailure $ + "expected: \'setup haddock\' should succeed\n" ++ + " output: " ++ outputText result + +assertTestSucceeded :: Result -> Assertion +assertTestSucceeded result = unless (successful result) $ + assertFailure $ + "expected: \'setup test\' should succeed\n" ++ + " output: " ++ outputText result + +assertInstallSucceeded :: Result -> Assertion +assertInstallSucceeded result = unless (successful result) $ + assertFailure $ + "expected: \'setup install\' should succeed\n" ++ + " output: " ++ outputText result + +assertOutputContains :: String -> Result -> Assertion +assertOutputContains needle result = + unless (needle `isInfixOf` (concatOutput output)) $ + assertFailure $ + " expected: " ++ needle ++ "\n" ++ + " in output: " ++ output ++ "" + where output = outputText result + +assertOutputDoesNotContain :: String -> Result -> Assertion +assertOutputDoesNotContain needle result = + when (needle `isInfixOf` (concatOutput output)) $ + assertFailure $ + "unexpected: " ++ needle ++ + " in output: " ++ output + where output = outputText result + +-- | Replace line breaks with spaces, correctly handling "\r\n". +concatOutput :: String -> String +concatOutput = unwords . lines . filter ((/=) '\r') + +------------------------------------------------------------------------ +-- Verbosity + +lookupEnv :: String -> IO (Maybe String) +lookupEnv name = + (fmap Just $ getEnv name) + `E.catch` \ (e :: IOError) -> + if isDoesNotExistError e + then return Nothing + else E.throw e + +-- TODO: Convert to a "-v" flag instead. 
+getVerbosity :: IO Verbosity +getVerbosity = do + maybe normal (readEOrFail flagToVerbosity) `fmap` lookupEnv "VERBOSE" diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/PathsModule/Executable/Check.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/PathsModule/Executable/Check.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/PathsModule/Executable/Check.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/PathsModule/Executable/Check.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,16 @@ +module PackageTests.PathsModule.Executable.Check (suite) where + +import PackageTests.PackageTester + (PackageSpec(..), assertBuildSucceeded, cabal_build) +import System.FilePath +import Test.HUnit + +suite :: FilePath -> Test +suite ghcPath = TestCase $ do + let spec = PackageSpec + { directory = "PackageTests" "PathsModule" "Executable" + , distPref = Nothing + , configOpts = [] + } + result <- cabal_build spec ghcPath + assertBuildSucceeded result diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/PathsModule/Executable/Main.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/PathsModule/Executable/Main.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/PathsModule/Executable/Main.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/PathsModule/Executable/Main.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,8 @@ +module Main where + +import Paths_PathsModule (getBinDir) + +main :: IO () +main = do + _ <- getBinDir + return () diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/PathsModule/Executable/my.cabal cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/PathsModule/Executable/my.cabal --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/PathsModule/Executable/my.cabal 
1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/PathsModule/Executable/my.cabal 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,16 @@ +name: PathsModule +version: 0.1 +license: BSD3 +author: Johan Tibell +stability: stable +category: PackageTests +build-type: Simple +Cabal-version: >= 1.2 + +description: + Check that the generated paths module compiles. + +Executable TestPathsModule + main-is: Main.hs + other-modules: Paths_PathsModule + build-depends: base diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/PathsModule/Library/Check.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/PathsModule/Library/Check.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/PathsModule/Library/Check.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/PathsModule/Library/Check.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,16 @@ +module PackageTests.PathsModule.Library.Check (suite) where + +import PackageTests.PackageTester + (PackageSpec(..), assertBuildSucceeded, cabal_build) +import System.FilePath +import Test.HUnit + +suite :: FilePath -> Test +suite ghcPath = TestCase $ do + let spec = PackageSpec + { directory = "PackageTests" "PathsModule" "Library" + , distPref = Nothing + , configOpts = [] + } + result <- cabal_build spec ghcPath + assertBuildSucceeded result diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/PathsModule/Library/my.cabal cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/PathsModule/Library/my.cabal --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/PathsModule/Library/my.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/PathsModule/Library/my.cabal 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,15 @@ +name: PathsModule +version: 0.1 +license: BSD3 +author: 
Johan Tibell +stability: stable +category: PackageTests +build-type: Simple +Cabal-version: >= 1.2 + +description: + Check that the generated paths module compiles. + +Library + exposed-modules: Paths_PathsModule + build-depends: base diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/PreProcess/Check.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/PreProcess/Check.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/PreProcess/Check.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/PreProcess/Check.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,16 @@ +module PackageTests.PreProcess.Check (suite) where + +import PackageTests.PackageTester + (PackageSpec(..), assertBuildSucceeded, cabal_build) +import System.FilePath +import Test.HUnit + +suite :: FilePath -> Test +suite ghcPath = TestCase $ do + let spec = PackageSpec + { directory = "PackageTests" "PreProcess" + , distPref = Nothing + , configOpts = ["--enable-tests", "--enable-benchmarks"] + } + result <- cabal_build spec ghcPath + assertBuildSucceeded result diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/PreProcess/Foo.hsc cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/PreProcess/Foo.hsc --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/PreProcess/Foo.hsc 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/PreProcess/Foo.hsc 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1 @@ +module Foo where diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/PreProcess/Main.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/PreProcess/Main.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/PreProcess/Main.hs 1970-01-01 00:00:00.000000000 +0000 +++ 
cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/PreProcess/Main.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,6 @@ +module Main where + +import Foo + +main :: IO () +main = return () diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/PreProcess/my.cabal cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/PreProcess/my.cabal --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/PreProcess/my.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/PreProcess/my.cabal 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,32 @@ +name: PreProcess +version: 0.1 +license: BSD3 +author: Johan Tibell +stability: stable +category: PackageTests +build-type: Simple +Cabal-version: >= 1.2 + +description: + Check that preprocessors are run. + +Library + exposed-modules: Foo + build-depends: base + +Executable my-executable + main-is: Main.hs + other-modules: Foo + build-depends: base + +Test-Suite my-test-suite + main-is: Main.hs + type: exitcode-stdio-1.0 + other-modules: Foo + build-depends: base + +Benchmark my-benchmark + main-is: Main.hs + type: exitcode-stdio-1.0 + other-modules: Foo + build-depends: base diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/ReexportedModules/Check.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/ReexportedModules/Check.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/ReexportedModules/Check.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/ReexportedModules/Check.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,41 @@ +module PackageTests.ReexportedModules.Check where + +import Data.Version +import PackageTests.PackageTester +import System.FilePath +import Test.HUnit +import Data.Maybe +import Data.List +import Control.Monad +import Data.Char +import Text.ParserCombinators.ReadP + +orFail :: 
String -> [(a, String)] -> a +orFail err r = case find (all isSpace . snd) r of + Nothing -> error err + Just (i, _) -> i + +find' :: (a -> Bool) -> [a] -> Maybe a +find' = find + +suite :: FilePath -> Test +suite ghcPath = TestCase $ do + -- ToDo: Turn this into a utility function + (_, _, xs) <- run Nothing ghcPath [] ["--info"] + let compat = (>= Version [7,9] []) + . orFail "could not parse version" + . readP_to_S parseVersion + . snd + . fromJust + . find' ((=="Project version").fst) + . orFail "could not parse ghc --info output" + . reads + $ xs + when compat $ do + let spec = PackageSpec + { directory = "PackageTests" "ReexportedModules" + , configOpts = [] + , distPref = Nothing + } + result <- cabal_build spec ghcPath + assertBuildSucceeded result diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/ReexportedModules/ReexportedModules.cabal cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/ReexportedModules/ReexportedModules.cabal --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/ReexportedModules/ReexportedModules.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/ReexportedModules/ReexportedModules.cabal 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,11 @@ +name: ReexportedModules +version: 0.1.0.0 +license-file: LICENSE +author: Edward Z. 
Yang +maintainer: ezyang@cs.stanford.edu +build-type: Simple +cabal-version: >=1.21 + +library + build-depends: base, containers + reexported-modules: containers:Data.Map as DataMap diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/Check.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/Check.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/Check.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/Check.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,41 @@ +module PackageTests.TemplateHaskell.Check where + +import PackageTests.PackageTester +import System.FilePath +import Test.HUnit + +vanilla :: FilePath -> Test +vanilla ghcPath = TestCase $ do + let spec = PackageSpec + { directory = "PackageTests" "TemplateHaskell" "vanilla" + , configOpts = [] + , distPref = Nothing + } + result <- cabal_build spec ghcPath + assertBuildSucceeded result + +profiling :: FilePath -> Test +profiling ghcPath = TestCase $ do + let flags = ["--enable-library-profiling" +-- ,"--disable-library-vanilla" + ,"--enable-profiling"] + spec = PackageSpec + { directory = "PackageTests" "TemplateHaskell" "profiling" + , configOpts = flags + , distPref = Nothing + } + result <- cabal_build spec ghcPath + assertBuildSucceeded result + +dynamic :: FilePath -> Test +dynamic ghcPath = TestCase $ do + let flags = ["--enable-shared" +-- ,"--disable-library-vanilla" + ,"--enable-executable-dynamic"] + spec = PackageSpec + { directory = "PackageTests" "TemplateHaskell" "dynamic" + , configOpts = flags + , distPref = Nothing + } + result <- cabal_build spec ghcPath + assertBuildSucceeded result diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/dynamic/Exe.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/dynamic/Exe.hs --- 
cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/dynamic/Exe.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/dynamic/Exe.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,6 @@ +{-# LANGUAGE TemplateHaskell #-} +module Main where + +import TH + +main = print $(splice) diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/dynamic/Lib.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/dynamic/Lib.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/dynamic/Lib.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/dynamic/Lib.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,6 @@ +{-# LANGUAGE TemplateHaskell #-} +module Lib where + +import TH + +val = $(splice) diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/dynamic/my.cabal cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/dynamic/my.cabal --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/dynamic/my.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/dynamic/my.cabal 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,15 @@ +Name: templateHaskell +Version: 0.1 +Build-Type: Simple +Cabal-Version: >= 1.2 + +Library + Exposed-Modules: Lib + Other-Modules: TH + Build-Depends: base, template-haskell + Extensions: TemplateHaskell + +Executable main + Main-is: Exe.hs + Build-Depends: base, template-haskell + Extensions: TemplateHaskell diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/dynamic/TH.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/dynamic/TH.hs --- 
cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/dynamic/TH.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/dynamic/TH.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,4 @@ +{-# LANGUAGE TemplateHaskell #-} +module TH where + +splice = [| () |] diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/profiling/Exe.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/profiling/Exe.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/profiling/Exe.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/profiling/Exe.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,6 @@ +{-# LANGUAGE TemplateHaskell #-} +module Main where + +import TH + +main = print $(splice) diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/profiling/Lib.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/profiling/Lib.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/profiling/Lib.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/profiling/Lib.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,6 @@ +{-# LANGUAGE TemplateHaskell #-} +module Lib where + +import TH + +val = $(splice) diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/profiling/my.cabal cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/profiling/my.cabal --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/profiling/my.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/profiling/my.cabal 
2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,15 @@ +Name: templateHaskell +Version: 0.1 +Build-Type: Simple +Cabal-Version: >= 1.2 + +Library + Exposed-Modules: Lib + Other-Modules: TH + Build-Depends: base, template-haskell + Extensions: TemplateHaskell + +Executable main + Main-is: Exe.hs + Build-Depends: base, template-haskell + Extensions: TemplateHaskell diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/profiling/TH.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/profiling/TH.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/profiling/TH.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/profiling/TH.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,4 @@ +{-# LANGUAGE TemplateHaskell #-} +module TH where + +splice = [| () |] diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/vanilla/Exe.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/vanilla/Exe.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/vanilla/Exe.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/vanilla/Exe.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,6 @@ +{-# LANGUAGE TemplateHaskell #-} +module Main where + +import TH + +main = print $(splice) diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/vanilla/Lib.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/vanilla/Lib.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/vanilla/Lib.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/vanilla/Lib.hs 2016-06-02 07:15:38.000000000 +0000 
@@ -0,0 +1,6 @@ +{-# LANGUAGE TemplateHaskell #-} +module Lib where + +import TH + +val = $(splice) diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/vanilla/my.cabal cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/vanilla/my.cabal --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/vanilla/my.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/vanilla/my.cabal 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,15 @@ +Name: templateHaskell +Version: 0.1 +Build-Type: Simple +Cabal-Version: >= 1.2 + +Library + Exposed-Modules: Lib + Other-Modules: TH + Build-Depends: base, template-haskell + Extensions: TemplateHaskell + +Executable main + Main-is: Exe.hs + Build-Depends: base, template-haskell + Extensions: TemplateHaskell diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/vanilla/TH.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/vanilla/TH.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/vanilla/TH.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TemplateHaskell/vanilla/TH.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,4 @@ +{-# LANGUAGE TemplateHaskell #-} +module TH where + +splice = [| () |] diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TestOptions/Check.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TestOptions/Check.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TestOptions/Check.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TestOptions/Check.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,26 @@ +module PackageTests.TestOptions.Check where + +import 
PackageTests.PackageTester +import System.FilePath +import Test.HUnit + +suite :: FilePath -> Test +suite ghcPath = TestCase $ do + let spec = PackageSpec + { directory = "PackageTests" "TestOptions" + , configOpts = ["--enable-tests"] + , distPref = Nothing + } + _ <- cabal_build spec ghcPath + result <- cabal_test spec [] ["--test-options=1 2 3"] ghcPath + let message = "\"cabal test\" did not pass the correct options to the " + ++ "test executable with \"--test-options\"" + assertEqual message True $ successful result + result' <- cabal_test spec [] [ "--test-option=1" + , "--test-option=2" + , "--test-option=3" + ] + ghcPath + let message' = "\"cabal test\" did not pass the correct options to the " + ++ "test executable with \"--test-option\"" + assertEqual message' True $ successful result' diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TestOptions/TestOptions.cabal cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TestOptions/TestOptions.cabal --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TestOptions/TestOptions.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TestOptions/TestOptions.cabal 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,20 @@ +name: TestOptions +version: 0.1 +license: BSD3 +author: Thomas Tuegel +stability: stable +category: PackageTests +build-type: Simple +cabal-version: >= 1.9.2 + +description: + Check that Cabal passes the correct test options to test suites. 
+ +executable dummy + main-is: test-TestOptions.hs + build-depends: base + +test-suite test-TestOptions + main-is: test-TestOptions.hs + type: exitcode-stdio-1.0 + build-depends: base diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TestOptions/test-TestOptions.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TestOptions/test-TestOptions.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TestOptions/test-TestOptions.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TestOptions/test-TestOptions.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,11 @@ +module Main where + +import System.Environment ( getArgs ) +import System.Exit ( exitFailure, exitSuccess ) + +main :: IO () +main = do + args <- getArgs + if args == ["1", "2", "3"] + then exitSuccess + else putStrLn ("Got: " ++ show args) >> exitFailure diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TestStanza/Check.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TestStanza/Check.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TestStanza/Check.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TestStanza/Check.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,53 @@ +module PackageTests.TestStanza.Check where + +import Test.HUnit +import System.FilePath +import qualified Data.Map as Map +import PackageTests.PackageTester +import Distribution.Version +import Distribution.PackageDescription.Parse (readPackageDescription) +import Distribution.PackageDescription.Configuration + (finalizePackageDescription) +import Distribution.Package (PackageName(..), Dependency(..)) +import Distribution.PackageDescription + ( PackageDescription(..), BuildInfo(..), TestSuite(..) 
+ , TestSuiteInterface(..), emptyBuildInfo, emptyTestSuite + , defaultRenaming) +import Distribution.Verbosity (silent) +import Distribution.System (buildPlatform) +import Distribution.Compiler + ( CompilerId(..), CompilerFlavor(..), unknownCompilerInfo, AbiTag(..) ) +import Distribution.Text + +suite :: FilePath -> Test +suite ghcPath = TestCase $ do + let dir = "PackageTests" "TestStanza" + pdFile = dir "my" <.> "cabal" + spec = PackageSpec + { directory = dir + , configOpts = [] + , distPref = Nothing + } + result <- cabal_configure spec ghcPath + assertOutputDoesNotContain "unknown section type" result + genPD <- readPackageDescription silent pdFile + let compiler = unknownCompilerInfo (CompilerId GHC $ Version [6, 12, 2] []) NoAbiTag + anticipatedTestSuite = emptyTestSuite + { testName = "dummy" + , testInterface = TestSuiteExeV10 (Version [1,0] []) "dummy.hs" + , testBuildInfo = emptyBuildInfo + { targetBuildDepends = + [ Dependency (PackageName "base") anyVersion ] + , targetBuildRenaming = + Map.singleton (PackageName "base") defaultRenaming + , hsSourceDirs = ["."] + } + , testEnabled = False + } + case finalizePackageDescription [] (const True) buildPlatform compiler [] genPD of + Left xs -> let depMessage = "should not have missing dependencies:\n" ++ + (unlines $ map (show . 
disp) xs) + in assertEqual depMessage True False + Right (f, _) -> let gotTest = head $ testSuites f + in assertEqual "parsed test-suite stanza does not match anticipated" + gotTest anticipatedTestSuite diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TestStanza/my.cabal cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TestStanza/my.cabal --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TestStanza/my.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TestStanza/my.cabal 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,19 @@ +name: TestStanza +version: 0.1 +license: BSD3 +author: Thomas Tuegel +stability: stable +category: PackageTests +build-type: Simple + +description: + Check that Cabal recognizes the Test stanza defined below. + +Library + exposed-modules: MyLibrary + build-depends: base + +test-suite dummy + main-is: dummy.hs + type: exitcode-stdio-1.0 + build-depends: base \ No newline at end of file diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TestSuiteExeV10/Check.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TestSuiteExeV10/Check.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TestSuiteExeV10/Check.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TestSuiteExeV10/Check.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,147 @@ +module PackageTests.TestSuiteExeV10.Check (checks) where + +import qualified Control.Exception as E (IOException, catch) +import Control.Monad (when) +import Data.Maybe (catMaybes) +import System.Directory ( doesFileExist ) +import System.FilePath +import qualified Test.Framework as TF +import Test.Framework (testGroup) +import Test.Framework.Providers.HUnit (hUnitTestToTests) +import Test.HUnit hiding ( path ) + +import Distribution.Compiler (CompilerFlavor(..), CompilerId(..)) 
+import Distribution.PackageDescription (package) +import Distribution.Simple.Compiler (compilerId) +import Distribution.Simple.Configure (getPersistBuildConfig) +import Distribution.Simple.LocalBuildInfo (compiler, localPkgDescr, pkgKey) +import Distribution.Simple.Hpc +import Distribution.Simple.Program.Builtin (hpcProgram) +import Distribution.Simple.Program.Db + ( emptyProgramDb, configureProgram, requireProgramVersion ) +import Distribution.Text (display) +import qualified Distribution.Verbosity as Verbosity +import Distribution.Version (Version(..), orLaterVersion) + +import PackageTests.PackageTester + +checks :: FilePath -> [TF.Test] +checks ghcPath = + [ hunit "Test" $ checkTest ghcPath ] + ++ hpcTestMatrix ghcPath ++ + [ hunit "TestNoHpc/NoTix" $ checkTestNoHpcNoTix ghcPath + , hunit "TestNoHpc/NoMarkup" $ checkTestNoHpcNoMarkup ghcPath + ] + +hpcTestMatrix :: FilePath -> [TF.Test] +hpcTestMatrix ghcPath = do + libProf <- [True, False] + exeProf <- [True, False] + exeDyn <- [True, False] + shared <- [True, False] + let name = concat + [ "WithHpc-" + , if libProf then "LibProf" else "" + , if exeProf then "ExeProf" else "" + , if exeDyn then "ExeDyn" else "" + , if shared then "Shared" else "" + ] + enable cond flag + | cond = Just $ "--enable-" ++ flag + | otherwise = Nothing + opts = catMaybes + [ enable libProf "library-profiling" + , enable exeProf "profiling" + , enable exeDyn "executable-dynamic" + , enable shared "shared" + ] + return $ hunit name $ checkTestWithHpc ghcPath name opts + +dir :: FilePath +dir = "PackageTests" "TestSuiteExeV10" + +checkTest :: FilePath -> Test +checkTest ghcPath = TestCase $ buildAndTest ghcPath "Default" [] [] + +shouldExist :: FilePath -> Assertion +shouldExist path = doesFileExist path >>= assertBool (path ++ " should exist") + +shouldNotExist :: FilePath -> Assertion +shouldNotExist path = + doesFileExist path >>= assertBool (path ++ " should exist") . 
not + +-- | Ensure that both .tix file and markup are generated if coverage is enabled. +checkTestWithHpc :: FilePath -> String -> [String] -> Test +checkTestWithHpc ghcPath name extraOpts = TestCase $ do + isCorrectVersion <- correctHpcVersion + when isCorrectVersion $ do + let distPref' = dir "dist-" ++ name + buildAndTest ghcPath name [] ("--enable-coverage" : extraOpts) + lbi <- getPersistBuildConfig distPref' + let way = guessWay lbi + CompilerId comp version = compilerId (compiler lbi) + subdir + | comp == GHC && version >= Version [7, 10] [] = + display (pkgKey lbi) + | otherwise = display (package $ localPkgDescr lbi) + mapM_ shouldExist + [ mixDir distPref' way "my-0.1" subdir "Foo.mix" + , mixDir distPref' way "test-Foo" "Main.mix" + , tixFilePath distPref' way "test-Foo" + , htmlDir distPref' way "test-Foo" "hpc_index.html" + ] + +-- | Ensures that even if -fhpc is manually provided no .tix file is output. +checkTestNoHpcNoTix :: FilePath -> Test +checkTestNoHpcNoTix ghcPath = TestCase $ do + buildAndTest ghcPath "NoHpcNoTix" [] + [ "--ghc-option=-fhpc" + , "--ghc-option=-hpcdir" + , "--ghc-option=dist-NoHpcNoTix/hpc/vanilla" ] + lbi <- getPersistBuildConfig (dir "dist-NoHpcNoTix") + let way = guessWay lbi + shouldNotExist $ tixFilePath (dir "dist-NoHpcNoTix") way "test-Foo" + +-- | Ensures that even if a .tix file happens to be left around +-- markup isn't generated. +checkTestNoHpcNoMarkup :: FilePath -> Test +checkTestNoHpcNoMarkup ghcPath = TestCase $ do + let tixFile = tixFilePath "dist-NoHpcNoMarkup" Vanilla "test-Foo" + buildAndTest ghcPath "NoHpcNoMarkup" + [("HPCTIXFILE", Just tixFile)] + [ "--ghc-option=-fhpc" + , "--ghc-option=-hpcdir" + , "--ghc-option=dist-NoHpcNoMarkup/hpc/vanilla" ] + shouldNotExist $ htmlDir (dir "dist-NoHpcNoMarkup") Vanilla "test-Foo" "hpc_index.html" + +-- | Build and test a package and ensure that both were successful. +-- +-- The flag "--enable-tests" is provided in addition to the given flags. 
+buildAndTest :: FilePath -> String -> [(String, Maybe String)] -> [String] -> IO () +buildAndTest ghcPath name envOverrides flags = do + let spec = PackageSpec + { directory = dir + , distPref = Just $ "dist-" ++ name + , configOpts = "--enable-tests" : flags + } + buildResult <- cabal_build spec ghcPath + assertBuildSucceeded buildResult + testResult <- cabal_test spec envOverrides [] ghcPath + assertTestSucceeded testResult + +hunit :: TF.TestName -> Test -> TF.Test +hunit name = testGroup name . hUnitTestToTests + +-- | Checks for a suitable HPC version for testing. +correctHpcVersion :: IO Bool +correctHpcVersion = do + let programDb' = emptyProgramDb + let verbosity = Verbosity.normal + let verRange = orLaterVersion (Version [0,7] []) + programDb <- configureProgram verbosity hpcProgram programDb' + (requireProgramVersion verbosity hpcProgram verRange programDb + >> return True) `catchIO` (\_ -> return False) + where + -- Distribution.Compat.Exception is hidden. + catchIO :: IO a -> (E.IOException -> IO a) -> IO a + catchIO = E.catch diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TestSuiteExeV10/Foo.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TestSuiteExeV10/Foo.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TestSuiteExeV10/Foo.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TestSuiteExeV10/Foo.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,4 @@ +module Foo where + +fooTest :: [String] -> Bool +fooTest _ = True diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TestSuiteExeV10/my.cabal cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TestSuiteExeV10/my.cabal --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TestSuiteExeV10/my.cabal 1970-01-01 00:00:00.000000000 +0000 +++ 
cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TestSuiteExeV10/my.cabal 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,15 @@ +name: my +version: 0.1 +license: BSD3 +cabal-version: >= 1.9.2 +build-type: Simple + +library + exposed-modules: Foo + build-depends: base + +test-suite test-Foo + type: exitcode-stdio-1.0 + hs-source-dirs: tests + main-is: test-Foo.hs + build-depends: base, my diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TestSuiteExeV10/tests/test-Foo.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TestSuiteExeV10/tests/test-Foo.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests/TestSuiteExeV10/tests/test-Foo.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests/TestSuiteExeV10/tests/test-Foo.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,8 @@ +module Main where + +import Foo +import System.Exit + +main :: IO () +main | fooTest [] = exitSuccess + | otherwise = exitFailure diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/PackageTests.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/PackageTests.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,172 @@ +-- The intention is that this will be the new unit test framework. +-- Please add any working tests here. This file should do nothing +-- but import tests from other modules. 
+-- +-- Stephen Blackheath, 2009 + +module Main where + +import PackageTests.BenchmarkExeV10.Check +import PackageTests.BenchmarkOptions.Check +import PackageTests.BenchmarkStanza.Check +-- import PackageTests.BuildDeps.GlobalBuildDepsNotAdditive1.Check +-- import PackageTests.BuildDeps.GlobalBuildDepsNotAdditive2.Check +import PackageTests.BuildDeps.InternalLibrary0.Check +import PackageTests.BuildDeps.InternalLibrary1.Check +import PackageTests.BuildDeps.InternalLibrary2.Check +import PackageTests.BuildDeps.InternalLibrary3.Check +import PackageTests.BuildDeps.InternalLibrary4.Check +import PackageTests.BuildDeps.SameDepsAllRound.Check +import PackageTests.BuildDeps.TargetSpecificDeps1.Check +import PackageTests.BuildDeps.TargetSpecificDeps2.Check +import PackageTests.BuildDeps.TargetSpecificDeps3.Check +import PackageTests.BuildTestSuiteDetailedV09.Check +import PackageTests.PackageTester (PackageSpec(..), compileSetup) +import PackageTests.PathsModule.Executable.Check +import PackageTests.PathsModule.Library.Check +import PackageTests.PreProcess.Check +import PackageTests.TemplateHaskell.Check +import PackageTests.CMain.Check +import PackageTests.DeterministicAr.Check +import PackageTests.EmptyLib.Check +import PackageTests.Haddock.Check +import PackageTests.TestOptions.Check +import PackageTests.TestStanza.Check +import PackageTests.TestSuiteExeV10.Check +import PackageTests.OrderFlags.Check +import PackageTests.ReexportedModules.Check + +import Distribution.Simple.Configure + ( ConfigStateFileError(..), getConfigStateFile ) +import Distribution.Simple.LocalBuildInfo (LocalBuildInfo(..)) +import Distribution.Simple.Program.Types (programPath) +import Distribution.Simple.Program.Builtin + ( ghcProgram, ghcPkgProgram, haddockProgram ) +import Distribution.Simple.Program.Db (requireProgram) +import Distribution.Simple.Utils (cabalVersion) +import Distribution.Text (display) +import Distribution.Verbosity (normal) +import Distribution.Version (Version(Version)) + 
+import Control.Exception (try, throw) +import System.Directory + ( getCurrentDirectory, setCurrentDirectory ) +import System.FilePath (()) +import System.IO (BufferMode(NoBuffering), hSetBuffering, stdout) +import Test.Framework (Test, TestName, defaultMain, testGroup) +import Test.Framework.Providers.HUnit (hUnitTestToTests) +import qualified Test.HUnit as HUnit + + +hunit :: TestName -> HUnit.Test -> Test +hunit name test = testGroup name $ hUnitTestToTests test + +tests :: Version -> PackageSpec -> FilePath -> FilePath -> [Test] +tests version inplaceSpec ghcPath ghcPkgPath = + [ hunit "BuildDeps/SameDepsAllRound" + (PackageTests.BuildDeps.SameDepsAllRound.Check.suite ghcPath) + -- The two following tests were disabled by Johan Tibell as + -- they have been failing for a long time: + -- , hunit "BuildDeps/GlobalBuildDepsNotAdditive1/" + -- (PackageTests.BuildDeps.GlobalBuildDepsNotAdditive1.Check.suite ghcPath) + -- , hunit "BuildDeps/GlobalBuildDepsNotAdditive2/" + -- (PackageTests.BuildDeps.GlobalBuildDepsNotAdditive2.Check.suite ghcPath) + , hunit "BuildDeps/InternalLibrary0" + (PackageTests.BuildDeps.InternalLibrary0.Check.suite version ghcPath) + , hunit "PreProcess" (PackageTests.PreProcess.Check.suite ghcPath) + , hunit "TestStanza" (PackageTests.TestStanza.Check.suite ghcPath) + -- ^ The Test stanza test will eventually be required + -- only for higher versions. + , testGroup "TestSuiteExeV10" (PackageTests.TestSuiteExeV10.Check.checks ghcPath) + , hunit "TestOptions" (PackageTests.TestOptions.Check.suite ghcPath) + , hunit "BenchmarkStanza" (PackageTests.BenchmarkStanza.Check.suite ghcPath) + -- ^ The benchmark stanza test will eventually be required + -- only for higher versions. 
+ , hunit "BenchmarkExeV10/Test" + (PackageTests.BenchmarkExeV10.Check.checkBenchmark ghcPath) + , hunit "BenchmarkOptions" (PackageTests.BenchmarkOptions.Check.suite ghcPath) + , hunit "TemplateHaskell/vanilla" + (PackageTests.TemplateHaskell.Check.vanilla ghcPath) + , hunit "TemplateHaskell/profiling" + (PackageTests.TemplateHaskell.Check.profiling ghcPath) + , hunit "PathsModule/Executable" + (PackageTests.PathsModule.Executable.Check.suite ghcPath) + , hunit "PathsModule/Library" (PackageTests.PathsModule.Library.Check.suite ghcPath) + , hunit "DeterministicAr" + (PackageTests.DeterministicAr.Check.suite ghcPath ghcPkgPath) + , hunit "EmptyLib/emptyLib" + (PackageTests.EmptyLib.Check.emptyLib ghcPath) + , hunit "Haddock" (PackageTests.Haddock.Check.suite ghcPath) + , hunit "BuildTestSuiteDetailedV09" + (PackageTests.BuildTestSuiteDetailedV09.Check.suite inplaceSpec ghcPath) + , hunit "OrderFlags" + (PackageTests.OrderFlags.Check.suite ghcPath) + , hunit "TemplateHaskell/dynamic" + (PackageTests.TemplateHaskell.Check.dynamic ghcPath) + , hunit "ReexportedModules" + (PackageTests.ReexportedModules.Check.suite ghcPath) + ] ++ + -- These tests are only required to pass on cabal version >= 1.7 + (if version >= Version [1, 7] [] + then [ hunit "BuildDeps/TargetSpecificDeps1" + (PackageTests.BuildDeps.TargetSpecificDeps1.Check.suite ghcPath) + , hunit "BuildDeps/TargetSpecificDeps2" + (PackageTests.BuildDeps.TargetSpecificDeps2.Check.suite ghcPath) + , hunit "BuildDeps/TargetSpecificDeps3" + (PackageTests.BuildDeps.TargetSpecificDeps3.Check.suite ghcPath) + , hunit "BuildDeps/InternalLibrary1" + (PackageTests.BuildDeps.InternalLibrary1.Check.suite ghcPath) + , hunit "BuildDeps/InternalLibrary2" + (PackageTests.BuildDeps.InternalLibrary2.Check.suite ghcPath ghcPkgPath) + , hunit "BuildDeps/InternalLibrary3" + (PackageTests.BuildDeps.InternalLibrary3.Check.suite ghcPath ghcPkgPath) + , hunit "BuildDeps/InternalLibrary4" + 
(PackageTests.BuildDeps.InternalLibrary4.Check.suite ghcPath ghcPkgPath) + , hunit "PackageTests/CMain" + (PackageTests.CMain.Check.checkBuild ghcPath) + ] + else []) + +main :: IO () +main = do + -- WORKAROUND: disable buffering on stdout to get streaming test logs + -- test providers _should_ do this themselves + hSetBuffering stdout NoBuffering + + wd <- getCurrentDirectory + let dbFile = wd "dist/package.conf.inplace" + inplaceSpec = PackageSpec + { directory = [] + , configOpts = [ "--package-db=" ++ dbFile + , "--constraint=Cabal == " ++ display cabalVersion + ] + , distPref = Nothing + } + putStrLn $ "Cabal test suite - testing cabal version " ++ + display cabalVersion + lbi <- getPersistBuildConfig_ ("dist" "setup-config") + (ghc, _) <- requireProgram normal ghcProgram (withPrograms lbi) + (ghcPkg, _) <- requireProgram normal ghcPkgProgram (withPrograms lbi) + (haddock, _) <- requireProgram normal haddockProgram (withPrograms lbi) + let ghcPath = programPath ghc + ghcPkgPath = programPath ghcPkg + haddockPath = programPath haddock + putStrLn $ "Using ghc: " ++ ghcPath + putStrLn $ "Using ghc-pkg: " ++ ghcPkgPath + putStrLn $ "Using haddock: " ++ haddockPath + setCurrentDirectory "tests" + -- Create a shared Setup executable to speed up Simple tests + compileSetup "." ghcPath + defaultMain (tests cabalVersion inplaceSpec ghcPath ghcPkgPath) + +-- Like Distribution.Simple.Configure.getPersistBuildConfig but +-- doesn't check that the Cabal version matches, which it doesn't when +-- we run Cabal's own test suite, due to bootstrapping issues. 
+getPersistBuildConfig_ :: FilePath -> IO LocalBuildInfo +getPersistBuildConfig_ filename = do + eLBI <- try $ getConfigStateFile filename + case eLBI of + Left (ConfigStateFileBadVersion _ _ (Right lbi)) -> return lbi + Left (ConfigStateFileBadVersion _ _ (Left err)) -> throw err + Left err -> throw err + Right lbi -> return lbi diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/README.md cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/README.md --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/README.md 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/README.md 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,34 @@ +Writing package tests +===================== + +The tests under the [PackageTests] directory define and build packages +that exercise various components of Cabal. Each test case is an [HUnit] +test. The entry point for the test suite, where all the test cases are +listed, is [PackageTests.hs]. There are utilities for calling the stages +of Cabal's build process in [PackageTests/PackageTester.hs]; have a look +at an existing test case to see how they are used. + +It is important that package tests use the in-place version of Cabal +rather than the system version. Several long-standing bugs in the test +suite were caused by testing the system (rather than the newly compiled) +version of Cabal. There are two places where the system Cabal can +accidentally be invoked: + +1. Compiling `Setup.hs`. `runghc` needs to be told about the in-place + package database. This issue should be solved for all future package + tests; see `compileSetup` in [PackageTests/PackageTester.hs]. + +2. Compiling a package which depends on Cabal. In particular, packages + with the [detailed]-type test suites depend on the Cabal library + directly, so it is important that they are configured to use the + in-place package database. 
The test suite already creates a stub + `PackageSpec` for this case; see + [PackageTests/BuildTestSuiteDetailedV09/Check.hs] to see how it is + used. + +[PackageTests]: PackageTests +[HUnit]: http://hackage.haskell.org/package/HUnit +[PackageTests.hs]: PackageTests.hs +[PackageTests/PackageTester.hs]: PackageTests/PackageTester.hs +[detailed]: ../Distribution/TestSuite.hs +[PackageTests/BuildTestSuiteDetailedV09/Check.hs]: PackageTests/BuildTestSuiteDetailedV09/Check.hs \ No newline at end of file diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/Setup.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/Setup.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/Setup.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/Setup.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,3 @@ +import Distribution.Simple +main = defaultMain + diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/UnitTests/Distribution/Compat/CreatePipe.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/UnitTests/Distribution/Compat/CreatePipe.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/UnitTests/Distribution/Compat/CreatePipe.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/UnitTests/Distribution/Compat/CreatePipe.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,20 @@ +module UnitTests.Distribution.Compat.CreatePipe (tests) where + +import Distribution.Compat.CreatePipe +import System.IO (hClose, hGetContents, hPutStr, hSetEncoding, localeEncoding) +import Test.Framework +import Test.Framework.Providers.HUnit +import Test.HUnit hiding (Test) + +tests :: [Test] +tests = [testCase "Locale Encoding" case_Locale_Encoding] + +case_Locale_Encoding :: Assertion +case_Locale_Encoding = assert $ do + let str = "\0252" + (r, w) <- createPipe + hSetEncoding w localeEncoding + out <- hGetContents r + hPutStr w str + hClose w + return $! 
out == str diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/UnitTests/Distribution/Compat/ReadP.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/UnitTests/Distribution/Compat/ReadP.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/UnitTests/Distribution/Compat/ReadP.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/UnitTests/Distribution/Compat/ReadP.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,153 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Compat.ReadP +-- Copyright : (c) The University of Glasgow 2002 +-- License : BSD-style (see the file libraries/base/LICENSE) +-- +-- Maintainer : libraries@haskell.org +-- Portability : portable +-- +-- This code was originally in Distribution.Compat.ReadP. Please see that file +-- for provenance. The tests have been integrated into the test framework. +-- Some properties cannot be tested, as they hold over arbitrary ReadP values, +-- and we don't have a good Arbitrary instance (nor Show instance) for ReadP. 
+-- +module UnitTests.Distribution.Compat.ReadP + ( tests + -- * Properties + -- $properties + ) where + +import Data.List +import Distribution.Compat.ReadP +import Test.Framework +import Test.Framework.Providers.QuickCheck2 + +tests :: [Test] +tests = + [ testProperty "Get Nil" prop_Get_Nil + , testProperty "Get Cons" prop_Get_Cons + , testProperty "Look" prop_Look + , testProperty "Fail" prop_Fail + , testProperty "Return" prop_Return + --, testProperty "Bind" prop_Bind + --, testProperty "Plus" prop_Plus + --, testProperty "LeftPlus" prop_LeftPlus + --, testProperty "Gather" prop_Gather + , testProperty "String Yes" prop_String_Yes + , testProperty "String Maybe" prop_String_Maybe + , testProperty "Munch" (prop_Munch evenChar) + , testProperty "Munch1" (prop_Munch1 evenChar) + --, testProperty "Choice" prop_Choice + --, testProperty "ReadS" prop_ReadS + ] + +-- --------------------------------------------------------------------------- +-- QuickCheck properties that hold for the combinators + +{- $properties +The following are QuickCheck specifications of what the combinators do. +These can be seen as formal specifications of the behavior of the +combinators. + +We use bags to give semantics to the combinators. +-} + +type Bag a = [a] + +-- Equality on bags does not care about the order of elements. + +(=~) :: Ord a => Bag a -> Bag a -> Bool +xs =~ ys = sort xs == sort ys + +-- A special equality operator to avoid unresolved overloading +-- when testing the properties. + +(=~.) :: Bag (Int,String) -> Bag (Int,String) -> Bool +(=~.) = (=~) + +-- Here follow the properties: + +prop_Get_Nil :: Bool +prop_Get_Nil = + readP_to_S get [] =~ [] + +prop_Get_Cons :: Char -> [Char] -> Bool +prop_Get_Cons c s = + readP_to_S get (c:s) =~ [(c,s)] + +prop_Look :: String -> Bool +prop_Look s = + readP_to_S look s =~ [(s,s)] + +prop_Fail :: String -> Bool +prop_Fail s = + readP_to_S pfail s =~. 
[] + +prop_Return :: Int -> String -> Bool +prop_Return x s = + readP_to_S (return x) s =~. [(x,s)] + +{- +prop_Bind p k s = + readP_to_S (p >>= k) s =~. + [ ys'' + | (x,s') <- readP_to_S p s + , ys'' <- readP_to_S (k (x::Int)) s' + ] + +prop_Plus :: ReadP Int Int -> ReadP Int Int -> String -> Bool +prop_Plus p q s = + readP_to_S (p +++ q) s =~. + (readP_to_S p s ++ readP_to_S q s) + +prop_LeftPlus :: ReadP Int Int -> ReadP Int Int -> String -> Bool +prop_LeftPlus p q s = + readP_to_S (p <++ q) s =~. + (readP_to_S p s +<+ readP_to_S q s) + where + [] +<+ ys = ys + xs +<+ _ = xs + +prop_Gather s = + forAll readPWithoutReadS $ \p -> + readP_to_S (gather p) s =~ + [ ((pre,x::Int),s') + | (x,s') <- readP_to_S p s + , let pre = take (length s - length s') s + ] +-} + +prop_String_Yes :: String -> [Char] -> Bool +prop_String_Yes this s = + readP_to_S (string this) (this ++ s) =~ + [(this,s)] + +prop_String_Maybe :: String -> String -> Bool +prop_String_Maybe this s = + readP_to_S (string this) s =~ + [(this, drop (length this) s) | this `isPrefixOf` s] + +prop_Munch :: (Char -> Bool) -> String -> Bool +prop_Munch p s = + readP_to_S (munch p) s =~ + [(takeWhile p s, dropWhile p s)] + +prop_Munch1 :: (Char -> Bool) -> String -> Bool +prop_Munch1 p s = + readP_to_S (munch1 p) s =~ + [(res,s') | let (res,s') = (takeWhile p s, dropWhile p s), not (null res)] + +{- +prop_Choice :: [ReadP Int Int] -> String -> Bool +prop_Choice ps s = + readP_to_S (choice ps) s =~. + readP_to_S (foldr (+++) pfail ps) s + +prop_ReadS :: ReadS Int -> String -> Bool +prop_ReadS r s = + readP_to_S (readS_to_P r) s =~. r s +-} + +evenChar :: Char -> Bool +evenChar = even . 
fromEnum diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/UnitTests/Distribution/Utils/NubList.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/UnitTests/Distribution/Utils/NubList.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/UnitTests/Distribution/Utils/NubList.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/UnitTests/Distribution/Utils/NubList.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,47 @@ +{-# LANGUAGE CPP #-} +module UnitTests.Distribution.Utils.NubList + ( tests + ) where + +#if __GLASGOW_HASKELL__ < 710 +import Data.Monoid +#endif +import Distribution.Utils.NubList +import Test.Framework +import Test.Framework.Providers.HUnit (testCase) +import Test.Framework.Providers.QuickCheck2 +import Test.HUnit (Assertion, assertBool) + +tests :: [Test] +tests = + [ testCase "Numlist retains ordering" testOrdering + , testCase "Numlist removes duplicates" testDeDupe + , testProperty "Monoid Numlist Identity" prop_Identity + , testProperty "Monoid Numlist Associativity" prop_Associativity + ] + +someIntList :: [Int] +-- This list must not have duplicate entries. 
+someIntList = [ 1, 3, 4, 2, 0, 7, 6, 5, 9, -1 ] + +testOrdering :: Assertion +testOrdering = + assertBool "Maintains element ordering:" $ + fromNubList (toNubList someIntList) == someIntList + +testDeDupe :: Assertion +testDeDupe = + assertBool "De-duplicates a list:" $ + fromNubList (toNubList (someIntList ++ someIntList)) == someIntList + +-- --------------------------------------------------------------------------- +-- QuickCheck properties for NubList + +prop_Identity :: [Int] -> Bool +prop_Identity xs = + mempty `mappend` toNubList xs == toNubList xs `mappend` mempty + +prop_Associativity :: [Int] -> [Int] -> [Int] -> Bool +prop_Associativity xs ys zs = + (toNubList xs `mappend` toNubList ys) `mappend` toNubList zs + == toNubList xs `mappend` (toNubList ys `mappend` toNubList zs) diff -Nru cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/UnitTests.hs cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/UnitTests.hs --- cabal-install-1.22-1.22.6.0/src/Cabal-1.22.8.0/tests/UnitTests.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/Cabal-1.22.8.0/tests/UnitTests.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,27 @@ +module Main + ( main + ) where + +import System.IO (BufferMode(NoBuffering), hSetBuffering, stdout) +import Test.Framework + +import qualified UnitTests.Distribution.Compat.CreatePipe +import qualified UnitTests.Distribution.Compat.ReadP +import qualified UnitTests.Distribution.Utils.NubList + +tests :: [Test] +tests = + [ testGroup "Distribution.Compat.ReadP" + UnitTests.Distribution.Compat.ReadP.tests + , testGroup "Distribution.Compat.CreatePipe" + UnitTests.Distribution.Compat.CreatePipe.tests + , testGroup "Distribution.Utils.NubList" + UnitTests.Distribution.Utils.NubList.tests + ] + +main :: IO () +main = do + -- WORKAROUND: disable buffering on stdout to get streaming test logs + -- test providers _should_ do this themselves + hSetBuffering stdout NoBuffering + defaultMain tests diff -Nru 
cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/bash-completion/cabal cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/bash-completion/cabal --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/bash-completion/cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/bash-completion/cabal 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,80 @@ +# cabal command line completion +# Copyright 2007-2008 "Lennart Kolmodin" +# "Duncan Coutts" +# + +# List cabal targets by type, pass: +# - test-suite for test suites +# - benchmark for benchmarks +# - executable for executables +# - executable|test-suite|benchmark for the three +_cabal_list() +{ + cat *.cabal | + grep -Ei "^[[:space:]]*($1)[[:space:]]" | + sed -e "s/.* \([^ ]*\).*/\1/" +} + +# List possible targets depending on the command supplied as parameter. The +# ideal option would be to implement this via --list-options on cabal directly. +# This is a temporary workaround. +_cabal_targets() +{ + # If command ($*) contains build, repl, test or bench completes with + # targets of according type. + [ -f *.cabal ] || return 0 + local comp + for comp in $*; do + [ $comp == build ] && _cabal_list "executable|test-suite|benchmark" && break + [ $comp == repl ] && _cabal_list "executable|test-suite|benchmark" && break + [ $comp == run ] && _cabal_list "executable" && break + [ $comp == test ] && _cabal_list "test-suite" && break + [ $comp == bench ] && _cabal_list "benchmark" && break + done +} + +# List possible subcommands of a cabal subcommand. +# +# In example "sandbox" is a cabal subcommand that itself has subcommands. Since +# "cabal --list-options" doesn't work in such cases we have to get the list +# using other means. +_cabal_subcommands() +{ + local word + for word in "$@"; do + case "$word" in + sandbox) + # Get list of "cabal sandbox" subcommands from its help message. + # + # Following command short-circuits if it reaches flags section. 
+ # This is to prevent any problems that might arise from unfortunate + # word combinations in flag descriptions. Usage section is parsed + # using simple regexp and "sandbox" subcommand is printed for each + # successful substitution. + "$1" help sandbox | + sed -rn '/Flags/q;s/^.* cabal sandbox *([^ ]*).*/\1/;t p;b;: p;p' + break # Terminate for loop. + ;; + esac + done +} + +_cabal() +{ + # get the word currently being completed + local cur + cur=${COMP_WORDS[$COMP_CWORD]} + + # create a command line to run + local cmd + # copy all words the user has entered + cmd=( ${COMP_WORDS[@]} ) + + # replace the current word with --list-options + cmd[${COMP_CWORD}]="--list-options" + + # the resulting completions should be put into this array + COMPREPLY=( $( compgen -W "$( ${cmd[@]} ) $( _cabal_targets ${cmd[@]} ) $( _cabal_subcommands ${COMP_WORDS[@]} )" -- $cur ) ) +} + +complete -F _cabal -o default cabal diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/bootstrap.sh cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/bootstrap.sh --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/bootstrap.sh 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/bootstrap.sh 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,436 @@ +#!/usr/bin/env sh + +# A script to bootstrap cabal-install. + +# It works by downloading and installing the Cabal, zlib and +# HTTP packages. It then installs cabal-install itself. +# It expects to be run inside the cabal-install directory. + +# Install settings, you can override these by setting environment vars. E.g. 
if +# you don't want profiling and dynamic versions of libraries to be installed in +# addition to vanilla, run 'EXTRA_CONFIGURE_OPTS="" ./bootstrap.sh' + +#VERBOSE +DEFAULT_CONFIGURE_OPTS="--enable-library-profiling --enable-shared" +EXTRA_CONFIGURE_OPTS=${EXTRA_CONFIGURE_OPTS-$DEFAULT_CONFIGURE_OPTS} +#EXTRA_BUILD_OPTS +#EXTRA_INSTALL_OPTS + +die () { printf "\nError during cabal-install bootstrap:\n$1\n" >&2 && exit 2 ;} + +# programs, you can override these by setting environment vars +GHC="${GHC:-ghc}" +GHC_PKG="${GHC_PKG:-ghc-pkg}" +GHC_VER="$(${GHC} --numeric-version)" +HADDOCK=${HADDOCK:-haddock} +WGET="${WGET:-wget}" +CURL="${CURL:-curl}" +FETCH="${FETCH:-fetch}" +TAR="${TAR:-tar}" +GZIP_PROGRAM="${GZIP_PROGRAM:-gzip}" + +# The variable SCOPE_OF_INSTALLATION can be set on the command line to +# use/install the libaries needed to build cabal-install to a custom package +# database instead of the user or global package database. +# +# Example: +# +# $ ghc-pkg init /my/package/database +# $ SCOPE_OF_INSTALLATION='--package-db=/my/package/database' ./bootstrap.sh +# +# You can also combine SCOPE_OF_INSTALLATION with PREFIX: +# +# $ ghc-pkg init /my/prefix/packages.conf.d +# $ SCOPE_OF_INSTALLATION='--package-db=/my/prefix/packages.conf.d' \ +# PREFIX=/my/prefix ./bootstrap.sh +# +# If you use the --global,--user or --sandbox arguments, this will +# override the SCOPE_OF_INSTALLATION setting and not use the package +# database you pass in the SCOPE_OF_INSTALLATION variable. + +SCOPE_OF_INSTALLATION="${SCOPE_OF_INSTALLATION:---user}" +DEFAULT_PREFIX="${HOME}/.cabal" + +# Try to respect $TMPDIR but override if needed - see #1710. +[ -"$TMPDIR"- = -""- ] || echo "$TMPDIR" | grep -q ld && + export TMPDIR=/tmp/cabal-$(echo $(od -XN4 -An /dev/random)) && mkdir $TMPDIR + +# Check for a C compiler. +[ ! -x "$CC" ] && for ccc in gcc clang cc icc; do + ${ccc} --version > /dev/null 2>&1 && CC=$ccc && + echo "Using $CC for C compiler. If this is not what you want, set CC." 
>&2 && + break +done + +# None found. +[ ! -x `which "$CC"` ] && + die "C compiler not found (or could not be run). + If a C compiler is installed make sure it is on your PATH, + or set the CC variable." + +# Check the C compiler/linker work. +LINK="$(for link in collect2 ld; do + echo 'main;' | ${CC} -v -x c - -o /dev/null -\#\#\# 2>&1 | grep -qw $link && + echo 'main;' | ${CC} -v -x c - -o /dev/null -\#\#\# 2>&1 | grep -w $link | + sed -e "s|\(.*$link\).*|\1|" -e 's/ //g' -e 's|"||' && break +done)" + +# They don't. +[ -z "$LINK" ] && + die "C compiler and linker could not compile a simple test program. + Please check your toolchain." + +## Warn that were's overriding $LD if set (if you want). + +[ -x "$LD" ] && [ "$LD" != "$LINK" ] && + echo "Warning: value set in $LD is not the same as C compiler's $LINK." >&2 + echo "Using $LINK instead." >&2 + +# Set LD, overriding environment if necessary. +LD=$LINK + +# Check we're in the right directory, etc. +grep "cabal-install" ./cabal-install.cabal > /dev/null 2>&1 || + die "The bootstrap.sh script must be run in the cabal-install directory" + +${GHC} --numeric-version > /dev/null 2>&1 || + die "${GHC} not found (or could not be run). + If ghc is installed, make sure it is on your PATH, + or set the GHC and GHC_PKG vars." + +${GHC_PKG} --version > /dev/null 2>&1 || die "${GHC_PKG} not found." + +GHC_VER="$(${GHC} --numeric-version)" +GHC_PKG_VER="$(${GHC_PKG} --version | cut -d' ' -f 5)" + +[ ${GHC_VER} = ${GHC_PKG_VER} ] || + die "Version mismatch between ${GHC} and ${GHC_PKG}. + If you set the GHC variable then set GHC_PKG too." + +while [ "$#" -gt 0 ]; do + case "${1}" in + "--user") + SCOPE_OF_INSTALLATION="${1}" + shift;; + "--global") + SCOPE_OF_INSTALLATION="${1}" + DEFAULT_PREFIX="/usr/local" + shift;; + "--sandbox") + shift + # check if there is another argument which doesn't start with -- + if [ "$#" -le 0 ] || [ ! 
-z $(echo "${1}" | grep "^--") ] + then + SANDBOX=".cabal-sandbox" + else + SANDBOX="${1}" + shift + fi;; + "--no-doc") + NO_DOCUMENTATION=1 + shift;; + *) + echo "Unknown argument or option, quitting: ${1}" + echo "usage: bootstrap.sh [OPTION]" + echo + echo "options:" + echo " --user Install for the local user (default)" + echo " --global Install systemwide (must be run as root)" + echo " --no-doc Do not generate documentation for installed "\ + "packages" + echo " --sandbox Install to a sandbox in the default location"\ + "(.cabal-sandbox)" + echo " --sandbox path Install to a sandbox located at path" + exit;; + esac +done + +abspath () { case "$1" in /*)printf "%s\n" "$1";; *)printf "%s\n" "$PWD/$1";; + esac; } + +if [ ! -z "$SANDBOX" ] +then # set up variables for sandbox bootstrap + # Make the sandbox path absolute since it will be used from + # different working directories when the dependency packages are + # installed. + SANDBOX=$(abspath "$SANDBOX") + # Get the name of the package database which cabal sandbox would use. + GHC_ARCH=$(ghc --info | + sed -n 's/.*"Target platform".*"\([^-]\+\)-[^-]\+-\([^"]\+\)".*/\1-\2/p') + PACKAGEDB="$SANDBOX/${GHC_ARCH}-ghc-${GHC_VER}-packages.conf.d" + # Assume that if the directory is already there, it is already a + # package database. We will get an error immediately below if it + # isn't. Uses -r to try to be compatible with Solaris, and allow + # symlinks as well as a normal dir/file. + [ ! -r "$PACKAGEDB" ] && ghc-pkg init "$PACKAGEDB" + PREFIX="$SANDBOX" + SCOPE_OF_INSTALLATION="--package-db=$PACKAGEDB" + echo Bootstrapping in sandbox at \'$SANDBOX\'. +fi + +# Check for haddock unless no documentation should be generated. +if [ ! ${NO_DOCUMENTATION} ] +then + ${HADDOCK} --version > /dev/null 2>&1 || die "${HADDOCK} not found." +fi + +PREFIX=${PREFIX:-${DEFAULT_PREFIX}} + +# Versions of the packages to install. +# The version regex says what existing installed versions are ok. 
+PARSEC_VER="3.1.9"; PARSEC_VER_REGEXP="[3]\.[01]\." + # >= 3.0 && < 3.2 +DEEPSEQ_VER="1.4.1.2"; DEEPSEQ_VER_REGEXP="1\.[1-9]\." + # >= 1.1 && < 2 +BINARY_VER="0.8.0.1"; BINARY_VER_REGEXP="[0]\.[78]\." + # >= 0.7 && < 0.9 +TEXT_VER="1.2.2.0"; TEXT_VER_REGEXP="((1\.[012]\.)|(0\.([2-9]|(1[0-1]))\.))" + # >= 0.2 && < 1.3 +NETWORK_VER="2.6.2.1"; NETWORK_VER_REGEXP="2\.[0-6]\." + # >= 2.0 && < 2.7 +NETWORK_URI_VER="2.6.0.3"; NETWORK_URI_VER_REGEXP="2\.6\." + # >= 2.6 && < 2.7 +CABAL_VER="1.22.8.0"; CABAL_VER_REGEXP="1\.22" + # >= 1.22 && < 1.23 +TRANS_VER="0.5.1.0"; TRANS_VER_REGEXP="0\.[45]\." + # >= 0.2.* && < 0.6 +MTL_VER="2.2.1"; MTL_VER_REGEXP="[2]\." + # >= 2.0 && < 3 +HTTP_VER="4000.3.2"; HTTP_VER_REGEXP="4000\.(2\.([5-9]|1[0-9]|2[0-9])|3\.?)" + # >= 4000.2.5 < 4000.4 +ZLIB_VER="0.6.1.1"; ZLIB_VER_REGEXP="0\.((5\.([3-9]|1[0-9]))|6\.?)" + # >= 0.5.3 && < 0.7 +TIME_VER="1.6" TIME_VER_REGEXP="1\.[123456]\.?" + # >= 1.1 && < 1.7 +RANDOM_VER="1.1" RANDOM_VER_REGEXP="1\.[01]\.?" + # >= 1 && < 1.2 +STM_VER="2.4.4.1"; STM_VER_REGEXP="2\." + # == 2.* +OLD_TIME_VER="1.1.0.3"; OLD_TIME_VER_REGEXP="1\.[01]\.?" + # >=1.0.0.0 && <1.2 +OLD_LOCALE_VER="1.0.0.7"; OLD_LOCALE_VER_REGEXP="1\.0\.?" + # >=1.0.0.0 && <1.1 + +HACKAGE_URL="https://hackage.haskell.org/package" + +# Haddock fails for network-2.5.0.0. +NO_DOCS_PACKAGES_VER_REGEXP="network-uri-2\.5\.[0-9]+\.[0-9]+" + +# Cache the list of packages: +echo "Checking installed packages for ghc-${GHC_VER}..." +${GHC_PKG} list --global ${SCOPE_OF_INSTALLATION} > ghc-pkg.list || + die "running '${GHC_PKG} list' failed" + +# Will we need to install this package, or is a suitable version installed? +need_pkg () { + PKG=$1 + VER_MATCH=$2 + if egrep " ${PKG}-${VER_MATCH}" ghc-pkg.list > /dev/null 2>&1 + then + return 1; + else + return 0; + fi + #Note: we cannot use "! grep" here as Solaris 9 /bin/sh doesn't like it. 
+} + +info_pkg () { + PKG=$1 + VER=$2 + VER_MATCH=$3 + + if need_pkg ${PKG} ${VER_MATCH} + then + if [ -r "${PKG}-${VER}.tar.gz" ] + then + echo "${PKG}-${VER} will be installed from local tarball." + else + echo "${PKG}-${VER} will be downloaded and installed." + fi + else + echo "${PKG} is already installed and the version is ok." + fi +} + +fetch_pkg () { + PKG=$1 + VER=$2 + + URL=${HACKAGE_URL}/${PKG}-${VER}/${PKG}-${VER}.tar.gz + if which ${CURL} > /dev/null + then + # TODO: switch back to resuming curl command once + # https://github.com/haskell/hackage-server/issues/111 is resolved + #${CURL} -L --fail -C - -O ${URL} || die "Failed to download ${PKG}." + ${CURL} -L --fail -O ${URL} || die "Failed to download ${PKG}." + elif which ${WGET} > /dev/null + then + ${WGET} -c ${URL} || die "Failed to download ${PKG}." + elif which ${FETCH} > /dev/null + then + ${FETCH} ${URL} || die "Failed to download ${PKG}." + else + die "Failed to find a downloader. 'curl', 'wget' or 'fetch' is required." + fi + [ -f "${PKG}-${VER}.tar.gz" ] || + die "Downloading ${URL} did not create ${PKG}-${VER}.tar.gz" +} + +unpack_pkg () { + PKG=$1 + VER=$2 + + rm -rf "${PKG}-${VER}.tar" "${PKG}-${VER}" + ${GZIP_PROGRAM} -d < "${PKG}-${VER}.tar.gz" | ${TAR} -xf - + [ -d "${PKG}-${VER}" ] || die "Failed to unpack ${PKG}-${VER}.tar.gz" +} + +install_pkg () { + PKG=$1 + VER=$2 + + [ -x Setup ] && ./Setup clean + [ -f Setup ] && rm Setup + + ${GHC} --make Setup -o Setup || + die "Compiling the Setup script failed." + + [ -x Setup ] || die "The Setup script does not exist or cannot be run" + + args="${SCOPE_OF_INSTALLATION} --prefix=${PREFIX} --with-compiler=${GHC}" + args="$args --with-hc-pkg=${GHC_PKG} --with-gcc=${CC} --with-ld=${LD}" + args="$args ${EXTRA_CONFIGURE_OPTS} ${VERBOSE}" + + ./Setup configure $args || die "Configuring the ${PKG} package failed." + + ./Setup build ${EXTRA_BUILD_OPTS} ${VERBOSE} || + die "Building the ${PKG} package failed." + + if [ ! 
${NO_DOCUMENTATION} ] + then + if echo "${PKG}-${VER}" | egrep ${NO_DOCS_PACKAGES_VER_REGEXP} > /dev/null 2>&1 + then + echo "Skipping documentation for the ${PKG} package." + else + ./Setup haddock --with-ghc=${GHC} --with-haddock=${HADDOCK} ${VERBOSE} || + die "Documenting the ${PKG} package failed." + fi + fi + + ./Setup install ${EXTRA_INSTALL_OPTS} ${VERBOSE} || + die "Installing the ${PKG} package failed." +} + +do_pkg () { + PKG=$1 + VER=$2 + VER_MATCH=$3 + + if need_pkg ${PKG} ${VER_MATCH} + then + echo + if [ -r "${PKG}-${VER}.tar.gz" ] + then + echo "Using local tarball for ${PKG}-${VER}." + else + echo "Downloading ${PKG}-${VER}..." + fetch_pkg ${PKG} ${VER} + fi + unpack_pkg ${PKG} ${VER} + cd "${PKG}-${VER}" + install_pkg ${PKG} ${VER} + cd .. + fi +} + +# Replicate the flag selection logic for network-uri in the .cabal file. +do_network_uri_pkg () { + # Refresh installed package list. + ${GHC_PKG} list --global ${SCOPE_OF_INSTALLATION} > ghc-pkg-stage2.list \ + || die "running '${GHC_PKG} list' failed" + + NETWORK_URI_DUMMY_VER="2.5.0.0"; NETWORK_URI_DUMMY_VER_REGEXP="2\.5\." # < 2.6 + if egrep " network-2\.[6-9]\." ghc-pkg-stage2.list > /dev/null 2>&1 + then + # Use network >= 2.6 && network-uri >= 2.6 + info_pkg "network-uri" ${NETWORK_URI_VER} ${NETWORK_URI_VER_REGEXP} + do_pkg "network-uri" ${NETWORK_URI_VER} ${NETWORK_URI_VER_REGEXP} + else + # Use network < 2.6 && network-uri < 2.6 + info_pkg "network-uri" ${NETWORK_URI_DUMMY_VER} ${NETWORK_URI_DUMMY_VER_REGEXP} + do_pkg "network-uri" ${NETWORK_URI_DUMMY_VER} ${NETWORK_URI_DUMMY_VER_REGEXP} + fi +} + +# Actually do something! 
+ +info_pkg "deepseq" ${DEEPSEQ_VER} ${DEEPSEQ_VER_REGEXP} +info_pkg "binary" ${BINARY_VER} ${BINARY_VER_REGEXP} +info_pkg "time" ${TIME_VER} ${TIME_VER_REGEXP} +info_pkg "Cabal" ${CABAL_VER} ${CABAL_VER_REGEXP} +info_pkg "transformers" ${TRANS_VER} ${TRANS_VER_REGEXP} +info_pkg "mtl" ${MTL_VER} ${MTL_VER_REGEXP} +info_pkg "text" ${TEXT_VER} ${TEXT_VER_REGEXP} +info_pkg "parsec" ${PARSEC_VER} ${PARSEC_VER_REGEXP} +info_pkg "network" ${NETWORK_VER} ${NETWORK_VER_REGEXP} +info_pkg "old-locale" ${OLD_LOCALE_VER} ${OLD_LOCALE_VER_REGEXP} +info_pkg "old-time" ${OLD_TIME_VER} ${OLD_TIME_VER_REGEXP} +info_pkg "HTTP" ${HTTP_VER} ${HTTP_VER_REGEXP} +info_pkg "zlib" ${ZLIB_VER} ${ZLIB_VER_REGEXP} +info_pkg "random" ${RANDOM_VER} ${RANDOM_VER_REGEXP} +info_pkg "stm" ${STM_VER} ${STM_VER_REGEXP} + +do_pkg "deepseq" ${DEEPSEQ_VER} ${DEEPSEQ_VER_REGEXP} +do_pkg "binary" ${BINARY_VER} ${BINARY_VER_REGEXP} +do_pkg "time" ${TIME_VER} ${TIME_VER_REGEXP} +do_pkg "Cabal" ${CABAL_VER} ${CABAL_VER_REGEXP} +do_pkg "transformers" ${TRANS_VER} ${TRANS_VER_REGEXP} +do_pkg "mtl" ${MTL_VER} ${MTL_VER_REGEXP} +do_pkg "text" ${TEXT_VER} ${TEXT_VER_REGEXP} +do_pkg "parsec" ${PARSEC_VER} ${PARSEC_VER_REGEXP} +do_pkg "network" ${NETWORK_VER} ${NETWORK_VER_REGEXP} + +# We conditionally install network-uri, depending on the network version. +do_network_uri_pkg + +do_pkg "old-locale" ${OLD_LOCALE_VER} ${OLD_LOCALE_VER_REGEXP} +do_pkg "old-time" ${OLD_TIME_VER} ${OLD_TIME_VER_REGEXP} +do_pkg "HTTP" ${HTTP_VER} ${HTTP_VER_REGEXP} +do_pkg "zlib" ${ZLIB_VER} ${ZLIB_VER_REGEXP} +do_pkg "random" ${RANDOM_VER} ${RANDOM_VER_REGEXP} +do_pkg "stm" ${STM_VER} ${STM_VER_REGEXP} + +install_pkg "cabal-install" + +# Use the newly built cabal to turn the prefix/package database into a +# legit cabal sandbox. This works because 'cabal sandbox init' will +# reuse the already existing package database and other files if they +# are in the expected locations. +[ ! 
-z "$SANDBOX" ] && $SANDBOX/bin/cabal sandbox init --sandbox $SANDBOX + +echo +echo "===========================================" +CABAL_BIN="$PREFIX/bin" +if [ -x "$CABAL_BIN/cabal" ] +then + echo "The 'cabal' program has been installed in $CABAL_BIN/" + echo "You should either add $CABAL_BIN to your PATH" + echo "or copy the cabal program to a directory that is on your PATH." + echo + echo "The first thing to do is to get the latest list of packages with:" + echo " cabal update" + echo "This will also create a default config file (if it does not already" + echo "exist) at $HOME/.cabal/config" + echo + echo "By default cabal will install programs to $HOME/.cabal/bin" + echo "If you do not want to add this directory to your PATH then you can" + echo "change the setting in the config file, for example you could use:" + echo "symlink-bindir: $HOME/bin" +else + echo "Sorry, something went wrong." + echo "The 'cabal' executable was not successfully installed into" + echo "$CABAL_BIN/" +fi +echo + +rm ghc-pkg.list diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/cabal-install.cabal cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/cabal-install.cabal --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/cabal-install.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/cabal-install.cabal 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,263 @@ +Name: cabal-install +Version: 1.22.9.0 +Synopsis: The command-line interface for Cabal and Hackage. +Description: + The \'cabal\' command-line program simplifies the process of managing + Haskell software by automating the fetching, configuration, compilation + and installation of Haskell libraries and programs. 
+homepage: http://www.haskell.org/cabal/ +bug-reports: https://github.com/haskell/cabal/issues +License: BSD3 +License-File: LICENSE +Author: Lemmih + Paolo Martini + Bjorn Bringert + Isaac Potoczny-Jones + Duncan Coutts +Maintainer: cabal-devel@haskell.org +Copyright: 2005 Lemmih + 2006 Paolo Martini + 2007 Bjorn Bringert + 2007 Isaac Potoczny-Jones + 2007-2012 Duncan Coutts +Category: Distribution +Build-type: Simple +Cabal-Version: >= 1.10 +Extra-Source-Files: + README.md bash-completion/cabal bootstrap.sh changelog + + -- Generated with '../Cabal/misc/gen-extra-source-files.sh | sort' + tests/PackageTests/Freeze/my.cabal + +source-repository head + type: git + location: https://github.com/haskell/cabal/ + subdir: cabal-install + +Flag old-directory + description: Use directory < 1.2 and old-time + default: False + +Flag network-uri + description: Get Network.URI from the network-uri package + default: True + +executable cabal + main-is: Main.hs + ghc-options: -Wall -fwarn-tabs + other-modules: + Distribution.Client.BuildReports.Anonymous + Distribution.Client.BuildReports.Storage + Distribution.Client.BuildReports.Types + Distribution.Client.BuildReports.Upload + Distribution.Client.Check + Distribution.Client.Config + Distribution.Client.Configure + Distribution.Client.Dependency + Distribution.Client.Dependency.TopDown + Distribution.Client.Dependency.TopDown.Constraints + Distribution.Client.Dependency.TopDown.Types + Distribution.Client.Dependency.Types + Distribution.Client.Dependency.Modular + Distribution.Client.Dependency.Modular.Assignment + Distribution.Client.Dependency.Modular.Builder + Distribution.Client.Dependency.Modular.Configured + Distribution.Client.Dependency.Modular.ConfiguredConversion + Distribution.Client.Dependency.Modular.Dependency + Distribution.Client.Dependency.Modular.Explore + Distribution.Client.Dependency.Modular.Flag + Distribution.Client.Dependency.Modular.Index + Distribution.Client.Dependency.Modular.IndexConversion + 
Distribution.Client.Dependency.Modular.Log + Distribution.Client.Dependency.Modular.Message + Distribution.Client.Dependency.Modular.Package + Distribution.Client.Dependency.Modular.Preference + Distribution.Client.Dependency.Modular.PSQ + Distribution.Client.Dependency.Modular.Solver + Distribution.Client.Dependency.Modular.Tree + Distribution.Client.Dependency.Modular.Validate + Distribution.Client.Dependency.Modular.Version + Distribution.Client.Exec + Distribution.Client.Fetch + Distribution.Client.FetchUtils + Distribution.Client.Freeze + Distribution.Client.Get + Distribution.Client.GZipUtils + Distribution.Client.Haddock + Distribution.Client.HttpUtils + Distribution.Client.IndexUtils + Distribution.Client.Init + Distribution.Client.Init.Heuristics + Distribution.Client.Init.Licenses + Distribution.Client.Init.Types + Distribution.Client.Install + Distribution.Client.InstallPlan + Distribution.Client.InstallSymlink + Distribution.Client.JobControl + Distribution.Client.List + Distribution.Client.PackageIndex + Distribution.Client.PackageUtils + Distribution.Client.ParseUtils + Distribution.Client.Run + Distribution.Client.Sandbox + Distribution.Client.Sandbox.Index + Distribution.Client.Sandbox.PackageEnvironment + Distribution.Client.Sandbox.Timestamp + Distribution.Client.Sandbox.Types + Distribution.Client.Setup + Distribution.Client.SetupWrapper + Distribution.Client.SrcDist + Distribution.Client.Tar + Distribution.Client.Targets + Distribution.Client.Types + Distribution.Client.Update + Distribution.Client.Upload + Distribution.Client.Utils + Distribution.Client.World + Distribution.Client.Win32SelfUpgrade + Distribution.Client.Compat.Environment + Distribution.Client.Compat.ExecutablePath + Distribution.Client.Compat.FilePerms + Distribution.Client.Compat.Process + Distribution.Client.Compat.Semaphore + Distribution.Client.Compat.Time + Paths_cabal_install + + -- NOTE: when updating build-depends, don't forget to update version regexps + -- in 
bootstrap.sh. + build-depends: + array >= 0.1 && < 0.6, + base >= 4.3 && < 5, + bytestring >= 0.9 && < 1, + Cabal >= 1.22.2 && < 1.23, + containers >= 0.1 && < 0.6, + filepath >= 1.0 && < 1.5, + HTTP >= 4000.2.5 && < 4000.4, + mtl >= 2.0 && < 3, + pretty >= 1 && < 1.2, + random >= 1 && < 1.2, + stm >= 2.0 && < 3, + time >= 1.1 && < 1.7, + zlib >= 0.5.3 && < 0.7 + + if flag(old-directory) + build-depends: directory >= 1 && < 1.2, old-time >= 1 && < 1.2, + process >= 1.0.1.1 && < 1.1.0.2 + else + build-depends: directory >= 1.2 && < 1.3, + process >= 1.1.0.2 && < 1.3 + + -- NOTE: you MUST include the network dependency even when network-uri + -- is pulled in, otherwise the constraint solver doesn't have enough + -- information + if flag(network-uri) + build-depends: network-uri >= 2.6, network >= 2.6 + else + build-depends: network >= 2.4 && < 2.6 + + if os(windows) + build-depends: Win32 >= 2 && < 3 + cpp-options: -DWIN32 + else + build-depends: unix >= 2.0 && < 2.8 + + if arch(arm) && impl(ghc < 7.6) + -- older ghc on arm does not support -threaded + cc-options: -DCABAL_NO_THREADED + else + ghc-options: -threaded + + c-sources: cbits/getnumcores.c + default-language: Haskell2010 + +-- Small, fast running tests. +Test-Suite unit-tests + type: exitcode-stdio-1.0 + main-is: UnitTests.hs + hs-source-dirs: tests, . 
+ ghc-options: -Wall -fwarn-tabs + other-modules: + UnitTests.Distribution.Client.Targets + UnitTests.Distribution.Client.Dependency.Modular.PSQ + UnitTests.Distribution.Client.Sandbox + UnitTests.Distribution.Client.UserConfig + build-depends: + base, + array, + bytestring, + Cabal, + containers, + mtl, + pretty, + process, + directory, + filepath, + stm, + time, + HTTP, + zlib, + + test-framework, + test-framework-hunit, + test-framework-quickcheck2 >= 0.3, + HUnit, + QuickCheck >= 2.5 + + if flag(old-directory) + build-depends: old-time + + if flag(network-uri) + build-depends: network-uri >= 2.6, network >= 2.6 + else + build-depends: network-uri < 2.6, network < 2.6 + + if os(windows) + build-depends: Win32 + cpp-options: -DWIN32 + else + build-depends: unix + + if arch(arm) + cc-options: -DCABAL_NO_THREADED + else + ghc-options: -threaded + default-language: Haskell2010 + +-- Large, system tests that build packages. +test-suite package-tests + type: exitcode-stdio-1.0 + hs-source-dirs: tests + main-is: PackageTests.hs + other-modules: + PackageTests.Exec.Check + PackageTests.Freeze.Check + PackageTests.MultipleSource.Check + PackageTests.PackageTester + build-depends: + Cabal, + HUnit, + QuickCheck >= 2.1.0.1 && < 2.9, + base, + bytestring, + directory, + extensible-exceptions, + filepath, + process, + regex-posix, + test-framework, + test-framework-hunit, + test-framework-quickcheck2 >= 0.2.12 + + if os(windows) + build-depends: Win32 >= 2 && < 3 + cpp-options: -DWIN32 + else + build-depends: unix >= 2.0 && < 2.8 + + if arch(arm) + cc-options: -DCABAL_NO_THREADED + else + ghc-options: -threaded + + ghc-options: -Wall + default-language: Haskell2010 diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/cbits/getnumcores.c cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/cbits/getnumcores.c --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/cbits/getnumcores.c 1970-01-01 00:00:00.000000000 +0000 +++ 
cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/cbits/getnumcores.c 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,46 @@ +#if defined(__GLASGOW_HASKELL__) && (__GLASGOW_HASKELL__ >= 612) && !defined(CABAL_NO_THREADED) +/* Since version 6.12, GHC's threaded RTS includes a getNumberOfProcessors + function, so we try to use that if available. cabal-install is always built + with -threaded nowadays. */ +#define HAS_GET_NUMBER_OF_PROCESSORS +#endif + + +#ifndef HAS_GET_NUMBER_OF_PROCESSORS + +#ifdef _WIN32 +#include +#elif MACOS +#include +#include +#elif __linux__ +#include +#endif + +int getNumberOfProcessors() { +#ifdef WIN32 + SYSTEM_INFO sysinfo; + GetSystemInfo(&sysinfo); + return sysinfo.dwNumberOfProcessors; +#elif MACOS + int nm[2]; + size_t len = 4; + uint32_t count; + + nm[0] = CTL_HW; nm[1] = HW_AVAILCPU; + sysctl(nm, 2, &count, &len, NULL, 0); + + if(count < 1) { + nm[1] = HW_NCPU; + sysctl(nm, 2, &count, &len, NULL, 0); + if(count < 1) { count = 1; } + } + return count; +#elif __linux__ + return sysconf(_SC_NPROCESSORS_ONLN); +#else + return 1; +#endif +} + +#endif /* HAS_GET_NUMBER_OF_PROCESSORS */ diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/changelog cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/changelog --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/changelog 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/changelog 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,209 @@ +-*-change-log-*- +1.22.9.0 Ryan Thomas March 2016 + * Include Cabal-1.22.8.0 + +1.22.8.0 Ryan Thomas February 2016 + * Only Custom setup scripts should be compiled with '-i -i.'. + * installedCabalVersion: Don't special-case Cabal anymore. + * Bump the HTTP upper bound. See #3069. 
+ +1.22.7.0 + * Remove GZipUtils tests + * maybeDecompress: bail on all errors at the beginning of the stream with zlib < 0.6 + * Correct maybeDecompress + +1.22.6.0 Ryan Thomas June 2015 + * A fix for @ezyang's fix for #2502. (Mikhail Glushenkov) + +1.22.5.0 Ryan Thomas June 2015 + * Reduce temporary directory name length, fixes #2502. (Edward Z. Yang) + +1.22.4.0 Ryan Thomas May 2015 + * Force cabal upload to always use digest auth and never basic auth. + * Add dependency-graph information to `printPlan` output + * bootstrap.sh: fixes linker matching to avoid cases where tested linker names appear unexpectedly in compiler output (fixes #2542) + +1.22.3.0 Ryan Thomas April 2015 + * Fix bash completion for sandbox subcommands - Fixes #2513 (Mikhail Glushenkov) + * filterConfigureFlags: filter more flags (Mikhail Glushenkov) + +1.22.2.0 Ryan Thomas March 2015 + * Don't pass '--{en,dis}able-profiling' to old setup exes. + * -Wall police + * Allow filepath 1.4 + +1.21.x (current development version) + * New command: user-config (#2159). + * Implement 'cabal repl --only' (#2016). + * Fix an issue when 'cabal repl' was doing unnecessary compilation + (#1715). + * Prompt the user to specify source directory in 'cabal init' + (#1989). + * Remove the self-upgrade check (#2090). + * Don't redownload already downloaded packages when bootstrapping + (#2133). + * Support sandboxes in 'bootstrap.sh' (#2137). + * Install profiling and shared libs by default in 'bootstrap.sh' + (#2009). + +1.20.0.3 Johan Tibell June 2014 + * Don't attempt to rename dist if it is already named correctly + * Treat all flags of a package as interdependent. + * Allow template-haskell to be upgradable again + +1.20.0.2 Johan Tibell May 2014 + * Increase max-backjumps to 2000. + * Fix solver bug which led to missed install plans. + * Fix streaming test output. + * Tweak solver heuristics to avoid reinstalls. 
+ +1.20.0.1 Johan Tibell May 2014 + * Fix cabal repl search path bug on Windows + * Include OS and arch in cabal-install user agent + * Revert --constraint flag behavior in configure to 1.18 behavior + +1.20.0.0 Johan Tibell April 2014 + * Build only selected executables + * Add -j flag to build/test/bench/run + * Improve install log file + * Don't symlink executables when in a sandbox + * Add --package-db flag to 'list' and 'info' + * Make upload more efficient + * Add --require-sandbox option + * Add experimental Cabal file format command + * Add haddock section to config file + * Add --main-is flag to init + +0.14.0 Andres Loeh April 2012 + * Works with ghc-7.4 + * Completely new modular dependency solver (default in most cases) + * Some tweaks to old topdown dependency solver + * Install plans are now checked for reinstalls that break packages + * Flags --constraint and --preference work for nonexisting packages + * New constraint forms for source and installed packages + * New constraint form for package-specific use flags + * New constraint form for package-specific stanza flags + * Test suite dependencies are pulled in on demand + * No longer install packages on --enable-tests when tests fail + * New "cabal bench" command + * Various "cabal init" tweaks + +0.10.0 Duncan Coutts February 2011 + * New package targets: local dirs, local and remote tarballs + * Initial support for a "world" package target + * Partial fix for situation where user packages mask global ones + * Removed cabal upgrade, new --upgrade-dependencies flag + * New cabal install --only-dependencies flag + * New cabal fetch --no-dependencies and --dry-run flags + * Improved output for cabal info + * Simpler and faster bash command line completion + * Fix for broken proxies that decompress wrongly + * Fix for cabal unpack to preserve executable permissions + * Adjusted the output for the -v verbosity level in a few places + +0.8.2 Duncan Coutts March 2010 + * Fix for cabal update on Windows + 
* On windows switch to per-user installs (rather than global) + * Handle intra-package dependencies in dependency planning + * Minor tweaks to cabal init feature + * Fix various -Wall warnings + * Fix for cabal sdist --snapshot + +0.8.0 Duncan Coutts Dec 2009 + * Works with ghc-6.12 + * New "cabal init" command for making initial project .cabal file + * New feature to maintain an index of haddock documentation + +0.6.4 Duncan Coutts Nov 2009 + * Improve the algorithm for selecting the base package version + * Hackage errors now reported by "cabal upload [--check]" + * Improved format of messages from "cabal check" + * Config file can now be selected by an env var + * Updated tar reading/writing code + * Improve instructions in the README and bootstrap output + * Fix bootstrap.sh on Solaris 9 + * Fix bootstrap for systems where network uses parsec 3 + * Fix building with ghc-6.6 + +0.6.2 Duncan Coutts Feb 2009 + * The upgrade command has been disabled in this release + * The configure and install commands now have consistent behaviour + * Reduce the tendancy to re-install already existing packages + * The --constraint= flag now works for the install command + * New --preference= flag for soft constraints / version preferences + * Improved bootstrap.sh script, smarter and better error checking + * New cabal info command to display detailed info on packages + * New cabal unpack command to download and untar a package + * HTTP-4000 package required, should fix bugs with http proxies + * Now works with authenticated proxies. 
+ * On Windows can now override the proxy setting using an env var + * Fix compatability with config files generated by older versions + * Warn if the hackage package list is very old + * More helpful --help output, mention config file and examples + * Better documentation in ~/.cabal/config file + * Improved command line interface for logging and build reporting + * Minor improvements to some messages + +0.6.0 Duncan Coutts Oct 2008 + * Constraint solver can now cope with base 3 and base 4 + * Allow use of package version preferences from hackage index + * More detailed output from cabal install --dry-run -v + * Improved bootstrap.sh + +0.5.2 Duncan Coutts Aug 2008 + * Suport building haddock documentaion + * Self-reinstall now works on Windows + * Allow adding symlinks to excutables into a separate bindir + * New self-documenting config file + * New install --reinstall flag + * More helpful status messages in a couple places + * Upload failures now report full text error message from the server + * Support for local package repositories + * New build logging and reporting + * New command to upload build reports to (a compatible) server + * Allow tilde in hackage server URIs + * Internal code improvements + * Many other minor improvements and bug fixes + +0.5.1 Duncan Coutts June 2008 + * Restore minimal hugs support in dependency resolver + * Fix for disabled http proxies on Windows + * Revert to global installs on Windows by default + +0.5.0 Duncan Coutts June 2008 + * New package dependency resolver, solving diamond dep problem + * Integrate cabal-setup functionality + * Integrate cabal-upload functionality + * New cabal update and check commands + * Improved behavior for install and upgrade commands + * Full Windows support + * New command line handling + * Bash command line completion + * Allow case insensitive package names on command line + * New --dry-run flag for install, upgrade and fetch commands + * New --root-cmd flag to allow installing as root + * 
New --cabal-lib-version flag to select different Cabal lib versions + * Support for HTTP proxies + * Improved cabal list output + * Build other non-dependent packages even when some fail + * Report a summary of all build failures at the end + * Partial support for hugs + * Partial implementation of build reporting and logging + * More consistent logging and verbosity + * Significant internal code restructuring + +0.4 Duncan Coutts Oct 2007 + * Renamed executable from 'cabal-install' to 'cabal' + * Partial Windows compatability + * Do per-user installs by default + * cabal install now installs the package in the current directory + * Allow multiple remote servers + * Use zlib lib and internal tar code and rather than external tar + * Reorganised configuration files + * Significant code restructuring + * Cope with packages with conditional dependencies + +0.3 and older versions by Lemmih, Paolo Martini and others 2006-2007 + * Switch from smart-server, dumb-client model to the reverse + * New .tar.gz based index format + * New remote and local package archive format diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/BuildReports/Anonymous.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/BuildReports/Anonymous.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/BuildReports/Anonymous.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/BuildReports/Anonymous.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,316 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.Reporting +-- Copyright : (c) David Waern 2008 +-- License : BSD-like +-- +-- Maintainer : david.waern@gmail.com +-- Stability : experimental +-- Portability : portable +-- +-- Anonymous build report data structure, printing and parsing +-- 
+----------------------------------------------------------------------------- +module Distribution.Client.BuildReports.Anonymous ( + BuildReport(..), + InstallOutcome(..), + Outcome(..), + + -- * Constructing and writing reports + new, + + -- * parsing and pretty printing + parse, + parseList, + show, +-- showList, + ) where + +import qualified Distribution.Client.Types as BR + ( BuildResult, BuildFailure(..), BuildSuccess(..) + , DocsResult(..), TestsResult(..) ) +import Distribution.Client.Utils + ( mergeBy, MergeResult(..) ) +import qualified Paths_cabal_install (version) + +import Distribution.Package + ( PackageIdentifier(..), PackageName(..) ) +import Distribution.PackageDescription + ( FlagName(..), FlagAssignment ) +--import Distribution.Version +-- ( Version ) +import Distribution.System + ( OS, Arch ) +import Distribution.Compiler + ( CompilerId(..) ) +import qualified Distribution.Text as Text + ( Text(disp, parse) ) +import Distribution.ParseUtils + ( FieldDescr(..), ParseResult(..), Field(..) 
+ , simpleField, listField, ppFields, readFields + , syntaxError, locatedErrorMsg ) +import Distribution.Simple.Utils + ( comparing ) + +import qualified Distribution.Compat.ReadP as Parse + ( ReadP, pfail, munch1, skipSpaces ) +import qualified Text.PrettyPrint as Disp + ( Doc, render, char, text ) +import Text.PrettyPrint + ( (<+>), (<>) ) + +import Data.List + ( unfoldr, sortBy ) +import Data.Char as Char + ( isAlpha, isAlphaNum ) + +import Prelude hiding (show) + +data BuildReport + = BuildReport { + -- | The package this build report is about + package :: PackageIdentifier, + + -- | The OS and Arch the package was built on + os :: OS, + arch :: Arch, + + -- | The Haskell compiler (and hopefully version) used + compiler :: CompilerId, + + -- | The uploading client, ie cabal-install-x.y.z + client :: PackageIdentifier, + + -- | Which configurations flags we used + flagAssignment :: FlagAssignment, + + -- | Which dependent packages we were using exactly + dependencies :: [PackageIdentifier], + + -- | Did installing work ok? + installOutcome :: InstallOutcome, + + -- Which version of the Cabal library was used to compile the Setup.hs +-- cabalVersion :: Version, + + -- Which build tools we were using (with versions) +-- tools :: [PackageIdentifier], + + -- | Configure outcome, did configure work ok? + docsOutcome :: Outcome, + + -- | Configure outcome, did configure work ok? 
+ testsOutcome :: Outcome + } + +data InstallOutcome + = PlanningFailed + | DependencyFailed PackageIdentifier + | DownloadFailed + | UnpackFailed + | SetupFailed + | ConfigureFailed + | BuildFailed + | TestsFailed + | InstallFailed + | InstallOk + deriving Eq + +data Outcome = NotTried | Failed | Ok + deriving Eq + +new :: OS -> Arch -> CompilerId -> PackageIdentifier -> FlagAssignment + -> [PackageIdentifier] -> BR.BuildResult -> BuildReport +new os' arch' comp pkgid flags deps result = + BuildReport { + package = pkgid, + os = os', + arch = arch', + compiler = comp, + client = cabalInstallID, + flagAssignment = flags, + dependencies = deps, + installOutcome = convertInstallOutcome, +-- cabalVersion = undefined + docsOutcome = convertDocsOutcome, + testsOutcome = convertTestsOutcome + } + where + convertInstallOutcome = case result of + Left BR.PlanningFailed -> PlanningFailed + Left (BR.DependentFailed p) -> DependencyFailed p + Left (BR.DownloadFailed _) -> DownloadFailed + Left (BR.UnpackFailed _) -> UnpackFailed + Left (BR.ConfigureFailed _) -> ConfigureFailed + Left (BR.BuildFailed _) -> BuildFailed + Left (BR.TestsFailed _) -> TestsFailed + Left (BR.InstallFailed _) -> InstallFailed + Right (BR.BuildOk _ _ _) -> InstallOk + convertDocsOutcome = case result of + Left _ -> NotTried + Right (BR.BuildOk BR.DocsNotTried _ _) -> NotTried + Right (BR.BuildOk BR.DocsFailed _ _) -> Failed + Right (BR.BuildOk BR.DocsOk _ _) -> Ok + convertTestsOutcome = case result of + Left (BR.TestsFailed _) -> Failed + Left _ -> NotTried + Right (BR.BuildOk _ BR.TestsNotTried _) -> NotTried + Right (BR.BuildOk _ BR.TestsOk _) -> Ok + +cabalInstallID :: PackageIdentifier +cabalInstallID = + PackageIdentifier (PackageName "cabal-install") Paths_cabal_install.version + +-- ------------------------------------------------------------ +-- * External format +-- ------------------------------------------------------------ + +initialBuildReport :: BuildReport +initialBuildReport = 
BuildReport { + package = requiredField "package", + os = requiredField "os", + arch = requiredField "arch", + compiler = requiredField "compiler", + client = requiredField "client", + flagAssignment = [], + dependencies = [], + installOutcome = requiredField "install-outcome", +-- cabalVersion = Nothing, +-- tools = [], + docsOutcome = NotTried, + testsOutcome = NotTried + } + where + requiredField fname = error ("required field: " ++ fname) + +-- ----------------------------------------------------------------------------- +-- Parsing + +parse :: String -> Either String BuildReport +parse s = case parseFields s of + ParseFailed perror -> Left msg where (_, msg) = locatedErrorMsg perror + ParseOk _ report -> Right report + +--FIXME: this does not allow for optional or repeated fields +parseFields :: String -> ParseResult BuildReport +parseFields input = do + fields <- mapM extractField =<< readFields input + let merged = mergeBy (\desc (_,name,_) -> compare (fieldName desc) name) + sortedFieldDescrs + (sortBy (comparing (\(_,name,_) -> name)) fields) + checkMerged initialBuildReport merged + + where + extractField :: Field -> ParseResult (Int, String, String) + extractField (F line name value) = return (line, name, value) + extractField (Section line _ _ _) = syntaxError line "Unrecognized stanza" + extractField (IfBlock line _ _ _) = syntaxError line "Unrecognized stanza" + + checkMerged report [] = return report + checkMerged report (merged:remaining) = case merged of + InBoth fieldDescr (line, _name, value) -> do + report' <- fieldSet fieldDescr line value report + checkMerged report' remaining + OnlyInRight (line, name, _) -> + syntaxError line ("Unrecognized field " ++ name) + OnlyInLeft fieldDescr -> + fail ("Missing field " ++ fieldName fieldDescr) + +parseList :: String -> [BuildReport] +parseList str = + [ report | Right report <- map parse (split str) ] + + where + split :: String -> [String] + split = filter (not . null) . unfoldr chunk . 
lines + chunk [] = Nothing + chunk ls = case break null ls of + (r, rs) -> Just (unlines r, dropWhile null rs) + +-- ----------------------------------------------------------------------------- +-- Pretty-printing + +show :: BuildReport -> String +show = Disp.render . ppFields fieldDescrs + +-- ----------------------------------------------------------------------------- +-- Description of the fields, for parsing/printing + +fieldDescrs :: [FieldDescr BuildReport] +fieldDescrs = + [ simpleField "package" Text.disp Text.parse + package (\v r -> r { package = v }) + , simpleField "os" Text.disp Text.parse + os (\v r -> r { os = v }) + , simpleField "arch" Text.disp Text.parse + arch (\v r -> r { arch = v }) + , simpleField "compiler" Text.disp Text.parse + compiler (\v r -> r { compiler = v }) + , simpleField "client" Text.disp Text.parse + client (\v r -> r { client = v }) + , listField "flags" dispFlag parseFlag + flagAssignment (\v r -> r { flagAssignment = v }) + , listField "dependencies" Text.disp Text.parse + dependencies (\v r -> r { dependencies = v }) + , simpleField "install-outcome" Text.disp Text.parse + installOutcome (\v r -> r { installOutcome = v }) + , simpleField "docs-outcome" Text.disp Text.parse + docsOutcome (\v r -> r { docsOutcome = v }) + , simpleField "tests-outcome" Text.disp Text.parse + testsOutcome (\v r -> r { testsOutcome = v }) + ] + +sortedFieldDescrs :: [FieldDescr BuildReport] +sortedFieldDescrs = sortBy (comparing fieldName) fieldDescrs + +dispFlag :: (FlagName, Bool) -> Disp.Doc +dispFlag (FlagName name, True) = Disp.text name +dispFlag (FlagName name, False) = Disp.char '-' <> Disp.text name + +parseFlag :: Parse.ReadP r (FlagName, Bool) +parseFlag = do + name <- Parse.munch1 (\c -> Char.isAlphaNum c || c == '_' || c == '-') + case name of + ('-':flag) -> return (FlagName flag, False) + flag -> return (FlagName flag, True) + +instance Text.Text InstallOutcome where + disp PlanningFailed = Disp.text "PlanningFailed" + disp 
(DependencyFailed pkgid) = Disp.text "DependencyFailed" <+> Text.disp pkgid + disp DownloadFailed = Disp.text "DownloadFailed" + disp UnpackFailed = Disp.text "UnpackFailed" + disp SetupFailed = Disp.text "SetupFailed" + disp ConfigureFailed = Disp.text "ConfigureFailed" + disp BuildFailed = Disp.text "BuildFailed" + disp TestsFailed = Disp.text "TestsFailed" + disp InstallFailed = Disp.text "InstallFailed" + disp InstallOk = Disp.text "InstallOk" + + parse = do + name <- Parse.munch1 Char.isAlphaNum + case name of + "PlanningFailed" -> return PlanningFailed + "DependencyFailed" -> do Parse.skipSpaces + pkgid <- Text.parse + return (DependencyFailed pkgid) + "DownloadFailed" -> return DownloadFailed + "UnpackFailed" -> return UnpackFailed + "SetupFailed" -> return SetupFailed + "ConfigureFailed" -> return ConfigureFailed + "BuildFailed" -> return BuildFailed + "TestsFailed" -> return TestsFailed + "InstallFailed" -> return InstallFailed + "InstallOk" -> return InstallOk + _ -> Parse.pfail + +instance Text.Text Outcome where + disp NotTried = Disp.text "NotTried" + disp Failed = Disp.text "Failed" + disp Ok = Disp.text "Ok" + parse = do + name <- Parse.munch1 Char.isAlpha + case name of + "NotTried" -> return NotTried + "Failed" -> return Failed + "Ok" -> return Ok + _ -> Parse.pfail diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/BuildReports/Storage.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/BuildReports/Storage.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/BuildReports/Storage.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/BuildReports/Storage.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,152 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.Reporting +-- Copyright : (c) David Waern 2008 +-- License : BSD-like +-- 
+-- Maintainer : david.waern@gmail.com +-- Stability : experimental +-- Portability : portable +-- +-- Anonymous build report data structure, printing and parsing +-- +----------------------------------------------------------------------------- +module Distribution.Client.BuildReports.Storage ( + + -- * Storing and retrieving build reports + storeAnonymous, + storeLocal, +-- retrieve, + + -- * 'InstallPlan' support + fromInstallPlan, + fromPlanningFailure, + ) where + +import qualified Distribution.Client.BuildReports.Anonymous as BuildReport +import Distribution.Client.BuildReports.Anonymous (BuildReport) + +import Distribution.Client.Types +import qualified Distribution.Client.InstallPlan as InstallPlan +import Distribution.Client.InstallPlan + ( InstallPlan ) + +import Distribution.Package + ( PackageId, packageId ) +import Distribution.PackageDescription + ( FlagAssignment ) +import Distribution.Simple.InstallDirs + ( PathTemplate, fromPathTemplate + , initialPathTemplateEnv, substPathTemplate ) +import Distribution.System + ( Platform(Platform) ) +import Distribution.Compiler + ( CompilerId(..), CompilerInfo(..) ) +import Distribution.Simple.Utils + ( comparing, equating ) + +import Data.List + ( groupBy, sortBy ) +import Data.Maybe + ( catMaybes ) +import System.FilePath + ( (), takeDirectory ) +import System.Directory + ( createDirectoryIfMissing ) + +storeAnonymous :: [(BuildReport, Maybe Repo)] -> IO () +storeAnonymous reports = sequence_ + [ appendFile file (concatMap format reports') + | (repo, reports') <- separate reports + , let file = repoLocalDir repo "build-reports.log" ] + --TODO: make this concurrency safe, either lock the report file or make sure + -- the writes for each report are atomic (under 4k and flush at boundaries) + + where + format r = '\n' : BuildReport.show r ++ "\n" + separate :: [(BuildReport, Maybe Repo)] + -> [(Repo, [BuildReport])] + separate = map (\rs@((_,repo,_):_) -> (repo, [ r | (r,_,_) <- rs ])) + . map concat + . 
groupBy (equating (repoName . head)) + . sortBy (comparing (repoName . head)) + . groupBy (equating repoName) + . onlyRemote + repoName (_,_,rrepo) = remoteRepoName rrepo + + onlyRemote :: [(BuildReport, Maybe Repo)] -> [(BuildReport, Repo, RemoteRepo)] + onlyRemote rs = + [ (report, repo, remoteRepo) + | (report, Just repo@Repo { repoKind = Left remoteRepo }) <- rs ] + +storeLocal :: CompilerInfo -> [PathTemplate] -> [(BuildReport, Maybe Repo)] + -> Platform -> IO () +storeLocal cinfo templates reports platform = sequence_ + [ do createDirectoryIfMissing True (takeDirectory file) + appendFile file output + --TODO: make this concurrency safe, either lock the report file or make + -- sure the writes for each report are atomic + | (file, reports') <- groupByFileName + [ (reportFileName template report, report) + | template <- templates + , (report, _repo) <- reports ] + , let output = concatMap format reports' + ] + where + format r = '\n' : BuildReport.show r ++ "\n" + + reportFileName template report = + fromPathTemplate (substPathTemplate env template) + where env = initialPathTemplateEnv + (BuildReport.package report) + -- ToDo: In principle, we can support $pkgkey, but only + -- if the configure step succeeds. So add a Maybe field + -- to the build report, and either use that or make up + -- a fake identifier if it's not available. + (error "storeLocal: package key not available") + cinfo + platform + + groupByFileName = map (\grp@((filename,_):_) -> (filename, map snd grp)) + . groupBy (equating fst) + . sortBy (comparing fst) + +-- ------------------------------------------------------------ +-- * InstallPlan support +-- ------------------------------------------------------------ + +fromInstallPlan :: InstallPlan -> [(BuildReport, Maybe Repo)] +fromInstallPlan plan = catMaybes + . map (fromPlanPackage platform comp) + . 
InstallPlan.toList + $ plan + where platform = InstallPlan.planPlatform plan + comp = compilerInfoId (InstallPlan.planCompiler plan) + +fromPlanPackage :: Platform -> CompilerId + -> InstallPlan.PlanPackage + -> Maybe (BuildReport, Maybe Repo) +fromPlanPackage (Platform arch os) comp planPackage = case planPackage of + InstallPlan.Installed (ReadyPackage srcPkg flags _ deps) result + -> Just $ ( BuildReport.new os arch comp + (packageId srcPkg) flags (map packageId deps) + (Right result) + , extractRepo srcPkg) + + InstallPlan.Failed (ConfiguredPackage srcPkg flags _ deps) result + -> Just $ ( BuildReport.new os arch comp + (packageId srcPkg) flags deps + (Left result) + , extractRepo srcPkg ) + + _ -> Nothing + + where + extractRepo (SourcePackage { packageSource = RepoTarballPackage repo _ _ }) = Just repo + extractRepo _ = Nothing + +fromPlanningFailure :: Platform -> CompilerId + -> [PackageId] -> FlagAssignment -> [(BuildReport, Maybe Repo)] +fromPlanningFailure (Platform arch os) comp pkgids flags = + [ (BuildReport.new os arch comp pkgid flags [] (Left PlanningFailed), Nothing) + | pkgid <- pkgids ] diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/BuildReports/Types.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/BuildReports/Types.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/BuildReports/Types.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/BuildReports/Types.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,44 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.BuildReports.Types +-- Copyright : (c) Duncan Coutts 2009 +-- License : BSD-like +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- Types related to build reporting +-- 
+----------------------------------------------------------------------------- +module Distribution.Client.BuildReports.Types ( + ReportLevel(..), + ) where + +import qualified Distribution.Text as Text + ( Text(..) ) + +import qualified Distribution.Compat.ReadP as Parse + ( pfail, munch1 ) +import qualified Text.PrettyPrint as Disp + ( text ) + +import Data.Char as Char + ( isAlpha, toLower ) + +data ReportLevel = NoReports | AnonymousReports | DetailedReports + deriving (Eq, Ord, Show) + +instance Text.Text ReportLevel where + disp NoReports = Disp.text "none" + disp AnonymousReports = Disp.text "anonymous" + disp DetailedReports = Disp.text "detailed" + parse = do + name <- Parse.munch1 Char.isAlpha + case lowercase name of + "none" -> return NoReports + "anonymous" -> return AnonymousReports + "detailed" -> return DetailedReports + _ -> Parse.pfail + +lowercase :: String -> String +lowercase = map Char.toLower diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/BuildReports/Upload.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/BuildReports/Upload.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/BuildReports/Upload.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/BuildReports/Upload.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,80 @@ +{-# LANGUAGE CPP, PatternGuards #-} +-- This is a quick hack for uploading build reports to Hackage. + +module Distribution.Client.BuildReports.Upload + ( BuildLog + , BuildReportId + , uploadReports + , postBuildReport + , putBuildLog + ) where + +import Network.Browser + ( BrowserAction, request, setAllowRedirects ) +import Network.HTTP + ( Header(..), HeaderName(..) + , Request(..), RequestMethod(..), Response(..) 
) +import Network.TCP (HandleStream) +import Network.URI (URI, uriPath, parseRelativeReference, relativeTo) + +import Control.Monad + ( forM_ ) +import System.FilePath.Posix + ( () ) +import qualified Distribution.Client.BuildReports.Anonymous as BuildReport +import Distribution.Client.BuildReports.Anonymous (BuildReport) +import Distribution.Text (display) + +type BuildReportId = URI +type BuildLog = String + +uploadReports :: URI -> [(BuildReport, Maybe BuildLog)] + -> BrowserAction (HandleStream BuildLog) () +uploadReports uri reports = do + forM_ reports $ \(report, mbBuildLog) -> do + buildId <- postBuildReport uri report + case mbBuildLog of + Just buildLog -> putBuildLog buildId buildLog + Nothing -> return () + +postBuildReport :: URI -> BuildReport + -> BrowserAction (HandleStream BuildLog) BuildReportId +postBuildReport uri buildReport = do + setAllowRedirects False + (_, response) <- request Request { + rqURI = uri { uriPath = "/package" display (BuildReport.package buildReport) "reports" }, + rqMethod = POST, + rqHeaders = [Header HdrContentType ("text/plain"), + Header HdrContentLength (show (length body)), + Header HdrAccept ("text/plain")], + rqBody = body + } + case rspCode response of + (3,0,3) | [Just buildId] <- [ do rel <- parseRelativeReference location +#if defined(VERSION_network_uri) + return $ relativeTo rel uri +#elif defined(VERSION_network) +#if MIN_VERSION_network(2,4,0) + return $ relativeTo rel uri +#else + relativeTo rel uri +#endif +#endif + | Header HdrLocation location <- rspHeaders response ] + -> return $ buildId + _ -> error "Unrecognised response from server." 
+ where body = BuildReport.show buildReport + +putBuildLog :: BuildReportId -> BuildLog + -> BrowserAction (HandleStream BuildLog) () +putBuildLog reportId buildLog = do + --FIXME: do something if the request fails + (_, _response) <- request Request { + rqURI = reportId{uriPath = uriPath reportId "log"}, + rqMethod = PUT, + rqHeaders = [Header HdrContentType ("text/plain"), + Header HdrContentLength (show (length buildLog)), + Header HdrAccept ("text/plain")], + rqBody = buildLog + } + return () diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Check.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Check.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Check.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Check.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,85 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.Check +-- Copyright : (c) Lennart Kolmodin 2008 +-- License : BSD-like +-- +-- Maintainer : kolmodin@haskell.org +-- Stability : provisional +-- Portability : portable +-- +-- Check a package for common mistakes +-- +----------------------------------------------------------------------------- +module Distribution.Client.Check ( + check + ) where + +import Control.Monad ( when, unless ) + +import Distribution.PackageDescription.Parse + ( readPackageDescription ) +import Distribution.PackageDescription.Check +import Distribution.PackageDescription.Configuration + ( flattenPackageDescription ) +import Distribution.Verbosity + ( Verbosity ) +import Distribution.Simple.Utils + ( defaultPackageDesc, toUTF8, wrapText ) + +check :: Verbosity -> IO Bool +check verbosity = do + pdfile <- defaultPackageDesc verbosity + ppd <- readPackageDescription verbosity pdfile + -- flatten the generic package description into a regular package 
+ -- description + -- TODO: this may give more warnings than it should give; + -- consider two branches of a condition, one saying + -- ghc-options: -Wall + -- and the other + -- ghc-options: -Werror + -- joined into + -- ghc-options: -Wall -Werror + -- checkPackages will yield a warning on the last line, but it + -- would not on each individual branch. + -- Hovever, this is the same way hackage does it, so we will yield + -- the exact same errors as it will. + let pkg_desc = flattenPackageDescription ppd + ioChecks <- checkPackageFiles pkg_desc "." + let packageChecks = ioChecks ++ checkPackage ppd (Just pkg_desc) + buildImpossible = [ x | x@PackageBuildImpossible {} <- packageChecks ] + buildWarning = [ x | x@PackageBuildWarning {} <- packageChecks ] + distSuspicious = [ x | x@PackageDistSuspicious {} <- packageChecks ] + distInexusable = [ x | x@PackageDistInexcusable {} <- packageChecks ] + + unless (null buildImpossible) $ do + putStrLn "The package will not build sanely due to these errors:" + printCheckMessages buildImpossible + + unless (null buildWarning) $ do + putStrLn "The following warnings are likely affect your build negatively:" + printCheckMessages buildWarning + + unless (null distSuspicious) $ do + putStrLn "These warnings may cause trouble when distributing the package:" + printCheckMessages distSuspicious + + unless (null distInexusable) $ do + putStrLn "The following errors will cause portability problems on other environments:" + printCheckMessages distInexusable + + let isDistError (PackageDistSuspicious {}) = False + isDistError _ = True + errors = filter isDistError packageChecks + + unless (null errors) $ + putStrLn "Hackage would reject this package." + + when (null packageChecks) $ + putStrLn "No errors or warnings could be found in the package." + + return (null packageChecks) + + where + printCheckMessages = mapM_ (putStrLn . format . explanation) + format = toUTF8 . wrapText . 
("* "++) diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Compat/Environment.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Compat/Environment.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Compat/Environment.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Compat/Environment.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,87 @@ +{-# LANGUAGE CPP, ForeignFunctionInterface #-} + +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.Compat.Environment +-- Copyright : (c) Simon Hengel 2012 +-- License : BSD-style (see the file LICENSE) +-- +-- Maintainer : cabal-devel@haskell.org +-- Stability : provisional +-- Portability : portable +-- +-- A cross-platform library for setting environment variables. +-- +----------------------------------------------------------------------------- + +module Distribution.Client.Compat.Environment ( + lookupEnv, setEnv +) where + +#ifdef mingw32_HOST_OS +import GHC.Windows +import Foreign.C +import Control.Monad +#else +import Foreign.C.Types +import Foreign.C.String +import Foreign.C.Error (throwErrnoIfMinus1_) +import System.Posix.Internals ( withFilePath ) +#endif /* mingw32_HOST_OS */ + +#if MIN_VERSION_base(4,6,0) +import System.Environment (lookupEnv) +#else +import System.Environment (getEnv) +import Distribution.Compat.Exception (catchIO) +#endif + +#if !MIN_VERSION_base(4,6,0) +-- | @lookupEnv var@ returns the value of the environment variable @var@, or +-- @Nothing@ if there is no such value. +lookupEnv :: String -> IO (Maybe String) +lookupEnv name = (Just `fmap` getEnv name) `catchIO` const (return Nothing) +#endif /* !MIN_VERSION_base(4,6,0) */ + +-- | @setEnv name value@ sets the specified environment variable to @value@. 
+-- +-- Throws `Control.Exception.IOException` if either @name@ or @value@ is the +-- empty string or contains an equals sign. +setEnv :: String -> String -> IO () +setEnv key value_ + | null value = error "Distribuiton.Compat.setEnv: empty string" + | otherwise = setEnv_ key value + where + -- NOTE: Anything that follows NUL is ignored on both POSIX and Windows. We + -- still strip it manually so that the null check above succeeds if a value + -- starts with NUL. + value = takeWhile (/= '\NUL') value_ + +setEnv_ :: String -> String -> IO () + +#ifdef mingw32_HOST_OS + +setEnv_ key value = withCWString key $ \k -> withCWString value $ \v -> do + success <- c_SetEnvironmentVariable k v + unless success (throwGetLastError "setEnv") + +# if defined(i386_HOST_ARCH) +# define WINDOWS_CCONV stdcall +# elif defined(x86_64_HOST_ARCH) +# define WINDOWS_CCONV ccall +# else +# error Unknown mingw32 arch +# endif /* i386_HOST_ARCH */ + +foreign import WINDOWS_CCONV unsafe "windows.h SetEnvironmentVariableW" + c_SetEnvironmentVariable :: LPTSTR -> LPTSTR -> IO Bool +#else +setEnv_ key value = do + withFilePath key $ \ keyP -> + withFilePath value $ \ valueP -> + throwErrnoIfMinus1_ "setenv" $ + c_setenv keyP valueP (fromIntegral (fromEnum True)) + +foreign import ccall unsafe "setenv" + c_setenv :: CString -> CString -> CInt -> IO CInt +#endif /* mingw32_HOST_OS */ diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Compat/ExecutablePath.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Compat/ExecutablePath.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Compat/ExecutablePath.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Compat/ExecutablePath.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,183 @@ +{-# LANGUAGE ForeignFunctionInterface #-} +{-# LANGUAGE CPP #-} + +-- Copied verbatim from base-4.6.0.0. 
We can't simply import +-- System.Environment.getExecutablePath because we need compatibility with older +-- GHCs. + +module Distribution.Client.Compat.ExecutablePath ( getExecutablePath ) where + +-- The imports are purposely kept completely disjoint to prevent edits +-- to one OS implementation from breaking another. + +#if defined(darwin_HOST_OS) +import Data.Word +import Foreign.C +import Foreign.Marshal.Alloc +import Foreign.Ptr +import Foreign.Storable +import System.Posix.Internals +#elif defined(linux_HOST_OS) +import Foreign.C +import Foreign.Marshal.Array +import System.Posix.Internals +#elif defined(mingw32_HOST_OS) +import Data.Word +import Foreign.C +import Foreign.Marshal.Array +import Foreign.Ptr +import System.Posix.Internals +#else +import Foreign.C +import Foreign.Marshal.Alloc +import Foreign.Ptr +import Foreign.Storable +import System.Posix.Internals +#endif + +-- GHC 7.0.* compatibility. 'System.Posix.Internals' in base-4.3.* doesn't +-- provide 'peekFilePath' and 'peekFilePathLen'. +#if !MIN_VERSION_base(4,4,0) +#ifdef mingw32_HOST_OS + +peekFilePath :: CWString -> IO FilePath +peekFilePath = peekCWString + +#else + +peekFilePath :: CString -> IO FilePath +peekFilePath = peekCString + +peekFilePathLen :: CStringLen -> IO FilePath +peekFilePathLen = peekCStringLen + +#endif +#endif + +-- The exported function is defined outside any if-guard to make sure +-- every OS implements it with the same type. + +-- | Returns the absolute pathname of the current executable. +-- +-- Note that for scripts and interactive sessions, this is the path to +-- the interpreter (e.g. ghci.) 
+-- +-- /Since: 4.6.0.0/ +getExecutablePath :: IO FilePath + +-------------------------------------------------------------------------------- +-- Mac OS X + +#if defined(darwin_HOST_OS) + +type UInt32 = Word32 + +foreign import ccall unsafe "mach-o/dyld.h _NSGetExecutablePath" + c__NSGetExecutablePath :: CString -> Ptr UInt32 -> IO CInt + +-- | Returns the path of the main executable. The path may be a +-- symbolic link and not the real file. +-- +-- See dyld(3) +_NSGetExecutablePath :: IO FilePath +_NSGetExecutablePath = + allocaBytes 1024 $ \ buf -> -- PATH_MAX is 1024 on OS X + alloca $ \ bufsize -> do + poke bufsize 1024 + status <- c__NSGetExecutablePath buf bufsize + if status == 0 + then peekFilePath buf + else do reqBufsize <- fromIntegral `fmap` peek bufsize + allocaBytes reqBufsize $ \ newBuf -> do + status2 <- c__NSGetExecutablePath newBuf bufsize + if status2 == 0 + then peekFilePath newBuf + else error "_NSGetExecutablePath: buffer too small" + +foreign import ccall unsafe "stdlib.h realpath" + c_realpath :: CString -> CString -> IO CString + +-- | Resolves all symbolic links, extra \/ characters, and references +-- to \/.\/ and \/..\/. Returns an absolute pathname. +-- +-- See realpath(3) +realpath :: FilePath -> IO FilePath +realpath path = + withFilePath path $ \ fileName -> + allocaBytes 1024 $ \ resolvedName -> do + _ <- throwErrnoIfNull "realpath" $ c_realpath fileName resolvedName + peekFilePath resolvedName + +getExecutablePath = _NSGetExecutablePath >>= realpath + +-------------------------------------------------------------------------------- +-- Linux + +#elif defined(linux_HOST_OS) + +foreign import ccall unsafe "readlink" + c_readlink :: CString -> CString -> CSize -> IO CInt + +-- | Reads the @FilePath@ pointed to by the symbolic link and returns +-- it. 
+-- +-- See readlink(2) +readSymbolicLink :: FilePath -> IO FilePath +readSymbolicLink file = + allocaArray0 4096 $ \buf -> do + withFilePath file $ \s -> do + len <- throwErrnoPathIfMinus1 "readSymbolicLink" file $ + c_readlink s buf 4096 + peekFilePathLen (buf,fromIntegral len) + +getExecutablePath = readSymbolicLink $ "/proc/self/exe" + +-------------------------------------------------------------------------------- +-- Windows + +#elif defined(mingw32_HOST_OS) + +# if defined(i386_HOST_ARCH) +# define WINDOWS_CCONV stdcall +# elif defined(x86_64_HOST_ARCH) +# define WINDOWS_CCONV ccall +# else +# error Unknown mingw32 arch +# endif + +foreign import WINDOWS_CCONV unsafe "windows.h GetModuleFileNameW" + c_GetModuleFileName :: Ptr () -> CWString -> Word32 -> IO Word32 + +getExecutablePath = go 2048 -- plenty, PATH_MAX is 512 under Win32 + where + go size = allocaArray (fromIntegral size) $ \ buf -> do + ret <- c_GetModuleFileName nullPtr buf size + case ret of + 0 -> error "getExecutablePath: GetModuleFileNameW returned an error" + _ | ret < size -> peekFilePath buf + | otherwise -> go (size * 2) + +-------------------------------------------------------------------------------- +-- Fallback to argv[0] + +#else + +foreign import ccall unsafe "getFullProgArgv" + c_getFullProgArgv :: Ptr CInt -> Ptr (Ptr CString) -> IO () + +getExecutablePath = + alloca $ \ p_argc -> + alloca $ \ p_argv -> do + c_getFullProgArgv p_argc p_argv + argc <- peek p_argc + if argc > 0 + -- If argc > 0 then argv[0] is guaranteed by the standard + -- to be a pointer to a null-terminated string. 
+ then peek p_argv >>= peek >>= peekFilePath + else error $ "getExecutablePath: " ++ msg + where msg = "no OS specific implementation and program name couldn't be " ++ + "found in argv" + +-------------------------------------------------------------------------------- + +#endif diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Compat/FilePerms.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Compat/FilePerms.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Compat/FilePerms.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Compat/FilePerms.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,36 @@ +{-# LANGUAGE CPP #-} +{-# OPTIONS_HADDOCK hide #-} +module Distribution.Client.Compat.FilePerms ( + setFileOrdinary, + setFileExecutable, + setFileHidden, + ) where + +#ifndef mingw32_HOST_OS +import System.Posix.Types + ( FileMode ) +import System.Posix.Internals + ( c_chmod ) +import Foreign.C + ( withCString ) +import Foreign.C + ( throwErrnoPathIfMinus1_ ) +#else +import System.Win32.File (setFileAttributes, fILE_ATTRIBUTE_HIDDEN) +#endif /* mingw32_HOST_OS */ + +setFileHidden, setFileOrdinary, setFileExecutable :: FilePath -> IO () +#ifndef mingw32_HOST_OS +setFileOrdinary path = setFileMode path 0o644 -- file perms -rw-r--r-- +setFileExecutable path = setFileMode path 0o755 -- file perms -rwxr-xr-x +setFileHidden _ = return () + +setFileMode :: FilePath -> FileMode -> IO () +setFileMode name m = + withCString name $ \s -> + throwErrnoPathIfMinus1_ "setFileMode" name (c_chmod s m) +#else +setFileOrdinary _ = return () +setFileExecutable _ = return () +setFileHidden path = setFileAttributes path fILE_ATTRIBUTE_HIDDEN +#endif diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Compat/Process.hs 
cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Compat/Process.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Compat/Process.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Compat/Process.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,48 @@ +{-# LANGUAGE CPP #-} + +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.Compat.Process +-- Copyright : (c) 2013 Liu Hao, Brent Yorgey +-- License : BSD-style (see the file LICENSE) +-- +-- Maintainer : cabal-devel@haskell.org +-- Stability : provisional +-- Portability : portable +-- +-- Cross-platform utilities for invoking processes. +-- +----------------------------------------------------------------------------- + +module Distribution.Client.Compat.Process ( + readProcessWithExitCode +) where + +#if !MIN_VERSION_base(4,6,0) +import Prelude hiding (catch) +#endif + +import Control.Exception (catch, throw) +import System.Exit (ExitCode (ExitFailure)) +import System.IO.Error (isDoesNotExistError) +import qualified System.Process as P + +-- | @readProcessWithExitCode@ creates an external process, reads its +-- standard output and standard error strictly, waits until the +-- process terminates, and then returns the @ExitCode@ of the +-- process, the standard output, and the standard error. +-- +-- See the documentation of the version from @System.Process@ for +-- more information. +-- +-- The version from @System.Process@ behaves inconsistently across +-- platforms when an executable with the given name is not found: in +-- some cases it returns an @ExitFailure@, in others it throws an +-- exception. This variant catches \"does not exist\" exceptions and +-- turns them into @ExitFailure@s. 
+readProcessWithExitCode :: FilePath -> [String] -> String -> IO (ExitCode, String, String) +readProcessWithExitCode cmd args input = + P.readProcessWithExitCode cmd args input + `catch` \e -> if isDoesNotExistError e + then return (ExitFailure 127, "", "") + else throw e diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Compat/Semaphore.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Compat/Semaphore.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Compat/Semaphore.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Compat/Semaphore.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,104 @@ +{-# LANGUAGE DeriveDataTypeable #-} +{-# OPTIONS_GHC -funbox-strict-fields #-} +module Distribution.Client.Compat.Semaphore + ( QSem + , newQSem + , waitQSem + , signalQSem + ) where + +import Control.Concurrent.STM (TVar, atomically, newTVar, readTVar, retry, + writeTVar) +import Control.Exception (mask_, onException) +import Control.Monad (join, when) +import Data.Typeable (Typeable) + +-- | 'QSem' is a quantity semaphore in which the resource is aqcuired +-- and released in units of one. It provides guaranteed FIFO ordering +-- for satisfying blocked `waitQSem` calls. +-- +data QSem = QSem !(TVar Int) !(TVar [TVar Bool]) !(TVar [TVar Bool]) + deriving (Eq, Typeable) + +newQSem :: Int -> IO QSem +newQSem i = atomically $ do + q <- newTVar i + b1 <- newTVar [] + b2 <- newTVar [] + return (QSem q b1 b2) + +waitQSem :: QSem -> IO () +waitQSem s@(QSem q _b1 b2) = + mask_ $ join $ atomically $ do + -- join, because if we need to block, we have to add a TVar to + -- the block queue. + -- mask_, because we need a chance to set up an exception handler + -- after the join returns. + v <- readTVar q + if v == 0 + then do b <- newTVar False + ys <- readTVar b2 + writeTVar b2 (b:ys) + return (wait b) + else do writeTVar q $! 
v - 1 + return (return ()) + where + -- + -- very careful here: if we receive an exception, then we need to + -- (a) write True into the TVar, so that another signalQSem doesn't + -- try to wake up this thread, and + -- (b) if the TVar is *already* True, then we need to do another + -- signalQSem to avoid losing a unit of the resource. + -- + -- The 'wake' function does both (a) and (b), so we can just call + -- it here. + -- + wait t = + flip onException (wake s t) $ + atomically $ do + b <- readTVar t + when (not b) retry + + +wake :: QSem -> TVar Bool -> IO () +wake s x = join $ atomically $ do + b <- readTVar x + if b then return (signalQSem s) + else do writeTVar x True + return (return ()) + +{- + property we want: + + bracket waitQSem (\_ -> signalQSem) (\_ -> ...) + + never loses a unit of the resource. +-} + +signalQSem :: QSem -> IO () +signalQSem s@(QSem q b1 b2) = + mask_ $ join $ atomically $ do + -- join, so we don't force the reverse inside the txn + -- mask_ is needed so we don't lose a wakeup + v <- readTVar q + if v /= 0 + then do writeTVar q $! 
v + 1 + return (return ()) + else do xs <- readTVar b1 + checkwake1 xs + where + checkwake1 [] = do + ys <- readTVar b2 + checkwake2 ys + checkwake1 (x:xs) = do + writeTVar b1 xs + return (wake s x) + + checkwake2 [] = do + writeTVar q 1 + return (return ()) + checkwake2 ys = do + let (z:zs) = reverse ys + writeTVar b1 zs + writeTVar b2 [] + return (wake s z) diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Compat/Time.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Compat/Time.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Compat/Time.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Compat/Time.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,142 @@ +{-# LANGUAGE CPP, ForeignFunctionInterface #-} +module Distribution.Client.Compat.Time + (EpochTime, getModTime, getFileAge, getCurTime) + where + +import Data.Int (Int64) +import System.Directory (getModificationTime) + +#if MIN_VERSION_directory(1,2,0) +import Data.Time.Clock.POSIX (utcTimeToPOSIXSeconds, posixDayLength) +import Data.Time (getCurrentTime, diffUTCTime) +#else +import System.Time (ClockTime(..), getClockTime + ,diffClockTimes, normalizeTimeDiff, tdDay, tdHour) +#endif + +#if defined mingw32_HOST_OS + +#if MIN_VERSION_base(4,7,0) +import Data.Bits ((.|.), finiteBitSize, unsafeShiftL) +#else +import Data.Bits ((.|.), bitSize, unsafeShiftL) +#endif +import Data.Int (Int32) +import Data.Word (Word64) +import Foreign (allocaBytes, peekByteOff) +import System.IO.Error (mkIOError, doesNotExistErrorType) +import System.Win32.Types (BOOL, DWORD, LPCTSTR, LPVOID, withTString) + +#ifdef x86_64_HOST_ARCH +#define CALLCONV ccall +#else +#define CALLCONV stdcall +#endif + +foreign import CALLCONV "windows.h GetFileAttributesExW" + c_getFileAttributesEx :: LPCTSTR -> Int32 -> LPVOID -> IO BOOL + +getFileAttributesEx :: String -> LPVOID -> IO BOOL 
+getFileAttributesEx path lpFileInformation = + withTString path $ \c_path -> + c_getFileAttributesEx c_path getFileExInfoStandard lpFileInformation + +getFileExInfoStandard :: Int32 +getFileExInfoStandard = 0 + +size_WIN32_FILE_ATTRIBUTE_DATA :: Int +size_WIN32_FILE_ATTRIBUTE_DATA = 36 + +index_WIN32_FILE_ATTRIBUTE_DATA_ftLastWriteTime_dwLowDateTime :: Int +index_WIN32_FILE_ATTRIBUTE_DATA_ftLastWriteTime_dwLowDateTime = 20 + +index_WIN32_FILE_ATTRIBUTE_DATA_ftLastWriteTime_dwHighDateTime :: Int +index_WIN32_FILE_ATTRIBUTE_DATA_ftLastWriteTime_dwHighDateTime = 24 + +#else + +import Foreign.C.Types (CTime(..)) +import System.Posix.Files (getFileStatus, modificationTime) + +#endif + +-- | The number of seconds since the UNIX epoch. +type EpochTime = Int64 + +-- | Return modification time of given file. Works around the low clock +-- resolution problem that 'getModificationTime' has on GHC < 7.8. +-- +-- This is a modified version of the code originally written for OpenShake by +-- Neil Mitchell. See module Development.Shake.FileTime. +getModTime :: FilePath -> IO EpochTime + +#if defined mingw32_HOST_OS + +-- Directly against the Win32 API. +getModTime path = allocaBytes size_WIN32_FILE_ATTRIBUTE_DATA $ \info -> do + res <- getFileAttributesEx path info + if not res + then do + let err = mkIOError doesNotExistErrorType + "Distribution.Client.Compat.Time.getModTime" + Nothing (Just path) + ioError err + else do + dwLow <- peekByteOff info + index_WIN32_FILE_ATTRIBUTE_DATA_ftLastWriteTime_dwLowDateTime + dwHigh <- peekByteOff info + index_WIN32_FILE_ATTRIBUTE_DATA_ftLastWriteTime_dwHighDateTime + return $! windowsTimeToPOSIXSeconds dwLow dwHigh + where + windowsTimeToPOSIXSeconds :: DWORD -> DWORD -> EpochTime + windowsTimeToPOSIXSeconds dwLow dwHigh = + let wINDOWS_TICK = 10000000 + sEC_TO_UNIX_EPOCH = 11644473600 +#if MIN_VERSION_base(4,7,0) + qwTime = (fromIntegral dwHigh `unsafeShiftL` finiteBitSize dwHigh) + .|. 
(fromIntegral dwLow) +#else + qwTime = (fromIntegral dwHigh `unsafeShiftL` bitSize dwHigh) + .|. (fromIntegral dwLow) +#endif + res = ((qwTime :: Word64) `div` wINDOWS_TICK) + - sEC_TO_UNIX_EPOCH + -- TODO: What if the result is not representable as POSIX seconds? + -- Probably fine to return garbage. + in fromIntegral res +#else + +-- Directly against the unix library. +getModTime path = do + -- CTime is Int32 in base 4.5, Int64 in base >= 4.6, and an abstract type in + -- base < 4.5. + t <- fmap modificationTime $ getFileStatus path +#if MIN_VERSION_base(4,5,0) + let CTime i = t + return (fromIntegral i) +#else + return (read . show $ t) +#endif +#endif + +-- | Return age of given file in days. +getFileAge :: FilePath -> IO Double +getFileAge file = do + t0 <- getModificationTime file +#if MIN_VERSION_directory(1,2,0) + t1 <- getCurrentTime + return $ realToFrac (t1 `diffUTCTime` t0) / realToFrac posixDayLength +#else + t1 <- getClockTime + let dt = normalizeTimeDiff (t1 `diffClockTimes` t0) + return $ fromIntegral ((24 * tdDay dt) + tdHour dt) / 24.0 +#endif + +getCurTime :: IO EpochTime +getCurTime = do +#if MIN_VERSION_directory(1,2,0) + (truncate . utcTimeToPOSIXSeconds) `fmap` getCurrentTime +#else + (TOD s _) <- getClockTime + return $! 
fromIntegral s +#endif diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Config.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Config.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Config.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Config.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,931 @@ +{-# LANGUAGE CPP #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.Config +-- Copyright : (c) David Himmelstrup 2005 +-- License : BSD-like +-- +-- Maintainer : lemmih@gmail.com +-- Stability : provisional +-- Portability : portable +-- +-- Utilities for handling saved state such as known packages, known servers and +-- downloaded packages. +----------------------------------------------------------------------------- +module Distribution.Client.Config ( + SavedConfig(..), + loadConfig, + + showConfig, + showConfigWithComments, + parseConfig, + + defaultCabalDir, + defaultConfigFile, + defaultCacheDir, + defaultCompiler, + defaultLogsDir, + defaultUserInstall, + + baseSavedConfig, + commentSavedConfig, + initialSavedConfig, + configFieldDescriptions, + haddockFlagsFields, + installDirsFields, + withProgramsFields, + withProgramOptionsFields, + userConfigDiff, + userConfigUpdate + ) where + +import Distribution.Client.Types + ( RemoteRepo(..), Username(..), Password(..) ) +import Distribution.Client.BuildReports.Types + ( ReportLevel(..) 
) +import Distribution.Client.Setup + ( GlobalFlags(..), globalCommand, defaultGlobalFlags + , ConfigExFlags(..), configureExOptions, defaultConfigExFlags + , InstallFlags(..), installOptions, defaultInstallFlags + , UploadFlags(..), uploadCommand + , ReportFlags(..), reportCommand + , showRepo, parseRepo ) +import Distribution.Utils.NubList + ( NubList, fromNubList, toNubList) + +import Distribution.Simple.Compiler + ( DebugInfoLevel(..), OptimisationLevel(..) ) +import Distribution.Simple.Setup + ( ConfigFlags(..), configureOptions, defaultConfigFlags + , HaddockFlags(..), haddockOptions, defaultHaddockFlags + , installDirsOptions + , programConfigurationPaths', programConfigurationOptions + , Flag(..), toFlag, flagToMaybe, fromFlagOrDefault ) +import Distribution.Simple.InstallDirs + ( InstallDirs(..), defaultInstallDirs + , PathTemplate, toPathTemplate ) +import Distribution.ParseUtils + ( FieldDescr(..), liftField + , ParseResult(..), PError(..), PWarning(..) + , locatedErrorMsg, showPWarning + , readFields, warning, lineNo + , simpleField, listField, parseFilePathQ, parseTokenQ ) +import Distribution.Client.ParseUtils + ( parseFields, ppFields, ppSection ) +import qualified Distribution.ParseUtils as ParseUtils + ( Field(..) ) +import qualified Distribution.Text as Text + ( Text(..) ) +import Distribution.Simple.Command + ( CommandUI(commandOptions), commandDefaultFlags, ShowOrParseArgs(..) + , viewAsFieldDescr ) +import Distribution.Simple.Program + ( defaultProgramConfiguration ) +import Distribution.Simple.Utils + ( die, notice, warn, lowercase, cabalVersion ) +import Distribution.Compiler + ( CompilerFlavor(..), defaultCompilerFlavor ) +import Distribution.Verbosity + ( Verbosity, normal ) + +import Data.List + ( partition, find, foldl' ) +import Data.Maybe + ( fromMaybe ) +#if !MIN_VERSION_base(4,8,0) +import Data.Monoid + ( Monoid(..) 
) +#endif +import Control.Monad + ( unless, foldM, liftM, liftM2 ) +import qualified Distribution.Compat.ReadP as Parse + ( option ) +import qualified Text.PrettyPrint as Disp + ( render, text, empty ) +import Text.PrettyPrint + ( ($+$) ) +import System.Directory + ( createDirectoryIfMissing, getAppUserDataDirectory, renameFile ) +import Network.URI + ( URI(..), URIAuth(..) ) +import System.FilePath + ( (<.>), (), takeDirectory ) +import System.IO.Error + ( isDoesNotExistError ) +import Distribution.Compat.Environment + ( getEnvironment ) +import Distribution.Compat.Exception + ( catchIO ) +import qualified Paths_cabal_install + ( version ) +import Data.Version + ( showVersion ) +import Data.Char + ( isSpace ) +import qualified Data.Map as M + +-- +-- * Configuration saved in the config file +-- + +data SavedConfig = SavedConfig { + savedGlobalFlags :: GlobalFlags, + savedInstallFlags :: InstallFlags, + savedConfigureFlags :: ConfigFlags, + savedConfigureExFlags :: ConfigExFlags, + savedUserInstallDirs :: InstallDirs (Flag PathTemplate), + savedGlobalInstallDirs :: InstallDirs (Flag PathTemplate), + savedUploadFlags :: UploadFlags, + savedReportFlags :: ReportFlags, + savedHaddockFlags :: HaddockFlags + } + +instance Monoid SavedConfig where + mempty = SavedConfig { + savedGlobalFlags = mempty, + savedInstallFlags = mempty, + savedConfigureFlags = mempty, + savedConfigureExFlags = mempty, + savedUserInstallDirs = mempty, + savedGlobalInstallDirs = mempty, + savedUploadFlags = mempty, + savedReportFlags = mempty, + savedHaddockFlags = mempty + } + mappend a b = SavedConfig { + savedGlobalFlags = combinedSavedGlobalFlags, + savedInstallFlags = combinedSavedInstallFlags, + savedConfigureFlags = combinedSavedConfigureFlags, + savedConfigureExFlags = combinedSavedConfigureExFlags, + savedUserInstallDirs = combinedSavedUserInstallDirs, + savedGlobalInstallDirs = combinedSavedGlobalInstallDirs, + savedUploadFlags = combinedSavedUploadFlags, + savedReportFlags = 
combinedSavedReportFlags, + savedHaddockFlags = combinedSavedHaddockFlags + } + where + -- This is ugly, but necessary. If we're mappending two config files, we + -- want the values of the *non-empty* list fields from the second one to + -- *override* the corresponding values from the first one. Default + -- behaviour (concatenation) is confusing and makes some use cases (see + -- #1884) impossible. + -- + -- However, we also want to allow specifying multiple values for a list + -- field in a *single* config file. For example, we want the following to + -- continue to work: + -- + -- remote-repo: hackage.haskell.org:http://hackage.haskell.org/ + -- remote-repo: private-collection:http://hackage.local/ + -- + -- So we can't just wrap the list fields inside Flags; we have to do some + -- special-casing just for SavedConfig. + + -- NB: the signature prevents us from using 'combine' on lists. + combine' :: (SavedConfig -> flags) -> (flags -> Flag a) -> Flag a + combine' field subfield = + (subfield . field $ a) `mappend` (subfield . field $ b) + + lastNonEmpty' :: (SavedConfig -> flags) -> (flags -> [a]) -> [a] + lastNonEmpty' field subfield = + let a' = subfield . field $ a + b' = subfield . field $ b + in case b' of [] -> a' + _ -> b' + + lastNonEmptyNL' :: (SavedConfig -> flags) -> (flags -> NubList a) + -> NubList a + lastNonEmptyNL' field subfield = + let a' = subfield . field $ a + b' = subfield . 
field $ b + in case fromNubList b' of [] -> a' + _ -> b' + + combinedSavedGlobalFlags = GlobalFlags { + globalVersion = combine globalVersion, + globalNumericVersion = combine globalNumericVersion, + globalConfigFile = combine globalConfigFile, + globalSandboxConfigFile = combine globalSandboxConfigFile, + globalRemoteRepos = lastNonEmptyNL globalRemoteRepos, + globalCacheDir = combine globalCacheDir, + globalLocalRepos = lastNonEmptyNL globalLocalRepos, + globalLogsDir = combine globalLogsDir, + globalWorldFile = combine globalWorldFile, + globalRequireSandbox = combine globalRequireSandbox, + globalIgnoreSandbox = combine globalIgnoreSandbox + } + where + combine = combine' savedGlobalFlags + lastNonEmptyNL = lastNonEmptyNL' savedGlobalFlags + + combinedSavedInstallFlags = InstallFlags { + installDocumentation = combine installDocumentation, + installHaddockIndex = combine installHaddockIndex, + installDryRun = combine installDryRun, + installMaxBackjumps = combine installMaxBackjumps, + installReorderGoals = combine installReorderGoals, + installIndependentGoals = combine installIndependentGoals, + installShadowPkgs = combine installShadowPkgs, + installStrongFlags = combine installStrongFlags, + installReinstall = combine installReinstall, + installAvoidReinstalls = combine installAvoidReinstalls, + installOverrideReinstall = combine installOverrideReinstall, + installUpgradeDeps = combine installUpgradeDeps, + installOnly = combine installOnly, + installOnlyDeps = combine installOnlyDeps, + installRootCmd = combine installRootCmd, + installSummaryFile = lastNonEmptyNL installSummaryFile, + installLogFile = combine installLogFile, + installBuildReports = combine installBuildReports, + installReportPlanningFailure = combine installReportPlanningFailure, + installSymlinkBinDir = combine installSymlinkBinDir, + installOneShot = combine installOneShot, + installNumJobs = combine installNumJobs, + installRunTests = combine installRunTests + } + where + combine = 
combine' savedInstallFlags + lastNonEmptyNL = lastNonEmptyNL' savedInstallFlags + + combinedSavedConfigureFlags = ConfigFlags { + configPrograms = configPrograms . savedConfigureFlags $ b, + -- TODO: NubListify + configProgramPaths = lastNonEmpty configProgramPaths, + -- TODO: NubListify + configProgramArgs = lastNonEmpty configProgramArgs, + configProgramPathExtra = lastNonEmptyNL configProgramPathExtra, + configHcFlavor = combine configHcFlavor, + configHcPath = combine configHcPath, + configHcPkg = combine configHcPkg, + configVanillaLib = combine configVanillaLib, + configProfLib = combine configProfLib, + configSharedLib = combine configSharedLib, + configDynExe = combine configDynExe, + configProfExe = combine configProfExe, + -- TODO: NubListify + configConfigureArgs = lastNonEmpty configConfigureArgs, + configOptimization = combine configOptimization, + configDebugInfo = combine configDebugInfo, + configProgPrefix = combine configProgPrefix, + configProgSuffix = combine configProgSuffix, + -- Parametrised by (Flag PathTemplate), so safe to use 'mappend'. + configInstallDirs = + (configInstallDirs . savedConfigureFlags $ a) + `mappend` (configInstallDirs . 
savedConfigureFlags $ b), + configScratchDir = combine configScratchDir, + -- TODO: NubListify + configExtraLibDirs = lastNonEmpty configExtraLibDirs, + -- TODO: NubListify + configExtraIncludeDirs = lastNonEmpty configExtraIncludeDirs, + configDistPref = combine configDistPref, + configVerbosity = combine configVerbosity, + configUserInstall = combine configUserInstall, + -- TODO: NubListify + configPackageDBs = lastNonEmpty configPackageDBs, + configGHCiLib = combine configGHCiLib, + configSplitObjs = combine configSplitObjs, + configStripExes = combine configStripExes, + configStripLibs = combine configStripLibs, + -- TODO: NubListify + configConstraints = lastNonEmpty configConstraints, + -- TODO: NubListify + configDependencies = lastNonEmpty configDependencies, + configInstantiateWith = lastNonEmpty configInstantiateWith, + -- TODO: NubListify + configConfigurationsFlags = lastNonEmpty configConfigurationsFlags, + configTests = combine configTests, + configBenchmarks = combine configBenchmarks, + configCoverage = combine configCoverage, + configLibCoverage = combine configLibCoverage, + configExactConfiguration = combine configExactConfiguration, + configFlagError = combine configFlagError, + configRelocatable = combine configRelocatable + } + where + combine = combine' savedConfigureFlags + lastNonEmpty = lastNonEmpty' savedConfigureFlags + lastNonEmptyNL = lastNonEmptyNL' savedConfigureFlags + + combinedSavedConfigureExFlags = ConfigExFlags { + configCabalVersion = combine configCabalVersion, + -- TODO: NubListify + configExConstraints = lastNonEmpty configExConstraints, + -- TODO: NubListify + configPreferences = lastNonEmpty configPreferences, + configSolver = combine configSolver, + configAllowNewer = combine configAllowNewer + } + where + combine = combine' savedConfigureExFlags + lastNonEmpty = lastNonEmpty' savedConfigureExFlags + + -- Parametrised by (Flag PathTemplate), so safe to use 'mappend'. 
+ combinedSavedUserInstallDirs = savedUserInstallDirs a + `mappend` savedUserInstallDirs b + + -- Parametrised by (Flag PathTemplate), so safe to use 'mappend'. + combinedSavedGlobalInstallDirs = savedGlobalInstallDirs a + `mappend` savedGlobalInstallDirs b + + combinedSavedUploadFlags = UploadFlags { + uploadCheck = combine uploadCheck, + uploadUsername = combine uploadUsername, + uploadPassword = combine uploadPassword, + uploadVerbosity = combine uploadVerbosity + } + where + combine = combine' savedUploadFlags + + combinedSavedReportFlags = ReportFlags { + reportUsername = combine reportUsername, + reportPassword = combine reportPassword, + reportVerbosity = combine reportVerbosity + } + where + combine = combine' savedReportFlags + + combinedSavedHaddockFlags = HaddockFlags { + -- TODO: NubListify + haddockProgramPaths = lastNonEmpty haddockProgramPaths, + -- TODO: NubListify + haddockProgramArgs = lastNonEmpty haddockProgramArgs, + haddockHoogle = combine haddockHoogle, + haddockHtml = combine haddockHtml, + haddockHtmlLocation = combine haddockHtmlLocation, + haddockExecutables = combine haddockExecutables, + haddockTestSuites = combine haddockTestSuites, + haddockBenchmarks = combine haddockBenchmarks, + haddockInternal = combine haddockInternal, + haddockCss = combine haddockCss, + haddockHscolour = combine haddockHscolour, + haddockHscolourCss = combine haddockHscolourCss, + haddockContents = combine haddockContents, + haddockDistPref = combine haddockDistPref, + haddockKeepTempFiles = combine haddockKeepTempFiles, + haddockVerbosity = combine haddockVerbosity + } + where + combine = combine' savedHaddockFlags + lastNonEmpty = lastNonEmpty' savedHaddockFlags + + +updateInstallDirs :: Flag Bool -> SavedConfig -> SavedConfig +updateInstallDirs userInstallFlag + savedConfig@SavedConfig { + savedConfigureFlags = configureFlags, + savedUserInstallDirs = userInstallDirs, + savedGlobalInstallDirs = globalInstallDirs + } = + savedConfig { + savedConfigureFlags = 
configureFlags { + configInstallDirs = installDirs + } + } + where + installDirs | userInstall = userInstallDirs + | otherwise = globalInstallDirs + userInstall = fromFlagOrDefault defaultUserInstall $ + configUserInstall configureFlags `mappend` userInstallFlag + +-- +-- * Default config +-- + +-- | These are the absolute basic defaults. The fields that must be +-- initialised. When we load the config from the file we layer the loaded +-- values over these ones, so any missing fields in the file take their values +-- from here. +-- +baseSavedConfig :: IO SavedConfig +baseSavedConfig = do + userPrefix <- defaultCabalDir + logsDir <- defaultLogsDir + worldFile <- defaultWorldFile + return mempty { + savedConfigureFlags = mempty { + configHcFlavor = toFlag defaultCompiler, + configUserInstall = toFlag defaultUserInstall, + configVerbosity = toFlag normal + }, + savedUserInstallDirs = mempty { + prefix = toFlag (toPathTemplate userPrefix) + }, + savedGlobalFlags = mempty { + globalLogsDir = toFlag logsDir, + globalWorldFile = toFlag worldFile + } + } + +-- | This is the initial configuration that we write out to to the config file +-- if the file does not exist (or the config we use if the file cannot be read +-- for some other reason). When the config gets loaded it gets layered on top +-- of 'baseSavedConfig' so we do not need to include it into the initial +-- values we save into the config file. 
+-- +initialSavedConfig :: IO SavedConfig +initialSavedConfig = do + cacheDir <- defaultCacheDir + logsDir <- defaultLogsDir + worldFile <- defaultWorldFile + extraPath <- defaultExtraPath + return mempty { + savedGlobalFlags = mempty { + globalCacheDir = toFlag cacheDir, + globalRemoteRepos = toNubList [defaultRemoteRepo], + globalWorldFile = toFlag worldFile + }, + savedConfigureFlags = mempty { + configProgramPathExtra = toNubList extraPath + }, + savedInstallFlags = mempty { + installSummaryFile = toNubList [toPathTemplate (logsDir "build.log")], + installBuildReports= toFlag AnonymousReports, + installNumJobs = toFlag Nothing + } + } + +--TODO: misleading, there's no way to override this default +-- either make it possible or rename to simply getCabalDir. +defaultCabalDir :: IO FilePath +defaultCabalDir = getAppUserDataDirectory "cabal" + +defaultConfigFile :: IO FilePath +defaultConfigFile = do + dir <- defaultCabalDir + return $ dir "config" + +defaultCacheDir :: IO FilePath +defaultCacheDir = do + dir <- defaultCabalDir + return $ dir "packages" + +defaultLogsDir :: IO FilePath +defaultLogsDir = do + dir <- defaultCabalDir + return $ dir "logs" + +-- | Default position of the world file +defaultWorldFile :: IO FilePath +defaultWorldFile = do + dir <- defaultCabalDir + return $ dir "world" + +defaultExtraPath :: IO [FilePath] +defaultExtraPath = do + dir <- defaultCabalDir + return [dir "bin"] + +defaultCompiler :: CompilerFlavor +defaultCompiler = fromMaybe GHC defaultCompilerFlavor + +defaultUserInstall :: Bool +defaultUserInstall = True +-- We do per-user installs by default on all platforms. We used to default to +-- global installs on Windows but that no longer works on Windows Vista or 7. 
+ +defaultRemoteRepo :: RemoteRepo +defaultRemoteRepo = RemoteRepo name uri + where + name = "hackage.haskell.org" + uri = URI "http:" (Just (URIAuth "" name "")) "/packages/archive" "" "" + +-- +-- * Config file reading +-- + +loadConfig :: Verbosity -> Flag FilePath -> Flag Bool -> IO SavedConfig +loadConfig verbosity configFileFlag userInstallFlag = addBaseConf $ do + let sources = [ + ("commandline option", return . flagToMaybe $ configFileFlag), + ("env var CABAL_CONFIG", lookup "CABAL_CONFIG" `liftM` getEnvironment), + ("default config file", Just `liftM` defaultConfigFile) ] + + getSource [] = error "no config file path candidate found." + getSource ((msg,action): xs) = + action >>= maybe (getSource xs) (return . (,) msg) + + (source, configFile) <- getSource sources + minp <- readConfigFile mempty configFile + case minp of + Nothing -> do + notice verbosity $ "Config file path source is " ++ source ++ "." + notice verbosity $ "Config file " ++ configFile ++ " not found." + notice verbosity $ "Writing default configuration to " ++ configFile + commentConf <- commentSavedConfig + initialConf <- initialSavedConfig + writeConfigFile configFile commentConf initialConf + return initialConf + Just (ParseOk ws conf) -> do + unless (null ws) $ warn verbosity $ + unlines (map (showPWarning configFile) ws) + return conf + Just (ParseFailed err) -> do + let (line, msg) = locatedErrorMsg err + die $ + "Error parsing config file " ++ configFile + ++ maybe "" (\n -> ':' : show n) line ++ ":\n" ++ msg + + where + addBaseConf body = do + base <- baseSavedConfig + extra <- body + return (updateInstallDirs userInstallFlag (base `mappend` extra)) + +readConfigFile :: SavedConfig -> FilePath -> IO (Maybe (ParseResult SavedConfig)) +readConfigFile initial file = handleNotExists $ + fmap (Just . 
parseConfig initial) (readFile file) + + where + handleNotExists action = catchIO action $ \ioe -> + if isDoesNotExistError ioe + then return Nothing + else ioError ioe + +writeConfigFile :: FilePath -> SavedConfig -> SavedConfig -> IO () +writeConfigFile file comments vals = do + let tmpFile = file <.> "tmp" + createDirectoryIfMissing True (takeDirectory file) + writeFile tmpFile $ explanation ++ showConfigWithComments comments vals ++ "\n" + renameFile tmpFile file + where + explanation = unlines + ["-- This is the configuration file for the 'cabal' command line tool." + ,"" + ,"-- The available configuration options are listed below." + ,"-- Some of them have default values listed." + ,"" + ,"-- Lines (like this one) beginning with '--' are comments." + ,"-- Be careful with spaces and indentation because they are" + ,"-- used to indicate layout for nested sections." + ,"" + ,"-- Cabal library version: " ++ showVersion cabalVersion + ,"-- cabal-install version: " ++ showVersion Paths_cabal_install.version + ,"","" + ] + +-- | These are the default values that get used in Cabal if a no value is +-- given. We use these here to include in comments when we write out the +-- initial config file so that the user can see what default value they are +-- overriding. 
+-- +commentSavedConfig :: IO SavedConfig +commentSavedConfig = do + userInstallDirs <- defaultInstallDirs defaultCompiler True True + globalInstallDirs <- defaultInstallDirs defaultCompiler False True + return SavedConfig { + savedGlobalFlags = defaultGlobalFlags, + savedInstallFlags = defaultInstallFlags, + savedConfigureExFlags = defaultConfigExFlags, + savedConfigureFlags = (defaultConfigFlags defaultProgramConfiguration) { + configUserInstall = toFlag defaultUserInstall + }, + savedUserInstallDirs = fmap toFlag userInstallDirs, + savedGlobalInstallDirs = fmap toFlag globalInstallDirs, + savedUploadFlags = commandDefaultFlags uploadCommand, + savedReportFlags = commandDefaultFlags reportCommand, + savedHaddockFlags = defaultHaddockFlags + } + +-- | All config file fields. +-- +configFieldDescriptions :: [FieldDescr SavedConfig] +configFieldDescriptions = + + toSavedConfig liftGlobalFlag + (commandOptions (globalCommand []) ParseArgs) + ["version", "numeric-version", "config-file", "sandbox-config-file"] [] + + ++ toSavedConfig liftConfigFlag + (configureOptions ParseArgs) + (["builddir", "constraint", "dependency"] + ++ map fieldName installDirsFields) + + --FIXME: this is only here because viewAsFieldDescr gives us a parser + -- that only recognises 'ghc' etc, the case-sensitive flag names, not + -- what the normal case-insensitive parser gives us. + [simpleField "compiler" + (fromFlagOrDefault Disp.empty . fmap Text.disp) (optional Text.parse) + configHcFlavor (\v flags -> flags { configHcFlavor = v }) + -- TODO: The following is a temporary fix. The "optimization" + -- and "debug-info" fields are OptArg, and viewAsFieldDescr + -- fails on that. Instead of a hand-written hackaged parser + -- and printer, we should handle this case properly in the + -- library. 
+ ,liftField configOptimization (\v flags -> flags { configOptimization = v }) $ + let name = "optimization" in + FieldDescr name + (\f -> case f of + Flag NoOptimisation -> Disp.text "False" + Flag NormalOptimisation -> Disp.text "True" + Flag MaximumOptimisation -> Disp.text "2" + _ -> Disp.empty) + (\line str _ -> case () of + _ | str == "False" -> ParseOk [] (Flag NoOptimisation) + | str == "True" -> ParseOk [] (Flag NormalOptimisation) + | str == "0" -> ParseOk [] (Flag NoOptimisation) + | str == "1" -> ParseOk [] (Flag NormalOptimisation) + | str == "2" -> ParseOk [] (Flag MaximumOptimisation) + | lstr == "false" -> ParseOk [caseWarning] (Flag NoOptimisation) + | lstr == "true" -> ParseOk [caseWarning] (Flag NormalOptimisation) + | otherwise -> ParseFailed (NoParse name line) + where + lstr = lowercase str + caseWarning = PWarning $ + "The '" ++ name ++ "' field is case sensitive, use 'True' or 'False'.") + ,liftField configDebugInfo (\v flags -> flags { configDebugInfo = v }) $ + let name = "debug-info" in + FieldDescr name + (\f -> case f of + Flag NoDebugInfo -> Disp.text "False" + Flag MinimalDebugInfo -> Disp.text "1" + Flag NormalDebugInfo -> Disp.text "True" + Flag MaximalDebugInfo -> Disp.text "3" + _ -> Disp.empty) + (\line str _ -> case () of + _ | str == "False" -> ParseOk [] (Flag NoDebugInfo) + | str == "True" -> ParseOk [] (Flag NormalDebugInfo) + | str == "0" -> ParseOk [] (Flag NoDebugInfo) + | str == "1" -> ParseOk [] (Flag MinimalDebugInfo) + | str == "2" -> ParseOk [] (Flag NormalDebugInfo) + | str == "3" -> ParseOk [] (Flag MaximalDebugInfo) + | lstr == "false" -> ParseOk [caseWarning] (Flag NoDebugInfo) + | lstr == "true" -> ParseOk [caseWarning] (Flag NormalDebugInfo) + | otherwise -> ParseFailed (NoParse name line) + where + lstr = lowercase str + caseWarning = PWarning $ + "The '" ++ name ++ "' field is case sensitive, use 'True' or 'False'.") + ] + + ++ toSavedConfig liftConfigExFlag + (configureExOptions ParseArgs) + [] [] + + ++ 
toSavedConfig liftInstallFlag + (installOptions ParseArgs) + ["dry-run", "only", "only-dependencies", "dependencies-only"] [] + + ++ toSavedConfig liftUploadFlag + (commandOptions uploadCommand ParseArgs) + ["verbose", "check"] [] + + ++ toSavedConfig liftReportFlag + (commandOptions reportCommand ParseArgs) + ["verbose", "username", "password"] [] + --FIXME: this is a hack, hiding the user name and password. + -- But otherwise it masks the upload ones. Either need to + -- share the options or make then distinct. In any case + -- they should probably be per-server. + + where + toSavedConfig lift options exclusions replacements = + [ lift (fromMaybe field replacement) + | opt <- options + , let field = viewAsFieldDescr opt + name = fieldName field + replacement = find ((== name) . fieldName) replacements + , name `notElem` exclusions ] + optional = Parse.option mempty . fmap toFlag + +-- TODO: next step, make the deprecated fields elicit a warning. +-- +deprecatedFieldDescriptions :: [FieldDescr SavedConfig] +deprecatedFieldDescriptions = + [ liftGlobalFlag $ + listField "repos" + (Disp.text . showRepo) parseRepo + (fromNubList . globalRemoteRepos) + (\rs cfg -> cfg { globalRemoteRepos = toNubList rs }) + , liftGlobalFlag $ + simpleField "cachedir" + (Disp.text . fromFlagOrDefault "") (optional parseFilePathQ) + globalCacheDir (\d cfg -> cfg { globalCacheDir = d }) + , liftUploadFlag $ + simpleField "hackage-username" + (Disp.text . fromFlagOrDefault "" . fmap unUsername) + (optional (fmap Username parseTokenQ)) + uploadUsername (\d cfg -> cfg { uploadUsername = d }) + , liftUploadFlag $ + simpleField "hackage-password" + (Disp.text . fromFlagOrDefault "" . fmap unPassword) + (optional (fmap Password parseTokenQ)) + uploadPassword (\d cfg -> cfg { uploadPassword = d }) + ] + ++ map (modifyFieldName ("user-"++) . liftUserInstallDirs) installDirsFields + ++ map (modifyFieldName ("global-"++) . 
liftGlobalInstallDirs) installDirsFields + where + optional = Parse.option mempty . fmap toFlag + modifyFieldName :: (String -> String) -> FieldDescr a -> FieldDescr a + modifyFieldName f d = d { fieldName = f (fieldName d) } + +liftUserInstallDirs :: FieldDescr (InstallDirs (Flag PathTemplate)) + -> FieldDescr SavedConfig +liftUserInstallDirs = liftField + savedUserInstallDirs (\flags conf -> conf { savedUserInstallDirs = flags }) + +liftGlobalInstallDirs :: FieldDescr (InstallDirs (Flag PathTemplate)) + -> FieldDescr SavedConfig +liftGlobalInstallDirs = liftField + savedGlobalInstallDirs (\flags conf -> conf { savedGlobalInstallDirs = flags }) + +liftGlobalFlag :: FieldDescr GlobalFlags -> FieldDescr SavedConfig +liftGlobalFlag = liftField + savedGlobalFlags (\flags conf -> conf { savedGlobalFlags = flags }) + +liftConfigFlag :: FieldDescr ConfigFlags -> FieldDescr SavedConfig +liftConfigFlag = liftField + savedConfigureFlags (\flags conf -> conf { savedConfigureFlags = flags }) + +liftConfigExFlag :: FieldDescr ConfigExFlags -> FieldDescr SavedConfig +liftConfigExFlag = liftField + savedConfigureExFlags (\flags conf -> conf { savedConfigureExFlags = flags }) + +liftInstallFlag :: FieldDescr InstallFlags -> FieldDescr SavedConfig +liftInstallFlag = liftField + savedInstallFlags (\flags conf -> conf { savedInstallFlags = flags }) + +liftUploadFlag :: FieldDescr UploadFlags -> FieldDescr SavedConfig +liftUploadFlag = liftField + savedUploadFlags (\flags conf -> conf { savedUploadFlags = flags }) + +liftReportFlag :: FieldDescr ReportFlags -> FieldDescr SavedConfig +liftReportFlag = liftField + savedReportFlags (\flags conf -> conf { savedReportFlags = flags }) + +parseConfig :: SavedConfig -> String -> ParseResult SavedConfig +parseConfig initial = \str -> do + fields <- readFields str + let (knownSections, others) = partition isKnownSection fields + config <- parse others + let user0 = savedUserInstallDirs config + global0 = savedGlobalInstallDirs config + 
(haddockFlags, user, global, paths, args) <- + foldM parseSections + (savedHaddockFlags config, user0, global0, [], []) + knownSections + return config { + savedConfigureFlags = (savedConfigureFlags config) { + configProgramPaths = paths, + configProgramArgs = args + }, + savedHaddockFlags = haddockFlags, + savedUserInstallDirs = user, + savedGlobalInstallDirs = global + } + + where + isKnownSection (ParseUtils.Section _ "haddock" _ _) = True + isKnownSection (ParseUtils.Section _ "install-dirs" _ _) = True + isKnownSection (ParseUtils.Section _ "program-locations" _ _) = True + isKnownSection (ParseUtils.Section _ "program-default-options" _ _) = True + isKnownSection _ = False + + parse = parseFields (configFieldDescriptions + ++ deprecatedFieldDescriptions) initial + + parseSections accum@(h,u,g,p,a) + (ParseUtils.Section _ "haddock" name fs) + | name == "" = do h' <- parseFields haddockFlagsFields h fs + return (h', u, g, p, a) + | otherwise = do + warning "The 'haddock' section should be unnamed" + return accum + parseSections accum@(h,u,g,p,a) + (ParseUtils.Section _ "install-dirs" name fs) + | name' == "user" = do u' <- parseFields installDirsFields u fs + return (h, u', g, p, a) + | name' == "global" = do g' <- parseFields installDirsFields g fs + return (h, u, g', p, a) + | otherwise = do + warning "The 'install-paths' section should be for 'user' or 'global'" + return accum + where name' = lowercase name + parseSections accum@(h,u,g,p,a) + (ParseUtils.Section _ "program-locations" name fs) + | name == "" = do p' <- parseFields withProgramsFields p fs + return (h, u, g, p', a) + | otherwise = do + warning "The 'program-locations' section should be unnamed" + return accum + parseSections accum@(h, u, g, p, a) + (ParseUtils.Section _ "program-default-options" name fs) + | name == "" = do a' <- parseFields withProgramOptionsFields a fs + return (h, u, g, p, a') + | otherwise = do + warning "The 'program-default-options' section should be unnamed" + return 
accum + parseSections accum f = do + warning $ "Unrecognized stanza on line " ++ show (lineNo f) + return accum + +showConfig :: SavedConfig -> String +showConfig = showConfigWithComments mempty + +showConfigWithComments :: SavedConfig -> SavedConfig -> String +showConfigWithComments comment vals = Disp.render $ + ppFields configFieldDescriptions mcomment vals + $+$ Disp.text "" + $+$ ppSection "haddock" "" haddockFlagsFields + (fmap savedHaddockFlags mcomment) (savedHaddockFlags vals) + $+$ Disp.text "" + $+$ installDirsSection "user" savedUserInstallDirs + $+$ Disp.text "" + $+$ installDirsSection "global" savedGlobalInstallDirs + $+$ Disp.text "" + $+$ configFlagsSection "program-locations" withProgramsFields + configProgramPaths + $+$ Disp.text "" + $+$ configFlagsSection "program-default-options" withProgramOptionsFields + configProgramArgs + where + mcomment = Just comment + installDirsSection name field = + ppSection "install-dirs" name installDirsFields + (fmap field mcomment) (field vals) + configFlagsSection name fields field = + ppSection name "" fields + (fmap (field . savedConfigureFlags) mcomment) + ((field . savedConfigureFlags) vals) + +-- | Fields for the 'install-dirs' sections. +installDirsFields :: [FieldDescr (InstallDirs (Flag PathTemplate))] +installDirsFields = map viewAsFieldDescr installDirsOptions + +-- | Fields for the 'haddock' section. +haddockFlagsFields :: [FieldDescr HaddockFlags] +haddockFlagsFields = [ field + | opt <- haddockOptions ParseArgs + , let field = viewAsFieldDescr opt + name = fieldName field + , name `notElem` exclusions ] + where + exclusions = ["verbose", "builddir"] + +-- | Fields for the 'program-locations' section. +withProgramsFields :: [FieldDescr [(String, FilePath)]] +withProgramsFields = + map viewAsFieldDescr $ + programConfigurationPaths' (++ "-location") defaultProgramConfiguration + ParseArgs id (++) + +-- | Fields for the 'program-default-options' section. 
+withProgramOptionsFields :: [FieldDescr [(String, [String])]] +withProgramOptionsFields = + map viewAsFieldDescr $ + programConfigurationOptions defaultProgramConfiguration ParseArgs id (++) + +-- | Get the differences (as a pseudo code diff) between the user's +-- '~/.cabal/config' and the one that cabal would generate if it didn't exist. +userConfigDiff :: GlobalFlags -> IO [String] +userConfigDiff globalFlags = do + userConfig <- loadConfig normal (globalConfigFile globalFlags) mempty + testConfig <- liftM2 mappend baseSavedConfig initialSavedConfig + return $ reverse . foldl' createDiff [] . M.toList + $ M.unionWith combine + (M.fromList . map justFst $ filterShow testConfig) + (M.fromList . map justSnd $ filterShow userConfig) + where + justFst (a, b) = (a, (Just b, Nothing)) + justSnd (a, b) = (a, (Nothing, Just b)) + + combine (Nothing, Just b) (Just a, Nothing) = (Just a, Just b) + combine (Just a, Nothing) (Nothing, Just b) = (Just a, Just b) + combine x y = error $ "Can't happen : userConfigDiff " ++ show x ++ " " ++ show y + + createDiff :: [String] -> (String, (Maybe String, Maybe String)) -> [String] + createDiff acc (key, (Just a, Just b)) + | a == b = acc + | otherwise = ("+ " ++ key ++ ": " ++ b) : ("- " ++ key ++ ": " ++ a) : acc + createDiff acc (key, (Nothing, Just b)) = ("+ " ++ key ++ ": " ++ b) : acc + createDiff acc (key, (Just a, Nothing)) = ("- " ++ key ++ ": " ++ a) : acc + createDiff acc (_, (Nothing, Nothing)) = acc + + filterShow :: SavedConfig -> [(String, String)] + filterShow cfg = map keyValueSplit + . filter (\s -> not (null s) && any (== ':') s) + . map nonComment + . lines + $ showConfig cfg + + nonComment [] = [] + nonComment ('-':'-':_) = [] + nonComment (x:xs) = x : nonComment xs + + topAndTail = reverse . dropWhile isSpace . reverse . 
dropWhile isSpace + + keyValueSplit s = + let (left, right) = break (== ':') s + in (topAndTail left, topAndTail (drop 1 right)) + + +-- | Update the user's ~/.cabal/config' keeping the user's customizations. +userConfigUpdate :: Verbosity -> GlobalFlags -> IO () +userConfigUpdate verbosity globalFlags = do + userConfig <- loadConfig normal (globalConfigFile globalFlags) mempty + newConfig <- liftM2 mappend baseSavedConfig initialSavedConfig + commentConf <- commentSavedConfig + cabalFile <- defaultConfigFile + let backup = cabalFile ++ ".backup" + notice verbosity $ "Renaming " ++ cabalFile ++ " to " ++ backup ++ "." + renameFile cabalFile backup + notice verbosity $ "Writing merged config to " ++ cabalFile ++ "." + writeConfigFile cabalFile commentConf (newConfig `mappend` userConfig) diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Configure.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Configure.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Configure.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Configure.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,254 @@ +{-# LANGUAGE CPP #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.Configure +-- Copyright : (c) David Himmelstrup 2005, +-- Duncan Coutts 2005 +-- License : BSD-like +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- High level interface to configuring a package. 
+----------------------------------------------------------------------------- +module Distribution.Client.Configure ( + configure, + chooseCabalVersion, + ) where + +import Distribution.Client.Dependency +import Distribution.Client.Dependency.Types (AllowNewer(..), isAllowNewer) +import qualified Distribution.Client.InstallPlan as InstallPlan +import Distribution.Client.InstallPlan (InstallPlan) +import Distribution.Client.IndexUtils as IndexUtils + ( getSourcePackages, getInstalledPackages ) +import Distribution.Client.Setup + ( ConfigExFlags(..), configureCommand, filterConfigureFlags ) +import Distribution.Client.Types as Source +import Distribution.Client.SetupWrapper + ( setupWrapper, SetupScriptOptions(..), defaultSetupScriptOptions ) +import Distribution.Client.Targets + ( userToPackageConstraint ) + +import Distribution.Simple.Compiler + ( Compiler, CompilerInfo, compilerInfo, PackageDB(..), PackageDBStack ) +import Distribution.Simple.Program (ProgramConfiguration ) +import Distribution.Simple.Setup + ( ConfigFlags(..), fromFlag, toFlag, flagToMaybe, fromFlagOrDefault ) +import Distribution.Simple.PackageIndex (InstalledPackageIndex) +import Distribution.Simple.Utils + ( defaultPackageDesc ) +import qualified Distribution.InstalledPackageInfo as Installed +import Distribution.Package + ( Package(..), packageName, Dependency(..), thisPackageVersion ) +import Distribution.PackageDescription.Parse + ( readPackageDescription ) +import Distribution.PackageDescription.Configuration + ( finalizePackageDescription ) +import Distribution.Version + ( anyVersion, thisVersion ) +import Distribution.Simple.Utils as Utils + ( notice, info, debug, die ) +import Distribution.System + ( Platform ) +import Distribution.Verbosity as Verbosity + ( Verbosity ) +import Distribution.Version + ( Version(..), VersionRange, orLaterVersion ) + +#if !MIN_VERSION_base(4,8,0) +import Data.Monoid (Monoid(..)) +#endif + +-- | Choose the Cabal version such that the setup scripts compiled 
against this +-- version will support the given command-line flags. +chooseCabalVersion :: ConfigExFlags -> Maybe Version -> VersionRange +chooseCabalVersion configExFlags maybeVersion = + maybe defaultVersionRange thisVersion maybeVersion + where + -- Cabal < 1.19.2 doesn't support '--exact-configuration' which is needed + -- for '--allow-newer' to work. + allowNewer = fromFlagOrDefault False $ + fmap isAllowNewer (configAllowNewer configExFlags) + + defaultVersionRange = if allowNewer + then orLaterVersion (Version [1,19,2] []) + else anyVersion + +-- | Configure the package found in the local directory +configure :: Verbosity + -> PackageDBStack + -> [Repo] + -> Compiler + -> Platform + -> ProgramConfiguration + -> ConfigFlags + -> ConfigExFlags + -> [String] + -> IO () +configure verbosity packageDBs repos comp platform conf + configFlags configExFlags extraArgs = do + + installedPkgIndex <- getInstalledPackages verbosity comp packageDBs conf + sourcePkgDb <- getSourcePackages verbosity repos + + progress <- planLocalPackage verbosity comp platform configFlags configExFlags + installedPkgIndex sourcePkgDb + + notice verbosity "Resolving dependencies..." + maybePlan <- foldProgress logMsg (return . Left) (return . Right) + progress + case maybePlan of + Left message -> do + info verbosity message + setupWrapper verbosity (setupScriptOptions installedPkgIndex) Nothing + configureCommand (const configFlags) extraArgs + + Right installPlan -> case InstallPlan.ready installPlan of + [pkg@(ReadyPackage (SourcePackage _ _ (LocalUnpackedPackage _) _) _ _ _)] -> + configurePackage verbosity + (InstallPlan.planPlatform installPlan) + (InstallPlan.planCompiler installPlan) + (setupScriptOptions installedPkgIndex) + configFlags pkg extraArgs + + _ -> die $ "internal error: configure install plan should have exactly " + ++ "one local ready package." 
+ + where + setupScriptOptions index = SetupScriptOptions { + useCabalVersion = chooseCabalVersion configExFlags + (flagToMaybe (configCabalVersion configExFlags)), + useCompiler = Just comp, + usePlatform = Just platform, + usePackageDB = packageDBs', + usePackageIndex = index', + useProgramConfig = conf, + useDistPref = fromFlagOrDefault + (useDistPref defaultSetupScriptOptions) + (configDistPref configFlags), + useLoggingHandle = Nothing, + useWorkingDir = Nothing, + useWin32CleanHack = False, + forceExternalSetupMethod = False, + setupCacheLock = Nothing + } + where + -- Hack: we typically want to allow the UserPackageDB for finding the + -- Cabal lib when compiling any Setup.hs even if we're doing a global + -- install. However we also allow looking in a specific package db. + (packageDBs', index') = + case packageDBs of + (GlobalPackageDB:dbs) | UserPackageDB `notElem` dbs + -> (GlobalPackageDB:UserPackageDB:dbs, Nothing) + -- but if the user is using an odd db stack, don't touch it + dbs -> (dbs, Just index) + + logMsg message rest = debug verbosity message >> rest + +-- | Make an 'InstallPlan' for the unpacked package in the current directory, +-- and all its dependencies. 
+-- +planLocalPackage :: Verbosity -> Compiler + -> Platform + -> ConfigFlags -> ConfigExFlags + -> InstalledPackageIndex + -> SourcePackageDb + -> IO (Progress String String InstallPlan) +planLocalPackage verbosity comp platform configFlags configExFlags installedPkgIndex + (SourcePackageDb _ packagePrefs) = do + pkg <- readPackageDescription verbosity =<< defaultPackageDesc verbosity + solver <- chooseSolver verbosity (fromFlag $ configSolver configExFlags) (compilerInfo comp) + + let -- We create a local package and ask to resolve a dependency on it + localPkg = SourcePackage { + packageInfoId = packageId pkg, + Source.packageDescription = pkg, + packageSource = LocalUnpackedPackage ".", + packageDescrOverride = Nothing + } + + testsEnabled = fromFlagOrDefault False $ configTests configFlags + benchmarksEnabled = + fromFlagOrDefault False $ configBenchmarks configFlags + + resolverParams = + removeUpperBounds (fromFlagOrDefault AllowNewerNone $ + configAllowNewer configExFlags) + + . addPreferences + -- preferences from the config file or command line + [ PackageVersionPreference name ver + | Dependency name ver <- configPreferences configExFlags ] + + . addConstraints + -- version constraints from the config file or command line + -- TODO: should warn or error on constraints that are not on direct + -- deps or flag constraints not on the package in question. + (map userToPackageConstraint (configExConstraints configExFlags)) + + . addConstraints + -- package flags from the config file or command line + [ PackageConstraintFlags (packageName pkg) + (configConfigurationsFlags configFlags) ] + + . 
addConstraints + -- '--enable-tests' and '--enable-benchmarks' constraints from + -- command line + [ PackageConstraintStanzas (packageName pkg) $ + [ TestStanzas | testsEnabled ] ++ + [ BenchStanzas | benchmarksEnabled ] + ] + + $ standardInstallPolicy + installedPkgIndex + (SourcePackageDb mempty packagePrefs) + [SpecificSourcePackage localPkg] + + return (resolveDependencies platform (compilerInfo comp) solver resolverParams) + + +-- | Call an installer for an 'SourcePackage' but override the configure +-- flags with the ones given by the 'ReadyPackage'. In particular the +-- 'ReadyPackage' specifies an exact 'FlagAssignment' and exactly +-- versioned package dependencies. So we ignore any previous partial flag +-- assignment or dependency constraints and use the new ones. +-- +-- NB: when updating this function, don't forget to also update +-- 'installReadyPackage' in D.C.Install. +configurePackage :: Verbosity + -> Platform -> CompilerInfo + -> SetupScriptOptions + -> ConfigFlags + -> ReadyPackage + -> [String] + -> IO () +configurePackage verbosity platform comp scriptOptions configFlags + (ReadyPackage (SourcePackage _ gpkg _ _) flags stanzas deps) extraArgs = + + setupWrapper verbosity + scriptOptions (Just pkg) configureCommand configureFlags extraArgs + + where + configureFlags = filterConfigureFlags configFlags { + configConfigurationsFlags = flags, + -- We generate the legacy constraints as well as the new style precise + -- deps. In the end only one set gets passed to Setup.hs configure, + -- depending on the Cabal version we are talking to. + configConstraints = [ thisPackageVersion (packageId deppkg) + | deppkg <- deps ], + configDependencies = [ (packageName (Installed.sourcePackageId deppkg), + Installed.installedPackageId deppkg) + | deppkg <- deps ], + -- Use '--exact-configuration' if supported. 
+ configExactConfiguration = toFlag True, + configVerbosity = toFlag verbosity, + configBenchmarks = toFlag (BenchStanzas `elem` stanzas), + configTests = toFlag (TestStanzas `elem` stanzas) + } + + pkg = case finalizePackageDescription flags + (const True) + platform comp [] (enableStanzas stanzas gpkg) of + Left _ -> error "finalizePackageDescription ReadyPackage failed" + Right (desc, _) -> desc diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Assignment.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Assignment.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Assignment.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Assignment.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,154 @@ +module Distribution.Client.Dependency.Modular.Assignment where + +import Control.Applicative +import Control.Monad +import Data.Array as A +import Data.List as L +import Data.Map as M +import Data.Maybe +import Data.Graph +import Prelude hiding (pi) + +import Distribution.PackageDescription (FlagAssignment) -- from Cabal +import Distribution.Client.Types (OptionalStanza) + +import Distribution.Client.Dependency.Modular.Configured +import Distribution.Client.Dependency.Modular.Dependency +import Distribution.Client.Dependency.Modular.Flag +import Distribution.Client.Dependency.Modular.Index +import Distribution.Client.Dependency.Modular.Package +import Distribution.Client.Dependency.Modular.Version + +-- | A (partial) package assignment. Qualified package names +-- are associated with instances. +type PAssignment = Map QPN I + +-- | A (partial) package preassignment. Qualified package names +-- are associated with constrained instances. 
Constrained instances +-- record constraints about the instances that can still be chosen, +-- and in the extreme case fix a concrete instance. +type PPreAssignment = Map QPN (CI QPN) +type FAssignment = Map QFN Bool +type SAssignment = Map QSN Bool + +-- | A (partial) assignment of variables. +data Assignment = A PAssignment FAssignment SAssignment + deriving (Show, Eq) + +-- | A preassignment comprises knowledge about variables, but not +-- necessarily fixed values. +data PreAssignment = PA PPreAssignment FAssignment SAssignment + +-- | Extend a package preassignment. +-- +-- Takes the variable that causes the new constraints, a current preassignment +-- and a set of new dependency constraints. +-- +-- We're trying to extend the preassignment with each dependency one by one. +-- Each dependency is for a particular variable. We check if we already have +-- constraints for that variable in the current preassignment. If so, we're +-- trying to merge the constraints. +-- +-- Either returns a witness of the conflict that would arise during the merge, +-- or the successfully extended assignment. +extend :: Var QPN -> PPreAssignment -> [Dep QPN] -> Either (ConflictSet QPN, [Dep QPN]) PPreAssignment +extend var pa qa = foldM (\ a (Dep qpn ci) -> + let ci' = M.findWithDefault (Constrained []) qpn a + in case (\ x -> M.insert qpn x a) <$> merge ci' ci of + Left (c, (d, d')) -> Left (c, L.map (Dep qpn) (simplify (P qpn) d d')) + Right x -> Right x) + pa qa + where + -- We're trying to remove trivial elements of the conflict. If we're just + -- making a choice pkg == instance, and pkg => pkg == instance is a part + -- of the conflict, then this info is clear from the context and does not + -- have to be repeated. + simplify v (Fixed _ (Goal var' _)) c | v == var && var' == var = [c] + simplify v c (Fixed _ (Goal var' _)) | v == var && var' == var = [c] + simplify _ c d = [c, d] + +-- | Delivers an ordered list of fully configured packages. 
+-- +-- TODO: This function is (sort of) ok. However, there's an open bug +-- w.r.t. unqualification. There might be several different instances +-- of one package version chosen by the solver, which will lead to +-- clashes. +toCPs :: Assignment -> RevDepMap -> [CP QPN] +toCPs (A pa fa sa) rdm = + let + -- get hold of the graph + g :: Graph + vm :: Vertex -> ((), QPN, [QPN]) + cvm :: QPN -> Maybe Vertex + -- Note that the RevDepMap contains duplicate dependencies. Therefore the nub. + (g, vm, cvm) = graphFromEdges (L.map (\ (x, xs) -> ((), x, nub xs)) + (M.toList rdm)) + tg :: Graph + tg = transposeG g + -- Topsort the dependency graph, yielding a list of pkgs in the right order. + -- The graph will still contain all the installed packages, and it might + -- contain duplicates, because several variables might actually resolve to + -- the same package in the presence of qualified package names. + ps :: [PI QPN] + ps = L.map ((\ (_, x, _) -> PI x (pa M.! x)) . vm) $ + topSort g + -- Determine the flags per package, by walking over and regrouping the + -- complete flag assignment by package. + fapp :: Map QPN FlagAssignment + fapp = M.fromListWith (++) $ + L.map (\ ((FN (PI qpn _) fn), b) -> (qpn, [(fn, b)])) $ + M.toList $ + fa + -- Stanzas per package. + sapp :: Map QPN [OptionalStanza] + sapp = M.fromListWith (++) $ + L.map (\ ((SN (PI qpn _) sn), b) -> (qpn, if b then [sn] else [])) $ + M.toList $ + sa + -- Dependencies per package. + depp :: QPN -> [PI QPN] + depp qpn = let v :: Vertex + v = fromJust (cvm qpn) + dvs :: [Vertex] + dvs = tg A.! v + in L.map (\ dv -> case vm dv of (_, x, _) -> PI x (pa M.! x)) dvs + in + L.map (\ pi@(PI qpn _) -> CP pi + (M.findWithDefault [] qpn fapp) + (M.findWithDefault [] qpn sapp) + (depp qpn)) + ps + +-- | Finalize an assignment and a reverse dependency map. +-- +-- This is preliminary, and geared towards output right now. 
+finalize :: Index -> Assignment -> RevDepMap -> IO () +finalize idx (A pa fa _) rdm = + let + -- get hold of the graph + g :: Graph + vm :: Vertex -> ((), QPN, [QPN]) + (g, vm) = graphFromEdges' (L.map (\ (x, xs) -> ((), x, xs)) (M.toList rdm)) + -- topsort the dependency graph, yielding a list of pkgs in the right order + f :: [PI QPN] + f = L.filter (not . instPI) (L.map ((\ (_, x, _) -> PI x (pa M.! x)) . vm) (topSort g)) + fapp :: Map QPN [(QFN, Bool)] -- flags per package + fapp = M.fromListWith (++) $ + L.map (\ (qfn@(FN (PI qpn _) _), b) -> (qpn, [(qfn, b)])) $ M.toList $ fa + -- print one instance + ppi pi@(PI qpn _) = showPI pi ++ status pi ++ " " ++ pflags (M.findWithDefault [] qpn fapp) + -- print install status + status :: PI QPN -> String + status (PI (Q _ pn) _) = + case insts of + [] -> " (new)" + vs -> " (" ++ intercalate ", " (L.map showVer vs) ++ ")" + where insts = L.map (\ (I v _) -> v) $ L.filter isInstalled $ + M.keys (M.findWithDefault M.empty pn idx) + isInstalled (I _ (Inst _ )) = True + isInstalled _ = False + -- print flag assignment + pflags = unwords . L.map (uncurry showFBool) + in + -- show packages with associated flag assignments + putStr (unlines (L.map ppi f)) diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Builder.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Builder.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Builder.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Builder.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,170 @@ +module Distribution.Client.Dependency.Modular.Builder where + +-- Building the search tree. +-- +-- In this phase, we build a search tree that is too large, i.e, it contains +-- invalid solutions. We keep track of the open goals at each point. 
We +-- nondeterministically pick an open goal (via a goal choice node), create +-- subtrees according to the index and the available solutions, and extend the +-- set of open goals by superficially looking at the dependencies recorded in +-- the index. +-- +-- For each goal, we keep track of all the *reasons* why it is being +-- introduced. These are for debugging and error messages, mainly. A little bit +-- of care has to be taken due to the way we treat flags. If a package has +-- flag-guarded dependencies, we cannot introduce them immediately. Instead, we +-- store the entire dependency. + +import Control.Monad.Reader hiding (sequence, mapM) +import Data.List as L +import Data.Map as M +import Prelude hiding (sequence, mapM) + +import Distribution.Client.Dependency.Modular.Dependency +import Distribution.Client.Dependency.Modular.Flag +import Distribution.Client.Dependency.Modular.Index +import Distribution.Client.Dependency.Modular.Package +import Distribution.Client.Dependency.Modular.PSQ as P +import Distribution.Client.Dependency.Modular.Tree + +-- | The state needed during the build phase of the search tree. +data BuildState = BS { + index :: Index, -- ^ information about packages and their dependencies + scope :: Scope, -- ^ information about encapsulations + rdeps :: RevDepMap, -- ^ set of all package goals, completed and open, with reverse dependencies + open :: PSQ OpenGoal (), -- ^ set of still open goals (flag and package goals) + next :: BuildType -- ^ kind of node to generate next +} + +-- | Extend the set of open goals with the new goals listed. +-- +-- We also adjust the map of overall goals, and keep track of the +-- reverse dependencies of each of the goals. 
+extendOpen :: QPN -> [OpenGoal] -> BuildState -> BuildState +extendOpen qpn' gs s@(BS { rdeps = gs', open = o' }) = go gs' o' gs + where + go :: RevDepMap -> PSQ OpenGoal () -> [OpenGoal] -> BuildState + go g o [] = s { rdeps = g, open = o } + go g o (ng@(OpenGoal (Flagged _ _ _ _) _gr) : ngs) = go g (cons ng () o) ngs + -- Note: for 'Flagged' goals, we always insert, so later additions win. + -- This is important, because in general, if a goal is inserted twice, + -- the later addition will have better dependency information. + go g o (ng@(OpenGoal (Stanza _ _ ) _gr) : ngs) = go g (cons ng () o) ngs + go g o (ng@(OpenGoal (Simple (Dep qpn _)) _gr) : ngs) + | qpn == qpn' = go g o ngs + -- we ignore self-dependencies at this point; TODO: more care may be needed + | qpn `M.member` g = go (M.adjust (qpn':) qpn g) o ngs + | otherwise = go (M.insert qpn [qpn'] g) (cons ng () o) ngs + -- code above is correct; insert/adjust have different arg order + +-- | Update the current scope by taking into account the encapsulations that +-- are defined for the current package. +establishScope :: QPN -> Encaps -> BuildState -> BuildState +establishScope (Q pp pn) ecs s = + s { scope = L.foldl (\ m e -> M.insert e pp' m) (scope s) ecs } + where + pp' = pn : pp -- new path + +-- | Given the current scope, qualify all the package names in the given set of +-- dependencies and then extend the set of open goals accordingly. 
+scopedExtendOpen :: QPN -> I -> QGoalReasonChain -> FlaggedDeps PN -> FlagInfo -> + BuildState -> BuildState +scopedExtendOpen qpn i gr fdeps fdefs s = extendOpen qpn gs s + where + sc = scope s + -- Qualify all package names + qfdeps = L.map (fmap (qualify sc)) fdeps -- qualify all the package names + -- Introduce all package flags + qfdefs = L.map (\ (fn, b) -> Flagged (FN (PI qpn i) fn) b [] []) $ M.toList fdefs + -- Combine new package and flag goals + gs = L.map (flip OpenGoal gr) (qfdefs ++ qfdeps) + -- NOTE: + -- + -- In the expression @qfdefs ++ qfdeps@ above, flags occur potentially + -- multiple times, both via the flag declaration and via dependencies. + -- The order is potentially important, because the occurrences via + -- dependencies may record flag-dependency information. After a number + -- of bugs involving computing this information incorrectly, however, + -- we're currently not using carefully computed inter-flag dependencies + -- anymore, but instead use 'simplifyVar' when computing conflict sets + -- to map all flags of one package to a single flag for conflict set + -- purposes, thereby treating them all as interdependent. + -- + -- If we ever move to a more clever algorithm again, then the line above + -- needs to be looked at very carefully, and probably be replaced by + -- more systematically computed flag dependency information. + +-- | Datatype that encodes what to build next +data BuildType = + Goals -- ^ build a goal choice node + | OneGoal OpenGoal -- ^ build a node for this goal + | Instance QPN I PInfo QGoalReasonChain -- ^ build a tree for a concrete instance + deriving Show + +build :: BuildState -> Tree (QGoalReasonChain, Scope) +build = ana go + where + go :: BuildState -> TreeF (QGoalReasonChain, Scope) BuildState + + -- If we have a choice between many goals, we just record the choice in + -- the tree. We select each open goal in turn, and before we descend, remove + -- it from the queue of open goals. 
+ go bs@(BS { rdeps = rds, open = gs, next = Goals }) + | P.null gs = DoneF rds + | otherwise = GoalChoiceF (P.mapWithKey (\ g (_sc, gs') -> bs { next = OneGoal g, open = gs' }) + (P.splits gs)) + + -- If we have already picked a goal, then the choice depends on the kind + -- of goal. + -- + -- For a package, we look up the instances available in the global info, + -- and then handle each instance in turn. + go bs@(BS { index = idx, scope = sc, next = OneGoal (OpenGoal (Simple (Dep qpn@(Q _ pn) _)) gr) }) = + case M.lookup pn idx of + Nothing -> FailF (toConflictSet (Goal (P qpn) gr)) (BuildFailureNotInIndex pn) + Just pis -> PChoiceF qpn (gr, sc) (P.fromList (L.map (\ (i, info) -> + (i, bs { next = Instance qpn i info gr })) + (M.toList pis))) + -- TODO: data structure conversion is rather ugly here + + -- For a flag, we create only two subtrees, and we create them in the order + -- that is indicated by the flag default. + -- + -- TODO: Should we include the flag default in the tree? + go bs@(BS { scope = sc, next = OneGoal (OpenGoal (Flagged qfn@(FN (PI qpn _) _) (FInfo b m w) t f) gr) }) = + FChoiceF qfn (gr, sc) (w || trivial) m (P.fromList (reorder b + [(True, (extendOpen qpn (L.map (flip OpenGoal (FDependency qfn True : gr)) t) bs) { next = Goals }), + (False, (extendOpen qpn (L.map (flip OpenGoal (FDependency qfn False : gr)) f) bs) { next = Goals })])) + where + reorder True = id + reorder False = reverse + trivial = L.null t && L.null f + + go bs@(BS { scope = sc, next = OneGoal (OpenGoal (Stanza qsn@(SN (PI qpn _) _) t) gr) }) = + SChoiceF qsn (gr, sc) trivial (P.fromList + [(False, bs { next = Goals }), + (True, (extendOpen qpn (L.map (flip OpenGoal (SDependency qsn : gr)) t) bs) { next = Goals })]) + where + trivial = L.null t + + -- For a particular instance, we change the state: we update the scope, + -- and furthermore we update the set of goals. + -- + -- TODO: We could inline this above. 
+ go bs@(BS { next = Instance qpn i (PInfo fdeps fdefs ecs _) gr }) = + go ((establishScope qpn ecs + (scopedExtendOpen qpn i (PDependency (PI qpn i) : gr) fdeps fdefs bs)) + { next = Goals }) + +-- | Interface to the tree builder. Just takes an index and a list of package names, +-- and computes the initial state and then the tree from there. +buildTree :: Index -> Bool -> [PN] -> Tree (QGoalReasonChain, Scope) +buildTree idx ind igs = + build (BS idx sc + (M.fromList (L.map (\ qpn -> (qpn, [])) qpns)) + (P.fromList (L.map (\ qpn -> (OpenGoal (Simple (Dep qpn (Constrained []))) [UserGoal], ())) qpns)) + Goals) + where + sc | ind = makeIndependent igs + | otherwise = emptyScope + qpns = L.map (qualify sc) igs diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/ConfiguredConversion.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/ConfiguredConversion.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/ConfiguredConversion.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/ConfiguredConversion.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,40 @@ +module Distribution.Client.Dependency.Modular.ConfiguredConversion where + +import Data.Maybe +import Prelude hiding (pi) + +import Distribution.Client.InstallPlan +import Distribution.Client.Types +import Distribution.Compiler +import qualified Distribution.Client.PackageIndex as CI +import qualified Distribution.Simple.PackageIndex as SI +import Distribution.System + +import Distribution.Client.Dependency.Modular.Configured +import Distribution.Client.Dependency.Modular.Package + +mkPlan :: Platform -> CompilerInfo -> + SI.InstalledPackageIndex -> CI.PackageIndex SourcePackage -> + [CP QPN] -> Either [PlanProblem] InstallPlan +mkPlan plat comp iidx sidx cps = + new plat comp (SI.fromList 
(map (convCP iidx sidx) cps)) + +convCP :: SI.InstalledPackageIndex -> CI.PackageIndex SourcePackage -> + CP QPN -> PlanPackage +convCP iidx sidx (CP qpi fa es ds) = + case convPI qpi of + Left pi -> PreExisting $ InstalledPackage + (fromJust $ SI.lookupInstalledPackageId iidx pi) + (map convPI' ds) + Right pi -> Configured $ ConfiguredPackage + (fromJust $ CI.lookupPackageId sidx pi) + fa + es + (map convPI' ds) + +convPI :: PI QPN -> Either InstalledPackageId PackageId +convPI (PI _ (I _ (Inst pi))) = Left pi +convPI qpi = Right $ convPI' qpi + +convPI' :: PI QPN -> PackageId +convPI' (PI (Q _ pn) (I v _)) = PackageIdentifier pn v diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Configured.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Configured.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Configured.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Configured.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,10 @@ +module Distribution.Client.Dependency.Modular.Configured where + +import Distribution.PackageDescription (FlagAssignment) -- from Cabal +import Distribution.Client.Types (OptionalStanza) + +import Distribution.Client.Dependency.Modular.Package + +-- | A configured package is a package instance together with +-- a flag assignment and complete dependencies. 
+data CP qpn = CP (PI qpn) FlagAssignment [OptionalStanza] [PI qpn] diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Dependency.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Dependency.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Dependency.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Dependency.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,181 @@ +{-# LANGUAGE DeriveFunctor #-} +module Distribution.Client.Dependency.Modular.Dependency where + +import Prelude hiding (pi) + +import Data.List as L +import Data.Map as M +import Data.Set as S + +import Distribution.Client.Dependency.Modular.Flag +import Distribution.Client.Dependency.Modular.Package +import Distribution.Client.Dependency.Modular.Version + +-- | The type of variables that play a role in the solver. +-- Note that the tree currently does not use this type directly, +-- and rather has separate tree nodes for the different types of +-- variables. This fits better with the fact that in most cases, +-- these have to be treated differently. +-- +-- TODO: This isn't the ideal location to declare the type, +-- but we need them for constrained instances. +data Var qpn = P qpn | F (FN qpn) | S (SN qpn) + deriving (Eq, Ord, Show, Functor) + +-- | For computing conflict sets, we map flag choice vars to a +-- single flag choice. This means that all flag choices are treated +-- as interdependent. So if one flag of a package ends up in a +-- conflict set, then all flags are being treated as being part of +-- the conflict set. 
+simplifyVar :: Var qpn -> Var qpn +simplifyVar (P qpn) = P qpn +simplifyVar (F (FN pi _)) = F (FN pi (mkFlag "flag")) +simplifyVar (S qsn) = S qsn + +showVar :: Var QPN -> String +showVar (P qpn) = showQPN qpn +showVar (F qfn) = showQFN qfn +showVar (S qsn) = showQSN qsn + +type ConflictSet qpn = Set (Var qpn) + +showCS :: ConflictSet QPN -> String +showCS = intercalate ", " . L.map showVar . S.toList + +-- | Constrained instance. If the choice has already been made, this is +-- a fixed instance, and we record the package name for which the choice +-- is for convenience. Otherwise, it is a list of version ranges paired with +-- the goals / variables that introduced them. +data CI qpn = Fixed I (Goal qpn) | Constrained [VROrigin qpn] + deriving (Eq, Show, Functor) + +instance ResetGoal CI where + resetGoal g (Fixed i _) = Fixed i g + resetGoal g (Constrained vrs) = Constrained (L.map (\ (x, y) -> (x, resetGoal g y)) vrs) + +type VROrigin qpn = (VR, Goal qpn) + +-- | Helper function to collapse a list of version ranges with origins into +-- a single, simplified, version range. +collapse :: [VROrigin qpn] -> VR +collapse = simplifyVR . L.foldr (.&&.) anyVR . L.map fst + +showCI :: CI QPN -> String +showCI (Fixed i _) = "==" ++ showI i +showCI (Constrained vr) = showVR (collapse vr) + +-- | Merge constrained instances. We currently adopt a lazy strategy for +-- merging, i.e., we only perform actual checking if one of the two choices +-- is fixed. If the merge fails, we return a conflict set indicating the +-- variables responsible for the failure, as well as the two conflicting +-- fragments. +-- +-- Note that while there may be more than one conflicting pair of version +-- ranges, we only return the first we find. +-- +-- TODO: Different pairs might have different conflict sets. We're +-- obviously interested to return a conflict that has a "better" conflict +-- set in the sense the it contains variables that allow us to backjump +-- further. 
We might apply some heuristics here, such as to change the +-- order in which we check the constraints. +merge :: Ord qpn => CI qpn -> CI qpn -> Either (ConflictSet qpn, (CI qpn, CI qpn)) (CI qpn) +merge c@(Fixed i g1) d@(Fixed j g2) + | i == j = Right c + | otherwise = Left (S.union (toConflictSet g1) (toConflictSet g2), (c, d)) +merge c@(Fixed (I v _) g1) (Constrained rs) = go rs -- I tried "reverse rs" here, but it seems to slow things down ... + where + go [] = Right c + go (d@(vr, g2) : vrs) + | checkVR vr v = go vrs + | otherwise = Left (S.union (toConflictSet g1) (toConflictSet g2), (c, Constrained [d])) +merge c@(Constrained _) d@(Fixed _ _) = merge d c +merge (Constrained rs) (Constrained ss) = Right (Constrained (rs ++ ss)) + + +type FlaggedDeps qpn = [FlaggedDep qpn] + +-- | Flagged dependencies can either be plain dependency constraints, +-- or flag-dependent dependency trees. +data FlaggedDep qpn = + Flagged (FN qpn) FInfo (TrueFlaggedDeps qpn) (FalseFlaggedDeps qpn) + | Stanza (SN qpn) (TrueFlaggedDeps qpn) + | Simple (Dep qpn) + deriving (Eq, Show, Functor) + +type TrueFlaggedDeps qpn = FlaggedDeps qpn +type FalseFlaggedDeps qpn = FlaggedDeps qpn + +-- | A dependency (constraint) associates a package name with a +-- constrained instance. +data Dep qpn = Dep qpn (CI qpn) + deriving (Eq, Show, Functor) + +showDep :: Dep QPN -> String +showDep (Dep qpn (Fixed i (Goal v _)) ) = + (if P qpn /= v then showVar v ++ " => " else "") ++ + showQPN qpn ++ "==" ++ showI i +showDep (Dep qpn (Constrained [(vr, Goal v _)])) = + showVar v ++ " => " ++ showQPN qpn ++ showVR vr +showDep (Dep qpn ci ) = + showQPN qpn ++ showCI ci + +instance ResetGoal Dep where + resetGoal g (Dep qpn ci) = Dep qpn (resetGoal g ci) + +-- | A map containing reverse dependencies between qualified +-- package names. +type RevDepMap = Map QPN [QPN] + +-- | Goals are solver variables paired with information about +-- why they have been introduced. 
+data Goal qpn = Goal (Var qpn) (GoalReasonChain qpn) + deriving (Eq, Show, Functor) + +class ResetGoal f where + resetGoal :: Goal qpn -> f qpn -> f qpn + +instance ResetGoal Goal where + resetGoal = const + +-- | For open goals as they occur during the build phase, we need to store +-- additional information about flags. +data OpenGoal = OpenGoal (FlaggedDep QPN) QGoalReasonChain + deriving (Eq, Show) + +-- | Reasons why a goal can be added to a goal set. +data GoalReason qpn = + UserGoal + | PDependency (PI qpn) + | FDependency (FN qpn) Bool + | SDependency (SN qpn) + deriving (Eq, Show, Functor) + +-- | The first element is the immediate reason. The rest are the reasons +-- for the reasons ... +type GoalReasonChain qpn = [GoalReason qpn] + +type QGoalReasonChain = GoalReasonChain QPN + +goalReasonToVars :: GoalReason qpn -> ConflictSet qpn +goalReasonToVars UserGoal = S.empty +goalReasonToVars (PDependency (PI qpn _)) = S.singleton (P qpn) +goalReasonToVars (FDependency qfn _) = S.singleton (simplifyVar (F qfn)) +goalReasonToVars (SDependency qsn) = S.singleton (S qsn) + +goalReasonChainToVars :: Ord qpn => GoalReasonChain qpn -> ConflictSet qpn +goalReasonChainToVars = S.unions . L.map goalReasonToVars + +goalReasonChainsToVars :: Ord qpn => [GoalReasonChain qpn] -> ConflictSet qpn +goalReasonChainsToVars = S.unions . L.map goalReasonChainToVars + +-- | Closes a goal, i.e., removes all the extraneous information that we +-- need only during the build phase. +close :: OpenGoal -> Goal QPN +close (OpenGoal (Simple (Dep qpn _)) gr) = Goal (P qpn) gr +close (OpenGoal (Flagged qfn _ _ _ ) gr) = Goal (F qfn) gr +close (OpenGoal (Stanza qsn _) gr) = Goal (S qsn) gr + +-- | Compute a conflic set from a goal. The conflict set contains the +-- closure of goal reasons as well as the variable of the goal itself. 
+toConflictSet :: Ord qpn => Goal qpn -> ConflictSet qpn +toConflictSet (Goal g grs) = S.insert (simplifyVar g) (goalReasonChainToVars grs) diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Explore.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Explore.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Explore.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Explore.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,149 @@ +module Distribution.Client.Dependency.Modular.Explore where + +import Control.Applicative as A +import Data.Foldable +import Data.List as L +import Data.Map as M +import Data.Set as S + +import Distribution.Client.Dependency.Modular.Assignment +import Distribution.Client.Dependency.Modular.Dependency +import Distribution.Client.Dependency.Modular.Log +import Distribution.Client.Dependency.Modular.Message +import Distribution.Client.Dependency.Modular.Package +import Distribution.Client.Dependency.Modular.PSQ as P +import Distribution.Client.Dependency.Modular.Tree + +-- | Backjumping. +-- +-- A tree traversal that tries to propagate conflict sets +-- up the tree from the leaves, and thereby cut branches. +-- All the tricky things are done in the function 'combine'. +backjump :: Tree a -> Tree (Maybe (ConflictSet QPN)) +backjump = snd . 
cata go + where + go (FailF c fr) = (Just c, Fail c fr) + go (DoneF rdm ) = (Nothing, Done rdm) + go (PChoiceF qpn _ ts) = (c, PChoice qpn c (P.fromList ts')) + where + ~(c, ts') = combine (P qpn) (P.toList ts) S.empty + go (FChoiceF qfn _ b m ts) = (c, FChoice qfn c b m (P.fromList ts')) + where + ~(c, ts') = combine (F qfn) (P.toList ts) S.empty + go (SChoiceF qsn _ b ts) = (c, SChoice qsn c b (P.fromList ts')) + where + ~(c, ts') = combine (S qsn) (P.toList ts) S.empty + go (GoalChoiceF ts) = (c, GoalChoice (P.fromList ts')) + where + ~(cs, ts') = unzip $ L.map (\ (k, (x, v)) -> (x, (k, v))) $ P.toList ts + c = case cs of [] -> Nothing + d : _ -> d + +-- | The 'combine' function is at the heart of backjumping. It takes +-- the variable we're currently considering, and a list of children +-- annotated with their respective conflict sets, and an accumulator +-- for the result conflict set. It returns a combined conflict set +-- for the parent node, and a (potentially shortened) list of children +-- with the annotations removed. +-- +-- It is *essential* that we produce the results as early as possible. +-- In particular, we have to produce the list of children prior to +-- traversing the entire list -- otherwise we lose the desired behaviour +-- of being able to traverse the tree from left to right incrementally. +-- +-- We can shorten the list of children if we find an individual conflict +-- set that does not contain the current variable. In this case, we can +-- just lift the conflict set to the current level, because the current +-- level cannot possibly have contributed to this conflict, so no other +-- choice at the current level would avoid the conflict. +-- +-- If any of the children might contain a successful solution +-- (indicated by Nothing), then Nothing will be the combined +-- conflict set. If all children contain conflict sets, we can +-- take the union as the combined conflict set. 
+combine :: Var QPN -> [(a, (Maybe (ConflictSet QPN), b))] -> + ConflictSet QPN -> (Maybe (ConflictSet QPN), [(a, b)]) +combine _ [] c = (Just c, []) +combine var ((k, ( d, v)) : xs) c = (\ ~(e, ys) -> (e, (k, v) : ys)) $ + case d of + Just e | not (simplifyVar var `S.member` e) -> (Just e, []) + | otherwise -> combine var xs (e `S.union` c) + Nothing -> (Nothing, snd $ combine var xs S.empty) + +-- | Naive backtracking exploration of the search tree. This will yield correct +-- assignments only once the tree itself is validated. +explore :: Alternative m => Tree a -> (Assignment -> m (Assignment, RevDepMap)) +explore = cata go + where + go (FailF _ _) _ = A.empty + go (DoneF rdm) a = pure (a, rdm) + go (PChoiceF qpn _ ts) (A pa fa sa) = + asum $ -- try children in order, + P.mapWithKey -- when descending ... + (\ k r -> r (A (M.insert qpn k pa) fa sa)) -- record the pkg choice + ts + go (FChoiceF qfn _ _ _ ts) (A pa fa sa) = + asum $ -- try children in order, + P.mapWithKey -- when descending ... + (\ k r -> r (A pa (M.insert qfn k fa) sa)) -- record the flag choice + ts + go (SChoiceF qsn _ _ ts) (A pa fa sa) = + asum $ -- try children in order, + P.mapWithKey -- when descending ... + (\ k r -> r (A pa fa (M.insert qsn k sa))) -- record the flag choice + ts + go (GoalChoiceF ts) a = + casePSQ ts A.empty -- empty goal choice is an internal error + (\ _k v _xs -> v a) -- commit to the first goal choice + +-- | Version of 'explore' that returns a 'Log'. +exploreLog :: Tree (Maybe (ConflictSet QPN)) -> + (Assignment -> Log Message (Assignment, RevDepMap)) +exploreLog = cata go + where + go (FailF c fr) _ = failWith (Failure c fr) + go (DoneF rdm) a = succeedWith Success (a, rdm) + go (PChoiceF qpn c ts) (A pa fa sa) = + backjumpInfo c $ + asum $ -- try children in order, + P.mapWithKey -- when descending ... + (\ k r -> tryWith (TryP (PI qpn k)) $ -- log and ... 
+ r (A (M.insert qpn k pa) fa sa)) -- record the pkg choice + ts + go (FChoiceF qfn c _ _ ts) (A pa fa sa) = + backjumpInfo c $ + asum $ -- try children in order, + P.mapWithKey -- when descending ... + (\ k r -> tryWith (TryF qfn k) $ -- log and ... + r (A pa (M.insert qfn k fa) sa)) -- record the pkg choice + ts + go (SChoiceF qsn c _ ts) (A pa fa sa) = + backjumpInfo c $ + asum $ -- try children in order, + P.mapWithKey -- when descending ... + (\ k r -> tryWith (TryS qsn k) $ -- log and ... + r (A pa fa (M.insert qsn k sa))) -- record the pkg choice + ts + go (GoalChoiceF ts) a = + casePSQ ts + (failWith (Failure S.empty EmptyGoalChoice)) -- empty goal choice is an internal error + (\ k v _xs -> continueWith (Next (close k)) (v a)) -- commit to the first goal choice + +-- | Add in information about pruned trees. +-- +-- TODO: This isn't quite optimal, because we do not merely report the shape of the +-- tree, but rather make assumptions about where that shape originated from. It'd be +-- better if the pruning itself would leave information that we could pick up at this +-- point. +backjumpInfo :: Maybe (ConflictSet QPN) -> Log Message a -> Log Message a +backjumpInfo c m = m <|> case c of -- important to produce 'm' before matching on 'c'! + Nothing -> A.empty + Just cs -> failWith (Failure cs Backjump) + +-- | Interface. +exploreTree :: Alternative m => Tree a -> m (Assignment, RevDepMap) +exploreTree t = explore t (A M.empty M.empty M.empty) + +-- | Interface. 
+exploreTreeLog :: Tree (Maybe (ConflictSet QPN)) -> Log Message (Assignment, RevDepMap) +exploreTreeLog t = exploreLog t (A M.empty M.empty M.empty) diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Flag.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Flag.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Flag.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Flag.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,70 @@ +{-# LANGUAGE DeriveFunctor #-} +module Distribution.Client.Dependency.Modular.Flag where + +import Data.Map as M +import Prelude hiding (pi) + +import Distribution.PackageDescription hiding (Flag) -- from Cabal + +import Distribution.Client.Dependency.Modular.Package +import Distribution.Client.Types (OptionalStanza(..)) + +-- | Flag name. Consists of a package instance and the flag identifier itself. +data FN qpn = FN (PI qpn) Flag + deriving (Eq, Ord, Show, Functor) + +-- | Extract the package name from a flag name. +getPN :: FN qpn -> qpn +getPN (FN (PI qpn _) _) = qpn + +-- | Flag identifier. Just a string. +type Flag = FlagName + +unFlag :: Flag -> String +unFlag (FlagName fn) = fn + +mkFlag :: String -> Flag +mkFlag fn = FlagName fn + +-- | Flag info. Default value, whether the flag is manual, and +-- whether the flag is weak. Manual flags can only be set explicitly. +-- Weak flags are typically deferred by the solver. +data FInfo = FInfo { fdefault :: Bool, fmanual :: Bool, fweak :: Bool } + deriving (Eq, Ord, Show) + +-- | Flag defaults. +type FlagInfo = Map Flag FInfo + +-- | Qualified flag name. +type QFN = FN QPN + +-- | Stanza name. Paired with a package name, much like a flag. +data SN qpn = SN (PI qpn) OptionalStanza + deriving (Eq, Ord, Show, Functor) + +-- | Qualified stanza name. 
+type QSN = SN QPN + +unStanza :: OptionalStanza -> String +unStanza TestStanzas = "test" +unStanza BenchStanzas = "bench" + +showQFNBool :: QFN -> Bool -> String +showQFNBool qfn@(FN pi _f) b = showPI pi ++ ":" ++ showFBool qfn b + +showQSNBool :: QSN -> Bool -> String +showQSNBool qsn@(SN pi _f) b = showPI pi ++ ":" ++ showSBool qsn b + +showFBool :: FN qpn -> Bool -> String +showFBool (FN _ f) True = "+" ++ unFlag f +showFBool (FN _ f) False = "-" ++ unFlag f + +showSBool :: SN qpn -> Bool -> String +showSBool (SN _ s) True = "*" ++ unStanza s +showSBool (SN _ s) False = "!" ++ unStanza s + +showQFN :: QFN -> String +showQFN (FN pi f) = showPI pi ++ ":" ++ unFlag f + +showQSN :: QSN -> String +showQSN (SN pi f) = showPI pi ++ ":" ++ unStanza f diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/IndexConversion.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/IndexConversion.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/IndexConversion.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/IndexConversion.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,200 @@ +module Distribution.Client.Dependency.Modular.IndexConversion where + +import Data.List as L +import Data.Map as M +import Data.Maybe +import Prelude hiding (pi) + +import qualified Distribution.Client.PackageIndex as CI +import Distribution.Client.Types +import Distribution.Compiler +import Distribution.InstalledPackageInfo as IPI +import Distribution.Package -- from Cabal +import Distribution.PackageDescription as PD -- from Cabal +import qualified Distribution.Simple.PackageIndex as SI +import Distribution.System + +import Distribution.Client.Dependency.Modular.Dependency as D +import Distribution.Client.Dependency.Modular.Flag as F +import 
Distribution.Client.Dependency.Modular.Index +import Distribution.Client.Dependency.Modular.Package +import Distribution.Client.Dependency.Modular.Tree +import Distribution.Client.Dependency.Modular.Version + +-- | Convert both the installed package index and the source package +-- index into one uniform solver index. +-- +-- We use 'allPackagesBySourcePackageId' for the installed package index +-- because that returns us several instances of the same package and version +-- in order of preference. This allows us in principle to \"shadow\" +-- packages if there are several installed packages of the same version. +-- There are currently some shortcomings in both GHC and Cabal in +-- resolving these situations. However, the right thing to do is to +-- fix the problem there, so for now, shadowing is only activated if +-- explicitly requested. +convPIs :: OS -> Arch -> CompilerInfo -> Bool -> Bool -> + SI.InstalledPackageIndex -> CI.PackageIndex SourcePackage -> Index +convPIs os arch comp sip strfl iidx sidx = + mkIndex (convIPI' sip iidx ++ convSPI' os arch comp strfl sidx) + +-- | Convert a Cabal installed package index to the simpler, +-- more uniform index format of the solver. +convIPI' :: Bool -> SI.InstalledPackageIndex -> [(PN, I, PInfo)] +convIPI' sip idx = + -- apply shadowing whenever there are multiple installed packages with + -- the same version + [ maybeShadow (convIP idx pkg) + | (_pkgid, pkgs) <- SI.allPackagesBySourcePackageId idx + , (maybeShadow, pkg) <- zip (id : repeat shadow) pkgs ] + where + + -- shadowing is recorded in the package info + shadow (pn, i, PInfo fdeps fds encs _) | sip = (pn, i, PInfo fdeps fds encs (Just Shadowed)) + shadow x = x + +convIPI :: Bool -> SI.InstalledPackageIndex -> Index +convIPI sip = mkIndex . convIPI' sip + +-- | Convert a single installed package into the solver-specific format. 
+convIP :: SI.InstalledPackageIndex -> InstalledPackageInfo -> (PN, I, PInfo) +convIP idx ipi = + let ipid = IPI.installedPackageId ipi + i = I (pkgVersion (sourcePackageId ipi)) (Inst ipid) + pn = pkgName (sourcePackageId ipi) + in case mapM (convIPId pn idx) (IPI.depends ipi) of + Nothing -> (pn, i, PInfo [] M.empty [] (Just Broken)) + Just fds -> (pn, i, PInfo fds M.empty [] Nothing) +-- TODO: Installed packages should also store their encapsulations! + +-- | Convert dependencies specified by an installed package id into +-- flagged dependencies of the solver. +-- +-- May return Nothing if the package can't be found in the index. That +-- indicates that the original package having this dependency is broken +-- and should be ignored. +convIPId :: PN -> SI.InstalledPackageIndex -> InstalledPackageId -> Maybe (FlaggedDep PN) +convIPId pn' idx ipid = + case SI.lookupInstalledPackageId idx ipid of + Nothing -> Nothing + Just ipi -> let i = I (pkgVersion (sourcePackageId ipi)) (Inst ipid) + pn = pkgName (sourcePackageId ipi) + in Just (D.Simple (Dep pn (Fixed i (Goal (P pn') [])))) + +-- | Convert a cabal-install source package index to the simpler, +-- more uniform index format of the solver. +convSPI' :: OS -> Arch -> CompilerInfo -> Bool -> + CI.PackageIndex SourcePackage -> [(PN, I, PInfo)] +convSPI' os arch cinfo strfl = L.map (convSP os arch cinfo strfl) . CI.allPackages + +convSPI :: OS -> Arch -> CompilerInfo -> Bool -> + CI.PackageIndex SourcePackage -> Index +convSPI os arch cinfo strfl = mkIndex . convSPI' os arch cinfo strfl + +-- | Convert a single source package into the solver-specific format. 
+convSP :: OS -> Arch -> CompilerInfo -> Bool -> SourcePackage -> (PN, I, PInfo) +convSP os arch cinfo strfl (SourcePackage (PackageIdentifier pn pv) gpd _ _pl) = + let i = I pv InRepo + in (pn, i, convGPD os arch cinfo strfl (PI pn i) gpd) + +-- We do not use 'flattenPackageDescription' or 'finalizePackageDescription' +-- from 'Distribution.PackageDescription.Configuration' here, because we +-- want to keep the condition tree, but simplify much of the test. + +-- | Convert a generic package description to a solver-specific 'PInfo'. +-- +-- TODO: We currently just take all dependencies from all specified library, +-- executable and test components. This does not quite seem fair. +convGPD :: OS -> Arch -> CompilerInfo -> Bool -> + PI PN -> GenericPackageDescription -> PInfo +convGPD os arch comp strfl pi + (GenericPackageDescription _ flags libs exes tests benchs) = + let + fds = flagInfo strfl flags + in + PInfo + (maybe [] (convCondTree os arch comp pi fds (const True) ) libs ++ + concatMap (convCondTree os arch comp pi fds (const True) . snd) exes ++ + prefix (Stanza (SN pi TestStanzas)) + (L.map (convCondTree os arch comp pi fds (const True) . snd) tests) ++ + prefix (Stanza (SN pi BenchStanzas)) + (L.map (convCondTree os arch comp pi fds (const True) . snd) benchs)) + fds + [] -- TODO: add encaps + Nothing + +prefix :: (FlaggedDeps qpn -> FlaggedDep qpn) -> [FlaggedDeps qpn] -> FlaggedDeps qpn +prefix _ [] = [] +prefix f fds = [f (concat fds)] + +-- | Convert flag information. Automatic flags are now considered weak +-- unless strong flags have been selected explicitly. +flagInfo :: Bool -> [PD.Flag] -> FlagInfo +flagInfo strfl = M.fromList . L.map (\ (MkFlag fn _ b m) -> (fn, FInfo b m (not (strfl || m)))) + +-- | Convert condition trees to flagged dependencies. 
+convCondTree :: OS -> Arch -> CompilerInfo -> PI PN -> FlagInfo -> + (a -> Bool) -> -- how to detect if a branch is active + CondTree ConfVar [Dependency] a -> FlaggedDeps PN +convCondTree os arch comp pi@(PI pn _) fds p (CondNode info ds branches) + | p info = L.map (D.Simple . convDep pn) ds -- unconditional dependencies + ++ concatMap (convBranch os arch comp pi fds p) branches + | otherwise = [] + +-- | Branch interpreter. +-- +-- Here, we try to simplify one of Cabal's condition tree branches into the +-- solver's flagged dependency format, which is weaker. Condition trees can +-- contain complex logical expression composed from flag choices and special +-- flags (such as architecture, or compiler flavour). We try to evaluate the +-- special flags and subsequently simplify to a tree that only depends on +-- simple flag choices. +convBranch :: OS -> Arch -> CompilerInfo -> + PI PN -> FlagInfo -> + (a -> Bool) -> -- how to detect if a branch is active + (Condition ConfVar, + CondTree ConfVar [Dependency] a, + Maybe (CondTree ConfVar [Dependency] a)) -> FlaggedDeps PN +convBranch os arch cinfo pi fds p (c', t', mf') = + go c' ( convCondTree os arch cinfo pi fds p t') + (maybe [] (convCondTree os arch cinfo pi fds p) mf') + where + go :: Condition ConfVar -> + FlaggedDeps PN -> FlaggedDeps PN -> FlaggedDeps PN + go (Lit True) t _ = t + go (Lit False) _ f = f + go (CNot c) t f = go c f t + go (CAnd c d) t f = go c (go d t f) f + go (COr c d) t f = go c t (go d t f) + go (Var (Flag fn)) t f = extractCommon t f ++ [Flagged (FN pi fn) (fds ! fn) t f] + go (Var (OS os')) t f + | os == os' = t + | otherwise = f + go (Var (Arch arch')) t f + | arch == arch' = t + | otherwise = f + go (Var (Impl cf cvr)) t f + | matchImpl (compilerInfoId cinfo) || + -- fixme: Nothing should be treated as unknown, rather than empty + -- list. 
This code should eventually be changed to either + -- support partial resolution of compiler flags or to + -- complain about incompletely configured compilers. + any matchImpl (fromMaybe [] $ compilerInfoCompat cinfo) = t + | otherwise = f + where + matchImpl (CompilerId cf' cv) = cf == cf' && checkVR cvr cv + + -- If both branches contain the same package as a simple dep, we lift it to + -- the next higher-level, but without constraints. This heuristic together + -- with deferring flag choices will then usually first resolve this package, + -- and try an already installed version before imposing a default flag choice + -- that might not be what we want. + extractCommon :: FlaggedDeps PN -> FlaggedDeps PN -> FlaggedDeps PN + extractCommon ps ps' = [ D.Simple (Dep pn (Constrained [])) | D.Simple (Dep pn _) <- ps, D.Simple (Dep pn' _) <- ps', pn == pn' ] + +-- | Convert a Cabal dependency to a solver-specific dependency. +convDep :: PN -> Dependency -> Dep PN +convDep pn' (Dependency pn vr) = Dep pn (Constrained [(vr, Goal (P pn') [])]) + +-- | Convert a Cabal package identifier to a solver-specific dependency. 
+convPI :: PN -> PackageIdentifier -> Dep PN +convPI pn' (PackageIdentifier pn v) = Dep pn (Constrained [(eqVR v, Goal (P pn') [])]) diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Index.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Index.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Index.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Index.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,33 @@ +module Distribution.Client.Dependency.Modular.Index where + +import Data.List as L +import Data.Map as M +import Prelude hiding (pi) + +import Distribution.Client.Dependency.Modular.Dependency +import Distribution.Client.Dependency.Modular.Flag +import Distribution.Client.Dependency.Modular.Package +import Distribution.Client.Dependency.Modular.Tree + +-- | An index contains information about package instances. This is a nested +-- dictionary. Package names are mapped to instances, which in turn is mapped +-- to info. +type Index = Map PN (Map I PInfo) + +-- | Info associated with a package instance. +-- Currently, dependencies, flags, encapsulations and failure reasons. +-- Packages that have a failure reason recorded for them are disabled +-- globally, for reasons external to the solver. We currently use this +-- for shadowing which essentially is a GHC limitation, and for +-- installed packages that are broken. +data PInfo = PInfo (FlaggedDeps PN) FlagInfo Encaps (Maybe FailReason) + deriving (Show) + +-- | Encapsulations. A list of package names. 
+type Encaps = [PN] + +mkIndex :: [(PN, I, PInfo)] -> Index +mkIndex xs = M.map M.fromList (groupMap (L.map (\ (pn, i, pi) -> (pn, (i, pi))) xs)) + +groupMap :: Ord a => [(a, b)] -> Map a [b] +groupMap xs = M.fromListWith (flip (++)) (L.map (\ (x, y) -> (x, [y])) xs) diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Log.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Log.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Log.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Log.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,121 @@ +module Distribution.Client.Dependency.Modular.Log where + +import Control.Applicative +import Data.List as L +import Data.Set as S + +import Distribution.Client.Dependency.Types -- from Cabal + +import Distribution.Client.Dependency.Modular.Dependency +import Distribution.Client.Dependency.Modular.Message +import Distribution.Client.Dependency.Modular.Package +import Distribution.Client.Dependency.Modular.Tree (FailReason(..)) + +-- | The 'Log' datatype. +-- +-- Represents the progress of a computation lazily. +-- +-- Parameterized over the type of actual messages and the final result. +type Log m a = Progress m () a + +-- | Turns a log into a list of messages paired with a final result. A final result +-- of 'Nothing' indicates failure. A final result of 'Just' indicates success. +-- Keep in mind that forcing the second component of the returned pair will force the +-- entire log. +runLog :: Log m a -> ([m], Maybe a) +runLog (Done x) = ([], Just x) +runLog (Fail _) = ([], Nothing) +runLog (Step m p) = let + (ms, r) = runLog p + in + (m : ms, r) + +-- | Postprocesses a log file. Takes as an argument a limit on allowed backjumps. 
+-- If the limit is 'Nothing', then infinitely many backjumps are allowed. If the +-- limit is 'Just 0', backtracking is completely disabled. +logToProgress :: Maybe Int -> Log Message a -> Progress String String a +logToProgress mbj l = let + (ms, s) = runLog l + -- 'Nothing' for 's' means search tree exhaustively searched and failed + (es, e) = proc 0 ms -- catch first error (always) + -- 'Nothing' in 'e' means no backjump found + (ns, t) = case mbj of + Nothing -> (ms, Nothing) + Just n -> proc n ms + -- 'Nothing' in 't' means backjump limit not reached + -- prefer first error over later error + (exh, r) = case t of + -- backjump limit not reached + Nothing -> case s of + Nothing -> (True, e) -- failed after exhaustive search + Just _ -> (True, Nothing) -- success + -- backjump limit reached; prefer first error + Just _ -> (False, e) -- failed after backjump limit was reached + in go es es -- trace for first error + (showMessages (const True) True ns) -- shortened run + r s exh + where + -- Proc takes the allowed number of backjumps and a list of messages and explores the + -- message list until the maximum number of backjumps has been reached. The log until + -- that point as well as whether we have encountered an error or not are returned. + proc :: Int -> [Message] -> ([Message], Maybe (ConflictSet QPN)) + proc _ [] = ([], Nothing) + proc n ( Failure cs Backjump : xs@(Leave : Failure cs' Backjump : _)) + | cs == cs' = proc n xs -- repeated backjumps count as one + proc 0 ( Failure cs Backjump : _ ) = ([], Just cs) + proc n (x@(Failure _ Backjump) : xs) = (\ ~(ys, r) -> (x : ys, r)) (proc (n - 1) xs) + proc n (x : xs) = (\ ~(ys, r) -> (x : ys, r)) (proc n xs) + + -- This function takes a lot of arguments. The first two are both supposed to be + -- the log up to the first error. That's the error that will always be printed in + -- case we do not find a solution. 
We pass this log twice, because we evaluate it + -- in parallel with the full log, but we also want to retain the reference to its + -- beginning for when we print it. This trick prevents a space leak! + -- + -- The third argument is the full log, the fifth and six error conditions. + -- The seventh argument indicates whether the search was exhaustive. + -- + -- The order of arguments is important! In particular 's' must not be evaluated + -- unless absolutely necessary. It contains the final result, and if we shortcut + -- with an error due to backjumping, evaluating 's' would still require traversing + -- the entire tree. + go ms (_ : ns) (x : xs) r s exh = Step x (go ms ns xs r s exh) + go ms [] (x : xs) r s exh = Step x (go ms [] xs r s exh) + go ms _ [] (Just cs) _ exh = Fail $ + "Could not resolve dependencies:\n" ++ + unlines (showMessages (L.foldr (\ v _ -> v `S.member` cs) True) False ms) ++ + (if exh then "Dependency tree exhaustively searched.\n" + else "Backjump limit reached (change with --max-backjumps).\n") + go _ _ [] _ (Just s) _ = Done s + go _ _ [] _ _ _ = Fail ("Could not resolve dependencies; something strange happened.") -- should not happen + +logToProgress' :: Log Message a -> Progress String String a +logToProgress' l = let + (ms, r) = runLog l + xs = showMessages (const True) True ms + in go xs r + where + go [x] Nothing = Fail x + go [] Nothing = Fail "" + go [] (Just r) = Done r + go (x:xs) r = Step x (go xs r) + + +runLogIO :: Log Message a -> IO (Maybe a) +runLogIO x = + do + let (ms, r) = runLog x + putStr (unlines $ showMessages (const True) True ms) + return r + +failWith :: m -> Log m a +failWith m = Step m (Fail ()) + +succeedWith :: m -> a -> Log m a +succeedWith m x = Step m (Done x) + +continueWith :: m -> Log m a -> Log m a +continueWith = Step + +tryWith :: Message -> Log Message a -> Log Message a +tryWith m x = Step m (Step Enter x) <|> failWith Leave diff -Nru 
cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Message.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Message.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Message.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Message.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,101 @@ +module Distribution.Client.Dependency.Modular.Message where + +import qualified Data.List as L +import Prelude hiding (pi) + +import Distribution.Text -- from Cabal + +import Distribution.Client.Dependency.Modular.Dependency +import Distribution.Client.Dependency.Modular.Flag +import Distribution.Client.Dependency.Modular.Package +import Distribution.Client.Dependency.Modular.Tree + +data Message = + Enter -- ^ increase indentation level + | Leave -- ^ decrease indentation level + | TryP (PI QPN) + | TryF QFN Bool + | TryS QSN Bool + | Next (Goal QPN) + | Success + | Failure (ConflictSet QPN) FailReason + +-- | Transforms the structured message type to actual messages (strings). +-- +-- Takes an additional relevance predicate. The predicate gets a stack of goal +-- variables and can decide whether messages regarding these goals are relevant. +-- You can plug in 'const True' if you're interested in a full trace. If you +-- want a slice of the trace concerning a particular conflict set, then plug in +-- a predicate returning 'True' on the empty stack and if the head is in the +-- conflict set. +-- +-- The second argument indicates if the level numbers should be shown. This is +-- recommended for any trace that involves backtracking, because only the level +-- numbers will allow to keep track of backjumps. 
+showMessages :: ([Var QPN] -> Bool) -> Bool -> [Message] -> [String] +showMessages p sl = go [] 0 + where + go :: [Var QPN] -> Int -> [Message] -> [String] + go _ _ [] = [] + -- complex patterns + go v l (TryP (PI qpn i) : Enter : Failure c fr : Leave : ms) = goPReject v l qpn [i] c fr ms + go v l (TryF qfn b : Enter : Failure c fr : Leave : ms) = (atLevel (add (F qfn) v) l $ "rejecting: " ++ showQFNBool qfn b ++ showFR c fr) (go v l ms) + go v l (TryS qsn b : Enter : Failure c fr : Leave : ms) = (atLevel (add (S qsn) v) l $ "rejecting: " ++ showQSNBool qsn b ++ showFR c fr) (go v l ms) + go v l (Next (Goal (P qpn) gr) : TryP pi : ms@(Enter : Next _ : _)) = (atLevel (add (P qpn) v) l $ "trying: " ++ showPI pi ++ showGRs gr) (go (add (P qpn) v) l ms) + go v l (Failure c Backjump : ms@(Leave : Failure c' Backjump : _)) | c == c' = go v l ms + -- standard display + go v l (Enter : ms) = go v (l+1) ms + go v l (Leave : ms) = go (drop 1 v) (l-1) ms + go v l (TryP pi@(PI qpn _) : ms) = (atLevel (add (P qpn) v) l $ "trying: " ++ showPI pi) (go (add (P qpn) v) l ms) + go v l (TryF qfn b : ms) = (atLevel (add (F qfn) v) l $ "trying: " ++ showQFNBool qfn b) (go (add (F qfn) v) l ms) + go v l (TryS qsn b : ms) = (atLevel (add (S qsn) v) l $ "trying: " ++ showQSNBool qsn b) (go (add (S qsn) v) l ms) + go v l (Next (Goal (P qpn) gr) : ms) = (atLevel (add (P qpn) v) l $ "next goal: " ++ showQPN qpn ++ showGRs gr) (go v l ms) + go v l (Next _ : ms) = go v l ms -- ignore flag goals in the log + go v l (Success : ms) = (atLevel v l $ "done") (go v l ms) + go v l (Failure c fr : ms) = (atLevel v l $ "fail" ++ showFR c fr) (go v l ms) + + add :: Var QPN -> [Var QPN] -> [Var QPN] + add v vs = simplifyVar v : vs + + -- special handler for many subsequent package rejections + goPReject :: [Var QPN] -> Int -> QPN -> [I] -> ConflictSet QPN -> FailReason -> [Message] -> [String] + goPReject v l qpn is c fr (TryP (PI qpn' i) : Enter : Failure _ fr' : Leave : ms) | qpn == qpn' && fr == fr' 
= goPReject v l qpn (i : is) c fr ms + goPReject v l qpn is c fr ms = (atLevel (P qpn : v) l $ "rejecting: " ++ showQPN qpn ++ "-" ++ L.intercalate ", " (map showI (reverse is)) ++ showFR c fr) (go v l ms) + + -- write a message, but only if it's relevant; we can also enable or disable the display of the current level + atLevel v l x xs + | sl && p v = let s = show l + in ("[" ++ replicate (3 - length s) '_' ++ s ++ "] " ++ x) : xs + | p v = x : xs + | otherwise = xs + +showGRs :: QGoalReasonChain -> String +showGRs (gr : _) = showGR gr +showGRs [] = "" + +showGR :: GoalReason QPN -> String +showGR UserGoal = " (user goal)" +showGR (PDependency pi) = " (dependency of " ++ showPI pi ++ ")" +showGR (FDependency qfn b) = " (dependency of " ++ showQFNBool qfn b ++ ")" +showGR (SDependency qsn) = " (dependency of " ++ showQSNBool qsn True ++ ")" + +showFR :: ConflictSet QPN -> FailReason -> String +showFR _ InconsistentInitialConstraints = " (inconsistent initial constraints)" +showFR _ (Conflicting ds) = " (conflict: " ++ L.intercalate ", " (map showDep ds) ++ ")" +showFR _ CannotInstall = " (only already installed instances can be used)" +showFR _ CannotReinstall = " (avoiding to reinstall a package with same version but new dependencies)" +showFR _ Shadowed = " (shadowed by another installed package with same version)" +showFR _ Broken = " (package is broken)" +showFR _ (GlobalConstraintVersion vr) = " (global constraint requires " ++ display vr ++ ")" +showFR _ GlobalConstraintInstalled = " (global constraint requires installed instance)" +showFR _ GlobalConstraintSource = " (global constraint requires source instance)" +showFR _ GlobalConstraintFlag = " (global constraint requires opposite flag selection)" +showFR _ ManualFlag = " (manual flag can only be changed explicitly)" +showFR _ (BuildFailureNotInIndex pn) = " (unknown package: " ++ display pn ++ ")" +showFR c Backjump = " (backjumping, conflict set: " ++ showCS c ++ ")" +-- The following are internal 
failures. They should not occur. In the +-- interest of not crashing unnecessarily, we still just print an error +-- message though. +showFR _ (MalformedFlagChoice qfn) = " (INTERNAL ERROR: MALFORMED FLAG CHOICE: " ++ showQFN qfn ++ ")" +showFR _ (MalformedStanzaChoice qsn) = " (INTERNAL ERROR: MALFORMED STANZA CHOICE: " ++ showQSN qsn ++ ")" +showFR _ EmptyGoalChoice = " (INTERNAL ERROR: EMPTY GOAL CHOICE)" diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Package.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Package.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Package.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Package.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,111 @@ +{-# LANGUAGE DeriveFunctor #-} +module Distribution.Client.Dependency.Modular.Package + (module Distribution.Client.Dependency.Modular.Package, + module Distribution.Package) where + +import Data.List as L +import Data.Map as M + +import Distribution.Package -- from Cabal +import Distribution.Text -- from Cabal + +import Distribution.Client.Dependency.Modular.Version + +-- | A package name. +type PN = PackageName + +-- | Unpacking a package name. +unPN :: PN -> String +unPN (PackageName pn) = pn + +-- | Package version. A package name plus a version number. +type PV = PackageId + +-- | Qualified package version. +type QPV = Q PV + +-- | Package id. Currently just a black-box string. +type PId = InstalledPackageId + +-- | Location. Info about whether a package is installed or not, and where +-- exactly it is located. For installed packages, uniquely identifies the +-- package instance via its 'PId'. +-- +-- TODO: More information is needed about the repo. +data Loc = Inst PId | InRepo + deriving (Eq, Ord, Show) + +-- | Instance. 
A version number and a location. +data I = I Ver Loc + deriving (Eq, Ord, Show) + +-- | String representation of an instance. +showI :: I -> String +showI (I v InRepo) = showVer v +showI (I v (Inst (InstalledPackageId i))) = showVer v ++ "/installed" ++ shortId i + where + -- A hack to extract the beginning of the package ABI hash + shortId = snip (splitAt 4) (++ "...") . + snip ((\ (x, y) -> (reverse x, y)) . break (=='-') . reverse) ('-':) + snip p f xs = case p xs of + (ys, zs) -> (if L.null zs then id else f) ys + +-- | Package instance. A package name and an instance. +data PI qpn = PI qpn I + deriving (Eq, Ord, Show, Functor) + +-- | String representation of a package instance. +showPI :: PI QPN -> String +showPI (PI qpn i) = showQPN qpn ++ "-" ++ showI i + +-- | Checks if a package instance corresponds to an installed package. +instPI :: PI qpn -> Bool +instPI (PI _ (I _ (Inst _))) = True +instPI _ = False + +instI :: I -> Bool +instI (I _ (Inst _)) = True +instI _ = False + +-- | Package path. (Stored in "reverse" order.) +type PP = [PN] + +-- | String representation of a package path. +showPP :: PP -> String +showPP = intercalate "." . L.map display . reverse + + +-- | A qualified entity. Pairs a package path with the entity. +data Q a = Q PP a + deriving (Eq, Ord, Show) + +-- | Standard string representation of a qualified entity. +showQ :: (a -> String) -> (Q a -> String) +showQ showa (Q [] x) = showa x +showQ showa (Q pp x) = showPP pp ++ "." ++ showa x + +-- | Qualified package name. +type QPN = Q PN + +-- | String representation of a qualified package path. +showQPN :: QPN -> String +showQPN = showQ display + +-- | The scope associates every package with a path. The convention is that packages +-- not in the data structure have an empty path associated with them. +type Scope = Map PN PP + +-- | An empty scope structure, for initialization. 
+emptyScope :: Scope +emptyScope = M.empty + +-- | Create artificial parents for each of the package names, making +-- them all independent. +makeIndependent :: [PN] -> Scope +makeIndependent ps = L.foldl (\ sc (n, p) -> M.insert p [PackageName (show n)] sc) emptyScope (zip ([0..] :: [Int]) ps) + +qualify :: Scope -> PN -> QPN +qualify sc pn = Q (findWithDefault [] pn sc) pn + +unQualify :: Q a -> a +unQualify (Q _ x) = x diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Preference.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Preference.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Preference.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Preference.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,281 @@ +{-# LANGUAGE CPP #-} +module Distribution.Client.Dependency.Modular.Preference where + +-- Reordering or pruning the tree in order to prefer or make certain choices. + +import qualified Data.List as L +import qualified Data.Map as M +#if !MIN_VERSION_base(4,8,0) +import Data.Monoid +#endif +import Data.Ord + +import Distribution.Client.Dependency.Types + ( PackageConstraint(..), PackagePreferences(..), InstalledPreference(..) ) +import Distribution.Client.Types + ( OptionalStanza(..) ) + +import Distribution.Client.Dependency.Modular.Dependency +import Distribution.Client.Dependency.Modular.Flag +import Distribution.Client.Dependency.Modular.Package +import Distribution.Client.Dependency.Modular.PSQ as P +import Distribution.Client.Dependency.Modular.Tree +import Distribution.Client.Dependency.Modular.Version + +-- | Generic abstraction for strategies that just rearrange the package order. +-- Only packages that match the given predicate are reordered. 
+packageOrderFor :: (PN -> Bool) -> (PN -> I -> I -> Ordering) -> Tree a -> Tree a +packageOrderFor p cmp = trav go + where + go (PChoiceF v@(Q _ pn) r cs) + | p pn = PChoiceF v r (P.sortByKeys (flip (cmp pn)) cs) + | otherwise = PChoiceF v r cs + go x = x + +-- | Ordering that treats preferred versions as greater than non-preferred +-- versions. +preferredVersionsOrdering :: VR -> Ver -> Ver -> Ordering +preferredVersionsOrdering vr v1 v2 = + compare (checkVR vr v1) (checkVR vr v2) + +-- | Traversal that tries to establish package preferences (not constraints). +-- Works by reordering choice nodes. +preferPackagePreferences :: (PN -> PackagePreferences) -> Tree a -> Tree a +preferPackagePreferences pcs = packageOrderFor (const True) preference + where + preference pn i1@(I v1 _) i2@(I v2 _) = + let PackagePreferences vr ipref = pcs pn + in preferredVersionsOrdering vr v1 v2 `mappend` -- combines lexically + locationsOrdering ipref i1 i2 + + -- Note that we always rank installed before uninstalled, and later + -- versions before earlier, but we can change the priority of the + -- two orderings. + locationsOrdering PreferInstalled v1 v2 = + preferInstalledOrdering v1 v2 `mappend` preferLatestOrdering v1 v2 + locationsOrdering PreferLatest v1 v2 = + preferLatestOrdering v1 v2 `mappend` preferInstalledOrdering v1 v2 + +-- | Ordering that treats installed instances as greater than uninstalled ones. +preferInstalledOrdering :: I -> I -> Ordering +preferInstalledOrdering (I _ (Inst _)) (I _ (Inst _)) = EQ +preferInstalledOrdering (I _ (Inst _)) _ = GT +preferInstalledOrdering _ (I _ (Inst _)) = LT +preferInstalledOrdering _ _ = EQ + +-- | Compare instances by their version numbers. +preferLatestOrdering :: I -> I -> Ordering +preferLatestOrdering (I v1 _) (I v2 _) = compare v1 v2 + +-- | Helper function that tries to enforce a single package constraint on a +-- given instance for a P-node. 
Translates the constraint into a +-- tree-transformer that either leaves the subtree untouched, or replaces it +-- with an appropriate failure node. +processPackageConstraintP :: ConflictSet QPN -> I -> PackageConstraint -> Tree a -> Tree a +processPackageConstraintP c (I v _) (PackageConstraintVersion _ vr) r + | checkVR vr v = r + | otherwise = Fail c (GlobalConstraintVersion vr) +processPackageConstraintP c i (PackageConstraintInstalled _) r + | instI i = r + | otherwise = Fail c GlobalConstraintInstalled +processPackageConstraintP c i (PackageConstraintSource _) r + | not (instI i) = r + | otherwise = Fail c GlobalConstraintSource +processPackageConstraintP _ _ _ r = r + +-- | Helper function that tries to enforce a single package constraint on a +-- given flag setting for an F-node. Translates the constraint into a +-- tree-transformer that either leaves the subtree untouched, or replaces it +-- with an appropriate failure node. +processPackageConstraintF :: Flag -> ConflictSet QPN -> Bool -> PackageConstraint -> Tree a -> Tree a +processPackageConstraintF f c b' (PackageConstraintFlags _ fa) r = + case L.lookup f fa of + Nothing -> r + Just b | b == b' -> r + | otherwise -> Fail c GlobalConstraintFlag +processPackageConstraintF _ _ _ _ r = r + +-- | Helper function that tries to enforce a single package constraint on a +-- given flag setting for an F-node. Translates the constraint into a +-- tree-transformer that either leaves the subtree untouched, or replaces it +-- with an appropriate failure node. +processPackageConstraintS :: OptionalStanza -> ConflictSet QPN -> Bool -> PackageConstraint -> Tree a -> Tree a +processPackageConstraintS s c b' (PackageConstraintStanzas _ ss) r = + if not b' && s `elem` ss then Fail c GlobalConstraintFlag + else r +processPackageConstraintS _ _ _ _ r = r + +-- | Traversal that tries to establish various kinds of user constraints. 
Works +-- by selectively disabling choices that have been ruled out by global user +-- constraints. +enforcePackageConstraints :: M.Map PN [PackageConstraint] -> Tree QGoalReasonChain -> Tree QGoalReasonChain +enforcePackageConstraints pcs = trav go + where + go (PChoiceF qpn@(Q _ pn) gr ts) = + let c = toConflictSet (Goal (P qpn) gr) + -- compose the transformation functions for each of the relevant constraint + g = \ i -> foldl (\ h pc -> h . processPackageConstraintP c i pc) id + (M.findWithDefault [] pn pcs) + in PChoiceF qpn gr (P.mapWithKey g ts) + go (FChoiceF qfn@(FN (PI (Q _ pn) _) f) gr tr m ts) = + let c = toConflictSet (Goal (F qfn) gr) + -- compose the transformation functions for each of the relevant constraint + g = \ b -> foldl (\ h pc -> h . processPackageConstraintF f c b pc) id + (M.findWithDefault [] pn pcs) + in FChoiceF qfn gr tr m (P.mapWithKey g ts) + go (SChoiceF qsn@(SN (PI (Q _ pn) _) f) gr tr ts) = + let c = toConflictSet (Goal (S qsn) gr) + -- compose the transformation functions for each of the relevant constraint + g = \ b -> foldl (\ h pc -> h . processPackageConstraintS f c b pc) id + (M.findWithDefault [] pn pcs) + in SChoiceF qsn gr tr (P.mapWithKey g ts) + go x = x + +-- | Transformation that tries to enforce manual flags. Manual flags +-- can only be re-set explicitly by the user. This transformation should +-- be run after user preferences have been enforced. For manual flags, +-- it checks if a user choice has been made. If not, it disables all but +-- the first choice. 
+enforceManualFlags :: Tree QGoalReasonChain -> Tree QGoalReasonChain +enforceManualFlags = trav go + where + go (FChoiceF qfn gr tr True ts) = FChoiceF qfn gr tr True $ + let c = toConflictSet (Goal (F qfn) gr) + in case span isDisabled (P.toList ts) of + ([], y : ys) -> P.fromList (y : L.map (\ (b, _) -> (b, Fail c ManualFlag)) ys) + _ -> ts -- something has been manually selected, leave things alone + where + isDisabled (_, Fail _ GlobalConstraintFlag) = True + isDisabled _ = False + go x = x + +-- | Prefer installed packages over non-installed packages, generally. +-- All installed packages or non-installed packages are treated as +-- equivalent. +preferInstalled :: Tree a -> Tree a +preferInstalled = packageOrderFor (const True) (const preferInstalledOrdering) + +-- | Prefer packages with higher version numbers over packages with +-- lower version numbers, for certain packages. +preferLatestFor :: (PN -> Bool) -> Tree a -> Tree a +preferLatestFor p = packageOrderFor p (const preferLatestOrdering) + +-- | Prefer packages with higher version numbers over packages with +-- lower version numbers, for all packages. +preferLatest :: Tree a -> Tree a +preferLatest = preferLatestFor (const True) + +-- | Require installed packages. +requireInstalled :: (PN -> Bool) -> Tree (QGoalReasonChain, a) -> Tree (QGoalReasonChain, a) +requireInstalled p = trav go + where + go (PChoiceF v@(Q _ pn) i@(gr, _) cs) + | p pn = PChoiceF v i (P.mapWithKey installed cs) + | otherwise = PChoiceF v i cs + where + installed (I _ (Inst _)) x = x + installed _ _ = Fail (toConflictSet (Goal (P v) gr)) CannotInstall + go x = x + +-- | Avoid reinstalls. +-- +-- This is a tricky strategy. If a package version is installed already and the +-- same version is available from a repo, the repo version will never be chosen. +-- This would result in a reinstall (either destructively, or potentially, +-- shadowing). 
The old instance won't be visible or even present anymore, but +-- other packages might have depended on it. +-- +-- TODO: It would be better to actually check the reverse dependencies of installed +-- packages. If they're not depended on, then reinstalling should be fine. Even if +-- they are, perhaps this should just result in trying to reinstall those other +-- packages as well. However, doing this all neatly in one pass would require to +-- change the builder, or at least to change the goal set after building. +avoidReinstalls :: (PN -> Bool) -> Tree (QGoalReasonChain, a) -> Tree (QGoalReasonChain, a) +avoidReinstalls p = trav go + where + go (PChoiceF qpn@(Q _ pn) i@(gr, _) cs) + | p pn = PChoiceF qpn i disableReinstalls + | otherwise = PChoiceF qpn i cs + where + disableReinstalls = + let installed = [ v | (I v (Inst _), _) <- toList cs ] + in P.mapWithKey (notReinstall installed) cs + + notReinstall vs (I v InRepo) _ + | v `elem` vs = Fail (toConflictSet (Goal (P qpn) gr)) CannotReinstall + notReinstall _ _ x = x + go x = x + +-- | Always choose the first goal in the list next, abandoning all +-- other choices. +-- +-- This is unnecessary for the default search strategy, because +-- it descends only into the first goal choice anyway, +-- but may still make sense to just reduce the tree size a bit. +firstGoal :: Tree a -> Tree a +firstGoal = trav go + where + go (GoalChoiceF xs) = -- casePSQ xs (GoalChoiceF xs) (\ _ t _ -> out t) -- more space efficient, but removes valuable debug info + casePSQ xs (GoalChoiceF (fromList [])) (\ g t _ -> GoalChoiceF (fromList [(g, t)])) + go x = x + -- Note that we keep empty choice nodes, because they mean success. + +-- | Transformation that tries to make a decision on base as early as +-- possible. In nearly all cases, there's a single choice for the base +-- package. Also, fixing base early should lead to better error messages. 
+preferBaseGoalChoice :: Tree a -> Tree a +preferBaseGoalChoice = trav go + where + go (GoalChoiceF xs) = GoalChoiceF (P.sortByKeys preferBase xs) + go x = x + + preferBase :: OpenGoal -> OpenGoal -> Ordering + preferBase (OpenGoal (Simple (Dep (Q [] pn) _)) _) _ | unPN pn == "base" = LT + preferBase _ (OpenGoal (Simple (Dep (Q [] pn) _)) _) | unPN pn == "base" = GT + preferBase _ _ = EQ + +-- | Transformation that sorts choice nodes so that +-- child nodes with a small branching degree are preferred. As a +-- special case, choices with 0 branches will be preferred (as they +-- are immediately considered inconsistent), and choices with 1 +-- branch will also be preferred (as they don't involve choice). +preferEasyGoalChoices :: Tree a -> Tree a +preferEasyGoalChoices = trav go + where + go (GoalChoiceF xs) = GoalChoiceF (P.sortBy (comparing choices) xs) + go x = x + +-- | Transformation that tries to avoid making weak flag choices early. +-- Weak flags are trivial flags (not influencing dependencies) or such +-- flags that are explicitly declared to be weak in the index. +deferWeakFlagChoices :: Tree a -> Tree a +deferWeakFlagChoices = trav go + where + go (GoalChoiceF xs) = GoalChoiceF (P.sortBy defer xs) + go x = x + + defer :: Tree a -> Tree a -> Ordering + defer (FChoice _ _ True _ _) _ = GT + defer _ (FChoice _ _ True _ _) = LT + defer _ _ = EQ + +-- | Variant of 'preferEasyGoalChoices'. +-- +-- Only approximates the number of choices in the branches. Less accurate, +-- more efficient. +lpreferEasyGoalChoices :: Tree a -> Tree a +lpreferEasyGoalChoices = trav go + where + go (GoalChoiceF xs) = GoalChoiceF (P.sortBy (comparing lchoices) xs) + go x = x + +-- | Variant of 'preferEasyGoalChoices'. +-- +-- I first thought that using a paramorphism might be faster here, +-- but it doesn't seem to make any difference. +preferEasyGoalChoices' :: Tree a -> Tree a +preferEasyGoalChoices' = para (inn . 
go) + where + go (GoalChoiceF xs) = GoalChoiceF (P.map fst (P.sortBy (comparing (choices . snd)) xs)) + go x = fmap fst x + diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/PSQ.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/PSQ.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/PSQ.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/PSQ.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,94 @@ +{-# LANGUAGE DeriveFunctor, DeriveFoldable, DeriveFoldable, DeriveTraversable #-} +module Distribution.Client.Dependency.Modular.PSQ where + +-- Priority search queues. +-- +-- I am not yet sure what exactly is needed. But we need a data structure with +-- key-based lookup that can be sorted. We're using a sequence right now with +-- (inefficiently implemented) lookup, because I think that queue-based +-- operations and sorting turn out to be more efficiency-critical in practice. 
+ +import Data.Foldable +import Data.Function +import Data.List as S hiding (foldr) +import Data.Traversable +import Prelude hiding (foldr) + +newtype PSQ k v = PSQ [(k, v)] + deriving (Eq, Show, Functor, Foldable, Traversable) + +keys :: PSQ k v -> [k] +keys (PSQ xs) = fmap fst xs + +lookup :: Eq k => k -> PSQ k v -> Maybe v +lookup k (PSQ xs) = S.lookup k xs + +map :: (v1 -> v2) -> PSQ k v1 -> PSQ k v2 +map f (PSQ xs) = PSQ (fmap (\ (k, v) -> (k, f v)) xs) + +mapKeys :: (k1 -> k2) -> PSQ k1 v -> PSQ k2 v +mapKeys f (PSQ xs) = PSQ (fmap (\ (k, v) -> (f k, v)) xs) + +mapWithKey :: (k -> a -> b) -> PSQ k a -> PSQ k b +mapWithKey f (PSQ xs) = PSQ (fmap (\ (k, v) -> (k, f k v)) xs) + +mapWithKeyState :: (s -> k -> a -> (b, s)) -> PSQ k a -> s -> PSQ k b +mapWithKeyState p (PSQ xs) s0 = + PSQ (foldr (\ (k, v) r s -> case p s k v of + (w, n) -> (k, w) : (r n)) + (const []) xs s0) + +delete :: Eq k => k -> PSQ k a -> PSQ k a +delete k (PSQ xs) = PSQ (snd (partition ((== k) . fst) xs)) + +fromList :: [(k, a)] -> PSQ k a +fromList = PSQ + +cons :: k -> a -> PSQ k a -> PSQ k a +cons k x (PSQ xs) = PSQ ((k, x) : xs) + +snoc :: PSQ k a -> k -> a -> PSQ k a +snoc (PSQ xs) k x = PSQ (xs ++ [(k, x)]) + +casePSQ :: PSQ k a -> r -> (k -> a -> PSQ k a -> r) -> r +casePSQ (PSQ xs) n c = + case xs of + [] -> n + (k, v) : ys -> c k v (PSQ ys) + +splits :: PSQ k a -> PSQ k (a, PSQ k a) +splits = go id + where + go f xs = casePSQ xs + (PSQ []) + (\ k v ys -> cons k (v, f ys) (go (f . cons k v) ys)) + +sortBy :: (a -> a -> Ordering) -> PSQ k a -> PSQ k a +sortBy cmp (PSQ xs) = PSQ (S.sortBy (cmp `on` snd) xs) + +sortByKeys :: (k -> k -> Ordering) -> PSQ k a -> PSQ k a +sortByKeys cmp (PSQ xs) = PSQ (S.sortBy (cmp `on` fst) xs) + +filterKeys :: (k -> Bool) -> PSQ k a -> PSQ k a +filterKeys p (PSQ xs) = PSQ (S.filter (p . fst) xs) + +filter :: (a -> Bool) -> PSQ k a -> PSQ k a +filter p (PSQ xs) = PSQ (S.filter (p . 
snd) xs) + +length :: PSQ k a -> Int +length (PSQ xs) = S.length xs + +-- | "Lazy length". +-- +-- Only approximates the length, but doesn't force the list. +llength :: PSQ k a -> Int +llength (PSQ []) = 0 +llength (PSQ (_:[])) = 1 +llength (PSQ (_:_:[])) = 2 +llength (PSQ _) = 3 + +null :: PSQ k a -> Bool +null (PSQ xs) = S.null xs + +toList :: PSQ k a -> [(k, a)] +toList (PSQ xs) = xs diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Solver.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Solver.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Solver.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Solver.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,60 @@ +module Distribution.Client.Dependency.Modular.Solver where + +import Data.Map as M + +import Distribution.Client.Dependency.Types + +import Distribution.Client.Dependency.Modular.Assignment +import Distribution.Client.Dependency.Modular.Builder +import Distribution.Client.Dependency.Modular.Dependency +import Distribution.Client.Dependency.Modular.Explore +import Distribution.Client.Dependency.Modular.Index +import Distribution.Client.Dependency.Modular.Log +import Distribution.Client.Dependency.Modular.Message +import Distribution.Client.Dependency.Modular.Package +import qualified Distribution.Client.Dependency.Modular.Preference as P +import Distribution.Client.Dependency.Modular.Validate + +-- | Various options for the modular solver. 
+data SolverConfig = SolverConfig { + preferEasyGoalChoices :: Bool, + independentGoals :: Bool, + avoidReinstalls :: Bool, + shadowPkgs :: Bool, + strongFlags :: Bool, + maxBackjumps :: Maybe Int +} + +solve :: SolverConfig -> -- solver parameters + Index -> -- all available packages as an index + (PN -> PackagePreferences) -> -- preferences + Map PN [PackageConstraint] -> -- global constraints + [PN] -> -- global goals + Log Message (Assignment, RevDepMap) +solve sc idx userPrefs userConstraints userGoals = + explorePhase $ + heuristicsPhase $ + preferencesPhase $ + validationPhase $ + prunePhase $ + buildPhase + where + explorePhase = exploreTreeLog . backjump + heuristicsPhase = P.firstGoal . -- after doing goal-choice heuristics, commit to the first choice (saves space) + P.deferWeakFlagChoices . + P.preferBaseGoalChoice . + if preferEasyGoalChoices sc + then P.lpreferEasyGoalChoices + else id + preferencesPhase = P.preferPackagePreferences userPrefs + validationPhase = P.enforceManualFlags . -- can only be done after user constraints + P.enforcePackageConstraints userConstraints . + validateTree idx + prunePhase = (if avoidReinstalls sc then P.avoidReinstalls (const True) else id) . 
+ -- packages that can never be "upgraded": + P.requireInstalled (`elem` [ PackageName "base" + , PackageName "ghc-prim" + , PackageName "integer-gmp" + , PackageName "integer-simple" + ]) + buildPhase = buildTree idx (independentGoals sc) userGoals diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Tree.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Tree.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Tree.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Tree.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,121 @@ +{-# LANGUAGE DeriveFunctor, DeriveFoldable, DeriveTraversable #-} +module Distribution.Client.Dependency.Modular.Tree where + +import Control.Monad hiding (mapM) +import Data.Foldable +import Data.Traversable +import Prelude hiding (foldr, mapM) + +import Distribution.Client.Dependency.Modular.Dependency +import Distribution.Client.Dependency.Modular.Flag +import Distribution.Client.Dependency.Modular.Package +import Distribution.Client.Dependency.Modular.PSQ as P +import Distribution.Client.Dependency.Modular.Version + +-- | Type of the search tree. Inlining the choice nodes for now. +data Tree a = + PChoice QPN a (PSQ I (Tree a)) + | FChoice QFN a Bool Bool (PSQ Bool (Tree a)) -- Bool indicates whether it's weak/trivial, second Bool whether it's manual + | SChoice QSN a Bool (PSQ Bool (Tree a)) -- Bool indicates whether it's trivial + | GoalChoice (PSQ OpenGoal (Tree a)) -- PSQ should never be empty + | Done RevDepMap + | Fail (ConflictSet QPN) FailReason + deriving (Eq, Show, Functor) + -- Above, a choice is called trivial if it clearly does not matter. The + -- special case of triviality we actually consider is if there are no new + -- dependencies introduced by this node. 
+ -- + -- A (flag) choice is called weak if we do want to defer it. This is the + -- case for flags that should be implied by what's currently installed on + -- the system, as opposed to flags that are used to explicitly enable or + -- disable some functionality. + +data FailReason = InconsistentInitialConstraints + | Conflicting [Dep QPN] + | CannotInstall + | CannotReinstall + | Shadowed + | Broken + | GlobalConstraintVersion VR + | GlobalConstraintInstalled + | GlobalConstraintSource + | GlobalConstraintFlag + | ManualFlag + | BuildFailureNotInIndex PN + | MalformedFlagChoice QFN + | MalformedStanzaChoice QSN + | EmptyGoalChoice + | Backjump + deriving (Eq, Show) + +-- | Functor for the tree type. +data TreeF a b = + PChoiceF QPN a (PSQ I b) + | FChoiceF QFN a Bool Bool (PSQ Bool b) + | SChoiceF QSN a Bool (PSQ Bool b) + | GoalChoiceF (PSQ OpenGoal b) + | DoneF RevDepMap + | FailF (ConflictSet QPN) FailReason + deriving (Functor, Foldable, Traversable) + +out :: Tree a -> TreeF a (Tree a) +out (PChoice p i ts) = PChoiceF p i ts +out (FChoice p i b m ts) = FChoiceF p i b m ts +out (SChoice p i b ts) = SChoiceF p i b ts +out (GoalChoice ts) = GoalChoiceF ts +out (Done x ) = DoneF x +out (Fail c x ) = FailF c x + +inn :: TreeF a (Tree a) -> Tree a +inn (PChoiceF p i ts) = PChoice p i ts +inn (FChoiceF p i b m ts) = FChoice p i b m ts +inn (SChoiceF p i b ts) = SChoice p i b ts +inn (GoalChoiceF ts) = GoalChoice ts +inn (DoneF x ) = Done x +inn (FailF c x ) = Fail c x + +-- | Determines whether a tree is active, i.e., isn't a failure node. +active :: Tree a -> Bool +active (Fail _ _) = False +active _ = True + +-- | Determines how many active choices are available in a node. Note that we +-- count goal choices as having one choice, always. 
+choices :: Tree a -> Int +choices (PChoice _ _ ts) = P.length (P.filter active ts) +choices (FChoice _ _ _ _ ts) = P.length (P.filter active ts) +choices (SChoice _ _ _ ts) = P.length (P.filter active ts) +choices (GoalChoice _ ) = 1 +choices (Done _ ) = 1 +choices (Fail _ _ ) = 0 + +-- | Variant of 'choices' that only approximates the number of choices, +-- using 'llength'. +lchoices :: Tree a -> Int +lchoices (PChoice _ _ ts) = P.llength (P.filter active ts) +lchoices (FChoice _ _ _ _ ts) = P.llength (P.filter active ts) +lchoices (SChoice _ _ _ ts) = P.llength (P.filter active ts) +lchoices (GoalChoice _ ) = 1 +lchoices (Done _ ) = 1 +lchoices (Fail _ _ ) = 0 + +-- | Catamorphism on trees. +cata :: (TreeF a b -> b) -> Tree a -> b +cata phi x = (phi . fmap (cata phi) . out) x + +trav :: (TreeF a (Tree b) -> TreeF b (Tree b)) -> Tree a -> Tree b +trav psi x = cata (inn . psi) x + +-- | Paramorphism on trees. +para :: (TreeF a (b, Tree a) -> b) -> Tree a -> b +para phi = phi . fmap (\ x -> (para phi x, x)) . out + +cataM :: Monad m => (TreeF a b -> m b) -> Tree a -> m b +cataM phi = phi <=< mapM (cataM phi) <=< return . out + +-- | Anamorphism on trees. +ana :: (b -> TreeF a b) -> b -> Tree a +ana psi = inn . fmap (ana psi) . psi + +anaM :: Monad m => (b -> m (TreeF a b)) -> b -> m (Tree a) +anaM psi = return . inn <=< mapM (anaM psi) <=< psi diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Validate.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Validate.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Validate.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Validate.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,232 @@ +module Distribution.Client.Dependency.Modular.Validate where + +-- Validation of the tree. 
+-- +-- The task here is to make sure all constraints hold. After validation, any +-- assignment returned by exploration of the tree should be a complete valid +-- assignment, i.e., actually constitute a solution. + +import Control.Applicative +import Control.Monad.Reader hiding (sequence) +import Data.List as L +import Data.Map as M +import Data.Traversable +import Prelude hiding (sequence) + +import Distribution.Client.Dependency.Modular.Assignment +import Distribution.Client.Dependency.Modular.Dependency +import Distribution.Client.Dependency.Modular.Flag +import Distribution.Client.Dependency.Modular.Index +import Distribution.Client.Dependency.Modular.Package +import Distribution.Client.Dependency.Modular.PSQ as P +import Distribution.Client.Dependency.Modular.Tree + +-- In practice, most constraints are implication constraints (IF we have made +-- a number of choices, THEN we also have to ensure that). We call constraints +-- that for which the preconditions are fulfilled ACTIVE. We maintain a set +-- of currently active constraints that we pass down the node. +-- +-- We aim at detecting inconsistent states as early as possible. +-- +-- Whenever we make a choice, there are two things that need to happen: +-- +-- (1) We must check that the choice is consistent with the currently +-- active constraints. +-- +-- (2) The choice increases the set of active constraints. For the new +-- active constraints, we must check that they are consistent with +-- the current state. +-- +-- We can actually merge (1) and (2) by saying the the current choice is +-- a new active constraint, fixing the choice. +-- +-- If a test fails, we have detected an inconsistent state. We can +-- disable the current subtree and do not have to traverse it any further. +-- +-- We need a good way to represent the current state, i.e., the current +-- set of active constraints. 
Since the main situation where we have to +-- search in it is (1), it seems best to store the state by package: for +-- every package, we store which versions are still allowed. If for any +-- package, we have inconsistent active constraints, we can also stop. +-- This is a particular way to read task (2): +-- +-- (2, weak) We only check if the new constraints are consistent with +-- the choices we've already made, and add them to the active set. +-- +-- (2, strong) We check if the new constraints are consistent with the +-- choices we've already made, and the constraints we already have. +-- +-- It currently seems as if we're implementing the weak variant. However, +-- when used together with 'preferEasyGoalChoices', we will find an +-- inconsistent state in the very next step. +-- +-- What do we do about flags? +-- +-- Like for packages, we store the flag choices we have already made. +-- Now, regarding (1), we only have to test whether we've decided the +-- current flag before. Regarding (2), the interesting bit is in discovering +-- the new active constraints. To this end, we look up the constraints for +-- the package the flag belongs to, and traverse its flagged dependencies. +-- Wherever we find the flag in question, we start recording dependencies +-- underneath as new active dependencies. If we encounter other flags, we +-- check if we've chosen them already and either proceed or stop. + +-- | The state needed during validation. 
+data ValidateState = VS { + index :: Index, + saved :: Map QPN (FlaggedDeps QPN), -- saved, scoped, dependencies + pa :: PreAssignment +} + +type Validate = Reader ValidateState + +validate :: Tree (QGoalReasonChain, Scope) -> Validate (Tree QGoalReasonChain) +validate = cata go + where + go :: TreeF (QGoalReasonChain, Scope) (Validate (Tree QGoalReasonChain)) -> Validate (Tree QGoalReasonChain) + + go (PChoiceF qpn (gr, sc) ts) = PChoice qpn gr <$> sequence (P.mapWithKey (goP qpn gr sc) ts) + go (FChoiceF qfn (gr, _sc) b m ts) = + do + -- Flag choices may occur repeatedly (because they can introduce new constraints + -- in various places). However, subsequent choices must be consistent. We thereby + -- collapse repeated flag choice nodes. + PA _ pfa _ <- asks pa -- obtain current flag-preassignment + case M.lookup qfn pfa of + Just rb -> -- flag has already been assigned; collapse choice to the correct branch + case P.lookup rb ts of + Just t -> goF qfn gr rb t + Nothing -> return $ Fail (toConflictSet (Goal (F qfn) gr)) (MalformedFlagChoice qfn) + Nothing -> -- flag choice is new, follow both branches + FChoice qfn gr b m <$> sequence (P.mapWithKey (goF qfn gr) ts) + go (SChoiceF qsn (gr, _sc) b ts) = + do + -- Optional stanza choices are very similar to flag choices. + PA _ _ psa <- asks pa -- obtain current stanza-preassignment + case M.lookup qsn psa of + Just rb -> -- stanza choice has already been made; collapse choice to the correct branch + case P.lookup rb ts of + Just t -> goS qsn gr rb t + Nothing -> return $ Fail (toConflictSet (Goal (S qsn) gr)) (MalformedStanzaChoice qsn) + Nothing -> -- stanza choice is new, follow both branches + SChoice qsn gr b <$> sequence (P.mapWithKey (goS qsn gr) ts) + + -- We don't need to do anything for goal choices or failure nodes. + go (GoalChoiceF ts) = GoalChoice <$> sequence ts + go (DoneF rdm ) = pure (Done rdm) + go (FailF c fr ) = pure (Fail c fr) + + -- What to do for package nodes ... 
+ goP :: QPN -> QGoalReasonChain -> Scope -> I -> Validate (Tree QGoalReasonChain) -> Validate (Tree QGoalReasonChain) + goP qpn@(Q _pp pn) gr sc i r = do + PA ppa pfa psa <- asks pa -- obtain current preassignment + idx <- asks index -- obtain the index + svd <- asks saved -- obtain saved dependencies + let (PInfo deps _ _ mfr) = idx ! pn ! i -- obtain dependencies and index-dictated exclusions introduced by the choice + let qdeps = L.map (fmap (qualify sc)) deps -- qualify the deps in the current scope + -- the new active constraints are given by the instance we have chosen, + -- plus the dependency information we have for that instance + let goal = Goal (P qpn) gr + let newactives = Dep qpn (Fixed i goal) : L.map (resetGoal goal) (extractDeps pfa psa qdeps) + -- We now try to extend the partial assignment with the new active constraints. + let mnppa = extend (P qpn) ppa newactives + -- In case we continue, we save the scoped dependencies + let nsvd = M.insert qpn qdeps svd + case mfr of + Just fr -> -- The index marks this as an invalid choice. We can stop. + return (Fail (toConflictSet goal) fr) + _ -> case mnppa of + Left (c, d) -> -- We have an inconsistency. We can stop. + return (Fail c (Conflicting d)) + Right nppa -> -- We have an updated partial assignment for the recursive validation. + local (\ s -> s { pa = PA nppa pfa psa, saved = nsvd }) r + + -- What to do for flag nodes ... + goF :: QFN -> QGoalReasonChain -> Bool -> Validate (Tree QGoalReasonChain) -> Validate (Tree QGoalReasonChain) + goF qfn@(FN (PI qpn _i) _f) gr b r = do + PA ppa pfa psa <- asks pa -- obtain current preassignment + svd <- asks saved -- obtain saved dependencies + -- Note that there should be saved dependencies for the package in question, + -- because while building, we do not choose flags before we see the packages + -- that define them. + let qdeps = svd ! qpn + -- We take the *saved* dependencies, because these have been qualified in the + -- correct scope. 
+ -- + -- Extend the flag assignment + let npfa = M.insert qfn b pfa + -- We now try to get the new active dependencies we might learn about because + -- we have chosen a new flag. + let newactives = extractNewDeps (F qfn) gr b npfa psa qdeps + -- As in the package case, we try to extend the partial assignment. + case extend (F qfn) ppa newactives of + Left (c, d) -> return (Fail c (Conflicting d)) -- inconsistency found + Right nppa -> local (\ s -> s { pa = PA nppa npfa psa }) r + + -- What to do for stanza nodes (similar to flag nodes) ... + goS :: QSN -> QGoalReasonChain -> Bool -> Validate (Tree QGoalReasonChain) -> Validate (Tree QGoalReasonChain) + goS qsn@(SN (PI qpn _i) _f) gr b r = do + PA ppa pfa psa <- asks pa -- obtain current preassignment + svd <- asks saved -- obtain saved dependencies + -- Note that there should be saved dependencies for the package in question, + -- because while building, we do not choose flags before we see the packages + -- that define them. + let qdeps = svd ! qpn + -- We take the *saved* dependencies, because these have been qualified in the + -- correct scope. + -- + -- Extend the flag assignment + let npsa = M.insert qsn b psa + -- We now try to get the new active dependencies we might learn about because + -- we have chosen a new flag. + let newactives = extractNewDeps (S qsn) gr b pfa npsa qdeps + -- As in the package case, we try to extend the partial assignment. + case extend (S qsn) ppa newactives of + Left (c, d) -> return (Fail c (Conflicting d)) -- inconsistency found + Right nppa -> local (\ s -> s { pa = PA nppa pfa npsa }) r + +-- | We try to extract as many concrete dependencies from the given flagged +-- dependencies as possible. We make use of all the flag knowledge we have +-- already acquired. 
+extractDeps :: FAssignment -> SAssignment -> FlaggedDeps QPN -> [Dep QPN] +extractDeps fa sa deps = do + d <- deps + case d of + Simple sd -> return sd + Flagged qfn _ td fd -> case M.lookup qfn fa of + Nothing -> mzero + Just True -> extractDeps fa sa td + Just False -> extractDeps fa sa fd + Stanza qsn td -> case M.lookup qsn sa of + Nothing -> mzero + Just True -> extractDeps fa sa td + Just False -> [] + +-- | We try to find new dependencies that become available due to the given +-- flag or stanza choice. We therefore look for the choice in question, and then call +-- 'extractDeps' for everything underneath. +extractNewDeps :: Var QPN -> QGoalReasonChain -> Bool -> FAssignment -> SAssignment -> FlaggedDeps QPN -> [Dep QPN] +extractNewDeps v gr b fa sa = go + where + go deps = do + d <- deps + case d of + Simple _ -> mzero + Flagged qfn' _ td fd + | v == F qfn' -> L.map (resetGoal (Goal v gr)) $ + if b then extractDeps fa sa td else extractDeps fa sa fd + | otherwise -> case M.lookup qfn' fa of + Nothing -> mzero + Just True -> go td + Just False -> go fd + Stanza qsn' td + | v == S qsn' -> L.map (resetGoal (Goal v gr)) $ + if b then extractDeps fa sa td else [] + | otherwise -> case M.lookup qsn' sa of + Nothing -> mzero + Just True -> go td + Just False -> [] + +-- | Interface. 
+validateTree :: Index -> Tree (QGoalReasonChain, Scope) -> Tree QGoalReasonChain +validateTree idx t = runReader (validate t) (VS idx M.empty (PA M.empty M.empty M.empty)) diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Version.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Version.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Version.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular/Version.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,42 @@ +module Distribution.Client.Dependency.Modular.Version where + +import qualified Distribution.Version as CV -- from Cabal +import Distribution.Text -- from Cabal + +-- | Preliminary type for versions. +type Ver = CV.Version + +-- | String representation of a version. +showVer :: Ver -> String +showVer = display + +-- | Version range. Consists of a lower and upper bound. +type VR = CV.VersionRange + +-- | String representation of a version range. +showVR :: VR -> String +showVR = display + +-- | Unconstrained version range. +anyVR :: VR +anyVR = CV.anyVersion + +-- | Version range fixing a single version. +eqVR :: Ver -> VR +eqVR = CV.thisVersion + +-- | Intersect two version ranges. +(.&&.) :: VR -> VR -> VR +(.&&.) = CV.intersectVersionRanges + +-- | Simplify a version range. +simplifyVR :: VR -> VR +simplifyVR = CV.simplifyVersionRange + +-- | Checking a version against a version range. +checkVR :: VR -> Ver -> Bool +checkVR = flip CV.withinRange + +-- | Make a version number. 
+mkV :: [Int] -> Ver +mkV xs = CV.Version xs [] diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Modular.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,58 @@ +module Distribution.Client.Dependency.Modular + ( modularResolver, SolverConfig(..)) where + +-- Here, we try to map between the external cabal-install solver +-- interface and the internal interface that the solver actually +-- expects. There are a number of type conversions to perform: we +-- have to convert the package indices to the uniform index used +-- by the solver; we also have to convert the initial constraints; +-- and finally, we have to convert back the resulting install +-- plan. + +import Data.Map as M + ( fromListWith ) +import Distribution.Client.Dependency.Modular.Assignment + ( Assignment, toCPs ) +import Distribution.Client.Dependency.Modular.Dependency + ( RevDepMap ) +import Distribution.Client.Dependency.Modular.ConfiguredConversion + ( convCP ) +import Distribution.Client.Dependency.Modular.IndexConversion + ( convPIs ) +import Distribution.Client.Dependency.Modular.Log + ( logToProgress ) +import Distribution.Client.Dependency.Modular.Package + ( PN ) +import Distribution.Client.Dependency.Modular.Solver + ( SolverConfig(..), solve ) +import Distribution.Client.Dependency.Types + ( DependencyResolver, PackageConstraint(..) ) +import Distribution.Client.InstallPlan + ( PlanPackage ) +import Distribution.System + ( Platform(..) ) + +-- | Ties the two worlds together: classic cabal-install vs. the modular +-- solver. Performs the necessary translations before and after. 
+modularResolver :: SolverConfig -> DependencyResolver +modularResolver sc (Platform arch os) cinfo iidx sidx pprefs pcs pns = + fmap (uncurry postprocess) $ -- convert install plan + logToProgress (maxBackjumps sc) $ -- convert log format into progress format + solve sc idx pprefs gcs pns + where + -- Indices have to be converted into solver-specific uniform index. + idx = convPIs os arch cinfo (shadowPkgs sc) (strongFlags sc) iidx sidx + -- Constraints have to be converted into a finite map indexed by PN. + gcs = M.fromListWith (++) (map (\ pc -> (pcName pc, [pc])) pcs) + + -- Results have to be converted into an install plan. + postprocess :: Assignment -> RevDepMap -> [PlanPackage] + postprocess a rdm = map (convCP iidx sidx) (toCPs a rdm) + + -- Helper function to extract the PN from a constraint. + pcName :: PackageConstraint -> PN + pcName (PackageConstraintVersion pn _) = pn + pcName (PackageConstraintInstalled pn ) = pn + pcName (PackageConstraintSource pn ) = pn + pcName (PackageConstraintFlags pn _) = pn + pcName (PackageConstraintStanzas pn _) = pn diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/TopDown/Constraints.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/TopDown/Constraints.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/TopDown/Constraints.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/TopDown/Constraints.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,603 @@ +{-# LANGUAGE CPP #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.Dependency.TopDown.Constraints +-- Copyright : (c) Duncan Coutts 2008 +-- License : BSD-like +-- +-- Maintainer : duncan@community.haskell.org +-- Stability : provisional +-- Portability : portable +-- +-- A set of satisfiable constraints 
on a set of packages. +----------------------------------------------------------------------------- +module Distribution.Client.Dependency.TopDown.Constraints ( + Constraints, + empty, + packages, + choices, + isPaired, + + addTarget, + constrain, + Satisfiable(..), + conflicting, + ) where + +import Distribution.Client.Dependency.TopDown.Types +import qualified Distribution.Client.PackageIndex as PackageIndex +import Distribution.Client.PackageIndex (PackageIndex) +import Distribution.Package + ( PackageName, PackageId, PackageIdentifier(..) + , Package(packageId), packageName, packageVersion + , Dependency, PackageFixedDeps(depends) ) +import Distribution.Version + ( Version ) +import Distribution.Client.Utils + ( mergeBy, MergeResult(..) ) + +#if !MIN_VERSION_base(4,8,0) +import Data.Monoid + ( Monoid(mempty) ) +#endif +import Data.Either + ( partitionEithers ) +import qualified Data.Map as Map +import Data.Map (Map) +import qualified Data.Set as Set +import Data.Set (Set) +import Control.Exception + ( assert ) + + +-- | A set of satisfiable constraints on a set of packages. +-- +-- The 'Constraints' type keeps track of a set of targets (identified by +-- package name) that we know that we need. It also keeps track of a set of +-- constraints over all packages in the environment. +-- +-- It maintains the guarantee that, for the target set, the constraints are +-- satisfiable, meaning that there is at least one instance available for each +-- package name that satisfies the constraints on that package name. +-- +-- Note that it is possible to over-constrain a package in the environment that +-- is not in the target set -- the satisfiability guarantee is only maintained +-- for the target set. This is useful because it allows us to exclude packages +-- without needing to know if it would ever be needed or not (e.g. allows +-- excluding broken installed packages). 
+-- +-- Adding a constraint for a target package can fail if it would mean that +-- there are no remaining choices. +-- +-- Adding a constraint for package that is not a target never fails. +-- +-- Adding a new target package can fail if that package already has conflicting +-- constraints. +-- +data Constraints installed source reason + = Constraints + + -- | Targets that we know we need. This is the set for which we + -- guarantee the constraints are satisfiable. + !(Set PackageName) + + -- | The available/remaining set. These are packages that have available + -- choices remaining. This is guaranteed to cover the target packages, + -- but can also cover other packages in the environment. New targets can + -- only be added if there are available choices remaining for them. + !(PackageIndex (InstalledOrSource installed source)) + + -- | The excluded set. Choices that we have excluded by applying + -- constraints. Excluded choices are tagged with the reason. + !(PackageIndex (ExcludedPkg (InstalledOrSource installed source) reason)) + + -- | Paired choices, this is an ugly hack. + !(Map PackageName (Version, Version)) + + -- | Purely for the invariant, we keep a copy of the original index + !(PackageIndex (InstalledOrSource installed source)) + + +-- | Reasons for excluding all, or some choices for a package version. +-- +-- Each package version can have a source instance, an installed instance or +-- both. We distinguish reasons for constraints that excluded both instances, +-- from reasons for constraints that excluded just one instance. +-- +data ExcludedPkg pkg reason + = ExcludedPkg pkg + [reason] -- ^ reasons for excluding both source and installed instances + [reason] -- ^ reasons for excluding the installed instance + [reason] -- ^ reasons for excluding the source instance + +instance Package pkg => Package (ExcludedPkg pkg reason) where + packageId (ExcludedPkg p _ _ _) = packageId p + + +-- | There is a conservation of packages property. 
Packages are never gained or +-- lost, they just transfer from the remaining set to the excluded set. +-- +invariant :: (Package installed, Package source) + => Constraints installed source a -> Bool +invariant (Constraints targets available excluded _ original) = + + -- Relationship between available, excluded and original + all check merged + + -- targets is a subset of available + && all (PackageIndex.elemByPackageName available) (Set.elems targets) + + where + merged = mergeBy (\a b -> packageId a `compare` mergedPackageId b) + (PackageIndex.allPackages original) + (mergeBy (\a b -> packageId a `compare` packageId b) + (PackageIndex.allPackages available) + (PackageIndex.allPackages excluded)) + where + mergedPackageId (OnlyInLeft p ) = packageId p + mergedPackageId (OnlyInRight p) = packageId p + mergedPackageId (InBoth p _) = packageId p + + -- If the package was originally installed only, then + check (InBoth (InstalledOnly _) cur) = case cur of + -- now it's either still remaining as installed only + OnlyInLeft (InstalledOnly _) -> True + -- or it has been excluded + OnlyInRight (ExcludedPkg (InstalledOnly _) [] (_:_) []) -> True + _ -> False + + -- If the package was originally available only, then + check (InBoth (SourceOnly _) cur) = case cur of + -- now it's either still remaining as source only + OnlyInLeft (SourceOnly _) -> True + -- or it has been excluded + OnlyInRight (ExcludedPkg (SourceOnly _) [] [] (_:_)) -> True + _ -> False + + -- If the package was originally installed and source, then + check (InBoth (InstalledAndSource _ _) cur) = case cur of + -- We can have both remaining: + OnlyInLeft (InstalledAndSource _ _) -> True + + -- both excluded, in particular it can have had the just source or + -- installed excluded and later had both excluded so we do not mind if + -- the source or installed excluded is empty or non-empty. 
+ OnlyInRight (ExcludedPkg (InstalledAndSource _ _) _ _ _) -> True + + -- the installed remaining and the source excluded: + InBoth (InstalledOnly _) + (ExcludedPkg (SourceOnly _) [] [] (_:_)) -> True + + -- the source remaining and the installed excluded: + InBoth (SourceOnly _) + (ExcludedPkg (InstalledOnly _) [] (_:_) []) -> True + _ -> False + + check _ = False + + +-- | An update to the constraints can move packages between the two piles +-- but not gain or loose packages. +transitionsTo :: (Package installed, Package source) + => Constraints installed source a + -> Constraints installed source a -> Bool +transitionsTo constraints @(Constraints _ available excluded _ _) + constraints'@(Constraints _ available' excluded' _ _) = + + invariant constraints && invariant constraints' + && null availableGained && null excludedLost + && map (mapInstalledOrSource packageId packageId) availableLost + == map (mapInstalledOrSource packageId packageId) excludedGained + + where + (availableLost, availableGained) + = partitionEithers (foldr lostAndGained [] availableChange) + + (excludedLost, excludedGained) + = partitionEithers (foldr lostAndGained [] excludedChange) + + availableChange = + mergeBy (\a b -> packageId a `compare` packageId b) + (PackageIndex.allPackages available) + (PackageIndex.allPackages available') + + excludedChange = + mergeBy (\a b -> packageId a `compare` packageId b) + [ pkg | ExcludedPkg pkg _ _ _ <- PackageIndex.allPackages excluded ] + [ pkg | ExcludedPkg pkg _ _ _ <- PackageIndex.allPackages excluded' ] + + lostAndGained mr rest = case mr of + OnlyInLeft pkg -> Left pkg : rest + InBoth (InstalledAndSource pkg _) + (SourceOnly _) -> Left (InstalledOnly pkg) : rest + InBoth (InstalledAndSource _ pkg) + (InstalledOnly _) -> Left (SourceOnly pkg) : rest + InBoth (SourceOnly _) + (InstalledAndSource pkg _) -> Right (InstalledOnly pkg) : rest + InBoth (InstalledOnly _) + (InstalledAndSource _ pkg) -> Right (SourceOnly pkg) : rest + OnlyInRight pkg -> 
Right pkg : rest + _ -> rest + + mapInstalledOrSource f g pkg = case pkg of + InstalledOnly a -> InstalledOnly (f a) + SourceOnly b -> SourceOnly (g b) + InstalledAndSource a b -> InstalledAndSource (f a) (g b) + + +-- | We construct 'Constraints' with an initial 'PackageIndex' of all the +-- packages available. +-- +empty :: (PackageFixedDeps installed, Package source) + => PackageIndex installed + -> PackageIndex source + -> Constraints installed source reason +empty installed source = + Constraints targets pkgs excluded pairs pkgs + where + targets = mempty + excluded = mempty + pkgs = PackageIndex.fromList + . map toInstalledOrSource + $ mergeBy (\a b -> packageId a `compare` packageId b) + (PackageIndex.allPackages installed) + (PackageIndex.allPackages source) + toInstalledOrSource (OnlyInLeft i ) = InstalledOnly i + toInstalledOrSource (OnlyInRight a) = SourceOnly a + toInstalledOrSource (InBoth i a) = InstalledAndSource i a + + -- pick up cases like base-3 and 4 where one version depends on the other: + pairs = Map.fromList + [ (name, (packageVersion pkgid1, packageVersion pkgid2)) + | [pkg1, pkg2] <- PackageIndex.allPackagesByName installed + , let name = packageName pkg1 + pkgid1 = packageId pkg1 + pkgid2 = packageId pkg2 + , any ((pkgid1==) . packageId) (depends pkg2) + || any ((pkgid2==) . packageId) (depends pkg1) ] + + +-- | The package targets. +-- +packages :: (Package installed, Package source) + => Constraints installed source reason + -> Set PackageName +packages (Constraints ts _ _ _ _) = ts + + +-- | The package choices that are still available. 
+-- +choices :: (Package installed, Package source) + => Constraints installed source reason + -> PackageIndex (InstalledOrSource installed source) +choices (Constraints _ available _ _ _) = available + +isPaired :: (Package installed, Package source) + => Constraints installed source reason + -> PackageId -> Maybe PackageId +isPaired (Constraints _ _ _ pairs _) (PackageIdentifier name version) = + case Map.lookup name pairs of + Just (v1, v2) + | version == v1 -> Just (PackageIdentifier name v2) + | version == v2 -> Just (PackageIdentifier name v1) + _ -> Nothing + + +data Satisfiable constraints discarded reason + = Satisfiable constraints discarded + | Unsatisfiable + | ConflictsWith [(PackageId, [reason])] + + +addTarget :: (Package installed, Package source) + => PackageName + -> Constraints installed source reason + -> Satisfiable (Constraints installed source reason) + () reason +addTarget pkgname + constraints@(Constraints targets available excluded paired original) + + -- If it's already a target then there's no change + | pkgname `Set.member` targets + = Satisfiable constraints () + + -- If there is some possible choice available for this target then we're ok + | PackageIndex.elemByPackageName available pkgname + = let targets' = Set.insert pkgname targets + constraints' = Constraints targets' available excluded paired original + in assert (constraints `transitionsTo` constraints') $ + Satisfiable constraints' () + + -- If it's not available and it is excluded then we return the conflicts + | PackageIndex.elemByPackageName excluded pkgname + = ConflictsWith conflicts + + -- Otherwise, it's not available and it has not been excluded so the + -- package is simply completely unknown. 
+ | otherwise + = Unsatisfiable + + where + conflicts = + [ (packageId pkg, reasons) + | let excludedChoices = PackageIndex.lookupPackageName excluded pkgname + , ExcludedPkg pkg isReasons iReasons sReasons <- excludedChoices + , let reasons = isReasons ++ iReasons ++ sReasons ] + + +constrain :: (Package installed, Package source) + => PackageName -- ^ which package to constrain + -> (Version -> Bool -> Bool) -- ^ the constraint test + -> reason -- ^ the reason for the constraint + -> Constraints installed source reason + -> Satisfiable (Constraints installed source reason) + [PackageId] reason +constrain pkgname constraint reason + constraints@(Constraints targets available excluded paired original) + + | pkgname `Set.member` targets && not anyRemaining + = if null conflicts then Unsatisfiable + else ConflictsWith conflicts + + | otherwise + = let constraints' = Constraints targets available' excluded' paired original + in assert (constraints `transitionsTo` constraints') $ + Satisfiable constraints' (map packageId newExcluded) + + where + -- This tells us if any packages would remain at all for this package name if + -- we applied this constraint. This amounts to checking if any package + -- satisfies the given constraint, including version range and installation + -- status. 
+ -- + (available', excluded', newExcluded, anyRemaining, conflicts) = + updatePkgsStatus + available excluded + [] False [] + (mergeBy (\pkg pkg' -> packageVersion pkg `compare` packageVersion pkg') + (PackageIndex.lookupPackageName available pkgname) + (PackageIndex.lookupPackageName excluded pkgname)) + + testConstraint pkg = + let ver = packageVersion pkg in + case Map.lookup (packageName pkg) paired of + + Just (v1, v2) + | ver == v1 || ver == v2 + -> case pkg of + InstalledOnly ipkg -> InstalledOnly (ipkg, iOk) + SourceOnly spkg -> SourceOnly (spkg, sOk) + InstalledAndSource ipkg spkg -> + InstalledAndSource (ipkg, iOk) (spkg, sOk) + where + iOk = constraint v1 True || constraint v2 True + sOk = constraint v1 False || constraint v2 False + + _ -> case pkg of + InstalledOnly ipkg -> InstalledOnly (ipkg, iOk) + SourceOnly spkg -> SourceOnly (spkg, sOk) + InstalledAndSource ipkg spkg -> + InstalledAndSource (ipkg, iOk) (spkg, sOk) + where + iOk = constraint ver True + sOk = constraint ver False + + -- For the info about available and excluded versions of the package in + -- question, update the info given the current constraint + -- + -- We update the available package map and the excluded package map + -- we also collect: + -- * the change in available packages (for logging) + -- * whether there are any remaining choices + -- * any constraints that conflict with the current constraint + + updatePkgsStatus _ _ nePkgs ok cs _ + | seq nePkgs $ seq ok $ seq cs False = undefined + + updatePkgsStatus aPkgs ePkgs nePkgs ok cs [] + = (aPkgs, ePkgs, reverse nePkgs, ok, reverse cs) + + updatePkgsStatus aPkgs ePkgs nePkgs ok cs (pkg:pkgs) = + let (aPkgs', ePkgs', mnePkg, ok', mc) = updatePkgStatus aPkgs ePkgs pkg + nePkgs' = maybeCons mnePkg nePkgs + cs' = maybeCons mc cs + in updatePkgsStatus aPkgs' ePkgs' nePkgs' (ok' || ok) cs' pkgs + + maybeCons Nothing xs = xs + maybeCons (Just x) xs = x:xs + + + -- For the info about an available or excluded version of the package 
in + -- question, update the info given the current constraint. + -- + updatePkgStatus aPkgs ePkgs pkg = + case viewPackageStatus pkg of + AllAvailable (InstalledOnly (aiPkg, False)) -> + removeAvailable False + (InstalledOnly aiPkg) + (PackageIndex.deletePackageId pkgid) + (ExcludedPkg (InstalledOnly aiPkg) [] [reason] []) + Nothing + + AllAvailable (SourceOnly (asPkg, False)) -> + removeAvailable False + (SourceOnly asPkg) + (PackageIndex.deletePackageId pkgid) + (ExcludedPkg (SourceOnly asPkg) [] [] [reason]) + Nothing + + AllAvailable (InstalledAndSource (aiPkg, False) (asPkg, False)) -> + removeAvailable False + (InstalledAndSource aiPkg asPkg) + (PackageIndex.deletePackageId pkgid) + (ExcludedPkg (InstalledAndSource aiPkg asPkg) [reason] [] []) + Nothing + + AllAvailable (InstalledAndSource (aiPkg, True) (asPkg, False)) -> + removeAvailable True + (SourceOnly asPkg) + (PackageIndex.insert (InstalledOnly aiPkg)) + (ExcludedPkg (SourceOnly asPkg) [] [] [reason]) + Nothing + + AllAvailable (InstalledAndSource (aiPkg, False) (asPkg, True)) -> + removeAvailable True + (InstalledOnly aiPkg) + (PackageIndex.insert (SourceOnly asPkg)) + (ExcludedPkg (InstalledOnly aiPkg) [] [reason] []) + Nothing + + AllAvailable _ -> noChange True Nothing + + AvailableExcluded (aiPkg, False) (ExcludedPkg (esPkg, False) _ _ srs) -> + removeAvailable False + (InstalledOnly aiPkg) + (PackageIndex.deletePackageId pkgid) + (ExcludedPkg (InstalledAndSource aiPkg esPkg) [reason] [] srs) + Nothing + + AvailableExcluded (_aiPkg, True) (ExcludedPkg (esPkg, False) _ _ srs) -> + addExtraExclusion True + (ExcludedPkg (SourceOnly esPkg) [] [] (reason:srs)) + Nothing + + AvailableExcluded (aiPkg, False) (ExcludedPkg (esPkg, True) _ _ srs) -> + removeAvailable True + (InstalledOnly aiPkg) + (PackageIndex.deletePackageId pkgid) + (ExcludedPkg (InstalledAndSource aiPkg esPkg) [] [reason] srs) + (Just (pkgid, srs)) + + AvailableExcluded (_aiPkg, True) (ExcludedPkg (_esPkg, True) _ _ srs) -> + noChange 
True + (Just (pkgid, srs)) + + ExcludedAvailable (ExcludedPkg (eiPkg, False) _ irs _) (asPkg, False) -> + removeAvailable False + (SourceOnly asPkg) + (PackageIndex.deletePackageId pkgid) + (ExcludedPkg (InstalledAndSource eiPkg asPkg) [reason] irs []) + Nothing + + ExcludedAvailable (ExcludedPkg (eiPkg, True) _ irs _) (asPkg, False) -> + removeAvailable False + (SourceOnly asPkg) + (PackageIndex.deletePackageId pkgid) + (ExcludedPkg (InstalledAndSource eiPkg asPkg) [] irs [reason]) + (Just (pkgid, irs)) + + ExcludedAvailable (ExcludedPkg (eiPkg, False) _ irs _) (_asPkg, True) -> + addExtraExclusion True + (ExcludedPkg (InstalledOnly eiPkg) [] (reason:irs) []) + Nothing + + ExcludedAvailable (ExcludedPkg (_eiPkg, True) _ irs _) (_asPkg, True) -> + noChange True + (Just (pkgid, irs)) + + AllExcluded (ExcludedPkg (InstalledOnly (eiPkg, False)) _ irs _) -> + addExtraExclusion False + (ExcludedPkg (InstalledOnly eiPkg) [] (reason:irs) []) + Nothing + + AllExcluded (ExcludedPkg (InstalledOnly (_eiPkg, True)) _ irs _) -> + noChange False + (Just (pkgid, irs)) + + AllExcluded (ExcludedPkg (SourceOnly (esPkg, False)) _ _ srs) -> + addExtraExclusion False + (ExcludedPkg (SourceOnly esPkg) [] [] (reason:srs)) + Nothing + + AllExcluded (ExcludedPkg (SourceOnly (_esPkg, True)) _ _ srs) -> + noChange False + (Just (pkgid, srs)) + + AllExcluded (ExcludedPkg (InstalledAndSource (eiPkg, False) (esPkg, False)) isrs irs srs) -> + addExtraExclusion False + (ExcludedPkg (InstalledAndSource eiPkg esPkg) (reason:isrs) irs srs) + Nothing + + AllExcluded (ExcludedPkg (InstalledAndSource (eiPkg, True) (esPkg, False)) isrs irs srs) -> + addExtraExclusion False + (ExcludedPkg (InstalledAndSource eiPkg esPkg) isrs irs (reason:srs)) + (Just (pkgid, irs)) + + AllExcluded (ExcludedPkg (InstalledAndSource (eiPkg, False) (esPkg, True)) isrs irs srs) -> + addExtraExclusion False + (ExcludedPkg (InstalledAndSource eiPkg esPkg) isrs (reason:irs) srs) + (Just (pkgid, srs)) + + AllExcluded (ExcludedPkg 
(InstalledAndSource (_eiPkg, True) (_esPkg, True)) isrs irs srs) -> + noChange False + (Just (pkgid, isrs ++ irs ++ srs)) + + where + removeAvailable ok nePkg adjustAvailable ePkg c = + let aPkgs' = adjustAvailable aPkgs + ePkgs' = PackageIndex.insert ePkg ePkgs + in aPkgs' `seq` ePkgs' `seq` + (aPkgs', ePkgs', Just nePkg, ok, c) + + addExtraExclusion ok ePkg c = + let ePkgs' = PackageIndex.insert ePkg ePkgs + in ePkgs' `seq` + (aPkgs, ePkgs', Nothing, ok, c) + + noChange ok c = + (aPkgs, ePkgs, Nothing, ok, c) + + pkgid = case pkg of OnlyInLeft p -> packageId p + OnlyInRight p -> packageId p + InBoth p _ -> packageId p + + + viewPackageStatus + :: (Package installed, Package source) + => MergeResult (InstalledOrSource installed source) + (ExcludedPkg (InstalledOrSource installed source) reason) + -> PackageStatus (installed, Bool) (source, Bool) reason + viewPackageStatus merged = + case merged of + OnlyInLeft aPkg -> + AllAvailable (testConstraint aPkg) + + OnlyInRight (ExcludedPkg ePkg isrs irs srs) -> + AllExcluded (ExcludedPkg (testConstraint ePkg) isrs irs srs) + + InBoth (InstalledOnly aiPkg) + (ExcludedPkg (SourceOnly esPkg) [] [] srs) -> + case testConstraint (InstalledAndSource aiPkg esPkg) of + InstalledAndSource (aiPkg', iOk) (esPkg', sOk) -> + AvailableExcluded (aiPkg', iOk) (ExcludedPkg (esPkg', sOk) [] [] srs) + _ -> impossible + + InBoth (SourceOnly asPkg) + (ExcludedPkg (InstalledOnly eiPkg) [] irs []) -> + case testConstraint (InstalledAndSource eiPkg asPkg) of + InstalledAndSource (eiPkg', iOk) (asPkg', sOk) -> + ExcludedAvailable (ExcludedPkg (eiPkg', iOk) [] irs []) (asPkg', sOk) + _ -> impossible + _ -> impossible + where + impossible = error "impossible: viewPackageStatus invariant violation" + +-- A intermediate structure that enumerates all the possible cases given the +-- invariant. 
This helps us to get simpler and complete pattern matching in +-- updatePkg above +-- +data PackageStatus installed source reason + = AllAvailable (InstalledOrSource installed source) + | AllExcluded (ExcludedPkg (InstalledOrSource installed source) reason) + | AvailableExcluded installed (ExcludedPkg source reason) + | ExcludedAvailable (ExcludedPkg installed reason) source + + +conflicting :: (Package installed, Package source) + => Constraints installed source reason + -> Dependency + -> [(PackageId, [reason])] +conflicting (Constraints _ _ excluded _ _) dep = + [ (packageId pkg, reasonsAll ++ reasonsAvail ++ reasonsInstalled) --TODO + | ExcludedPkg pkg reasonsAll reasonsAvail reasonsInstalled <- + PackageIndex.lookupDependency excluded dep ] diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/TopDown/Types.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/TopDown/Types.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/TopDown/Types.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/TopDown/Types.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,91 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.Dependency.TopDown.Types +-- Copyright : (c) Duncan Coutts 2008 +-- License : BSD-like +-- +-- Maintainer : cabal-devel@haskell.org +-- Stability : provisional +-- Portability : portable +-- +-- Types for the top-down dependency resolver. 
+----------------------------------------------------------------------------- +module Distribution.Client.Dependency.TopDown.Types where + +import Distribution.Client.Types + ( SourcePackage(..), InstalledPackage, OptionalStanza ) + +import Distribution.Package + ( PackageIdentifier, Dependency + , Package(packageId), PackageFixedDeps(depends) ) +import Distribution.PackageDescription + ( FlagAssignment ) + +-- ------------------------------------------------------------ +-- * The various kinds of packages +-- ------------------------------------------------------------ + +type SelectablePackage + = InstalledOrSource InstalledPackageEx UnconfiguredPackage + +type SelectedPackage + = InstalledOrSource InstalledPackageEx SemiConfiguredPackage + +data InstalledOrSource installed source + = InstalledOnly installed + | SourceOnly source + | InstalledAndSource installed source + deriving Eq + +type TopologicalSortNumber = Int + +data InstalledPackageEx + = InstalledPackageEx + InstalledPackage + !TopologicalSortNumber + [PackageIdentifier] -- transitive closure of installed deps + +data UnconfiguredPackage + = UnconfiguredPackage + SourcePackage + !TopologicalSortNumber + FlagAssignment + [OptionalStanza] + +data SemiConfiguredPackage + = SemiConfiguredPackage + SourcePackage -- package info + FlagAssignment -- total flag assignment for the package + [OptionalStanza] -- enabled optional stanzas + [Dependency] -- dependencies we end up with when we apply + -- the flag assignment + +instance Package InstalledPackageEx where + packageId (InstalledPackageEx p _ _) = packageId p + +instance PackageFixedDeps InstalledPackageEx where + depends (InstalledPackageEx _ _ deps) = deps + +instance Package UnconfiguredPackage where + packageId (UnconfiguredPackage p _ _ _) = packageId p + +instance Package SemiConfiguredPackage where + packageId (SemiConfiguredPackage p _ _ _) = packageId p + +instance (Package installed, Package source) + => Package (InstalledOrSource installed 
source) where + packageId (InstalledOnly p ) = packageId p + packageId (SourceOnly p ) = packageId p + packageId (InstalledAndSource p _) = packageId p + + +-- | We can have constraints on selecting just installed or just source +-- packages. +-- +-- In particular, installed packages can only depend on other installed +-- packages while packages that are not yet installed but which we plan to +-- install can depend on installed or other not-yet-installed packages. +-- +data InstalledConstraint = InstalledConstraint + | SourceConstraint + deriving (Eq, Show) diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/TopDown.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/TopDown.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/TopDown.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/TopDown.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,946 @@ +{-# LANGUAGE CPP #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.Dependency.Types +-- Copyright : (c) Duncan Coutts 2008 +-- License : BSD-like +-- +-- Maintainer : cabal-devel@haskell.org +-- Stability : provisional +-- Portability : portable +-- +-- Common types for dependency resolution. +----------------------------------------------------------------------------- +module Distribution.Client.Dependency.TopDown ( + topDownResolver + ) where + +import Distribution.Client.Dependency.TopDown.Types +import qualified Distribution.Client.Dependency.TopDown.Constraints as Constraints +import Distribution.Client.Dependency.TopDown.Constraints + ( Satisfiable(..) ) +import Distribution.Client.IndexUtils + ( convert ) +import qualified Distribution.Client.InstallPlan as InstallPlan +import Distribution.Client.InstallPlan + ( PlanPackage(..) 
) +import Distribution.Client.Types + ( SourcePackage(..), ConfiguredPackage(..), InstalledPackage(..) + , enableStanzas ) +import Distribution.Client.Dependency.Types + ( DependencyResolver, PackageConstraint(..) + , PackagePreferences(..), InstalledPreference(..) + , Progress(..), foldProgress ) + +import qualified Distribution.Client.PackageIndex as PackageIndex +import Distribution.Client.PackageIndex (PackageIndex) +import Distribution.Package + ( PackageName(..), PackageId, Package(..), packageVersion, packageName + , Dependency(Dependency), thisPackageVersion + , simplifyDependency, PackageFixedDeps(depends) ) +import Distribution.PackageDescription + ( PackageDescription(buildDepends) ) +import Distribution.Client.PackageUtils + ( externalBuildDepends ) +import Distribution.PackageDescription.Configuration + ( finalizePackageDescription, flattenPackageDescription ) +import Distribution.Version + ( VersionRange, withinRange, simplifyVersionRange + , UpperBound(..), asVersionIntervals ) +import Distribution.Compiler + ( CompilerInfo ) +import Distribution.System + ( Platform ) +import Distribution.Simple.Utils + ( equating, comparing ) +import Distribution.Text + ( display ) + +import Data.List + ( foldl', maximumBy, minimumBy, nub, sort, sortBy, groupBy ) +import Data.Maybe + ( fromJust, fromMaybe, catMaybes ) +#if !MIN_VERSION_base(4,8,0) +import Data.Monoid + ( Monoid(mempty) ) +#endif +import Control.Monad + ( guard ) +import qualified Data.Set as Set +import Data.Set (Set) +import qualified Data.Map as Map +import qualified Data.Graph as Graph +import qualified Data.Array as Array +import Control.Exception + ( assert ) + +-- ------------------------------------------------------------ +-- * Search state types +-- ------------------------------------------------------------ + +type Constraints = Constraints.Constraints + InstalledPackageEx UnconfiguredPackage ExclusionReason +type SelectedPackages = PackageIndex SelectedPackage + +-- 
------------------------------------------------------------ +-- * The search tree type +-- ------------------------------------------------------------ + +data SearchSpace inherited pkg + = ChoiceNode inherited [[(pkg, SearchSpace inherited pkg)]] + | Failure Failure + +-- ------------------------------------------------------------ +-- * Traverse a search tree +-- ------------------------------------------------------------ + +explore :: (PackageName -> PackagePreferences) + -> SearchSpace (SelectedPackages, Constraints, SelectionChanges) + SelectablePackage + -> Progress Log Failure (SelectedPackages, Constraints) + +explore _ (Failure failure) = Fail failure +explore _ (ChoiceNode (s,c,_) []) = Done (s,c) +explore pref (ChoiceNode _ choices) = + case [ choice | [choice] <- choices ] of + ((_, node'):_) -> Step (logInfo node') (explore pref node') + [] -> Step (logInfo node') (explore pref node') + where + choice = minimumBy (comparing topSortNumber) choices + pkgname = packageName . fst . 
head $ choice + (_, node') = maximumBy (bestByPref pkgname) choice + where + topSortNumber choice = case fst (head choice) of + InstalledOnly (InstalledPackageEx _ i _) -> i + SourceOnly (UnconfiguredPackage _ i _ _) -> i + InstalledAndSource _ (UnconfiguredPackage _ i _ _) -> i + + bestByPref pkgname = case packageInstalledPreference of + PreferLatest -> + comparing (\(p,_) -> ( isPreferred p, packageId p)) + PreferInstalled -> + comparing (\(p,_) -> (isInstalled p, isPreferred p, packageId p)) + where + isInstalled (SourceOnly _) = False + isInstalled _ = True + isPreferred p = packageVersion p `withinRange` preferredVersions + (PackagePreferences preferredVersions packageInstalledPreference) + = pref pkgname + + logInfo node = Select selected discarded + where (selected, discarded) = case node of + Failure _ -> ([], []) + ChoiceNode (_,_,changes) _ -> changes + +-- ------------------------------------------------------------ +-- * Generate a search tree +-- ------------------------------------------------------------ + +type ConfigurePackage = PackageIndex SelectablePackage + -> SelectablePackage + -> Either [Dependency] SelectedPackage + +-- | (packages selected, packages discarded) +type SelectionChanges = ([SelectedPackage], [PackageId]) + +searchSpace :: ConfigurePackage + -> Constraints + -> SelectedPackages + -> SelectionChanges + -> Set PackageName + -> SearchSpace (SelectedPackages, Constraints, SelectionChanges) + SelectablePackage +searchSpace configure constraints selected changes next = + assert (Set.null (selectedSet `Set.intersection` next)) $ + assert (selectedSet `Set.isSubsetOf` Constraints.packages constraints) $ + assert (next `Set.isSubsetOf` Constraints.packages constraints) $ + + ChoiceNode (selected, constraints, changes) + [ [ (pkg, select name pkg) + | pkg <- PackageIndex.lookupPackageName available name ] + | name <- Set.elems next ] + where + available = Constraints.choices constraints + + selectedSet = Set.fromList (map packageName 
(PackageIndex.allPackages selected)) + + select name pkg = case configure available pkg of + Left missing -> Failure $ ConfigureFailed pkg + [ (dep, Constraints.conflicting constraints dep) + | dep <- missing ] + Right pkg' -> + case constrainDeps pkg' newDeps (addDeps constraints newPkgs) [] of + Left failure -> Failure failure + Right (constraints', newDiscarded) -> + searchSpace configure + constraints' selected' (newSelected, newDiscarded) next' + where + selected' = foldl' (flip PackageIndex.insert) selected newSelected + newSelected = + case Constraints.isPaired constraints (packageId pkg) of + Nothing -> [pkg'] + Just pkgid' -> [pkg', pkg''] + where + Just pkg'' = fmap (\(InstalledOnly p) -> InstalledOnly p) + (PackageIndex.lookupPackageId available pkgid') + + newPkgs = [ name' + | (Dependency name' _, _) <- newDeps + , null (PackageIndex.lookupPackageName selected' name') ] + newDeps = concatMap packageConstraints newSelected + next' = Set.delete name + $ foldl' (flip Set.insert) next newPkgs + +packageConstraints :: SelectedPackage -> [(Dependency, Bool)] +packageConstraints = either installedConstraints availableConstraints + . 
preferSource + where + preferSource (InstalledOnly pkg) = Left pkg + preferSource (SourceOnly pkg) = Right pkg + preferSource (InstalledAndSource _ pkg) = Right pkg + installedConstraints (InstalledPackageEx _ _ deps) = + [ (thisPackageVersion dep, True) + | dep <- deps ] + availableConstraints (SemiConfiguredPackage _ _ _ deps) = + [ (dep, False) | dep <- deps ] + +addDeps :: Constraints -> [PackageName] -> Constraints +addDeps = + foldr $ \pkgname cs -> + case Constraints.addTarget pkgname cs of + Satisfiable cs' () -> cs' + _ -> impossible "addDeps unsatisfiable" + +constrainDeps :: SelectedPackage -> [(Dependency, Bool)] -> Constraints + -> [PackageId] + -> Either Failure (Constraints, [PackageId]) +constrainDeps pkg [] cs discard = + case addPackageSelectConstraint (packageId pkg) cs of + Satisfiable cs' discard' -> Right (cs', discard' ++ discard) + _ -> impossible "constrainDeps unsatisfiable(1)" +constrainDeps pkg ((dep, installedConstraint):deps) cs discard = + case addPackageDependencyConstraint (packageId pkg) dep installedConstraint cs of + Satisfiable cs' discard' -> constrainDeps pkg deps cs' (discard' ++ discard) + Unsatisfiable -> impossible "constrainDeps unsatisfiable(2)" + ConflictsWith conflicts -> + Left (DependencyConflict pkg dep installedConstraint conflicts) + +-- ------------------------------------------------------------ +-- * The main algorithm +-- ------------------------------------------------------------ + +search :: ConfigurePackage + -> (PackageName -> PackagePreferences) + -> Constraints + -> Set PackageName + -> Progress Log Failure (SelectedPackages, Constraints) +search configure pref constraints = + explore pref . 
searchSpace configure constraints mempty ([], []) + +-- ------------------------------------------------------------ +-- * The top level resolver +-- ------------------------------------------------------------ + +-- | The main exported resolver, with string logging and failure types to fit +-- the standard 'DependencyResolver' interface. +-- +topDownResolver :: DependencyResolver +topDownResolver platform cinfo installedPkgIndex sourcePkgIndex + preferences constraints targets = + mapMessages (topDownResolver' platform cinfo + (convert installedPkgIndex) sourcePkgIndex + preferences constraints targets) + where + mapMessages :: Progress Log Failure a -> Progress String String a + mapMessages = foldProgress (Step . showLog) (Fail . showFailure) Done + +-- | The native resolver with detailed structured logging and failure types. +-- +topDownResolver' :: Platform -> CompilerInfo + -> PackageIndex InstalledPackage + -> PackageIndex SourcePackage + -> (PackageName -> PackagePreferences) + -> [PackageConstraint] + -> [PackageName] + -> Progress Log Failure [PlanPackage] +topDownResolver' platform cinfo installedPkgIndex sourcePkgIndex + preferences constraints targets = + fmap (uncurry finalise) + . (\cs -> search configure preferences cs initialPkgNames) + =<< pruneBottomUp platform cinfo + =<< addTopLevelConstraints constraints + =<< addTopLevelTargets targets emptyConstraintSet + + where + configure = configurePackage platform cinfo + emptyConstraintSet :: Constraints + emptyConstraintSet = Constraints.empty + (annotateInstalledPackages topSortNumber installedPkgIndex') + (annotateSourcePackages constraints topSortNumber sourcePkgIndex') + (installedPkgIndex', sourcePkgIndex') = + selectNeededSubset installedPkgIndex sourcePkgIndex initialPkgNames + topSortNumber = topologicalSortNumbering installedPkgIndex' sourcePkgIndex' + + initialPkgNames = Set.fromList targets + + finalise selected' constraints' = + PackageIndex.allPackages + . fst . 
improvePlan installedPkgIndex' constraints' + . PackageIndex.fromList + $ finaliseSelectedPackages preferences selected' constraints' + + +addTopLevelTargets :: [PackageName] + -> Constraints + -> Progress a Failure Constraints +addTopLevelTargets [] cs = Done cs +addTopLevelTargets (pkg:pkgs) cs = + case Constraints.addTarget pkg cs of + Satisfiable cs' () -> addTopLevelTargets pkgs cs' + Unsatisfiable -> Fail (NoSuchPackage pkg) + ConflictsWith _conflicts -> impossible "addTopLevelTargets conflicts" + + +addTopLevelConstraints :: [PackageConstraint] -> Constraints + -> Progress Log Failure Constraints +addTopLevelConstraints [] cs = Done cs +addTopLevelConstraints (PackageConstraintFlags _ _ :deps) cs = + addTopLevelConstraints deps cs + +addTopLevelConstraints (PackageConstraintVersion pkg ver:deps) cs = + case addTopLevelVersionConstraint pkg ver cs of + Satisfiable cs' pkgids -> + Step (AppliedVersionConstraint pkg ver pkgids) + (addTopLevelConstraints deps cs') + + Unsatisfiable -> + Fail (TopLevelVersionConstraintUnsatisfiable pkg ver) + + ConflictsWith conflicts -> + Fail (TopLevelVersionConstraintConflict pkg ver conflicts) + +addTopLevelConstraints (PackageConstraintInstalled pkg:deps) cs = + case addTopLevelInstalledConstraint pkg cs of + Satisfiable cs' pkgids -> + Step (AppliedInstalledConstraint pkg InstalledConstraint pkgids) + (addTopLevelConstraints deps cs') + + Unsatisfiable -> + Fail (TopLevelInstallConstraintUnsatisfiable pkg InstalledConstraint) + + ConflictsWith conflicts -> + Fail (TopLevelInstallConstraintConflict pkg InstalledConstraint conflicts) + +addTopLevelConstraints (PackageConstraintSource pkg:deps) cs = + case addTopLevelSourceConstraint pkg cs of + Satisfiable cs' pkgids -> + Step (AppliedInstalledConstraint pkg SourceConstraint pkgids) + (addTopLevelConstraints deps cs') + + Unsatisfiable -> + Fail (TopLevelInstallConstraintUnsatisfiable pkg SourceConstraint) + + ConflictsWith conflicts -> + Fail 
(TopLevelInstallConstraintConflict pkg SourceConstraint conflicts) + +addTopLevelConstraints (PackageConstraintStanzas _ _ : deps) cs = + addTopLevelConstraints deps cs + +-- | Add exclusion on available packages that cannot be configured. +-- +pruneBottomUp :: Platform -> CompilerInfo + -> Constraints -> Progress Log Failure Constraints +pruneBottomUp platform comp constraints = + foldr prune Done (initialPackages constraints) constraints + + where + prune pkgs rest cs = foldr addExcludeConstraint rest unconfigurable cs + where + unconfigurable = + [ (pkg, missing) -- if necessary we could look up missing reasons + | (Just pkg', pkg) <- zip (map getSourcePkg pkgs) pkgs + , Left missing <- [configure cs pkg'] ] + + addExcludeConstraint (pkg, missing) rest cs = + let reason = ExcludedByConfigureFail missing in + case addPackageExcludeConstraint (packageId pkg) reason cs of + Satisfiable cs' [pkgid]| packageId pkg == pkgid + -> Step (ExcludeUnconfigurable pkgid) (rest cs') + Satisfiable _ _ -> impossible "pruneBottomUp satisfiable" + _ -> Fail $ ConfigureFailed pkg + [ (dep, Constraints.conflicting cs dep) + | dep <- missing ] + + configure cs (UnconfiguredPackage (SourcePackage _ pkg _ _) _ flags stanzas) = + finalizePackageDescription flags (dependencySatisfiable cs) + platform comp [] (enableStanzas stanzas pkg) + dependencySatisfiable cs = + not . null . PackageIndex.lookupDependency (Constraints.choices cs) + + -- collect each group of packages (by name) in reverse topsort order + initialPackages = + reverse + . sortBy (comparing (topSortNumber . head)) + . PackageIndex.allPackagesByName + . 
Constraints.choices + + topSortNumber (InstalledOnly (InstalledPackageEx _ i _)) = i + topSortNumber (SourceOnly (UnconfiguredPackage _ i _ _)) = i + topSortNumber (InstalledAndSource _ (UnconfiguredPackage _ i _ _)) = i + + getSourcePkg (InstalledOnly _ ) = Nothing + getSourcePkg (SourceOnly spkg) = Just spkg + getSourcePkg (InstalledAndSource _ spkg) = Just spkg + + +configurePackage :: Platform -> CompilerInfo -> ConfigurePackage +configurePackage platform cinfo available spkg = case spkg of + InstalledOnly ipkg -> Right (InstalledOnly ipkg) + SourceOnly apkg -> fmap SourceOnly (configure apkg) + InstalledAndSource ipkg apkg -> fmap (InstalledAndSource ipkg) + (configure apkg) + where + configure (UnconfiguredPackage apkg@(SourcePackage _ p _ _) _ flags stanzas) = + case finalizePackageDescription flags dependencySatisfiable + platform cinfo [] + (enableStanzas stanzas p) of + Left missing -> Left missing + Right (pkg, flags') -> Right $ + SemiConfiguredPackage apkg flags' stanzas (externalBuildDepends pkg) + + dependencySatisfiable = not . null . PackageIndex.lookupDependency available + +-- | Annotate each installed packages with its set of transitive dependencies +-- and its topological sort number. +-- +annotateInstalledPackages :: (PackageName -> TopologicalSortNumber) + -> PackageIndex InstalledPackage + -> PackageIndex InstalledPackageEx +annotateInstalledPackages dfsNumber installed = PackageIndex.fromList + [ InstalledPackageEx pkg (dfsNumber (packageName pkg)) (transitiveDepends pkg) + | pkg <- PackageIndex.allPackages installed ] + where + transitiveDepends :: InstalledPackage -> [PackageId] + transitiveDepends = map (packageId . toPkg) . tail . Graph.reachable graph + . fromJust . toVertex . packageId + (graph, toPkg, toVertex) = PackageIndex.dependencyGraph installed + + +-- | Annotate each available packages with its topological sort number and any +-- user-supplied partial flag assignment. 
+-- +annotateSourcePackages :: [PackageConstraint] + -> (PackageName -> TopologicalSortNumber) + -> PackageIndex SourcePackage + -> PackageIndex UnconfiguredPackage +annotateSourcePackages constraints dfsNumber sourcePkgIndex = + PackageIndex.fromList + [ UnconfiguredPackage pkg (dfsNumber name) (flagsFor name) (stanzasFor name) + | pkg <- PackageIndex.allPackages sourcePkgIndex + , let name = packageName pkg ] + where + flagsFor = fromMaybe [] . flip Map.lookup flagsMap + flagsMap = Map.fromList + [ (name, flags) + | PackageConstraintFlags name flags <- constraints ] + stanzasFor = fromMaybe [] . flip Map.lookup stanzasMap + stanzasMap = Map.fromListWith (++) + [ (name, stanzas) + | PackageConstraintStanzas name stanzas <- constraints ] + +-- | One of the heuristics we use when guessing which path to take in the +-- search space is an ordering on the choices we make. It's generally better +-- to make decisions about packages higer in the dep graph first since they +-- place constraints on packages lower in the dep graph. +-- +-- To pick them in that order we annotate each package with its topological +-- sort number. So if package A depends on package B then package A will have +-- a lower topological sort number than B and we'll make a choice about which +-- version of A to pick before we make a choice about B (unless there is only +-- one possible choice for B in which case we pick that immediately). +-- +-- To construct these topological sort numbers we combine and flatten the +-- installed and source package sets. We consider only dependencies between +-- named packages, not including versions and for not-yet-configured packages +-- we look at all the possible dependencies, not just those under any single +-- flag assignment. This means we can actually get impossible combinations of +-- edges and even cycles, but that doesn't really matter here, it's only a +-- heuristic. 
+-- +topologicalSortNumbering :: PackageIndex InstalledPackage + -> PackageIndex SourcePackage + -> (PackageName -> TopologicalSortNumber) +topologicalSortNumbering installedPkgIndex sourcePkgIndex = + \pkgname -> let Just vertex = toVertex pkgname + in topologicalSortNumbers Array.! vertex + where + topologicalSortNumbers = Array.array (Array.bounds graph) + (zip (Graph.topSort graph) [0..]) + (graph, _, toVertex) = Graph.graphFromEdges $ + [ ((), packageName pkg, nub deps) + | pkgs@(pkg:_) <- PackageIndex.allPackagesByName installedPkgIndex + , let deps = [ packageName dep + | pkg' <- pkgs + , dep <- depends pkg' ] ] + ++ [ ((), packageName pkg, nub deps) + | pkgs@(pkg:_) <- PackageIndex.allPackagesByName sourcePkgIndex + , let deps = [ depName + | SourcePackage _ pkg' _ _ <- pkgs + , Dependency depName _ <- + buildDepends (flattenPackageDescription pkg') ] ] + +-- | We don't need the entire index (which is rather large and costly if we +-- force it by examining the whole thing). So trace out the maximul subset of +-- each index that we could possibly ever need. Do this by flattening packages +-- and looking at the names of all possible dependencies. 
+-- +selectNeededSubset :: PackageIndex InstalledPackage + -> PackageIndex SourcePackage + -> Set PackageName + -> (PackageIndex InstalledPackage + ,PackageIndex SourcePackage) +selectNeededSubset installedPkgIndex sourcePkgIndex = select mempty mempty + where + select :: PackageIndex InstalledPackage + -> PackageIndex SourcePackage + -> Set PackageName + -> (PackageIndex InstalledPackage + ,PackageIndex SourcePackage) + select installedPkgIndex' sourcePkgIndex' remaining + | Set.null remaining = (installedPkgIndex', sourcePkgIndex') + | otherwise = select installedPkgIndex'' sourcePkgIndex'' remaining'' + where + (next, remaining') = Set.deleteFindMin remaining + moreInstalled = PackageIndex.lookupPackageName installedPkgIndex next + moreSource = PackageIndex.lookupPackageName sourcePkgIndex next + moreRemaining = -- we filter out packages already included in the indexes + -- this avoids an infinite loop if a package depends on itself + -- like base-3.0.3.0 with base-4.0.0.0 + filter notAlreadyIncluded + $ [ packageName dep + | pkg <- moreInstalled + , dep <- depends pkg ] + ++ [ name + | SourcePackage _ pkg _ _ <- moreSource + , Dependency name _ <- + buildDepends (flattenPackageDescription pkg) ] + installedPkgIndex'' = foldl' (flip PackageIndex.insert) + installedPkgIndex' moreInstalled + sourcePkgIndex'' = foldl' (flip PackageIndex.insert) + sourcePkgIndex' moreSource + remaining'' = foldl' (flip Set.insert) + remaining' moreRemaining + notAlreadyIncluded name = + null (PackageIndex.lookupPackageName installedPkgIndex' name) + && null (PackageIndex.lookupPackageName sourcePkgIndex' name) + +-- ------------------------------------------------------------ +-- * Post processing the solution +-- ------------------------------------------------------------ + +finaliseSelectedPackages :: (PackageName -> PackagePreferences) + -> SelectedPackages + -> Constraints + -> [PlanPackage] +finaliseSelectedPackages pref selected constraints = + map finaliseSelected 
(PackageIndex.allPackages selected) + where + remainingChoices = Constraints.choices constraints + finaliseSelected (InstalledOnly ipkg ) = finaliseInstalled ipkg + finaliseSelected (SourceOnly apkg) = finaliseSource Nothing apkg + finaliseSelected (InstalledAndSource ipkg apkg) = + case PackageIndex.lookupPackageId remainingChoices (packageId ipkg) of + --picked package not in constraints + Nothing -> impossible "finaliseSelected no pkg" + -- to constrain to avail only: + Just (SourceOnly _) -> impossible "finaliseSelected src only" + Just (InstalledOnly _) -> finaliseInstalled ipkg + Just (InstalledAndSource _ _) -> finaliseSource (Just ipkg) apkg + + finaliseInstalled (InstalledPackageEx pkg _ _) = InstallPlan.PreExisting pkg + finaliseSource mipkg (SemiConfiguredPackage pkg flags stanzas deps) = + InstallPlan.Configured (ConfiguredPackage pkg flags stanzas deps') + where + deps' = map (packageId . pickRemaining mipkg) deps + + pickRemaining mipkg dep@(Dependency _name versionRange) = + case PackageIndex.lookupDependency remainingChoices dep of + [] -> impossible "pickRemaining no pkg" + [pkg'] -> pkg' + remaining -> assert (checkIsPaired remaining) + $ maximumBy bestByPref remaining + where + -- We order candidate packages to pick for a dependency by these + -- three factors. The last factor is just highest version wins. + bestByPref = + comparing (\p -> (isCurrent p, isPreferred p, packageVersion p)) + -- Is the package already used by the installed version of this + -- package? If so we should pick that first. This stops us from doing + -- silly things like deciding to rebuild haskell98 against base 3. + isCurrent = case mipkg :: Maybe InstalledPackageEx of + Nothing -> \_ -> False + Just ipkg -> \p -> packageId p `elem` depends ipkg + -- If there is no upper bound on the version range then we apply a + -- preferred version according to the hackage or user's suggested + -- version constraints. 
TODO: distinguish hacks from prefs + bounded = boundedAbove versionRange + isPreferred p + | bounded = True -- any constant will do + | otherwise = packageVersion p `withinRange` preferredVersions + where (PackagePreferences preferredVersions _) = pref (packageName p) + + boundedAbove :: VersionRange -> Bool + boundedAbove vr = case asVersionIntervals vr of + [] -> True -- this is the inconsistent version range. + intervals -> case last intervals of + (_, UpperBound _ _) -> True + (_, NoUpperBound ) -> False + + -- We really only expect to find more than one choice remaining when + -- we're finalising a dependency on a paired package. + checkIsPaired [p1, p2] = + case Constraints.isPaired constraints (packageId p1) of + Just p2' -> packageId p2' == packageId p2 + Nothing -> False + checkIsPaired _ = False + +-- | Improve an existing installation plan by, where possible, swapping +-- packages we plan to install with ones that are already installed. +-- This may add additional constraints due to the dependencies of installed +-- packages on other installed packages. +-- +improvePlan :: PackageIndex InstalledPackage + -> Constraints + -> PackageIndex PlanPackage + -> (PackageIndex PlanPackage, Constraints) +improvePlan installed constraints0 selected0 = + foldl' improve (selected0, constraints0) (reverseTopologicalOrder selected0) + where + improve (selected, constraints) = fromMaybe (selected, constraints) + . improvePkg selected constraints + + -- The idea is to improve the plan by swapping a configured package for + -- an equivalent installed one. 
For a particular package the condition is + -- that the package be in a configured state, that a the same version be + -- already installed with the exact same dependencies and all the packages + -- in the plan that it depends on are in the installed state + improvePkg selected constraints pkgid = do + Configured pkg <- PackageIndex.lookupPackageId selected pkgid + ipkg <- PackageIndex.lookupPackageId installed pkgid + guard $ all (isInstalled selected) (depends pkg) + tryInstalled selected constraints [ipkg] + + isInstalled selected pkgid = + case PackageIndex.lookupPackageId selected pkgid of + Just (PreExisting _) -> True + _ -> False + + tryInstalled :: PackageIndex PlanPackage -> Constraints + -> [InstalledPackage] + -> Maybe (PackageIndex PlanPackage, Constraints) + tryInstalled selected constraints [] = Just (selected, constraints) + tryInstalled selected constraints (pkg:pkgs) = + case constraintsOk (packageId pkg) (depends pkg) constraints of + Nothing -> Nothing + Just constraints' -> tryInstalled selected' constraints' pkgs' + where + selected' = PackageIndex.insert (PreExisting pkg) selected + pkgs' = catMaybes (map notSelected (depends pkg)) ++ pkgs + notSelected pkgid = + case (PackageIndex.lookupPackageId installed pkgid + ,PackageIndex.lookupPackageId selected pkgid) of + (Just pkg', Nothing) -> Just pkg' + _ -> Nothing + + constraintsOk _ [] constraints = Just constraints + constraintsOk pkgid (pkgid':pkgids) constraints = + case addPackageDependencyConstraint pkgid dep True constraints of + Satisfiable constraints' _ -> constraintsOk pkgid pkgids constraints' + _ -> Nothing + where + dep = thisPackageVersion pkgid' + + reverseTopologicalOrder :: PackageFixedDeps pkg + => PackageIndex pkg -> [PackageId] + reverseTopologicalOrder index = map (packageId . toPkg) + . Graph.topSort + . 
Graph.transposeG + $ graph + where (graph, toPkg, _) = PackageIndex.dependencyGraph index + +-- ------------------------------------------------------------ +-- * Adding and recording constraints +-- ------------------------------------------------------------ + +addPackageSelectConstraint :: PackageId -> Constraints + -> Satisfiable Constraints + [PackageId] ExclusionReason +addPackageSelectConstraint pkgid = + Constraints.constrain pkgname constraint reason + where + pkgname = packageName pkgid + constraint ver _ = ver == packageVersion pkgid + reason = SelectedOther pkgid + +addPackageExcludeConstraint :: PackageId -> ExclusionReason + -> Constraints + -> Satisfiable Constraints + [PackageId] ExclusionReason +addPackageExcludeConstraint pkgid reason = + Constraints.constrain pkgname constraint reason + where + pkgname = packageName pkgid + constraint ver installed + | ver == packageVersion pkgid = installed + | otherwise = True + +addPackageDependencyConstraint :: PackageId -> Dependency -> Bool + -> Constraints + -> Satisfiable Constraints + [PackageId] ExclusionReason +addPackageDependencyConstraint pkgid dep@(Dependency pkgname verrange) + installedConstraint = + Constraints.constrain pkgname constraint reason + where + constraint ver installed = ver `withinRange` verrange + && if installedConstraint then installed else True + reason = ExcludedByPackageDependency pkgid dep installedConstraint + +addTopLevelVersionConstraint :: PackageName -> VersionRange + -> Constraints + -> Satisfiable Constraints + [PackageId] ExclusionReason +addTopLevelVersionConstraint pkgname verrange = + Constraints.constrain pkgname constraint reason + where + constraint ver _installed = ver `withinRange` verrange + reason = ExcludedByTopLevelConstraintVersion pkgname verrange + +addTopLevelInstalledConstraint, + addTopLevelSourceConstraint :: PackageName + -> Constraints + -> Satisfiable Constraints + [PackageId] ExclusionReason +addTopLevelInstalledConstraint pkgname = + 
Constraints.constrain pkgname constraint reason + where + constraint _ver installed = installed + reason = ExcludedByTopLevelConstraintInstalled pkgname + +addTopLevelSourceConstraint pkgname = + Constraints.constrain pkgname constraint reason + where + constraint _ver installed = not installed + reason = ExcludedByTopLevelConstraintSource pkgname + + +-- ------------------------------------------------------------ +-- * Reasons for constraints +-- ------------------------------------------------------------ + +-- | For every constraint we record we also record the reason that constraint +-- is needed. So if we end up failing due to conflicting constraints then we +-- can give an explnanation as to what was conflicting and why. +-- +data ExclusionReason = + + -- | We selected this other version of the package. That means we exclude + -- all the other versions. + SelectedOther PackageId + + -- | We excluded this version of the package because it failed to + -- configure probably because of unsatisfiable deps. + | ExcludedByConfigureFail [Dependency] + + -- | We excluded this version of the package because another package that + -- we selected imposed a dependency which this package did not satisfy. + | ExcludedByPackageDependency PackageId Dependency Bool + + -- | We excluded this version of the package because it did not satisfy + -- a dependency given as an original top level input. + -- + | ExcludedByTopLevelConstraintVersion PackageName VersionRange + | ExcludedByTopLevelConstraintInstalled PackageName + | ExcludedByTopLevelConstraintSource PackageName + + deriving Eq + +-- | Given an excluded package and the reason it was excluded, produce a human +-- readable explanation. 
+-- +showExclusionReason :: PackageId -> ExclusionReason -> String +showExclusionReason pkgid (SelectedOther pkgid') = + display pkgid ++ " was excluded because " ++ + display pkgid' ++ " was selected instead" +showExclusionReason pkgid (ExcludedByConfigureFail missingDeps) = + display pkgid ++ " was excluded because it could not be configured. " + ++ "It requires " ++ listOf displayDep missingDeps +showExclusionReason pkgid (ExcludedByPackageDependency pkgid' dep installedConstraint) + = display pkgid ++ " was excluded because " ++ display pkgid' ++ " requires " + ++ (if installedConstraint then "an installed instance of " else "") + ++ displayDep dep +showExclusionReason pkgid (ExcludedByTopLevelConstraintVersion pkgname verRange) = + display pkgid ++ " was excluded because of the top level constraint " ++ + displayDep (Dependency pkgname verRange) +showExclusionReason pkgid (ExcludedByTopLevelConstraintInstalled pkgname) + = display pkgid ++ " was excluded because of the top level constraint '" + ++ display pkgname ++ " installed' which means that only installed instances " + ++ "of the package may be selected." +showExclusionReason pkgid (ExcludedByTopLevelConstraintSource pkgname) + = display pkgid ++ " was excluded because of the top level constraint '" + ++ display pkgname ++ " source' which means that only source versions " + ++ "of the package may be selected." 
+ + +-- ------------------------------------------------------------ +-- * Logging progress and failures +-- ------------------------------------------------------------ + +data Log = Select [SelectedPackage] [PackageId] + | AppliedVersionConstraint PackageName VersionRange [PackageId] + | AppliedInstalledConstraint PackageName InstalledConstraint [PackageId] + | ExcludeUnconfigurable PackageId + +data Failure + = NoSuchPackage + PackageName + | ConfigureFailed + SelectablePackage + [(Dependency, [(PackageId, [ExclusionReason])])] + | DependencyConflict + SelectedPackage Dependency Bool + [(PackageId, [ExclusionReason])] + | TopLevelVersionConstraintConflict + PackageName VersionRange + [(PackageId, [ExclusionReason])] + | TopLevelVersionConstraintUnsatisfiable + PackageName VersionRange + | TopLevelInstallConstraintConflict + PackageName InstalledConstraint + [(PackageId, [ExclusionReason])] + | TopLevelInstallConstraintUnsatisfiable + PackageName InstalledConstraint + +showLog :: Log -> String +showLog (Select selected discarded) = case (selectedMsg, discardedMsg) of + ("", y) -> y + (x, "") -> x + (x, y) -> x ++ " and " ++ y + + where + selectedMsg = "selecting " ++ case selected of + [] -> "" + [s] -> display (packageId s) ++ " " ++ kind s + (s:ss) -> listOf id + $ (display (packageId s) ++ " " ++ kind s) + : [ display (packageVersion s') ++ " " ++ kind s' + | s' <- ss ] + + kind (InstalledOnly _) = "(installed)" + kind (SourceOnly _) = "(source)" + kind (InstalledAndSource _ _) = "(installed or source)" + + discardedMsg = case discarded of + [] -> "" + _ -> "discarding " ++ listOf id + [ element + | (pkgid:pkgids) <- groupBy (equating packageName) (sort discarded) + , element <- display pkgid : map (display . 
packageVersion) pkgids ] +showLog (AppliedVersionConstraint pkgname ver pkgids) = + "applying constraint " ++ display (Dependency pkgname ver) + ++ if null pkgids + then "" + else " which excludes " ++ listOf display pkgids +showLog (AppliedInstalledConstraint pkgname inst pkgids) = + "applying constraint " ++ display pkgname ++ " '" + ++ (case inst of InstalledConstraint -> "installed"; _ -> "source") ++ "' " + ++ if null pkgids + then "" + else "which excludes " ++ listOf display pkgids +showLog (ExcludeUnconfigurable pkgid) = + "excluding " ++ display pkgid ++ " (it cannot be configured)" + +showFailure :: Failure -> String +showFailure (NoSuchPackage pkgname) = + "The package " ++ display pkgname ++ " is unknown." +showFailure (ConfigureFailed pkg missingDeps) = + "cannot configure " ++ displayPkg pkg ++ ". It requires " + ++ listOf (displayDep . fst) missingDeps + ++ '\n' : unlines (map (uncurry whyNot) missingDeps) + + where + whyNot (Dependency name ver) [] = + "There is no available version of " ++ display name + ++ " that satisfies " ++ displayVer ver + + whyNot dep conflicts = + "For the dependency on " ++ displayDep dep + ++ " there are these packages: " ++ listOf display pkgs + ++ ". 
However none of them are available.\n" + ++ unlines [ showExclusionReason (packageId pkg') reason + | (pkg', reasons) <- conflicts, reason <- reasons ] + + where pkgs = map fst conflicts + +showFailure (DependencyConflict pkg dep installedConstraint conflicts) = + "dependencies conflict: " + ++ displayPkg pkg ++ " requires " + ++ (if installedConstraint then "an installed instance of " else "") + ++ displayDep dep ++ " however:\n" + ++ unlines [ showExclusionReason (packageId pkg') reason + | (pkg', reasons) <- conflicts, reason <- reasons ] + +showFailure (TopLevelVersionConstraintConflict name ver conflicts) = + "constraints conflict: we have the top level constraint " + ++ displayDep (Dependency name ver) ++ ", but\n" + ++ unlines [ showExclusionReason (packageId pkg') reason + | (pkg', reasons) <- conflicts, reason <- reasons ] + +showFailure (TopLevelVersionConstraintUnsatisfiable name ver) = + "There is no available version of " ++ display name + ++ " that satisfies " ++ displayVer ver + +showFailure (TopLevelInstallConstraintConflict name InstalledConstraint conflicts) = + "constraints conflict: " + ++ "top level constraint '" ++ display name ++ " installed' however\n" + ++ unlines [ showExclusionReason (packageId pkg') reason + | (pkg', reasons) <- conflicts, reason <- reasons ] + +showFailure (TopLevelInstallConstraintUnsatisfiable name InstalledConstraint) = + "There is no installed version of " ++ display name + +showFailure (TopLevelInstallConstraintConflict name SourceConstraint conflicts) = + "constraints conflict: " + ++ "top level constraint '" ++ display name ++ " source' however\n" + ++ unlines [ showExclusionReason (packageId pkg') reason + | (pkg', reasons) <- conflicts, reason <- reasons ] + +showFailure (TopLevelInstallConstraintUnsatisfiable name SourceConstraint) = + "There is no available source version of " ++ display name + +displayVer :: VersionRange -> String +displayVer = display . 
simplifyVersionRange + +displayDep :: Dependency -> String +displayDep = display . simplifyDependency + + +-- ------------------------------------------------------------ +-- * Utils +-- ------------------------------------------------------------ + +impossible :: String -> a +impossible msg = internalError $ "assertion failure: " ++ msg + +internalError :: String -> a +internalError msg = error $ "internal error: " ++ msg + +displayPkg :: Package pkg => pkg -> String +displayPkg = display . packageId + +listOf :: (a -> String) -> [a] -> String +listOf _ [] = [] +listOf disp [x0] = disp x0 +listOf disp (x0:x1:xs) = disp x0 ++ go x1 xs + where go x [] = " and " ++ disp x + go x (x':xs') = ", " ++ disp x ++ go x' xs' diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Types.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Types.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Types.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency/Types.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,258 @@ +{-# LANGUAGE CPP #-} +{-# LANGUAGE DeriveFunctor #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.Dependency.Types +-- Copyright : (c) Duncan Coutts 2008 +-- License : BSD-like +-- +-- Maintainer : cabal-devel@haskell.org +-- Stability : provisional +-- Portability : portable +-- +-- Common types for dependency resolution. 
+----------------------------------------------------------------------------- +module Distribution.Client.Dependency.Types ( + ExtDependency(..), + + PreSolver(..), + Solver(..), + DependencyResolver, + + AllowNewer(..), isAllowNewer, + PackageConstraint(..), + debugPackageConstraint, + PackagePreferences(..), + InstalledPreference(..), + PackagesPreferenceDefault(..), + + Progress(..), + foldProgress, + ) where + +#if !MIN_VERSION_base(4,8,0) +import Control.Applicative + ( Applicative(..) ) +#endif +import Control.Applicative + ( Alternative(..) ) + + +import Data.Char + ( isAlpha, toLower ) +#if !MIN_VERSION_base(4,8,0) +import Data.Monoid + ( Monoid(..) ) +#endif + +import Distribution.Client.Types + ( OptionalStanza(..), SourcePackage(..) ) +import qualified Distribution.Client.InstallPlan as InstallPlan + +import Distribution.Compat.ReadP + ( (<++) ) + +import qualified Distribution.Compat.ReadP as Parse + ( pfail, munch1 ) +import Distribution.PackageDescription + ( FlagAssignment, FlagName(..) ) +import qualified Distribution.Client.PackageIndex as PackageIndex + ( PackageIndex ) +import Distribution.Simple.PackageIndex ( InstalledPackageIndex ) +import Distribution.Package + ( Dependency, PackageName, InstalledPackageId ) +import Distribution.Version + ( VersionRange, simplifyVersionRange ) +import Distribution.Compiler + ( CompilerInfo ) +import Distribution.System + ( Platform ) +import Distribution.Text + ( Text(..), display ) + +import Text.PrettyPrint + ( text ) + +import Prelude hiding (fail) + +-- | Covers source dependencies and installed dependencies in +-- one type. +data ExtDependency = SourceDependency Dependency + | InstalledDependency InstalledPackageId + +instance Text ExtDependency where + disp (SourceDependency dep) = disp dep + disp (InstalledDependency dep) = disp dep + + parse = (SourceDependency `fmap` parse) <++ (InstalledDependency `fmap` parse) + +-- | All the solvers that can be selected. 
+data PreSolver = AlwaysTopDown | AlwaysModular | Choose + deriving (Eq, Ord, Show, Bounded, Enum) + +-- | All the solvers that can be used. +data Solver = TopDown | Modular + deriving (Eq, Ord, Show, Bounded, Enum) + +instance Text PreSolver where + disp AlwaysTopDown = text "topdown" + disp AlwaysModular = text "modular" + disp Choose = text "choose" + parse = do + name <- Parse.munch1 isAlpha + case map toLower name of + "topdown" -> return AlwaysTopDown + "modular" -> return AlwaysModular + "choose" -> return Choose + _ -> Parse.pfail + +-- | A dependency resolver is a function that works out an installation plan +-- given the set of installed and available packages and a set of deps to +-- solve for. +-- +-- The reason for this interface is because there are dozens of approaches to +-- solving the package dependency problem and we want to make it easy to swap +-- in alternatives. +-- +type DependencyResolver = Platform + -> CompilerInfo + -> InstalledPackageIndex + -> PackageIndex.PackageIndex SourcePackage + -> (PackageName -> PackagePreferences) + -> [PackageConstraint] + -> [PackageName] + -> Progress String String [InstallPlan.PlanPackage] + +-- | Per-package constraints. Package constraints must be respected by the +-- solver. Multiple constraints for each package can be given, though obviously +-- it is possible to construct conflicting constraints (eg impossible version +-- range or inconsistent flag assignment). +-- +data PackageConstraint + = PackageConstraintVersion PackageName VersionRange + | PackageConstraintInstalled PackageName + | PackageConstraintSource PackageName + | PackageConstraintFlags PackageName FlagAssignment + | PackageConstraintStanzas PackageName [OptionalStanza] + deriving (Show,Eq) + +-- | Provide a textual representation of a package constraint +-- for debugging purposes. 
+-- +debugPackageConstraint :: PackageConstraint -> String +debugPackageConstraint (PackageConstraintVersion pn vr) = + display pn ++ " " ++ display (simplifyVersionRange vr) +debugPackageConstraint (PackageConstraintInstalled pn) = + display pn ++ " installed" +debugPackageConstraint (PackageConstraintSource pn) = + display pn ++ " source" +debugPackageConstraint (PackageConstraintFlags pn fs) = + "flags " ++ display pn ++ " " ++ unwords (map (uncurry showFlag) fs) + where + showFlag (FlagName f) True = "+" ++ f + showFlag (FlagName f) False = "-" ++ f +debugPackageConstraint (PackageConstraintStanzas pn ss) = + "stanzas " ++ display pn ++ " " ++ unwords (map showStanza ss) + where + showStanza TestStanzas = "test" + showStanza BenchStanzas = "bench" + +-- | A per-package preference on the version. It is a soft constraint that the +-- 'DependencyResolver' should try to respect where possible. It consists of +-- a 'InstalledPreference' which says if we prefer versions of packages +-- that are already installed. It also has a 'PackageVersionPreference' which +-- is a suggested constraint on the version number. The resolver should try to +-- use package versions that satisfy the suggested version constraint. +-- +-- It is not specified if preferences on some packages are more important than +-- others. +-- +data PackagePreferences = PackagePreferences VersionRange InstalledPreference + +-- | Whether we prefer an installed version of a package or simply the latest +-- version. +-- +data InstalledPreference = PreferInstalled | PreferLatest + deriving Show + +-- | Global policy for all packages to say if we prefer package versions that +-- are already installed locally or if we just prefer the latest available. +-- +data PackagesPreferenceDefault = + + -- | Always prefer the latest version irrespective of any existing + -- installed version. + -- + -- * This is the standard policy for upgrade. 
+ -- + PreferAllLatest + + -- | Always prefer the installed versions over ones that would need to be + -- installed. Secondarily, prefer latest versions (eg the latest installed + -- version or if there are none then the latest source version). + | PreferAllInstalled + + -- | Prefer the latest version for packages that are explicitly requested + -- but prefers the installed version for any other packages. + -- + -- * This is the standard policy for install. + -- + | PreferLatestForSelected + deriving Show + +-- | Policy for relaxing upper bounds in dependencies. For example, given +-- 'build-depends: array >= 0.3 && < 0.5', are we allowed to relax the upper +-- bound and choose a version of 'array' that is greater or equal to 0.5? By +-- default the upper bounds are always strictly honored. +data AllowNewer = + + -- | Default: honor the upper bounds in all dependencies, never choose + -- versions newer than allowed. + AllowNewerNone + + -- | Ignore upper bounds in dependencies on the given packages. + | AllowNewerSome [PackageName] + + -- | Ignore upper bounds in dependencies on all packages. + | AllowNewerAll + +-- | Convert 'AllowNewer' to a boolean. +isAllowNewer :: AllowNewer -> Bool +isAllowNewer AllowNewerNone = False +isAllowNewer (AllowNewerSome _) = True +isAllowNewer AllowNewerAll = True + +-- | A type to represent the unfolding of an expensive long running +-- calculation that may fail. We may get intermediate steps before the final +-- result which may be used to indicate progress and\/or logging messages. +-- +data Progress step fail done = Step step (Progress step fail done) + | Fail fail + | Done done + deriving Functor + +-- | Consume a 'Progress' calculation. Much like 'foldr' for lists but with two +-- base cases, one for a final result and one for failure. 
+-- +-- Eg to convert into a simple 'Either' result use: +-- +-- > foldProgress (flip const) Left Right +-- +foldProgress :: (step -> a -> a) -> (fail -> a) -> (done -> a) + -> Progress step fail done -> a +foldProgress step fail done = fold + where fold (Step s p) = step s (fold p) + fold (Fail f) = fail f + fold (Done r) = done r + +instance Monad (Progress step fail) where + return a = Done a + p >>= f = foldProgress Step Fail f p + +instance Applicative (Progress step fail) where + pure a = Done a + p <*> x = foldProgress Step Fail (flip fmap x) p + +instance Monoid fail => Alternative (Progress step fail) where + empty = Fail mempty + p <|> q = foldProgress Step (const q) Done p diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Dependency.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,687 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.Dependency +-- Copyright : (c) David Himmelstrup 2005, +-- Bjorn Bringert 2007 +-- Duncan Coutts 2008 +-- License : BSD-like +-- +-- Maintainer : cabal-devel@gmail.com +-- Stability : provisional +-- Portability : portable +-- +-- Top level interface to dependency resolution. 
+----------------------------------------------------------------------------- +module Distribution.Client.Dependency ( + -- * The main package dependency resolver + chooseSolver, + resolveDependencies, + Progress(..), + foldProgress, + + -- * Alternate, simple resolver that does not do dependencies recursively + resolveWithoutDependencies, + + -- * Constructing resolver policies + DepResolverParams(..), + PackageConstraint(..), + PackagesPreferenceDefault(..), + PackagePreference(..), + InstalledPreference(..), + + -- ** Standard policy + standardInstallPolicy, + PackageSpecifier(..), + + -- ** Sandbox policy + applySandboxInstallPolicy, + + -- ** Extra policy options + dontUpgradeNonUpgradeablePackages, + hideBrokenInstalledPackages, + upgradeDependencies, + reinstallTargets, + + -- ** Policy utils + addConstraints, + addPreferences, + setPreferenceDefault, + setReorderGoals, + setIndependentGoals, + setAvoidReinstalls, + setShadowPkgs, + setStrongFlags, + setMaxBackjumps, + addSourcePackages, + hideInstalledPackagesSpecificByInstalledPackageId, + hideInstalledPackagesSpecificBySourcePackageId, + hideInstalledPackagesAllVersions, + removeUpperBounds + ) where + +import Distribution.Client.Dependency.TopDown + ( topDownResolver ) +import Distribution.Client.Dependency.Modular + ( modularResolver, SolverConfig(..) ) +import qualified Distribution.Client.PackageIndex as PackageIndex +import Distribution.Simple.PackageIndex (InstalledPackageIndex) +import qualified Distribution.Simple.PackageIndex as InstalledPackageIndex +import qualified Distribution.Client.InstallPlan as InstallPlan +import Distribution.Client.InstallPlan (InstallPlan) +import Distribution.Client.Types + ( SourcePackageDb(SourcePackageDb) + , SourcePackage(..) ) +import Distribution.Client.Dependency.Types + ( PreSolver(..), Solver(..), DependencyResolver, PackageConstraint(..) + , debugPackageConstraint + , AllowNewer(..), PackagePreferences(..), InstalledPreference(..) 
+ , PackagesPreferenceDefault(..) + , Progress(..), foldProgress ) +import Distribution.Client.Sandbox.Types + ( SandboxPackageInfo(..) ) +import Distribution.Client.Targets +import qualified Distribution.InstalledPackageInfo as Installed +import Distribution.Package + ( PackageName(..), PackageId, Package(..), packageName, packageVersion + , InstalledPackageId, Dependency(Dependency)) +import qualified Distribution.PackageDescription as PD + ( PackageDescription(..), GenericPackageDescription(..) + , Library(..), Executable(..), TestSuite(..), Benchmark(..), CondTree) +import Distribution.PackageDescription (BuildInfo(targetBuildDepends)) +import Distribution.PackageDescription.Configuration (mapCondTree) +import Distribution.Version + ( Version(..), VersionRange, anyVersion, thisVersion, withinRange + , removeUpperBound, simplifyVersionRange ) +import Distribution.Compiler + ( CompilerId(..), CompilerInfo(..), CompilerFlavor(..) ) +import Distribution.System + ( Platform ) +import Distribution.Simple.Utils + ( comparing, warn, info ) +import Distribution.Text + ( display ) +import Distribution.Verbosity + ( Verbosity ) + +import Data.List (maximumBy, foldl', intercalate) +import Data.Maybe (fromMaybe) +import qualified Data.Map as Map +import qualified Data.Set as Set +import Data.Set (Set) + +-- ------------------------------------------------------------ +-- * High level planner policy +-- ------------------------------------------------------------ + +-- | The set of parameters to the dependency resolver. These parameters are +-- relatively low level but many kinds of high level policies can be +-- implemented in terms of adjustments to the parameters. 
+-- +data DepResolverParams = DepResolverParams { + depResolverTargets :: [PackageName], + depResolverConstraints :: [PackageConstraint], + depResolverPreferences :: [PackagePreference], + depResolverPreferenceDefault :: PackagesPreferenceDefault, + depResolverInstalledPkgIndex :: InstalledPackageIndex, + depResolverSourcePkgIndex :: PackageIndex.PackageIndex SourcePackage, + depResolverReorderGoals :: Bool, + depResolverIndependentGoals :: Bool, + depResolverAvoidReinstalls :: Bool, + depResolverShadowPkgs :: Bool, + depResolverStrongFlags :: Bool, + depResolverMaxBackjumps :: Maybe Int + } + +debugDepResolverParams :: DepResolverParams -> String +debugDepResolverParams p = + "targets: " ++ intercalate ", " (map display (depResolverTargets p)) + ++ "\nconstraints: " + ++ concatMap (("\n " ++) . debugPackageConstraint) (depResolverConstraints p) + ++ "\npreferences: " + ++ concatMap (("\n " ++) . debugPackagePreference) (depResolverPreferences p) + ++ "\nstrategy: " ++ show (depResolverPreferenceDefault p) + +-- | A package selection preference for a particular package. +-- +-- Preferences are soft constraints that the dependency resolver should try to +-- respect where possible. It is not specified if preferences on some packages +-- are more important than others. +-- +data PackagePreference = + + -- | A suggested constraint on the version number. + PackageVersionPreference PackageName VersionRange + + -- | If we prefer versions of packages that are already installed. + | PackageInstalledPreference PackageName InstalledPreference + +-- | Provide a textual representation of a package preference +-- for debugging purposes. 
+-- +debugPackagePreference :: PackagePreference -> String +debugPackagePreference (PackageVersionPreference pn vr) = + display pn ++ " " ++ display (simplifyVersionRange vr) +debugPackagePreference (PackageInstalledPreference pn ip) = + display pn ++ " " ++ show ip + +basicDepResolverParams :: InstalledPackageIndex + -> PackageIndex.PackageIndex SourcePackage + -> DepResolverParams +basicDepResolverParams installedPkgIndex sourcePkgIndex = + DepResolverParams { + depResolverTargets = [], + depResolverConstraints = [], + depResolverPreferences = [], + depResolverPreferenceDefault = PreferLatestForSelected, + depResolverInstalledPkgIndex = installedPkgIndex, + depResolverSourcePkgIndex = sourcePkgIndex, + depResolverReorderGoals = False, + depResolverIndependentGoals = False, + depResolverAvoidReinstalls = False, + depResolverShadowPkgs = False, + depResolverStrongFlags = False, + depResolverMaxBackjumps = Nothing + } + +addTargets :: [PackageName] + -> DepResolverParams -> DepResolverParams +addTargets extraTargets params = + params { + depResolverTargets = extraTargets ++ depResolverTargets params + } + +addConstraints :: [PackageConstraint] + -> DepResolverParams -> DepResolverParams +addConstraints extraConstraints params = + params { + depResolverConstraints = extraConstraints + ++ depResolverConstraints params + } + +addPreferences :: [PackagePreference] + -> DepResolverParams -> DepResolverParams +addPreferences extraPreferences params = + params { + depResolverPreferences = extraPreferences + ++ depResolverPreferences params + } + +setPreferenceDefault :: PackagesPreferenceDefault + -> DepResolverParams -> DepResolverParams +setPreferenceDefault preferenceDefault params = + params { + depResolverPreferenceDefault = preferenceDefault + } + +setReorderGoals :: Bool -> DepResolverParams -> DepResolverParams +setReorderGoals b params = + params { + depResolverReorderGoals = b + } + +setIndependentGoals :: Bool -> DepResolverParams -> DepResolverParams 
+setIndependentGoals b params = + params { + depResolverIndependentGoals = b + } + +setAvoidReinstalls :: Bool -> DepResolverParams -> DepResolverParams +setAvoidReinstalls b params = + params { + depResolverAvoidReinstalls = b + } + +setShadowPkgs :: Bool -> DepResolverParams -> DepResolverParams +setShadowPkgs b params = + params { + depResolverShadowPkgs = b + } + +setStrongFlags :: Bool -> DepResolverParams -> DepResolverParams +setStrongFlags b params = + params { + depResolverStrongFlags = b + } + +setMaxBackjumps :: Maybe Int -> DepResolverParams -> DepResolverParams +setMaxBackjumps n params = + params { + depResolverMaxBackjumps = n + } + +-- | Some packages are specific to a given compiler version and should never be +-- upgraded. +dontUpgradeNonUpgradeablePackages :: DepResolverParams -> DepResolverParams +dontUpgradeNonUpgradeablePackages params = + addConstraints extraConstraints params + where + extraConstraints = + [ PackageConstraintInstalled pkgname + | all (/=PackageName "base") (depResolverTargets params) + , pkgname <- map PackageName [ "base", "ghc-prim", "integer-gmp" + , "integer-simple" ] + , isInstalled pkgname ] + -- TODO: the top down resolver chokes on the base constraints + -- below when there are no targets and thus no dep on base. + -- Need to refactor constraints separate from needing packages. + isInstalled = not . null + . 
InstalledPackageIndex.lookupPackageName + (depResolverInstalledPkgIndex params) + +addSourcePackages :: [SourcePackage] + -> DepResolverParams -> DepResolverParams +addSourcePackages pkgs params = + params { + depResolverSourcePkgIndex = + foldl (flip PackageIndex.insert) + (depResolverSourcePkgIndex params) pkgs + } + +hideInstalledPackagesSpecificByInstalledPackageId :: [InstalledPackageId] + -> DepResolverParams + -> DepResolverParams +hideInstalledPackagesSpecificByInstalledPackageId pkgids params = + --TODO: this should work using exclude constraints instead + params { + depResolverInstalledPkgIndex = + foldl' (flip InstalledPackageIndex.deleteInstalledPackageId) + (depResolverInstalledPkgIndex params) pkgids + } + +hideInstalledPackagesSpecificBySourcePackageId :: [PackageId] + -> DepResolverParams + -> DepResolverParams +hideInstalledPackagesSpecificBySourcePackageId pkgids params = + --TODO: this should work using exclude constraints instead + params { + depResolverInstalledPkgIndex = + foldl' (flip InstalledPackageIndex.deleteSourcePackageId) + (depResolverInstalledPkgIndex params) pkgids + } + +hideInstalledPackagesAllVersions :: [PackageName] + -> DepResolverParams -> DepResolverParams +hideInstalledPackagesAllVersions pkgnames params = + --TODO: this should work using exclude constraints instead + params { + depResolverInstalledPkgIndex = + foldl' (flip InstalledPackageIndex.deletePackageName) + (depResolverInstalledPkgIndex params) pkgnames + } + + +hideBrokenInstalledPackages :: DepResolverParams -> DepResolverParams +hideBrokenInstalledPackages params = + hideInstalledPackagesSpecificByInstalledPackageId pkgids params + where + pkgids = map Installed.installedPackageId + . InstalledPackageIndex.reverseDependencyClosure + (depResolverInstalledPkgIndex params) + . map (Installed.installedPackageId . fst) + . 
InstalledPackageIndex.brokenPackages + $ depResolverInstalledPkgIndex params + +-- | Remove upper bounds in dependencies using the policy specified by the +-- 'AllowNewer' argument (all/some/none). +removeUpperBounds :: AllowNewer -> DepResolverParams -> DepResolverParams +removeUpperBounds allowNewer params = + params { + -- NB: It's important to apply 'removeUpperBounds' after + -- 'addSourcePackages'. Otherwise, the packages inserted by + -- 'addSourcePackages' won't have upper bounds in dependencies relaxed. + + depResolverSourcePkgIndex = sourcePkgIndex' + } + where + sourcePkgIndex = depResolverSourcePkgIndex params + sourcePkgIndex' = case allowNewer of + AllowNewerNone -> sourcePkgIndex + AllowNewerAll -> fmap relaxAllPackageDeps sourcePkgIndex + AllowNewerSome pkgs -> fmap (relaxSomePackageDeps pkgs) sourcePkgIndex + + relaxAllPackageDeps :: SourcePackage -> SourcePackage + relaxAllPackageDeps = onAllBuildDepends doRelax + where + doRelax (Dependency pkgName verRange) = + Dependency pkgName (removeUpperBound verRange) + + relaxSomePackageDeps :: [PackageName] -> SourcePackage -> SourcePackage + relaxSomePackageDeps pkgNames = onAllBuildDepends doRelax + where + doRelax d@(Dependency pkgName verRange) + | pkgName `elem` pkgNames = Dependency pkgName + (removeUpperBound verRange) + | otherwise = d + + -- Walk a 'GenericPackageDescription' and apply 'f' to all 'build-depends' + -- fields. 
+ onAllBuildDepends :: (Dependency -> Dependency) + -> SourcePackage -> SourcePackage + onAllBuildDepends f srcPkg = srcPkg' + where + gpd = packageDescription srcPkg + pd = PD.packageDescription gpd + condLib = PD.condLibrary gpd + condExes = PD.condExecutables gpd + condTests = PD.condTestSuites gpd + condBenchs = PD.condBenchmarks gpd + + f' = onBuildInfo f + onBuildInfo g bi = bi + { targetBuildDepends = map g (targetBuildDepends bi) } + + onLibrary lib = lib { PD.libBuildInfo = f' $ PD.libBuildInfo lib } + onExecutable exe = exe { PD.buildInfo = f' $ PD.buildInfo exe } + onTestSuite tst = tst { PD.testBuildInfo = f' $ PD.testBuildInfo tst } + onBenchmark bmk = bmk { PD.benchmarkBuildInfo = + f' $ PD.benchmarkBuildInfo bmk } + + srcPkg' = srcPkg { packageDescription = gpd' } + gpd' = gpd { + PD.packageDescription = pd', + PD.condLibrary = condLib', + PD.condExecutables = condExes', + PD.condTestSuites = condTests', + PD.condBenchmarks = condBenchs' + } + pd' = pd { + PD.buildDepends = map f (PD.buildDepends pd), + PD.library = fmap onLibrary (PD.library pd), + PD.executables = map onExecutable (PD.executables pd), + PD.testSuites = map onTestSuite (PD.testSuites pd), + PD.benchmarks = map onBenchmark (PD.benchmarks pd) + } + condLib' = fmap (onCondTree onLibrary) condLib + condExes' = map (mapSnd $ onCondTree onExecutable) condExes + condTests' = map (mapSnd $ onCondTree onTestSuite) condTests + condBenchs' = map (mapSnd $ onCondTree onBenchmark) condBenchs + + mapSnd :: (a -> b) -> (c,a) -> (c,b) + mapSnd = fmap + + onCondTree :: (a -> b) -> PD.CondTree v [Dependency] a + -> PD.CondTree v [Dependency] b + onCondTree g = mapCondTree g (map f) id + + +upgradeDependencies :: DepResolverParams -> DepResolverParams +upgradeDependencies = setPreferenceDefault PreferAllLatest + + +reinstallTargets :: DepResolverParams -> DepResolverParams +reinstallTargets params = + hideInstalledPackagesAllVersions (depResolverTargets params) params + + +standardInstallPolicy :: 
InstalledPackageIndex + -> SourcePackageDb + -> [PackageSpecifier SourcePackage] + -> DepResolverParams +standardInstallPolicy + installedPkgIndex (SourcePackageDb sourcePkgIndex sourcePkgPrefs) + pkgSpecifiers + + = addPreferences + [ PackageVersionPreference name ver + | (name, ver) <- Map.toList sourcePkgPrefs ] + + . addConstraints + (concatMap pkgSpecifierConstraints pkgSpecifiers) + + . addTargets + (map pkgSpecifierTarget pkgSpecifiers) + + . hideInstalledPackagesSpecificBySourcePackageId + [ packageId pkg | SpecificSourcePackage pkg <- pkgSpecifiers ] + + . addSourcePackages + [ pkg | SpecificSourcePackage pkg <- pkgSpecifiers ] + + $ basicDepResolverParams + installedPkgIndex sourcePkgIndex + +applySandboxInstallPolicy :: SandboxPackageInfo + -> DepResolverParams + -> DepResolverParams +applySandboxInstallPolicy + (SandboxPackageInfo modifiedDeps otherDeps allSandboxPkgs _allDeps) + params + + = addPreferences [ PackageInstalledPreference n PreferInstalled + | n <- installedNotModified ] + + . addTargets installedNotModified + + . addPreferences + [ PackageVersionPreference (packageName pkg) + (thisVersion (packageVersion pkg)) | pkg <- otherDeps ] + + . addConstraints + [ PackageConstraintVersion (packageName pkg) + (thisVersion (packageVersion pkg)) | pkg <- modifiedDeps ] + + . addTargets [ packageName pkg | pkg <- modifiedDeps ] + + . hideInstalledPackagesSpecificBySourcePackageId + [ packageId pkg | pkg <- modifiedDeps ] + + -- We don't need to add source packages for add-source deps to the + -- 'installedPkgIndex' since 'getSourcePackages' did that for us. + + $ params + + where + installedPkgIds = + map fst . 
InstalledPackageIndex.allPackagesBySourcePackageId + $ allSandboxPkgs + modifiedPkgIds = map packageId modifiedDeps + installedNotModified = [ packageName pkg | pkg <- installedPkgIds, + pkg `notElem` modifiedPkgIds ] + +-- ------------------------------------------------------------ +-- * Interface to the standard resolver +-- ------------------------------------------------------------ + +chooseSolver :: Verbosity -> PreSolver -> CompilerInfo -> IO Solver +chooseSolver _ AlwaysTopDown _ = return TopDown +chooseSolver _ AlwaysModular _ = return Modular +chooseSolver verbosity Choose cinfo = do + let (CompilerId f v) = compilerInfoId cinfo + chosenSolver | f == GHC && v <= Version [7] [] = TopDown + | otherwise = Modular + msg TopDown = warn verbosity "Falling back to topdown solver for GHC < 7." + msg Modular = info verbosity "Choosing modular solver." + msg chosenSolver + return chosenSolver + +runSolver :: Solver -> SolverConfig -> DependencyResolver +runSolver TopDown = const topDownResolver -- TODO: warn about unsupported options +runSolver Modular = modularResolver + +-- | Run the dependency solver. +-- +-- Since this is potentially an expensive operation, the result is wrapped in a +-- a 'Progress' structure that can be unfolded to provide progress information, +-- logging messages and the final result or an error. +-- +resolveDependencies :: Platform + -> CompilerInfo + -> Solver + -> DepResolverParams + -> Progress String String InstallPlan + + --TODO: is this needed here? 
see dontUpgradeNonUpgradeablePackages +resolveDependencies platform comp _solver params + | null (depResolverTargets params) + = return (mkInstallPlan platform comp []) + +resolveDependencies platform comp solver params = + + Step (debugDepResolverParams finalparams) + $ fmap (mkInstallPlan platform comp) + $ runSolver solver (SolverConfig reorderGoals indGoals noReinstalls + shadowing strFlags maxBkjumps) + platform comp installedPkgIndex sourcePkgIndex + preferences constraints targets + where + + finalparams @ (DepResolverParams + targets constraints + prefs defpref + installedPkgIndex + sourcePkgIndex + reorderGoals + indGoals + noReinstalls + shadowing + strFlags + maxBkjumps) = dontUpgradeNonUpgradeablePackages + -- TODO: + -- The modular solver can properly deal with broken + -- packages and won't select them. So the + -- 'hideBrokenInstalledPackages' function should be moved + -- into a module that is specific to the top-down solver. + . (if solver /= Modular then hideBrokenInstalledPackages + else id) + $ params + + preferences = interpretPackagesPreference + (Set.fromList targets) defpref prefs + +-- | Make an install plan from the output of the dep resolver. +-- It checks that the plan is valid, or it's an error in the dep resolver. +-- +mkInstallPlan :: Platform + -> CompilerInfo + -> [InstallPlan.PlanPackage] -> InstallPlan +mkInstallPlan platform comp pkgIndex = + let index = InstalledPackageIndex.fromList pkgIndex in + case InstallPlan.new platform comp index of + Right plan -> plan + Left problems -> error $ unlines $ + "internal error: could not construct a valid install plan." + : "The proposed (invalid) plan contained the following problems:" + : map InstallPlan.showPlanProblem problems + ++ "Proposed plan:" + : [InstallPlan.showPlanIndex index] + + +-- | Give an interpretation to the global 'PackagesPreference' as +-- specific per-package 'PackageVersionPreference'. 
+-- +interpretPackagesPreference :: Set PackageName + -> PackagesPreferenceDefault + -> [PackagePreference] + -> (PackageName -> PackagePreferences) +interpretPackagesPreference selected defaultPref prefs = + \pkgname -> PackagePreferences (versionPref pkgname) (installPref pkgname) + + where + versionPref pkgname = + fromMaybe anyVersion (Map.lookup pkgname versionPrefs) + versionPrefs = Map.fromList + [ (pkgname, pref) + | PackageVersionPreference pkgname pref <- prefs ] + + installPref pkgname = + fromMaybe (installPrefDefault pkgname) (Map.lookup pkgname installPrefs) + installPrefs = Map.fromList + [ (pkgname, pref) + | PackageInstalledPreference pkgname pref <- prefs ] + installPrefDefault = case defaultPref of + PreferAllLatest -> \_ -> PreferLatest + PreferAllInstalled -> \_ -> PreferInstalled + PreferLatestForSelected -> \pkgname -> + -- When you say cabal install foo, what you really mean is, prefer the + -- latest version of foo, but the installed version of everything else + if pkgname `Set.member` selected then PreferLatest + else PreferInstalled + +-- ------------------------------------------------------------ +-- * Simple resolver that ignores dependencies +-- ------------------------------------------------------------ + +-- | A simplistic method of resolving a list of target package names to +-- available packages. +-- +-- Specifically, it does not consider package dependencies at all. Unlike +-- 'resolveDependencies', no attempt is made to ensure that the selected +-- packages have dependencies that are satisfiable or consistent with +-- each other. +-- +-- It is suitable for tasks such as selecting packages to download for user +-- inspection. It is not suitable for selecting packages to install. +-- +-- Note: if no installed package index is available, it is OK to pass 'mempty'. +-- It simply means preferences for installed packages will be ignored. 
+-- +resolveWithoutDependencies :: DepResolverParams + -> Either [ResolveNoDepsError] [SourcePackage] +resolveWithoutDependencies (DepResolverParams targets constraints + prefs defpref installedPkgIndex sourcePkgIndex + _reorderGoals _indGoals _avoidReinstalls + _shadowing _strFlags _maxBjumps) = + collectEithers (map selectPackage targets) + where + selectPackage :: PackageName -> Either ResolveNoDepsError SourcePackage + selectPackage pkgname + | null choices = Left $! ResolveUnsatisfiable pkgname requiredVersions + | otherwise = Right $! maximumBy bestByPrefs choices + + where + -- Constraints + requiredVersions = packageConstraints pkgname + pkgDependency = Dependency pkgname requiredVersions + choices = PackageIndex.lookupDependency sourcePkgIndex + pkgDependency + + -- Preferences + PackagePreferences preferredVersions preferInstalled + = packagePreferences pkgname + + bestByPrefs = comparing $ \pkg -> + (installPref pkg, versionPref pkg, packageVersion pkg) + installPref = case preferInstalled of + PreferLatest -> const False + PreferInstalled -> not . null + . InstalledPackageIndex.lookupSourcePackageId + installedPkgIndex + . packageId + versionPref pkg = packageVersion pkg `withinRange` preferredVersions + + packageConstraints :: PackageName -> VersionRange + packageConstraints pkgname = + Map.findWithDefault anyVersion pkgname packageVersionConstraintMap + packageVersionConstraintMap = + Map.fromList [ (name, range) + | PackageConstraintVersion name range <- constraints ] + + packagePreferences :: PackageName -> PackagePreferences + packagePreferences = interpretPackagesPreference + (Set.fromList targets) defpref prefs + + +collectEithers :: [Either a b] -> Either [a] [b] +collectEithers = collect . 
partitionEithers + where + collect ([], xs) = Right xs + collect (errs,_) = Left errs + partitionEithers :: [Either a b] -> ([a],[b]) + partitionEithers = foldr (either left right) ([],[]) + where + left a (l, r) = (a:l, r) + right a (l, r) = (l, a:r) + +-- | Errors for 'resolveWithoutDependencies'. +-- +data ResolveNoDepsError = + + -- | A package name which cannot be resolved to a specific package. + -- Also gives the constraint on the version and whether there was + -- a constraint on the package being installed. + ResolveUnsatisfiable PackageName VersionRange + +instance Show ResolveNoDepsError where + show (ResolveUnsatisfiable name ver) = + "There is no available version of " ++ display name + ++ " that satisfies " ++ display (simplifyVersionRange ver) diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Exec.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Exec.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Exec.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Exec.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,122 @@ +{-# LANGUAGE CPP #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.Exec +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- Implementation of the 'exec' command. Runs an arbitrary executable in an +-- environment suitable for making use of the sandbox. 
+----------------------------------------------------------------------------- + +module Distribution.Client.Exec ( exec + ) where + +import qualified Distribution.Simple.GHC as GHC +import qualified Distribution.Simple.GHCJS as GHCJS + +import Distribution.Client.Sandbox (getSandboxConfigFilePath) +import Distribution.Client.Sandbox.PackageEnvironment (sandboxPackageDBPath) +import Distribution.Client.Sandbox.Types (UseSandbox (..)) + +import Distribution.Simple.Compiler (Compiler, CompilerFlavor(..), compilerFlavor) +import Distribution.Simple.Program (ghcProgram, ghcjsProgram, lookupProgram) +import Distribution.Simple.Program.Db (ProgramDb, requireProgram, modifyProgramSearchPath) +import Distribution.Simple.Program.Find (ProgramSearchPathEntry(..)) +import Distribution.Simple.Program.Run (programInvocation, runProgramInvocation) +import Distribution.Simple.Program.Types ( simpleProgram, ConfiguredProgram(..) ) +import Distribution.Simple.Utils (die) + +import Distribution.System (Platform) +import Distribution.Verbosity (Verbosity) + +import System.FilePath (searchPathSeparator, ()) +#if !MIN_VERSION_base(4,8,0) +import Control.Applicative ((<$>)) +import Data.Monoid (mempty) +#endif + + +-- | Execute the given command in the package's environment. +-- +-- The given command is executed with GHC configured to use the correct +-- package database and with the sandbox bin directory added to the PATH. 
+exec :: Verbosity + -> UseSandbox + -> Compiler + -> Platform + -> ProgramDb + -> [String] + -> IO () +exec verbosity useSandbox comp platform programDb extraArgs = + case extraArgs of + (exe:args) -> do + program <- requireProgram' verbosity useSandbox programDb exe + env <- ((++) (programOverrideEnv program)) <$> environmentOverrides + let invocation = programInvocation + program { programOverrideEnv = env } + args + runProgramInvocation verbosity invocation + + [] -> die "Please specify an executable to run" + where + environmentOverrides = + case useSandbox of + NoSandbox -> return [] + (UseSandbox sandboxDir) -> + sandboxEnvironment verbosity sandboxDir comp platform programDb + + +-- | Return the package's sandbox environment. +-- +-- The environment sets GHC_PACKAGE_PATH so that GHC will use the sandbox. +sandboxEnvironment :: Verbosity + -> FilePath + -> Compiler + -> Platform + -> ProgramDb + -> IO [(String, Maybe String)] +sandboxEnvironment verbosity sandboxDir comp platform programDb = + case compilerFlavor comp of + GHC -> env GHC.getGlobalPackageDB ghcProgram "GHC_PACKAGE_PATH" + GHCJS -> env GHCJS.getGlobalPackageDB ghcjsProgram "GHCJS_PACKAGE_PATH" + _ -> die "exec only works with GHC and GHCJS" + where + env getGlobalPackageDB hcProgram packagePathEnvVar = do + let Just program = lookupProgram hcProgram programDb + gDb <- getGlobalPackageDB verbosity program + sandboxConfigFilePath <- getSandboxConfigFilePath mempty + let compilerPackagePath = hcPackagePath gDb + return [ (packagePathEnvVar, compilerPackagePath) + , ("CABAL_SANDBOX_PACKAGE_PATH", compilerPackagePath) + , ("CABAL_SANDBOX_CONFIG", Just sandboxConfigFilePath) + ] + + hcPackagePath gDb = + let s = sandboxPackageDBPath sandboxDir comp platform + in Just $ prependToSearchPath gDb s + + prependToSearchPath path newValue = + newValue ++ [searchPathSeparator] ++ path + + +-- | Check that a program is configured and available to be run. 
If +-- a sandbox is available check in the sandbox's directory. +requireProgram' :: Verbosity + -> UseSandbox + -> ProgramDb + -> String + -> IO ConfiguredProgram +requireProgram' verbosity useSandbox programDb exe = do + (program, _) <- requireProgram + verbosity + (simpleProgram exe) + updateSearchPath + return program + where + updateSearchPath = + flip modifyProgramSearchPath programDb $ \searchPath -> + case useSandbox of + NoSandbox -> searchPath + UseSandbox sandboxDir -> + ProgramSearchPathDir (sandboxDir "bin") : searchPath diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Fetch.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Fetch.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Fetch.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Fetch.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,195 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.Fetch +-- Copyright : (c) David Himmelstrup 2005 +-- Duncan Coutts 2011 +-- License : BSD-like +-- +-- Maintainer : cabal-devel@gmail.com +-- Stability : provisional +-- Portability : portable +-- +-- The cabal fetch command +----------------------------------------------------------------------------- +module Distribution.Client.Fetch ( + fetch, + ) where + +import Distribution.Client.Types +import Distribution.Client.Targets +import Distribution.Client.FetchUtils hiding (fetchPackage) +import Distribution.Client.Dependency +import Distribution.Client.IndexUtils as IndexUtils + ( getSourcePackages, getInstalledPackages ) +import qualified Distribution.Client.InstallPlan as InstallPlan +import Distribution.Client.Setup + ( GlobalFlags(..), FetchFlags(..) 
) + +import Distribution.Package + ( packageId ) +import Distribution.Simple.Compiler + ( Compiler, compilerInfo, PackageDBStack ) +import Distribution.Simple.PackageIndex (InstalledPackageIndex) +import Distribution.Simple.Program + ( ProgramConfiguration ) +import Distribution.Simple.Setup + ( fromFlag ) +import Distribution.Simple.Utils + ( die, notice, debug ) +import Distribution.System + ( Platform ) +import Distribution.Text + ( display ) +import Distribution.Verbosity + ( Verbosity ) + +import Control.Monad + ( filterM ) + +-- ------------------------------------------------------------ +-- * The fetch command +-- ------------------------------------------------------------ + +--TODO: +-- * add fetch -o support +-- * support tarball URLs via ad-hoc download cache (or in -o mode?) +-- * suggest using --no-deps, unpack or fetch -o if deps cannot be satisfied +-- * Port various flags from install: +-- * --updage-dependencies +-- * --constraint and --preference +-- * --only-dependencies, but note it conflicts with --no-deps + + +-- | Fetch a list of packages and their dependencies. +-- +fetch :: Verbosity + -> PackageDBStack + -> [Repo] + -> Compiler + -> Platform + -> ProgramConfiguration + -> GlobalFlags + -> FetchFlags + -> [UserTarget] + -> IO () +fetch verbosity _ _ _ _ _ _ _ [] = + notice verbosity "No packages requested. Nothing to do." + +fetch verbosity packageDBs repos comp platform conf + globalFlags fetchFlags userTargets = do + + mapM_ checkTarget userTargets + + installedPkgIndex <- getInstalledPackages verbosity comp packageDBs conf + sourcePkgDb <- getSourcePackages verbosity repos + + pkgSpecifiers <- resolveUserTargets verbosity + (fromFlag $ globalWorldFile globalFlags) + (packageIndex sourcePkgDb) + userTargets + + pkgs <- planPackages + verbosity comp platform fetchFlags + installedPkgIndex sourcePkgDb pkgSpecifiers + + pkgs' <- filterM (fmap not . isFetched . 
packageSource) pkgs + if null pkgs' + --TODO: when we add support for remote tarballs then this message + -- will need to be changed because for remote tarballs we fetch them + -- at the earlier phase. + then notice verbosity $ "No packages need to be fetched. " + ++ "All the requested packages are already local " + ++ "or cached locally." + else if dryRun + then notice verbosity $ unlines $ + "The following packages would be fetched:" + : map (display . packageId) pkgs' + + else mapM_ (fetchPackage verbosity . packageSource) pkgs' + + where + dryRun = fromFlag (fetchDryRun fetchFlags) + +planPackages :: Verbosity + -> Compiler + -> Platform + -> FetchFlags + -> InstalledPackageIndex + -> SourcePackageDb + -> [PackageSpecifier SourcePackage] + -> IO [SourcePackage] +planPackages verbosity comp platform fetchFlags + installedPkgIndex sourcePkgDb pkgSpecifiers + + | includeDependencies = do + solver <- chooseSolver verbosity + (fromFlag (fetchSolver fetchFlags)) (compilerInfo comp) + notice verbosity "Resolving dependencies..." + installPlan <- foldProgress logMsg die return $ + resolveDependencies + platform (compilerInfo comp) + solver + resolverParams + + -- The packages we want to fetch are those packages the 'InstallPlan' + -- that are in the 'InstallPlan.Configured' state. + return + [ pkg + | (InstallPlan.Configured (InstallPlan.ConfiguredPackage pkg _ _ _)) + <- InstallPlan.toList installPlan ] + + | otherwise = + either (die . unlines . map show) return $ + resolveWithoutDependencies resolverParams + + where + resolverParams = + + setMaxBackjumps (if maxBackjumps < 0 then Nothing + else Just maxBackjumps) + + . setIndependentGoals independentGoals + + . setReorderGoals reorderGoals + + . setShadowPkgs shadowPkgs + + . setStrongFlags strongFlags + + -- Reinstall the targets given on the command line so that the dep + -- resolver will decide that they need fetching, even if they're + -- already installed. 
Since we want to get the source packages of + -- things we might have installed (but not have the sources for). + . reinstallTargets + + $ standardInstallPolicy installedPkgIndex sourcePkgDb pkgSpecifiers + + includeDependencies = fromFlag (fetchDeps fetchFlags) + logMsg message rest = debug verbosity message >> rest + + reorderGoals = fromFlag (fetchReorderGoals fetchFlags) + independentGoals = fromFlag (fetchIndependentGoals fetchFlags) + shadowPkgs = fromFlag (fetchShadowPkgs fetchFlags) + strongFlags = fromFlag (fetchStrongFlags fetchFlags) + maxBackjumps = fromFlag (fetchMaxBackjumps fetchFlags) + + +checkTarget :: UserTarget -> IO () +checkTarget target = case target of + UserTargetRemoteTarball _uri + -> die $ "The 'fetch' command does not yet support remote tarballs. " + ++ "In the meantime you can use the 'unpack' commands." + _ -> return () + +fetchPackage :: Verbosity -> PackageLocation a -> IO () +fetchPackage verbosity pkgsrc = case pkgsrc of + LocalUnpackedPackage _dir -> return () + LocalTarballPackage _file -> return () + + RemoteTarballPackage _uri _ -> + die $ "The 'fetch' command does not yet support remote tarballs. " + ++ "In the meantime you can use the 'unpack' commands." 
+ + RepoTarballPackage repo pkgid _ -> do + _ <- fetchRepoTarball verbosity repo pkgid + return () diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/FetchUtils.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/FetchUtils.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/FetchUtils.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/FetchUtils.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,192 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.FetchUtils +-- Copyright : (c) David Himmelstrup 2005 +-- Duncan Coutts 2011 +-- License : BSD-like +-- +-- Maintainer : cabal-devel@gmail.com +-- Stability : provisional +-- Portability : portable +-- +-- Functions for fetching packages +----------------------------------------------------------------------------- +module Distribution.Client.FetchUtils ( + + -- * fetching packages + fetchPackage, + isFetched, + checkFetched, + + -- ** specifically for repo packages + fetchRepoTarball, + + -- * fetching other things + downloadIndex, + ) where + +import Distribution.Client.Types +import Distribution.Client.HttpUtils + ( downloadURI, isOldHackageURI, DownloadResult(..) 
) + +import Distribution.Package + ( PackageId, packageName, packageVersion ) +import Distribution.Simple.Utils + ( notice, info, setupMessage ) +import Distribution.Text + ( display ) +import Distribution.Verbosity + ( Verbosity ) + +import Data.Maybe +import System.Directory + ( doesFileExist, createDirectoryIfMissing, getTemporaryDirectory ) +import System.IO + ( openTempFile, hClose ) +import System.FilePath + ( (), (<.>) ) +import qualified System.FilePath.Posix as FilePath.Posix + ( combine, joinPath ) +import Network.URI + ( URI(uriPath) ) + +-- ------------------------------------------------------------ +-- * Actually fetch things +-- ------------------------------------------------------------ + +-- | Returns @True@ if the package has already been fetched +-- or does not need fetching. +-- +isFetched :: PackageLocation (Maybe FilePath) -> IO Bool +isFetched loc = case loc of + LocalUnpackedPackage _dir -> return True + LocalTarballPackage _file -> return True + RemoteTarballPackage _uri local -> return (isJust local) + RepoTarballPackage repo pkgid _ -> doesFileExist (packageFile repo pkgid) + + +checkFetched :: PackageLocation (Maybe FilePath) + -> IO (Maybe (PackageLocation FilePath)) +checkFetched loc = case loc of + LocalUnpackedPackage dir -> + return (Just $ LocalUnpackedPackage dir) + LocalTarballPackage file -> + return (Just $ LocalTarballPackage file) + RemoteTarballPackage uri (Just file) -> + return (Just $ RemoteTarballPackage uri file) + RepoTarballPackage repo pkgid (Just file) -> + return (Just $ RepoTarballPackage repo pkgid file) + + RemoteTarballPackage _uri Nothing -> return Nothing + RepoTarballPackage repo pkgid Nothing -> do + let file = packageFile repo pkgid + exists <- doesFileExist file + if exists + then return (Just $ RepoTarballPackage repo pkgid file) + else return Nothing + + +-- | Fetch a package if we don't have it already. 
+-- +fetchPackage :: Verbosity + -> PackageLocation (Maybe FilePath) + -> IO (PackageLocation FilePath) +fetchPackage verbosity loc = case loc of + LocalUnpackedPackage dir -> + return (LocalUnpackedPackage dir) + LocalTarballPackage file -> + return (LocalTarballPackage file) + RemoteTarballPackage uri (Just file) -> + return (RemoteTarballPackage uri file) + RepoTarballPackage repo pkgid (Just file) -> + return (RepoTarballPackage repo pkgid file) + + RemoteTarballPackage uri Nothing -> do + path <- downloadTarballPackage uri + return (RemoteTarballPackage uri path) + RepoTarballPackage repo pkgid Nothing -> do + local <- fetchRepoTarball verbosity repo pkgid + return (RepoTarballPackage repo pkgid local) + where + downloadTarballPackage uri = do + notice verbosity ("Downloading " ++ show uri) + tmpdir <- getTemporaryDirectory + (path, hnd) <- openTempFile tmpdir "cabal-.tar.gz" + hClose hnd + _ <- downloadURI verbosity uri path + return path + + +-- | Fetch a repo package if we don't have it already. +-- +fetchRepoTarball :: Verbosity -> Repo -> PackageId -> IO FilePath +fetchRepoTarball verbosity repo pkgid = do + fetched <- doesFileExist (packageFile repo pkgid) + if fetched + then do info verbosity $ display pkgid ++ " has already been downloaded." + return (packageFile repo pkgid) + else do setupMessage verbosity "Downloading" pkgid + downloadRepoPackage + where + downloadRepoPackage = case repoKind repo of + Right LocalRepo -> return (packageFile repo pkgid) + + Left remoteRepo -> do + let uri = packageURI remoteRepo pkgid + dir = packageDir repo pkgid + path = packageFile repo pkgid + createDirectoryIfMissing True dir + _ <- downloadURI verbosity uri path + return path + +-- | Downloads an index file to [config-dir/packages/serv-id]. 
+-- +downloadIndex :: Verbosity -> RemoteRepo -> FilePath -> IO DownloadResult +downloadIndex verbosity repo cacheDir = do + let uri = (remoteRepoURI repo) { + uriPath = uriPath (remoteRepoURI repo) + `FilePath.Posix.combine` "00-index.tar.gz" + } + path = cacheDir "00-index" <.> "tar.gz" + createDirectoryIfMissing True cacheDir + downloadURI verbosity uri path + + +-- ------------------------------------------------------------ +-- * Path utilities +-- ------------------------------------------------------------ + +-- | Generate the full path to the locally cached copy of +-- the tarball for a given @PackageIdentifer@. +-- +packageFile :: Repo -> PackageId -> FilePath +packageFile repo pkgid = packageDir repo pkgid + display pkgid + <.> "tar.gz" + +-- | Generate the full path to the directory where the local cached copy of +-- the tarball for a given @PackageIdentifer@ is stored. +-- +packageDir :: Repo -> PackageId -> FilePath +packageDir repo pkgid = repoLocalDir repo + display (packageName pkgid) + display (packageVersion pkgid) + +-- | Generate the URI of the tarball for a given package. 
+-- +packageURI :: RemoteRepo -> PackageId -> URI +packageURI repo pkgid | isOldHackageURI (remoteRepoURI repo) = + (remoteRepoURI repo) { + uriPath = FilePath.Posix.joinPath + [uriPath (remoteRepoURI repo) + ,display (packageName pkgid) + ,display (packageVersion pkgid) + ,display pkgid <.> "tar.gz"] + } +packageURI repo pkgid = + (remoteRepoURI repo) { + uriPath = FilePath.Posix.joinPath + [uriPath (remoteRepoURI repo) + ,"package" + ,display pkgid <.> "tar.gz"] + } diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Freeze.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Freeze.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Freeze.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Freeze.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,237 @@ +{-# LANGUAGE CPP #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.Freeze +-- Copyright : (c) David Himmelstrup 2005 +-- Duncan Coutts 2011 +-- License : BSD-like +-- +-- Maintainer : cabal-devel@gmail.com +-- Stability : provisional +-- Portability : portable +-- +-- The cabal freeze command +----------------------------------------------------------------------------- +module Distribution.Client.Freeze ( + freeze, + ) where + +import Distribution.Client.Config ( SavedConfig(..) ) +import Distribution.Client.Types +import Distribution.Client.Targets +import Distribution.Client.Dependency +import Distribution.Client.IndexUtils as IndexUtils + ( getSourcePackages, getInstalledPackages ) +import Distribution.Client.InstallPlan + ( PlanPackage ) +import qualified Distribution.Client.InstallPlan as InstallPlan +import Distribution.Client.Setup + ( GlobalFlags(..), FreezeFlags(..), ConfigExFlags(..) 
) +import Distribution.Client.Sandbox.PackageEnvironment + ( loadUserConfig, pkgEnvSavedConfig, showPackageEnvironment, + userPackageEnvironmentFile ) +import Distribution.Client.Sandbox.Types + ( SandboxPackageInfo(..) ) + +import Distribution.Package + ( Package, PackageIdentifier, packageId, packageName, packageVersion ) +import Distribution.Simple.Compiler + ( Compiler, compilerInfo, PackageDBStack ) +import Distribution.Simple.PackageIndex (InstalledPackageIndex) +import qualified Distribution.Client.PackageIndex as PackageIndex +import Distribution.Simple.Program + ( ProgramConfiguration ) +import Distribution.Simple.Setup + ( fromFlag, fromFlagOrDefault ) +import Distribution.Simple.Utils + ( die, notice, debug, writeFileAtomic ) +import Distribution.System + ( Platform ) +import Distribution.Text + ( display ) +import Distribution.Verbosity + ( Verbosity ) + +import Control.Monad + ( when ) +import qualified Data.ByteString.Lazy.Char8 as BS.Char8 +#if !MIN_VERSION_base(4,8,0) +import Data.Monoid + ( mempty ) +#endif +import Data.Version + ( showVersion ) +import Distribution.Version + ( thisVersion ) + +-- ------------------------------------------------------------ +-- * The freeze command +-- ------------------------------------------------------------ + +-- | Freeze all of the dependencies by writing a constraints section +-- constraining each dependency to an exact version. 
+-- +freeze :: Verbosity + -> PackageDBStack + -> [Repo] + -> Compiler + -> Platform + -> ProgramConfiguration + -> Maybe SandboxPackageInfo + -> GlobalFlags + -> FreezeFlags + -> IO () +freeze verbosity packageDBs repos comp platform conf mSandboxPkgInfo + globalFlags freezeFlags = do + + installedPkgIndex <- getInstalledPackages verbosity comp packageDBs conf + sourcePkgDb <- getSourcePackages verbosity repos + + pkgSpecifiers <- resolveUserTargets verbosity + (fromFlag $ globalWorldFile globalFlags) + (packageIndex sourcePkgDb) + [UserTargetLocalDir "."] + + sanityCheck pkgSpecifiers + pkgs <- planPackages + verbosity comp platform mSandboxPkgInfo freezeFlags + installedPkgIndex sourcePkgDb pkgSpecifiers + + if null pkgs + then notice verbosity $ "No packages to be frozen. " + ++ "As this package has no dependencies." + else if dryRun + then notice verbosity $ unlines $ + "The following packages would be frozen:" + : formatPkgs pkgs + + else freezePackages verbosity pkgs + + where + dryRun = fromFlag (freezeDryRun freezeFlags) + + sanityCheck pkgSpecifiers = do + when (not . null $ [n | n@(NamedPackage _ _) <- pkgSpecifiers]) $ + die $ "internal error: 'resolveUserTargets' returned " + ++ "unexpected named package specifiers!" + when (length pkgSpecifiers /= 1) $ + die $ "internal error: 'resolveUserTargets' returned " + ++ "unexpected source package specifiers!" + +planPackages :: Verbosity + -> Compiler + -> Platform + -> Maybe SandboxPackageInfo + -> FreezeFlags + -> InstalledPackageIndex + -> SourcePackageDb + -> [PackageSpecifier SourcePackage] + -> IO [PlanPackage] +planPackages verbosity comp platform mSandboxPkgInfo freezeFlags + installedPkgIndex sourcePkgDb pkgSpecifiers = do + + solver <- chooseSolver verbosity + (fromFlag (freezeSolver freezeFlags)) (compilerInfo comp) + notice verbosity "Resolving dependencies..." 
+ + installPlan <- foldProgress logMsg die return $ + resolveDependencies + platform (compilerInfo comp) + solver + resolverParams + + return $ either id + (error "planPackages: installPlan contains broken packages") + (pruneInstallPlan installPlan pkgSpecifiers) + + where + resolverParams = + + setMaxBackjumps (if maxBackjumps < 0 then Nothing + else Just maxBackjumps) + + . setIndependentGoals independentGoals + + . setReorderGoals reorderGoals + + . setShadowPkgs shadowPkgs + + . setStrongFlags strongFlags + + . addConstraints + [ PackageConstraintStanzas (pkgSpecifierTarget pkgSpecifier) stanzas + | pkgSpecifier <- pkgSpecifiers ] + + . maybe id applySandboxInstallPolicy mSandboxPkgInfo + + $ standardInstallPolicy installedPkgIndex sourcePkgDb pkgSpecifiers + + logMsg message rest = debug verbosity message >> rest + + stanzas = concat + [ if testsEnabled then [TestStanzas] else [] + , if benchmarksEnabled then [BenchStanzas] else [] + ] + testsEnabled = fromFlagOrDefault False $ freezeTests freezeFlags + benchmarksEnabled = fromFlagOrDefault False $ freezeBenchmarks freezeFlags + + reorderGoals = fromFlag (freezeReorderGoals freezeFlags) + independentGoals = fromFlag (freezeIndependentGoals freezeFlags) + shadowPkgs = fromFlag (freezeShadowPkgs freezeFlags) + strongFlags = fromFlag (freezeStrongFlags freezeFlags) + maxBackjumps = fromFlag (freezeMaxBackjumps freezeFlags) + + +-- | Remove all unneeded packages from an install plan. +-- +-- A package is unneeded if it is either +-- +-- 1) the package that we are freezing, or +-- +-- 2) not a dependency (directly or transitively) of the package we are +-- freezing. This is useful for removing previously installed packages +-- which are no longer required from the install plan. +pruneInstallPlan :: InstallPlan.InstallPlan + -> [PackageSpecifier SourcePackage] + -> Either [PlanPackage] [(PlanPackage, [PackageIdentifier])] +pruneInstallPlan installPlan pkgSpecifiers = + mapLeft (removeSelf pkgIds . 
PackageIndex.allPackages) $ + PackageIndex.dependencyClosure pkgIdx pkgIds + where + pkgIdx = PackageIndex.fromList $ InstallPlan.toList installPlan + pkgIds = [ packageId pkg | SpecificSourcePackage pkg <- pkgSpecifiers ] + mapLeft f (Left v) = Left $ f v + mapLeft _ (Right v) = Right v + removeSelf [thisPkg] = filter (\pp -> packageId pp /= thisPkg) + removeSelf _ = + error $ "internal error: 'pruneInstallPlan' given " + ++ "unexpected package specifiers!" + + +freezePackages :: Package pkg => Verbosity -> [pkg] -> IO () +freezePackages verbosity pkgs = do + pkgEnv <- fmap (createPkgEnv . addFrozenConstraints) $ + loadUserConfig verbosity "" + writeFileAtomic userPackageEnvironmentFile $ showPkgEnv pkgEnv + where + addFrozenConstraints config = + config { + savedConfigureExFlags = (savedConfigureExFlags config) { + configExConstraints = constraints pkgs + } + } + constraints = map $ pkgIdToConstraint . packageId + where + pkgIdToConstraint pkg = + UserConstraintVersion (packageName pkg) + (thisVersion $ packageVersion pkg) + createPkgEnv config = mempty { pkgEnvSavedConfig = config } + showPkgEnv = BS.Char8.pack . showPackageEnvironment + + +formatPkgs :: Package pkg => [pkg] -> [String] +formatPkgs = map $ showPkg . packageId + where + showPkg pid = name pid ++ " == " ++ version pid + name = display . packageName + version = showVersion . 
packageVersion diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Get.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Get.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Get.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Get.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,355 @@ +{-# LANGUAGE CPP #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.Get +-- Copyright : (c) Andrea Vezzosi 2008 +-- Duncan Coutts 2011 +-- John Millikin 2012 +-- License : BSD-like +-- +-- Maintainer : cabal-devel@haskell.org +-- Stability : provisional +-- Portability : portable +-- +-- The 'cabal get' command. +----------------------------------------------------------------------------- + +module Distribution.Client.Get ( + get + ) where + +import Distribution.Package + ( PackageId, packageId, packageName ) +import Distribution.Simple.Setup + ( Flag(..), fromFlag, fromFlagOrDefault ) +import Distribution.Simple.Utils + ( notice, die, info, writeFileAtomic ) +import Distribution.Verbosity + ( Verbosity ) +import Distribution.Text(display) +import qualified Distribution.PackageDescription as PD + +import Distribution.Client.Setup + ( GlobalFlags(..), GetFlags(..) 
) +import Distribution.Client.Types +import Distribution.Client.Targets +import Distribution.Client.Dependency +import Distribution.Client.FetchUtils +import qualified Distribution.Client.Tar as Tar (extractTarGzFile) +import Distribution.Client.IndexUtils as IndexUtils + ( getSourcePackages ) +import Distribution.Client.Compat.Process + ( readProcessWithExitCode ) +import Distribution.Compat.Exception + ( catchIO ) + +import Control.Exception + ( finally ) +import Control.Monad + ( filterM, forM_, unless, when ) +import Data.List + ( sortBy ) +import qualified Data.Map +import Data.Maybe + ( listToMaybe, mapMaybe ) +#if !MIN_VERSION_base(4,8,0) +import Data.Monoid + ( mempty ) +#endif +import Data.Ord + ( comparing ) +import System.Directory + ( createDirectoryIfMissing, doesDirectoryExist, doesFileExist + , getCurrentDirectory, setCurrentDirectory + ) +import System.Exit + ( ExitCode(..) ) +import System.FilePath + ( (), (<.>), addTrailingPathSeparator ) +import System.Process + ( rawSystem ) + + +-- | Entry point for the 'cabal get' command. +get :: Verbosity + -> [Repo] + -> GlobalFlags + -> GetFlags + -> [UserTarget] + -> IO () +get verbosity _ _ _ [] = + notice verbosity "No packages requested. Nothing to do." + +get verbosity repos globalFlags getFlags userTargets = do + let useFork = case (getSourceRepository getFlags) of + NoFlag -> False + _ -> True + + unless useFork $ + mapM_ checkTarget userTargets + + sourcePkgDb <- getSourcePackages verbosity repos + + pkgSpecifiers <- resolveUserTargets verbosity + (fromFlag $ globalWorldFile globalFlags) + (packageIndex sourcePkgDb) + userTargets + + pkgs <- either (die . unlines . 
map show) return $ + resolveWithoutDependencies + (resolverParams sourcePkgDb pkgSpecifiers) + + unless (null prefix) $ + createDirectoryIfMissing True prefix + + if useFork + then fork pkgs + else unpack pkgs + + where + resolverParams sourcePkgDb pkgSpecifiers = + --TODO: add command-line constraint and preference args for unpack + standardInstallPolicy mempty sourcePkgDb pkgSpecifiers + + prefix = fromFlagOrDefault "" (getDestDir getFlags) + + fork :: [SourcePackage] -> IO () + fork pkgs = do + let kind = fromFlag . getSourceRepository $ getFlags + branchers <- findUsableBranchers + mapM_ (forkPackage verbosity branchers prefix kind) pkgs + + unpack :: [SourcePackage] -> IO () + unpack pkgs = do + forM_ pkgs $ \pkg -> do + location <- fetchPackage verbosity (packageSource pkg) + let pkgid = packageId pkg + descOverride | usePristine = Nothing + | otherwise = packageDescrOverride pkg + case location of + LocalTarballPackage tarballPath -> + unpackPackage verbosity prefix pkgid descOverride tarballPath + + RemoteTarballPackage _tarballURL tarballPath -> + unpackPackage verbosity prefix pkgid descOverride tarballPath + + RepoTarballPackage _repo _pkgid tarballPath -> + unpackPackage verbosity prefix pkgid descOverride tarballPath + + LocalUnpackedPackage _ -> + error "Distribution.Client.Get.unpack: the impossible happened." + where + usePristine = fromFlagOrDefault False (getPristine getFlags) + +checkTarget :: UserTarget -> IO () +checkTarget target = case target of + UserTargetLocalDir dir -> die (notTarball dir) + UserTargetLocalCabalFile file -> die (notTarball file) + _ -> return () + where + notTarball t = + "The 'get' command is for tarball packages. " + ++ "The target '" ++ t ++ "' is not a tarball." 
+ +-- ------------------------------------------------------------ +-- * Unpacking the source tarball +-- ------------------------------------------------------------ + +unpackPackage :: Verbosity -> FilePath -> PackageId + -> PackageDescriptionOverride + -> FilePath -> IO () +unpackPackage verbosity prefix pkgid descOverride pkgPath = do + let pkgdirname = display pkgid + pkgdir = prefix pkgdirname + pkgdir' = addTrailingPathSeparator pkgdir + existsDir <- doesDirectoryExist pkgdir + when existsDir $ die $ + "The directory \"" ++ pkgdir' ++ "\" already exists, not unpacking." + existsFile <- doesFileExist pkgdir + when existsFile $ die $ + "A file \"" ++ pkgdir ++ "\" is in the way, not unpacking." + notice verbosity $ "Unpacking to " ++ pkgdir' + Tar.extractTarGzFile prefix pkgdirname pkgPath + + case descOverride of + Nothing -> return () + Just pkgtxt -> do + let descFilePath = pkgdir display (packageName pkgid) <.> "cabal" + info verbosity $ + "Updating " ++ descFilePath + ++ " with the latest revision from the index." + writeFileAtomic descFilePath pkgtxt + + +-- ------------------------------------------------------------ +-- * Forking the source repository +-- ------------------------------------------------------------ + +data BranchCmd = BranchCmd (Verbosity -> FilePath -> IO ExitCode) + +data Brancher = Brancher + { brancherBinary :: String + , brancherBuildCmd :: PD.SourceRepo -> Maybe BranchCmd + } + +-- | The set of all supported branch drivers. +allBranchers :: [(PD.RepoType, Brancher)] +allBranchers = + [ (PD.Bazaar, branchBzr) + , (PD.Darcs, branchDarcs) + , (PD.Git, branchGit) + , (PD.Mercurial, branchHg) + , (PD.SVN, branchSvn) + ] + +-- | Find which usable branch drivers (selected from 'allBranchers') are +-- available and usable on the local machine. +-- +-- Each driver's main command is run with @--help@, and if the child process +-- exits successfully, that brancher is considered usable. 
+findUsableBranchers :: IO (Data.Map.Map PD.RepoType Brancher) +findUsableBranchers = do + let usable (_, brancher) = flip catchIO (const (return False)) $ do + let cmd = brancherBinary brancher + (exitCode, _, _) <- readProcessWithExitCode cmd ["--help"] "" + return (exitCode == ExitSuccess) + pairs <- filterM usable allBranchers + return (Data.Map.fromList pairs) + +-- | Fork a single package from a remote source repository to the local +-- file system. +forkPackage :: Verbosity + -> Data.Map.Map PD.RepoType Brancher + -- ^ Branchers supported by the local machine. + -> FilePath + -- ^ The directory in which new branches or repositories will + -- be created. + -> (Maybe PD.RepoKind) + -- ^ Which repo to choose. + -> SourcePackage + -- ^ The package to fork. + -> IO () +forkPackage verbosity branchers prefix kind src = do + let desc = PD.packageDescription (packageDescription src) + pkgid = display (packageId src) + pkgname = display (packageName src) + destdir = prefix pkgname + + destDirExists <- doesDirectoryExist destdir + when destDirExists $ do + die ("The directory " ++ show destdir ++ " already exists, not forking.") + + destFileExists <- doesFileExist destdir + when destFileExists $ do + die ("A file " ++ show destdir ++ " is in the way, not forking.") + + let repos = PD.sourceRepos desc + case findBranchCmd branchers repos kind of + Just (BranchCmd io) -> do + exitCode <- io verbosity destdir + case exitCode of + ExitSuccess -> return () + ExitFailure _ -> die ("Couldn't fork package " ++ pkgid) + Nothing -> case repos of + [] -> die ("Package " ++ pkgid + ++ " does not have any source repositories.") + _ -> die ("Package " ++ pkgid + ++ " does not have any usable source repositories.") + +-- | Given a set of possible branchers, and a set of possible source +-- repositories, find a repository that is both 1) likely to be specific to +-- this source version and 2) is supported by the local machine. 
+findBranchCmd :: Data.Map.Map PD.RepoType Brancher -> [PD.SourceRepo] + -> (Maybe PD.RepoKind) -> Maybe BranchCmd +findBranchCmd branchers allRepos maybeKind = cmd where + -- Sort repositories by kind, from This to Head to Unknown. Repositories + -- with equivalent kinds are selected based on the order they appear in + -- the Cabal description file. + repos' = sortBy (comparing thisFirst) allRepos + thisFirst r = case PD.repoKind r of + PD.RepoThis -> 0 :: Int + PD.RepoHead -> case PD.repoTag r of + -- If the type is 'head' but the author specified a tag, they + -- probably meant to create a 'this' repository but screwed up. + Just _ -> 0 + Nothing -> 1 + PD.RepoKindUnknown _ -> 2 + + -- If the user has specified the repo kind, filter out the repositories + -- she's not interested in. + repos = maybe repos' (\k -> filter ((==) k . PD.repoKind) repos') maybeKind + + repoBranchCmd repo = do + t <- PD.repoType repo + brancher <- Data.Map.lookup t branchers + brancherBuildCmd brancher repo + + cmd = listToMaybe (mapMaybe repoBranchCmd repos) + +-- | Branch driver for Bazaar. +branchBzr :: Brancher +branchBzr = Brancher "bzr" $ \repo -> do + src <- PD.repoLocation repo + let args dst = case PD.repoTag repo of + Just tag -> ["branch", src, dst, "-r", "tag:" ++ tag] + Nothing -> ["branch", src, dst] + return $ BranchCmd $ \verbosity dst -> do + notice verbosity ("bzr: branch " ++ show src) + rawSystem "bzr" (args dst) + +-- | Branch driver for Darcs. +branchDarcs :: Brancher +branchDarcs = Brancher "darcs" $ \repo -> do + src <- PD.repoLocation repo + let args dst = case PD.repoTag repo of + Just tag -> ["get", src, dst, "-t", tag] + Nothing -> ["get", src, dst] + return $ BranchCmd $ \verbosity dst -> do + notice verbosity ("darcs: get " ++ show src) + rawSystem "darcs" (args dst) + +-- | Branch driver for Git. 
+branchGit :: Brancher +branchGit = Brancher "git" $ \repo -> do + src <- PD.repoLocation repo + let branchArgs = case PD.repoBranch repo of + Just b -> ["--branch", b] + Nothing -> [] + let postClone dst = case PD.repoTag repo of + Just t -> do + cwd <- getCurrentDirectory + setCurrentDirectory dst + finally + (rawSystem "git" (["checkout", t] ++ branchArgs)) + (setCurrentDirectory cwd) + Nothing -> return ExitSuccess + return $ BranchCmd $ \verbosity dst -> do + notice verbosity ("git: clone " ++ show src) + code <- rawSystem "git" (["clone", src, dst] ++ branchArgs) + case code of + ExitFailure _ -> return code + ExitSuccess -> postClone dst + +-- | Branch driver for Mercurial. +branchHg :: Brancher +branchHg = Brancher "hg" $ \repo -> do + src <- PD.repoLocation repo + let branchArgs = case PD.repoBranch repo of + Just b -> ["--branch", b] + Nothing -> [] + let tagArgs = case PD.repoTag repo of + Just t -> ["--rev", t] + Nothing -> [] + let args dst = ["clone", src, dst] ++ branchArgs ++ tagArgs + return $ BranchCmd $ \verbosity dst -> do + notice verbosity ("hg: clone " ++ show src) + rawSystem "hg" (args dst) + +-- | Branch driver for Subversion. 
+branchSvn :: Brancher +branchSvn = Brancher "svn" $ \repo -> do + src <- PD.repoLocation repo + let args dst = ["checkout", src, dst] + return $ BranchCmd $ \verbosity dst -> do + notice verbosity ("svn: checkout " ++ show src) + rawSystem "svn" (args dst) diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/GZipUtils.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/GZipUtils.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/GZipUtils.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/GZipUtils.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,86 @@ +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE CPP #-} + +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.GZipUtils +-- Copyright : (c) Dmitry Astapov 2010 +-- License : BSD-like +-- +-- Maintainer : cabal-devel@gmail.com +-- Stability : provisional +-- Portability : portable +-- +-- Provides a convenience functions for working with files that may or may not +-- be zipped. +----------------------------------------------------------------------------- +module Distribution.Client.GZipUtils ( + maybeDecompress, + ) where + +import Codec.Compression.Zlib.Internal +import Data.ByteString.Lazy.Internal as BS (ByteString(Empty, Chunk)) + +#if MIN_VERSION_zlib(0,6,0) +import Control.Exception (throw) +import Control.Monad (liftM) +import Control.Monad.ST.Lazy (ST, runST) +import qualified Data.ByteString as Strict +#endif + +-- | Attempts to decompress the `bytes' under the assumption that +-- "data format" error at the very beginning of the stream means +-- that it is already decompressed. Caller should make sanity checks +-- to verify that it is not, in fact, garbage. 
+-- +-- This is to deal with http proxies that lie to us and transparently +-- decompress without removing the content-encoding header. See: +-- +-- +maybeDecompress :: ByteString -> ByteString +#if MIN_VERSION_zlib(0,6,0) +maybeDecompress bytes = runST (go bytes decompressor) + where + decompressor :: DecompressStream (ST s) + decompressor = decompressST gzipOrZlibFormat defaultDecompressParams + + -- DataError at the beginning of the stream probably means that stream is + -- not compressed, so we return it as-is. + -- TODO: alternatively, we might consider looking for the two magic bytes + -- at the beginning of the gzip header. (not an option for zlib, though.) + go :: Monad m => ByteString -> DecompressStream m -> m ByteString + go cs (DecompressOutputAvailable bs k) = liftM (Chunk bs) $ go' cs =<< k + go _ (DecompressStreamEnd _bs ) = return Empty + go _ (DecompressStreamError _err ) = return bytes + go cs (DecompressInputRequired k) = go cs' =<< k c + where + (c, cs') = uncons cs + + -- Once we have received any output though we regard errors as actual errors + -- and we throw them (as pure exceptions). + -- TODO: We could (and should) avoid these pure exceptions. + go' :: Monad m => ByteString -> DecompressStream m -> m ByteString + go' cs (DecompressOutputAvailable bs k) = liftM (Chunk bs) $ go' cs =<< k + go' _ (DecompressStreamEnd _bs ) = return Empty + go' _ (DecompressStreamError err ) = throw err + go' cs (DecompressInputRequired k) = go' cs' =<< k c + where + (c, cs') = uncons cs + + uncons :: ByteString -> (Strict.ByteString, ByteString) + uncons Empty = (Strict.empty, Empty) + uncons (Chunk c cs) = (c, cs) +#else +maybeDecompress bytes = foldStream $ decompressWithErrors gzipOrZlibFormat defaultDecompressParams bytes + where + -- DataError at the beginning of the stream probably means that stream is not compressed. + -- Returning it as-is. 
+ -- TODO: alternatively, we might consider looking for the two magic bytes + -- at the beginning of the gzip header. + foldStream (StreamError _ _) = bytes + foldStream somethingElse = doFold somethingElse + + doFold StreamEnd = BS.Empty + doFold (StreamChunk bs stream) = BS.Chunk bs (doFold stream) + doFold (StreamError _ msg) = error $ "Codec.Compression.Zlib: " ++ msg +#endif diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Haddock.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Haddock.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Haddock.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Haddock.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,69 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.Haddock +-- Copyright : (c) Andrea Vezzosi 2009 +-- License : BSD-like +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- Interfacing with Haddock +-- +----------------------------------------------------------------------------- +module Distribution.Client.Haddock + ( + regenerateHaddockIndex + ) + where + +import Data.List (maximumBy) +import System.Directory (createDirectoryIfMissing, renameFile) +import System.FilePath ((), splitFileName) +import Distribution.Package + ( packageVersion ) +import Distribution.Simple.Haddock (haddockPackagePaths) +import Distribution.Simple.Program (haddockProgram, ProgramConfiguration + , rawSystemProgram, requireProgramVersion) +import Distribution.Version (Version(Version), orLaterVersion) +import Distribution.Verbosity (Verbosity) +import Distribution.Simple.PackageIndex + ( InstalledPackageIndex, allPackagesByName ) +import Distribution.Simple.Utils + ( comparing, debug, installDirectoryContents, withTempDirectory ) +import Distribution.InstalledPackageInfo as 
InstalledPackageInfo + ( InstalledPackageInfo_(exposed) ) + +regenerateHaddockIndex :: Verbosity + -> InstalledPackageIndex -> ProgramConfiguration -> FilePath + -> IO () +regenerateHaddockIndex verbosity pkgs conf index = do + (paths, warns) <- haddockPackagePaths pkgs' Nothing + let paths' = [ (interface, html) | (interface, Just html) <- paths] + case warns of + Nothing -> return () + Just m -> debug verbosity m + + (confHaddock, _, _) <- + requireProgramVersion verbosity haddockProgram + (orLaterVersion (Version [0,6] [])) conf + + createDirectoryIfMissing True destDir + + withTempDirectory verbosity destDir "tmphaddock" $ \tempDir -> do + + let flags = [ "--gen-contents" + , "--gen-index" + , "--odir=" ++ tempDir + , "--title=Haskell modules on this system" ] + ++ [ "--read-interface=" ++ html ++ "," ++ interface + | (interface, html) <- paths' ] + rawSystemProgram verbosity confHaddock flags + renameFile (tempDir "index.html") (tempDir destFile) + installDirectoryContents verbosity tempDir destDir + + where + (destDir,destFile) = splitFileName index + pkgs' = [ maximumBy (comparing packageVersion) pkgvers' + | (_pname, pkgvers) <- allPackagesByName pkgs + , let pkgvers' = filter exposed pkgvers + , not (null pkgvers') ] diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/HttpUtils.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/HttpUtils.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/HttpUtils.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/HttpUtils.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,159 @@ +----------------------------------------------------------------------------- +-- | Separate module for HTTP actions, using a proxy server if one exists +----------------------------------------------------------------------------- +module Distribution.Client.HttpUtils ( + 
DownloadResult(..), + downloadURI, + getHTTP, + cabalBrowse, + proxy, + isOldHackageURI + ) where + +import Network.HTTP + ( Request (..), Response (..), RequestMethod (..) + , Header(..), HeaderName(..), lookupHeader ) +import Network.HTTP.Proxy ( Proxy(..), fetchProxy) +import Network.URI + ( URI (..), URIAuth (..) ) +import Network.Browser + ( BrowserAction, browse, setAllowBasicAuth, setAuthorityGen + , setOutHandler, setErrHandler, setProxy, request) +import Network.Stream + ( Result, ConnError(..) ) +import Control.Monad + ( liftM ) +import qualified Data.ByteString.Lazy.Char8 as ByteString +import Data.ByteString.Lazy (ByteString) + +import qualified Paths_cabal_install (version) +import Distribution.Verbosity (Verbosity) +import Distribution.Simple.Utils + ( die, info, warn, debug, notice + , copyFileVerbose, writeFileAtomic ) +import Distribution.System + ( buildOS, buildArch ) +import Distribution.Text + ( display ) +import Data.Char ( isSpace ) +import qualified System.FilePath.Posix as FilePath.Posix + ( splitDirectories ) +import System.FilePath + ( (<.>) ) +import System.Directory + ( doesFileExist ) + +data DownloadResult = FileAlreadyInCache | FileDownloaded FilePath deriving (Eq) + +-- Trim +trim :: String -> String +trim = f . f + where f = reverse . dropWhile isSpace + +-- |Get the local proxy settings +--TODO: print info message when we're using a proxy based on verbosity +proxy :: Verbosity -> IO Proxy +proxy _verbosity = do + p <- fetchProxy True + -- Handle empty proxy strings + return $ case p of + Proxy uri auth -> + let uri' = trim uri in + if uri' == "" then NoProxy else Proxy uri' auth + _ -> p + +mkRequest :: URI + -> Maybe String -- ^ Optional etag to be set in the If-None-Match HTTP header. 
+ -> Request ByteString +mkRequest uri etag = Request{ rqURI = uri + , rqMethod = GET + , rqHeaders = Header HdrUserAgent userAgent : ifNoneMatchHdr + , rqBody = ByteString.empty } + where userAgent = concat [ "cabal-install/", display Paths_cabal_install.version + , " (", display buildOS, "; ", display buildArch, ")" + ] + ifNoneMatchHdr = maybe [] (\t -> [Header HdrIfNoneMatch t]) etag + +-- |Carry out a GET request, using the local proxy settings +getHTTP :: Verbosity + -> URI + -> Maybe String -- ^ Optional etag to check if we already have the latest file. + -> IO (Result (Response ByteString)) +getHTTP verbosity uri etag = liftM (\(_, resp) -> Right resp) $ + cabalBrowse verbosity Nothing (request (mkRequest uri etag)) + +cabalBrowse :: Verbosity + -> Maybe (String, String) + -> BrowserAction s a + -> IO a +cabalBrowse verbosity auth act = do + p <- proxy verbosity + browse $ do + setProxy p + setErrHandler (warn verbosity . ("http error: "++)) + setOutHandler (debug verbosity) + setAllowBasicAuth False + setAuthorityGen (\_ _ -> return auth) + act + +downloadURI :: Verbosity + -> URI -- ^ What to download + -> FilePath -- ^ Where to put it + -> IO DownloadResult +downloadURI verbosity uri path | uriScheme uri == "file:" = do + copyFileVerbose verbosity (uriPath uri) path + return (FileDownloaded path) + -- Can we store the hash of the file so we can safely return path when the + -- hash matches to avoid unnecessary computation? +downloadURI verbosity uri path = do + let etagPath = path <.> "etag" + targetExists <- doesFileExist path + etagPathExists <- doesFileExist etagPath + -- In rare cases the target file doesn't exist, but the etag does. 
+ etag <- if targetExists && etagPathExists + then liftM Just $ readFile etagPath + else return Nothing + + result <- getHTTP verbosity uri etag + let result' = case result of + Left err -> Left err + Right rsp -> case rspCode rsp of + (2,0,0) -> Right rsp + (3,0,4) -> Right rsp + (a,b,c) -> Left err + where + err = ErrorMisc $ "Error HTTP code: " + ++ concatMap show [a,b,c] + + -- Only write the etag if we get a 200 response code. + -- A 304 still sends us an etag header. + case result' of + Left _ -> return () + Right rsp -> case rspCode rsp of + (2,0,0) -> case lookupHeader HdrETag (rspHeaders rsp) of + Nothing -> return () + Just newEtag -> writeFile etagPath newEtag + (_,_,_) -> return () + + case result' of + Left err -> die $ "Failed to download " ++ show uri ++ " : " ++ show err + Right rsp -> case rspCode rsp of + (2,0,0) -> do + info verbosity ("Downloaded to " ++ path) + writeFileAtomic path $ rspBody rsp + return (FileDownloaded path) + (3,0,4) -> do + notice verbosity "Skipping download: Local and remote files match." + return FileAlreadyInCache + (_,_,_) -> return (FileDownloaded path) + --FIXME: check the content-length header matches the body length. + --TODO: stream the download into the file rather than buffering the whole + -- thing in memory. + +-- Utility function for legacy support. 
+isOldHackageURI :: URI -> Bool +isOldHackageURI uri + = case uriAuthority uri of + Just (URIAuth {uriRegName = "hackage.haskell.org"}) -> + FilePath.Posix.splitDirectories (uriPath uri) == ["/","packages","archive"] + _ -> False diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/IndexUtils.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/IndexUtils.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/IndexUtils.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/IndexUtils.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,591 @@ +{-# LANGUAGE CPP #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.IndexUtils +-- Copyright : (c) Duncan Coutts 2008 +-- License : BSD-like +-- +-- Maintainer : duncan@community.haskell.org +-- Stability : provisional +-- Portability : portable +-- +-- Extra utils related to the package indexes. +----------------------------------------------------------------------------- +module Distribution.Client.IndexUtils ( + getIndexFileAge, + getInstalledPackages, + getSourcePackages, + getSourcePackagesStrict, + convert, + + readPackageIndexFile, + parsePackageIndex, + readRepoIndex, + updateRepoIndexCache, + updatePackageIndexCacheFile, + + BuildTreeRefType(..), refTypeFromTypeCode, typeCodeFromRefType + ) where + +import qualified Distribution.Client.Tar as Tar +import Distribution.Client.Types + +import Distribution.Package + ( PackageId, PackageIdentifier(..), PackageName(..) + , Package(..), packageVersion, packageName + , Dependency(Dependency), InstalledPackageId(..) 
) +import Distribution.Client.PackageIndex (PackageIndex) +import qualified Distribution.Client.PackageIndex as PackageIndex +import Distribution.Simple.PackageIndex (InstalledPackageIndex) +import qualified Distribution.Simple.PackageIndex as InstalledPackageIndex +import qualified Distribution.InstalledPackageInfo as InstalledPackageInfo +import qualified Distribution.PackageDescription.Parse as PackageDesc.Parse +import Distribution.PackageDescription + ( GenericPackageDescription ) +import Distribution.PackageDescription.Parse + ( parsePackageDescription ) +import Distribution.Simple.Compiler + ( Compiler, PackageDBStack ) +import Distribution.Simple.Program + ( ProgramConfiguration ) +import qualified Distribution.Simple.Configure as Configure + ( getInstalledPackages ) +import Distribution.ParseUtils + ( ParseResult(..) ) +import Distribution.Version + ( Version(Version), intersectVersionRanges ) +import Distribution.Text + ( display, simpleParse ) +import Distribution.Verbosity + ( Verbosity, normal, lessVerbose ) +import Distribution.Simple.Utils + ( die, warn, info, fromUTF8 ) + +import Data.Char (isAlphaNum) +import Data.Maybe (mapMaybe, fromMaybe) +import Data.List (isPrefixOf) +#if !MIN_VERSION_base(4,8,0) +import Data.Monoid (Monoid(..)) +#endif +import qualified Data.Map as Map +import Control.Monad (MonadPlus(mplus), when, liftM) +import Control.Exception (evaluate) +import qualified Data.ByteString.Lazy as BS +import qualified Data.ByteString.Lazy.Char8 as BS.Char8 +import qualified Data.ByteString.Char8 as BSS +import Data.ByteString.Lazy (ByteString) +import Distribution.Client.GZipUtils (maybeDecompress) +import Distribution.Client.Utils ( byteStringToFilePath + , tryFindAddSourcePackageDesc ) +import Distribution.Compat.Exception (catchIO) +import Distribution.Client.Compat.Time (getFileAge, getModTime) +import System.Directory (doesFileExist) +import System.FilePath ((), takeExtension, splitDirectories, normalise) +import System.FilePath.Posix 
as FilePath.Posix + ( takeFileName ) +import System.IO +import System.IO.Unsafe (unsafeInterleaveIO) +import System.IO.Error (isDoesNotExistError) +import Numeric (showFFloat) + + +getInstalledPackages :: Verbosity -> Compiler + -> PackageDBStack -> ProgramConfiguration + -> IO InstalledPackageIndex +getInstalledPackages verbosity comp packageDbs conf = + Configure.getInstalledPackages verbosity' comp packageDbs conf + where + --FIXME: make getInstalledPackages use sensible verbosity in the first place + verbosity' = lessVerbose verbosity + +convert :: InstalledPackageIndex -> PackageIndex InstalledPackage +convert index' = PackageIndex.fromList + -- There can be multiple installed instances of each package version, + -- like when the same package is installed in the global & user DBs. + -- InstalledPackageIndex.allPackagesBySourcePackageId gives us the + -- installed packages with the most preferred instances first, so by + -- picking the first we should get the user one. This is almost but not + -- quite the same as what ghc does. + [ InstalledPackage ipkg (sourceDeps index' ipkg) + | (_,ipkg:_) <- InstalledPackageIndex.allPackagesBySourcePackageId index' ] + where + -- The InstalledPackageInfo only lists dependencies by the + -- InstalledPackageId, which means we do not directly know the corresponding + -- source dependency. The only way to find out is to lookup the + -- InstalledPackageId to get the InstalledPackageInfo and look at its + -- source PackageId. But if the package is broken because it depends on + -- other packages that do not exist then we have a problem we cannot find + -- the original source package id. Instead we make up a bogus package id. + -- This should have the same effect since it should be a dependency on a + -- nonexistent package. 
+ sourceDeps index ipkg = + [ maybe (brokenPackageId depid) packageId mdep + | let depids = InstalledPackageInfo.depends ipkg + getpkg = InstalledPackageIndex.lookupInstalledPackageId index + , (depid, mdep) <- zip depids (map getpkg depids) ] + + brokenPackageId (InstalledPackageId str) = + PackageIdentifier (PackageName (str ++ "-broken")) (Version [] []) + +------------------------------------------------------------------------ +-- Reading the source package index +-- + +-- | Read a repository index from disk, from the local files specified by +-- a list of 'Repo's. +-- +-- All the 'SourcePackage's are marked as having come from the appropriate +-- 'Repo'. +-- +-- This is a higher level wrapper used internally in cabal-install. +-- +getSourcePackages :: Verbosity -> [Repo] -> IO SourcePackageDb +getSourcePackages verbosity repos = getSourcePackages' verbosity repos + ReadPackageIndexLazyIO + +-- | Like 'getSourcePackages', but reads the package index strictly. Useful if +-- you want to write to the package index after having read it. +getSourcePackagesStrict :: Verbosity -> [Repo] -> IO SourcePackageDb +getSourcePackagesStrict verbosity repos = getSourcePackages' verbosity repos + ReadPackageIndexStrict + +-- | Common implementation used by getSourcePackages and +-- getSourcePackagesStrict. +getSourcePackages' :: Verbosity -> [Repo] -> ReadPackageIndexMode + -> IO SourcePackageDb +getSourcePackages' verbosity [] _mode = do + warn verbosity $ "No remote package servers have been specified. Usually " + ++ "you would have one specified in the config file." + return SourcePackageDb { + packageIndex = mempty, + packagePreferences = mempty + } +getSourcePackages' verbosity repos mode = do + info verbosity "Reading available packages..." 
+ pkgss <- mapM (\r -> readRepoIndex verbosity r mode) repos + let (pkgs, prefs) = mconcat pkgss + prefs' = Map.fromListWith intersectVersionRanges + [ (name, range) | Dependency name range <- prefs ] + _ <- evaluate pkgs + _ <- evaluate prefs' + return SourcePackageDb { + packageIndex = pkgs, + packagePreferences = prefs' + } + +-- | Read a repository index from disk, from the local file specified by +-- the 'Repo'. +-- +-- All the 'SourcePackage's are marked as having come from the given 'Repo'. +-- +-- This is a higher level wrapper used internally in cabal-install. +-- +readRepoIndex :: Verbosity -> Repo -> ReadPackageIndexMode + -> IO (PackageIndex SourcePackage, [Dependency]) +readRepoIndex verbosity repo mode = + let indexFile = repoLocalDir repo "00-index.tar" + cacheFile = repoLocalDir repo "00-index.cache" + in handleNotFound $ do + warnIfIndexIsOld =<< getIndexFileAge repo + whenCacheOutOfDate indexFile cacheFile $ do + updatePackageIndexCacheFile verbosity indexFile cacheFile + readPackageIndexCacheFile mkAvailablePackage indexFile cacheFile mode + + where + mkAvailablePackage pkgEntry = + SourcePackage { + packageInfoId = pkgid, + packageDescription = packageDesc pkgEntry, + packageSource = case pkgEntry of + NormalPackage _ _ _ _ -> RepoTarballPackage repo pkgid Nothing + BuildTreeRef _ _ _ path _ -> LocalUnpackedPackage path, + packageDescrOverride = case pkgEntry of + NormalPackage _ _ pkgtxt _ -> Just pkgtxt + _ -> Nothing + } + where + pkgid = packageId pkgEntry + + handleNotFound action = catchIO action $ \e -> if isDoesNotExistError e + then do + case repoKind repo of + Left remoteRepo -> warn verbosity $ + "The package list for '" ++ remoteRepoName remoteRepo + ++ "' does not exist. Run 'cabal update' to download it." + Right _localRepo -> warn verbosity $ + "The package list for the local repo '" ++ repoLocalDir repo + ++ "' is missing. The repo is invalid." 
+ return mempty + else ioError e + + isOldThreshold = 15 --days + warnIfIndexIsOld dt = do + when (dt >= isOldThreshold) $ case repoKind repo of + Left remoteRepo -> warn verbosity $ + "The package list for '" ++ remoteRepoName remoteRepo + ++ "' is " ++ showFFloat (Just 1) dt " days old.\nRun " + ++ "'cabal update' to get the latest list of available packages." + Right _localRepo -> return () + + +-- | Return the age of the index file in days (as a Double). +getIndexFileAge :: Repo -> IO Double +getIndexFileAge repo = getFileAge $ repoLocalDir repo "00-index.tar" + + +-- | It is not necessary to call this, as the cache will be updated when the +-- index is read normally. However you can do the work earlier if you like. +-- +updateRepoIndexCache :: Verbosity -> Repo -> IO () +updateRepoIndexCache verbosity repo = + whenCacheOutOfDate indexFile cacheFile $ do + updatePackageIndexCacheFile verbosity indexFile cacheFile + where + indexFile = repoLocalDir repo "00-index.tar" + cacheFile = repoLocalDir repo "00-index.cache" + +whenCacheOutOfDate :: FilePath -> FilePath -> IO () -> IO () +whenCacheOutOfDate origFile cacheFile action = do + exists <- doesFileExist cacheFile + if not exists + then action + else do + origTime <- getModTime origFile + cacheTime <- getModTime cacheFile + when (origTime > cacheTime) action + +------------------------------------------------------------------------ +-- Reading the index file +-- + +-- | An index entry is either a normal package, or a local build tree reference. +data PackageEntry = + NormalPackage PackageId GenericPackageDescription ByteString BlockNo + | BuildTreeRef BuildTreeRefType + PackageId GenericPackageDescription FilePath BlockNo + +-- | A build tree reference is either a link or a snapshot. 
+data BuildTreeRefType = SnapshotRef | LinkRef + deriving Eq + +refTypeFromTypeCode :: Tar.TypeCode -> BuildTreeRefType +refTypeFromTypeCode t + | t == Tar.buildTreeRefTypeCode = LinkRef + | t == Tar.buildTreeSnapshotTypeCode = SnapshotRef + | otherwise = + error "Distribution.Client.IndexUtils.refTypeFromTypeCode: unknown type code" + +typeCodeFromRefType :: BuildTreeRefType -> Tar.TypeCode +typeCodeFromRefType LinkRef = Tar.buildTreeRefTypeCode +typeCodeFromRefType SnapshotRef = Tar.buildTreeSnapshotTypeCode + +type MkPackageEntry = IO PackageEntry + +instance Package PackageEntry where + packageId (NormalPackage pkgid _ _ _) = pkgid + packageId (BuildTreeRef _ pkgid _ _ _) = pkgid + +packageDesc :: PackageEntry -> GenericPackageDescription +packageDesc (NormalPackage _ descr _ _) = descr +packageDesc (BuildTreeRef _ _ descr _ _) = descr + +-- | Read a compressed \"00-index.tar.gz\" file into a 'PackageIndex'. +-- +-- This is supposed to be an \"all in one\" way to easily get at the info in +-- the Hackage package index. +-- +-- It takes a function to map a 'GenericPackageDescription' into any more +-- specific instance of 'Package' that you might want to use. In the simple +-- case you can just use @\_ p -> p@ here. +-- +readPackageIndexFile :: Package pkg + => (PackageEntry -> pkg) + -> FilePath + -> IO (PackageIndex pkg, [Dependency]) +readPackageIndexFile mkPkg indexFile = do + (mkPkgs, prefs) <- either fail return + . parsePackageIndex + . maybeDecompress + =<< BS.readFile indexFile + + pkgEntries <- sequence mkPkgs + pkgs <- evaluate $ PackageIndex.fromList (map mkPkg pkgEntries) + return (pkgs, prefs) + +-- | Parse an uncompressed \"00-index.tar\" repository index file represented +-- as a 'ByteString'. +-- +parsePackageIndex :: ByteString + -> Either String ([MkPackageEntry], [Dependency]) +parsePackageIndex = accum 0 [] [] . 
Tar.read + where + accum blockNo pkgs prefs es = case es of + Tar.Fail err -> Left err + Tar.Done -> Right (reverse pkgs, reverse prefs) + Tar.Next e es' -> accum blockNo' pkgs' prefs' es' + where + (pkgs', prefs') = extract blockNo pkgs prefs e + blockNo' = blockNo + Tar.entrySizeInBlocks e + + extract blockNo pkgs prefs entry = + fromMaybe (pkgs, prefs) $ + tryExtractPkg + `mplus` tryExtractPrefs + where + tryExtractPkg = do + mkPkgEntry <- extractPkg entry blockNo + return (mkPkgEntry:pkgs, prefs) + + tryExtractPrefs = do + prefs' <- extractPrefs entry + return (pkgs, prefs'++prefs) + +extractPkg :: Tar.Entry -> BlockNo -> Maybe MkPackageEntry +extractPkg entry blockNo = case Tar.entryContent entry of + Tar.NormalFile content _ + | takeExtension fileName == ".cabal" + -> case splitDirectories (normalise fileName) of + [pkgname,vers,_] -> case simpleParse vers of + Just ver -> Just $ return (NormalPackage pkgid descr content blockNo) + where + pkgid = PackageIdentifier (PackageName pkgname) ver + parsed = parsePackageDescription . fromUTF8 . BS.Char8.unpack + $ content + descr = case parsed of + ParseOk _ d -> d + _ -> error $ "Couldn't read cabal file " + ++ show fileName + _ -> Nothing + _ -> Nothing + + Tar.OtherEntryType typeCode content _ + | Tar.isBuildTreeRefTypeCode typeCode -> + Just $ do + let path = byteStringToFilePath content + err = "Error reading package index." + cabalFile <- tryFindAddSourcePackageDesc path err + descr <- PackageDesc.Parse.readPackageDescription normal cabalFile + return $ BuildTreeRef (refTypeFromTypeCode typeCode) (packageId descr) + descr path blockNo + + _ -> Nothing + + where + fileName = Tar.entryPath entry + +extractPrefs :: Tar.Entry -> Maybe [Dependency] +extractPrefs entry = case Tar.entryContent entry of + Tar.NormalFile content _ + | takeFileName (Tar.entryPath entry) == "preferred-versions" + -> Just . parsePreferredVersions + . 
BS.Char8.unpack $ content + _ -> Nothing + +parsePreferredVersions :: String -> [Dependency] +parsePreferredVersions = mapMaybe simpleParse + . filter (not . isPrefixOf "--") + . lines + +------------------------------------------------------------------------ +-- Reading and updating the index cache +-- + +updatePackageIndexCacheFile :: Verbosity -> FilePath -> FilePath -> IO () +updatePackageIndexCacheFile verbosity indexFile cacheFile = do + info verbosity "Updating the index cache file..." + (mkPkgs, prefs) <- either fail return + . parsePackageIndex + . maybeDecompress + =<< BS.readFile indexFile + pkgEntries <- sequence mkPkgs + let cache = mkCache pkgEntries prefs + writeFile cacheFile (showIndexCache cache) + where + mkCache pkgs prefs = + [ CachePreference pref | pref <- prefs ] + ++ [ CachePackageId pkgid blockNo + | (NormalPackage pkgid _ _ blockNo) <- pkgs ] + ++ [ CacheBuildTreeRef refType blockNo + | (BuildTreeRef refType _ _ _ blockNo) <- pkgs] + +data ReadPackageIndexMode = ReadPackageIndexStrict + | ReadPackageIndexLazyIO + +readPackageIndexCacheFile :: Package pkg + => (PackageEntry -> pkg) + -> FilePath + -> FilePath + -> ReadPackageIndexMode + -> IO (PackageIndex pkg, [Dependency]) +readPackageIndexCacheFile mkPkg indexFile cacheFile mode = do + cache <- liftM readIndexCache (BSS.readFile cacheFile) + myWithFile indexFile ReadMode $ \indexHnd -> + packageIndexFromCache mkPkg indexHnd cache mode + where + myWithFile f m act = case mode of + ReadPackageIndexStrict -> withFile f m act + ReadPackageIndexLazyIO -> do indexHnd <- openFile f m + act indexHnd + + +packageIndexFromCache :: Package pkg + => (PackageEntry -> pkg) + -> Handle + -> [IndexCacheEntry] + -> ReadPackageIndexMode + -> IO (PackageIndex pkg, [Dependency]) +packageIndexFromCache mkPkg hnd entrs mode = accum mempty [] entrs + where + accum srcpkgs prefs [] = do + -- Have to reverse entries, since in a tar file, later entries mask + -- earlier ones, and PackageIndex.fromList does the 
same, but we + -- accumulate the list of entries in reverse order, so need to reverse. + pkgIndex <- evaluate $ PackageIndex.fromList (reverse srcpkgs) + return (pkgIndex, prefs) + + accum srcpkgs prefs (CachePackageId pkgid blockno : entries) = do + -- Given the cache entry, make a package index entry. + -- The magic here is that we use lazy IO to read the .cabal file + -- from the index tarball if it turns out that we need it. + -- Most of the time we only need the package id. + ~(pkg, pkgtxt) <- unsafeInterleaveIO $ do + pkgtxt <- getEntryContent blockno + pkg <- readPackageDescription pkgtxt + return (pkg, pkgtxt) + let srcpkg = case mode of + ReadPackageIndexLazyIO -> + mkPkg (NormalPackage pkgid pkg pkgtxt blockno) + ReadPackageIndexStrict -> + pkg `seq` pkgtxt `seq` mkPkg (NormalPackage pkgid pkg + pkgtxt blockno) + accum (srcpkg:srcpkgs) prefs entries + + accum srcpkgs prefs (CacheBuildTreeRef refType blockno : entries) = do + -- We have to read the .cabal file eagerly here because we can't cache the + -- package id for build tree references - the user might edit the .cabal + -- file after the reference was added to the index. + path <- liftM byteStringToFilePath . getEntryContent $ blockno + pkg <- do let err = "Error reading package index from cache." 
+ file <- tryFindAddSourcePackageDesc path err + PackageDesc.Parse.readPackageDescription normal file + let srcpkg = mkPkg (BuildTreeRef refType (packageId pkg) pkg path blockno) + accum (srcpkg:srcpkgs) prefs entries + + accum srcpkgs prefs (CachePreference pref : entries) = + accum srcpkgs (pref:prefs) entries + + getEntryContent :: BlockNo -> IO ByteString + getEntryContent blockno = do + hSeek hnd AbsoluteSeek (fromIntegral (blockno * 512)) + header <- BS.hGet hnd 512 + size <- getEntrySize header + BS.hGet hnd (fromIntegral size) + + getEntrySize :: ByteString -> IO Tar.FileSize + getEntrySize header = + case Tar.read header of + Tar.Next e _ -> + case Tar.entryContent e of + Tar.NormalFile _ size -> return size + Tar.OtherEntryType typecode _ size + | Tar.isBuildTreeRefTypeCode typecode + -> return size + _ -> interror "unexpected tar entry type" + _ -> interror "could not read tar file entry" + + readPackageDescription :: ByteString -> IO GenericPackageDescription + readPackageDescription content = + case parsePackageDescription . fromUTF8 . BS.Char8.unpack $ content of + ParseOk _ d -> return d + _ -> interror "failed to parse .cabal file" + + interror msg = die $ "internal error when reading package index: " ++ msg + ++ "The package index or index cache is probably " + ++ "corrupt. Running cabal update might fix it." + +------------------------------------------------------------------------ +-- Index cache data structure +-- + +-- | Tar files are block structured with 512 byte blocks. Every header and file +-- content starts on a block boundary. 
+-- +type BlockNo = Int + +data IndexCacheEntry = CachePackageId PackageId BlockNo + | CacheBuildTreeRef BuildTreeRefType BlockNo + | CachePreference Dependency + deriving (Eq) + +packageKey, blocknoKey, buildTreeRefKey, preferredVersionKey :: String +packageKey = "pkg:" +blocknoKey = "b#" +buildTreeRefKey = "build-tree-ref:" +preferredVersionKey = "pref-ver:" + +readIndexCacheEntry :: BSS.ByteString -> Maybe IndexCacheEntry +readIndexCacheEntry = \line -> + case BSS.words line of + [key, pkgnamestr, pkgverstr, sep, blocknostr] + | key == BSS.pack packageKey && sep == BSS.pack blocknoKey -> + case (parseName pkgnamestr, parseVer pkgverstr [], + parseBlockNo blocknostr) of + (Just pkgname, Just pkgver, Just blockno) + -> Just (CachePackageId (PackageIdentifier pkgname pkgver) blockno) + _ -> Nothing + [key, typecodestr, blocknostr] | key == BSS.pack buildTreeRefKey -> + case (parseRefType typecodestr, parseBlockNo blocknostr) of + (Just refType, Just blockno) + -> Just (CacheBuildTreeRef refType blockno) + _ -> Nothing + + (key: remainder) | key == BSS.pack preferredVersionKey -> + fmap CachePreference (simpleParse (BSS.unpack (BSS.unwords remainder))) + _ -> Nothing + where + parseName str + | BSS.all (\c -> isAlphaNum c || c == '-') str + = Just (PackageName (BSS.unpack str)) + | otherwise = Nothing + + parseVer str vs = + case BSS.readInt str of + Nothing -> Nothing + Just (v, str') -> case BSS.uncons str' of + Just ('.', str'') -> parseVer str'' (v:vs) + Just _ -> Nothing + Nothing -> Just (Version (reverse (v:vs)) []) + + parseBlockNo str = + case BSS.readInt str of + Just (blockno, remainder) | BSS.null remainder -> Just blockno + _ -> Nothing + + parseRefType str = + case BSS.uncons str of + Just (typeCode, remainder) + | BSS.null remainder && Tar.isBuildTreeRefTypeCode typeCode + -> Just (refTypeFromTypeCode typeCode) + _ -> Nothing + +showIndexCacheEntry :: IndexCacheEntry -> String +showIndexCacheEntry entry = unwords $ case entry of + CachePackageId pkgid 
b -> [ packageKey + , display (packageName pkgid) + , display (packageVersion pkgid) + , blocknoKey + , show b + ] + CacheBuildTreeRef t b -> [ buildTreeRefKey + , [typeCodeFromRefType t] + , show b + ] + CachePreference dep -> [ preferredVersionKey + , display dep + ] + +readIndexCache :: BSS.ByteString -> [IndexCacheEntry] +readIndexCache = mapMaybe readIndexCacheEntry . BSS.lines + +showIndexCache :: [IndexCacheEntry] -> String +showIndexCache = unlines . map showIndexCacheEntry diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Init/Heuristics.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Init/Heuristics.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Init/Heuristics.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Init/Heuristics.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,386 @@ +{-# LANGUAGE CPP #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.Init.Heuristics +-- Copyright : (c) Benedikt Huber 2009 +-- License : BSD-like +-- +-- Maintainer : cabal-devel@haskell.org +-- Stability : provisional +-- Portability : portable +-- +-- Heuristics for creating initial cabal files. 
+-- +----------------------------------------------------------------------------- +module Distribution.Client.Init.Heuristics ( + guessPackageName, + scanForModules, SourceFileEntry(..), + neededBuildPrograms, + guessMainFileCandidates, + guessAuthorNameMail, + knownCategories, +) where +import Distribution.Text (simpleParse) +import Distribution.Simple.Setup (Flag(..), flagToMaybe) +import Distribution.ModuleName + ( ModuleName, toFilePath ) +import Distribution.Client.PackageIndex + ( allPackagesByName ) +import qualified Distribution.Package as P +import qualified Distribution.PackageDescription as PD + ( category, packageDescription ) +import Distribution.Simple.Utils + ( intercalate ) +import Distribution.Client.Utils + ( tryCanonicalizePath ) +import Language.Haskell.Extension ( Extension ) + +import Distribution.Client.Types ( packageDescription, SourcePackageDb(..) ) +#if !MIN_VERSION_base(4,8,0) +import Control.Applicative ( pure, (<$>), (<*>) ) +#endif +import Control.Arrow ( first ) +import Control.Monad ( liftM ) +import Data.Char ( isAlphaNum, isNumber, isUpper, isLower, isSpace ) +import Data.Either ( partitionEithers ) +import Data.List ( isInfixOf, isPrefixOf, isSuffixOf, sortBy ) +import Data.Maybe ( mapMaybe, catMaybes, maybeToList ) +#if !MIN_VERSION_base(4,8,0) +import Data.Monoid ( mempty, mappend, mconcat, ) +#endif +import Data.Ord ( comparing ) +import qualified Data.Set as Set ( fromList, toList ) +import System.Directory ( getCurrentDirectory, getDirectoryContents, + doesDirectoryExist, doesFileExist, getHomeDirectory, ) +import Distribution.Compat.Environment ( getEnvironment ) +import System.FilePath ( takeExtension, takeBaseName, dropExtension, + (), (<.>), splitDirectories, makeRelative ) + +import Distribution.Client.Init.Types ( InitFlags(..) ) +import Distribution.Client.Compat.Process ( readProcessWithExitCode ) +import System.Exit ( ExitCode(..) 
) + +-- | Return a list of candidate main files for this executable: top-level +-- modules including the word 'Main' in the file name. The list is sorted in +-- order of preference, shorter file names are preferred. 'Right's are existing +-- candidates and 'Left's are those that do not yet exist. +guessMainFileCandidates :: InitFlags -> IO [Either FilePath FilePath] +guessMainFileCandidates flags = do + dir <- + maybe getCurrentDirectory return (flagToMaybe $ packageDir flags) + files <- getDirectoryContents dir + let existingCandidates = filter isMain files + -- We always want to give the user at least one default choice. If either + -- Main.hs or Main.lhs has already been created, then we don't want to + -- suggest the other; however, if neither has been created, then we + -- suggest both. + newCandidates = + if any (`elem` existingCandidates) ["Main.hs", "Main.lhs"] + then [] + else ["Main.hs", "Main.lhs"] + candidates = + sortBy (\x y -> comparing (length . either id id) x y + `mappend` compare x y) + (map Left newCandidates ++ map Right existingCandidates) + return candidates + + where + isMain f = (isInfixOf "Main" f || isInfixOf "main" f) + && (isSuffixOf ".hs" f || isSuffixOf ".lhs" f) + +-- | Guess the package name based on the given root directory. +guessPackageName :: FilePath -> IO P.PackageName +guessPackageName = liftM (P.PackageName . repair . last . splitDirectories) + . tryCanonicalizePath + where + -- Treat each span of non-alphanumeric characters as a hyphen. Each + -- hyphenated component of a package name must contain at least one + -- alphabetic character. An arbitrary character ('x') will be prepended if + -- this is not the case for the first component, and subsequent components + -- will simply be run together. For example, "1+2_foo-3" will become + -- "x12-foo3". + repair = repair' ('x' :) id + repair' invalid valid x = case dropWhile (not . 
isAlphaNum) x of + "" -> repairComponent "" + x' -> let (c, r) = first repairComponent $ break (not . isAlphaNum) x' + in c ++ repairRest r + where + repairComponent c | all isNumber c = invalid c + | otherwise = valid c + repairRest = repair' id ('-' :) + +-- |Data type of source files found in the working directory +data SourceFileEntry = SourceFileEntry + { relativeSourcePath :: FilePath + , moduleName :: ModuleName + , fileExtension :: String + , imports :: [ModuleName] + , extensions :: [Extension] + } deriving Show + +sfToFileName :: FilePath -> SourceFileEntry -> FilePath +sfToFileName projectRoot (SourceFileEntry relPath m ext _ _) + = projectRoot relPath toFilePath m <.> ext + +-- |Search for source files in the given directory +-- and return pairs of guessed Haskell source path and +-- module names. +scanForModules :: FilePath -> IO [SourceFileEntry] +scanForModules rootDir = scanForModulesIn rootDir rootDir + +scanForModulesIn :: FilePath -> FilePath -> IO [SourceFileEntry] +scanForModulesIn projectRoot srcRoot = scan srcRoot [] + where + scan dir hierarchy = do + entries <- getDirectoryContents (projectRoot dir) + (files, dirs) <- liftM partitionEithers (mapM (tagIsDir dir) entries) + let modules = catMaybes [ guessModuleName hierarchy file + | file <- files + , isUpper (head file) ] + modules' <- mapM (findImportsAndExts projectRoot) modules + recMods <- mapM (scanRecursive dir hierarchy) dirs + return $ concat (modules' : recMods) + tagIsDir parent entry = do + isDir <- doesDirectoryExist (parent entry) + return $ (if isDir then Right else Left) entry + guessModuleName hierarchy entry + | takeBaseName entry == "Setup" = Nothing + | ext `elem` sourceExtensions = + SourceFileEntry <$> pure relRoot <*> modName <*> pure ext <*> pure [] <*> pure [] + | otherwise = Nothing + where + relRoot = makeRelative projectRoot srcRoot + unqualModName = dropExtension entry + modName = simpleParse + $ intercalate "." . 
reverse $ (unqualModName : hierarchy) + ext = case takeExtension entry of '.':e -> e; e -> e + scanRecursive parent hierarchy entry + | isUpper (head entry) = scan (parent entry) (entry : hierarchy) + | isLower (head entry) && not (ignoreDir entry) = + scanForModulesIn projectRoot $ foldl () srcRoot (reverse (entry : hierarchy)) + | otherwise = return [] + ignoreDir ('.':_) = True + ignoreDir dir = dir `elem` ["dist", "_darcs"] + +findImportsAndExts :: FilePath -> SourceFileEntry -> IO SourceFileEntry +findImportsAndExts projectRoot sf = do + s <- readFile (sfToFileName projectRoot sf) + + let modules = mapMaybe + ( getModName + . drop 1 + . filter (not . null) + . dropWhile (/= "import") + . words + ) + . filter (not . ("--" `isPrefixOf`)) -- poor man's comment filtering + . lines + $ s + + -- XXX we should probably make a better attempt at parsing + -- comments above. Unfortunately we can't use a full-fledged + -- Haskell parser since cabal's dependencies must be kept at a + -- minimum. + + -- A poor man's LANGUAGE pragma parser. + exts = mapMaybe simpleParse + . concatMap getPragmas + . filter isLANGUAGEPragma + . map fst + . drop 1 + . takeWhile (not . null . snd) + . iterate (takeBraces . snd) + $ ("",s) + + takeBraces = break (== '}') . dropWhile (/= '{') + + isLANGUAGEPragma = ("{-# LANGUAGE " `isPrefixOf`) + + getPragmas = map trim . splitCommas . takeWhile (/= '#') . 
drop 13 + + splitCommas "" = [] + splitCommas xs = x : splitCommas (drop 1 y) + where (x,y) = break (==',') xs + + return sf { imports = modules + , extensions = exts + } + + where getModName :: [String] -> Maybe ModuleName + getModName [] = Nothing + getModName ("qualified":ws) = getModName ws + getModName (ms:_) = simpleParse ms + + + +-- Unfortunately we cannot use the version exported by Distribution.Simple.Program +knownSuffixHandlers :: [(String,String)] +knownSuffixHandlers = + [ ("gc", "greencard") + , ("chs", "chs") + , ("hsc", "hsc2hs") + , ("x", "alex") + , ("y", "happy") + , ("ly", "happy") + , ("cpphs", "cpp") + ] + +sourceExtensions :: [String] +sourceExtensions = "hs" : "lhs" : map fst knownSuffixHandlers + +neededBuildPrograms :: [SourceFileEntry] -> [String] +neededBuildPrograms entries = + [ handler + | ext <- nubSet (map fileExtension entries) + , handler <- maybeToList (lookup ext knownSuffixHandlers) + ] + +-- | Guess author and email using darcs and git configuration options. Use +-- the following in decreasing order of preference: +-- +-- 1. vcs env vars ($DARCS_EMAIL, $GIT_AUTHOR_*) +-- 2. Local repo configs +-- 3. Global vcs configs +-- 4. The generic $EMAIL +-- +-- Name and email are processed separately, so the guess might end up being +-- a name from DARCS_EMAIL and an email from git config. +-- +-- Darcs has preference, for tradition's sake. +guessAuthorNameMail :: IO (Flag String, Flag String) +guessAuthorNameMail = fmap authorGuessPure authorGuessIO + +-- Ordered in increasing preference, since Flag-as-monoid is identical to +-- Last. 
+authorGuessPure :: AuthorGuessIO -> AuthorGuess +authorGuessPure (AuthorGuessIO env darcsLocalF darcsGlobalF gitLocal gitGlobal) + = mconcat + [ emailEnv env + , gitGlobal + , darcsCfg darcsGlobalF + , gitLocal + , darcsCfg darcsLocalF + , gitEnv env + , darcsEnv env + ] + +authorGuessIO :: IO AuthorGuessIO +authorGuessIO = AuthorGuessIO + <$> getEnvironment + <*> (maybeReadFile $ "_darcs" "prefs" "author") + <*> (maybeReadFile =<< liftM ( (".darcs" "author")) getHomeDirectory) + <*> gitCfg Local + <*> gitCfg Global + +-- Types and functions used for guessing the author are now defined: + +type AuthorGuess = (Flag String, Flag String) +type Enviro = [(String, String)] +data GitLoc = Local | Global +data AuthorGuessIO = AuthorGuessIO + Enviro -- ^ Environment lookup table + (Maybe String) -- ^ Contents of local darcs author info + (Maybe String) -- ^ Contents of global darcs author info + AuthorGuess -- ^ Git config --local + AuthorGuess -- ^ Git config --global + +darcsEnv :: Enviro -> AuthorGuess +darcsEnv = maybe mempty nameAndMail . lookup "DARCS_EMAIL" + +gitEnv :: Enviro -> AuthorGuess +gitEnv env = (name, mail) + where + name = maybeFlag "GIT_AUTHOR_NAME" env + mail = maybeFlag "GIT_AUTHOR_EMAIL" env + +darcsCfg :: Maybe String -> AuthorGuess +darcsCfg = maybe mempty nameAndMail + +emailEnv :: Enviro -> AuthorGuess +emailEnv env = (mempty, mail) + where + mail = maybeFlag "EMAIL" env + +gitCfg :: GitLoc -> IO AuthorGuess +gitCfg which = do + name <- gitVar which "user.name" + mail <- gitVar which "user.email" + return (name, mail) + +gitVar :: GitLoc -> String -> IO (Flag String) +gitVar which = fmap happyOutput . 
gitConfigQuery which + +happyOutput :: (ExitCode, a, t) -> Flag a +happyOutput v = case v of + (ExitSuccess, s, _) -> Flag s + _ -> mempty + +gitConfigQuery :: GitLoc -> String -> IO (ExitCode, String, String) +gitConfigQuery which key = + fmap trim' $ readProcessWithExitCode "git" ["config", w, key] "" + where + w = case which of + Local -> "--local" + Global -> "--global" + trim' (a, b, c) = (a, trim b, c) + +maybeFlag :: String -> Enviro -> Flag String +maybeFlag k = maybe mempty Flag . lookup k + +maybeReadFile :: String -> IO (Maybe String) +maybeReadFile f = do + exists <- doesFileExist f + if exists + then fmap Just $ readFile f + else return Nothing + +-- |Get list of categories used in Hackage. NOTE: Very slow, needs to be cached +knownCategories :: SourcePackageDb -> [String] +knownCategories (SourcePackageDb sourcePkgIndex _) = nubSet + [ cat | pkg <- map head (allPackagesByName sourcePkgIndex) + , let catList = (PD.category . PD.packageDescription . packageDescription) pkg + , cat <- splitString ',' catList + ] + +-- Parse name and email, from darcs pref files or environment variable +nameAndMail :: String -> (Flag String, Flag String) +nameAndMail str + | all isSpace nameOrEmail = mempty + | null erest = (mempty, Flag $ trim nameOrEmail) + | otherwise = (Flag $ trim nameOrEmail, Flag mail) + where + (nameOrEmail,erest) = break (== '<') str + (mail,_) = break (== '>') (tail erest) + +trim :: String -> String +trim = removeLeadingSpace . reverse . removeLeadingSpace . reverse + where + removeLeadingSpace = dropWhile isSpace + +-- split string at given character, and remove whitespace +splitString :: Char -> String -> [String] +splitString sep str = go str where + go s = if null s' then [] else tok : go rest where + s' = dropWhile (\c -> c == sep || isSpace c) s + (tok,rest) = break (==sep) s' + +nubSet :: (Ord a) => [a] -> [a] +nubSet = Set.toList . 
Set.fromList + +{- +test db testProjectRoot = do + putStrLn "Guessed package name" + (guessPackageName >=> print) testProjectRoot + putStrLn "Guessed name and email" + guessAuthorNameMail >>= print + + mods <- scanForModules testProjectRoot + + putStrLn "Guessed modules" + mapM_ print mods + putStrLn "Needed build programs" + print (neededBuildPrograms mods) + + putStrLn "List of known categories" + print $ knownCategories db +-} diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Init/Licenses.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Init/Licenses.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Init/Licenses.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Init/Licenses.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,3065 @@ +module Distribution.Client.Init.Licenses + ( License + , bsd2 + , bsd3 + , gplv2 + , gplv3 + , lgpl21 + , lgpl3 + , agplv3 + , apache20 + , mit + , mpl20 + , isc + ) where + +type License = String + +bsd2 :: String -> String -> License +bsd2 authors year = unlines + [ "Copyright (c) " ++ year ++ ", " ++ authors + , "All rights reserved." + , "" + , "Redistribution and use in source and binary forms, with or without" + , "modification, are permitted provided that the following conditions are" + , "met:" + , "" + , "1. Redistributions of source code must retain the above copyright" + , " notice, this list of conditions and the following disclaimer." + , "" + , "2. Redistributions in binary form must reproduce the above copyright" + , " notice, this list of conditions and the following disclaimer in the" + , " documentation and/or other materials provided with the" + , " distribution." 
+ , "" + , "THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS" + , "\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT" + , "LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR" + , "A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT" + , "OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL," + , "SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT" + , "LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE," + , "DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY" + , "THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT" + , "(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE" + , "OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE." + ] + +bsd3 :: String -> String -> License +bsd3 authors year = unlines + [ "Copyright (c) " ++ year ++ ", " ++ authors + , "" + , "All rights reserved." + , "" + , "Redistribution and use in source and binary forms, with or without" + , "modification, are permitted provided that the following conditions are met:" + , "" + , " * Redistributions of source code must retain the above copyright" + , " notice, this list of conditions and the following disclaimer." + , "" + , " * Redistributions in binary form must reproduce the above" + , " copyright notice, this list of conditions and the following" + , " disclaimer in the documentation and/or other materials provided" + , " with the distribution." + , "" + , " * Neither the name of " ++ authors ++ " nor the names of other" + , " contributors may be used to endorse or promote products derived" + , " from this software without specific prior written permission." 
+ , "" + , "THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS" + , "\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT" + , "LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR" + , "A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT" + , "OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL," + , "SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT" + , "LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE," + , "DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY" + , "THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT" + , "(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE" + , "OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE." + ] + +gplv2 :: License +gplv2 = unlines + [ " GNU GENERAL PUBLIC LICENSE" + , " Version 2, June 1991" + , "" + , " Copyright (C) 1989, 1991 Free Software Foundation, Inc.," + , " 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA" + , " Everyone is permitted to copy and distribute verbatim copies" + , " of this license document, but changing it is not allowed." + , "" + , " Preamble" + , "" + , " The licenses for most software are designed to take away your" + , "freedom to share and change it. By contrast, the GNU General Public" + , "License is intended to guarantee your freedom to share and change free" + , "software--to make sure the software is free for all its users. This" + , "General Public License applies to most of the Free Software" + , "Foundation's software and to any other program whose authors commit to" + , "using it. (Some other Free Software Foundation software is covered by" + , "the GNU Lesser General Public License instead.) You can apply it to" + , "your programs, too." + , "" + , " When we speak of free software, we are referring to freedom, not" + , "price. 
Our General Public Licenses are designed to make sure that you" + , "have the freedom to distribute copies of free software (and charge for" + , "this service if you wish), that you receive source code or can get it" + , "if you want it, that you can change the software or use pieces of it" + , "in new free programs; and that you know you can do these things." + , "" + , " To protect your rights, we need to make restrictions that forbid" + , "anyone to deny you these rights or to ask you to surrender the rights." + , "These restrictions translate to certain responsibilities for you if you" + , "distribute copies of the software, or if you modify it." + , "" + , " For example, if you distribute copies of such a program, whether" + , "gratis or for a fee, you must give the recipients all the rights that" + , "you have. You must make sure that they, too, receive or can get the" + , "source code. And you must show them these terms so they know their" + , "rights." + , "" + , " We protect your rights with two steps: (1) copyright the software, and" + , "(2) offer you this license which gives you legal permission to copy," + , "distribute and/or modify the software." + , "" + , " Also, for each author's protection and ours, we want to make certain" + , "that everyone understands that there is no warranty for this free" + , "software. If the software is modified by someone else and passed on, we" + , "want its recipients to know that what they have is not the original, so" + , "that any problems introduced by others will not reflect on the original" + , "authors' reputations." + , "" + , " Finally, any free program is threatened constantly by software" + , "patents. We wish to avoid the danger that redistributors of a free" + , "program will individually obtain patent licenses, in effect making the" + , "program proprietary. To prevent this, we have made it clear that any" + , "patent must be licensed for everyone's free use or not licensed at all." 
+ , "" + , " The precise terms and conditions for copying, distribution and" + , "modification follow." + , "" + , " GNU GENERAL PUBLIC LICENSE" + , " TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION" + , "" + , " 0. This License applies to any program or other work which contains" + , "a notice placed by the copyright holder saying it may be distributed" + , "under the terms of this General Public License. The \"Program\", below," + , "refers to any such program or work, and a \"work based on the Program\"" + , "means either the Program or any derivative work under copyright law:" + , "that is to say, a work containing the Program or a portion of it," + , "either verbatim or with modifications and/or translated into another" + , "language. (Hereinafter, translation is included without limitation in" + , "the term \"modification\".) Each licensee is addressed as \"you\"." + , "" + , "Activities other than copying, distribution and modification are not" + , "covered by this License; they are outside its scope. The act of" + , "running the Program is not restricted, and the output from the Program" + , "is covered only if its contents constitute a work based on the" + , "Program (independent of having been made by running the Program)." + , "Whether that is true depends on what the Program does." + , "" + , " 1. You may copy and distribute verbatim copies of the Program's" + , "source code as you receive it, in any medium, provided that you" + , "conspicuously and appropriately publish on each copy an appropriate" + , "copyright notice and disclaimer of warranty; keep intact all the" + , "notices that refer to this License and to the absence of any warranty;" + , "and give any other recipients of the Program a copy of this License" + , "along with the Program." + , "" + , "You may charge a fee for the physical act of transferring a copy, and" + , "you may at your option offer warranty protection in exchange for a fee." + , "" + , " 2. 
You may modify your copy or copies of the Program or any portion" + , "of it, thus forming a work based on the Program, and copy and" + , "distribute such modifications or work under the terms of Section 1" + , "above, provided that you also meet all of these conditions:" + , "" + , " a) You must cause the modified files to carry prominent notices" + , " stating that you changed the files and the date of any change." + , "" + , " b) You must cause any work that you distribute or publish, that in" + , " whole or in part contains or is derived from the Program or any" + , " part thereof, to be licensed as a whole at no charge to all third" + , " parties under the terms of this License." + , "" + , " c) If the modified program normally reads commands interactively" + , " when run, you must cause it, when started running for such" + , " interactive use in the most ordinary way, to print or display an" + , " announcement including an appropriate copyright notice and a" + , " notice that there is no warranty (or else, saying that you provide" + , " a warranty) and that users may redistribute the program under" + , " these conditions, and telling the user how to view a copy of this" + , " License. (Exception: if the Program itself is interactive but" + , " does not normally print such an announcement, your work based on" + , " the Program is not required to print an announcement.)" + , "" + , "These requirements apply to the modified work as a whole. If" + , "identifiable sections of that work are not derived from the Program," + , "and can be reasonably considered independent and separate works in" + , "themselves, then this License, and its terms, do not apply to those" + , "sections when you distribute them as separate works. 
But when you" + , "distribute the same sections as part of a whole which is a work based" + , "on the Program, the distribution of the whole must be on the terms of" + , "this License, whose permissions for other licensees extend to the" + , "entire whole, and thus to each and every part regardless of who wrote it." + , "" + , "Thus, it is not the intent of this section to claim rights or contest" + , "your rights to work written entirely by you; rather, the intent is to" + , "exercise the right to control the distribution of derivative or" + , "collective works based on the Program." + , "" + , "In addition, mere aggregation of another work not based on the Program" + , "with the Program (or with a work based on the Program) on a volume of" + , "a storage or distribution medium does not bring the other work under" + , "the scope of this License." + , "" + , " 3. You may copy and distribute the Program (or a work based on it," + , "under Section 2) in object code or executable form under the terms of" + , "Sections 1 and 2 above provided that you also do one of the following:" + , "" + , " a) Accompany it with the complete corresponding machine-readable" + , " source code, which must be distributed under the terms of Sections" + , " 1 and 2 above on a medium customarily used for software interchange; or," + , "" + , " b) Accompany it with a written offer, valid for at least three" + , " years, to give any third party, for a charge no more than your" + , " cost of physically performing source distribution, a complete" + , " machine-readable copy of the corresponding source code, to be" + , " distributed under the terms of Sections 1 and 2 above on a medium" + , " customarily used for software interchange; or," + , "" + , " c) Accompany it with the information you received as to the offer" + , " to distribute corresponding source code. 
(This alternative is" + , " allowed only for noncommercial distribution and only if you" + , " received the program in object code or executable form with such" + , " an offer, in accord with Subsection b above.)" + , "" + , "The source code for a work means the preferred form of the work for" + , "making modifications to it. For an executable work, complete source" + , "code means all the source code for all modules it contains, plus any" + , "associated interface definition files, plus the scripts used to" + , "control compilation and installation of the executable. However, as a" + , "special exception, the source code distributed need not include" + , "anything that is normally distributed (in either source or binary" + , "form) with the major components (compiler, kernel, and so on) of the" + , "operating system on which the executable runs, unless that component" + , "itself accompanies the executable." + , "" + , "If distribution of executable or object code is made by offering" + , "access to copy from a designated place, then offering equivalent" + , "access to copy the source code from the same place counts as" + , "distribution of the source code, even though third parties are not" + , "compelled to copy the source along with the object code." + , "" + , " 4. You may not copy, modify, sublicense, or distribute the Program" + , "except as expressly provided under this License. Any attempt" + , "otherwise to copy, modify, sublicense or distribute the Program is" + , "void, and will automatically terminate your rights under this License." + , "However, parties who have received copies, or rights, from you under" + , "this License will not have their licenses terminated so long as such" + , "parties remain in full compliance." + , "" + , " 5. You are not required to accept this License, since you have not" + , "signed it. However, nothing else grants you permission to modify or" + , "distribute the Program or its derivative works. 
These actions are" + , "prohibited by law if you do not accept this License. Therefore, by" + , "modifying or distributing the Program (or any work based on the" + , "Program), you indicate your acceptance of this License to do so, and" + , "all its terms and conditions for copying, distributing or modifying" + , "the Program or works based on it." + , "" + , " 6. Each time you redistribute the Program (or any work based on the" + , "Program), the recipient automatically receives a license from the" + , "original licensor to copy, distribute or modify the Program subject to" + , "these terms and conditions. You may not impose any further" + , "restrictions on the recipients' exercise of the rights granted herein." + , "You are not responsible for enforcing compliance by third parties to" + , "this License." + , "" + , " 7. If, as a consequence of a court judgment or allegation of patent" + , "infringement or for any other reason (not limited to patent issues)," + , "conditions are imposed on you (whether by court order, agreement or" + , "otherwise) that contradict the conditions of this License, they do not" + , "excuse you from the conditions of this License. If you cannot" + , "distribute so as to satisfy simultaneously your obligations under this" + , "License and any other pertinent obligations, then as a consequence you" + , "may not distribute the Program at all. For example, if a patent" + , "license would not permit royalty-free redistribution of the Program by" + , "all those who receive copies directly or indirectly through you, then" + , "the only way you could satisfy both it and this License would be to" + , "refrain entirely from distribution of the Program." + , "" + , "If any portion of this section is held invalid or unenforceable under" + , "any particular circumstance, the balance of the section is intended to" + , "apply and the section as a whole is intended to apply in other" + , "circumstances." 
+ , "" + , "It is not the purpose of this section to induce you to infringe any" + , "patents or other property right claims or to contest validity of any" + , "such claims; this section has the sole purpose of protecting the" + , "integrity of the free software distribution system, which is" + , "implemented by public license practices. Many people have made" + , "generous contributions to the wide range of software distributed" + , "through that system in reliance on consistent application of that" + , "system; it is up to the author/donor to decide if he or she is willing" + , "to distribute software through any other system and a licensee cannot" + , "impose that choice." + , "" + , "This section is intended to make thoroughly clear what is believed to" + , "be a consequence of the rest of this License." + , "" + , " 8. If the distribution and/or use of the Program is restricted in" + , "certain countries either by patents or by copyrighted interfaces, the" + , "original copyright holder who places the Program under this License" + , "may add an explicit geographical distribution limitation excluding" + , "those countries, so that distribution is permitted only in or among" + , "countries not thus excluded. In such case, this License incorporates" + , "the limitation as if written in the body of this License." + , "" + , " 9. The Free Software Foundation may publish revised and/or new versions" + , "of the General Public License from time to time. Such new versions will" + , "be similar in spirit to the present version, but may differ in detail to" + , "address new problems or concerns." + , "" + , "Each version is given a distinguishing version number. If the Program" + , "specifies a version number of this License which applies to it and \"any" + , "later version\", you have the option of following the terms and conditions" + , "either of that version or of any later version published by the Free" + , "Software Foundation. 
If the Program does not specify a version number of" + , "this License, you may choose any version ever published by the Free Software" + , "Foundation." + , "" + , " 10. If you wish to incorporate parts of the Program into other free" + , "programs whose distribution conditions are different, write to the author" + , "to ask for permission. For software which is copyrighted by the Free" + , "Software Foundation, write to the Free Software Foundation; we sometimes" + , "make exceptions for this. Our decision will be guided by the two goals" + , "of preserving the free status of all derivatives of our free software and" + , "of promoting the sharing and reuse of software generally." + , "" + , " NO WARRANTY" + , "" + , " 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY" + , "FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN" + , "OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES" + , "PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED" + , "OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF" + , "MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS" + , "TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE" + , "PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING," + , "REPAIR OR CORRECTION." + , "" + , " 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING" + , "WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR" + , "REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES," + , "INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING" + , "OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED" + , "TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY" + , "YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER" + , "PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE" + , "POSSIBILITY OF SUCH DAMAGES." + , "" + , " END OF TERMS AND CONDITIONS" + , "" + , " How to Apply These Terms to Your New Programs" + , "" + , " If you develop a new program, and you want it to be of the greatest" + , "possible use to the public, the best way to achieve this is to make it" + , "free software which everyone can redistribute and change under these terms." + , "" + , " To do so, attach the following notices to the program. It is safest" + , "to attach them to the start of each source file to most effectively" + , "convey the exclusion of warranty; and each file should have at least" + , "the \"copyright\" line and a pointer to where the full notice is found." + , "" + , " " + , " Copyright (C) " + , "" + , " This program is free software; you can redistribute it and/or modify" + , " it under the terms of the GNU General Public License as published by" + , " the Free Software Foundation; either version 2 of the License, or" + , " (at your option) any later version." + , "" + , " This program is distributed in the hope that it will be useful," + , " but WITHOUT ANY WARRANTY; without even the implied warranty of" + , " MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the" + , " GNU General Public License for more details." 
+ , "" + , " You should have received a copy of the GNU General Public License along" + , " with this program; if not, write to the Free Software Foundation, Inc.," + , " 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA." + , "" + , "Also add information on how to contact you by electronic and paper mail." + , "" + , "If the program is interactive, make it output a short notice like this" + , "when it starts in an interactive mode:" + , "" + , " Gnomovision version 69, Copyright (C) year name of author" + , " Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'." + , " This is free software, and you are welcome to redistribute it" + , " under certain conditions; type `show c' for details." + , "" + , "The hypothetical commands `show w' and `show c' should show the appropriate" + , "parts of the General Public License. Of course, the commands you use may" + , "be called something other than `show w' and `show c'; they could even be" + , "mouse-clicks or menu items--whatever suits your program." + , "" + , "You should also get your employer (if you work as a programmer) or your" + , "school, if any, to sign a \"copyright disclaimer\" for the program, if" + , "necessary. Here is a sample; alter the names:" + , "" + , " Yoyodyne, Inc., hereby disclaims all copyright interest in the program" + , " `Gnomovision' (which makes passes at compilers) written by James Hacker." + , "" + , " , 1 April 1989" + , " Ty Coon, President of Vice" + , "" + , "This General Public License does not permit incorporating your program into" + , "proprietary programs. If your program is a subroutine library, you may" + , "consider it more useful to permit linking proprietary applications with the" + , "library. If this is what you want to do, use the GNU Lesser General" + , "Public License instead of this License." 
+ ] + +gplv3 :: License +gplv3 = unlines + [ " GNU GENERAL PUBLIC LICENSE" + , " Version 3, 29 June 2007" + , "" + , " Copyright (C) 2007 Free Software Foundation, Inc. " + , " Everyone is permitted to copy and distribute verbatim copies" + , " of this license document, but changing it is not allowed." + , "" + , " Preamble" + , "" + , " The GNU General Public License is a free, copyleft license for" + , "software and other kinds of works." + , "" + , " The licenses for most software and other practical works are designed" + , "to take away your freedom to share and change the works. By contrast," + , "the GNU General Public License is intended to guarantee your freedom to" + , "share and change all versions of a program--to make sure it remains free" + , "software for all its users. We, the Free Software Foundation, use the" + , "GNU General Public License for most of our software; it applies also to" + , "any other work released this way by its authors. You can apply it to" + , "your programs, too." + , "" + , " When we speak of free software, we are referring to freedom, not" + , "price. Our General Public Licenses are designed to make sure that you" + , "have the freedom to distribute copies of free software (and charge for" + , "them if you wish), that you receive source code or can get it if you" + , "want it, that you can change the software or use pieces of it in new" + , "free programs, and that you know you can do these things." + , "" + , " To protect your rights, we need to prevent others from denying you" + , "these rights or asking you to surrender the rights. Therefore, you have" + , "certain responsibilities if you distribute copies of the software, or if" + , "you modify it: responsibilities to respect the freedom of others." + , "" + , " For example, if you distribute copies of such a program, whether" + , "gratis or for a fee, you must pass on to the recipients the same" + , "freedoms that you received. 
You must make sure that they, too, receive" + , "or can get the source code. And you must show them these terms so they" + , "know their rights." + , "" + , " Developers that use the GNU GPL protect your rights with two steps:" + , "(1) assert copyright on the software, and (2) offer you this License" + , "giving you legal permission to copy, distribute and/or modify it." + , "" + , " For the developers' and authors' protection, the GPL clearly explains" + , "that there is no warranty for this free software. For both users' and" + , "authors' sake, the GPL requires that modified versions be marked as" + , "changed, so that their problems will not be attributed erroneously to" + , "authors of previous versions." + , "" + , " Some devices are designed to deny users access to install or run" + , "modified versions of the software inside them, although the manufacturer" + , "can do so. This is fundamentally incompatible with the aim of" + , "protecting users' freedom to change the software. The systematic" + , "pattern of such abuse occurs in the area of products for individuals to" + , "use, which is precisely where it is most unacceptable. Therefore, we" + , "have designed this version of the GPL to prohibit the practice for those" + , "products. If such problems arise substantially in other domains, we" + , "stand ready to extend this provision to those domains in future versions" + , "of the GPL, as needed to protect the freedom of users." + , "" + , " Finally, every program is threatened constantly by software patents." + , "States should not allow patents to restrict development and use of" + , "software on general-purpose computers, but in those that do, we wish to" + , "avoid the special danger that patents applied to a free program could" + , "make it effectively proprietary. To prevent this, the GPL assures that" + , "patents cannot be used to render the program non-free." 
+ , "" + , " The precise terms and conditions for copying, distribution and" + , "modification follow." + , "" + , " TERMS AND CONDITIONS" + , "" + , " 0. Definitions." + , "" + , " \"This License\" refers to version 3 of the GNU General Public License." + , "" + , " \"Copyright\" also means copyright-like laws that apply to other kinds of" + , "works, such as semiconductor masks." + , "" + , " \"The Program\" refers to any copyrightable work licensed under this" + , "License. Each licensee is addressed as \"you\". \"Licensees\" and" + , "\"recipients\" may be individuals or organizations." + , "" + , " To \"modify\" a work means to copy from or adapt all or part of the work" + , "in a fashion requiring copyright permission, other than the making of an" + , "exact copy. The resulting work is called a \"modified version\" of the" + , "earlier work or a work \"based on\" the earlier work." + , "" + , " A \"covered work\" means either the unmodified Program or a work based" + , "on the Program." + , "" + , " To \"propagate\" a work means to do anything with it that, without" + , "permission, would make you directly or secondarily liable for" + , "infringement under applicable copyright law, except executing it on a" + , "computer or modifying a private copy. Propagation includes copying," + , "distribution (with or without modification), making available to the" + , "public, and in some countries other activities as well." + , "" + , " To \"convey\" a work means any kind of propagation that enables other" + , "parties to make or receive copies. Mere interaction with a user through" + , "a computer network, with no transfer of a copy, is not conveying." 
+ , "" + , " An interactive user interface displays \"Appropriate Legal Notices\"" + , "to the extent that it includes a convenient and prominently visible" + , "feature that (1) displays an appropriate copyright notice, and (2)" + , "tells the user that there is no warranty for the work (except to the" + , "extent that warranties are provided), that licensees may convey the" + , "work under this License, and how to view a copy of this License. If" + , "the interface presents a list of user commands or options, such as a" + , "menu, a prominent item in the list meets this criterion." + , "" + , " 1. Source Code." + , "" + , " The \"source code\" for a work means the preferred form of the work" + , "for making modifications to it. \"Object code\" means any non-source" + , "form of a work." + , "" + , " A \"Standard Interface\" means an interface that either is an official" + , "standard defined by a recognized standards body, or, in the case of" + , "interfaces specified for a particular programming language, one that" + , "is widely used among developers working in that language." + , "" + , " The \"System Libraries\" of an executable work include anything, other" + , "than the work as a whole, that (a) is included in the normal form of" + , "packaging a Major Component, but which is not part of that Major" + , "Component, and (b) serves only to enable use of the work with that" + , "Major Component, or to implement a Standard Interface for which an" + , "implementation is available to the public in source code form. A" + , "\"Major Component\", in this context, means a major essential component" + , "(kernel, window system, and so on) of the specific operating system" + , "(if any) on which the executable work runs, or a compiler used to" + , "produce the work, or an object code interpreter used to run it." 
+ , "" + , " The \"Corresponding Source\" for a work in object code form means all" + , "the source code needed to generate, install, and (for an executable" + , "work) run the object code and to modify the work, including scripts to" + , "control those activities. However, it does not include the work's" + , "System Libraries, or general-purpose tools or generally available free" + , "programs which are used unmodified in performing those activities but" + , "which are not part of the work. For example, Corresponding Source" + , "includes interface definition files associated with source files for" + , "the work, and the source code for shared libraries and dynamically" + , "linked subprograms that the work is specifically designed to require," + , "such as by intimate data communication or control flow between those" + , "subprograms and other parts of the work." + , "" + , " The Corresponding Source need not include anything that users" + , "can regenerate automatically from other parts of the Corresponding" + , "Source." + , "" + , " The Corresponding Source for a work in source code form is that" + , "same work." + , "" + , " 2. Basic Permissions." + , "" + , " All rights granted under this License are granted for the term of" + , "copyright on the Program, and are irrevocable provided the stated" + , "conditions are met. This License explicitly affirms your unlimited" + , "permission to run the unmodified Program. The output from running a" + , "covered work is covered by this License only if the output, given its" + , "content, constitutes a covered work. This License acknowledges your" + , "rights of fair use or other equivalent, as provided by copyright law." + , "" + , " You may make, run and propagate covered works that you do not" + , "convey, without conditions so long as your license otherwise remains" + , "in force. 
You may convey covered works to others for the sole purpose" + , "of having them make modifications exclusively for you, or provide you" + , "with facilities for running those works, provided that you comply with" + , "the terms of this License in conveying all material for which you do" + , "not control copyright. Those thus making or running the covered works" + , "for you must do so exclusively on your behalf, under your direction" + , "and control, on terms that prohibit them from making any copies of" + , "your copyrighted material outside their relationship with you." + , "" + , " Conveying under any other circumstances is permitted solely under" + , "the conditions stated below. Sublicensing is not allowed; section 10" + , "makes it unnecessary." + , "" + , " 3. Protecting Users' Legal Rights From Anti-Circumvention Law." + , "" + , " No covered work shall be deemed part of an effective technological" + , "measure under any applicable law fulfilling obligations under article" + , "11 of the WIPO copyright treaty adopted on 20 December 1996, or" + , "similar laws prohibiting or restricting circumvention of such" + , "measures." + , "" + , " When you convey a covered work, you waive any legal power to forbid" + , "circumvention of technological measures to the extent such circumvention" + , "is effected by exercising rights under this License with respect to" + , "the covered work, and you disclaim any intention to limit operation or" + , "modification of the work as a means of enforcing, against the work's" + , "users, your or third parties' legal rights to forbid circumvention of" + , "technological measures." + , "" + , " 4. Conveying Verbatim Copies." 
+ , "" + , " You may convey verbatim copies of the Program's source code as you" + , "receive it, in any medium, provided that you conspicuously and" + , "appropriately publish on each copy an appropriate copyright notice;" + , "keep intact all notices stating that this License and any" + , "non-permissive terms added in accord with section 7 apply to the code;" + , "keep intact all notices of the absence of any warranty; and give all" + , "recipients a copy of this License along with the Program." + , "" + , " You may charge any price or no price for each copy that you convey," + , "and you may offer support or warranty protection for a fee." + , "" + , " 5. Conveying Modified Source Versions." + , "" + , " You may convey a work based on the Program, or the modifications to" + , "produce it from the Program, in the form of source code under the" + , "terms of section 4, provided that you also meet all of these conditions:" + , "" + , " a) The work must carry prominent notices stating that you modified" + , " it, and giving a relevant date." + , "" + , " b) The work must carry prominent notices stating that it is" + , " released under this License and any conditions added under section" + , " 7. This requirement modifies the requirement in section 4 to" + , " \"keep intact all notices\"." + , "" + , " c) You must license the entire work, as a whole, under this" + , " License to anyone who comes into possession of a copy. This" + , " License will therefore apply, along with any applicable section 7" + , " additional terms, to the whole of the work, and all its parts," + , " regardless of how they are packaged. This License gives no" + , " permission to license the work in any other way, but it does not" + , " invalidate such permission if you have separately received it." 
+ , "" + , " d) If the work has interactive user interfaces, each must display" + , " Appropriate Legal Notices; however, if the Program has interactive" + , " interfaces that do not display Appropriate Legal Notices, your" + , " work need not make them do so." + , "" + , " A compilation of a covered work with other separate and independent" + , "works, which are not by their nature extensions of the covered work," + , "and which are not combined with it such as to form a larger program," + , "in or on a volume of a storage or distribution medium, is called an" + , "\"aggregate\" if the compilation and its resulting copyright are not" + , "used to limit the access or legal rights of the compilation's users" + , "beyond what the individual works permit. Inclusion of a covered work" + , "in an aggregate does not cause this License to apply to the other" + , "parts of the aggregate." + , "" + , " 6. Conveying Non-Source Forms." + , "" + , " You may convey a covered work in object code form under the terms" + , "of sections 4 and 5, provided that you also convey the" + , "machine-readable Corresponding Source under the terms of this License," + , "in one of these ways:" + , "" + , " a) Convey the object code in, or embodied in, a physical product" + , " (including a physical distribution medium), accompanied by the" + , " Corresponding Source fixed on a durable physical medium" + , " customarily used for software interchange." 
+ , "" + , " b) Convey the object code in, or embodied in, a physical product" + , " (including a physical distribution medium), accompanied by a" + , " written offer, valid for at least three years and valid for as" + , " long as you offer spare parts or customer support for that product" + , " model, to give anyone who possesses the object code either (1) a" + , " copy of the Corresponding Source for all the software in the" + , " product that is covered by this License, on a durable physical" + , " medium customarily used for software interchange, for a price no" + , " more than your reasonable cost of physically performing this" + , " conveying of source, or (2) access to copy the" + , " Corresponding Source from a network server at no charge." + , "" + , " c) Convey individual copies of the object code with a copy of the" + , " written offer to provide the Corresponding Source. This" + , " alternative is allowed only occasionally and noncommercially, and" + , " only if you received the object code with such an offer, in accord" + , " with subsection 6b." + , "" + , " d) Convey the object code by offering access from a designated" + , " place (gratis or for a charge), and offer equivalent access to the" + , " Corresponding Source in the same way through the same place at no" + , " further charge. You need not require recipients to copy the" + , " Corresponding Source along with the object code. If the place to" + , " copy the object code is a network server, the Corresponding Source" + , " may be on a different server (operated by you or a third party)" + , " that supports equivalent copying facilities, provided you maintain" + , " clear directions next to the object code saying where to find the" + , " Corresponding Source. Regardless of what server hosts the" + , " Corresponding Source, you remain obligated to ensure that it is" + , " available for as long as needed to satisfy these requirements." 
+ , "" + , " e) Convey the object code using peer-to-peer transmission, provided" + , " you inform other peers where the object code and Corresponding" + , " Source of the work are being offered to the general public at no" + , " charge under subsection 6d." + , "" + , " A separable portion of the object code, whose source code is excluded" + , "from the Corresponding Source as a System Library, need not be" + , "included in conveying the object code work." + , "" + , " A \"User Product\" is either (1) a \"consumer product\", which means any" + , "tangible personal property which is normally used for personal, family," + , "or household purposes, or (2) anything designed or sold for incorporation" + , "into a dwelling. In determining whether a product is a consumer product," + , "doubtful cases shall be resolved in favor of coverage. For a particular" + , "product received by a particular user, \"normally used\" refers to a" + , "typical or common use of that class of product, regardless of the status" + , "of the particular user or of the way in which the particular user" + , "actually uses, or expects or is expected to use, the product. A product" + , "is a consumer product regardless of whether the product has substantial" + , "commercial, industrial or non-consumer uses, unless such uses represent" + , "the only significant mode of use of the product." + , "" + , " \"Installation Information\" for a User Product means any methods," + , "procedures, authorization keys, or other information required to install" + , "and execute modified versions of a covered work in that User Product from" + , "a modified version of its Corresponding Source. The information must" + , "suffice to ensure that the continued functioning of the modified object" + , "code is in no case prevented or interfered with solely because" + , "modification has been made." 
+ , "" + , " If you convey an object code work under this section in, or with, or" + , "specifically for use in, a User Product, and the conveying occurs as" + , "part of a transaction in which the right of possession and use of the" + , "User Product is transferred to the recipient in perpetuity or for a" + , "fixed term (regardless of how the transaction is characterized), the" + , "Corresponding Source conveyed under this section must be accompanied" + , "by the Installation Information. But this requirement does not apply" + , "if neither you nor any third party retains the ability to install" + , "modified object code on the User Product (for example, the work has" + , "been installed in ROM)." + , "" + , " The requirement to provide Installation Information does not include a" + , "requirement to continue to provide support service, warranty, or updates" + , "for a work that has been modified or installed by the recipient, or for" + , "the User Product in which it has been modified or installed. Access to a" + , "network may be denied when the modification itself materially and" + , "adversely affects the operation of the network or violates the rules and" + , "protocols for communication across the network." + , "" + , " Corresponding Source conveyed, and Installation Information provided," + , "in accord with this section must be in a format that is publicly" + , "documented (and with an implementation available to the public in" + , "source code form), and must require no special password or key for" + , "unpacking, reading or copying." + , "" + , " 7. Additional Terms." + , "" + , " \"Additional permissions\" are terms that supplement the terms of this" + , "License by making exceptions from one or more of its conditions." + , "Additional permissions that are applicable to the entire Program shall" + , "be treated as though they were included in this License, to the extent" + , "that they are valid under applicable law. 
If additional permissions" + , "apply only to part of the Program, that part may be used separately" + , "under those permissions, but the entire Program remains governed by" + , "this License without regard to the additional permissions." + , "" + , " When you convey a copy of a covered work, you may at your option" + , "remove any additional permissions from that copy, or from any part of" + , "it. (Additional permissions may be written to require their own" + , "removal in certain cases when you modify the work.) You may place" + , "additional permissions on material, added by you to a covered work," + , "for which you have or can give appropriate copyright permission." + , "" + , " Notwithstanding any other provision of this License, for material you" + , "add to a covered work, you may (if authorized by the copyright holders of" + , "that material) supplement the terms of this License with terms:" + , "" + , " a) Disclaiming warranty or limiting liability differently from the" + , " terms of sections 15 and 16 of this License; or" + , "" + , " b) Requiring preservation of specified reasonable legal notices or" + , " author attributions in that material or in the Appropriate Legal" + , " Notices displayed by works containing it; or" + , "" + , " c) Prohibiting misrepresentation of the origin of that material, or" + , " requiring that modified versions of such material be marked in" + , " reasonable ways as different from the original version; or" + , "" + , " d) Limiting the use for publicity purposes of names of licensors or" + , " authors of the material; or" + , "" + , " e) Declining to grant rights under trademark law for use of some" + , " trade names, trademarks, or service marks; or" + , "" + , " f) Requiring indemnification of licensors and authors of that" + , " material by anyone who conveys the material (or modified versions of" + , " it) with contractual assumptions of liability to the recipient, for" + , " any liability that these contractual 
assumptions directly impose on" + , " those licensors and authors." + , "" + , " All other non-permissive additional terms are considered \"further" + , "restrictions\" within the meaning of section 10. If the Program as you" + , "received it, or any part of it, contains a notice stating that it is" + , "governed by this License along with a term that is a further" + , "restriction, you may remove that term. If a license document contains" + , "a further restriction but permits relicensing or conveying under this" + , "License, you may add to a covered work material governed by the terms" + , "of that license document, provided that the further restriction does" + , "not survive such relicensing or conveying." + , "" + , " If you add terms to a covered work in accord with this section, you" + , "must place, in the relevant source files, a statement of the" + , "additional terms that apply to those files, or a notice indicating" + , "where to find the applicable terms." + , "" + , " Additional terms, permissive or non-permissive, may be stated in the" + , "form of a separately written license, or stated as exceptions;" + , "the above requirements apply either way." + , "" + , " 8. Termination." + , "" + , " You may not propagate or modify a covered work except as expressly" + , "provided under this License. Any attempt otherwise to propagate or" + , "modify it is void, and will automatically terminate your rights under" + , "this License (including any patent licenses granted under the third" + , "paragraph of section 11)." + , "" + , " However, if you cease all violation of this License, then your" + , "license from a particular copyright holder is reinstated (a)" + , "provisionally, unless and until the copyright holder explicitly and" + , "finally terminates your license, and (b) permanently, if the copyright" + , "holder fails to notify you of the violation by some reasonable means" + , "prior to 60 days after the cessation." 
+ , "" + , " Moreover, your license from a particular copyright holder is" + , "reinstated permanently if the copyright holder notifies you of the" + , "violation by some reasonable means, this is the first time you have" + , "received notice of violation of this License (for any work) from that" + , "copyright holder, and you cure the violation prior to 30 days after" + , "your receipt of the notice." + , "" + , " Termination of your rights under this section does not terminate the" + , "licenses of parties who have received copies or rights from you under" + , "this License. If your rights have been terminated and not permanently" + , "reinstated, you do not qualify to receive new licenses for the same" + , "material under section 10." + , "" + , " 9. Acceptance Not Required for Having Copies." + , "" + , " You are not required to accept this License in order to receive or" + , "run a copy of the Program. Ancillary propagation of a covered work" + , "occurring solely as a consequence of using peer-to-peer transmission" + , "to receive a copy likewise does not require acceptance. However," + , "nothing other than this License grants you permission to propagate or" + , "modify any covered work. These actions infringe copyright if you do" + , "not accept this License. Therefore, by modifying or propagating a" + , "covered work, you indicate your acceptance of this License to do so." + , "" + , " 10. Automatic Licensing of Downstream Recipients." + , "" + , " Each time you convey a covered work, the recipient automatically" + , "receives a license from the original licensors, to run, modify and" + , "propagate that work, subject to this License. You are not responsible" + , "for enforcing compliance by third parties with this License." + , "" + , " An \"entity transaction\" is a transaction transferring control of an" + , "organization, or substantially all assets of one, or subdividing an" + , "organization, or merging organizations. 
If propagation of a covered" + , "work results from an entity transaction, each party to that" + , "transaction who receives a copy of the work also receives whatever" + , "licenses to the work the party's predecessor in interest had or could" + , "give under the previous paragraph, plus a right to possession of the" + , "Corresponding Source of the work from the predecessor in interest, if" + , "the predecessor has it or can get it with reasonable efforts." + , "" + , " You may not impose any further restrictions on the exercise of the" + , "rights granted or affirmed under this License. For example, you may" + , "not impose a license fee, royalty, or other charge for exercise of" + , "rights granted under this License, and you may not initiate litigation" + , "(including a cross-claim or counterclaim in a lawsuit) alleging that" + , "any patent claim is infringed by making, using, selling, offering for" + , "sale, or importing the Program or any portion of it." + , "" + , " 11. Patents." + , "" + , " A \"contributor\" is a copyright holder who authorizes use under this" + , "License of the Program or a work on which the Program is based. The" + , "work thus licensed is called the contributor's \"contributor version\"." + , "" + , " A contributor's \"essential patent claims\" are all patent claims" + , "owned or controlled by the contributor, whether already acquired or" + , "hereafter acquired, that would be infringed by some manner, permitted" + , "by this License, of making, using, or selling its contributor version," + , "but do not include claims that would be infringed only as a" + , "consequence of further modification of the contributor version. For" + , "purposes of this definition, \"control\" includes the right to grant" + , "patent sublicenses in a manner consistent with the requirements of" + , "this License." 
+ , "" + , " Each contributor grants you a non-exclusive, worldwide, royalty-free" + , "patent license under the contributor's essential patent claims, to" + , "make, use, sell, offer for sale, import and otherwise run, modify and" + , "propagate the contents of its contributor version." + , "" + , " In the following three paragraphs, a \"patent license\" is any express" + , "agreement or commitment, however denominated, not to enforce a patent" + , "(such as an express permission to practice a patent or covenant not to" + , "sue for patent infringement). To \"grant\" such a patent license to a" + , "party means to make such an agreement or commitment not to enforce a" + , "patent against the party." + , "" + , " If you convey a covered work, knowingly relying on a patent license," + , "and the Corresponding Source of the work is not available for anyone" + , "to copy, free of charge and under the terms of this License, through a" + , "publicly available network server or other readily accessible means," + , "then you must either (1) cause the Corresponding Source to be so" + , "available, or (2) arrange to deprive yourself of the benefit of the" + , "patent license for this particular work, or (3) arrange, in a manner" + , "consistent with the requirements of this License, to extend the patent" + , "license to downstream recipients. \"Knowingly relying\" means you have" + , "actual knowledge that, but for the patent license, your conveying the" + , "covered work in a country, or your recipient's use of the covered work" + , "in a country, would infringe one or more identifiable patents in that" + , "country that you have reason to believe are valid." 
+ , "" + , " If, pursuant to or in connection with a single transaction or" + , "arrangement, you convey, or propagate by procuring conveyance of, a" + , "covered work, and grant a patent license to some of the parties" + , "receiving the covered work authorizing them to use, propagate, modify" + , "or convey a specific copy of the covered work, then the patent license" + , "you grant is automatically extended to all recipients of the covered" + , "work and works based on it." + , "" + , " A patent license is \"discriminatory\" if it does not include within" + , "the scope of its coverage, prohibits the exercise of, or is" + , "conditioned on the non-exercise of one or more of the rights that are" + , "specifically granted under this License. You may not convey a covered" + , "work if you are a party to an arrangement with a third party that is" + , "in the business of distributing software, under which you make payment" + , "to the third party based on the extent of your activity of conveying" + , "the work, and under which the third party grants, to any of the" + , "parties who would receive the covered work from you, a discriminatory" + , "patent license (a) in connection with copies of the covered work" + , "conveyed by you (or copies made from those copies), or (b) primarily" + , "for and in connection with specific products or compilations that" + , "contain the covered work, unless you entered into that arrangement," + , "or that patent license was granted, prior to 28 March 2007." + , "" + , " Nothing in this License shall be construed as excluding or limiting" + , "any implied license or other defenses to infringement that may" + , "otherwise be available to you under applicable patent law." + , "" + , " 12. No Surrender of Others' Freedom." + , "" + , " If conditions are imposed on you (whether by court order, agreement or" + , "otherwise) that contradict the conditions of this License, they do not" + , "excuse you from the conditions of this License. 
If you cannot convey a" + , "covered work so as to satisfy simultaneously your obligations under this" + , "License and any other pertinent obligations, then as a consequence you may" + , "not convey it at all. For example, if you agree to terms that obligate you" + , "to collect a royalty for further conveying from those to whom you convey" + , "the Program, the only way you could satisfy both those terms and this" + , "License would be to refrain entirely from conveying the Program." + , "" + , " 13. Use with the GNU Affero General Public License." + , "" + , " Notwithstanding any other provision of this License, you have" + , "permission to link or combine any covered work with a work licensed" + , "under version 3 of the GNU Affero General Public License into a single" + , "combined work, and to convey the resulting work. The terms of this" + , "License will continue to apply to the part which is the covered work," + , "but the special requirements of the GNU Affero General Public License," + , "section 13, concerning interaction through a network will apply to the" + , "combination as such." + , "" + , " 14. Revised Versions of this License." + , "" + , " The Free Software Foundation may publish revised and/or new versions of" + , "the GNU General Public License from time to time. Such new versions will" + , "be similar in spirit to the present version, but may differ in detail to" + , "address new problems or concerns." + , "" + , " Each version is given a distinguishing version number. If the" + , "Program specifies that a certain numbered version of the GNU General" + , "Public License \"or any later version\" applies to it, you have the" + , "option of following the terms and conditions either of that numbered" + , "version or of any later version published by the Free Software" + , "Foundation. 
If the Program does not specify a version number of the" + , "GNU General Public License, you may choose any version ever published" + , "by the Free Software Foundation." + , "" + , " If the Program specifies that a proxy can decide which future" + , "versions of the GNU General Public License can be used, that proxy's" + , "public statement of acceptance of a version permanently authorizes you" + , "to choose that version for the Program." + , "" + , " Later license versions may give you additional or different" + , "permissions. However, no additional obligations are imposed on any" + , "author or copyright holder as a result of your choosing to follow a" + , "later version." + , "" + , " 15. Disclaimer of Warranty." + , "" + , " THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY" + , "APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT" + , "HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY" + , "OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO," + , "THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR" + , "PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM" + , "IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF" + , "ALL NECESSARY SERVICING, REPAIR OR CORRECTION." + , "" + , " 16. Limitation of Liability." 
+ , "" + , " IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING" + , "WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS" + , "THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY" + , "GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE" + , "USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF" + , "DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD" + , "PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS)," + , "EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF" + , "SUCH DAMAGES." + , "" + , " 17. Interpretation of Sections 15 and 16." + , "" + , " If the disclaimer of warranty and limitation of liability provided" + , "above cannot be given local legal effect according to their terms," + , "reviewing courts shall apply local law that most closely approximates" + , "an absolute waiver of all civil liability in connection with the" + , "Program, unless a warranty or assumption of liability accompanies a" + , "copy of the Program in return for a fee." + , "" + , " END OF TERMS AND CONDITIONS" + , "" + , " How to Apply These Terms to Your New Programs" + , "" + , " If you develop a new program, and you want it to be of the greatest" + , "possible use to the public, the best way to achieve this is to make it" + , "free software which everyone can redistribute and change under these terms." + , "" + , " To do so, attach the following notices to the program. It is safest" + , "to attach them to the start of each source file to most effectively" + , "state the exclusion of warranty; and each file should have at least" + , "the \"copyright\" line and a pointer to where the full notice is found." 
+ , "" + , " " + , " Copyright (C) " + , "" + , " This program is free software: you can redistribute it and/or modify" + , " it under the terms of the GNU General Public License as published by" + , " the Free Software Foundation, either version 3 of the License, or" + , " (at your option) any later version." + , "" + , " This program is distributed in the hope that it will be useful," + , " but WITHOUT ANY WARRANTY; without even the implied warranty of" + , " MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the" + , " GNU General Public License for more details." + , "" + , " You should have received a copy of the GNU General Public License" + , " along with this program. If not, see ." + , "" + , "Also add information on how to contact you by electronic and paper mail." + , "" + , " If the program does terminal interaction, make it output a short" + , "notice like this when it starts in an interactive mode:" + , "" + , " Copyright (C) " + , " This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'." + , " This is free software, and you are welcome to redistribute it" + , " under certain conditions; type `show c' for details." + , "" + , "The hypothetical commands `show w' and `show c' should show the appropriate" + , "parts of the General Public License. Of course, your program's commands" + , "might be different; for a GUI interface, you would use an \"about box\"." + , "" + , " You should also get your employer (if you work as a programmer) or school," + , "if any, to sign a \"copyright disclaimer\" for the program, if necessary." + , "For more information on this, and how to apply and follow the GNU GPL, see" + , "." + , "" + , " The GNU General Public License does not permit incorporating your program" + , "into proprietary programs. If your program is a subroutine library, you" + , "may consider it more useful to permit linking proprietary applications with" + , "the library. 
If this is what you want to do, use the GNU Lesser General" + , "Public License instead of this License. But first, please read" + , "." + ] + +agplv3 :: License +agplv3 = unlines + [ " GNU AFFERO GENERAL PUBLIC LICENSE" + , " Version 3, 19 November 2007" + , "" + , " Copyright (C) 2007 Free Software Foundation, Inc. " + , " Everyone is permitted to copy and distribute verbatim copies" + , " of this license document, but changing it is not allowed." + , "" + , " Preamble" + , "" + , " The GNU Affero General Public License is a free, copyleft license for" + , "software and other kinds of works, specifically designed to ensure" + , "cooperation with the community in the case of network server software." + , "" + , " The licenses for most software and other practical works are designed" + , "to take away your freedom to share and change the works. By contrast," + , "our General Public Licenses are intended to guarantee your freedom to" + , "share and change all versions of a program--to make sure it remains free" + , "software for all its users." + , "" + , " When we speak of free software, we are referring to freedom, not" + , "price. Our General Public Licenses are designed to make sure that you" + , "have the freedom to distribute copies of free software (and charge for" + , "them if you wish), that you receive source code or can get it if you" + , "want it, that you can change the software or use pieces of it in new" + , "free programs, and that you know you can do these things." + , "" + , " Developers that use our General Public Licenses protect your rights" + , "with two steps: (1) assert copyright on the software, and (2) offer" + , "you this License which gives you legal permission to copy, distribute" + , "and/or modify the software." 
+ , "" + , " A secondary benefit of defending all users' freedom is that" + , "improvements made in alternate versions of the program, if they" + , "receive widespread use, become available for other developers to" + , "incorporate. Many developers of free software are heartened and" + , "encouraged by the resulting cooperation. However, in the case of" + , "software used on network servers, this result may fail to come about." + , "The GNU General Public License permits making a modified version and" + , "letting the public access it on a server without ever releasing its" + , "source code to the public." + , "" + , " The GNU Affero General Public License is designed specifically to" + , "ensure that, in such cases, the modified source code becomes available" + , "to the community. It requires the operator of a network server to" + , "provide the source code of the modified version running there to the" + , "users of that server. Therefore, public use of a modified version, on" + , "a publicly accessible server, gives the public access to the source" + , "code of the modified version." + , "" + , " An older license, called the Affero General Public License and" + , "published by Affero, was designed to accomplish similar goals. This is" + , "a different license, not a version of the Affero GPL, but Affero has" + , "released a new version of the Affero GPL which permits relicensing under" + , "this license." + , "" + , " The precise terms and conditions for copying, distribution and" + , "modification follow." + , "" + , " TERMS AND CONDITIONS" + , "" + , " 0. Definitions." + , "" + , " \"This License\" refers to version 3 of the GNU Affero General Public License." + , "" + , " \"Copyright\" also means copyright-like laws that apply to other kinds of" + , "works, such as semiconductor masks." + , "" + , " \"The Program\" refers to any copyrightable work licensed under this" + , "License. Each licensee is addressed as \"you\". 
\"Licensees\" and" + , "\"recipients\" may be individuals or organizations." + , "" + , " To \"modify\" a work means to copy from or adapt all or part of the work" + , "in a fashion requiring copyright permission, other than the making of an" + , "exact copy. The resulting work is called a \"modified version\" of the" + , "earlier work or a work \"based on\" the earlier work." + , "" + , " A \"covered work\" means either the unmodified Program or a work based" + , "on the Program." + , "" + , " To \"propagate\" a work means to do anything with it that, without" + , "permission, would make you directly or secondarily liable for" + , "infringement under applicable copyright law, except executing it on a" + , "computer or modifying a private copy. Propagation includes copying," + , "distribution (with or without modification), making available to the" + , "public, and in some countries other activities as well." + , "" + , " To \"convey\" a work means any kind of propagation that enables other" + , "parties to make or receive copies. Mere interaction with a user through" + , "a computer network, with no transfer of a copy, is not conveying." + , "" + , " An interactive user interface displays \"Appropriate Legal Notices\"" + , "to the extent that it includes a convenient and prominently visible" + , "feature that (1) displays an appropriate copyright notice, and (2)" + , "tells the user that there is no warranty for the work (except to the" + , "extent that warranties are provided), that licensees may convey the" + , "work under this License, and how to view a copy of this License. If" + , "the interface presents a list of user commands or options, such as a" + , "menu, a prominent item in the list meets this criterion." + , "" + , " 1. Source Code." + , "" + , " The \"source code\" for a work means the preferred form of the work" + , "for making modifications to it. \"Object code\" means any non-source" + , "form of a work." 
+ , "" + , " A \"Standard Interface\" means an interface that either is an official" + , "standard defined by a recognized standards body, or, in the case of" + , "interfaces specified for a particular programming language, one that" + , "is widely used among developers working in that language." + , "" + , " The \"System Libraries\" of an executable work include anything, other" + , "than the work as a whole, that (a) is included in the normal form of" + , "packaging a Major Component, but which is not part of that Major" + , "Component, and (b) serves only to enable use of the work with that" + , "Major Component, or to implement a Standard Interface for which an" + , "implementation is available to the public in source code form. A" + , "\"Major Component\", in this context, means a major essential component" + , "(kernel, window system, and so on) of the specific operating system" + , "(if any) on which the executable work runs, or a compiler used to" + , "produce the work, or an object code interpreter used to run it." + , "" + , " The \"Corresponding Source\" for a work in object code form means all" + , "the source code needed to generate, install, and (for an executable" + , "work) run the object code and to modify the work, including scripts to" + , "control those activities. However, it does not include the work's" + , "System Libraries, or general-purpose tools or generally available free" + , "programs which are used unmodified in performing those activities but" + , "which are not part of the work. For example, Corresponding Source" + , "includes interface definition files associated with source files for" + , "the work, and the source code for shared libraries and dynamically" + , "linked subprograms that the work is specifically designed to require," + , "such as by intimate data communication or control flow between those" + , "subprograms and other parts of the work." 
+ , "" + , " The Corresponding Source need not include anything that users" + , "can regenerate automatically from other parts of the Corresponding" + , "Source." + , "" + , " The Corresponding Source for a work in source code form is that" + , "same work." + , "" + , " 2. Basic Permissions." + , "" + , " All rights granted under this License are granted for the term of" + , "copyright on the Program, and are irrevocable provided the stated" + , "conditions are met. This License explicitly affirms your unlimited" + , "permission to run the unmodified Program. The output from running a" + , "covered work is covered by this License only if the output, given its" + , "content, constitutes a covered work. This License acknowledges your" + , "rights of fair use or other equivalent, as provided by copyright law." + , "" + , " You may make, run and propagate covered works that you do not" + , "convey, without conditions so long as your license otherwise remains" + , "in force. You may convey covered works to others for the sole purpose" + , "of having them make modifications exclusively for you, or provide you" + , "with facilities for running those works, provided that you comply with" + , "the terms of this License in conveying all material for which you do" + , "not control copyright. Those thus making or running the covered works" + , "for you must do so exclusively on your behalf, under your direction" + , "and control, on terms that prohibit them from making any copies of" + , "your copyrighted material outside their relationship with you." + , "" + , " Conveying under any other circumstances is permitted solely under" + , "the conditions stated below. Sublicensing is not allowed; section 10" + , "makes it unnecessary." + , "" + , " 3. Protecting Users' Legal Rights From Anti-Circumvention Law." 
+ , "" + , " No covered work shall be deemed part of an effective technological" + , "measure under any applicable law fulfilling obligations under article" + , "11 of the WIPO copyright treaty adopted on 20 December 1996, or" + , "similar laws prohibiting or restricting circumvention of such" + , "measures." + , "" + , " When you convey a covered work, you waive any legal power to forbid" + , "circumvention of technological measures to the extent such circumvention" + , "is effected by exercising rights under this License with respect to" + , "the covered work, and you disclaim any intention to limit operation or" + , "modification of the work as a means of enforcing, against the work's" + , "users, your or third parties' legal rights to forbid circumvention of" + , "technological measures." + , "" + , " 4. Conveying Verbatim Copies." + , "" + , " You may convey verbatim copies of the Program's source code as you" + , "receive it, in any medium, provided that you conspicuously and" + , "appropriately publish on each copy an appropriate copyright notice;" + , "keep intact all notices stating that this License and any" + , "non-permissive terms added in accord with section 7 apply to the code;" + , "keep intact all notices of the absence of any warranty; and give all" + , "recipients a copy of this License along with the Program." + , "" + , " You may charge any price or no price for each copy that you convey," + , "and you may offer support or warranty protection for a fee." + , "" + , " 5. Conveying Modified Source Versions." + , "" + , " You may convey a work based on the Program, or the modifications to" + , "produce it from the Program, in the form of source code under the" + , "terms of section 4, provided that you also meet all of these conditions:" + , "" + , " a) The work must carry prominent notices stating that you modified" + , " it, and giving a relevant date." 
+ , "" + , " b) The work must carry prominent notices stating that it is" + , " released under this License and any conditions added under section" + , " 7. This requirement modifies the requirement in section 4 to" + , " \"keep intact all notices\"." + , "" + , " c) You must license the entire work, as a whole, under this" + , " License to anyone who comes into possession of a copy. This" + , " License will therefore apply, along with any applicable section 7" + , " additional terms, to the whole of the work, and all its parts," + , " regardless of how they are packaged. This License gives no" + , " permission to license the work in any other way, but it does not" + , " invalidate such permission if you have separately received it." + , "" + , " d) If the work has interactive user interfaces, each must display" + , " Appropriate Legal Notices; however, if the Program has interactive" + , " interfaces that do not display Appropriate Legal Notices, your" + , " work need not make them do so." + , "" + , " A compilation of a covered work with other separate and independent" + , "works, which are not by their nature extensions of the covered work," + , "and which are not combined with it such as to form a larger program," + , "in or on a volume of a storage or distribution medium, is called an" + , "\"aggregate\" if the compilation and its resulting copyright are not" + , "used to limit the access or legal rights of the compilation's users" + , "beyond what the individual works permit. Inclusion of a covered work" + , "in an aggregate does not cause this License to apply to the other" + , "parts of the aggregate." + , "" + , " 6. Conveying Non-Source Forms." 
+ , "" + , " You may convey a covered work in object code form under the terms" + , "of sections 4 and 5, provided that you also convey the" + , "machine-readable Corresponding Source under the terms of this License," + , "in one of these ways:" + , "" + , " a) Convey the object code in, or embodied in, a physical product" + , " (including a physical distribution medium), accompanied by the" + , " Corresponding Source fixed on a durable physical medium" + , " customarily used for software interchange." + , "" + , " b) Convey the object code in, or embodied in, a physical product" + , " (including a physical distribution medium), accompanied by a" + , " written offer, valid for at least three years and valid for as" + , " long as you offer spare parts or customer support for that product" + , " model, to give anyone who possesses the object code either (1) a" + , " copy of the Corresponding Source for all the software in the" + , " product that is covered by this License, on a durable physical" + , " medium customarily used for software interchange, for a price no" + , " more than your reasonable cost of physically performing this" + , " conveying of source, or (2) access to copy the" + , " Corresponding Source from a network server at no charge." + , "" + , " c) Convey individual copies of the object code with a copy of the" + , " written offer to provide the Corresponding Source. This" + , " alternative is allowed only occasionally and noncommercially, and" + , " only if you received the object code with such an offer, in accord" + , " with subsection 6b." + , "" + , " d) Convey the object code by offering access from a designated" + , " place (gratis or for a charge), and offer equivalent access to the" + , " Corresponding Source in the same way through the same place at no" + , " further charge. You need not require recipients to copy the" + , " Corresponding Source along with the object code. 
If the place to" + , " copy the object code is a network server, the Corresponding Source" + , " may be on a different server (operated by you or a third party)" + , " that supports equivalent copying facilities, provided you maintain" + , " clear directions next to the object code saying where to find the" + , " Corresponding Source. Regardless of what server hosts the" + , " Corresponding Source, you remain obligated to ensure that it is" + , " available for as long as needed to satisfy these requirements." + , "" + , " e) Convey the object code using peer-to-peer transmission, provided" + , " you inform other peers where the object code and Corresponding" + , " Source of the work are being offered to the general public at no" + , " charge under subsection 6d." + , "" + , " A separable portion of the object code, whose source code is excluded" + , "from the Corresponding Source as a System Library, need not be" + , "included in conveying the object code work." + , "" + , " A \"User Product\" is either (1) a \"consumer product\", which means any" + , "tangible personal property which is normally used for personal, family," + , "or household purposes, or (2) anything designed or sold for incorporation" + , "into a dwelling. In determining whether a product is a consumer product," + , "doubtful cases shall be resolved in favor of coverage. For a particular" + , "product received by a particular user, \"normally used\" refers to a" + , "typical or common use of that class of product, regardless of the status" + , "of the particular user or of the way in which the particular user" + , "actually uses, or expects or is expected to use, the product. A product" + , "is a consumer product regardless of whether the product has substantial" + , "commercial, industrial or non-consumer uses, unless such uses represent" + , "the only significant mode of use of the product." 
+ , "" + , " \"Installation Information\" for a User Product means any methods," + , "procedures, authorization keys, or other information required to install" + , "and execute modified versions of a covered work in that User Product from" + , "a modified version of its Corresponding Source. The information must" + , "suffice to ensure that the continued functioning of the modified object" + , "code is in no case prevented or interfered with solely because" + , "modification has been made." + , "" + , " If you convey an object code work under this section in, or with, or" + , "specifically for use in, a User Product, and the conveying occurs as" + , "part of a transaction in which the right of possession and use of the" + , "User Product is transferred to the recipient in perpetuity or for a" + , "fixed term (regardless of how the transaction is characterized), the" + , "Corresponding Source conveyed under this section must be accompanied" + , "by the Installation Information. But this requirement does not apply" + , "if neither you nor any third party retains the ability to install" + , "modified object code on the User Product (for example, the work has" + , "been installed in ROM)." + , "" + , " The requirement to provide Installation Information does not include a" + , "requirement to continue to provide support service, warranty, or updates" + , "for a work that has been modified or installed by the recipient, or for" + , "the User Product in which it has been modified or installed. Access to a" + , "network may be denied when the modification itself materially and" + , "adversely affects the operation of the network or violates the rules and" + , "protocols for communication across the network." 
+ , "" + , " Corresponding Source conveyed, and Installation Information provided," + , "in accord with this section must be in a format that is publicly" + , "documented (and with an implementation available to the public in" + , "source code form), and must require no special password or key for" + , "unpacking, reading or copying." + , "" + , " 7. Additional Terms." + , "" + , " \"Additional permissions\" are terms that supplement the terms of this" + , "License by making exceptions from one or more of its conditions." + , "Additional permissions that are applicable to the entire Program shall" + , "be treated as though they were included in this License, to the extent" + , "that they are valid under applicable law. If additional permissions" + , "apply only to part of the Program, that part may be used separately" + , "under those permissions, but the entire Program remains governed by" + , "this License without regard to the additional permissions." + , "" + , " When you convey a copy of a covered work, you may at your option" + , "remove any additional permissions from that copy, or from any part of" + , "it. (Additional permissions may be written to require their own" + , "removal in certain cases when you modify the work.) You may place" + , "additional permissions on material, added by you to a covered work," + , "for which you have or can give appropriate copyright permission." 
+ , "" + , " Notwithstanding any other provision of this License, for material you" + , "add to a covered work, you may (if authorized by the copyright holders of" + , "that material) supplement the terms of this License with terms:" + , "" + , " a) Disclaiming warranty or limiting liability differently from the" + , " terms of sections 15 and 16 of this License; or" + , "" + , " b) Requiring preservation of specified reasonable legal notices or" + , " author attributions in that material or in the Appropriate Legal" + , " Notices displayed by works containing it; or" + , "" + , " c) Prohibiting misrepresentation of the origin of that material, or" + , " requiring that modified versions of such material be marked in" + , " reasonable ways as different from the original version; or" + , "" + , " d) Limiting the use for publicity purposes of names of licensors or" + , " authors of the material; or" + , "" + , " e) Declining to grant rights under trademark law for use of some" + , " trade names, trademarks, or service marks; or" + , "" + , " f) Requiring indemnification of licensors and authors of that" + , " material by anyone who conveys the material (or modified versions of" + , " it) with contractual assumptions of liability to the recipient, for" + , " any liability that these contractual assumptions directly impose on" + , " those licensors and authors." + , "" + , " All other non-permissive additional terms are considered \"further" + , "restrictions\" within the meaning of section 10. If the Program as you" + , "received it, or any part of it, contains a notice stating that it is" + , "governed by this License along with a term that is a further" + , "restriction, you may remove that term. 
If a license document contains" + , "a further restriction but permits relicensing or conveying under this" + , "License, you may add to a covered work material governed by the terms" + , "of that license document, provided that the further restriction does" + , "not survive such relicensing or conveying." + , "" + , " If you add terms to a covered work in accord with this section, you" + , "must place, in the relevant source files, a statement of the" + , "additional terms that apply to those files, or a notice indicating" + , "where to find the applicable terms." + , "" + , " Additional terms, permissive or non-permissive, may be stated in the" + , "form of a separately written license, or stated as exceptions;" + , "the above requirements apply either way." + , "" + , " 8. Termination." + , "" + , " You may not propagate or modify a covered work except as expressly" + , "provided under this License. Any attempt otherwise to propagate or" + , "modify it is void, and will automatically terminate your rights under" + , "this License (including any patent licenses granted under the third" + , "paragraph of section 11)." + , "" + , " However, if you cease all violation of this License, then your" + , "license from a particular copyright holder is reinstated (a)" + , "provisionally, unless and until the copyright holder explicitly and" + , "finally terminates your license, and (b) permanently, if the copyright" + , "holder fails to notify you of the violation by some reasonable means" + , "prior to 60 days after the cessation." + , "" + , " Moreover, your license from a particular copyright holder is" + , "reinstated permanently if the copyright holder notifies you of the" + , "violation by some reasonable means, this is the first time you have" + , "received notice of violation of this License (for any work) from that" + , "copyright holder, and you cure the violation prior to 30 days after" + , "your receipt of the notice." 
+ , "" + , " Termination of your rights under this section does not terminate the" + , "licenses of parties who have received copies or rights from you under" + , "this License. If your rights have been terminated and not permanently" + , "reinstated, you do not qualify to receive new licenses for the same" + , "material under section 10." + , "" + , " 9. Acceptance Not Required for Having Copies." + , "" + , " You are not required to accept this License in order to receive or" + , "run a copy of the Program. Ancillary propagation of a covered work" + , "occurring solely as a consequence of using peer-to-peer transmission" + , "to receive a copy likewise does not require acceptance. However," + , "nothing other than this License grants you permission to propagate or" + , "modify any covered work. These actions infringe copyright if you do" + , "not accept this License. Therefore, by modifying or propagating a" + , "covered work, you indicate your acceptance of this License to do so." + , "" + , " 10. Automatic Licensing of Downstream Recipients." + , "" + , " Each time you convey a covered work, the recipient automatically" + , "receives a license from the original licensors, to run, modify and" + , "propagate that work, subject to this License. You are not responsible" + , "for enforcing compliance by third parties with this License." + , "" + , " An \"entity transaction\" is a transaction transferring control of an" + , "organization, or substantially all assets of one, or subdividing an" + , "organization, or merging organizations. 
If propagation of a covered" + , "work results from an entity transaction, each party to that" + , "transaction who receives a copy of the work also receives whatever" + , "licenses to the work the party's predecessor in interest had or could" + , "give under the previous paragraph, plus a right to possession of the" + , "Corresponding Source of the work from the predecessor in interest, if" + , "the predecessor has it or can get it with reasonable efforts." + , "" + , " You may not impose any further restrictions on the exercise of the" + , "rights granted or affirmed under this License. For example, you may" + , "not impose a license fee, royalty, or other charge for exercise of" + , "rights granted under this License, and you may not initiate litigation" + , "(including a cross-claim or counterclaim in a lawsuit) alleging that" + , "any patent claim is infringed by making, using, selling, offering for" + , "sale, or importing the Program or any portion of it." + , "" + , " 11. Patents." + , "" + , " A \"contributor\" is a copyright holder who authorizes use under this" + , "License of the Program or a work on which the Program is based. The" + , "work thus licensed is called the contributor's \"contributor version\"." + , "" + , " A contributor's \"essential patent claims\" are all patent claims" + , "owned or controlled by the contributor, whether already acquired or" + , "hereafter acquired, that would be infringed by some manner, permitted" + , "by this License, of making, using, or selling its contributor version," + , "but do not include claims that would be infringed only as a" + , "consequence of further modification of the contributor version. For" + , "purposes of this definition, \"control\" includes the right to grant" + , "patent sublicenses in a manner consistent with the requirements of" + , "this License." 
+ , "" + , " Each contributor grants you a non-exclusive, worldwide, royalty-free" + , "patent license under the contributor's essential patent claims, to" + , "make, use, sell, offer for sale, import and otherwise run, modify and" + , "propagate the contents of its contributor version." + , "" + , " In the following three paragraphs, a \"patent license\" is any express" + , "agreement or commitment, however denominated, not to enforce a patent" + , "(such as an express permission to practice a patent or covenant not to" + , "sue for patent infringement). To \"grant\" such a patent license to a" + , "party means to make such an agreement or commitment not to enforce a" + , "patent against the party." + , "" + , " If you convey a covered work, knowingly relying on a patent license," + , "and the Corresponding Source of the work is not available for anyone" + , "to copy, free of charge and under the terms of this License, through a" + , "publicly available network server or other readily accessible means," + , "then you must either (1) cause the Corresponding Source to be so" + , "available, or (2) arrange to deprive yourself of the benefit of the" + , "patent license for this particular work, or (3) arrange, in a manner" + , "consistent with the requirements of this License, to extend the patent" + , "license to downstream recipients. \"Knowingly relying\" means you have" + , "actual knowledge that, but for the patent license, your conveying the" + , "covered work in a country, or your recipient's use of the covered work" + , "in a country, would infringe one or more identifiable patents in that" + , "country that you have reason to believe are valid." 
+ , "" + , " If, pursuant to or in connection with a single transaction or" + , "arrangement, you convey, or propagate by procuring conveyance of, a" + , "covered work, and grant a patent license to some of the parties" + , "receiving the covered work authorizing them to use, propagate, modify" + , "or convey a specific copy of the covered work, then the patent license" + , "you grant is automatically extended to all recipients of the covered" + , "work and works based on it." + , "" + , " A patent license is \"discriminatory\" if it does not include within" + , "the scope of its coverage, prohibits the exercise of, or is" + , "conditioned on the non-exercise of one or more of the rights that are" + , "specifically granted under this License. You may not convey a covered" + , "work if you are a party to an arrangement with a third party that is" + , "in the business of distributing software, under which you make payment" + , "to the third party based on the extent of your activity of conveying" + , "the work, and under which the third party grants, to any of the" + , "parties who would receive the covered work from you, a discriminatory" + , "patent license (a) in connection with copies of the covered work" + , "conveyed by you (or copies made from those copies), or (b) primarily" + , "for and in connection with specific products or compilations that" + , "contain the covered work, unless you entered into that arrangement," + , "or that patent license was granted, prior to 28 March 2007." + , "" + , " Nothing in this License shall be construed as excluding or limiting" + , "any implied license or other defenses to infringement that may" + , "otherwise be available to you under applicable patent law." + , "" + , " 12. No Surrender of Others' Freedom." + , "" + , " If conditions are imposed on you (whether by court order, agreement or" + , "otherwise) that contradict the conditions of this License, they do not" + , "excuse you from the conditions of this License. 
If you cannot convey a" + , "covered work so as to satisfy simultaneously your obligations under this" + , "License and any other pertinent obligations, then as a consequence you may" + , "not convey it at all. For example, if you agree to terms that obligate you" + , "to collect a royalty for further conveying from those to whom you convey" + , "the Program, the only way you could satisfy both those terms and this" + , "License would be to refrain entirely from conveying the Program." + , "" + , " 13. Remote Network Interaction; Use with the GNU General Public License." + , "" + , " Notwithstanding any other provision of this License, if you modify the" + , "Program, your modified version must prominently offer all users" + , "interacting with it remotely through a computer network (if your version" + , "supports such interaction) an opportunity to receive the Corresponding" + , "Source of your version by providing access to the Corresponding Source" + , "from a network server at no charge, through some standard or customary" + , "means of facilitating copying of software. This Corresponding Source" + , "shall include the Corresponding Source for any work covered by version 3" + , "of the GNU General Public License that is incorporated pursuant to the" + , "following paragraph." + , "" + , " Notwithstanding any other provision of this License, you have" + , "permission to link or combine any covered work with a work licensed" + , "under version 3 of the GNU General Public License into a single" + , "combined work, and to convey the resulting work. The terms of this" + , "License will continue to apply to the part which is the covered work," + , "but the work with which it is combined will remain governed by version" + , "3 of the GNU General Public License." + , "" + , " 14. Revised Versions of this License." + , "" + , " The Free Software Foundation may publish revised and/or new versions of" + , "the GNU Affero General Public License from time to time. 
Such new versions" + , "will be similar in spirit to the present version, but may differ in detail to" + , "address new problems or concerns." + , "" + , " Each version is given a distinguishing version number. If the" + , "Program specifies that a certain numbered version of the GNU Affero General" + , "Public License \"or any later version\" applies to it, you have the" + , "option of following the terms and conditions either of that numbered" + , "version or of any later version published by the Free Software" + , "Foundation. If the Program does not specify a version number of the" + , "GNU Affero General Public License, you may choose any version ever published" + , "by the Free Software Foundation." + , "" + , " If the Program specifies that a proxy can decide which future" + , "versions of the GNU Affero General Public License can be used, that proxy's" + , "public statement of acceptance of a version permanently authorizes you" + , "to choose that version for the Program." + , "" + , " Later license versions may give you additional or different" + , "permissions. However, no additional obligations are imposed on any" + , "author or copyright holder as a result of your choosing to follow a" + , "later version." + , "" + , " 15. Disclaimer of Warranty." + , "" + , " THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY" + , "APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT" + , "HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY" + , "OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO," + , "THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR" + , "PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM" + , "IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF" + , "ALL NECESSARY SERVICING, REPAIR OR CORRECTION." + , "" + , " 16. Limitation of Liability." 
+ , "" + , " IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING" + , "WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS" + , "THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY" + , "GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE" + , "USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF" + , "DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD" + , "PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS)," + , "EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF" + , "SUCH DAMAGES." + , "" + , " 17. Interpretation of Sections 15 and 16." + , "" + , " If the disclaimer of warranty and limitation of liability provided" + , "above cannot be given local legal effect according to their terms," + , "reviewing courts shall apply local law that most closely approximates" + , "an absolute waiver of all civil liability in connection with the" + , "Program, unless a warranty or assumption of liability accompanies a" + , "copy of the Program in return for a fee." + , "" + , " END OF TERMS AND CONDITIONS" + , "" + , " How to Apply These Terms to Your New Programs" + , "" + , " If you develop a new program, and you want it to be of the greatest" + , "possible use to the public, the best way to achieve this is to make it" + , "free software which everyone can redistribute and change under these terms." + , "" + , " To do so, attach the following notices to the program. It is safest" + , "to attach them to the start of each source file to most effectively" + , "state the exclusion of warranty; and each file should have at least" + , "the \"copyright\" line and a pointer to where the full notice is found." 
+ , "" + , " " + , " Copyright (C) " + , "" + , " This program is free software: you can redistribute it and/or modify" + , " it under the terms of the GNU Affero General Public License as published by" + , " the Free Software Foundation, either version 3 of the License, or" + , " (at your option) any later version." + , "" + , " This program is distributed in the hope that it will be useful," + , " but WITHOUT ANY WARRANTY; without even the implied warranty of" + , " MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the" + , " GNU Affero General Public License for more details." + , "" + , " You should have received a copy of the GNU Affero General Public License" + , " along with this program. If not, see ." + , "" + , "Also add information on how to contact you by electronic and paper mail." + , "" + , " If your software can interact with users remotely through a computer" + , "network, you should also make sure that it provides a way for users to" + , "get its source. For example, if your program is a web application, its" + , "interface could display a \"Source\" link that leads users to an archive" + , "of the code. There are many ways you could offer source, and different" + , "solutions will be better for different programs; see section 13 for the" + , "specific requirements." + , "" + , " You should also get your employer (if you work as a programmer) or school," + , "if any, to sign a \"copyright disclaimer\" for the program, if necessary." + , "For more information on this, and how to apply and follow the GNU AGPL, see" + , "." + ] + +lgpl21 :: License +lgpl21 = unlines + [ " GNU LESSER GENERAL PUBLIC LICENSE" + , " Version 2.1, February 1999" + , "" + , " Copyright (C) 1991, 1999 Free Software Foundation, Inc." + , " 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA" + , " Everyone is permitted to copy and distribute verbatim copies" + , " of this license document, but changing it is not allowed." 
+ , "" + , "[This is the first released version of the Lesser GPL. It also counts" + , " as the successor of the GNU Library Public License, version 2, hence" + , " the version number 2.1.]" + , "" + , " Preamble" + , "" + , " The licenses for most software are designed to take away your" + , "freedom to share and change it. By contrast, the GNU General Public" + , "Licenses are intended to guarantee your freedom to share and change" + , "free software--to make sure the software is free for all its users." + , "" + , " This license, the Lesser General Public License, applies to some" + , "specially designated software packages--typically libraries--of the" + , "Free Software Foundation and other authors who decide to use it. You" + , "can use it too, but we suggest you first think carefully about whether" + , "this license or the ordinary General Public License is the better" + , "strategy to use in any particular case, based on the explanations below." + , "" + , " When we speak of free software, we are referring to freedom of use," + , "not price. Our General Public Licenses are designed to make sure that" + , "you have the freedom to distribute copies of free software (and charge" + , "for this service if you wish); that you receive source code or can get" + , "it if you want it; that you can change the software and use pieces of" + , "it in new free programs; and that you are informed that you can do" + , "these things." + , "" + , " To protect your rights, we need to make restrictions that forbid" + , "distributors to deny you these rights or to ask you to surrender these" + , "rights. These restrictions translate to certain responsibilities for" + , "you if you distribute copies of the library or if you modify it." + , "" + , " For example, if you distribute copies of the library, whether gratis" + , "or for a fee, you must give the recipients all the rights that we gave" + , "you. You must make sure that they, too, receive or can get the source" + , "code. 
If you link other code with the library, you must provide" + , "complete object files to the recipients, so that they can relink them" + , "with the library after making changes to the library and recompiling" + , "it. And you must show them these terms so they know their rights." + , "" + , " We protect your rights with a two-step method: (1) we copyright the" + , "library, and (2) we offer you this license, which gives you legal" + , "permission to copy, distribute and/or modify the library." + , "" + , " To protect each distributor, we want to make it very clear that" + , "there is no warranty for the free library. Also, if the library is" + , "modified by someone else and passed on, the recipients should know" + , "that what they have is not the original version, so that the original" + , "author's reputation will not be affected by problems that might be" + , "introduced by others." + , "" + , " Finally, software patents pose a constant threat to the existence of" + , "any free program. We wish to make sure that a company cannot" + , "effectively restrict the users of a free program by obtaining a" + , "restrictive license from a patent holder. Therefore, we insist that" + , "any patent license obtained for a version of the library must be" + , "consistent with the full freedom of use specified in this license." + , "" + , " Most GNU software, including some libraries, is covered by the" + , "ordinary GNU General Public License. This license, the GNU Lesser" + , "General Public License, applies to certain designated libraries, and" + , "is quite different from the ordinary General Public License. We use" + , "this license for certain libraries in order to permit linking those" + , "libraries into non-free programs." + , "" + , " When a program is linked with a library, whether statically or using" + , "a shared library, the combination of the two is legally speaking a" + , "combined work, a derivative of the original library. 
The ordinary" + , "General Public License therefore permits such linking only if the" + , "entire combination fits its criteria of freedom. The Lesser General" + , "Public License permits more lax criteria for linking other code with" + , "the library." + , "" + , " We call this license the \"Lesser\" General Public License because it" + , "does Less to protect the user's freedom than the ordinary General" + , "Public License. It also provides other free software developers Less" + , "of an advantage over competing non-free programs. These disadvantages" + , "are the reason we use the ordinary General Public License for many" + , "libraries. However, the Lesser license provides advantages in certain" + , "special circumstances." + , "" + , " For example, on rare occasions, there may be a special need to" + , "encourage the widest possible use of a certain library, so that it becomes" + , "a de-facto standard. To achieve this, non-free programs must be" + , "allowed to use the library. A more frequent case is that a free" + , "library does the same job as widely used non-free libraries. In this" + , "case, there is little to gain by limiting the free library to free" + , "software only, so we use the Lesser General Public License." + , "" + , " In other cases, permission to use a particular library in non-free" + , "programs enables a greater number of people to use a large body of" + , "free software. For example, permission to use the GNU C Library in" + , "non-free programs enables many more people to use the whole GNU" + , "operating system, as well as its variant, the GNU/Linux operating" + , "system." + , "" + , " Although the Lesser General Public License is Less protective of the" + , "users' freedom, it does ensure that the user of a program that is" + , "linked with the Library has the freedom and the wherewithal to run" + , "that program using a modified version of the Library." 
+ , "" + , " The precise terms and conditions for copying, distribution and" + , "modification follow. Pay close attention to the difference between a" + , "\"work based on the library\" and a \"work that uses the library\". The" + , "former contains code derived from the library, whereas the latter must" + , "be combined with the library in order to run." + , "" + , " GNU LESSER GENERAL PUBLIC LICENSE" + , " TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION" + , "" + , " 0. This License Agreement applies to any software library or other" + , "program which contains a notice placed by the copyright holder or" + , "other authorized party saying it may be distributed under the terms of" + , "this Lesser General Public License (also called \"this License\")." + , "Each licensee is addressed as \"you\"." + , "" + , " A \"library\" means a collection of software functions and/or data" + , "prepared so as to be conveniently linked with application programs" + , "(which use some of those functions and data) to form executables." + , "" + , " The \"Library\", below, refers to any such software library or work" + , "which has been distributed under these terms. A \"work based on the" + , "Library\" means either the Library or any derivative work under" + , "copyright law: that is to say, a work containing the Library or a" + , "portion of it, either verbatim or with modifications and/or translated" + , "straightforwardly into another language. (Hereinafter, translation is" + , "included without limitation in the term \"modification\".)" + , "" + , " \"Source code\" for a work means the preferred form of the work for" + , "making modifications to it. For a library, complete source code means" + , "all the source code for all modules it contains, plus any associated" + , "interface definition files, plus the scripts used to control compilation" + , "and installation of the library." 
+ , "" + , " Activities other than copying, distribution and modification are not" + , "covered by this License; they are outside its scope. The act of" + , "running a program using the Library is not restricted, and output from" + , "such a program is covered only if its contents constitute a work based" + , "on the Library (independent of the use of the Library in a tool for" + , "writing it). Whether that is true depends on what the Library does" + , "and what the program that uses the Library does." + , "" + , " 1. You may copy and distribute verbatim copies of the Library's" + , "complete source code as you receive it, in any medium, provided that" + , "you conspicuously and appropriately publish on each copy an" + , "appropriate copyright notice and disclaimer of warranty; keep intact" + , "all the notices that refer to this License and to the absence of any" + , "warranty; and distribute a copy of this License along with the" + , "Library." + , "" + , " You may charge a fee for the physical act of transferring a copy," + , "and you may at your option offer warranty protection in exchange for a" + , "fee." + , "" + , " 2. You may modify your copy or copies of the Library or any portion" + , "of it, thus forming a work based on the Library, and copy and" + , "distribute such modifications or work under the terms of Section 1" + , "above, provided that you also meet all of these conditions:" + , "" + , " a) The modified work must itself be a software library." + , "" + , " b) You must cause the files modified to carry prominent notices" + , " stating that you changed the files and the date of any change." + , "" + , " c) You must cause the whole of the work to be licensed at no" + , " charge to all third parties under the terms of this License." 
+ , "" + , " d) If a facility in the modified Library refers to a function or a" + , " table of data to be supplied by an application program that uses" + , " the facility, other than as an argument passed when the facility" + , " is invoked, then you must make a good faith effort to ensure that," + , " in the event an application does not supply such function or" + , " table, the facility still operates, and performs whatever part of" + , " its purpose remains meaningful." + , "" + , " (For example, a function in a library to compute square roots has" + , " a purpose that is entirely well-defined independent of the" + , " application. Therefore, Subsection 2d requires that any" + , " application-supplied function or table used by this function must" + , " be optional: if the application does not supply it, the square" + , " root function must still compute square roots.)" + , "" + , "These requirements apply to the modified work as a whole. If" + , "identifiable sections of that work are not derived from the Library," + , "and can be reasonably considered independent and separate works in" + , "themselves, then this License, and its terms, do not apply to those" + , "sections when you distribute them as separate works. But when you" + , "distribute the same sections as part of a whole which is a work based" + , "on the Library, the distribution of the whole must be on the terms of" + , "this License, whose permissions for other licensees extend to the" + , "entire whole, and thus to each and every part regardless of who wrote" + , "it." + , "" + , "Thus, it is not the intent of this section to claim rights or contest" + , "your rights to work written entirely by you; rather, the intent is to" + , "exercise the right to control the distribution of derivative or" + , "collective works based on the Library." 
+ , "" + , "In addition, mere aggregation of another work not based on the Library" + , "with the Library (or with a work based on the Library) on a volume of" + , "a storage or distribution medium does not bring the other work under" + , "the scope of this License." + , "" + , " 3. You may opt to apply the terms of the ordinary GNU General Public" + , "License instead of this License to a given copy of the Library. To do" + , "this, you must alter all the notices that refer to this License, so" + , "that they refer to the ordinary GNU General Public License, version 2," + , "instead of to this License. (If a newer version than version 2 of the" + , "ordinary GNU General Public License has appeared, then you can specify" + , "that version instead if you wish.) Do not make any other change in" + , "these notices." + , "" + , " Once this change is made in a given copy, it is irreversible for" + , "that copy, so the ordinary GNU General Public License applies to all" + , "subsequent copies and derivative works made from that copy." + , "" + , " This option is useful when you wish to copy part of the code of" + , "the Library into a program that is not a library." + , "" + , " 4. You may copy and distribute the Library (or a portion or" + , "derivative of it, under Section 2) in object code or executable form" + , "under the terms of Sections 1 and 2 above provided that you accompany" + , "it with the complete corresponding machine-readable source code, which" + , "must be distributed under the terms of Sections 1 and 2 above on a" + , "medium customarily used for software interchange." + , "" + , " If distribution of object code is made by offering access to copy" + , "from a designated place, then offering equivalent access to copy the" + , "source code from the same place satisfies the requirement to" + , "distribute the source code, even though third parties are not" + , "compelled to copy the source along with the object code." + , "" + , " 5. 
A program that contains no derivative of any portion of the" + , "Library, but is designed to work with the Library by being compiled or" + , "linked with it, is called a \"work that uses the Library\". Such a" + , "work, in isolation, is not a derivative work of the Library, and" + , "therefore falls outside the scope of this License." + , "" + , " However, linking a \"work that uses the Library\" with the Library" + , "creates an executable that is a derivative of the Library (because it" + , "contains portions of the Library), rather than a \"work that uses the" + , "library\". The executable is therefore covered by this License." + , "Section 6 states terms for distribution of such executables." + , "" + , " When a \"work that uses the Library\" uses material from a header file" + , "that is part of the Library, the object code for the work may be a" + , "derivative work of the Library even though the source code is not." + , "Whether this is true is especially significant if the work can be" + , "linked without the Library, or if the work is itself a library. The" + , "threshold for this to be true is not precisely defined by law." + , "" + , " If such an object file uses only numerical parameters, data" + , "structure layouts and accessors, and small macros and small inline" + , "functions (ten lines or less in length), then the use of the object" + , "file is unrestricted, regardless of whether it is legally a derivative" + , "work. (Executables containing this object code plus portions of the" + , "Library will still fall under Section 6.)" + , "" + , " Otherwise, if the work is a derivative of the Library, you may" + , "distribute the object code for the work under the terms of Section 6." + , "Any executables containing that work also fall under Section 6," + , "whether or not they are linked directly with the Library itself." + , "" + , " 6. 
As an exception to the Sections above, you may also combine or" + , "link a \"work that uses the Library\" with the Library to produce a" + , "work containing portions of the Library, and distribute that work" + , "under terms of your choice, provided that the terms permit" + , "modification of the work for the customer's own use and reverse" + , "engineering for debugging such modifications." + , "" + , " You must give prominent notice with each copy of the work that the" + , "Library is used in it and that the Library and its use are covered by" + , "this License. You must supply a copy of this License. If the work" + , "during execution displays copyright notices, you must include the" + , "copyright notice for the Library among them, as well as a reference" + , "directing the user to the copy of this License. Also, you must do one" + , "of these things:" + , "" + , " a) Accompany the work with the complete corresponding" + , " machine-readable source code for the Library including whatever" + , " changes were used in the work (which must be distributed under" + , " Sections 1 and 2 above); and, if the work is an executable linked" + , " with the Library, with the complete machine-readable \"work that" + , " uses the Library\", as object code and/or source code, so that the" + , " user can modify the Library and then relink to produce a modified" + , " executable containing the modified Library. (It is understood" + , " that the user who changes the contents of definitions files in the" + , " Library will not necessarily be able to recompile the application" + , " to use the modified definitions.)" + , "" + , " b) Use a suitable shared library mechanism for linking with the" + , " Library. 
A suitable mechanism is one that (1) uses at run time a" + , " copy of the library already present on the user's computer system," + , " rather than copying library functions into the executable, and (2)" + , " will operate properly with a modified version of the library, if" + , " the user installs one, as long as the modified version is" + , " interface-compatible with the version that the work was made with." + , "" + , " c) Accompany the work with a written offer, valid for at" + , " least three years, to give the same user the materials" + , " specified in Subsection 6a, above, for a charge no more" + , " than the cost of performing this distribution." + , "" + , " d) If distribution of the work is made by offering access to copy" + , " from a designated place, offer equivalent access to copy the above" + , " specified materials from the same place." + , "" + , " e) Verify that the user has already received a copy of these" + , " materials or that you have already sent this user a copy." + , "" + , " For an executable, the required form of the \"work that uses the" + , "Library\" must include any data and utility programs needed for" + , "reproducing the executable from it. However, as a special exception," + , "the materials to be distributed need not include anything that is" + , "normally distributed (in either source or binary form) with the major" + , "components (compiler, kernel, and so on) of the operating system on" + , "which the executable runs, unless that component itself accompanies" + , "the executable." + , "" + , " It may happen that this requirement contradicts the license" + , "restrictions of other proprietary libraries that do not normally" + , "accompany the operating system. Such a contradiction means you cannot" + , "use both them and the Library together in an executable that you" + , "distribute." + , "" + , " 7. 
You may place library facilities that are a work based on the" + , "Library side-by-side in a single library together with other library" + , "facilities not covered by this License, and distribute such a combined" + , "library, provided that the separate distribution of the work based on" + , "the Library and of the other library facilities is otherwise" + , "permitted, and provided that you do these two things:" + , "" + , " a) Accompany the combined library with a copy of the same work" + , " based on the Library, uncombined with any other library" + , " facilities. This must be distributed under the terms of the" + , " Sections above." + , "" + , " b) Give prominent notice with the combined library of the fact" + , " that part of it is a work based on the Library, and explaining" + , " where to find the accompanying uncombined form of the same work." + , "" + , " 8. You may not copy, modify, sublicense, link with, or distribute" + , "the Library except as expressly provided under this License. Any" + , "attempt otherwise to copy, modify, sublicense, link with, or" + , "distribute the Library is void, and will automatically terminate your" + , "rights under this License. However, parties who have received copies," + , "or rights, from you under this License will not have their licenses" + , "terminated so long as such parties remain in full compliance." + , "" + , " 9. You are not required to accept this License, since you have not" + , "signed it. However, nothing else grants you permission to modify or" + , "distribute the Library or its derivative works. These actions are" + , "prohibited by law if you do not accept this License. Therefore, by" + , "modifying or distributing the Library (or any work based on the" + , "Library), you indicate your acceptance of this License to do so, and" + , "all its terms and conditions for copying, distributing or modifying" + , "the Library or works based on it." + , "" + , " 10. 
Each time you redistribute the Library (or any work based on the" + , "Library), the recipient automatically receives a license from the" + , "original licensor to copy, distribute, link with or modify the Library" + , "subject to these terms and conditions. You may not impose any further" + , "restrictions on the recipients' exercise of the rights granted herein." + , "You are not responsible for enforcing compliance by third parties with" + , "this License." + , "" + , " 11. If, as a consequence of a court judgment or allegation of patent" + , "infringement or for any other reason (not limited to patent issues)," + , "conditions are imposed on you (whether by court order, agreement or" + , "otherwise) that contradict the conditions of this License, they do not" + , "excuse you from the conditions of this License. If you cannot" + , "distribute so as to satisfy simultaneously your obligations under this" + , "License and any other pertinent obligations, then as a consequence you" + , "may not distribute the Library at all. For example, if a patent" + , "license would not permit royalty-free redistribution of the Library by" + , "all those who receive copies directly or indirectly through you, then" + , "the only way you could satisfy both it and this License would be to" + , "refrain entirely from distribution of the Library." + , "" + , "If any portion of this section is held invalid or unenforceable under any" + , "particular circumstance, the balance of the section is intended to apply," + , "and the section as a whole is intended to apply in other circumstances." + , "" + , "It is not the purpose of this section to induce you to infringe any" + , "patents or other property right claims or to contest validity of any" + , "such claims; this section has the sole purpose of protecting the" + , "integrity of the free software distribution system which is" + , "implemented by public license practices. 
Many people have made" + , "generous contributions to the wide range of software distributed" + , "through that system in reliance on consistent application of that" + , "system; it is up to the author/donor to decide if he or she is willing" + , "to distribute software through any other system and a licensee cannot" + , "impose that choice." + , "" + , "This section is intended to make thoroughly clear what is believed to" + , "be a consequence of the rest of this License." + , "" + , " 12. If the distribution and/or use of the Library is restricted in" + , "certain countries either by patents or by copyrighted interfaces, the" + , "original copyright holder who places the Library under this License may add" + , "an explicit geographical distribution limitation excluding those countries," + , "so that distribution is permitted only in or among countries not thus" + , "excluded. In such case, this License incorporates the limitation as if" + , "written in the body of this License." + , "" + , " 13. The Free Software Foundation may publish revised and/or new" + , "versions of the Lesser General Public License from time to time." + , "Such new versions will be similar in spirit to the present version," + , "but may differ in detail to address new problems or concerns." + , "" + , "Each version is given a distinguishing version number. If the Library" + , "specifies a version number of this License which applies to it and" + , "\"any later version\", you have the option of following the terms and" + , "conditions either of that version or of any later version published by" + , "the Free Software Foundation. If the Library does not specify a" + , "license version number, you may choose any version ever published by" + , "the Free Software Foundation." + , "" + , " 14. If you wish to incorporate parts of the Library into other free" + , "programs whose distribution conditions are incompatible with these," + , "write to the author to ask for permission. 
For software which is" + , "copyrighted by the Free Software Foundation, write to the Free" + , "Software Foundation; we sometimes make exceptions for this. Our" + , "decision will be guided by the two goals of preserving the free status" + , "of all derivatives of our free software and of promoting the sharing" + , "and reuse of software generally." + , "" + , " NO WARRANTY" + , "" + , " 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO" + , "WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW." + , "EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR" + , "OTHER PARTIES PROVIDE THE LIBRARY \"AS IS\" WITHOUT WARRANTY OF ANY" + , "KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE" + , "IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR" + , "PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE" + , "LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME" + , "THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION." + , "" + , " 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN" + , "WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY" + , "AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU" + , "FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR" + , "CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE" + , "LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING" + , "RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A" + , "FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF" + , "SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH" + , "DAMAGES." 
+ , "" + , " END OF TERMS AND CONDITIONS" + , "" + , " How to Apply These Terms to Your New Libraries" + , "" + , " If you develop a new library, and you want it to be of the greatest" + , "possible use to the public, we recommend making it free software that" + , "everyone can redistribute and change. You can do so by permitting" + , "redistribution under these terms (or, alternatively, under the terms of the" + , "ordinary General Public License)." + , "" + , " To apply these terms, attach the following notices to the library. It is" + , "safest to attach them to the start of each source file to most effectively" + , "convey the exclusion of warranty; and each file should have at least the" + , "\"copyright\" line and a pointer to where the full notice is found." + , "" + , " " + , " Copyright (C) " + , "" + , " This library is free software; you can redistribute it and/or" + , " modify it under the terms of the GNU Lesser General Public" + , " License as published by the Free Software Foundation; either" + , " version 2.1 of the License, or (at your option) any later version." + , "" + , " This library is distributed in the hope that it will be useful," + , " but WITHOUT ANY WARRANTY; without even the implied warranty of" + , " MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU" + , " Lesser General Public License for more details." + , "" + , " You should have received a copy of the GNU Lesser General Public" + , " License along with this library; if not, write to the Free Software" + , " Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA" + , "" + , "Also add information on how to contact you by electronic and paper mail." + , "" + , "You should also get your employer (if you work as a programmer) or your" + , "school, if any, to sign a \"copyright disclaimer\" for the library, if" + , "necessary. 
Here is a sample; alter the names:" + , "" + , " Yoyodyne, Inc., hereby disclaims all copyright interest in the" + , " library `Frob' (a library for tweaking knobs) written by James Random Hacker." + , "" + , " , 1 April 1990" + , " Ty Coon, President of Vice" + , "" + , "That's all there is to it!" + ] + +lgpl3 :: License +lgpl3 = unlines + [ " GNU LESSER GENERAL PUBLIC LICENSE" + , " Version 3, 29 June 2007" + , "" + , " Copyright (C) 2007 Free Software Foundation, Inc. " + , " Everyone is permitted to copy and distribute verbatim copies" + , " of this license document, but changing it is not allowed." + , "" + , "" + , " This version of the GNU Lesser General Public License incorporates" + , "the terms and conditions of version 3 of the GNU General Public" + , "License, supplemented by the additional permissions listed below." + , "" + , " 0. Additional Definitions." + , "" + , " As used herein, \"this License\" refers to version 3 of the GNU Lesser" + , "General Public License, and the \"GNU GPL\" refers to version 3 of the GNU" + , "General Public License." + , "" + , " \"The Library\" refers to a covered work governed by this License," + , "other than an Application or a Combined Work as defined below." + , "" + , " An \"Application\" is any work that makes use of an interface provided" + , "by the Library, but which is not otherwise based on the Library." + , "Defining a subclass of a class defined by the Library is deemed a mode" + , "of using an interface provided by the Library." + , "" + , " A \"Combined Work\" is a work produced by combining or linking an" + , "Application with the Library. The particular version of the Library" + , "with which the Combined Work was made is also called the \"Linked" + , "Version\"." 
+ , "" + , " The \"Minimal Corresponding Source\" for a Combined Work means the" + , "Corresponding Source for the Combined Work, excluding any source code" + , "for portions of the Combined Work that, considered in isolation, are" + , "based on the Application, and not on the Linked Version." + , "" + , " The \"Corresponding Application Code\" for a Combined Work means the" + , "object code and/or source code for the Application, including any data" + , "and utility programs needed for reproducing the Combined Work from the" + , "Application, but excluding the System Libraries of the Combined Work." + , "" + , " 1. Exception to Section 3 of the GNU GPL." + , "" + , " You may convey a covered work under sections 3 and 4 of this License" + , "without being bound by section 3 of the GNU GPL." + , "" + , " 2. Conveying Modified Versions." + , "" + , " If you modify a copy of the Library, and, in your modifications, a" + , "facility refers to a function or data to be supplied by an Application" + , "that uses the facility (other than as an argument passed when the" + , "facility is invoked), then you may convey a copy of the modified" + , "version:" + , "" + , " a) under this License, provided that you make a good faith effort to" + , " ensure that, in the event an Application does not supply the" + , " function or data, the facility still operates, and performs" + , " whatever part of its purpose remains meaningful, or" + , "" + , " b) under the GNU GPL, with none of the additional permissions of" + , " this License applicable to that copy." + , "" + , " 3. Object Code Incorporating Material from Library Header Files." + , "" + , " The object code form of an Application may incorporate material from" + , "a header file that is part of the Library. 
You may convey such object" + , "code under terms of your choice, provided that, if the incorporated" + , "material is not limited to numerical parameters, data structure" + , "layouts and accessors, or small macros, inline functions and templates" + , "(ten or fewer lines in length), you do both of the following:" + , "" + , " a) Give prominent notice with each copy of the object code that the" + , " Library is used in it and that the Library and its use are" + , " covered by this License." + , "" + , " b) Accompany the object code with a copy of the GNU GPL and this license" + , " document." + , "" + , " 4. Combined Works." + , "" + , " You may convey a Combined Work under terms of your choice that," + , "taken together, effectively do not restrict modification of the" + , "portions of the Library contained in the Combined Work and reverse" + , "engineering for debugging such modifications, if you also do each of" + , "the following:" + , "" + , " a) Give prominent notice with each copy of the Combined Work that" + , " the Library is used in it and that the Library and its use are" + , " covered by this License." + , "" + , " b) Accompany the Combined Work with a copy of the GNU GPL and this license" + , " document." + , "" + , " c) For a Combined Work that displays copyright notices during" + , " execution, include the copyright notice for the Library among" + , " these notices, as well as a reference directing the user to the" + , " copies of the GNU GPL and this license document." + , "" + , " d) Do one of the following:" + , "" + , " 0) Convey the Minimal Corresponding Source under the terms of this" + , " License, and the Corresponding Application Code in a form" + , " suitable for, and under terms that permit, the user to" + , " recombine or relink the Application with a modified version of" + , " the Linked Version to produce a modified Combined Work, in the" + , " manner specified by section 6 of the GNU GPL for conveying" + , " Corresponding Source." 
+ , "" + , " 1) Use a suitable shared library mechanism for linking with the" + , " Library. A suitable mechanism is one that (a) uses at run time" + , " a copy of the Library already present on the user's computer" + , " system, and (b) will operate properly with a modified version" + , " of the Library that is interface-compatible with the Linked" + , " Version." + , "" + , " e) Provide Installation Information, but only if you would otherwise" + , " be required to provide such information under section 6 of the" + , " GNU GPL, and only to the extent that such information is" + , " necessary to install and execute a modified version of the" + , " Combined Work produced by recombining or relinking the" + , " Application with a modified version of the Linked Version. (If" + , " you use option 4d0, the Installation Information must accompany" + , " the Minimal Corresponding Source and Corresponding Application" + , " Code. If you use option 4d1, you must provide the Installation" + , " Information in the manner specified by section 6 of the GNU GPL" + , " for conveying Corresponding Source.)" + , "" + , " 5. Combined Libraries." + , "" + , " You may place library facilities that are a work based on the" + , "Library side by side in a single library together with other library" + , "facilities that are not Applications and are not covered by this" + , "License, and convey such a combined library under terms of your" + , "choice, if you do both of the following:" + , "" + , " a) Accompany the combined library with a copy of the same work based" + , " on the Library, uncombined with any other library facilities," + , " conveyed under the terms of this License." + , "" + , " b) Give prominent notice with the combined library that part of it" + , " is a work based on the Library, and explaining where to find the" + , " accompanying uncombined form of the same work." + , "" + , " 6. Revised Versions of the GNU Lesser General Public License." 
+ , "" + , " The Free Software Foundation may publish revised and/or new versions" + , "of the GNU Lesser General Public License from time to time. Such new" + , "versions will be similar in spirit to the present version, but may" + , "differ in detail to address new problems or concerns." + , "" + , " Each version is given a distinguishing version number. If the" + , "Library as you received it specifies that a certain numbered version" + , "of the GNU Lesser General Public License \"or any later version\"" + , "applies to it, you have the option of following the terms and" + , "conditions either of that published version or of any later version" + , "published by the Free Software Foundation. If the Library as you" + , "received it does not specify a version number of the GNU Lesser" + , "General Public License, you may choose any version of the GNU Lesser" + , "General Public License ever published by the Free Software Foundation." + , "" + , " If the Library as you received it specifies that a proxy can decide" + , "whether future versions of the GNU Lesser General Public License shall" + , "apply, that proxy's public statement of acceptance of any version is" + , "permanent authorization for you to choose that version for the" + , "Library." + ] + +apache20 :: License +apache20 = unlines + [ "" + , " Apache License" + , " Version 2.0, January 2004" + , " http://www.apache.org/licenses/" + , "" + , " TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION" + , "" + , " 1. Definitions." + , "" + , " \"License\" shall mean the terms and conditions for use, reproduction," + , " and distribution as defined by Sections 1 through 9 of this document." + , "" + , " \"Licensor\" shall mean the copyright owner or entity authorized by" + , " the copyright owner that is granting the License." 
+ , "" + , " \"Legal Entity\" shall mean the union of the acting entity and all" + , " other entities that control, are controlled by, or are under common" + , " control with that entity. For the purposes of this definition," + , " \"control\" means (i) the power, direct or indirect, to cause the" + , " direction or management of such entity, whether by contract or" + , " otherwise, or (ii) ownership of fifty percent (50%) or more of the" + , " outstanding shares, or (iii) beneficial ownership of such entity." + , "" + , " \"You\" (or \"Your\") shall mean an individual or Legal Entity" + , " exercising permissions granted by this License." + , "" + , " \"Source\" form shall mean the preferred form for making modifications," + , " including but not limited to software source code, documentation" + , " source, and configuration files." + , "" + , " \"Object\" form shall mean any form resulting from mechanical" + , " transformation or translation of a Source form, including but" + , " not limited to compiled object code, generated documentation," + , " and conversions to other media types." + , "" + , " \"Work\" shall mean the work of authorship, whether in Source or" + , " Object form, made available under the License, as indicated by a" + , " copyright notice that is included in or attached to the work" + , " (an example is provided in the Appendix below)." + , "" + , " \"Derivative Works\" shall mean any work, whether in Source or Object" + , " form, that is based on (or derived from) the Work and for which the" + , " editorial revisions, annotations, elaborations, or other modifications" + , " represent, as a whole, an original work of authorship. For the purposes" + , " of this License, Derivative Works shall not include works that remain" + , " separable from, or merely link (or bind by name) to the interfaces of," + , " the Work and Derivative Works thereof." 
+ , "" + , " \"Contribution\" shall mean any work of authorship, including" + , " the original version of the Work and any modifications or additions" + , " to that Work or Derivative Works thereof, that is intentionally" + , " submitted to Licensor for inclusion in the Work by the copyright owner" + , " or by an individual or Legal Entity authorized to submit on behalf of" + , " the copyright owner. For the purposes of this definition, \"submitted\"" + , " means any form of electronic, verbal, or written communication sent" + , " to the Licensor or its representatives, including but not limited to" + , " communication on electronic mailing lists, source code control systems," + , " and issue tracking systems that are managed by, or on behalf of, the" + , " Licensor for the purpose of discussing and improving the Work, but" + , " excluding communication that is conspicuously marked or otherwise" + , " designated in writing by the copyright owner as \"Not a Contribution.\"" + , "" + , " \"Contributor\" shall mean Licensor and any individual or Legal Entity" + , " on behalf of whom a Contribution has been received by Licensor and" + , " subsequently incorporated within the Work." + , "" + , " 2. Grant of Copyright License. Subject to the terms and conditions of" + , " this License, each Contributor hereby grants to You a perpetual," + , " worldwide, non-exclusive, no-charge, royalty-free, irrevocable" + , " copyright license to reproduce, prepare Derivative Works of," + , " publicly display, publicly perform, sublicense, and distribute the" + , " Work and such Derivative Works in Source or Object form." + , "" + , " 3. Grant of Patent License. 
Subject to the terms and conditions of" + , " this License, each Contributor hereby grants to You a perpetual," + , " worldwide, non-exclusive, no-charge, royalty-free, irrevocable" + , " (except as stated in this section) patent license to make, have made," + , " use, offer to sell, sell, import, and otherwise transfer the Work," + , " where such license applies only to those patent claims licensable" + , " by such Contributor that are necessarily infringed by their" + , " Contribution(s) alone or by combination of their Contribution(s)" + , " with the Work to which such Contribution(s) was submitted. If You" + , " institute patent litigation against any entity (including a" + , " cross-claim or counterclaim in a lawsuit) alleging that the Work" + , " or a Contribution incorporated within the Work constitutes direct" + , " or contributory patent infringement, then any patent licenses" + , " granted to You under this License for that Work shall terminate" + , " as of the date such litigation is filed." + , "" + , " 4. Redistribution. 
You may reproduce and distribute copies of the" + , " Work or Derivative Works thereof in any medium, with or without" + , " modifications, and in Source or Object form, provided that You" + , " meet the following conditions:" + , "" + , " (a) You must give any other recipients of the Work or" + , " Derivative Works a copy of this License; and" + , "" + , " (b) You must cause any modified files to carry prominent notices" + , " stating that You changed the files; and" + , "" + , " (c) You must retain, in the Source form of any Derivative Works" + , " that You distribute, all copyright, patent, trademark, and" + , " attribution notices from the Source form of the Work," + , " excluding those notices that do not pertain to any part of" + , " the Derivative Works; and" + , "" + , " (d) If the Work includes a \"NOTICE\" text file as part of its" + , " distribution, then any Derivative Works that You distribute must" + , " include a readable copy of the attribution notices contained" + , " within such NOTICE file, excluding those notices that do not" + , " pertain to any part of the Derivative Works, in at least one" + , " of the following places: within a NOTICE text file distributed" + , " as part of the Derivative Works; within the Source form or" + , " documentation, if provided along with the Derivative Works; or," + , " within a display generated by the Derivative Works, if and" + , " wherever such third-party notices normally appear. The contents" + , " of the NOTICE file are for informational purposes only and" + , " do not modify the License. You may add Your own attribution" + , " notices within Derivative Works that You distribute, alongside" + , " or as an addendum to the NOTICE text from the Work, provided" + , " that such additional attribution notices cannot be construed" + , " as modifying the License." 
+ , "" + , " You may add Your own copyright statement to Your modifications and" + , " may provide additional or different license terms and conditions" + , " for use, reproduction, or distribution of Your modifications, or" + , " for any such Derivative Works as a whole, provided Your use," + , " reproduction, and distribution of the Work otherwise complies with" + , " the conditions stated in this License." + , "" + , " 5. Submission of Contributions. Unless You explicitly state otherwise," + , " any Contribution intentionally submitted for inclusion in the Work" + , " by You to the Licensor shall be under the terms and conditions of" + , " this License, without any additional terms or conditions." + , " Notwithstanding the above, nothing herein shall supersede or modify" + , " the terms of any separate license agreement you may have executed" + , " with Licensor regarding such Contributions." + , "" + , " 6. Trademarks. This License does not grant permission to use the trade" + , " names, trademarks, service marks, or product names of the Licensor," + , " except as required for reasonable and customary use in describing the" + , " origin of the Work and reproducing the content of the NOTICE file." + , "" + , " 7. Disclaimer of Warranty. Unless required by applicable law or" + , " agreed to in writing, Licensor provides the Work (and each" + , " Contributor provides its Contributions) on an \"AS IS\" BASIS," + , " WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or" + , " implied, including, without limitation, any warranties or conditions" + , " of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A" + , " PARTICULAR PURPOSE. You are solely responsible for determining the" + , " appropriateness of using or redistributing the Work and assume any" + , " risks associated with Your exercise of permissions under this License." + , "" + , " 8. Limitation of Liability. 
In no event and under no legal theory," + , " whether in tort (including negligence), contract, or otherwise," + , " unless required by applicable law (such as deliberate and grossly" + , " negligent acts) or agreed to in writing, shall any Contributor be" + , " liable to You for damages, including any direct, indirect, special," + , " incidental, or consequential damages of any character arising as a" + , " result of this License or out of the use or inability to use the" + , " Work (including but not limited to damages for loss of goodwill," + , " work stoppage, computer failure or malfunction, or any and all" + , " other commercial damages or losses), even if such Contributor" + , " has been advised of the possibility of such damages." + , "" + , " 9. Accepting Warranty or Additional Liability. While redistributing" + , " the Work or Derivative Works thereof, You may choose to offer," + , " and charge a fee for, acceptance of support, warranty, indemnity," + , " or other liability obligations and/or rights consistent with this" + , " License. However, in accepting such obligations, You may act only" + , " on Your own behalf and on Your sole responsibility, not on behalf" + , " of any other Contributor, and only if You agree to indemnify," + , " defend, and hold each Contributor harmless for any liability" + , " incurred by, or claims asserted against, such Contributor by reason" + , " of your accepting any such warranty or additional liability." + , "" + , " END OF TERMS AND CONDITIONS" + , "" + , " APPENDIX: How to apply the Apache License to your work." + , "" + , " To apply the Apache License to your work, attach the following" + , " boilerplate notice, with the fields enclosed by brackets \"[]\"" + , " replaced with your own identifying information. (Don't include" + , " the brackets!) The text should be enclosed in the appropriate" + , " comment syntax for the file format. 
We also recommend that a" + , " file or class name and description of purpose be included on the" + , " same \"printed page\" as the copyright notice for easier" + , " identification within third-party archives." + , "" + , " Copyright [yyyy] [name of copyright owner]" + , "" + , " Licensed under the Apache License, Version 2.0 (the \"License\");" + , " you may not use this file except in compliance with the License." + , " You may obtain a copy of the License at" + , "" + , " http://www.apache.org/licenses/LICENSE-2.0" + , "" + , " Unless required by applicable law or agreed to in writing, software" + , " distributed under the License is distributed on an \"AS IS\" BASIS," + , " WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied." + , " See the License for the specific language governing permissions and" + , " limitations under the License." + ] + +mit :: String -> String -> License +mit authors year = unlines + [ "Copyright (c) " ++ year ++ " " ++ authors + , "" + , "Permission is hereby granted, free of charge, to any person obtaining" + , "a copy of this software and associated documentation files (the" + , "\"Software\"), to deal in the Software without restriction, including" + , "without limitation the rights to use, copy, modify, merge, publish," + , "distribute, sublicense, and/or sell copies of the Software, and to" + , "permit persons to whom the Software is furnished to do so, subject to" + , "the following conditions:" + , "" + , "The above copyright notice and this permission notice shall be included" + , "in all copies or substantial portions of the Software." + , "" + , "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND," + , "EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF" + , "MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT." 
+ , "IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY" + , "CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT," + , "TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE" + , "SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE." + ] + +mpl20 :: License +mpl20 = unlines + [ "Mozilla Public License Version 2.0" + , "==================================" + , "" + , "1. Definitions" + , "--------------" + , "" + , "1.1. \"Contributor\"" + , " means each individual or legal entity that creates, contributes to" + , " the creation of, or owns Covered Software." + , "" + , "1.2. \"Contributor Version\"" + , " means the combination of the Contributions of others (if any) used" + , " by a Contributor and that particular Contributor's Contribution." + , "" + , "1.3. \"Contribution\"" + , " means Covered Software of a particular Contributor." + , "" + , "1.4. \"Covered Software\"" + , " means Source Code Form to which the initial Contributor has attached" + , " the notice in Exhibit A, the Executable Form of such Source Code" + , " Form, and Modifications of such Source Code Form, in each case" + , " including portions thereof." + , "" + , "1.5. \"Incompatible With Secondary Licenses\"" + , " means" + , "" + , " (a) that the initial Contributor has attached the notice described" + , " in Exhibit B to the Covered Software; or" + , "" + , " (b) that the Covered Software was made available under the terms of" + , " version 1.1 or earlier of the License, but not also under the" + , " terms of a Secondary License." + , "" + , "1.6. \"Executable Form\"" + , " means any form of the work other than Source Code Form." + , "" + , "1.7. \"Larger Work\"" + , " means a work that combines Covered Software with other material, in" + , " a separate file or files, that is not Covered Software." + , "" + , "1.8. \"License\"" + , " means this document." + , "" + , "1.9. 
\"Licensable\"" + , " means having the right to grant, to the maximum extent possible," + , " whether at the time of the initial grant or subsequently, any and" + , " all of the rights conveyed by this License." + , "" + , "1.10. \"Modifications\"" + , " means any of the following:" + , "" + , " (a) any file in Source Code Form that results from an addition to," + , " deletion from, or modification of the contents of Covered" + , " Software; or" + , "" + , " (b) any new file in Source Code Form that contains any Covered" + , " Software." + , "" + , "1.11. \"Patent Claims\" of a Contributor" + , " means any patent claim(s), including without limitation, method," + , " process, and apparatus claims, in any patent Licensable by such" + , " Contributor that would be infringed, but for the grant of the" + , " License, by the making, using, selling, offering for sale, having" + , " made, import, or transfer of either its Contributions or its" + , " Contributor Version." + , "" + , "1.12. \"Secondary License\"" + , " means either the GNU General Public License, Version 2.0, the GNU" + , " Lesser General Public License, Version 2.1, the GNU Affero General" + , " Public License, Version 3.0, or any later versions of those" + , " licenses." + , "" + , "1.13. \"Source Code Form\"" + , " means the form of the work preferred for making modifications." + , "" + , "1.14. \"You\" (or \"Your\")" + , " means an individual or a legal entity exercising rights under this" + , " License. For legal entities, \"You\" includes any entity that" + , " controls, is controlled by, or is under common control with You. For" + , " purposes of this definition, \"control\" means (a) the power, direct" + , " or indirect, to cause the direction or management of such entity," + , " whether by contract or otherwise, or (b) ownership of more than" + , " fifty percent (50%) of the outstanding shares or beneficial" + , " ownership of such entity." + , "" + , "2. 
License Grants and Conditions" + , "--------------------------------" + , "" + , "2.1. Grants" + , "" + , "Each Contributor hereby grants You a world-wide, royalty-free," + , "non-exclusive license:" + , "" + , "(a) under intellectual property rights (other than patent or trademark)" + , " Licensable by such Contributor to use, reproduce, make available," + , " modify, display, perform, distribute, and otherwise exploit its" + , " Contributions, either on an unmodified basis, with Modifications, or" + , " as part of a Larger Work; and" + , "" + , "(b) under Patent Claims of such Contributor to make, use, sell, offer" + , " for sale, have made, import, and otherwise transfer either its" + , " Contributions or its Contributor Version." + , "" + , "2.2. Effective Date" + , "" + , "The licenses granted in Section 2.1 with respect to any Contribution" + , "become effective for each Contribution on the date the Contributor first" + , "distributes such Contribution." + , "" + , "2.3. Limitations on Grant Scope" + , "" + , "The licenses granted in this Section 2 are the only rights granted under" + , "this License. No additional rights or licenses will be implied from the" + , "distribution or licensing of Covered Software under this License." + , "Notwithstanding Section 2.1(b) above, no patent license is granted by a" + , "Contributor:" + , "" + , "(a) for any code that a Contributor has removed from Covered Software;" + , " or" + , "" + , "(b) for infringements caused by: (i) Your and any other third party's" + , " modifications of Covered Software, or (ii) the combination of its" + , " Contributions with other software (except as part of its Contributor" + , " Version); or" + , "" + , "(c) under Patent Claims infringed by Covered Software in the absence of" + , " its Contributions." 
+ , "" + , "This License does not grant any rights in the trademarks, service marks," + , "or logos of any Contributor (except as may be necessary to comply with" + , "the notice requirements in Section 3.4)." + , "" + , "2.4. Subsequent Licenses" + , "" + , "No Contributor makes additional grants as a result of Your choice to" + , "distribute the Covered Software under a subsequent version of this" + , "License (see Section 10.2) or under the terms of a Secondary License (if" + , "permitted under the terms of Section 3.3)." + , "" + , "2.5. Representation" + , "" + , "Each Contributor represents that the Contributor believes its" + , "Contributions are its original creation(s) or it has sufficient rights" + , "to grant the rights to its Contributions conveyed by this License." + , "" + , "2.6. Fair Use" + , "" + , "This License is not intended to limit any rights You have under" + , "applicable copyright doctrines of fair use, fair dealing, or other" + , "equivalents." + , "" + , "2.7. Conditions" + , "" + , "Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted" + , "in Section 2.1." + , "" + , "3. Responsibilities" + , "-------------------" + , "" + , "3.1. Distribution of Source Form" + , "" + , "All distribution of Covered Software in Source Code Form, including any" + , "Modifications that You create or to which You contribute, must be under" + , "the terms of this License. You must inform recipients that the Source" + , "Code Form of the Covered Software is governed by the terms of this" + , "License, and how they can obtain a copy of this License. You may not" + , "attempt to alter or restrict the recipients' rights in the Source Code" + , "Form." + , "" + , "3.2. 
Distribution of Executable Form" + , "" + , "If You distribute Covered Software in Executable Form then:" + , "" + , "(a) such Covered Software must also be made available in Source Code" + , " Form, as described in Section 3.1, and You must inform recipients of" + , " the Executable Form how they can obtain a copy of such Source Code" + , " Form by reasonable means in a timely manner, at a charge no more" + , " than the cost of distribution to the recipient; and" + , "" + , "(b) You may distribute such Executable Form under the terms of this" + , " License, or sublicense it under different terms, provided that the" + , " license for the Executable Form does not attempt to limit or alter" + , " the recipients' rights in the Source Code Form under this License." + , "" + , "3.3. Distribution of a Larger Work" + , "" + , "You may create and distribute a Larger Work under terms of Your choice," + , "provided that You also comply with the requirements of this License for" + , "the Covered Software. If the Larger Work is a combination of Covered" + , "Software with a work governed by one or more Secondary Licenses, and the" + , "Covered Software is not Incompatible With Secondary Licenses, this" + , "License permits You to additionally distribute such Covered Software" + , "under the terms of such Secondary License(s), so that the recipient of" + , "the Larger Work may, at their option, further distribute the Covered" + , "Software under the terms of either this License or such Secondary" + , "License(s)." + , "" + , "3.4. Notices" + , "" + , "You may not remove or alter the substance of any license notices" + , "(including copyright notices, patent notices, disclaimers of warranty," + , "or limitations of liability) contained within the Source Code Form of" + , "the Covered Software, except that You may alter any license notices to" + , "the extent required to remedy known factual inaccuracies." + , "" + , "3.5. 
Application of Additional Terms" + , "" + , "You may choose to offer, and to charge a fee for, warranty, support," + , "indemnity or liability obligations to one or more recipients of Covered" + , "Software. However, You may do so only on Your own behalf, and not on" + , "behalf of any Contributor. You must make it absolutely clear that any" + , "such warranty, support, indemnity, or liability obligation is offered by" + , "You alone, and You hereby agree to indemnify every Contributor for any" + , "liability incurred by such Contributor as a result of warranty, support," + , "indemnity or liability terms You offer. You may include additional" + , "disclaimers of warranty and limitations of liability specific to any" + , "jurisdiction." + , "" + , "4. Inability to Comply Due to Statute or Regulation" + , "---------------------------------------------------" + , "" + , "If it is impossible for You to comply with any of the terms of this" + , "License with respect to some or all of the Covered Software due to" + , "statute, judicial order, or regulation then You must: (a) comply with" + , "the terms of this License to the maximum extent possible; and (b)" + , "describe the limitations and the code they affect. Such description must" + , "be placed in a text file included with all distributions of the Covered" + , "Software under this License. Except to the extent prohibited by statute" + , "or regulation, such description must be sufficiently detailed for a" + , "recipient of ordinary skill to be able to understand it." + , "" + , "5. Termination" + , "--------------" + , "" + , "5.1. The rights granted under this License will terminate automatically" + , "if You fail to comply with any of its terms. 
However, if You become" + , "compliant, then the rights granted under this License from a particular" + , "Contributor are reinstated (a) provisionally, unless and until such" + , "Contributor explicitly and finally terminates Your grants, and (b) on an" + , "ongoing basis, if such Contributor fails to notify You of the" + , "non-compliance by some reasonable means prior to 60 days after You have" + , "come back into compliance. Moreover, Your grants from a particular" + , "Contributor are reinstated on an ongoing basis if such Contributor" + , "notifies You of the non-compliance by some reasonable means, this is the" + , "first time You have received notice of non-compliance with this License" + , "from such Contributor, and You become compliant prior to 30 days after" + , "Your receipt of the notice." + , "" + , "5.2. If You initiate litigation against any entity by asserting a patent" + , "infringement claim (excluding declaratory judgment actions," + , "counter-claims, and cross-claims) alleging that a Contributor Version" + , "directly or indirectly infringes any patent, then the rights granted to" + , "You by any and all Contributors for the Covered Software under Section" + , "2.1 of this License shall terminate." + , "" + , "5.3. In the event of termination under Sections 5.1 or 5.2 above, all" + , "end user license agreements (excluding distributors and resellers) which" + , "have been validly granted by You or Your distributors under this License" + , "prior to termination shall survive termination." + , "" + , "************************************************************************" + , "* *" + , "* 6. 
Disclaimer of Warranty *" + , "* ------------------------- *" + , "* *" + , "* Covered Software is provided under this License on an \"as is\" *" + , "* basis, without warranty of any kind, either expressed, implied, or *" + , "* statutory, including, without limitation, warranties that the *" + , "* Covered Software is free of defects, merchantable, fit for a *" + , "* particular purpose or non-infringing. The entire risk as to the *" + , "* quality and performance of the Covered Software is with You. *" + , "* Should any Covered Software prove defective in any respect, You *" + , "* (not any Contributor) assume the cost of any necessary servicing, *" + , "* repair, or correction. This disclaimer of warranty constitutes an *" + , "* essential part of this License. No use of any Covered Software is *" + , "* authorized under this License except under this disclaimer. *" + , "* *" + , "************************************************************************" + , "" + , "************************************************************************" + , "* *" + , "* 7. Limitation of Liability *" + , "* -------------------------- *" + , "* *" + , "* Under no circumstances and under no legal theory, whether tort *" + , "* (including negligence), contract, or otherwise, shall any *" + , "* Contributor, or anyone who distributes Covered Software as *" + , "* permitted above, be liable to You for any direct, indirect, *" + , "* special, incidental, or consequential damages of any character *" + , "* including, without limitation, damages for lost profits, loss of *" + , "* goodwill, work stoppage, computer failure or malfunction, or any *" + , "* and all other commercial damages or losses, even if such party *" + , "* shall have been informed of the possibility of such damages. 
This *" + , "* limitation of liability shall not apply to liability for death or *" + , "* personal injury resulting from such party's negligence to the *" + , "* extent applicable law prohibits such limitation. Some *" + , "* jurisdictions do not allow the exclusion or limitation of *" + , "* incidental or consequential damages, so this exclusion and *" + , "* limitation may not apply to You. *" + , "* *" + , "************************************************************************" + , "" + , "8. Litigation" + , "-------------" + , "" + , "Any litigation relating to this License may be brought only in the" + , "courts of a jurisdiction where the defendant maintains its principal" + , "place of business and such litigation shall be governed by laws of that" + , "jurisdiction, without reference to its conflict-of-law provisions." + , "Nothing in this Section shall prevent a party's ability to bring" + , "cross-claims or counter-claims." + , "" + , "9. Miscellaneous" + , "----------------" + , "" + , "This License represents the complete agreement concerning the subject" + , "matter hereof. If any provision of this License is held to be" + , "unenforceable, such provision shall be reformed only to the extent" + , "necessary to make it enforceable. Any law or regulation which provides" + , "that the language of a contract shall be construed against the drafter" + , "shall not be used to construe this License against a Contributor." + , "" + , "10. Versions of the License" + , "---------------------------" + , "" + , "10.1. New Versions" + , "" + , "Mozilla Foundation is the license steward. Except as provided in Section" + , "10.3, no one other than the license steward has the right to modify or" + , "publish new versions of this License. Each version will be given a" + , "distinguishing version number." + , "" + , "10.2. 
Effect of New Versions" + , "" + , "You may distribute the Covered Software under the terms of the version" + , "of the License under which You originally received the Covered Software," + , "or under the terms of any subsequent version published by the license" + , "steward." + , "" + , "10.3. Modified Versions" + , "" + , "If you create software not governed by this License, and you want to" + , "create a new license for such software, you may create and use a" + , "modified version of this License if you rename the license and remove" + , "any references to the name of the license steward (except to note that" + , "such modified license differs from this License)." + , "" + , "10.4. Distributing Source Code Form that is Incompatible With Secondary" + , "Licenses" + , "" + , "If You choose to distribute Source Code Form that is Incompatible With" + , "Secondary Licenses under the terms of this version of the License, the" + , "notice described in Exhibit B of this License must be attached." + , "" + , "Exhibit A - Source Code Form License Notice" + , "-------------------------------------------" + , "" + , " This Source Code Form is subject to the terms of the Mozilla Public" + , " License, v. 2.0. If a copy of the MPL was not distributed with this" + , " file, You can obtain one at http://mozilla.org/MPL/2.0/." + , "" + , "If it is not possible or desirable to put the notice in a particular" + , "file, then You may include the notice in a location (such as a LICENSE" + , "file in a relevant directory) where a recipient would be likely to look" + , "for such a notice." + , "" + , "You may add additional accurate notices of copyright ownership." + , "" + , "Exhibit B - \"Incompatible With Secondary Licenses\" Notice" + , "---------------------------------------------------------" + , "" + , " This Source Code Form is \"Incompatible With Secondary Licenses\", as" + , " defined by the Mozilla Public License, v. 2.0." 
+ ] + +isc :: String -> String -> License +isc authors year = unlines + [ "Copyright (c) " ++ year ++ " " ++ authors + , "" + , "Permission to use, copy, modify, and/or distribute this software for any purpose" + , "with or without fee is hereby granted, provided that the above copyright notice" + , "and this permission notice appear in all copies." + , "" + , "THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH" + , "REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND" + , "FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT," + , "INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS" + , "OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER" + , "TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF" + , "THIS SOFTWARE." + ] diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Init/Types.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Init/Types.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Init/Types.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Init/Types.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,170 @@ +{-# LANGUAGE CPP #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.Init.Types +-- Copyright : (c) Brent Yorgey, Benedikt Huber 2009 +-- License : BSD-like +-- +-- Maintainer : cabal-devel@haskell.org +-- Stability : provisional +-- Portability : portable +-- +-- Some types used by the 'cabal init' command. +-- +----------------------------------------------------------------------------- +module Distribution.Client.Init.Types where + +import Distribution.Simple.Setup + ( Flag(..) 
) + +import Distribution.Version +import Distribution.Verbosity +import qualified Distribution.Package as P +import Distribution.License +import Distribution.ModuleName +import Language.Haskell.Extension ( Language(..), Extension ) + +import qualified Text.PrettyPrint as Disp +import qualified Distribution.Compat.ReadP as Parse +import Distribution.Text + +#if !MIN_VERSION_base(4,8,0) +import Data.Monoid +#endif + +-- | InitFlags is really just a simple type to represent certain +-- portions of a .cabal file. Rather than have a flag for EVERY +-- possible field, we just have one for each field that the user is +-- likely to want and/or that we are likely to be able to +-- intelligently guess. +data InitFlags = + InitFlags { nonInteractive :: Flag Bool + , quiet :: Flag Bool + , packageDir :: Flag FilePath + , noComments :: Flag Bool + , minimal :: Flag Bool + + , packageName :: Flag P.PackageName + , version :: Flag Version + , cabalVersion :: Flag VersionRange + , license :: Flag License + , author :: Flag String + , email :: Flag String + , homepage :: Flag String + + , synopsis :: Flag String + , category :: Flag (Either String Category) + , extraSrc :: Maybe [String] + + , packageType :: Flag PackageType + , mainIs :: Flag FilePath + , language :: Flag Language + + , exposedModules :: Maybe [ModuleName] + , otherModules :: Maybe [ModuleName] + , otherExts :: Maybe [Extension] + + , dependencies :: Maybe [P.Dependency] + , sourceDirs :: Maybe [String] + , buildTools :: Maybe [String] + + , initVerbosity :: Flag Verbosity + , overwrite :: Flag Bool + } + deriving (Show) + + -- the Monoid instance for Flag has later values override earlier + -- ones, which is why we want Maybe [foo] for collecting foo values, + -- not Flag [foo]. + +data PackageType = Library | Executable + deriving (Show, Read, Eq) + +instance Text PackageType where + disp = Disp.text . show + parse = Parse.choice $ map (fmap read . Parse.string . 
show) [Library, Executable] + +instance Monoid InitFlags where + mempty = InitFlags + { nonInteractive = mempty + , quiet = mempty + , packageDir = mempty + , noComments = mempty + , minimal = mempty + , packageName = mempty + , version = mempty + , cabalVersion = mempty + , license = mempty + , author = mempty + , email = mempty + , homepage = mempty + , synopsis = mempty + , category = mempty + , extraSrc = mempty + , packageType = mempty + , mainIs = mempty + , language = mempty + , exposedModules = mempty + , otherModules = mempty + , otherExts = mempty + , dependencies = mempty + , sourceDirs = mempty + , buildTools = mempty + , initVerbosity = mempty + , overwrite = mempty + } + mappend a b = InitFlags + { nonInteractive = combine nonInteractive + , quiet = combine quiet + , packageDir = combine packageDir + , noComments = combine noComments + , minimal = combine minimal + , packageName = combine packageName + , version = combine version + , cabalVersion = combine cabalVersion + , license = combine license + , author = combine author + , email = combine email + , homepage = combine homepage + , synopsis = combine synopsis + , category = combine category + , extraSrc = combine extraSrc + , packageType = combine packageType + , mainIs = combine mainIs + , language = combine language + , exposedModules = combine exposedModules + , otherModules = combine otherModules + , otherExts = combine otherExts + , dependencies = combine dependencies + , sourceDirs = combine sourceDirs + , buildTools = combine buildTools + , initVerbosity = combine initVerbosity + , overwrite = combine overwrite + } + where combine field = field a `mappend` field b + +-- | Some common package categories. 
+data Category + = Codec + | Concurrency + | Control + | Data + | Database + | Development + | Distribution + | Game + | Graphics + | Language + | Math + | Network + | Sound + | System + | Testing + | Text + | Web + deriving (Read, Show, Eq, Ord, Bounded, Enum) + +instance Text Category where + disp = Disp.text . show + parse = Parse.choice $ map (fmap read . Parse.string . show) [Codec .. ] + diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Init.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Init.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Init.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Init.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,864 @@ +{-# LANGUAGE CPP #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.Init +-- Copyright : (c) Brent Yorgey 2009 +-- License : BSD-like +-- +-- Maintainer : cabal-devel@haskell.org +-- Stability : provisional +-- Portability : portable +-- +-- Implementation of the 'cabal init' command, which creates an initial .cabal +-- file for a project. +-- +----------------------------------------------------------------------------- + +module Distribution.Client.Init ( + + -- * Commands + initCabal + + ) where + +import System.IO + ( hSetBuffering, stdout, BufferMode(..) 
) +import System.Directory + ( getCurrentDirectory, doesDirectoryExist, doesFileExist, copyFile + , getDirectoryContents, createDirectoryIfMissing ) +import System.FilePath + ( (), (<.>), takeBaseName ) +import Data.Time + ( getCurrentTime, utcToLocalTime, toGregorian, localDay, getCurrentTimeZone ) + +import Data.Char + ( toUpper ) +import Data.List + ( intercalate, nub, groupBy, (\\) ) +import Data.Maybe + ( fromMaybe, isJust, catMaybes, listToMaybe ) +import Data.Function + ( on ) +import qualified Data.Map as M +#if !MIN_VERSION_base(4,8,0) +import Data.Traversable + ( traverse ) +import Control.Applicative + ( (<$>) ) +#endif +import Control.Monad + ( when, unless, (>=>), join, forM_ ) +import Control.Arrow + ( (&&&), (***) ) + +import Text.PrettyPrint hiding (mode, cat) + +import Data.Version + ( Version(..) ) +import Distribution.Version + ( orLaterVersion, earlierVersion, intersectVersionRanges, VersionRange ) +import Distribution.Verbosity + ( Verbosity ) +import Distribution.ModuleName + ( ModuleName, fromString ) -- And for the Text instance +import Distribution.InstalledPackageInfo + ( InstalledPackageInfo, sourcePackageId, exposed ) +import qualified Distribution.Package as P +import Language.Haskell.Extension ( Language(..) ) + +import Distribution.Client.Init.Types + ( InitFlags(..), PackageType(..), Category(..) 
) +import Distribution.Client.Init.Licenses + ( bsd2, bsd3, gplv2, gplv3, lgpl21, lgpl3, agplv3, apache20, mit, mpl20, isc ) +import Distribution.Client.Init.Heuristics + ( guessPackageName, guessAuthorNameMail, guessMainFileCandidates, + SourceFileEntry(..), + scanForModules, neededBuildPrograms ) + +import Distribution.License + ( License(..), knownLicenses ) + +import Distribution.ReadE + ( runReadE, readP_to_E ) +import Distribution.Simple.Setup + ( Flag(..), flagToMaybe ) +import Distribution.Simple.Configure + ( getInstalledPackages ) +import Distribution.Simple.Compiler + ( PackageDBStack, Compiler ) +import Distribution.Simple.Program + ( ProgramConfiguration ) +import Distribution.Simple.PackageIndex + ( InstalledPackageIndex, moduleNameIndex ) +import Distribution.Text + ( display, Text(..) ) + +initCabal :: Verbosity + -> PackageDBStack + -> Compiler + -> ProgramConfiguration + -> InitFlags + -> IO () +initCabal verbosity packageDBs comp conf initFlags = do + + installedPkgIndex <- getInstalledPackages verbosity comp packageDBs conf + + hSetBuffering stdout NoBuffering + + initFlags' <- extendFlags installedPkgIndex initFlags + + writeLicense initFlags' + writeSetupFile initFlags' + createSourceDirectories initFlags' + success <- writeCabalFile initFlags' + + when success $ generateWarnings initFlags' + +--------------------------------------------------------------------------- +-- Flag acquisition ----------------------------------------------------- +--------------------------------------------------------------------------- + +-- | Fill in more details by guessing, discovering, or prompting the +-- user. 
+extendFlags :: InstalledPackageIndex -> InitFlags -> IO InitFlags +extendFlags pkgIx = + getPackageName + >=> getVersion + >=> getLicense + >=> getAuthorInfo + >=> getHomepage + >=> getSynopsis + >=> getCategory + >=> getExtraSourceFiles + >=> getLibOrExec + >=> getLanguage + >=> getGenComments + >=> getSrcDir + >=> getModulesBuildToolsAndDeps pkgIx + +-- | Combine two actions which may return a value, preferring the first. That +-- is, run the second action only if the first doesn't return a value. +infixr 1 ?>> +(?>>) :: IO (Maybe a) -> IO (Maybe a) -> IO (Maybe a) +f ?>> g = do + ma <- f + if isJust ma + then return ma + else g + +-- | Witness the isomorphism between Maybe and Flag. +maybeToFlag :: Maybe a -> Flag a +maybeToFlag = maybe NoFlag Flag + +-- | Get the package name: use the package directory (supplied, or the current +-- directory by default) as a guess. +getPackageName :: InitFlags -> IO InitFlags +getPackageName flags = do + guess <- traverse guessPackageName (flagToMaybe $ packageDir flags) + ?>> Just `fmap` (getCurrentDirectory >>= guessPackageName) + + pkgName' <- return (flagToMaybe $ packageName flags) + ?>> maybePrompt flags (prompt "Package name" guess) + ?>> return guess + + return $ flags { packageName = maybeToFlag pkgName' } + +-- | Package version: use 0.1.0.0 as a last resort, but try prompting the user +-- if possible. +getVersion :: InitFlags -> IO InitFlags +getVersion flags = do + let v = Just $ Version [0,1,0,0] [] + v' <- return (flagToMaybe $ version flags) + ?>> maybePrompt flags (prompt "Package version" v) + ?>> return v + return $ flags { version = maybeToFlag v' } + +-- | Choose a license. +getLicense :: InitFlags -> IO InitFlags +getLicense flags = do + lic <- return (flagToMaybe $ license flags) + ?>> fmap (fmap (either UnknownLicense id) . 
join) + (maybePrompt flags + (promptListOptional "Please choose a license" listedLicenses)) + return $ flags { license = maybeToFlag lic } + where + listedLicenses = + knownLicenses \\ [GPL Nothing, LGPL Nothing, AGPL Nothing + , Apache Nothing, OtherLicense] + +-- | The author's name and email. Prompt, or try to guess from an existing +-- darcs repo. +getAuthorInfo :: InitFlags -> IO InitFlags +getAuthorInfo flags = do + (authorName, authorEmail) <- + (flagToMaybe *** flagToMaybe) `fmap` guessAuthorNameMail + authorName' <- return (flagToMaybe $ author flags) + ?>> maybePrompt flags (promptStr "Author name" authorName) + ?>> return authorName + + authorEmail' <- return (flagToMaybe $ email flags) + ?>> maybePrompt flags (promptStr "Maintainer email" authorEmail) + ?>> return authorEmail + + return $ flags { author = maybeToFlag authorName' + , email = maybeToFlag authorEmail' + } + +-- | Prompt for a homepage URL. +getHomepage :: InitFlags -> IO InitFlags +getHomepage flags = do + hp <- queryHomepage + hp' <- return (flagToMaybe $ homepage flags) + ?>> maybePrompt flags (promptStr "Project homepage URL" hp) + ?>> return hp + + return $ flags { homepage = maybeToFlag hp' } + +-- | Right now this does nothing, but it could be changed to do some +-- intelligent guessing. +queryHomepage :: IO (Maybe String) +queryHomepage = return Nothing -- get default remote darcs repo? + +-- | Prompt for a project synopsis. +getSynopsis :: InitFlags -> IO InitFlags +getSynopsis flags = do + syn <- return (flagToMaybe $ synopsis flags) + ?>> maybePrompt flags (promptStr "Project synopsis" Nothing) + + return $ flags { synopsis = maybeToFlag syn } + +-- | Prompt for a package category. +-- Note that it should be possible to do some smarter guessing here too, i.e. +-- look at the name of the top level source directory. 
+getCategory :: InitFlags -> IO InitFlags +getCategory flags = do + cat <- return (flagToMaybe $ category flags) + ?>> fmap join (maybePrompt flags + (promptListOptional "Project category" [Codec ..])) + return $ flags { category = maybeToFlag cat } + +-- | Try to guess extra source files (don't prompt the user). +getExtraSourceFiles :: InitFlags -> IO InitFlags +getExtraSourceFiles flags = do + extraSrcFiles <- return (extraSrc flags) + ?>> Just `fmap` guessExtraSourceFiles flags + + return $ flags { extraSrc = extraSrcFiles } + +-- | Try to guess things to include in the extra-source-files field. +-- For now, we just look for things in the root directory named +-- 'readme', 'changes', or 'changelog', with any sort of +-- capitalization and any extension. +guessExtraSourceFiles :: InitFlags -> IO [FilePath] +guessExtraSourceFiles flags = do + dir <- + maybe getCurrentDirectory return . flagToMaybe $ packageDir flags + files <- getDirectoryContents dir + return $ filter isExtra files + + where + isExtra = (`elem` ["README", "CHANGES", "CHANGELOG"]) + . map toUpper + . takeBaseName + +-- | Ask whether the project builds a library or executable. +getLibOrExec :: InitFlags -> IO InitFlags +getLibOrExec flags = do + isLib <- return (flagToMaybe $ packageType flags) + ?>> maybePrompt flags (either (const Library) id `fmap` + promptList "What does the package build" + [Library, Executable] + Nothing display False) + ?>> return (Just Library) + mainFile <- if isLib /= Just Executable then return Nothing else + getMainFile flags + + return $ flags { packageType = maybeToFlag isLib + , mainIs = maybeToFlag mainFile + } + +-- | Try to guess the main file of the executable, and prompt the user to choose +-- one of them. Top-level modules including the word 'Main' in the file name +-- will be candidates, and shorter filenames will be preferred. 
+getMainFile :: InitFlags -> IO (Maybe FilePath) +getMainFile flags = + return (flagToMaybe $ mainIs flags) + ?>> do + candidates <- guessMainFileCandidates flags + let showCandidate = either (++" (does not yet exist)") id + defaultFile = listToMaybe candidates + maybePrompt flags (either id (either id id) `fmap` + promptList "What is the main module of the executable" + candidates + defaultFile showCandidate True) + ?>> return (fmap (either id id) defaultFile) + +-- | Ask for the base language of the package. +getLanguage :: InitFlags -> IO InitFlags +getLanguage flags = do + lang <- return (flagToMaybe $ language flags) + ?>> maybePrompt flags + (either UnknownLanguage id `fmap` + promptList "What base language is the package written in" + [Haskell2010, Haskell98] + (Just Haskell2010) display True) + ?>> return (Just Haskell2010) + + return $ flags { language = maybeToFlag lang } + +-- | Ask whether to generate explanatory comments. +getGenComments :: InitFlags -> IO InitFlags +getGenComments flags = do + genComments <- return (not <$> flagToMaybe (noComments flags)) + ?>> maybePrompt flags (promptYesNo promptMsg (Just False)) + ?>> return (Just False) + return $ flags { noComments = maybeToFlag (fmap not genComments) } + where + promptMsg = "Include documentation on what each field means (y/n)" + +-- | Ask for the source root directory. +getSrcDir :: InitFlags -> IO InitFlags +getSrcDir flags = do + srcDirs <- return (sourceDirs flags) + ?>> fmap (:[]) `fmap` guessSourceDir flags + ?>> fmap (fmap ((:[]) . either id id) . join) (maybePrompt + flags + (promptListOptional' "Source directory" ["src"] id)) + + return $ flags { sourceDirs = srcDirs } + +-- | Try to guess source directory. Could try harder; for the +-- moment just looks to see whether there is a directory called 'src'. +guessSourceDir :: InitFlags -> IO (Maybe String) +guessSourceDir flags = do + dir <- + maybe getCurrentDirectory return . 
flagToMaybe $ packageDir flags + srcIsDir <- doesDirectoryExist (dir "src") + return $ if srcIsDir + then Just "src" + else Nothing + +-- | Get the list of exposed modules and extra tools needed to build them. +getModulesBuildToolsAndDeps :: InstalledPackageIndex -> InitFlags -> IO InitFlags +getModulesBuildToolsAndDeps pkgIx flags = do + dir <- maybe getCurrentDirectory return . flagToMaybe $ packageDir flags + + -- XXX really should use guessed source roots. + sourceFiles <- scanForModules dir + + Just mods <- return (exposedModules flags) + ?>> (return . Just . map moduleName $ sourceFiles) + + tools <- return (buildTools flags) + ?>> (return . Just . neededBuildPrograms $ sourceFiles) + + deps <- return (dependencies flags) + ?>> Just <$> importsToDeps flags + (fromString "Prelude" : -- to ensure we get base as a dep + ( nub -- only need to consider each imported package once + . filter (`notElem` mods) -- don't consider modules from + -- this package itself + . concatMap imports + $ sourceFiles + ) + ) + pkgIx + + exts <- return (otherExts flags) + ?>> (return . Just . nub . concatMap extensions $ sourceFiles) + + return $ flags { exposedModules = Just mods + , buildTools = tools + , dependencies = deps + , otherExts = exts + } + +importsToDeps :: InitFlags -> [ModuleName] -> InstalledPackageIndex -> IO [P.Dependency] +importsToDeps flags mods pkgIx = do + + let modMap :: M.Map ModuleName [InstalledPackageInfo] + modMap = M.map (filter exposed) $ moduleNameIndex pkgIx + + modDeps :: [(ModuleName, Maybe [InstalledPackageInfo])] + modDeps = map (id &&& flip M.lookup modMap) mods + + message flags "\nGuessing dependencies..." + nub . catMaybes <$> mapM (chooseDep flags) modDeps + +-- Given a module and a list of installed packages providing it, +-- choose a dependency (i.e. package + version range) to use for that +-- module. 
+chooseDep :: InitFlags -> (ModuleName, Maybe [InstalledPackageInfo]) + -> IO (Maybe P.Dependency) + +chooseDep flags (m, Nothing) + = message flags ("\nWarning: no package found providing " ++ display m ++ ".") + >> return Nothing + +chooseDep flags (m, Just []) + = message flags ("\nWarning: no package found providing " ++ display m ++ ".") + >> return Nothing + + -- We found some packages: group them by name. +chooseDep flags (m, Just ps) + = case pkgGroups of + -- if there's only one group, i.e. multiple versions of a single package, + -- we make it into a dependency, choosing the latest-ish version (see toDep). + [grp] -> Just <$> toDep grp + -- otherwise, we refuse to choose between different packages and make the user + -- do it. + grps -> do message flags ("\nWarning: multiple packages found providing " + ++ display m + ++ ": " ++ intercalate ", " (map (display . P.pkgName . head) grps)) + message flags "You will need to pick one and manually add it to the Build-depends: field." + return Nothing + where + pkgGroups = groupBy ((==) `on` P.pkgName) (map sourcePackageId ps) + + -- Given a list of available versions of the same package, pick a dependency. + toDep :: [P.PackageIdentifier] -> IO P.Dependency + + -- If only one version, easy. We change e.g. 0.4.2 into 0.4.* + toDep [pid] = return $ P.Dependency (P.pkgName pid) (pvpize . P.pkgVersion $ pid) + + -- Otherwise, choose the latest version and issue a warning. + toDep pids = do + message flags ("\nWarning: multiple versions of " ++ display (P.pkgName . head $ pids) ++ " provide " ++ display m ++ ", choosing the latest.") + return $ P.Dependency (P.pkgName . head $ pids) + (pvpize . maximum . 
map P.pkgVersion $ pids) + + pvpize :: Version -> VersionRange + pvpize v = orLaterVersion v' + `intersectVersionRanges` + earlierVersion (incVersion 1 v') + where v' = (v { versionBranch = take 2 (versionBranch v) }) + +incVersion :: Int -> Version -> Version +incVersion n (Version vlist tags) = Version (incVersion' n vlist) tags + where + incVersion' 0 [] = [1] + incVersion' 0 (v:_) = [v+1] + incVersion' m [] = replicate m 0 ++ [1] + incVersion' m (v:vs) = v : incVersion' (m-1) vs + +--------------------------------------------------------------------------- +-- Prompting/user interaction ------------------------------------------- +--------------------------------------------------------------------------- + +-- | Run a prompt or not based on the nonInteractive flag of the +-- InitFlags structure. +maybePrompt :: InitFlags -> IO t -> IO (Maybe t) +maybePrompt flags p = + case nonInteractive flags of + Flag True -> return Nothing + _ -> Just `fmap` p + +-- | Create a prompt with optional default value that returns a +-- String. +promptStr :: String -> Maybe String -> IO String +promptStr = promptDefault' Just id + +-- | Create a yes/no prompt with optional default value. +-- +promptYesNo :: String -> Maybe Bool -> IO Bool +promptYesNo = + promptDefault' recogniseYesNo showYesNo + where + recogniseYesNo s | s == "y" || s == "Y" = Just True + | s == "n" || s == "N" = Just False + | otherwise = Nothing + showYesNo True = "y" + showYesNo False = "n" + +-- | Create a prompt with optional default value that returns a value +-- of some Text instance. +prompt :: Text t => String -> Maybe t -> IO t +prompt = promptDefault' + (either (const Nothing) Just . runReadE (readP_to_E id parse)) + display + +-- | Create a prompt with an optional default value. 
+promptDefault' :: (String -> Maybe t) -- ^ parser + -> (t -> String) -- ^ pretty-printer + -> String -- ^ prompt message + -> Maybe t -- ^ optional default value + -> IO t +promptDefault' parser pretty pr def = do + putStr $ mkDefPrompt pr (pretty `fmap` def) + inp <- getLine + case (inp, def) of + ("", Just d) -> return d + _ -> case parser inp of + Just t -> return t + Nothing -> do putStrLn $ "Couldn't parse " ++ inp ++ ", please try again!" + promptDefault' parser pretty pr def + +-- | Create a prompt from a prompt string and a String representation +-- of an optional default value. +mkDefPrompt :: String -> Maybe String -> String +mkDefPrompt pr def = pr ++ "?" ++ defStr def + where defStr Nothing = " " + defStr (Just s) = " [default: " ++ s ++ "] " + +promptListOptional :: (Text t, Eq t) + => String -- ^ prompt + -> [t] -- ^ choices + -> IO (Maybe (Either String t)) +promptListOptional pr choices = promptListOptional' pr choices display + +promptListOptional' :: Eq t + => String -- ^ prompt + -> [t] -- ^ choices + -> (t -> String) -- ^ show an item + -> IO (Maybe (Either String t)) +promptListOptional' pr choices displayItem = + fmap rearrange + $ promptList pr (Nothing : map Just choices) (Just Nothing) + (maybe "(none)" displayItem) True + where + rearrange = either (Just . Left) (fmap Right) + +-- | Create a prompt from a list of items. +promptList :: Eq t + => String -- ^ prompt + -> [t] -- ^ choices + -> Maybe t -- ^ optional default value + -> (t -> String) -- ^ show an item + -> Bool -- ^ whether to allow an 'other' option + -> IO (Either String t) +promptList pr choices def displayItem other = do + putStrLn $ pr ++ ":" + let options1 = map (\c -> (Just c == def, displayItem c)) choices + options2 = zip ([1..]::[Int]) + (options1 ++ [(False, "Other (specify)") | other]) + mapM_ (putStrLn . 
\(n,(i,s)) -> showOption n i ++ s) options2 + promptList' displayItem (length options2) choices def other + where showOption n i | n < 10 = " " ++ star i ++ " " ++ rest + | otherwise = " " ++ star i ++ rest + where rest = show n ++ ") " + star True = "*" + star False = " " + +promptList' :: (t -> String) -> Int -> [t] -> Maybe t -> Bool -> IO (Either String t) +promptList' displayItem numChoices choices def other = do + putStr $ mkDefPrompt "Your choice" (displayItem `fmap` def) + inp <- getLine + case (inp, def) of + ("", Just d) -> return $ Right d + _ -> case readMaybe inp of + Nothing -> invalidChoice inp + Just n -> getChoice n + where invalidChoice inp = do putStrLn $ inp ++ " is not a valid choice." + promptList' displayItem numChoices choices def other + getChoice n | n < 1 || n > numChoices = invalidChoice (show n) + | n < numChoices || + (n == numChoices && not other) + = return . Right $ choices !! (n-1) + | otherwise = Left `fmap` promptStr "Please specify" Nothing + +readMaybe :: (Read a) => String -> Maybe a +readMaybe s = case reads s of + [(a,"")] -> Just a + _ -> Nothing + +--------------------------------------------------------------------------- +-- File generation ------------------------------------------------------ +--------------------------------------------------------------------------- + +writeLicense :: InitFlags -> IO () +writeLicense flags = do + message flags "\nGenerating LICENSE..." + year <- show <$> getYear + let authors = fromMaybe "???" . flagToMaybe . 
author $ flags + let licenseFile = + case license flags of + Flag BSD2 + -> Just $ bsd2 authors year + + Flag BSD3 + -> Just $ bsd3 authors year + + Flag (GPL (Just (Version {versionBranch = [2]}))) + -> Just gplv2 + + Flag (GPL (Just (Version {versionBranch = [3]}))) + -> Just gplv3 + + Flag (LGPL (Just (Version {versionBranch = [2, 1]}))) + -> Just lgpl21 + + Flag (LGPL (Just (Version {versionBranch = [3]}))) + -> Just lgpl3 + + Flag (AGPL (Just (Version {versionBranch = [3]}))) + -> Just agplv3 + + Flag (Apache (Just (Version {versionBranch = [2, 0]}))) + -> Just apache20 + + Flag MIT + -> Just $ mit authors year + + Flag (MPL (Version {versionBranch = [2, 0]})) + -> Just mpl20 + + Flag ISC + -> Just $ isc authors year + + _ -> Nothing + + case licenseFile of + Just licenseText -> writeFileSafe flags "LICENSE" licenseText + Nothing -> message flags "Warning: unknown license type, you must put a copy in LICENSE yourself." + +getYear :: IO Integer +getYear = do + u <- getCurrentTime + z <- getCurrentTimeZone + let l = utcToLocalTime z u + (y, _, _) = toGregorian $ localDay l + return y + +writeSetupFile :: InitFlags -> IO () +writeSetupFile flags = do + message flags "Generating Setup.hs..." + writeFileSafe flags "Setup.hs" setupFile + where + setupFile = unlines + [ "import Distribution.Simple" + , "main = defaultMain" + ] + +writeCabalFile :: InitFlags -> IO Bool +writeCabalFile flags@(InitFlags{packageName = NoFlag}) = do + message flags "Error: no package name provided." + return False +writeCabalFile flags@(InitFlags{packageName = Flag p}) = do + let cabalFileName = display p ++ ".cabal" + message flags $ "Generating " ++ cabalFileName ++ "..." + writeFileSafe flags cabalFileName (generateCabalFile cabalFileName flags) + return True + +-- | Write a file \"safely\", backing up any existing version (unless +-- the overwrite flag is set). 
+writeFileSafe :: InitFlags -> FilePath -> String -> IO () +writeFileSafe flags fileName content = do + moveExistingFile flags fileName + writeFile fileName content + +-- | Create source directories, if they were given. +createSourceDirectories :: InitFlags -> IO () +createSourceDirectories flags = case sourceDirs flags of + Just dirs -> forM_ dirs (createDirectoryIfMissing True) + Nothing -> return () + +-- | Move an existing file, if there is one, and the overwrite flag is +-- not set. +moveExistingFile :: InitFlags -> FilePath -> IO () +moveExistingFile flags fileName = + unless (overwrite flags == Flag True) $ do + e <- doesFileExist fileName + when e $ do + newName <- findNewName fileName + message flags $ "Warning: " ++ fileName ++ " already exists, backing up old version in " ++ newName + copyFile fileName newName + +findNewName :: FilePath -> IO FilePath +findNewName oldName = findNewName' 0 + where + findNewName' :: Integer -> IO FilePath + findNewName' n = do + let newName = oldName <.> ("save" ++ show n) + e <- doesFileExist newName + if e then findNewName' (n+1) else return newName + +-- | Generate a .cabal file from an InitFlags structure. NOTE: this +-- is rather ad-hoc! What we would REALLY like is to have a +-- standard low-level AST type representing .cabal files, which +-- preserves things like comments, and to write an *inverse* +-- parser/pretty-printer pair between .cabal files and this AST. +-- Then instead of this ad-hoc code we could just map an InitFlags +-- structure onto a low-level AST structure and use the existing +-- pretty-printing code to generate the file. +generateCabalFile :: String -> InitFlags -> String +generateCabalFile fileName c = + renderStyle style { lineLength = 79, ribbonsPerLine = 1.1 } $ + (if minimal c /= Flag True + then showComment (Just $ "Initial " ++ fileName ++ " generated by cabal " + ++ "init. 
For further documentation, see " + ++ "http://haskell.org/cabal/users-guide/") + $$ text "" + else empty) + $$ + vcat [ field "name" (packageName c) + (Just "The name of the package.") + True + + , field "version" (version c) + (Just $ "The package version. See the Haskell package versioning policy (PVP) for standards guiding when and how versions should be incremented.\nhttp://www.haskell.org/haskellwiki/Package_versioning_policy\n" + ++ "PVP summary: +-+------- breaking API changes\n" + ++ " | | +----- non-breaking API additions\n" + ++ " | | | +--- code changes with no API change") + True + + , fieldS "synopsis" (synopsis c) + (Just "A short (one-line) description of the package.") + True + + , fieldS "description" NoFlag + (Just "A longer description of the package.") + True + + , fieldS "homepage" (homepage c) + (Just "URL for the project homepage or repository.") + False + + , fieldS "bug-reports" NoFlag + (Just "A URL where users can report bugs.") + False + + , field "license" (license c) + (Just "The license under which the package is released.") + True + + , fieldS "license-file" (Flag "LICENSE") + (Just "The file containing the license text.") + True + + , fieldS "author" (author c) + (Just "The package author(s).") + True + + , fieldS "maintainer" (email c) + (Just "An email address to which users can send suggestions, bug reports, and patches.") + True + + , fieldS "copyright" NoFlag + (Just "A copyright notice.") + True + + , fieldS "category" (either id display `fmap` category c) + Nothing + True + + , fieldS "build-type" (Flag "Simple") + Nothing + True + + , fieldS "extra-source-files" (listFieldS (extraSrc c)) + (Just "Extra files to be distributed with the package, such as examples or a README.") + True + + , field "cabal-version" (Flag $ orLaterVersion (Version [1,10] [])) + (Just "Constraint on the version of Cabal needed to build this package.") + False + + , case packageType c of + Flag Executable -> + text "\nexecutable" <+> + text (maybe "" 
display . flagToMaybe $ packageName c) $$ + nest 2 (vcat + [ fieldS "main-is" (mainIs c) (Just ".hs or .lhs file containing the Main module.") True + + , generateBuildInfo Executable c + ]) + Flag Library -> text "\nlibrary" $$ nest 2 (vcat + [ fieldS "exposed-modules" (listField (exposedModules c)) + (Just "Modules exported by the library.") + True + + , generateBuildInfo Library c + ]) + _ -> empty + ] + where + generateBuildInfo :: PackageType -> InitFlags -> Doc + generateBuildInfo pkgtype c' = vcat + [ fieldS "other-modules" (listField (otherModules c')) + (Just $ case pkgtype of + Library -> "Modules included in this library but not exported." + Executable -> "Modules included in this executable, other than Main.") + True + + , fieldS "other-extensions" (listField (otherExts c')) + (Just "LANGUAGE extensions used by modules in this package.") + True + + , fieldS "build-depends" (listField (dependencies c')) + (Just "Other library packages from which modules are imported.") + True + + , fieldS "hs-source-dirs" (listFieldS (sourceDirs c')) + (Just "Directories containing source files.") + True + + , fieldS "build-tools" (listFieldS (buildTools c')) + (Just "Extra tools (e.g. alex, hsc2hs, ...) needed to build the source.") + False + + , field "default-language" (language c') + (Just "Base language which the package is written in.") + True + ] + + listField :: Text s => Maybe [s] -> Flag String + listField = listFieldS . fmap (map display) + + listFieldS :: Maybe [String] -> Flag String + listFieldS = Flag . maybe "" (intercalate ", ") + + field :: Text t => String -> Flag t -> Maybe String -> Bool -> Doc + field s f = fieldS s (fmap display f) + + fieldS :: String -- ^ Name of the field + -> Flag String -- ^ Field contents + -> Maybe String -- ^ Comment to explain the field + -> Bool -- ^ Should the field be included (commented out) even if blank? 
+ -> Doc + fieldS _ NoFlag _ inc | not inc || (minimal c == Flag True) = empty + fieldS _ (Flag "") _ inc | not inc || (minimal c == Flag True) = empty + fieldS s f com _ = case (isJust com, noComments c, minimal c) of + (_, _, Flag True) -> id + (_, Flag True, _) -> id + (True, _, _) -> (showComment com $$) . ($$ text "") + (False, _, _) -> ($$ text "") + $ + comment f <> text s <> colon + <> text (replicate (20 - length s) ' ') + <> text (fromMaybe "" . flagToMaybe $ f) + comment NoFlag = text "-- " + comment (Flag "") = text "-- " + comment _ = text "" + + showComment :: Maybe String -> Doc + showComment (Just t) = vcat + . map (text . ("-- "++)) . lines + . renderStyle style { + lineLength = 76, + ribbonsPerLine = 1.05 + } + . vcat + . map (fcat . map text . breakLine) + . lines + $ t + showComment Nothing = text "" + + breakLine [] = [] + breakLine cs = case break (==' ') cs of (w,cs') -> w : breakLine' cs' + breakLine' [] = [] + breakLine' cs = case span (==' ') cs of (w,cs') -> w : breakLine cs' + +-- | Generate warnings for missing fields etc. +generateWarnings :: InitFlags -> IO () +generateWarnings flags = do + message flags "" + when (synopsis flags `elem` [NoFlag, Flag ""]) + (message flags "Warning: no synopsis given. You should edit the .cabal file and add one.") + + message flags "You may want to edit the .cabal file and add a Description field." + +-- | Possibly generate a message to stdout, taking into account the +-- --quiet flag. 
+message :: InitFlags -> String -> IO () +message (InitFlags{quiet = Flag True}) _ = return () +message _ s = putStrLn s diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Install.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Install.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Install.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Install.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,1565 @@ +{-# LANGUAGE CPP #-} + +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.Install +-- Copyright : (c) 2005 David Himmelstrup +-- 2007 Bjorn Bringert +-- 2007-2010 Duncan Coutts +-- License : BSD-like +-- +-- Maintainer : cabal-devel@haskell.org +-- Stability : provisional +-- Portability : portable +-- +-- High level interface to package installation. +----------------------------------------------------------------------------- +module Distribution.Client.Install ( + -- * High-level interface + install, + + -- * Lower-level interface that allows to manipulate the install plan + makeInstallContext, + makeInstallPlan, + processInstallPlan, + InstallArgs, + InstallContext, + + -- * Prune certain packages from the install plan + pruneInstallPlan + ) where + +import Data.List + ( isPrefixOf, unfoldr, nub, sort, (\\) ) +import qualified Data.Map as Map +import qualified Data.Set as S +import Data.Maybe + ( isJust, fromMaybe, mapMaybe, maybeToList ) +import Control.Exception as Exception + ( Exception(toException), bracket, catches + , Handler(Handler), handleJust, IOException, SomeException ) +#ifndef mingw32_HOST_OS +import Control.Exception as Exception + ( Exception(fromException) ) +#endif +import System.Exit + ( ExitCode(..) 
) +import Distribution.Compat.Exception + ( catchIO, catchExit ) +#if !MIN_VERSION_base(4,8,0) +import Control.Applicative + ( (<$>) ) +#endif +import Control.Monad + ( forM_, when, unless ) +import System.Directory + ( getTemporaryDirectory, doesDirectoryExist, doesFileExist, + createDirectoryIfMissing, removeFile, renameDirectory ) +import System.FilePath + ( (), (<.>), equalFilePath, takeDirectory ) +import System.IO + ( openFile, IOMode(AppendMode), hClose ) +import System.IO.Error + ( isDoesNotExistError, ioeGetFileName ) + +import Distribution.Client.Targets +import Distribution.Client.Configure + ( chooseCabalVersion ) +import Distribution.Client.Dependency +import Distribution.Client.Dependency.Types + ( Solver(..) ) +import Distribution.Client.FetchUtils +import qualified Distribution.Client.Haddock as Haddock (regenerateHaddockIndex) +import Distribution.Client.IndexUtils as IndexUtils + ( getSourcePackages, getInstalledPackages ) +import qualified Distribution.Client.InstallPlan as InstallPlan +import Distribution.Client.InstallPlan (InstallPlan) +import Distribution.Client.Setup + ( GlobalFlags(..) + , ConfigFlags(..), configureCommand, filterConfigureFlags + , ConfigExFlags(..), InstallFlags(..) ) +import Distribution.Client.Config + ( defaultCabalDir, defaultUserInstall ) +import Distribution.Client.Sandbox.Timestamp + ( withUpdateTimestamps ) +import Distribution.Client.Sandbox.Types + ( SandboxPackageInfo(..), UseSandbox(..), isUseSandbox + , whenUsingSandbox ) +import Distribution.Client.Tar (extractTarGzFile) +import Distribution.Client.Types as Source +import Distribution.Client.BuildReports.Types + ( ReportLevel(..) 
) +import Distribution.Client.SetupWrapper + ( setupWrapper, SetupScriptOptions(..), defaultSetupScriptOptions ) +import qualified Distribution.Client.BuildReports.Anonymous as BuildReports +import qualified Distribution.Client.BuildReports.Storage as BuildReports + ( storeAnonymous, storeLocal, fromInstallPlan, fromPlanningFailure ) +import qualified Distribution.Client.InstallSymlink as InstallSymlink + ( symlinkBinaries ) +import qualified Distribution.Client.PackageIndex as SourcePackageIndex +import qualified Distribution.Client.Win32SelfUpgrade as Win32SelfUpgrade +import qualified Distribution.Client.World as World +import qualified Distribution.InstalledPackageInfo as Installed +import Distribution.Client.Compat.ExecutablePath +import Distribution.Client.JobControl + +import Distribution.Utils.NubList +import Distribution.Simple.Compiler + ( CompilerId(..), Compiler(compilerId), compilerFlavor + , CompilerInfo(..), compilerInfo, PackageDB(..), PackageDBStack ) +import Distribution.Simple.Program (ProgramConfiguration, + defaultProgramConfiguration) +import qualified Distribution.Simple.InstallDirs as InstallDirs +import qualified Distribution.Simple.PackageIndex as PackageIndex +import Distribution.Simple.PackageIndex (InstalledPackageIndex) +import Distribution.Simple.Setup + ( haddockCommand, HaddockFlags(..) + , buildCommand, BuildFlags(..), emptyBuildFlags + , toFlag, fromFlag, fromFlagOrDefault, flagToMaybe, defaultDistPref ) +import qualified Distribution.Simple.Setup as Cabal + ( Flag(..) 
+ , copyCommand, CopyFlags(..), emptyCopyFlags + , registerCommand, RegisterFlags(..), emptyRegisterFlags + , testCommand, TestFlags(..), emptyTestFlags ) +import Distribution.Simple.Utils + ( createDirectoryIfMissingVerbose, rawSystemExit, comparing + , writeFileAtomic, withTempFile , withUTF8FileContents ) +import Distribution.Simple.InstallDirs as InstallDirs + ( PathTemplate, fromPathTemplate, toPathTemplate, substPathTemplate + , initialPathTemplateEnv, installDirsTemplateEnv ) +import Distribution.Package + ( PackageIdentifier(..), PackageId, packageName, packageVersion + , Package(..), PackageFixedDeps(..), PackageKey + , Dependency(..), thisPackageVersion, InstalledPackageId, installedPackageId ) +import qualified Distribution.PackageDescription as PackageDescription +import Distribution.PackageDescription + ( PackageDescription, GenericPackageDescription(..), Flag(..) + , FlagName(..), FlagAssignment ) +import Distribution.PackageDescription.Configuration + ( finalizePackageDescription ) +import Distribution.ParseUtils + ( showPWarning ) +import Distribution.Version + ( Version, VersionRange, foldVersionRange ) +import Distribution.Simple.Utils as Utils + ( notice, info, warn, debug, debugNoWrap, die + , intercalate, withTempDirectory ) +import Distribution.Client.Utils + ( determineNumJobs, inDir, mergeBy, MergeResult(..) 
+ , tryCanonicalizePath ) +import Distribution.System + ( Platform, OS(Windows), buildOS ) +import Distribution.Text + ( display ) +import Distribution.Verbosity as Verbosity + ( Verbosity, showForCabal, normal, verbose ) +import Distribution.Simple.BuildPaths ( exeExtension ) + +--TODO: +-- * assign flags to packages individually +-- * complain about flags that do not apply to any package given as target +-- so flags do not apply to dependencies, only listed, can use flag +-- constraints for dependencies +-- * only record applicable flags in world file +-- * allow flag constraints +-- * allow installed constraints +-- * allow flag and installed preferences +-- * change world file to use cabal section syntax +-- * allow persistent configure flags for each package individually + +-- ------------------------------------------------------------ +-- * Top level user actions +-- ------------------------------------------------------------ + +-- | Installs the packages needed to satisfy a list of dependencies. +-- +install + :: Verbosity + -> PackageDBStack + -> [Repo] + -> Compiler + -> Platform + -> ProgramConfiguration + -> UseSandbox + -> Maybe SandboxPackageInfo + -> GlobalFlags + -> ConfigFlags + -> ConfigExFlags + -> InstallFlags + -> HaddockFlags + -> [UserTarget] + -> IO () +install verbosity packageDBs repos comp platform conf useSandbox mSandboxPkgInfo + globalFlags configFlags configExFlags installFlags haddockFlags + userTargets0 = do + + installContext <- makeInstallContext verbosity args (Just userTargets0) + planResult <- foldProgress logMsg (return . Left) (return . 
Right) =<< + makeInstallPlan verbosity args installContext + + case planResult of + Left message -> do + reportPlanningFailure verbosity args installContext message + die' message + Right installPlan -> + processInstallPlan verbosity args installContext installPlan + where + args :: InstallArgs + args = (packageDBs, repos, comp, platform, conf, useSandbox, mSandboxPkgInfo, + globalFlags, configFlags, configExFlags, installFlags, + haddockFlags) + + die' message = die (message ++ if isUseSandbox useSandbox + then installFailedInSandbox else []) + -- TODO: use a better error message, remove duplication. + installFailedInSandbox = + "\nNote: when using a sandbox, all packages are required to have " + ++ "consistent dependencies. " + ++ "Try reinstalling/unregistering the offending packages or " + ++ "recreating the sandbox." + logMsg message rest = debugNoWrap verbosity message >> rest + +-- TODO: Make InstallContext a proper data type with documented fields. +-- | Common context for makeInstallPlan and processInstallPlan. +type InstallContext = ( InstalledPackageIndex, SourcePackageDb + , [UserTarget], [PackageSpecifier SourcePackage] ) + +-- TODO: Make InstallArgs a proper data type with documented fields or just get +-- rid of it completely. +-- | Initial arguments given to 'install' or 'makeInstallContext'. +type InstallArgs = ( PackageDBStack + , [Repo] + , Compiler + , Platform + , ProgramConfiguration + , UseSandbox + , Maybe SandboxPackageInfo + , GlobalFlags + , ConfigFlags + , ConfigExFlags + , InstallFlags + , HaddockFlags ) + +-- | Make an install context given install arguments. 
+makeInstallContext :: Verbosity -> InstallArgs -> Maybe [UserTarget] + -> IO InstallContext +makeInstallContext verbosity + (packageDBs, repos, comp, _, conf,_,_, + globalFlags, _, _, _, _) mUserTargets = do + + installedPkgIndex <- getInstalledPackages verbosity comp packageDBs conf + sourcePkgDb <- getSourcePackages verbosity repos + + (userTargets, pkgSpecifiers) <- case mUserTargets of + Nothing -> + -- We want to distinguish between the case where the user has given an + -- empty list of targets on the command-line and the case where we + -- specifically want to have an empty list of targets. + return ([], []) + Just userTargets0 -> do + -- For install, if no target is given it means we use the current + -- directory as the single target. + let userTargets | null userTargets0 = [UserTargetLocalDir "."] + | otherwise = userTargets0 + + pkgSpecifiers <- resolveUserTargets verbosity + (fromFlag $ globalWorldFile globalFlags) + (packageIndex sourcePkgDb) + userTargets + return (userTargets, pkgSpecifiers) + + return (installedPkgIndex, sourcePkgDb, userTargets, pkgSpecifiers) + +-- | Make an install plan given install context and install arguments. +makeInstallPlan :: Verbosity -> InstallArgs -> InstallContext + -> IO (Progress String String InstallPlan) +makeInstallPlan verbosity + (_, _, comp, platform, _, _, mSandboxPkgInfo, + _, configFlags, configExFlags, installFlags, + _) + (installedPkgIndex, sourcePkgDb, + _, pkgSpecifiers) = do + + solver <- chooseSolver verbosity (fromFlag (configSolver configExFlags)) + (compilerInfo comp) + notice verbosity "Resolving dependencies..." + return $ planPackages comp platform mSandboxPkgInfo solver + configFlags configExFlags installFlags + installedPkgIndex sourcePkgDb pkgSpecifiers + +-- | Given an install plan, perform the actual installations. 
+processInstallPlan :: Verbosity -> InstallArgs -> InstallContext + -> InstallPlan + -> IO () +processInstallPlan verbosity + args@(_,_, comp, _, _, _, _, _, _, _, installFlags, _) + (installedPkgIndex, sourcePkgDb, + userTargets, pkgSpecifiers) installPlan = do + checkPrintPlan verbosity comp installedPkgIndex installPlan sourcePkgDb + installFlags pkgSpecifiers + + unless (dryRun || nothingToInstall) $ do + installPlan' <- performInstallations verbosity + args installedPkgIndex installPlan + postInstallActions verbosity args userTargets installPlan' + where + dryRun = fromFlag (installDryRun installFlags) + nothingToInstall = null (InstallPlan.ready installPlan) + +-- ------------------------------------------------------------ +-- * Installation planning +-- ------------------------------------------------------------ + +planPackages :: Compiler + -> Platform + -> Maybe SandboxPackageInfo + -> Solver + -> ConfigFlags + -> ConfigExFlags + -> InstallFlags + -> InstalledPackageIndex + -> SourcePackageDb + -> [PackageSpecifier SourcePackage] + -> Progress String String InstallPlan +planPackages comp platform mSandboxPkgInfo solver + configFlags configExFlags installFlags + installedPkgIndex sourcePkgDb pkgSpecifiers = + + resolveDependencies + platform (compilerInfo comp) + solver + resolverParams + + >>= if onlyDeps then pruneInstallPlan pkgSpecifiers else return + + where + resolverParams = + + setMaxBackjumps (if maxBackjumps < 0 then Nothing + else Just maxBackjumps) + + . setIndependentGoals independentGoals + + . setReorderGoals reorderGoals + + . setAvoidReinstalls avoidReinstalls + + . setShadowPkgs shadowPkgs + + . setStrongFlags strongFlags + + . setPreferenceDefault (if upgradeDeps then PreferAllLatest + else PreferLatestForSelected) + + . removeUpperBounds allowNewer + + . addPreferences + -- preferences from the config file or command line + [ PackageVersionPreference name ver + | Dependency name ver <- configPreferences configExFlags ] + + . 
addConstraints + -- version constraints from the config file or command line + (map userToPackageConstraint (configExConstraints configExFlags)) + + . addConstraints + --FIXME: this just applies all flags to all targets which + -- is silly. We should check if the flags are appropriate + [ PackageConstraintFlags (pkgSpecifierTarget pkgSpecifier) flags + | let flags = configConfigurationsFlags configFlags + , not (null flags) + , pkgSpecifier <- pkgSpecifiers ] + + . addConstraints + [ PackageConstraintStanzas (pkgSpecifierTarget pkgSpecifier) stanzas + | pkgSpecifier <- pkgSpecifiers ] + + . maybe id applySandboxInstallPolicy mSandboxPkgInfo + + . (if reinstall then reinstallTargets else id) + + $ standardInstallPolicy + installedPkgIndex sourcePkgDb pkgSpecifiers + + stanzas = concat + [ if testsEnabled then [TestStanzas] else [] + , if benchmarksEnabled then [BenchStanzas] else [] + ] + testsEnabled = fromFlagOrDefault False $ configTests configFlags + benchmarksEnabled = fromFlagOrDefault False $ configBenchmarks configFlags + + reinstall = fromFlag (installReinstall installFlags) + reorderGoals = fromFlag (installReorderGoals installFlags) + independentGoals = fromFlag (installIndependentGoals installFlags) + avoidReinstalls = fromFlag (installAvoidReinstalls installFlags) + shadowPkgs = fromFlag (installShadowPkgs installFlags) + strongFlags = fromFlag (installStrongFlags installFlags) + maxBackjumps = fromFlag (installMaxBackjumps installFlags) + upgradeDeps = fromFlag (installUpgradeDeps installFlags) + onlyDeps = fromFlag (installOnlyDeps installFlags) + allowNewer = fromFlag (configAllowNewer configExFlags) + +-- | Remove the provided targets from the install plan. 
+pruneInstallPlan :: Package pkg => [PackageSpecifier pkg] -> InstallPlan + -> Progress String String InstallPlan +pruneInstallPlan pkgSpecifiers = + -- TODO: this is a general feature and should be moved to D.C.Dependency + -- Also, the InstallPlan.remove should return info more precise to the + -- problem, rather than the very general PlanProblem type. + either (Fail . explain) Done + . InstallPlan.remove (\pkg -> packageName pkg `elem` targetnames) + where + explain :: [InstallPlan.PlanProblem] -> String + explain problems = + "Cannot select only the dependencies (as requested by the " + ++ "'--only-dependencies' flag), " + ++ (case pkgids of + [pkgid] -> "the package " ++ display pkgid ++ " is " + _ -> "the packages " + ++ intercalate ", " (map display pkgids) ++ " are ") + ++ "required by a dependency of one of the other targets." + where + pkgids = + nub [ depid + | InstallPlan.PackageMissingDeps _ depids <- problems + , depid <- depids + , packageName depid `elem` targetnames ] + + targetnames = map pkgSpecifierTarget pkgSpecifiers + +-- ------------------------------------------------------------ +-- * Informational messages +-- ------------------------------------------------------------ + +-- | Perform post-solver checks of the install plan and print it if +-- either requested or needed. +checkPrintPlan :: Verbosity + -> Compiler + -> InstalledPackageIndex + -> InstallPlan + -> SourcePackageDb + -> InstallFlags + -> [PackageSpecifier SourcePackage] + -> IO () +checkPrintPlan verbosity comp installed installPlan sourcePkgDb + installFlags pkgSpecifiers = do + + -- User targets that are already installed. + let preExistingTargets = + [ p | let tgts = map pkgSpecifierTarget pkgSpecifiers, + InstallPlan.PreExisting p <- InstallPlan.toList installPlan, + packageName p `elem` tgts ] + + -- If there's nothing to install, we print the already existing + -- target packages as an explanation. 
+ when nothingToInstall $ + notice verbosity $ unlines $ + "All the requested packages are already installed:" + : map (display . packageId) preExistingTargets + ++ ["Use --reinstall if you want to reinstall anyway."] + + let lPlan = linearizeInstallPlan comp installed installPlan + -- Are any packages classified as reinstalls? + let reinstalledPkgs = concatMap (extractReinstalls . snd) lPlan + -- Packages that are already broken. + let oldBrokenPkgs = + map Installed.installedPackageId + . PackageIndex.reverseDependencyClosure installed + . map (Installed.installedPackageId . fst) + . PackageIndex.brokenPackages + $ installed + let excluded = reinstalledPkgs ++ oldBrokenPkgs + -- Packages that are reverse dependencies of replaced packages are very + -- likely to be broken. We exclude packages that are already broken. + let newBrokenPkgs = + filter (\ p -> not (Installed.installedPackageId p `elem` excluded)) + (PackageIndex.reverseDependencyClosure installed reinstalledPkgs) + let containsReinstalls = not (null reinstalledPkgs) + let breaksPkgs = not (null newBrokenPkgs) + + let adaptedVerbosity + | containsReinstalls && not overrideReinstall = verbosity `max` verbose + | otherwise = verbosity + + -- We print the install plan if we are in a dry-run or if we are confronted + -- with a dangerous install plan. + when (dryRun || containsReinstalls && not overrideReinstall) $ + printPlan (dryRun || breaksPkgs && not overrideReinstall) + adaptedVerbosity lPlan sourcePkgDb + + -- If the install plan is dangerous, we print various warning messages. In + -- particular, if we can see that packages are likely to be broken, we even + -- bail out (unless installation has been forced with --force-reinstalls). + when containsReinstalls $ do + if breaksPkgs + then do + (if dryRun || overrideReinstall then warn verbosity else die) $ unlines $ + "The following packages are likely to be broken by the reinstalls:" + : map (display . 
Installed.sourcePackageId) newBrokenPkgs + ++ if overrideReinstall + then if dryRun then [] else + ["Continuing even though the plan contains dangerous reinstalls."] + else + ["Use --force-reinstalls if you want to install anyway."] + else unless dryRun $ warn verbosity + "Note that reinstalls are always dangerous. Continuing anyway..." + + where + nothingToInstall = null (InstallPlan.ready installPlan) + + dryRun = fromFlag (installDryRun installFlags) + overrideReinstall = fromFlag (installOverrideReinstall installFlags) + +linearizeInstallPlan :: Compiler + -> InstalledPackageIndex + -> InstallPlan + -> [(ReadyPackage, PackageStatus)] +linearizeInstallPlan comp installedPkgIndex plan = + unfoldr next plan + where + next plan' = case InstallPlan.ready plan' of + [] -> Nothing + (pkg:_) -> Just ((pkg, status), plan'') + where + pkgid = installedPackageId pkg + status = packageStatus comp installedPkgIndex pkg + plan'' = InstallPlan.completed pkgid + (BuildOk DocsNotTried TestsNotTried + (Just $ Installed.emptyInstalledPackageInfo + { Installed.sourcePackageId = packageId pkg + , Installed.installedPackageId = pkgid })) + (InstallPlan.processing [pkg] plan') + --FIXME: This is a bit of a hack, + -- pretending that each package is installed + -- It's doubly a hack because the installed package ID + -- didn't get updated... + +data PackageStatus = NewPackage + | NewVersion [Version] + | Reinstall [InstalledPackageId] [PackageChange] + +type PackageChange = MergeResult PackageIdentifier PackageIdentifier + +extractReinstalls :: PackageStatus -> [InstalledPackageId] +extractReinstalls (Reinstall ipids _) = ipids +extractReinstalls _ = [] + +packageStatus :: Compiler -> InstalledPackageIndex -> ReadyPackage -> PackageStatus +packageStatus _comp installedPkgIndex cpkg = + case PackageIndex.lookupPackageName installedPkgIndex + (packageName cpkg) of + [] -> NewPackage + ps -> case filter ((== packageId cpkg) + . 
Installed.sourcePackageId) (concatMap snd ps) of + [] -> NewVersion (map fst ps) + pkgs@(pkg:_) -> Reinstall (map Installed.installedPackageId pkgs) + (changes pkg cpkg) + + where + + changes :: Installed.InstalledPackageInfo + -> ReadyPackage + -> [MergeResult PackageIdentifier PackageIdentifier] + changes pkg pkg' = + filter changed + $ mergeBy (comparing packageName) + -- get dependencies of installed package (convert to source pkg ids via + -- index) + (nub . sort . concatMap + (maybeToList . fmap Installed.sourcePackageId . + PackageIndex.lookupInstalledPackageId installedPkgIndex) . + Installed.depends $ pkg) + -- get dependencies of configured package + (nub . sort . depends $ pkg') + + changed (InBoth pkgid pkgid') = pkgid /= pkgid' + changed _ = True + +printPlan :: Bool -- is dry run + -> Verbosity + -> [(ReadyPackage, PackageStatus)] + -> SourcePackageDb + -> IO () +printPlan dryRun verbosity plan sourcePkgDb = case plan of + [] -> return () + pkgs + | verbosity >= Verbosity.verbose -> putStr $ unlines $ + ("In order, the following " ++ wouldWill ++ " be installed:") + : map showPkgAndReason pkgs + | otherwise -> notice verbosity $ unlines $ + ("In order, the following " ++ wouldWill + ++ " be installed (use -v for more details):") + : map showPkg pkgs + where + wouldWill | dryRun = "would" + | otherwise = "will" + + showPkg (pkg, _) = display (packageId pkg) ++ + showLatest (pkg) + + showPkgAndReason (pkg', pr) = display (packageId pkg') ++ + showLatest pkg' ++ + showFlagAssignment (nonDefaultFlags pkg') ++ + showStanzas (stanzas pkg') ++ + showDep pkg' ++ + case pr of + NewPackage -> " (new package)" + NewVersion _ -> " (new version)" + Reinstall _ cs -> " (reinstall)" ++ case cs of + [] -> "" + diff -> " (changes: " ++ intercalate ", " (map change diff) ++ ")" + + showLatest :: ReadyPackage -> String + showLatest pkg = case mLatestVersion of + Just latestVersion -> + if packageVersion pkg < latestVersion + then (" (latest: " ++ display latestVersion 
++ ")") + else "" + Nothing -> "" + where + mLatestVersion :: Maybe Version + mLatestVersion = case SourcePackageIndex.lookupPackageName + (packageIndex sourcePkgDb) + (packageName pkg) of + [] -> Nothing + x -> Just $ packageVersion $ last x + + toFlagAssignment :: [Flag] -> FlagAssignment + toFlagAssignment = map (\ f -> (flagName f, flagDefault f)) + + nonDefaultFlags :: ReadyPackage -> FlagAssignment + nonDefaultFlags (ReadyPackage spkg fa _ _) = + let defaultAssignment = + toFlagAssignment + (genPackageFlags (Source.packageDescription spkg)) + in fa \\ defaultAssignment + + stanzas :: ReadyPackage -> [OptionalStanza] + stanzas (ReadyPackage _ _ sts _) = sts + + showStanzas :: [OptionalStanza] -> String + showStanzas = concatMap ((' ' :) . showStanza) + showStanza TestStanzas = "*test" + showStanza BenchStanzas = "*bench" + + -- FIXME: this should be a proper function in a proper place + showFlagAssignment :: FlagAssignment -> String + showFlagAssignment = concatMap ((' ' :) . showFlagValue) + showFlagValue (f, True) = '+' : showFlagName f + showFlagValue (f, False) = '-' : showFlagName f + showFlagName (FlagName f) = f + + change (OnlyInLeft pkgid) = display pkgid ++ " removed" + change (InBoth pkgid pkgid') = display pkgid ++ " -> " + ++ display (packageVersion pkgid') + change (OnlyInRight pkgid') = display pkgid' ++ " added" + + showDep pkg | Just rdeps <- Map.lookup (packageId pkg) revDeps + = " (via: " ++ unwords (map display rdeps) ++ ")" + | otherwise = "" + + revDepGraphEdges = [ (rpid,packageId pkg) | (pkg,_) <- plan, rpid <- depends pkg ] + + revDeps = Map.fromListWith (++) (map (fmap (:[])) revDepGraphEdges) + +-- ------------------------------------------------------------ +-- * Post installation stuff +-- ------------------------------------------------------------ + +-- | Report a solver failure. This works slightly differently to +-- 'postInstallActions', as (by definition) we don't have an install plan. 
+reportPlanningFailure :: Verbosity -> InstallArgs -> InstallContext -> String -> IO () +reportPlanningFailure verbosity + (_, _, comp, platform, _, _, _ + ,_, configFlags, _, installFlags, _) + (_, sourcePkgDb, _, pkgSpecifiers) + message = do + + when reportFailure $ do + + -- Only create reports for explicitly named packages + let pkgids = + filter (SourcePackageIndex.elemByPackageId (packageIndex sourcePkgDb)) $ + mapMaybe theSpecifiedPackage pkgSpecifiers + + buildReports = BuildReports.fromPlanningFailure platform (compilerId comp) + pkgids (configConfigurationsFlags configFlags) + + when (not (null buildReports)) $ + info verbosity $ + "Solver failure will be reported for " + ++ intercalate "," (map display pkgids) + + -- Save reports + BuildReports.storeLocal (compilerInfo comp) + (fromNubList $ installSummaryFile installFlags) buildReports platform + + -- Save solver log + case logFile of + Nothing -> return () + Just template -> forM_ pkgids $ \pkgid -> + let env = initialPathTemplateEnv pkgid dummyPackageKey + (compilerInfo comp) platform + path = fromPathTemplate $ substPathTemplate env template + in writeFile path message + + where + reportFailure = fromFlag (installReportPlanningFailure installFlags) + logFile = flagToMaybe (installLogFile installFlags) + + -- A PackageKey is calculated from the transitive closure of + -- dependencies, but when the solver fails we don't have that. + -- So we fail. + dummyPackageKey = error "reportPlanningFailure: package key not available" + +-- | If a 'PackageSpecifier' refers to a single package, return Just that package. 
+theSpecifiedPackage :: Package pkg => PackageSpecifier pkg -> Maybe PackageId +theSpecifiedPackage pkgSpec = + case pkgSpec of + NamedPackage name [PackageConstraintVersion name' version] + | name == name' -> PackageIdentifier name <$> trivialRange version + NamedPackage _ _ -> Nothing + SpecificSourcePackage pkg -> Just $ packageId pkg + where + -- | If a range includes only a single version, return Just that version. + trivialRange :: VersionRange -> Maybe Version + trivialRange = foldVersionRange + Nothing + Just -- "== v" + (\_ -> Nothing) + (\_ -> Nothing) + (\_ _ -> Nothing) + (\_ _ -> Nothing) + +-- | Various stuff we do after successful or unsuccessfully installing a bunch +-- of packages. This includes: +-- +-- * build reporting, local and remote +-- * symlinking binaries +-- * updating indexes +-- * updating world file +-- * error reporting +-- +postInstallActions :: Verbosity + -> InstallArgs + -> [UserTarget] + -> InstallPlan + -> IO () +postInstallActions verbosity + (packageDBs, _, comp, platform, conf, useSandbox, mSandboxPkgInfo + ,globalFlags, configFlags, _, installFlags, _) + targets installPlan = do + + unless oneShot $ + World.insert verbosity worldFile + --FIXME: does not handle flags + [ World.WorldPkgInfo dep [] + | UserTargetNamed dep <- targets ] + + let buildReports = BuildReports.fromInstallPlan installPlan + BuildReports.storeLocal (compilerInfo comp) (fromNubList $ installSummaryFile installFlags) buildReports + (InstallPlan.planPlatform installPlan) + when (reportingLevel >= AnonymousReports) $ + BuildReports.storeAnonymous buildReports + when (reportingLevel == DetailedReports) $ + storeDetailedBuildReports verbosity logsDir buildReports + + regenerateHaddockIndex verbosity packageDBs comp platform conf useSandbox + configFlags installFlags installPlan + + symlinkBinaries verbosity comp configFlags installFlags installPlan + + printBuildFailures installPlan + + updateSandboxTimestampsFile useSandbox mSandboxPkgInfo + comp platform 
installPlan + + where + reportingLevel = fromFlag (installBuildReports installFlags) + logsDir = fromFlag (globalLogsDir globalFlags) + oneShot = fromFlag (installOneShot installFlags) + worldFile = fromFlag $ globalWorldFile globalFlags + +storeDetailedBuildReports :: Verbosity -> FilePath + -> [(BuildReports.BuildReport, Maybe Repo)] -> IO () +storeDetailedBuildReports verbosity logsDir reports = sequence_ + [ do dotCabal <- defaultCabalDir + let logFileName = display (BuildReports.package report) <.> "log" + logFile = logsDir logFileName + reportsDir = dotCabal "reports" remoteRepoName remoteRepo + reportFile = reportsDir logFileName + + handleMissingLogFile $ do + buildLog <- readFile logFile + createDirectoryIfMissing True reportsDir -- FIXME + writeFile reportFile (show (BuildReports.show report, buildLog)) + + | (report, Just Repo { repoKind = Left remoteRepo }) <- reports + , isLikelyToHaveLogFile (BuildReports.installOutcome report) ] + + where + isLikelyToHaveLogFile BuildReports.ConfigureFailed {} = True + isLikelyToHaveLogFile BuildReports.BuildFailed {} = True + isLikelyToHaveLogFile BuildReports.InstallFailed {} = True + isLikelyToHaveLogFile BuildReports.InstallOk {} = True + isLikelyToHaveLogFile _ = False + + handleMissingLogFile = Exception.handleJust missingFile $ \ioe -> + warn verbosity $ "Missing log file for build report: " + ++ fromMaybe "" (ioeGetFileName ioe) + + missingFile ioe + | isDoesNotExistError ioe = Just ioe + missingFile _ = Nothing + + +regenerateHaddockIndex :: Verbosity + -> [PackageDB] + -> Compiler + -> Platform + -> ProgramConfiguration + -> UseSandbox + -> ConfigFlags + -> InstallFlags + -> InstallPlan + -> IO () +regenerateHaddockIndex verbosity packageDBs comp platform conf useSandbox + configFlags installFlags installPlan + | haddockIndexFileIsRequested && shouldRegenerateHaddockIndex = do + + defaultDirs <- InstallDirs.defaultInstallDirs + (compilerFlavor comp) + (fromFlag (configUserInstall configFlags)) + True + let 
indexFileTemplate = fromFlag (installHaddockIndex installFlags) + indexFile = substHaddockIndexFileName defaultDirs indexFileTemplate + + notice verbosity $ + "Updating documentation index " ++ indexFile + + --TODO: might be nice if the install plan gave us the new InstalledPackageInfo + installedPkgIndex <- getInstalledPackages verbosity comp packageDBs conf + Haddock.regenerateHaddockIndex verbosity installedPkgIndex conf indexFile + + | otherwise = return () + where + haddockIndexFileIsRequested = + fromFlag (installDocumentation installFlags) + && isJust (flagToMaybe (installHaddockIndex installFlags)) + + -- We want to regenerate the index if some new documentation was actually + -- installed. Since the index can be only per-user or per-sandbox (see + -- #1337), we don't do it for global installs or special cases where we're + -- installing into a specific db. + shouldRegenerateHaddockIndex = (isUseSandbox useSandbox || normalUserInstall) + && someDocsWereInstalled installPlan + where + someDocsWereInstalled = any installedDocs . InstallPlan.toList + normalUserInstall = (UserPackageDB `elem` packageDBs) + && all (not . isSpecificPackageDB) packageDBs + + installedDocs (InstallPlan.Installed _ (BuildOk DocsOk _ _)) = True + installedDocs _ = False + isSpecificPackageDB (SpecificPackageDB _) = True + isSpecificPackageDB _ = False + + substHaddockIndexFileName defaultDirs = fromPathTemplate + . 
substPathTemplate env + where + env = env0 ++ installDirsTemplateEnv absoluteDirs + env0 = InstallDirs.compilerTemplateEnv (compilerInfo comp) + ++ InstallDirs.platformTemplateEnv platform + ++ InstallDirs.abiTemplateEnv (compilerInfo comp) platform + absoluteDirs = InstallDirs.substituteInstallDirTemplates + env0 templateDirs + templateDirs = InstallDirs.combineInstallDirs fromFlagOrDefault + defaultDirs (configInstallDirs configFlags) + + +symlinkBinaries :: Verbosity + -> Compiler + -> ConfigFlags + -> InstallFlags + -> InstallPlan -> IO () +symlinkBinaries verbosity comp configFlags installFlags plan = do + failed <- InstallSymlink.symlinkBinaries comp configFlags installFlags plan + case failed of + [] -> return () + [(_, exe, path)] -> + warn verbosity $ + "could not create a symlink in " ++ bindir ++ " for " + ++ exe ++ " because the file exists there already but is not " + ++ "managed by cabal. You can create a symlink for this executable " + ++ "manually if you wish. The executable file has been installed at " + ++ path + exes -> + warn verbosity $ + "could not create symlinks in " ++ bindir ++ " for " + ++ intercalate ", " [ exe | (_, exe, _) <- exes ] + ++ " because the files exist there already and are not " + ++ "managed by cabal. You can create symlinks for these executables " + ++ "manually if you wish. The executable files have been installed at " + ++ intercalate ", " [ path | (_, _, path) <- exes ] + where + bindir = fromFlag (installSymlinkBinDir installFlags) + + +printBuildFailures :: InstallPlan -> IO () +printBuildFailures plan = + case [ (pkg, reason) + | InstallPlan.Failed pkg reason <- InstallPlan.toList plan ] of + [] -> return () + failed -> die . unlines + $ "Error: some packages failed to install:" + : [ display (packageId pkg) ++ printFailureReason reason + | (pkg, reason) <- failed ] + where + printFailureReason reason = case reason of + DependentFailed pkgid -> " depends on " ++ display pkgid + ++ " which failed to install." 
+ DownloadFailed e -> " failed while downloading the package." + ++ showException e + UnpackFailed e -> " failed while unpacking the package." + ++ showException e + ConfigureFailed e -> " failed during the configure step." + ++ showException e + BuildFailed e -> " failed during the building phase." + ++ showException e + TestsFailed e -> " failed during the tests phase." + ++ showException e + InstallFailed e -> " failed during the final install step." + ++ showException e + + -- This will never happen, but we include it for completeness + PlanningFailed -> " failed during the planning phase." + + showException e = " The exception was:\n " ++ show e ++ maybeOOM e +#ifdef mingw32_HOST_OS + maybeOOM _ = "" +#else + maybeOOM e = maybe "" onExitFailure (fromException e) + onExitFailure (ExitFailure n) + | n == 9 || n == -9 = + "\nThis may be due to an out-of-memory condition." + onExitFailure _ = "" +#endif + + +-- | If we're working inside a sandbox and some add-source deps were installed, +-- update the timestamps of those deps. +updateSandboxTimestampsFile :: UseSandbox -> Maybe SandboxPackageInfo + -> Compiler -> Platform -> InstallPlan + -> IO () +updateSandboxTimestampsFile (UseSandbox sandboxDir) + (Just (SandboxPackageInfo _ _ _ allAddSourceDeps)) + comp platform installPlan = + withUpdateTimestamps sandboxDir (compilerId comp) platform $ \_ -> do + let allInstalled = [ pkg | InstallPlan.Installed pkg _ + <- InstallPlan.toList installPlan ] + allSrcPkgs = [ pkg | ReadyPackage pkg _ _ _ <- allInstalled ] + allPaths = [ pth | LocalUnpackedPackage pth + <- map packageSource allSrcPkgs] + allPathsCanonical <- mapM tryCanonicalizePath allPaths + return $! 
filter (`S.member` allAddSourceDeps) allPathsCanonical + +updateSandboxTimestampsFile _ _ _ _ _ = return () + +-- ------------------------------------------------------------ +-- * Actually do the installations +-- ------------------------------------------------------------ + +data InstallMisc = InstallMisc { + rootCmd :: Maybe FilePath, + libVersion :: Maybe Version + } + +-- | If logging is enabled, contains location of the log file and the verbosity +-- level for logging. +type UseLogFile = Maybe (PackageIdentifier -> PackageKey -> FilePath, Verbosity) + +performInstallations :: Verbosity + -> InstallArgs + -> InstalledPackageIndex + -> InstallPlan + -> IO InstallPlan +performInstallations verbosity + (packageDBs, _, comp, _, conf, useSandbox, _, + globalFlags, configFlags, configExFlags, installFlags, haddockFlags) + installedPkgIndex installPlan = do + + -- With 'install -j' it can be a bit hard to tell whether a sandbox is used. + whenUsingSandbox useSandbox $ \sandboxDir -> + when parallelInstall $ + notice verbosity $ "Notice: installing into a sandbox located at " + ++ sandboxDir + + jobControl <- if parallelInstall then newParallelJobControl + else newSerialJobControl + buildLimit <- newJobLimit numJobs + fetchLimit <- newJobLimit (min numJobs numFetchJobs) + installLock <- newLock -- serialise installation + cacheLock <- newLock -- serialise access to setup exe cache + + + executeInstallPlan verbosity comp jobControl useLogFile installPlan $ \rpkg -> + -- Calculate the package key (ToDo: Is this right for source install) + let pkg_key = readyPackageKey comp rpkg in + installReadyPackage platform cinfo configFlags + rpkg $ \configFlags' src pkg pkgoverride -> + fetchSourcePackage verbosity fetchLimit src $ \src' -> + installLocalPackage verbosity buildLimit + (packageId pkg) src' distPref $ \mpath -> + installUnpackedPackage verbosity buildLimit installLock numJobs pkg_key + (setupScriptOptions installedPkgIndex cacheLock) + miscOptions configFlags' 
installFlags haddockFlags + cinfo platform pkg pkgoverride mpath useLogFile + + where + platform = InstallPlan.planPlatform installPlan + cinfo = InstallPlan.planCompiler installPlan + + numJobs = determineNumJobs (installNumJobs installFlags) + numFetchJobs = 2 + parallelInstall = numJobs >= 2 + distPref = fromFlagOrDefault (useDistPref defaultSetupScriptOptions) + (configDistPref configFlags) + + setupScriptOptions index lock = SetupScriptOptions { + useCabalVersion = chooseCabalVersion configExFlags + (libVersion miscOptions), + useCompiler = Just comp, + usePlatform = Just platform, + -- Hack: we typically want to allow the UserPackageDB for finding the + -- Cabal lib when compiling any Setup.hs even if we're doing a global + -- install. However we also allow looking in a specific package db. + usePackageDB = if UserPackageDB `elem` packageDBs + then packageDBs + else let (db@GlobalPackageDB:dbs) = packageDBs + in db : UserPackageDB : dbs, + --TODO: use Ord instance: + -- insert UserPackageDB packageDBs + usePackageIndex = if UserPackageDB `elem` packageDBs + then Just index + else Nothing, + useProgramConfig = conf, + useDistPref = distPref, + useLoggingHandle = Nothing, + useWorkingDir = Nothing, + forceExternalSetupMethod = parallelInstall, + useWin32CleanHack = False, + setupCacheLock = Just lock + } + reportingLevel = fromFlag (installBuildReports installFlags) + logsDir = fromFlag (globalLogsDir globalFlags) + + -- Should the build output be written to a log file instead of stdout? + useLogFile :: UseLogFile + useLogFile = fmap ((\f -> (f, loggingVerbosity)) . substLogFileName) + logFileTemplate + where + installLogFile' = flagToMaybe $ installLogFile installFlags + defaultTemplate = toPathTemplate $ logsDir "$pkgid" <.> "log" + + -- If the user has specified --remote-build-reporting=detailed, use the + -- default log file location. If the --build-log option is set, use the + -- provided location. 
Otherwise don't use logging, unless building in + -- parallel (in which case the default location is used). + logFileTemplate :: Maybe PathTemplate + logFileTemplate + | useDefaultTemplate = Just defaultTemplate + | otherwise = installLogFile' + + -- If the user has specified --remote-build-reporting=detailed or + -- --build-log, use more verbose logging. + loggingVerbosity :: Verbosity + loggingVerbosity | overrideVerbosity = max Verbosity.verbose verbosity + | otherwise = verbosity + + useDefaultTemplate :: Bool + useDefaultTemplate + | reportingLevel == DetailedReports = True + | isJust installLogFile' = False + | parallelInstall = True + | otherwise = False + + overrideVerbosity :: Bool + overrideVerbosity + | reportingLevel == DetailedReports = True + | isJust installLogFile' = True + | parallelInstall = False + | otherwise = False + + substLogFileName :: PathTemplate -> PackageIdentifier -> PackageKey -> FilePath + substLogFileName template pkg pkg_key = fromPathTemplate + . substPathTemplate env + $ template + where env = initialPathTemplateEnv (packageId pkg) pkg_key + (compilerInfo comp) platform + + miscOptions = InstallMisc { + rootCmd = if fromFlag (configUserInstall configFlags) + || (isUseSandbox useSandbox) + then Nothing -- ignore --root-cmd if --user + -- or working inside a sandbox. 
+ else flagToMaybe (installRootCmd installFlags), + libVersion = flagToMaybe (configCabalVersion configExFlags) + } + + +executeInstallPlan :: Verbosity + -> Compiler + -> JobControl IO (PackageId, PackageKey, BuildResult) + -> UseLogFile + -> InstallPlan + -> (ReadyPackage -> IO BuildResult) + -> IO InstallPlan +executeInstallPlan verbosity comp jobCtl useLogFile plan0 installPkg = + tryNewTasks 0 plan0 + where + tryNewTasks taskCount plan = do + case InstallPlan.ready plan of + [] | taskCount == 0 -> return plan + | otherwise -> waitForTasks taskCount plan + pkgs -> do + sequence_ + [ do info verbosity $ "Ready to install " ++ display pkgid + spawnJob jobCtl $ do + buildResult <- installPkg pkg + return (packageId pkg, pkg_key, buildResult) + | pkg <- pkgs + , let pkgid = packageId pkg + pkg_key = readyPackageKey comp pkg ] + + let taskCount' = taskCount + length pkgs + plan' = InstallPlan.processing pkgs plan + waitForTasks taskCount' plan' + + waitForTasks taskCount plan = do + info verbosity $ "Waiting for install task to finish..." + (pkgid, pkg_key, buildResult) <- collectJob jobCtl + printBuildResult pkgid pkg_key buildResult + let taskCount' = taskCount-1 + plan' = updatePlan pkgid buildResult plan + tryNewTasks taskCount' plan' + + updatePlan :: PackageIdentifier -> BuildResult -> InstallPlan -> InstallPlan + updatePlan pkgid (Right buildSuccess) = + InstallPlan.completed (Source.fakeInstalledPackageId pkgid) buildSuccess + + updatePlan pkgid (Left buildFailure) = + InstallPlan.failed (Source.fakeInstalledPackageId pkgid) buildFailure depsFailure + where + depsFailure = DependentFailed pkgid + -- So this first pkgid failed for whatever reason (buildFailure). + -- All the other packages that depended on this pkgid, which we + -- now cannot build, we mark as failing due to 'DependentFailed' + -- which kind of means it was not their fault. + + -- Print build log if something went wrong, and 'Installed $PKGID' + -- otherwise. 
+ printBuildResult :: PackageId -> PackageKey -> BuildResult -> IO () + printBuildResult pkgid pkg_key buildResult = case buildResult of + (Right _) -> notice verbosity $ "Installed " ++ display pkgid + (Left _) -> do + notice verbosity $ "Failed to install " ++ display pkgid + when (verbosity >= normal) $ + case useLogFile of + Nothing -> return () + Just (mkLogFileName, _) -> do + let logName = mkLogFileName pkgid pkg_key + putStr $ "Build log ( " ++ logName ++ " ):\n" + printFile logName + + printFile :: FilePath -> IO () + printFile path = readFile path >>= putStr + +-- | Call an installer for an 'SourcePackage' but override the configure +-- flags with the ones given by the 'ReadyPackage'. In particular the +-- 'ReadyPackage' specifies an exact 'FlagAssignment' and exactly +-- versioned package dependencies. So we ignore any previous partial flag +-- assignment or dependency constraints and use the new ones. +-- +-- NB: when updating this function, don't forget to also update +-- 'configurePackage' in D.C.Configure. +installReadyPackage :: Platform -> CompilerInfo + -> ConfigFlags + -> ReadyPackage + -> (ConfigFlags -> PackageLocation (Maybe FilePath) + -> PackageDescription + -> PackageDescriptionOverride -> a) + -> a +installReadyPackage platform cinfo configFlags + (ReadyPackage (SourcePackage _ gpkg source pkgoverride) + flags stanzas deps) + installPkg = installPkg configFlags { + configConfigurationsFlags = flags, + -- We generate the legacy constraints as well as the new style precise deps. + -- In the end only one set gets passed to Setup.hs configure, depending on + -- the Cabal version we are talking to. + configConstraints = [ thisPackageVersion (packageId deppkg) + | deppkg <- deps ], + configDependencies = [ (packageName (Installed.sourcePackageId deppkg), + Installed.installedPackageId deppkg) + | deppkg <- deps ], + -- Use '--exact-configuration' if supported. 
+ configExactConfiguration = toFlag True, + configBenchmarks = toFlag False, + configTests = toFlag (TestStanzas `elem` stanzas) + } source pkg pkgoverride + where + pkg = case finalizePackageDescription flags + (const True) + platform cinfo [] (enableStanzas stanzas gpkg) of + Left _ -> error "finalizePackageDescription ReadyPackage failed" + Right (desc, _) -> desc + +fetchSourcePackage + :: Verbosity + -> JobLimit + -> PackageLocation (Maybe FilePath) + -> (PackageLocation FilePath -> IO BuildResult) + -> IO BuildResult +fetchSourcePackage verbosity fetchLimit src installPkg = do + fetched <- checkFetched src + case fetched of + Just src' -> installPkg src' + Nothing -> onFailure DownloadFailed $ do + loc <- withJobLimit fetchLimit $ + fetchPackage verbosity src + installPkg loc + + +installLocalPackage + :: Verbosity + -> JobLimit + -> PackageIdentifier -> PackageLocation FilePath -> FilePath + -> (Maybe FilePath -> IO BuildResult) + -> IO BuildResult +installLocalPackage verbosity jobLimit pkgid location distPref installPkg = + + case location of + + LocalUnpackedPackage dir -> + installPkg (Just dir) + + LocalTarballPackage tarballPath -> + installLocalTarballPackage verbosity jobLimit + pkgid tarballPath distPref installPkg + + RemoteTarballPackage _ tarballPath -> + installLocalTarballPackage verbosity jobLimit + pkgid tarballPath distPref installPkg + + RepoTarballPackage _ _ tarballPath -> + installLocalTarballPackage verbosity jobLimit + pkgid tarballPath distPref installPkg + + +installLocalTarballPackage + :: Verbosity + -> JobLimit + -> PackageIdentifier -> FilePath -> FilePath + -> (Maybe FilePath -> IO BuildResult) + -> IO BuildResult +installLocalTarballPackage verbosity jobLimit pkgid + tarballPath distPref installPkg = do + tmp <- getTemporaryDirectory + withTempDirectory verbosity tmp "cabal-tmp" $ \tmpDirPath -> + onFailure UnpackFailed $ do + let relUnpackedPath = display pkgid + absUnpackedPath = tmpDirPath relUnpackedPath + descFilePath = 
absUnpackedPath + display (packageName pkgid) <.> "cabal" + withJobLimit jobLimit $ do + info verbosity $ "Extracting " ++ tarballPath + ++ " to " ++ tmpDirPath ++ "..." + extractTarGzFile tmpDirPath relUnpackedPath tarballPath + exists <- doesFileExist descFilePath + when (not exists) $ + die $ "Package .cabal file not found: " ++ show descFilePath + maybeRenameDistDir absUnpackedPath + + installPkg (Just absUnpackedPath) + + where + -- 'cabal sdist' puts pre-generated files in the 'dist' + -- directory. This fails when a nonstandard build directory name + -- is used (as is the case with sandboxes), so we need to rename + -- the 'dist' dir here. + -- + -- TODO: 'cabal get happy && cd sandbox && cabal install ../happy' still + -- fails even with this workaround. We probably can live with that. + maybeRenameDistDir :: FilePath -> IO () + maybeRenameDistDir absUnpackedPath = do + let distDirPath = absUnpackedPath defaultDistPref + distDirPathTmp = absUnpackedPath (defaultDistPref ++ "-tmp") + distDirPathNew = absUnpackedPath distPref + distDirExists <- doesDirectoryExist distDirPath + when (distDirExists + && (not $ distDirPath `equalFilePath` distDirPathNew)) $ do + -- NB: we need to handle the case when 'distDirPathNew' is a + -- subdirectory of 'distDirPath' (e.g. the former is + -- 'dist/dist-sandbox-3688fbc2' and the latter is 'dist'). + debug verbosity $ "Renaming '" ++ distDirPath ++ "' to '" + ++ distDirPathTmp ++ "'." + renameDirectory distDirPath distDirPathTmp + when (distDirPath `isPrefixOf` distDirPathNew) $ + createDirectoryIfMissingVerbose verbosity False distDirPath + debug verbosity $ "Renaming '" ++ distDirPathTmp ++ "' to '" + ++ distDirPathNew ++ "'." 
+ renameDirectory distDirPathTmp distDirPathNew + +installUnpackedPackage + :: Verbosity + -> JobLimit + -> Lock + -> Int + -> PackageKey + -> SetupScriptOptions + -> InstallMisc + -> ConfigFlags + -> InstallFlags + -> HaddockFlags + -> CompilerInfo + -> Platform + -> PackageDescription + -> PackageDescriptionOverride + -> Maybe FilePath -- ^ Directory to change to before starting the installation. + -> UseLogFile -- ^ File to log output to (if any) + -> IO BuildResult +installUnpackedPackage verbosity buildLimit installLock numJobs pkg_key + scriptOptions miscOptions + configFlags installFlags haddockFlags + cinfo platform pkg pkgoverride workingDir useLogFile = do + + -- Override the .cabal file if necessary + case pkgoverride of + Nothing -> return () + Just pkgtxt -> do + let descFilePath = fromMaybe "." workingDir + display (packageName pkgid) <.> "cabal" + info verbosity $ + "Updating " ++ display (packageName pkgid) <.> "cabal" + ++ " with the latest revision from the index." + writeFileAtomic descFilePath pkgtxt + + -- Make sure that we pass --libsubdir etc to 'setup configure' (necessary if + -- the setup script was compiled against an old version of the Cabal lib). + configFlags' <- addDefaultInstallDirs configFlags + -- Filter out flags not supported by the old versions of the Cabal lib. + let configureFlags :: Version -> ConfigFlags + configureFlags = filterConfigureFlags configFlags' { + configVerbosity = toFlag verbosity' + } + + -- Path to the optional log file. + mLogPath <- maybeLogPath + + -- Configure phase + onFailure ConfigureFailed $ withJobLimit buildLimit $ do + when (numJobs > 1) $ notice verbosity $ + "Configuring " ++ display pkgid ++ "..." + setup configureCommand configureFlags mLogPath + + -- Build phase + onFailure BuildFailed $ do + when (numJobs > 1) $ notice verbosity $ + "Building " ++ display pkgid ++ "..." 
+ setup buildCommand' buildFlags mLogPath + + -- Doc generation phase + docsResult <- if shouldHaddock + then (do setup haddockCommand haddockFlags' mLogPath + return DocsOk) + `catchIO` (\_ -> return DocsFailed) + `catchExit` (\_ -> return DocsFailed) + else return DocsNotTried + + -- Tests phase + onFailure TestsFailed $ do + when (testsEnabled && PackageDescription.hasTests pkg) $ + setup Cabal.testCommand testFlags mLogPath + + let testsResult | testsEnabled = TestsOk + | otherwise = TestsNotTried + + -- Install phase + onFailure InstallFailed $ criticalSection installLock $ do + -- Capture installed package configuration file + maybePkgConf <- maybeGenPkgConf mLogPath + + -- Actual installation + withWin32SelfUpgrade verbosity pkg_key configFlags cinfo platform pkg $ do + case rootCmd miscOptions of + (Just cmd) -> reexec cmd + Nothing -> do + setup Cabal.copyCommand copyFlags mLogPath + when shouldRegister $ do + setup Cabal.registerCommand registerFlags mLogPath + return (Right (BuildOk docsResult testsResult maybePkgConf)) + + where + pkgid = packageId pkg + buildCommand' = buildCommand defaultProgramConfiguration + buildFlags _ = emptyBuildFlags { + buildDistPref = configDistPref configFlags, + buildVerbosity = toFlag verbosity' + } + shouldHaddock = fromFlag (installDocumentation installFlags) + haddockFlags' _ = haddockFlags { + haddockVerbosity = toFlag verbosity', + haddockDistPref = configDistPref configFlags + } + testsEnabled = fromFlag (configTests configFlags) + && fromFlagOrDefault False (installRunTests installFlags) + testFlags _ = Cabal.emptyTestFlags { + Cabal.testDistPref = configDistPref configFlags + } + copyFlags _ = Cabal.emptyCopyFlags { + Cabal.copyDistPref = configDistPref configFlags, + Cabal.copyDest = toFlag InstallDirs.NoCopyDest, + Cabal.copyVerbosity = toFlag verbosity' + } + shouldRegister = PackageDescription.hasLibs pkg + registerFlags _ = Cabal.emptyRegisterFlags { + Cabal.regDistPref = configDistPref configFlags, + 
Cabal.regVerbosity = toFlag verbosity' + } + verbosity' = maybe verbosity snd useLogFile + tempTemplate name = name ++ "-" ++ display pkgid + + addDefaultInstallDirs :: ConfigFlags -> IO ConfigFlags + addDefaultInstallDirs configFlags' = do + defInstallDirs <- InstallDirs.defaultInstallDirs flavor userInstall False + return $ configFlags' { + configInstallDirs = fmap Cabal.Flag . + InstallDirs.substituteInstallDirTemplates env $ + InstallDirs.combineInstallDirs fromFlagOrDefault + defInstallDirs (configInstallDirs configFlags) + } + where + CompilerId flavor _ = compilerInfoId cinfo + env = initialPathTemplateEnv pkgid pkg_key cinfo platform + userInstall = fromFlagOrDefault defaultUserInstall + (configUserInstall configFlags') + + maybeGenPkgConf :: Maybe FilePath + -> IO (Maybe Installed.InstalledPackageInfo) + maybeGenPkgConf mLogPath = + if shouldRegister then do + tmp <- getTemporaryDirectory + withTempFile tmp (tempTemplate "pkgConf") $ \pkgConfFile handle -> do + hClose handle + let registerFlags' version = (registerFlags version) { + Cabal.regGenPkgConf = toFlag (Just pkgConfFile) + } + setup Cabal.registerCommand registerFlags' mLogPath + withUTF8FileContents pkgConfFile $ \pkgConfText -> + case Installed.parseInstalledPackageInfo pkgConfText of + Installed.ParseFailed perror -> pkgConfParseFailed perror + Installed.ParseOk warns pkgConf -> do + unless (null warns) $ + warn verbosity $ unlines (map (showPWarning pkgConfFile) warns) + return (Just pkgConf) + else return Nothing + + pkgConfParseFailed :: Installed.PError -> IO a + pkgConfParseFailed perror = + die $ "Couldn't parse the output of 'setup register --gen-pkg-config':" + ++ show perror + + maybeLogPath :: IO (Maybe FilePath) + maybeLogPath = + case useLogFile of + Nothing -> return Nothing + Just (mkLogFileName, _) -> do + let logFileName = mkLogFileName (packageId pkg) pkg_key + logDir = takeDirectory logFileName + unless (null logDir) $ createDirectoryIfMissing True logDir + logFileExists <- 
doesFileExist logFileName + when logFileExists $ removeFile logFileName + return (Just logFileName) + + setup cmd flags mLogPath = + Exception.bracket + (maybe (return Nothing) + (\path -> Just `fmap` openFile path AppendMode) mLogPath) + (maybe (return ()) hClose) + (\logFileHandle -> + setupWrapper verbosity + scriptOptions { useLoggingHandle = logFileHandle + , useWorkingDir = workingDir } + (Just pkg) + cmd flags []) + + reexec cmd = do + -- look for our own executable file and re-exec ourselves using a helper + -- program like sudo to elevate privileges: + self <- getExecutablePath + weExist <- doesFileExist self + if weExist + then inDir workingDir $ + rawSystemExit verbosity cmd + [self, "install", "--only" + ,"--verbose=" ++ showForCabal verbosity] + else die $ "Unable to find cabal executable at: " ++ self + + +-- helper +onFailure :: (SomeException -> BuildFailure) -> IO BuildResult -> IO BuildResult +onFailure result action = + action `catches` + [ Handler $ \ioe -> handler (ioe :: IOException) + , Handler $ \exit -> handler (exit :: ExitCode) + ] + where + handler :: Exception e => e -> IO BuildResult + handler = return . Left . result . 
toException + + +-- ------------------------------------------------------------ +-- * Weird windows hacks +-- ------------------------------------------------------------ + +withWin32SelfUpgrade :: Verbosity + -> PackageKey + -> ConfigFlags + -> CompilerInfo + -> Platform + -> PackageDescription + -> IO a -> IO a +withWin32SelfUpgrade _ _ _ _ _ _ action | buildOS /= Windows = action +withWin32SelfUpgrade verbosity pkg_key configFlags cinfo platform pkg action = do + + defaultDirs <- InstallDirs.defaultInstallDirs + compFlavor + (fromFlag (configUserInstall configFlags)) + (PackageDescription.hasLibs pkg) + + Win32SelfUpgrade.possibleSelfUpgrade verbosity + (exeInstallPaths defaultDirs) action + + where + pkgid = packageId pkg + (CompilerId compFlavor _) = compilerInfoId cinfo + + exeInstallPaths defaultDirs = + [ InstallDirs.bindir absoluteDirs exeName <.> exeExtension + | exe <- PackageDescription.executables pkg + , PackageDescription.buildable (PackageDescription.buildInfo exe) + , let exeName = prefix ++ PackageDescription.exeName exe ++ suffix + prefix = substTemplate prefixTemplate + suffix = substTemplate suffixTemplate ] + where + fromFlagTemplate = fromFlagOrDefault (InstallDirs.toPathTemplate "") + prefixTemplate = fromFlagTemplate (configProgPrefix configFlags) + suffixTemplate = fromFlagTemplate (configProgSuffix configFlags) + templateDirs = InstallDirs.combineInstallDirs fromFlagOrDefault + defaultDirs (configInstallDirs configFlags) + absoluteDirs = InstallDirs.absoluteInstallDirs + pkgid pkg_key + cinfo InstallDirs.NoCopyDest + platform templateDirs + substTemplate = InstallDirs.fromPathTemplate + . 
InstallDirs.substPathTemplate env + where env = InstallDirs.initialPathTemplateEnv pkgid pkg_key cinfo platform diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/InstallPlan.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/InstallPlan.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/InstallPlan.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/InstallPlan.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,627 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.InstallPlan +-- Copyright : (c) Duncan Coutts 2008 +-- License : BSD-like +-- +-- Maintainer : duncan@community.haskell.org +-- Stability : provisional +-- Portability : portable +-- +-- Package installation plan +-- +----------------------------------------------------------------------------- +module Distribution.Client.InstallPlan ( + InstallPlan, + ConfiguredPackage(..), + PlanPackage(..), + + -- * Operations on 'InstallPlan's + new, + toList, + ready, + processing, + completed, + failed, + remove, + showPlanIndex, + showInstallPlan, + + -- ** Query functions + planPlatform, + planCompiler, + + -- * Checking validity of plans + valid, + closed, + consistent, + acyclic, + configuredPackageValid, + + -- ** Details on invalid plans + PlanProblem(..), + showPlanProblem, + PackageProblem(..), + showPackageProblem, + problems, + configuredPackageProblems + ) where + +import Distribution.Client.Types + ( SourcePackage(packageDescription), ConfiguredPackage(..) 
+ , ReadyPackage(..), readyPackageToConfiguredPackage + , InstalledPackage, BuildFailure, BuildSuccess(..), enableStanzas + , InstalledPackage(..), fakeInstalledPackageId ) +import Distribution.Package + ( PackageIdentifier(..), PackageName(..), Package(..), packageName + , PackageFixedDeps(..), Dependency(..), InstalledPackageId + , PackageInstalled(..) ) +import Distribution.Version + ( Version, withinRange ) +import Distribution.PackageDescription + ( GenericPackageDescription(genPackageFlags) + , Flag(flagName), FlagName(..) ) +import Distribution.Client.PackageUtils + ( externalBuildDepends ) +import Distribution.PackageDescription.Configuration + ( finalizePackageDescription ) +import Distribution.Simple.PackageIndex + ( PackageIndex, FakeMap ) +import qualified Distribution.Simple.PackageIndex as PackageIndex +import Distribution.Text + ( display ) +import Distribution.System + ( Platform ) +import Distribution.Compiler + ( CompilerInfo(..) ) +import Distribution.Client.Utils + ( duplicates, duplicatesBy, mergeBy, MergeResult(..) ) +import Distribution.Simple.Utils + ( comparing, intercalate ) +import qualified Distribution.InstalledPackageInfo as Installed + +import Data.List + ( sort, sortBy ) +import Data.Maybe + ( fromMaybe, maybeToList ) +import qualified Data.Graph as Graph +import Data.Graph (Graph) +import Control.Exception + ( assert ) +import Data.Maybe (catMaybes) +import qualified Data.Map as Map + +type PlanIndex = PackageIndex PlanPackage + +-- When cabal tries to install a number of packages, including all their +-- dependencies it has a non-trivial problem to solve. +-- +-- The Problem: +-- +-- In general we start with a set of installed packages and a set of source +-- packages. +-- +-- Installed packages have fixed dependencies. They have already been built and +-- we know exactly what packages they were built against, including their exact +-- versions. +-- +-- Source package have somewhat flexible dependencies. 
They are specified as +-- version ranges, though really they're predicates. To make matters worse they +-- have conditional flexible dependencies. Configuration flags can affect which +-- packages are required and can place additional constraints on their +-- versions. +-- +-- These two sets of package can and usually do overlap. There can be installed +-- packages that are also available as source packages which means they could +-- be re-installed if required, though there will also be packages which are +-- not available as source and cannot be re-installed. Very often there will be +-- extra versions available than are installed. Sometimes we may like to prefer +-- installed packages over source ones or perhaps always prefer the latest +-- available version whether installed or not. +-- +-- The goal is to calculate an installation plan that is closed, acyclic and +-- consistent and where every configured package is valid. +-- +-- An installation plan is a set of packages that are going to be used +-- together. It will consist of a mixture of installed packages and source +-- packages along with their exact version dependencies. An installation plan +-- is closed if for every package in the set, all of its dependencies are +-- also in the set. It is consistent if for every package in the set, all +-- dependencies which target that package have the same version. + +-- Note that plans do not necessarily compose. You might have a valid plan for +-- package A and a valid plan for package B. That does not mean the composition +-- is simultaneously valid for A and B. In particular you're most likely to +-- have problems with inconsistent dependencies. +-- On the other hand it is true that every closed sub plan is valid. 
+ +data PlanPackage = PreExisting InstalledPackage + | Configured ConfiguredPackage + | Processing ReadyPackage + | Installed ReadyPackage BuildSuccess + | Failed ConfiguredPackage BuildFailure + -- ^ NB: packages in the Failed state can be *either* Ready + -- or Configured. + +instance Package PlanPackage where + packageId (PreExisting pkg) = packageId pkg + packageId (Configured pkg) = packageId pkg + packageId (Processing pkg) = packageId pkg + packageId (Installed pkg _) = packageId pkg + packageId (Failed pkg _) = packageId pkg + +instance PackageFixedDeps PlanPackage where + depends (PreExisting pkg) = depends pkg + depends (Configured pkg) = depends pkg + depends (Processing pkg) = depends pkg + depends (Installed pkg _) = depends pkg + depends (Failed pkg _) = depends pkg + +instance PackageInstalled PlanPackage where + installedPackageId (PreExisting pkg) = installedPackageId pkg + installedPackageId (Configured pkg) = installedPackageId pkg + installedPackageId (Processing pkg) = installedPackageId pkg + -- NB: defer to the actual installed package info in this case + installedPackageId (Installed _ (BuildOk _ _ (Just ipkg))) = installedPackageId ipkg + installedPackageId (Installed pkg _) = installedPackageId pkg + installedPackageId (Failed pkg _) = installedPackageId pkg + + installedDepends (PreExisting pkg) = installedDepends pkg + installedDepends (Configured pkg) = installedDepends pkg + installedDepends (Processing pkg) = installedDepends pkg + installedDepends (Installed _ (BuildOk _ _ (Just ipkg))) = installedDepends ipkg + installedDepends (Installed pkg _) = installedDepends pkg + installedDepends (Failed pkg _) = installedDepends pkg + +data InstallPlan = InstallPlan { + planIndex :: PlanIndex, + planFakeMap :: FakeMap, + planGraph :: Graph, + planGraphRev :: Graph, + planPkgOf :: Graph.Vertex -> PlanPackage, + planVertexOf :: InstalledPackageId -> Graph.Vertex, + planPlatform :: Platform, + planCompiler :: CompilerInfo + } + +invariant :: 
InstallPlan -> Bool +invariant plan = + valid (planPlatform plan) (planCompiler plan) (planFakeMap plan) (planIndex plan) + +internalError :: String -> a +internalError msg = error $ "InstallPlan: internal error: " ++ msg + +showPlanIndex :: PlanIndex -> String +showPlanIndex index = + intercalate "\n" (map showPlanPackage (PackageIndex.allPackages index)) + where showPlanPackage p = + showPlanPackageTag p ++ " " + ++ display (packageId p) ++ " (" + ++ display (installedPackageId p) ++ ")" + +showInstallPlan :: InstallPlan -> String +showInstallPlan plan = + showPlanIndex (planIndex plan) ++ "\n" ++ + "fake map:\n " ++ intercalate "\n " (map showKV (Map.toList (planFakeMap plan))) + where showKV (k,v) = display k ++ " -> " ++ display v + +showPlanPackageTag :: PlanPackage -> String +showPlanPackageTag (PreExisting _) = "PreExisting" +showPlanPackageTag (Configured _) = "Configured" +showPlanPackageTag (Processing _) = "Processing" +showPlanPackageTag (Installed _ _) = "Installed" +showPlanPackageTag (Failed _ _) = "Failed" + +-- | Build an installation plan from a valid set of resolved packages. +-- +new :: Platform -> CompilerInfo -> PlanIndex + -> Either [PlanProblem] InstallPlan +new platform cinfo index = + -- NB: Need to pre-initialize the fake-map with pre-existing + -- packages + let isPreExisting (PreExisting _) = True + isPreExisting _ = False + fakeMap = Map.fromList + . map (\p -> (fakeInstalledPackageId (packageId p), installedPackageId p)) + . filter isPreExisting + $ PackageIndex.allPackages index in + case problems platform cinfo fakeMap index of + [] -> Right InstallPlan { + planIndex = index, + planFakeMap = fakeMap, + planGraph = graph, + planGraphRev = Graph.transposeG graph, + planPkgOf = vertexToPkgId, + planVertexOf = fromMaybe noSuchPkgId . 
pkgIdToVertex, + planPlatform = platform, + planCompiler = cinfo + } + where (graph, vertexToPkgId, pkgIdToVertex) = + PackageIndex.dependencyGraph index + -- NB: doesn't need to know planFakeMap because the + -- fakemap is empty at this point. + noSuchPkgId = internalError "package is not in the graph" + probs -> Left probs + +toList :: InstallPlan -> [PlanPackage] +toList = PackageIndex.allPackages . planIndex + +-- | Remove packages from the install plan. This will result in an +-- error if there are remaining packages that depend on any matching +-- package. This is primarily useful for obtaining an install plan for +-- the dependencies of a package or set of packages without actually +-- installing the package itself, as when doing development. +-- +remove :: (PlanPackage -> Bool) + -> InstallPlan + -> Either [PlanProblem] InstallPlan +remove shouldRemove plan = + new (planPlatform plan) (planCompiler plan) newIndex + where + newIndex = PackageIndex.fromList $ + filter (not . shouldRemove) (toList plan) + +-- | The packages that are ready to be installed. That is they are in the +-- configured state and have all their dependencies installed already. +-- The plan is complete if the result is @[]@. +-- +ready :: InstallPlan -> [ReadyPackage] +ready plan = assert check readyPackages + where + check = if null readyPackages && null processingPackages + then null configuredPackages + else True + configuredPackages = [ pkg | Configured pkg <- toList plan ] + processingPackages = [ pkg | Processing pkg <- toList plan] + + readyPackages :: [ReadyPackage] + readyPackages = + [ ReadyPackage srcPkg flags stanzas deps + | pkg@(ConfiguredPackage srcPkg flags stanzas _) <- configuredPackages + -- select only the package that have all of their deps installed: + , deps <- maybeToList (hasAllInstalledDeps pkg) + ] + + hasAllInstalledDeps :: ConfiguredPackage -> Maybe [Installed.InstalledPackageInfo] + hasAllInstalledDeps = mapM isInstalledDep . 
installedDepends + + isInstalledDep :: InstalledPackageId -> Maybe Installed.InstalledPackageInfo + isInstalledDep pkgid = + -- NB: Need to check if the ID has been updated in planFakeMap, in which case we + -- might be dealing with an old pointer + case PackageIndex.fakeLookupInstalledPackageId (planFakeMap plan) (planIndex plan) pkgid of + Just (Configured _) -> Nothing + Just (Processing _) -> Nothing + Just (Failed _ _) -> internalError depOnFailed + Just (PreExisting (InstalledPackage instPkg _)) -> Just instPkg + Just (Installed _ (BuildOk _ _ (Just instPkg))) -> Just instPkg + Just (Installed _ (BuildOk _ _ Nothing)) -> internalError depOnNonLib + Nothing -> internalError incomplete + incomplete = "install plan is not closed" + depOnFailed = "configured package depends on failed package" + depOnNonLib = "configured package depends on a non-library package" + +-- | Marks packages in the graph as currently processing (e.g. building). +-- +-- * The package must exist in the graph and be in the configured state. +-- +processing :: [ReadyPackage] -> InstallPlan -> InstallPlan +processing pkgs plan = assert (invariant plan') plan' + where + plan' = plan { + planIndex = PackageIndex.merge (planIndex plan) processingPkgs + } + processingPkgs = PackageIndex.fromList [Processing pkg | pkg <- pkgs] + +-- | Marks a package in the graph as completed. Also saves the build result for +-- the completed package in the plan. +-- +-- * The package must exist in the graph and be in the processing state. +-- * The package must have had no uninstalled dependent packages. +-- +completed :: InstalledPackageId + -> BuildSuccess + -> InstallPlan -> InstallPlan +completed pkgid buildResult plan = assert (invariant plan') plan' + where + plan' = plan { + -- NB: installation can change the IPID, so better + -- record it in the fake mapping... + planFakeMap = insert_fake_mapping buildResult + $ planFakeMap plan, + planIndex = PackageIndex.insert installed + . 
PackageIndex.deleteInstalledPackageId pkgid + $ planIndex plan + } + -- ...but be sure to use the *old* IPID for the lookup for the + -- preexisting record + installed = Installed (lookupProcessingPackage plan pkgid) buildResult + insert_fake_mapping (BuildOk _ _ (Just ipi)) = Map.insert pkgid (installedPackageId ipi) + insert_fake_mapping _ = id + +-- | Marks a package in the graph as having failed. It also marks all the +-- packages that depended on it as having failed. +-- +-- * The package must exist in the graph and be in the processing +-- state. +-- +failed :: InstalledPackageId -- ^ The id of the package that failed to install + -> BuildFailure -- ^ The build result to use for the failed package + -> BuildFailure -- ^ The build result to use for its dependencies + -> InstallPlan + -> InstallPlan +failed pkgid buildResult buildResult' plan = assert (invariant plan') plan' + where + -- NB: failures don't update IPIDs + plan' = plan { + planIndex = PackageIndex.merge (planIndex plan) failures + } + pkg = lookupProcessingPackage plan pkgid + failures = PackageIndex.fromList + $ Failed (readyPackageToConfiguredPackage pkg) buildResult + : [ Failed pkg' buildResult' + | Just pkg' <- map checkConfiguredPackage + $ packagesThatDependOn plan pkgid ] + +-- | Lookup the reachable packages in the reverse dependency graph. +-- +packagesThatDependOn :: InstallPlan + -> InstalledPackageId -> [PlanPackage] +packagesThatDependOn plan pkgid = map (planPkgOf plan) + . tail + . Graph.reachable (planGraphRev plan) + . planVertexOf plan + $ Map.findWithDefault pkgid pkgid (planFakeMap plan) + +-- | Lookup a package that we expect to be in the processing state. 
+-- +lookupProcessingPackage :: InstallPlan + -> InstalledPackageId -> ReadyPackage +lookupProcessingPackage plan pkgid = + -- NB: processing packages are guaranteed to not indirect through + -- planFakeMap + case PackageIndex.lookupInstalledPackageId (planIndex plan) pkgid of + Just (Processing pkg) -> pkg + _ -> internalError $ "not in processing state or no such pkg " ++ display pkgid + +-- | Check a package that we expect to be in the configured or failed state. +-- +checkConfiguredPackage :: PlanPackage -> Maybe ConfiguredPackage +checkConfiguredPackage (Configured pkg) = Just pkg +checkConfiguredPackage (Failed _ _) = Nothing +checkConfiguredPackage pkg = + internalError $ "not configured or no such pkg " ++ display (packageId pkg) + +-- ------------------------------------------------------------ +-- * Checking validity of plans +-- ------------------------------------------------------------ + +-- | A valid installation plan is a set of packages that is 'acyclic', +-- 'closed' and 'consistent'. Also, every 'ConfiguredPackage' in the +-- plan has to have a valid configuration (see 'configuredPackageValid'). +-- +-- * if the result is @False@ use 'problems' to get a detailed list. 
+-- +valid :: Platform -> CompilerInfo -> FakeMap -> PlanIndex -> Bool +valid platform cinfo fakeMap index = null (problems platform cinfo fakeMap index) + +data PlanProblem = + PackageInvalid ConfiguredPackage [PackageProblem] + | PackageMissingDeps PlanPackage [PackageIdentifier] + | PackageCycle [PlanPackage] + | PackageInconsistency PackageName [(PackageIdentifier, Version)] + | PackageStateInvalid PlanPackage PlanPackage + +showPlanProblem :: PlanProblem -> String +showPlanProblem (PackageInvalid pkg packageProblems) = + "Package " ++ display (packageId pkg) + ++ " has an invalid configuration, in particular:\n" + ++ unlines [ " " ++ showPackageProblem problem + | problem <- packageProblems ] + +showPlanProblem (PackageMissingDeps pkg missingDeps) = + "Package " ++ display (packageId pkg) + ++ " depends on the following packages which are missing from the plan: " + ++ intercalate ", " (map display missingDeps) + +showPlanProblem (PackageCycle cycleGroup) = + "The following packages are involved in a dependency cycle " + ++ intercalate ", " (map (display.packageId) cycleGroup) + +showPlanProblem (PackageInconsistency name inconsistencies) = + "Package " ++ display name + ++ " is required by several packages," + ++ " but they require inconsistent versions:\n" + ++ unlines [ " package " ++ display pkg ++ " requires " + ++ display (PackageIdentifier name ver) + | (pkg, ver) <- inconsistencies ] + +showPlanProblem (PackageStateInvalid pkg pkg') = + "Package " ++ display (packageId pkg) + ++ " is in the " ++ showPlanState pkg + ++ " state but it depends on package " ++ display (packageId pkg') + ++ " which is in the " ++ showPlanState pkg' + ++ " state" + where + showPlanState (PreExisting _) = "pre-existing" + showPlanState (Configured _) = "configured" + showPlanState (Processing _) = "processing" + showPlanState (Installed _ _) = "installed" + showPlanState (Failed _ _) = "failed" + +-- | For an invalid plan, produce a detailed list of problems as human readable 
+-- error messages. This is mainly intended for debugging purposes. +-- Use 'showPlanProblem' for a human readable explanation. +-- +problems :: Platform -> CompilerInfo -> FakeMap + -> PlanIndex -> [PlanProblem] +problems platform cinfo fakeMap index = + [ PackageInvalid pkg packageProblems + | Configured pkg <- PackageIndex.allPackages index + , let packageProblems = configuredPackageProblems platform cinfo pkg + , not (null packageProblems) ] + + ++ [ PackageMissingDeps pkg (catMaybes (map (fmap packageId . PackageIndex.fakeLookupInstalledPackageId fakeMap index) missingDeps)) + | (pkg, missingDeps) <- PackageIndex.brokenPackages' fakeMap index ] + + ++ [ PackageCycle cycleGroup + | cycleGroup <- PackageIndex.dependencyCycles' fakeMap index ] + + ++ [ PackageInconsistency name inconsistencies + | (name, inconsistencies) <- PackageIndex.dependencyInconsistencies' fakeMap index ] + + ++ [ PackageStateInvalid pkg pkg' + | pkg <- PackageIndex.allPackages index + , Just pkg' <- map (PackageIndex.fakeLookupInstalledPackageId fakeMap index) (installedDepends pkg) + , not (stateDependencyRelation pkg pkg') ] + +-- | The graph of packages (nodes) and dependencies (edges) must be acyclic. +-- +-- * if the result is @False@ use 'PackageIndex.dependencyCycles' to find out +-- which packages are involved in dependency cycles. +-- +acyclic :: PlanIndex -> Bool +acyclic = null . PackageIndex.dependencyCycles + +-- | An installation plan is closed if for every package in the set, all of +-- its dependencies are also in the set. That is, the set is closed under the +-- dependency relation. +-- +-- * if the result is @False@ use 'PackageIndex.brokenPackages' to find out +-- which packages depend on packages not in the index. +-- +closed :: PlanIndex -> Bool +closed = null . PackageIndex.brokenPackages + +-- | An installation plan is consistent if all dependencies that target a +-- single package name, target the same version. +-- +-- This is slightly subtle. 
It is not the same as requiring that there be at +-- most one version of any package in the set. It only requires that of +-- packages which have more than one other package depending on them. We could +-- actually make the condition even more precise and say that different +-- versions are OK so long as they are not both in the transitive closure of +-- any other package (or equivalently that their inverse closures do not +-- intersect). The point is we do not want to have any packages depending +-- directly or indirectly on two different versions of the same package. The +-- current definition is just a safe approximation of that. +-- +-- * if the result is @False@ use 'PackageIndex.dependencyInconsistencies' to +-- find out which packages are. +-- +consistent :: PlanIndex -> Bool +consistent = null . PackageIndex.dependencyInconsistencies + +-- | The states of packages have that depend on each other must respect +-- this relation. That is for very case where package @a@ depends on +-- package @b@ we require that @dependencyStatesOk a b = True@. 
+-- +stateDependencyRelation :: PlanPackage -> PlanPackage -> Bool +stateDependencyRelation (PreExisting _) (PreExisting _) = True + +stateDependencyRelation (Configured _) (PreExisting _) = True +stateDependencyRelation (Configured _) (Configured _) = True +stateDependencyRelation (Configured _) (Processing _) = True +stateDependencyRelation (Configured _) (Installed _ _) = True + +stateDependencyRelation (Processing _) (PreExisting _) = True +stateDependencyRelation (Processing _) (Installed _ _) = True + +stateDependencyRelation (Installed _ _) (PreExisting _) = True +stateDependencyRelation (Installed _ _) (Installed _ _) = True + +stateDependencyRelation (Failed _ _) (PreExisting _) = True +-- failed can depends on configured because a package can depend on +-- several other packages and if one of the deps fail then we fail +-- but we still depend on the other ones that did not fail: +stateDependencyRelation (Failed _ _) (Configured _) = True +stateDependencyRelation (Failed _ _) (Processing _) = True +stateDependencyRelation (Failed _ _) (Installed _ _) = True +stateDependencyRelation (Failed _ _) (Failed _ _) = True + +stateDependencyRelation _ _ = False + +-- | A 'ConfiguredPackage' is valid if the flag assignment is total and if +-- in the configuration given by the flag assignment, all the package +-- dependencies are satisfied by the specified packages. 
+-- +configuredPackageValid :: Platform -> CompilerInfo -> ConfiguredPackage -> Bool +configuredPackageValid platform cinfo pkg = + null (configuredPackageProblems platform cinfo pkg) + +data PackageProblem = DuplicateFlag FlagName + | MissingFlag FlagName + | ExtraFlag FlagName + | DuplicateDeps [PackageIdentifier] + | MissingDep Dependency + | ExtraDep PackageIdentifier + | InvalidDep Dependency PackageIdentifier + +showPackageProblem :: PackageProblem -> String +showPackageProblem (DuplicateFlag (FlagName flag)) = + "duplicate flag in the flag assignment: " ++ flag + +showPackageProblem (MissingFlag (FlagName flag)) = + "missing an assignment for the flag: " ++ flag + +showPackageProblem (ExtraFlag (FlagName flag)) = + "extra flag given that is not used by the package: " ++ flag + +showPackageProblem (DuplicateDeps pkgids) = + "duplicate packages specified as selected dependencies: " + ++ intercalate ", " (map display pkgids) + +showPackageProblem (MissingDep dep) = + "the package has a dependency " ++ display dep + ++ " but no package has been selected to satisfy it." + +showPackageProblem (ExtraDep pkgid) = + "the package configuration specifies " ++ display pkgid + ++ " but (with the given flag assignment) the package does not actually" + ++ " depend on any version of that package." + +showPackageProblem (InvalidDep dep pkgid) = + "the package depends on " ++ display dep + ++ " but the configuration specifies " ++ display pkgid + ++ " which does not satisfy the dependency." 
+ +configuredPackageProblems :: Platform -> CompilerInfo + -> ConfiguredPackage -> [PackageProblem] +configuredPackageProblems platform cinfo + (ConfiguredPackage pkg specifiedFlags stanzas specifiedDeps) = + [ DuplicateFlag flag | ((flag,_):_) <- duplicates specifiedFlags ] + ++ [ MissingFlag flag | OnlyInLeft flag <- mergedFlags ] + ++ [ ExtraFlag flag | OnlyInRight flag <- mergedFlags ] + ++ [ DuplicateDeps pkgs + | pkgs <- duplicatesBy (comparing packageName) specifiedDeps ] + ++ [ MissingDep dep | OnlyInLeft dep <- mergedDeps ] + ++ [ ExtraDep pkgid | OnlyInRight pkgid <- mergedDeps ] + ++ [ InvalidDep dep pkgid | InBoth dep pkgid <- mergedDeps + , not (packageSatisfiesDependency pkgid dep) ] + where + mergedFlags = mergeBy compare + (sort $ map flagName (genPackageFlags (packageDescription pkg))) + (sort $ map fst specifiedFlags) + + mergedDeps = mergeBy + (\dep pkgid -> dependencyName dep `compare` packageName pkgid) + (sortBy (comparing dependencyName) requiredDeps) + (sortBy (comparing packageName) specifiedDeps) + + packageSatisfiesDependency + (PackageIdentifier name version) + (Dependency name' versionRange) = assert (name == name') $ + version `withinRange` versionRange + + dependencyName (Dependency name _) = name + + requiredDeps :: [Dependency] + requiredDeps = + --TODO: use something lower level than finalizePackageDescription + case finalizePackageDescription specifiedFlags + (const True) + platform cinfo + [] + (enableStanzas stanzas $ packageDescription pkg) of + Right (resolvedPkg, _) -> externalBuildDepends resolvedPkg + Left _ -> error "configuredPackageInvalidDeps internal error" diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/InstallSymlink.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/InstallSymlink.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/InstallSymlink.hs 1970-01-01 00:00:00.000000000 +0000 +++ 
cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/InstallSymlink.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,245 @@ +{-# LANGUAGE CPP #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.InstallSymlink +-- Copyright : (c) Duncan Coutts 2008 +-- License : BSD-like +-- +-- Maintainer : cabal-devel@haskell.org +-- Stability : provisional +-- Portability : portable +-- +-- Managing installing binaries with symlinks. +----------------------------------------------------------------------------- +module Distribution.Client.InstallSymlink ( + symlinkBinaries, + symlinkBinary, + ) where + +#if mingw32_HOST_OS + +import Distribution.Package (PackageIdentifier) +import Distribution.Client.InstallPlan (InstallPlan) +import Distribution.Client.Setup (InstallFlags) +import Distribution.Simple.Setup (ConfigFlags) +import Distribution.Simple.Compiler + +symlinkBinaries :: Compiler + -> ConfigFlags + -> InstallFlags + -> InstallPlan + -> IO [(PackageIdentifier, String, FilePath)] +symlinkBinaries _ _ _ _ = return [] + +symlinkBinary :: FilePath -> FilePath -> String -> String -> IO Bool +symlinkBinary _ _ _ _ = fail "Symlinking feature not available on Windows" + +#else + +import Distribution.Client.Types + ( SourcePackage(..), ReadyPackage(..), enableStanzas ) +import Distribution.Client.Setup + ( InstallFlags(installSymlinkBinDir) ) +import qualified Distribution.Client.InstallPlan as InstallPlan +import Distribution.Client.InstallPlan (InstallPlan) + +import Distribution.Package + ( PackageIdentifier, Package(packageId), mkPackageKey, PackageKey ) +import Distribution.Compiler + ( CompilerId(..) 
) +import qualified Distribution.PackageDescription as PackageDescription +import Distribution.PackageDescription + ( PackageDescription ) +import Distribution.PackageDescription.Configuration + ( finalizePackageDescription ) +import Distribution.Simple.Setup + ( ConfigFlags(..), fromFlag, fromFlagOrDefault, flagToMaybe ) +import qualified Distribution.Simple.InstallDirs as InstallDirs +import qualified Distribution.InstalledPackageInfo as Installed +import Distribution.Simple.Compiler + ( Compiler, CompilerInfo(..), packageKeySupported ) + +import System.Posix.Files + ( getSymbolicLinkStatus, isSymbolicLink, createSymbolicLink + , removeLink ) +import System.Directory + ( canonicalizePath ) +import System.FilePath + ( (), splitPath, joinPath, isAbsolute ) + +import Prelude hiding (ioError) +import System.IO.Error + ( isDoesNotExistError, ioError ) +import Distribution.Compat.Exception ( catchIO ) +import Control.Exception + ( assert ) +import Data.Maybe + ( catMaybes ) + +-- | We would like by default to install binaries into some location that is on +-- the user's PATH. For per-user installations on Unix systems that basically +-- means the @~/bin/@ directory. On the majority of platforms the @~/bin/@ +-- directory will be on the user's PATH. However some people are a bit nervous +-- about letting a package manager install programs into @~/bin/@. +-- +-- A compromise solution is that instead of installing binaries directly into +-- @~/bin/@, we could install them in a private location under @~/.cabal/bin@ +-- and then create symlinks in @~/bin/@. We can be careful when setting up the +-- symlinks that we do not overwrite any binary that the user installed. We can +-- check if it was a symlink we made because it would point to the private dir +-- where we install our binaries. 
This means we can install normally without +-- worrying and in a later phase set up symlinks, and if that fails then we +-- report it to the user, but even in this case the package is still in an OK +-- installed state. +-- +-- This is an optional feature that users can choose to use or not. It is +-- controlled from the config file. Of course it only works on POSIX systems +-- with symlinks so is not available to Windows users. +-- +symlinkBinaries :: Compiler + -> ConfigFlags + -> InstallFlags + -> InstallPlan + -> IO [(PackageIdentifier, String, FilePath)] +symlinkBinaries comp configFlags installFlags plan = + case flagToMaybe (installSymlinkBinDir installFlags) of + Nothing -> return [] + Just symlinkBinDir + | null exes -> return [] + | otherwise -> do + publicBinDir <- canonicalizePath symlinkBinDir +-- TODO: do we want to do this here? : +-- createDirectoryIfMissing True publicBinDir + fmap catMaybes $ sequence + [ do privateBinDir <- pkgBinDir pkg pkg_key + ok <- symlinkBinary + publicBinDir privateBinDir + publicExeName privateExeName + if ok + then return Nothing + else return (Just (pkgid, publicExeName, + privateBinDir privateExeName)) + | (ReadyPackage _ _flags _ deps, pkg, exe) <- exes + , let pkgid = packageId pkg + pkg_key = mkPackageKey (packageKeySupported comp) pkgid + (map Installed.packageKey deps) [] + publicExeName = PackageDescription.exeName exe + privateExeName = prefix ++ publicExeName ++ suffix + prefix = substTemplate pkgid pkg_key prefixTemplate + suffix = substTemplate pkgid pkg_key suffixTemplate ] + where + exes = + [ (cpkg, pkg, exe) + | InstallPlan.Installed cpkg _ <- InstallPlan.toList plan + , let pkg = pkgDescription cpkg + , exe <- PackageDescription.executables pkg + , PackageDescription.buildable (PackageDescription.buildInfo exe) ] + + pkgDescription :: ReadyPackage -> PackageDescription + pkgDescription (ReadyPackage (SourcePackage _ pkg _ _) flags stanzas _) = + case finalizePackageDescription flags + (const True) + 
platform cinfo [] (enableStanzas stanzas pkg) of + Left _ -> error "finalizePackageDescription ReadyPackage failed" + Right (desc, _) -> desc + + -- This is sadly rather complicated. We're kind of re-doing part of the + -- configuration for the package. :-( + pkgBinDir :: PackageDescription -> PackageKey -> IO FilePath + pkgBinDir pkg pkg_key = do + defaultDirs <- InstallDirs.defaultInstallDirs + compilerFlavor + (fromFlag (configUserInstall configFlags)) + (PackageDescription.hasLibs pkg) + let templateDirs = InstallDirs.combineInstallDirs fromFlagOrDefault + defaultDirs (configInstallDirs configFlags) + absoluteDirs = InstallDirs.absoluteInstallDirs + (packageId pkg) pkg_key + cinfo InstallDirs.NoCopyDest + platform templateDirs + canonicalizePath (InstallDirs.bindir absoluteDirs) + + substTemplate pkgid pkg_key = InstallDirs.fromPathTemplate + . InstallDirs.substPathTemplate env + where env = InstallDirs.initialPathTemplateEnv pkgid pkg_key + cinfo platform + + fromFlagTemplate = fromFlagOrDefault (InstallDirs.toPathTemplate "") + prefixTemplate = fromFlagTemplate (configProgPrefix configFlags) + suffixTemplate = fromFlagTemplate (configProgSuffix configFlags) + platform = InstallPlan.planPlatform plan + cinfo = InstallPlan.planCompiler plan + (CompilerId compilerFlavor _) = compilerInfoId cinfo + +symlinkBinary :: FilePath -- ^ The canonical path of the public bin dir + -- eg @/home/user/bin@ + -> FilePath -- ^ The canonical path of the private bin dir + -- eg @/home/user/.cabal/bin@ + -> String -- ^ The name of the executable to go in the public + -- bin dir, eg @foo@ + -> String -- ^ The name of the executable to in the private bin + -- dir, eg @foo-1.0@ + -> IO Bool -- ^ If creating the symlink was successful. @False@ + -- if there was another file there already that we + -- did not own. Other errors like permission errors + -- just propagate as exceptions. 
+symlinkBinary publicBindir privateBindir publicName privateName = do + ok <- targetOkToOverwrite (publicBindir publicName) + (privateBindir privateName) + case ok of + NotOurFile -> return False + NotExists -> mkLink >> return True + OkToOverwrite -> rmLink >> mkLink >> return True + where + relativeBindir = makeRelative publicBindir privateBindir + mkLink = createSymbolicLink (relativeBindir privateName) + (publicBindir publicName) + rmLink = removeLink (publicBindir publicName) + +-- | Check a file path of a symlink that we would like to create to see if it +-- is OK. For it to be OK to overwrite it must either not already exist yet or +-- be a symlink to our target (in which case we can assume ownership). +-- +targetOkToOverwrite :: FilePath -- ^ The file path of the symlink to the private + -- binary that we would like to create + -> FilePath -- ^ The canonical path of the private binary. + -- Use 'canonicalizePath' to make this. + -> IO SymlinkStatus +targetOkToOverwrite symlink target = handleNotExist $ do + status <- getSymbolicLinkStatus symlink + if not (isSymbolicLink status) + then return NotOurFile + else do target' <- canonicalizePath symlink + -- This relies on canonicalizePath handling symlinks + if target == target' + then return OkToOverwrite + else return NotOurFile + + where + handleNotExist action = catchIO action $ \ioexception -> + -- If the target doesn't exist then there's no problem overwriting it! + if isDoesNotExistError ioexception + then return NotExists + else ioError ioexception + +data SymlinkStatus + = NotExists -- ^ The file doesn't exist so we can make a symlink. + | OkToOverwrite -- ^ A symlink already exists, though it is ours. We'll + -- have to delete it first before we make a new symlink. + | NotOurFile -- ^ A file already exists and it is not one of our existing + -- symlinks (either because it is not a symlink or because + -- it points somewhere other than our managed space). 
+ deriving Show + +-- | Take two canonical paths and produce a relative path to get from the first +-- to the second, even if it means adding @..@ path components. +-- +makeRelative :: FilePath -> FilePath -> FilePath +makeRelative a b = assert (isAbsolute a && isAbsolute b) $ + let as = splitPath a + bs = splitPath b + commonLen = length $ takeWhile id $ zipWith (==) as bs + in joinPath $ [ ".." | _ <- drop commonLen as ] + ++ drop commonLen bs + +#endif diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/JobControl.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/JobControl.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/JobControl.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/JobControl.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,89 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.JobControl +-- Copyright : (c) Duncan Coutts 2012 +-- License : BSD-like +-- +-- Maintainer : cabal-devel@haskell.org +-- Stability : provisional +-- Portability : portable +-- +-- A job control concurrency abstraction +----------------------------------------------------------------------------- +module Distribution.Client.JobControl ( + JobControl, + newSerialJobControl, + newParallelJobControl, + spawnJob, + collectJob, + + JobLimit, + newJobLimit, + withJobLimit, + + Lock, + newLock, + criticalSection + ) where + +import Control.Monad +import Control.Concurrent hiding (QSem, newQSem, waitQSem, signalQSem) +import Control.Exception (SomeException, bracket_, mask, throw, try) +import Distribution.Client.Compat.Semaphore + +data JobControl m a = JobControl { + spawnJob :: m a -> m (), + collectJob :: m a + } + + +newSerialJobControl :: IO (JobControl IO a) +newSerialJobControl = do + queue <- newChan + return JobControl { + spawnJob = spawn 
queue, + collectJob = collect queue + } + where + spawn :: Chan (IO a) -> IO a -> IO () + spawn = writeChan + + collect :: Chan (IO a) -> IO a + collect = join . readChan + +newParallelJobControl :: IO (JobControl IO a) +newParallelJobControl = do + resultVar <- newEmptyMVar + return JobControl { + spawnJob = spawn resultVar, + collectJob = collect resultVar + } + where + spawn :: MVar (Either SomeException a) -> IO a -> IO () + spawn resultVar job = + mask $ \restore -> + forkIO (do res <- try (restore job) + putMVar resultVar res) + >> return () + + collect :: MVar (Either SomeException a) -> IO a + collect resultVar = + takeMVar resultVar >>= either throw return + +data JobLimit = JobLimit QSem + +newJobLimit :: Int -> IO JobLimit +newJobLimit n = + fmap JobLimit (newQSem n) + +withJobLimit :: JobLimit -> IO a -> IO a +withJobLimit (JobLimit sem) = + bracket_ (waitQSem sem) (signalQSem sem) + +newtype Lock = Lock (MVar ()) + +newLock :: IO Lock +newLock = fmap Lock $ newMVar () + +criticalSection :: Lock -> IO a -> IO a +criticalSection (Lock lck) act = bracket_ (takeMVar lck) (putMVar lck ()) act diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/List.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/List.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/List.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/List.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,589 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.List +-- Copyright : (c) David Himmelstrup 2005 +-- Duncan Coutts 2008-2011 +-- License : BSD-like +-- +-- Maintainer : cabal-devel@haskell.org +-- +-- Search for and print information about packages +----------------------------------------------------------------------------- +module Distribution.Client.List ( + list, 
info + ) where + +import Distribution.Package + ( PackageName(..), Package(..), packageName, packageVersion + , Dependency(..), simplifyDependency ) +import Distribution.ModuleName (ModuleName) +import Distribution.License (License) +import qualified Distribution.InstalledPackageInfo as Installed +import qualified Distribution.PackageDescription as Source +import Distribution.PackageDescription + ( Flag(..), FlagName(..) ) +import Distribution.PackageDescription.Configuration + ( flattenPackageDescription ) + +import Distribution.Simple.Compiler + ( Compiler, PackageDBStack ) +import Distribution.Simple.Program (ProgramConfiguration) +import Distribution.Simple.Utils + ( equating, comparing, die, notice ) +import Distribution.Simple.Setup (fromFlag) +import Distribution.Simple.PackageIndex (InstalledPackageIndex) +import qualified Distribution.Simple.PackageIndex as InstalledPackageIndex +import qualified Distribution.Client.PackageIndex as PackageIndex +import Distribution.Version + ( Version(..), VersionRange, withinRange, anyVersion + , intersectVersionRanges, simplifyVersionRange ) +import Distribution.Verbosity (Verbosity) +import Distribution.Text + ( Text(disp), display ) + +import Distribution.Client.Types + ( SourcePackage(..), Repo, SourcePackageDb(..) ) +import Distribution.Client.Dependency.Types + ( PackageConstraint(..), ExtDependency(..) ) +import Distribution.Client.Targets + ( UserTarget, resolveUserTargets, PackageSpecifier(..) ) +import Distribution.Client.Setup + ( GlobalFlags(..), ListFlags(..), InfoFlags(..) ) +import Distribution.Client.Utils + ( mergeBy, MergeResult(..) 
) +import Distribution.Client.IndexUtils as IndexUtils + ( getSourcePackages, getInstalledPackages ) +import Distribution.Client.FetchUtils + ( isFetched ) + +import Data.List + ( sortBy, groupBy, sort, nub, intersperse, maximumBy, partition ) +import Data.Maybe + ( listToMaybe, fromJust, fromMaybe, isJust ) +import qualified Data.Map as Map +import Data.Tree as Tree +import Control.Monad + ( MonadPlus(mplus), join ) +import Control.Exception + ( assert ) +import Text.PrettyPrint as Disp +import System.Directory + ( doesDirectoryExist ) + + +-- | Return a list of packages matching given search strings. +getPkgList :: Verbosity + -> PackageDBStack + -> [Repo] + -> Compiler + -> ProgramConfiguration + -> ListFlags + -> [String] + -> IO [PackageDisplayInfo] +getPkgList verbosity packageDBs repos comp conf listFlags pats = do + installedPkgIndex <- getInstalledPackages verbosity comp packageDBs conf + sourcePkgDb <- getSourcePackages verbosity repos + let sourcePkgIndex = packageIndex sourcePkgDb + prefs name = fromMaybe anyVersion + (Map.lookup name (packagePreferences sourcePkgDb)) + + pkgsInfo :: + [(PackageName, [Installed.InstalledPackageInfo], [SourcePackage])] + pkgsInfo + -- gather info for all packages + | null pats = mergePackages + (InstalledPackageIndex.allPackages installedPkgIndex) + ( PackageIndex.allPackages sourcePkgIndex) + + -- gather info for packages matching search term + | otherwise = pkgsInfoMatching + + pkgsInfoMatching :: + [(PackageName, [Installed.InstalledPackageInfo], [SourcePackage])] + pkgsInfoMatching = + let matchingInstalled = matchingPackages + InstalledPackageIndex.searchByNameSubstring + installedPkgIndex + matchingSource = matchingPackages + (\ idx n -> + concatMap snd + (PackageIndex.searchByNameSubstring idx n)) + sourcePkgIndex + in mergePackages matchingInstalled matchingSource + + matches :: [PackageDisplayInfo] + matches = [ mergePackageInfo pref + installedPkgs sourcePkgs selectedPkg False + | (pkgname, installedPkgs, 
sourcePkgs) <- pkgsInfo + , not onlyInstalled || not (null installedPkgs) + , let pref = prefs pkgname + selectedPkg = latestWithPref pref sourcePkgs ] + return matches + where + onlyInstalled = fromFlag (listInstalled listFlags) + matchingPackages search index = + [ pkg + | pat <- pats + , pkg <- search index pat ] + + +-- | Show information about packages. +list :: Verbosity + -> PackageDBStack + -> [Repo] + -> Compiler + -> ProgramConfiguration + -> ListFlags + -> [String] + -> IO () +list verbosity packageDBs repos comp conf listFlags pats = do + matches <- getPkgList verbosity packageDBs repos comp conf listFlags pats + + if simpleOutput + then putStr $ unlines + [ display (pkgName pkg) ++ " " ++ display version + | pkg <- matches + , version <- if onlyInstalled + then installedVersions pkg + else nub . sort $ installedVersions pkg + ++ sourceVersions pkg ] + -- Note: this only works because for 'list', one cannot currently + -- specify any version constraints, so listing all installed + -- and source ones works. + else + if null matches + then notice verbosity "No matches found." + else putStr $ unlines (map showPackageSummaryInfo matches) + where + onlyInstalled = fromFlag (listInstalled listFlags) + simpleOutput = fromFlag (listSimpleOutput listFlags) + +info :: Verbosity + -> PackageDBStack + -> [Repo] + -> Compiler + -> ProgramConfiguration + -> GlobalFlags + -> InfoFlags + -> [UserTarget] + -> IO () +info verbosity _ _ _ _ _ _ [] = + notice verbosity "No packages requested. Nothing to do." 
+ +info verbosity packageDBs repos comp conf + globalFlags _listFlags userTargets = do + + installedPkgIndex <- getInstalledPackages verbosity comp packageDBs conf + sourcePkgDb <- getSourcePackages verbosity repos + let sourcePkgIndex = packageIndex sourcePkgDb + prefs name = fromMaybe anyVersion + (Map.lookup name (packagePreferences sourcePkgDb)) + + -- Users may specify names of packages that are only installed, not + -- just available source packages, so we must resolve targets using + -- the combination of installed and source packages. + let sourcePkgs' = PackageIndex.fromList + $ map packageId + (InstalledPackageIndex.allPackages installedPkgIndex) + ++ map packageId + (PackageIndex.allPackages sourcePkgIndex) + pkgSpecifiers <- resolveUserTargets verbosity + (fromFlag $ globalWorldFile globalFlags) + sourcePkgs' userTargets + + pkgsinfo <- sequence + [ do pkginfo <- either die return $ + gatherPkgInfo prefs + installedPkgIndex sourcePkgIndex + pkgSpecifier + updateFileSystemPackageDetails pkginfo + | pkgSpecifier <- pkgSpecifiers ] + + putStr $ unlines (map showPackageDetailedInfo pkgsinfo) + + where + gatherPkgInfo :: (PackageName -> VersionRange) -> + InstalledPackageIndex -> + PackageIndex.PackageIndex SourcePackage -> + PackageSpecifier SourcePackage -> + Either String PackageDisplayInfo + gatherPkgInfo prefs installedPkgIndex sourcePkgIndex + (NamedPackage name constraints) + | null (selectedInstalledPkgs) && null (selectedSourcePkgs) + = Left $ "There is no available version of " ++ display name + ++ " that satisfies " + ++ display (simplifyVersionRange verConstraint) + + | otherwise + = Right $ mergePackageInfo pref installedPkgs + sourcePkgs selectedSourcePkg' + showPkgVersion + where + (pref, installedPkgs, sourcePkgs) = + sourcePkgsInfo prefs name installedPkgIndex sourcePkgIndex + + selectedInstalledPkgs = InstalledPackageIndex.lookupDependency + installedPkgIndex + (Dependency name verConstraint) + selectedSourcePkgs = 
PackageIndex.lookupDependency sourcePkgIndex + (Dependency name verConstraint) + selectedSourcePkg' = latestWithPref pref selectedSourcePkgs + + -- display a specific package version if the user + -- supplied a non-trivial version constraint + showPkgVersion = not (null verConstraints) + verConstraint = foldr intersectVersionRanges anyVersion verConstraints + verConstraints = [ vr | PackageConstraintVersion _ vr <- constraints ] + + gatherPkgInfo prefs installedPkgIndex sourcePkgIndex + (SpecificSourcePackage pkg) = + Right $ mergePackageInfo pref installedPkgs sourcePkgs + selectedPkg True + where + name = packageName pkg + selectedPkg = Just pkg + (pref, installedPkgs, sourcePkgs) = + sourcePkgsInfo prefs name installedPkgIndex sourcePkgIndex + +sourcePkgsInfo :: + (PackageName -> VersionRange) + -> PackageName + -> InstalledPackageIndex + -> PackageIndex.PackageIndex SourcePackage + -> (VersionRange, [Installed.InstalledPackageInfo], [SourcePackage]) +sourcePkgsInfo prefs name installedPkgIndex sourcePkgIndex = + (pref, installedPkgs, sourcePkgs) + where + pref = prefs name + installedPkgs = concatMap snd (InstalledPackageIndex.lookupPackageName + installedPkgIndex name) + sourcePkgs = PackageIndex.lookupPackageName sourcePkgIndex name + + +-- | The info that we can display for each package. It is information per +-- package name and covers all installed and available versions. 
+-- +data PackageDisplayInfo = PackageDisplayInfo { + pkgName :: PackageName, + selectedVersion :: Maybe Version, + selectedSourcePkg :: Maybe SourcePackage, + installedVersions :: [Version], + sourceVersions :: [Version], + preferredVersions :: VersionRange, + homepage :: String, + bugReports :: String, + sourceRepo :: String, + synopsis :: String, + description :: String, + category :: String, + license :: License, + author :: String, + maintainer :: String, + dependencies :: [ExtDependency], + flags :: [Flag], + hasLib :: Bool, + hasExe :: Bool, + executables :: [String], + modules :: [ModuleName], + haddockHtml :: FilePath, + haveTarball :: Bool + } + +showPackageSummaryInfo :: PackageDisplayInfo -> String +showPackageSummaryInfo pkginfo = + renderStyle (style {lineLength = 80, ribbonsPerLine = 1}) $ + char '*' <+> disp (pkgName pkginfo) + $+$ + (nest 4 $ vcat [ + maybeShow (synopsis pkginfo) "Synopsis:" reflowParagraphs + , text "Default available version:" <+> + case selectedSourcePkg pkginfo of + Nothing -> text "[ Not available from any configured repository ]" + Just pkg -> disp (packageVersion pkg) + , text "Installed versions:" <+> + case installedVersions pkginfo of + [] | hasLib pkginfo -> text "[ Not installed ]" + | otherwise -> text "[ Unknown ]" + versions -> dispTopVersions 4 + (preferredVersions pkginfo) versions + , maybeShow (homepage pkginfo) "Homepage:" text + , text "License: " <+> text (display (license pkginfo)) + ]) + $+$ text "" + where + maybeShow [] _ _ = empty + maybeShow l s f = text s <+> (f l) + +showPackageDetailedInfo :: PackageDisplayInfo -> String +showPackageDetailedInfo pkginfo = + renderStyle (style {lineLength = 80, ribbonsPerLine = 1}) $ + char '*' <+> disp (pkgName pkginfo) + <> maybe empty (\v -> char '-' <> disp v) (selectedVersion pkginfo) + <+> text (replicate (16 - length (display (pkgName pkginfo))) ' ') + <> parens pkgkind + $+$ + (nest 4 $ vcat [ + entry "Synopsis" synopsis hideIfNull reflowParagraphs + , entry 
"Versions available" sourceVersions + (altText null "[ Not available from server ]") + (dispTopVersions 9 (preferredVersions pkginfo)) + , entry "Versions installed" installedVersions + (altText null (if hasLib pkginfo then "[ Not installed ]" + else "[ Unknown ]")) + (dispTopVersions 4 (preferredVersions pkginfo)) + , entry "Homepage" homepage orNotSpecified text + , entry "Bug reports" bugReports orNotSpecified text + , entry "Description" description hideIfNull reflowParagraphs + , entry "Category" category hideIfNull text + , entry "License" license alwaysShow disp + , entry "Author" author hideIfNull reflowLines + , entry "Maintainer" maintainer hideIfNull reflowLines + , entry "Source repo" sourceRepo orNotSpecified text + , entry "Executables" executables hideIfNull (commaSep text) + , entry "Flags" flags hideIfNull (commaSep dispFlag) + , entry "Dependencies" dependencies hideIfNull (commaSep disp) + , entry "Documentation" haddockHtml showIfInstalled text + , entry "Cached" haveTarball alwaysShow dispYesNo + , if not (hasLib pkginfo) then empty else + text "Modules:" $+$ nest 4 (vcat (map disp . sort . modules $ pkginfo)) + ]) + $+$ text "" + where + entry fname field cond format = case cond (field pkginfo) of + Nothing -> label <+> format (field pkginfo) + Just Nothing -> empty + Just (Just other) -> label <+> text other + where + label = text fname <> char ':' <> padding + padding = text (replicate (13 - length fname ) ' ') + + normal = Nothing + hide = Just Nothing + replace msg = Just (Just msg) + + alwaysShow = const normal + hideIfNull v = if null v then hide else normal + showIfInstalled v + | not isInstalled = hide + | null v = replace "[ Not installed ]" + | otherwise = normal + altText nul msg v = if nul v then replace msg else normal + orNotSpecified = altText null "[ Not specified ]" + + commaSep f = Disp.fsep . Disp.punctuate (Disp.char ',') . 
map f + dispFlag f = case flagName f of FlagName n -> text n + dispYesNo True = text "Yes" + dispYesNo False = text "No" + + isInstalled = not (null (installedVersions pkginfo)) + hasExes = length (executables pkginfo) >= 2 + --TODO: exclude non-buildable exes + pkgkind | hasLib pkginfo && hasExes = text "programs and library" + | hasLib pkginfo && hasExe pkginfo = text "program and library" + | hasLib pkginfo = text "library" + | hasExes = text "programs" + | hasExe pkginfo = text "program" + | otherwise = empty + + +reflowParagraphs :: String -> Doc +reflowParagraphs = + vcat + . intersperse (text "") -- re-insert blank lines + . map (fsep . map text . concatMap words) -- reflow paragraphs + . filter (/= [""]) + . groupBy (\x y -> "" `notElem` [x,y]) -- break on blank lines + . lines + +reflowLines :: String -> Doc +reflowLines = vcat . map text . lines + +-- | We get the 'PackageDisplayInfo' by combining the info for the installed +-- and available versions of a package. +-- +-- * We're building info about a various versions of a single named package so +-- the input package info records are all supposed to refer to the same +-- package name. 
+-- +mergePackageInfo :: VersionRange + -> [Installed.InstalledPackageInfo] + -> [SourcePackage] + -> Maybe SourcePackage + -> Bool + -> PackageDisplayInfo +mergePackageInfo versionPref installedPkgs sourcePkgs selectedPkg showVer = + assert (length installedPkgs + length sourcePkgs > 0) $ + PackageDisplayInfo { + pkgName = combine packageName source + packageName installed, + selectedVersion = if showVer then fmap packageVersion selectedPkg + else Nothing, + selectedSourcePkg = sourceSelected, + installedVersions = map packageVersion installedPkgs, + sourceVersions = map packageVersion sourcePkgs, + preferredVersions = versionPref, + + license = combine Source.license source + Installed.license installed, + maintainer = combine Source.maintainer source + Installed.maintainer installed, + author = combine Source.author source + Installed.author installed, + homepage = combine Source.homepage source + Installed.homepage installed, + bugReports = maybe "" Source.bugReports source, + sourceRepo = fromMaybe "" . join + . fmap (uncons Nothing Source.repoLocation + . sortBy (comparing Source.repoKind) + . Source.sourceRepos) + $ source, + --TODO: installed package info is missing synopsis + synopsis = maybe "" Source.synopsis source, + description = combine Source.description source + Installed.description installed, + category = combine Source.category source + Installed.category installed, + flags = maybe [] Source.genPackageFlags sourceGeneric, + hasLib = isJust installed + || fromMaybe False + (fmap (isJust . Source.condLibrary) sourceGeneric), + hasExe = fromMaybe False + (fmap (not . null . Source.condExecutables) sourceGeneric), + executables = map fst (maybe [] Source.condExecutables sourceGeneric), + modules = combine (map Installed.exposedName . Installed.exposedModules) + installed + (maybe [] getListOfExposedModules . Source.library) + source, + dependencies = + combine (map (SourceDependency . simplifyDependency) + . 
Source.buildDepends) source + (map InstalledDependency . Installed.depends) installed, + haddockHtml = fromMaybe "" . join + . fmap (listToMaybe . Installed.haddockHTMLs) + $ installed, + haveTarball = False + } + where + combine f x g y = fromJust (fmap f x `mplus` fmap g y) + installed :: Maybe Installed.InstalledPackageInfo + installed = latestWithPref versionPref installedPkgs + + getListOfExposedModules lib = Source.exposedModules lib + ++ map Source.moduleReexportName + (Source.reexportedModules lib) + + sourceSelected + | isJust selectedPkg = selectedPkg + | otherwise = latestWithPref versionPref sourcePkgs + sourceGeneric = fmap packageDescription sourceSelected + source = fmap flattenPackageDescription sourceGeneric + + uncons :: b -> (a -> b) -> [a] -> b + uncons z _ [] = z + uncons _ f (x:_) = f x + + +-- | Not all the info is pure. We have to check if the docs really are +-- installed, because the registered package info lies. Similarly we have to +-- check if the tarball has indeed been fetched. +-- +updateFileSystemPackageDetails :: PackageDisplayInfo -> IO PackageDisplayInfo +updateFileSystemPackageDetails pkginfo = do + fetched <- maybe (return False) (isFetched . packageSource) + (selectedSourcePkg pkginfo) + docsExist <- doesDirectoryExist (haddockHtml pkginfo) + return pkginfo { + haveTarball = fetched, + haddockHtml = if docsExist then haddockHtml pkginfo else "" + } + +latestWithPref :: Package pkg => VersionRange -> [pkg] -> Maybe pkg +latestWithPref _ [] = Nothing +latestWithPref pref pkgs = Just (maximumBy (comparing prefThenVersion) pkgs) + where + prefThenVersion pkg = let ver = packageVersion pkg + in (withinRange ver pref, ver) + + +-- | Rearrange installed and source packages into groups referring to the +-- same package by name. In the result pairs, the lists are guaranteed to not +-- both be empty. 
+-- +mergePackages :: [Installed.InstalledPackageInfo] + -> [SourcePackage] + -> [( PackageName + , [Installed.InstalledPackageInfo] + , [SourcePackage] )] +mergePackages installedPkgs sourcePkgs = + map collect + $ mergeBy (\i a -> fst i `compare` fst a) + (groupOn packageName installedPkgs) + (groupOn packageName sourcePkgs) + where + collect (OnlyInLeft (name,is) ) = (name, is, []) + collect ( InBoth (_,is) (name,as)) = (name, is, as) + collect (OnlyInRight (name,as)) = (name, [], as) + +groupOn :: Ord key => (a -> key) -> [a] -> [(key,[a])] +groupOn key = map (\xs -> (key (head xs), xs)) + . groupBy (equating key) + . sortBy (comparing key) + +dispTopVersions :: Int -> VersionRange -> [Version] -> Doc +dispTopVersions n pref vs = + (Disp.fsep . Disp.punctuate (Disp.char ',') + . map (\ver -> if ispref ver then disp ver else parens (disp ver)) + . sort . take n . interestingVersions ispref + $ vs) + <+> trailingMessage + + where + ispref ver = withinRange ver pref + extra = length vs - n + trailingMessage + | extra <= 0 = Disp.empty + | otherwise = Disp.parens $ Disp.text "and" + <+> Disp.int (length vs - n) + <+> if extra == 1 then Disp.text "other" + else Disp.text "others" + +-- | Reorder a bunch of versions to put the most interesting / significant +-- versions first. A preferred version range is taken into account. +-- +-- This may be used in a user interface to select a small number of versions +-- to present to the user, e.g. +-- +-- > let selectVersions = sort . take 5 . interestingVersions pref +-- +interestingVersions :: (Version -> Bool) -> [Version] -> [Version] +interestingVersions pref = + map ((\ns -> Version ns []) . fst) . filter snd + . concat . Tree.levels + . swizzleTree + . reorderTree (\(Node (v,_) _) -> pref (Version v [])) + . reverseTree + . mkTree + . 
map versionBranch + + where + swizzleTree = unfoldTree (spine []) + where + spine ts' (Node x []) = (x, ts') + spine ts' (Node x (t:ts)) = spine (Node x ts:ts') t + + reorderTree _ (Node x []) = Node x [] + reorderTree p (Node x ts) = Node x (ts' ++ ts'') + where + (ts',ts'') = partition p (map (reorderTree p) ts) + + reverseTree (Node x cs) = Node x (reverse (map reverseTree cs)) + + mkTree xs = unfoldTree step (False, [], xs) + where + step (node,ns,vs) = + ( (reverse ns, node) + , [ (any null vs', n:ns, filter (not . null) vs') + | (n, vs') <- groups vs ] + ) + groups = map (\g -> (head (head g), map tail g)) + . groupBy (equating head) diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/PackageIndex.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/PackageIndex.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/PackageIndex.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/PackageIndex.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,490 @@ +{-# LANGUAGE CPP #-} +{-# LANGUAGE DeriveFunctor #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.PackageIndex +-- Copyright : (c) David Himmelstrup 2005, +-- Bjorn Bringert 2007, +-- Duncan Coutts 2008 +-- +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- An index of packages. 
+-- +module Distribution.Client.PackageIndex ( + -- * Package index data type + PackageIndex, + + -- * Creating an index + fromList, + + -- * Updates + merge, + insert, + deletePackageName, + deletePackageId, + deleteDependency, + + -- * Queries + + -- ** Precise lookups + elemByPackageId, + elemByPackageName, + lookupPackageName, + lookupPackageId, + lookupDependency, + + -- ** Case-insensitive searches + searchByName, + SearchResult(..), + searchByNameSubstring, + + -- ** Bulk queries + allPackages, + allPackagesByName, + + -- ** Special queries + brokenPackages, + dependencyClosure, + reverseDependencyClosure, + topologicalOrder, + reverseTopologicalOrder, + dependencyInconsistencies, + dependencyCycles, + dependencyGraph, + ) where + +import Prelude hiding (lookup) +import Control.Exception (assert) +import qualified Data.Map as Map +import Data.Map (Map) +import qualified Data.Tree as Tree +import qualified Data.Graph as Graph +import qualified Data.Array as Array +import Data.Array ((!)) +import Data.List (groupBy, sortBy, nub, isInfixOf) +#if !MIN_VERSION_base(4,8,0) +import Data.Monoid (Monoid(..)) +#endif +import Data.Maybe (isJust, isNothing, fromMaybe, catMaybes) + +import Distribution.Package + ( PackageName(..), PackageIdentifier(..) + , Package(..), packageName, packageVersion + , Dependency(Dependency), PackageFixedDeps(..) ) +import Distribution.Version + ( Version, withinRange ) +import Distribution.Simple.Utils (lowercase, equating, comparing) + + +-- | The collection of information about packages from one or more 'PackageDB's. +-- +-- It can be searched efficiently by package name and version. +-- +newtype PackageIndex pkg = PackageIndex + -- This index package names to all the package records matching that package + -- name case-sensitively. It includes all versions. + -- + -- This allows us to find all versions satisfying a dependency. + -- Most queries are a map lookup followed by a linear scan of the bucket. 
+ -- + (Map PackageName [pkg]) + + deriving (Show, Read, Functor) + +instance Package pkg => Monoid (PackageIndex pkg) where + mempty = PackageIndex Map.empty + mappend = merge + --save one mappend with empty in the common case: + mconcat [] = mempty + mconcat xs = foldr1 mappend xs + +invariant :: Package pkg => PackageIndex pkg -> Bool +invariant (PackageIndex m) = all (uncurry goodBucket) (Map.toList m) + where + goodBucket _ [] = False + goodBucket name (pkg0:pkgs0) = check (packageId pkg0) pkgs0 + where + check pkgid [] = packageName pkgid == name + check pkgid (pkg':pkgs) = packageName pkgid == name + && pkgid < pkgid' + && check pkgid' pkgs + where pkgid' = packageId pkg' + +-- +-- * Internal helpers +-- + +mkPackageIndex :: Package pkg => Map PackageName [pkg] -> PackageIndex pkg +mkPackageIndex index = assert (invariant (PackageIndex index)) + (PackageIndex index) + +internalError :: String -> a +internalError name = error ("PackageIndex." ++ name ++ ": internal error") + +-- | Lookup a name in the index to get all packages that match that name +-- case-sensitively. +-- +lookup :: Package pkg => PackageIndex pkg -> PackageName -> [pkg] +lookup (PackageIndex m) name = fromMaybe [] $ Map.lookup name m + +-- +-- * Construction +-- + +-- | Build an index out of a bunch of packages. +-- +-- If there are duplicates, later ones mask earlier ones. +-- +fromList :: Package pkg => [pkg] -> PackageIndex pkg +fromList pkgs = mkPackageIndex + . Map.map fixBucket + . Map.fromListWith (++) + $ [ (packageName pkg, [pkg]) + | pkg <- pkgs ] + where + fixBucket = -- out of groups of duplicates, later ones mask earlier ones + -- but Map.fromListWith (++) constructs groups in reverse order + map head + -- Eq instance for PackageIdentifier is wrong, so use Ord: + . groupBy (\a b -> EQ == comparing packageId a b) + -- relies on sortBy being a stable sort so we + -- can pick consistently among duplicates + . 
sortBy (comparing packageId) + +-- +-- * Updates +-- + +-- | Merge two indexes. +-- +-- Packages from the second mask packages of the same exact name +-- (case-sensitively) from the first. +-- +merge :: Package pkg => PackageIndex pkg -> PackageIndex pkg -> PackageIndex pkg +merge i1@(PackageIndex m1) i2@(PackageIndex m2) = + assert (invariant i1 && invariant i2) $ + mkPackageIndex (Map.unionWith mergeBuckets m1 m2) + +-- | Elements in the second list mask those in the first. +mergeBuckets :: Package pkg => [pkg] -> [pkg] -> [pkg] +mergeBuckets [] ys = ys +mergeBuckets xs [] = xs +mergeBuckets xs@(x:xs') ys@(y:ys') = + case packageId x `compare` packageId y of + GT -> y : mergeBuckets xs ys' + EQ -> y : mergeBuckets xs' ys' + LT -> x : mergeBuckets xs' ys + +-- | Inserts a single package into the index. +-- +-- This is equivalent to (but slightly quicker than) using 'mappend' or +-- 'merge' with a singleton index. +-- +insert :: Package pkg => pkg -> PackageIndex pkg -> PackageIndex pkg +insert pkg (PackageIndex index) = mkPackageIndex $ + Map.insertWith (\_ -> insertNoDup) (packageName pkg) [pkg] index + where + pkgid = packageId pkg + insertNoDup [] = [pkg] + insertNoDup pkgs@(pkg':pkgs') = case compare pkgid (packageId pkg') of + LT -> pkg : pkgs + EQ -> pkg : pkgs' + GT -> pkg' : insertNoDup pkgs' + +-- | Internal delete helper. +-- +delete :: Package pkg => PackageName -> (pkg -> Bool) -> PackageIndex pkg -> PackageIndex pkg +delete name p (PackageIndex index) = mkPackageIndex $ + Map.update filterBucket name index + where + filterBucket = deleteEmptyBucket + . filter (not . p) + deleteEmptyBucket [] = Nothing + deleteEmptyBucket remaining = Just remaining + +-- | Removes a single package from the index. +-- +deletePackageId :: Package pkg => PackageIdentifier -> PackageIndex pkg -> PackageIndex pkg +deletePackageId pkgid = + delete (packageName pkgid) (\pkg -> packageId pkg == pkgid) + +-- | Removes all packages with this (case-sensitive) name from the index. 
+-- +deletePackageName :: Package pkg => PackageName -> PackageIndex pkg -> PackageIndex pkg +deletePackageName name = + delete name (\pkg -> packageName pkg == name) + +-- | Removes all packages satisfying this dependency from the index. +-- +deleteDependency :: Package pkg => Dependency -> PackageIndex pkg -> PackageIndex pkg +deleteDependency (Dependency name verstionRange) = + delete name (\pkg -> packageVersion pkg `withinRange` verstionRange) + +-- +-- * Bulk queries +-- + +-- | Get all the packages from the index. +-- +allPackages :: Package pkg => PackageIndex pkg -> [pkg] +allPackages (PackageIndex m) = concat (Map.elems m) + +-- | Get all the packages from the index. +-- +-- They are grouped by package name, case-sensitively. +-- +allPackagesByName :: Package pkg => PackageIndex pkg -> [[pkg]] +allPackagesByName (PackageIndex m) = Map.elems m + +-- +-- * Lookups +-- + +elemByPackageId :: Package pkg => PackageIndex pkg -> PackageIdentifier -> Bool +elemByPackageId index = isJust . lookupPackageId index + +elemByPackageName :: Package pkg => PackageIndex pkg -> PackageName -> Bool +elemByPackageName index = not . null . lookupPackageName index + + +-- | Does a lookup by package id (name & version). +-- +-- Since multiple package DBs mask each other case-sensitively by package name, +-- then we get back at most one package. +-- +lookupPackageId :: Package pkg => PackageIndex pkg -> PackageIdentifier -> Maybe pkg +lookupPackageId index pkgid = + case [ pkg | pkg <- lookup index (packageName pkgid) + , packageId pkg == pkgid ] of + [] -> Nothing + [pkg] -> Just pkg + _ -> internalError "lookupPackageIdentifier" + +-- | Does a case-sensitive search by package name. +-- +lookupPackageName :: Package pkg => PackageIndex pkg -> PackageName -> [pkg] +lookupPackageName index name = + [ pkg | pkg <- lookup index name + , packageName pkg == name ] + +-- | Does a case-sensitive search by package name and a range of versions. 
+-- +-- We get back any number of versions of the specified package name, all +-- satisfying the version range constraint. +-- +lookupDependency :: Package pkg => PackageIndex pkg -> Dependency -> [pkg] +lookupDependency index (Dependency name versionRange) = + [ pkg | pkg <- lookup index name + , packageName pkg == name + , packageVersion pkg `withinRange` versionRange ] + +-- +-- * Case insensitive name lookups +-- + +-- | Does a case-insensitive search by package name. +-- +-- If there is only one package that compares case-insensitively to this name +-- then the search is unambiguous and we get back all versions of that package. +-- If several match case-insensitively but one matches exactly then it is also +-- unambiguous. +-- +-- If however several match case-insensitively and none match exactly then we +-- have an ambiguous result, and we get back all the versions of all the +-- packages. The list of ambiguous results is split by exact package name. So +-- it is a non-empty list of non-empty lists. +-- +searchByName :: Package pkg => PackageIndex pkg + -> String -> [(PackageName, [pkg])] +searchByName (PackageIndex m) name = + [ pkgs + | pkgs@(PackageName name',_) <- Map.toList m + , lowercase name' == lname ] + where + lname = lowercase name + +data SearchResult a = None | Unambiguous a | Ambiguous [a] + +-- | Does a case-insensitive substring search by package name. +-- +-- That is, all packages that contain the given string in their name. +-- +searchByNameSubstring :: Package pkg => PackageIndex pkg + -> String -> [(PackageName, [pkg])] +searchByNameSubstring (PackageIndex m) searchterm = + [ pkgs + | pkgs@(PackageName name, _) <- Map.toList m + , lsearchterm `isInfixOf` lowercase name ] + where + lsearchterm = lowercase searchterm + +-- +-- * Special queries +-- + +-- | All packages that have dependencies that are not in the index. +-- +-- Returns such packages along with the dependencies that they're missing. 
+-- +brokenPackages :: PackageFixedDeps pkg + => PackageIndex pkg + -> [(pkg, [PackageIdentifier])] +brokenPackages index = + [ (pkg, missing) + | pkg <- allPackages index + , let missing = [ pkg' | pkg' <- depends pkg + , isNothing (lookupPackageId index pkg') ] + , not (null missing) ] + +-- | Tries to take the transitive closure of the package dependencies. +-- +-- If the transitive closure is complete then it returns that subset of the +-- index. Otherwise it returns the broken packages as in 'brokenPackages'. +-- +-- * Note that if the result is @Right []@ it is because at least one of +-- the original given 'PackageIdentifier's do not occur in the index. +-- +dependencyClosure :: PackageFixedDeps pkg + => PackageIndex pkg + -> [PackageIdentifier] + -> Either (PackageIndex pkg) + [(pkg, [PackageIdentifier])] +dependencyClosure index pkgids0 = case closure mempty [] pkgids0 of + (completed, []) -> Left completed + (completed, _) -> Right (brokenPackages completed) + where + closure completed failed [] = (completed, failed) + closure completed failed (pkgid:pkgids) = case lookupPackageId index pkgid of + Nothing -> closure completed (pkgid:failed) pkgids + Just pkg -> case lookupPackageId completed (packageId pkg) of + Just _ -> closure completed failed pkgids + Nothing -> closure completed' failed pkgids' + where completed' = insert pkg completed + pkgids' = depends pkg ++ pkgids + +-- | Takes the transitive closure of the packages reverse dependencies. +-- +-- * The given 'PackageIdentifier's must be in the index. +-- +reverseDependencyClosure :: PackageFixedDeps pkg + => PackageIndex pkg + -> [PackageIdentifier] + -> [pkg] +reverseDependencyClosure index = + map vertexToPkg + . concatMap Tree.flatten + . Graph.dfs reverseDepGraph + . map (fromMaybe noSuchPkgId . 
pkgIdToVertex) + + where + (depGraph, vertexToPkg, pkgIdToVertex) = dependencyGraph index + reverseDepGraph = Graph.transposeG depGraph + noSuchPkgId = error "reverseDependencyClosure: package is not in the graph" + +topologicalOrder :: PackageFixedDeps pkg => PackageIndex pkg -> [pkg] +topologicalOrder index = map toPkgId + . Graph.topSort + $ graph + where (graph, toPkgId, _) = dependencyGraph index + +reverseTopologicalOrder :: PackageFixedDeps pkg => PackageIndex pkg -> [pkg] +reverseTopologicalOrder index = map toPkgId + . Graph.topSort + . Graph.transposeG + $ graph + where (graph, toPkgId, _) = dependencyGraph index + +-- | Given a package index where we assume we want to use all the packages +-- (use 'dependencyClosure' if you need to get such a index subset) find out +-- if the dependencies within it use consistent versions of each package. +-- Return all cases where multiple packages depend on different versions of +-- some other package. +-- +-- Each element in the result is a package name along with the packages that +-- depend on it and the versions they require. These are guaranteed to be +-- distinct. +-- +dependencyInconsistencies :: PackageFixedDeps pkg + => PackageIndex pkg + -> [(PackageName, [(PackageIdentifier, Version)])] +dependencyInconsistencies index = + [ (name, inconsistencies) + | (name, uses) <- Map.toList inverseIndex + , let inconsistencies = duplicatesBy uses + versions = map snd inconsistencies + , reallyIsInconsistent name (nub versions) ] + + where inverseIndex = Map.fromListWith (++) + [ (packageName dep, [(packageId pkg, packageVersion dep)]) + | pkg <- allPackages index + , dep <- depends pkg ] + + duplicatesBy = (\groups -> if length groups == 1 + then [] + else concat groups) + . groupBy (equating snd) + . 
sortBy (comparing snd) + + reallyIsInconsistent :: PackageName -> [Version] -> Bool + reallyIsInconsistent _ [] = False + reallyIsInconsistent name [v1, v2] = + case (mpkg1, mpkg2) of + (Just pkg1, Just pkg2) -> pkgid1 `notElem` depends pkg2 + && pkgid2 `notElem` depends pkg1 + _ -> True + where + pkgid1 = PackageIdentifier name v1 + pkgid2 = PackageIdentifier name v2 + mpkg1 = lookupPackageId index pkgid1 + mpkg2 = lookupPackageId index pkgid2 + + reallyIsInconsistent _ _ = True + +-- | Find if there are any cycles in the dependency graph. If there are no +-- cycles the result is @[]@. +-- +-- This actually computes the strongly connected components. So it gives us a +-- list of groups of packages where within each group they all depend on each +-- other, directly or indirectly. +-- +dependencyCycles :: PackageFixedDeps pkg + => PackageIndex pkg + -> [[pkg]] +dependencyCycles index = + [ vs | Graph.CyclicSCC vs <- Graph.stronglyConnComp adjacencyList ] + where + adjacencyList = [ (pkg, packageId pkg, depends pkg) + | pkg <- allPackages index ] + +-- | Builds a graph of the package dependencies. +-- +-- Dependencies on other packages that are not in the index are discarded. +-- You can check if there are any such dependencies with 'brokenPackages'. +-- +dependencyGraph :: PackageFixedDeps pkg + => PackageIndex pkg + -> (Graph.Graph, + Graph.Vertex -> pkg, + PackageIdentifier -> Maybe Graph.Vertex) +dependencyGraph index = (graph, vertexToPkg, pkgIdToVertex) + where + graph = Array.listArray bounds $ + map (catMaybes . map pkgIdToVertex . depends) pkgs + vertexToPkg vertex = pkgTable ! vertex + pkgIdToVertex = binarySearch 0 topBound + + pkgTable = Array.listArray bounds pkgs + pkgIdTable = Array.listArray bounds (map packageId pkgs) + pkgs = sortBy (comparing packageId) (allPackages index) + topBound = length pkgs - 1 + bounds = (0, topBound) + + binarySearch a b key + | a > b = Nothing + | otherwise = case compare key (pkgIdTable ! 
mid) of + LT -> binarySearch a (mid-1) key + EQ -> Just mid + GT -> binarySearch (mid+1) b key + where mid = (a + b) `div` 2 diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/PackageUtils.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/PackageUtils.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/PackageUtils.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/PackageUtils.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,34 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.PackageUtils +-- Copyright : (c) Duncan Coutts 2010 +-- License : BSD-like +-- +-- Maintainer : cabal-devel@gmail.com +-- Stability : provisional +-- Portability : portable +-- +-- Various package description utils that should be in the Cabal lib +----------------------------------------------------------------------------- +module Distribution.Client.PackageUtils ( + externalBuildDepends, + ) where + +import Distribution.Package + ( packageVersion, packageName, Dependency(..) ) +import Distribution.PackageDescription + ( PackageDescription(..) ) +import Distribution.Version + ( withinRange ) + +-- | The list of dependencies that refer to external packages +-- rather than internal package components. +-- +externalBuildDepends :: PackageDescription -> [Dependency] +externalBuildDepends pkg = filter (not . internal) (buildDepends pkg) + where + -- True if this dependency is an internal one (depends on a library + -- defined in the same package). 
+ internal (Dependency depName versionRange) = + depName == packageName pkg && + packageVersion pkg `withinRange` versionRange diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/ParseUtils.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/ParseUtils.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/ParseUtils.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/ParseUtils.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,62 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.ParseUtils +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- Parsing utilities. +----------------------------------------------------------------------------- + +module Distribution.Client.ParseUtils ( parseFields, ppFields, ppSection ) + where + +import Distribution.ParseUtils + ( FieldDescr(..), ParseResult(..), warning, lineNo ) +import qualified Distribution.ParseUtils as ParseUtils + ( Field(..) 
) + +import Control.Monad ( foldM ) +import Text.PrettyPrint ( (<>), (<+>), ($+$) ) +import qualified Data.Map as Map +import qualified Text.PrettyPrint as Disp + ( Doc, text, colon, vcat, empty, isEmpty, nest ) + +--FIXME: replace this with something better +parseFields :: [FieldDescr a] -> a -> [ParseUtils.Field] -> ParseResult a +parseFields fields = foldM setField + where + fieldMap = Map.fromList + [ (name, f) | f@(FieldDescr name _ _) <- fields ] + setField accum (ParseUtils.F line name value) = + case Map.lookup name fieldMap of + Just (FieldDescr _ _ set) -> set line value accum + Nothing -> do + warning $ "Unrecognized field " ++ name ++ " on line " ++ show line + return accum + setField accum f = do + warning $ "Unrecognized stanza on line " ++ show (lineNo f) + return accum + +-- | This is a customised version of the functions from Distribution.ParseUtils +-- that also optionally print default values for empty fields as comments. +-- +ppFields :: [FieldDescr a] -> (Maybe a) -> a -> Disp.Doc +ppFields fields def cur = Disp.vcat [ ppField name (fmap getter def) (getter cur) + | FieldDescr name getter _ <- fields] + +ppField :: String -> (Maybe Disp.Doc) -> Disp.Doc -> Disp.Doc +ppField name mdef cur + | Disp.isEmpty cur = maybe Disp.empty + (\def -> Disp.text "--" <+> Disp.text name + <> Disp.colon <+> def) mdef + | otherwise = Disp.text name <> Disp.colon <+> cur + +ppSection :: String -> String -> [FieldDescr a] -> (Maybe a) -> a -> Disp.Doc +ppSection name arg fields def cur + | Disp.isEmpty fieldsDoc = Disp.empty + | otherwise = Disp.text name <+> argDoc + $+$ (Disp.nest 2 fieldsDoc) + where + fieldsDoc = ppFields fields def cur + argDoc | arg == "" = Disp.empty + | otherwise = Disp.text arg diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Run.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Run.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Run.hs 
1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Run.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,95 @@ +{-# LANGUAGE CPP #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.Run +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- Implementation of the 'run' command. +----------------------------------------------------------------------------- + +module Distribution.Client.Run ( run, splitRunArgs ) + where + +import Distribution.Client.Utils (tryCanonicalizePath) + +import Distribution.PackageDescription (Executable (..), + PackageDescription (..)) +import Distribution.Simple.Compiler (compilerFlavor, CompilerFlavor(..)) +import Distribution.Simple.Build.PathsModule (pkgPathEnvVar) +import Distribution.Simple.BuildPaths (exeExtension) +import Distribution.Simple.LocalBuildInfo (ComponentName (..), + LocalBuildInfo (..), + getComponentLocalBuildInfo, + depLibraryPaths) +import Distribution.Simple.Utils (die, notice, rawSystemExitWithEnv, + addLibraryPath) +import Distribution.System (Platform (..)) +import Distribution.Verbosity (Verbosity) + +import qualified Distribution.Simple.GHCJS as GHCJS + +#if !MIN_VERSION_base(4,8,0) +import Data.Functor ((<$>)) +#endif +import Data.List (find) +import System.Directory (getCurrentDirectory) +import Distribution.Compat.Environment (getEnvironment) +import System.FilePath ((<.>), ()) + + +-- | Return the executable to run and any extra arguments that should be +-- forwarded to it. +splitRunArgs :: LocalBuildInfo -> [String] -> IO (Executable, [String]) +splitRunArgs lbi args = + case exes of + [] -> die "Couldn't find any executables." + [exe] -> case args of + [] -> return (exe, []) + (x:xs) | x == exeName exe -> return (exe, xs) + | otherwise -> return (exe, args) + _ -> case args of + [] -> die $ "This package contains multiple executables. 
" + ++ "You must pass the executable name as the first argument " + ++ "to 'cabal run'." + (x:xs) -> case find (\exe -> exeName exe == x) exes of + Nothing -> die $ "No executable named '" ++ x ++ "'." + Just exe -> return (exe, xs) + where + pkg_descr = localPkgDescr lbi + exes = executables pkg_descr + + +-- | Run a given executable. +run :: Verbosity -> LocalBuildInfo -> Executable -> [String] -> IO () +run verbosity lbi exe exeArgs = do + curDir <- getCurrentDirectory + let buildPref = buildDir lbi + pkg_descr = localPkgDescr lbi + dataDirEnvVar = (pkgPathEnvVar pkg_descr "datadir", + curDir dataDir pkg_descr) + + (path, runArgs) <- + case compilerFlavor (compiler lbi) of + GHCJS -> do + let (script, cmd, cmdArgs) = + GHCJS.runCmd (withPrograms lbi) + (buildPref exeName exe exeName exe) + script' <- tryCanonicalizePath script + return (cmd, cmdArgs ++ [script']) + _ -> do + p <- tryCanonicalizePath $ + buildPref exeName exe (exeName exe <.> exeExtension) + return (p, []) + + env <- (dataDirEnvVar:) <$> getEnvironment + -- Add (DY)LD_LIBRARY_PATH if needed + env' <- if withDynExe lbi + then do let (Platform _ os) = hostPlatform lbi + clbi = getComponentLocalBuildInfo lbi + (CExeName (exeName exe)) + paths <- depLibraryPaths True False lbi clbi + return (addLibraryPath os paths env) + else return env + notice verbosity $ "Running " ++ exeName exe ++ "..." 
+ rawSystemExitWithEnv verbosity path (runArgs++exeArgs) env' diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Sandbox/Index.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Sandbox/Index.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Sandbox/Index.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Sandbox/Index.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,242 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.Sandbox.Index +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- Querying and modifying local build tree references in the package index. +----------------------------------------------------------------------------- + +module Distribution.Client.Sandbox.Index ( + createEmpty, + addBuildTreeRefs, + removeBuildTreeRefs, + ListIgnoredBuildTreeRefs(..), RefTypesToList(..), + listBuildTreeRefs, + validateIndexPath, + + defaultIndexFileName + ) where + +import qualified Distribution.Client.Tar as Tar +import Distribution.Client.IndexUtils ( BuildTreeRefType(..) + , refTypeFromTypeCode + , typeCodeFromRefType + , updatePackageIndexCacheFile + , getSourcePackagesStrict ) +import Distribution.Client.PackageIndex ( allPackages ) +import Distribution.Client.Types ( Repo(..), LocalRepo(..) + , SourcePackageDb(..) + , SourcePackage(..), PackageLocation(..) 
) +import Distribution.Client.Utils ( byteStringToFilePath, filePathToByteString + , makeAbsoluteToCwd, tryCanonicalizePath + , canonicalizePathNoThrow + , tryFindAddSourcePackageDesc ) + +import Distribution.Simple.Utils ( die, debug ) +import Distribution.Verbosity ( Verbosity ) + +import qualified Data.ByteString.Lazy as BS +import Control.Exception ( evaluate ) +import Control.Monad ( liftM, unless ) +import Data.List ( (\\), intersect, nub ) +import Data.Maybe ( catMaybes ) +import System.Directory ( createDirectoryIfMissing, + doesDirectoryExist, doesFileExist, + renameFile ) +import System.FilePath ( (), (<.>), takeDirectory, takeExtension + , replaceExtension ) +import System.IO ( IOMode(..), SeekMode(..) + , hSeek, withBinaryFile ) + +-- | A reference to a local build tree. +data BuildTreeRef = BuildTreeRef { + buildTreeRefType :: !BuildTreeRefType, + buildTreePath :: !FilePath + } + +defaultIndexFileName :: FilePath +defaultIndexFileName = "00-index.tar" + +-- | Given a path, ensure that it refers to a local build tree. +buildTreeRefFromPath :: BuildTreeRefType -> FilePath -> IO (Maybe BuildTreeRef) +buildTreeRefFromPath refType dir = do + dirExists <- doesDirectoryExist dir + unless dirExists $ + die $ "directory '" ++ dir ++ "' does not exist" + _ <- tryFindAddSourcePackageDesc dir "Error adding source reference." + return . Just $ BuildTreeRef refType dir + +-- | Given a tar archive entry, try to parse it as a local build tree reference. +readBuildTreeRef :: Tar.Entry -> Maybe BuildTreeRef +readBuildTreeRef entry = case Tar.entryContent entry of + (Tar.OtherEntryType typeCode bs size) + | (Tar.isBuildTreeRefTypeCode typeCode) + && (size == BS.length bs) -> Just $! BuildTreeRef + (refTypeFromTypeCode typeCode) + (byteStringToFilePath bs) + | otherwise -> Nothing + _ -> Nothing + +-- | Given a sequence of tar archive entries, extract all references to local +-- build trees. 
+readBuildTreeRefs :: Tar.Entries -> [BuildTreeRef] +readBuildTreeRefs = + catMaybes + . Tar.foldrEntries (\e r -> readBuildTreeRef e : r) + [] error + +-- | Given a path to a tar archive, extract all references to local build trees. +readBuildTreeRefsFromFile :: FilePath -> IO [BuildTreeRef] +readBuildTreeRefsFromFile = liftM (readBuildTreeRefs . Tar.read) . BS.readFile + +-- | Given a local build tree ref, serialise it to a tar archive entry. +writeBuildTreeRef :: BuildTreeRef -> Tar.Entry +writeBuildTreeRef (BuildTreeRef refType path) = Tar.simpleEntry tarPath content + where + bs = filePathToByteString path + -- Provide a filename for tools that treat custom entries as ordinary files. + tarPath' = "local-build-tree-reference" + -- fromRight can't fail because the path is shorter than 255 characters. + tarPath = fromRight $ Tar.toTarPath True tarPath' + content = Tar.OtherEntryType (typeCodeFromRefType refType) bs (BS.length bs) + + -- TODO: Move this to D.C.Utils? + fromRight (Left err) = error err + fromRight (Right a) = a + +-- | Check that the provided path is either an existing directory, or a tar +-- archive in an existing directory. +validateIndexPath :: FilePath -> IO FilePath +validateIndexPath path' = do + path <- makeAbsoluteToCwd path' + if (== ".tar") . takeExtension $ path + then return path + else do dirExists <- doesDirectoryExist path + unless dirExists $ + die $ "directory does not exist: '" ++ path ++ "'" + return $ path defaultIndexFileName + +-- | Create an empty index file. +createEmpty :: Verbosity -> FilePath -> IO () +createEmpty verbosity path = do + indexExists <- doesFileExist path + if indexExists + then debug verbosity $ "Package index already exists: " ++ path + else do + debug verbosity $ "Creating the index file '" ++ path ++ "'" + createDirectoryIfMissing True (takeDirectory path) + -- Equivalent to 'tar cvf empty.tar --files-from /dev/null'. 
+ let zeros = BS.replicate (512*20) 0 + BS.writeFile path zeros + +-- | Add given local build tree references to the index. +addBuildTreeRefs :: Verbosity -> FilePath -> [FilePath] -> BuildTreeRefType + -> IO () +addBuildTreeRefs _ _ [] _ = + error "Distribution.Client.Sandbox.Index.addBuildTreeRefs: unexpected" +addBuildTreeRefs verbosity path l' refType = do + checkIndexExists path + l <- liftM nub . mapM tryCanonicalizePath $ l' + treesInIndex <- fmap (map buildTreePath) (readBuildTreeRefsFromFile path) + -- Add only those paths that aren't already in the index. + treesToAdd <- mapM (buildTreeRefFromPath refType) (l \\ treesInIndex) + let entries = map writeBuildTreeRef (catMaybes treesToAdd) + unless (null entries) $ do + offset <- + fmap (Tar.foldrEntries (\e acc -> Tar.entrySizeInBytes e + acc) 0 error + . Tar.read) $ BS.readFile path + _ <- evaluate offset + debug verbosity $ "Writing at offset: " ++ show offset + withBinaryFile path ReadWriteMode $ \h -> do + hSeek h AbsoluteSeek (fromIntegral offset) + BS.hPut h (Tar.write entries) + debug verbosity $ "Successfully appended to '" ++ path ++ "'" + updatePackageIndexCacheFile verbosity path + (path `replaceExtension` "cache") + +-- | Remove given local build tree references from the index. +removeBuildTreeRefs :: Verbosity -> FilePath -> [FilePath] -> IO [FilePath] +removeBuildTreeRefs _ _ [] = + error "Distribution.Client.Sandbox.Index.removeBuildTreeRefs: unexpected" +removeBuildTreeRefs verbosity path l' = do + checkIndexExists path + l <- mapM canonicalizePathNoThrow l' + let tmpFile = path <.> "tmp" + -- Performance note: on my system, it takes 'index --remove-source' + -- approx. 3,5s to filter a 65M file. Real-life indices are expected to be + -- much smaller. + BS.writeFile tmpFile . Tar.writeEntries . Tar.filterEntries (p l) . 
Tar.read + =<< BS.readFile path + renameFile tmpFile path + debug verbosity $ "Successfully renamed '" ++ tmpFile + ++ "' to '" ++ path ++ "'" + updatePackageIndexCacheFile verbosity path (path `replaceExtension` "cache") + -- FIXME: return only the refs that vere actually removed. + return l + where + p l entry = case readBuildTreeRef entry of + Nothing -> True + -- FIXME: removing snapshot deps is done with `delete-source + -- .cabal-sandbox/snapshots/$SNAPSHOT_NAME`. Perhaps we also want to + -- support removing snapshots by providing the original path. + (Just (BuildTreeRef _ pth)) -> pth `notElem` l + +-- | A build tree ref can become ignored if the user later adds a build tree ref +-- with the same package ID. We display ignored build tree refs when the user +-- runs 'cabal sandbox list-sources', but do not look at their timestamps in +-- 'reinstallAddSourceDeps'. +data ListIgnoredBuildTreeRefs = ListIgnored | DontListIgnored + +-- | Which types of build tree refs should be listed? +data RefTypesToList = OnlySnapshots | OnlyLinks | LinksAndSnapshots + +-- | List the local build trees that are referred to from the index. +listBuildTreeRefs :: Verbosity -> ListIgnoredBuildTreeRefs -> RefTypesToList + -> FilePath + -> IO [FilePath] +listBuildTreeRefs verbosity listIgnored refTypesToList path = do + checkIndexExists path + buildTreeRefs <- + case listIgnored of + DontListIgnored -> do + paths <- listWithoutIgnored + case refTypesToList of + LinksAndSnapshots -> return paths + _ -> do + allPathsFiltered <- fmap (map buildTreePath . filter predicate) + listWithIgnored + _ <- evaluate (length allPathsFiltered) + return (paths `intersect` allPathsFiltered) + + ListIgnored -> fmap (map buildTreePath . filter predicate) listWithIgnored + + _ <- evaluate (length buildTreeRefs) + return buildTreeRefs + + where + predicate :: BuildTreeRef -> Bool + predicate = case refTypesToList of + OnlySnapshots -> (==) SnapshotRef . buildTreeRefType + OnlyLinks -> (==) LinkRef . 
buildTreeRefType + LinksAndSnapshots -> const True + + listWithIgnored :: IO [BuildTreeRef] + listWithIgnored = readBuildTreeRefsFromFile $ path + + listWithoutIgnored :: IO [FilePath] + listWithoutIgnored = do + let repo = Repo { repoKind = Right LocalRepo + , repoLocalDir = takeDirectory path } + pkgIndex <- fmap packageIndex + . getSourcePackagesStrict verbosity $ [repo] + return [ pkgPath | (LocalUnpackedPackage pkgPath) <- + map packageSource . allPackages $ pkgIndex ] + + +-- | Check that the package index file exists and exit with error if it does not. +checkIndexExists :: FilePath -> IO () +checkIndexExists path = do + indexExists <- doesFileExist path + unless indexExists $ + die $ "index does not exist: '" ++ path ++ "'" diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Sandbox/PackageEnvironment.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Sandbox/PackageEnvironment.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Sandbox/PackageEnvironment.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Sandbox/PackageEnvironment.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,573 @@ +{-# LANGUAGE CPP #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.Sandbox.PackageEnvironment +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- Utilities for working with the package environment file. Patterned after +-- Distribution.Client.Config. +----------------------------------------------------------------------------- + +module Distribution.Client.Sandbox.PackageEnvironment ( + PackageEnvironment(..) + , IncludeComments(..) + , PackageEnvironmentType(..) 
+ , classifyPackageEnvironment + , createPackageEnvironmentFile + , tryLoadSandboxPackageEnvironmentFile + , readPackageEnvironmentFile + , showPackageEnvironment + , showPackageEnvironmentWithComments + , setPackageDB + , sandboxPackageDBPath + , loadUserConfig + + , basePackageEnvironment + , initialPackageEnvironment + , commentPackageEnvironment + , sandboxPackageEnvironmentFile + , userPackageEnvironmentFile + ) where + +import Distribution.Client.Config ( SavedConfig(..), commentSavedConfig + , loadConfig, configFieldDescriptions + , haddockFlagsFields + , installDirsFields, withProgramsFields + , withProgramOptionsFields + , defaultCompiler ) +import Distribution.Client.ParseUtils ( parseFields, ppFields, ppSection ) +import Distribution.Client.Setup ( GlobalFlags(..), ConfigExFlags(..) + , InstallFlags(..) + , defaultSandboxLocation ) +import Distribution.Utils.NubList ( toNubList ) +import Distribution.Simple.Compiler ( Compiler, PackageDB(..) + , compilerFlavor, showCompilerIdWithAbi ) +import Distribution.Simple.InstallDirs ( InstallDirs(..), PathTemplate + , defaultInstallDirs, combineInstallDirs + , fromPathTemplate, toPathTemplate ) +import Distribution.Simple.Setup ( Flag(..) + , ConfigFlags(..), HaddockFlags(..) + , fromFlagOrDefault, toFlag, flagToMaybe ) +import Distribution.Simple.Utils ( die, info, notice, warn ) +import Distribution.ParseUtils ( FieldDescr(..), ParseResult(..) + , commaListField, commaNewLineListField + , liftField, lineNo, locatedErrorMsg + , parseFilePathQ, readFields + , showPWarning, simpleField + , syntaxError, warning ) +import Distribution.System ( Platform ) +import Distribution.Verbosity ( Verbosity, normal ) +import Control.Monad ( foldM, liftM2, when, unless ) +import Data.List ( partition ) +import Data.Maybe ( isJust ) +#if !MIN_VERSION_base(4,8,0) +import Data.Monoid ( Monoid(..) 
) +#endif +import Distribution.Compat.Exception ( catchIO ) +import System.Directory ( doesDirectoryExist, doesFileExist + , renameFile ) +import System.FilePath ( (<.>), (), takeDirectory ) +import System.IO.Error ( isDoesNotExistError ) +import Text.PrettyPrint ( ($+$) ) + +import qualified Text.PrettyPrint as Disp +import qualified Distribution.Compat.ReadP as Parse +import qualified Distribution.ParseUtils as ParseUtils ( Field(..) ) +import qualified Distribution.Text as Text + + +-- +-- * Configuration saved in the package environment file +-- + +-- TODO: would be nice to remove duplication between +-- D.C.Sandbox.PackageEnvironment and D.C.Config. +data PackageEnvironment = PackageEnvironment { + -- The 'inherit' feature is not used ATM, but could be useful in the future + -- for constructing nested sandboxes (see discussion in #1196). + pkgEnvInherit :: Flag FilePath, + pkgEnvSavedConfig :: SavedConfig +} + +instance Monoid PackageEnvironment where + mempty = PackageEnvironment { + pkgEnvInherit = mempty, + pkgEnvSavedConfig = mempty + } + + mappend a b = PackageEnvironment { + pkgEnvInherit = combine pkgEnvInherit, + pkgEnvSavedConfig = combine pkgEnvSavedConfig + } + where + combine f = f a `mappend` f b + +-- | The automatically-created package environment file that should not be +-- touched by the user. +sandboxPackageEnvironmentFile :: FilePath +sandboxPackageEnvironmentFile = "cabal.sandbox.config" + +-- | Optional package environment file that can be used to customize the default +-- settings. Created by the user. +userPackageEnvironmentFile :: FilePath +userPackageEnvironmentFile = "cabal.config" + +-- | Type of the current package environment. +data PackageEnvironmentType = + SandboxPackageEnvironment -- ^ './cabal.sandbox.config' + | UserPackageEnvironment -- ^ './cabal.config' + | AmbientPackageEnvironment -- ^ '~/.cabal/config' + +-- | Is there a 'cabal.sandbox.config' or 'cabal.config' in this +-- directory? 
+classifyPackageEnvironment :: FilePath -> Flag FilePath -> Flag Bool + -> IO PackageEnvironmentType +classifyPackageEnvironment pkgEnvDir sandboxConfigFileFlag ignoreSandboxFlag = + do isSandbox <- liftM2 (||) (return forceSandboxConfig) + (configExists sandboxPackageEnvironmentFile) + isUser <- configExists userPackageEnvironmentFile + return (classify isSandbox isUser) + where + configExists fname = doesFileExist (pkgEnvDir fname) + ignoreSandbox = fromFlagOrDefault False ignoreSandboxFlag + forceSandboxConfig = isJust . flagToMaybe $ sandboxConfigFileFlag + + classify :: Bool -> Bool -> PackageEnvironmentType + classify True _ + | not ignoreSandbox = SandboxPackageEnvironment + classify _ True = UserPackageEnvironment + classify _ False = AmbientPackageEnvironment + +-- | Defaults common to 'initialPackageEnvironment' and +-- 'commentPackageEnvironment'. +commonPackageEnvironmentConfig :: FilePath -> SavedConfig +commonPackageEnvironmentConfig sandboxDir = + mempty { + savedConfigureFlags = mempty { + -- TODO: Currently, we follow cabal-dev and set 'user-install: False' in + -- the config file. In the future we may want to distinguish between + -- global, sandbox and user install types. + configUserInstall = toFlag False, + configInstallDirs = installDirs + }, + savedUserInstallDirs = installDirs, + savedGlobalInstallDirs = installDirs, + savedGlobalFlags = mempty { + globalLogsDir = toFlag $ sandboxDir "logs", + -- Is this right? cabal-dev uses the global world file. + globalWorldFile = toFlag $ sandboxDir "world" + } + } + where + installDirs = sandboxInstallDirs sandboxDir + +-- | 'commonPackageEnvironmentConfig' wrapped inside a 'PackageEnvironment'. +commonPackageEnvironment :: FilePath -> PackageEnvironment +commonPackageEnvironment sandboxDir = mempty { + pkgEnvSavedConfig = commonPackageEnvironmentConfig sandboxDir + } + +-- | Given a path to a sandbox, return the corresponding InstallDirs record. 
+sandboxInstallDirs :: FilePath -> InstallDirs (Flag PathTemplate) +sandboxInstallDirs sandboxDir = mempty { + prefix = toFlag (toPathTemplate sandboxDir) + } + +-- | These are the absolute basic defaults, the fields that must be +-- initialised. When we load the package environment from the file we layer the +-- loaded values over these ones. +basePackageEnvironment :: PackageEnvironment +basePackageEnvironment = + mempty { + pkgEnvSavedConfig = mempty { + savedConfigureFlags = mempty { + configHcFlavor = toFlag defaultCompiler, + configVerbosity = toFlag normal + } + } + } + +-- | Initial configuration that we write out to the package environment file if +-- it does not exist. When the package environment gets loaded this +-- configuration gets layered on top of 'basePackageEnvironment'. +initialPackageEnvironment :: FilePath -> Compiler -> Platform + -> IO PackageEnvironment +initialPackageEnvironment sandboxDir compiler platform = do + defInstallDirs <- defaultInstallDirs (compilerFlavor compiler) + {- userInstall= -} False {- _hasLibs= -} False + let initialConfig = commonPackageEnvironmentConfig sandboxDir + installDirs = combineInstallDirs (\d f -> Flag $ fromFlagOrDefault d f) + defInstallDirs (savedUserInstallDirs initialConfig) + return $ mempty { + pkgEnvSavedConfig = initialConfig { + savedUserInstallDirs = installDirs, + savedGlobalInstallDirs = installDirs, + savedGlobalFlags = (savedGlobalFlags initialConfig) { + globalLocalRepos = toNubList [sandboxDir "packages"] + }, + savedConfigureFlags = setPackageDB sandboxDir compiler platform + (savedConfigureFlags initialConfig), + savedInstallFlags = (savedInstallFlags initialConfig) { + installSummaryFile = toNubList [toPathTemplate (sandboxDir + "logs" "build.log")] + } + } + } + +-- | Return the path to the sandbox package database. 
+sandboxPackageDBPath :: FilePath -> Compiler -> Platform -> String +sandboxPackageDBPath sandboxDir compiler platform = + sandboxDir + (Text.display platform ++ "-" + ++ showCompilerIdWithAbi compiler + ++ "-packages.conf.d") +-- The path in sandboxPackageDBPath should be kept in sync with the +-- path in the bootstrap.sh which is used to bootstrap cabal-install +-- into a sandbox. + +-- | Use the package DB location specific for this compiler. +setPackageDB :: FilePath -> Compiler -> Platform -> ConfigFlags -> ConfigFlags +setPackageDB sandboxDir compiler platform configFlags = + configFlags { + configPackageDBs = [Just (SpecificPackageDB $ sandboxPackageDBPath + sandboxDir + compiler + platform)] + } + +-- | Almost the same as 'savedConf `mappend` pkgEnv', but some settings are +-- overridden instead of mappend'ed. +overrideSandboxSettings :: PackageEnvironment -> PackageEnvironment -> + PackageEnvironment +overrideSandboxSettings pkgEnv0 pkgEnv = + pkgEnv { + pkgEnvSavedConfig = mappendedConf { + savedConfigureFlags = (savedConfigureFlags mappendedConf) { + configPackageDBs = configPackageDBs pkgEnvConfigureFlags + } + , savedInstallFlags = (savedInstallFlags mappendedConf) { + installSummaryFile = installSummaryFile pkgEnvInstallFlags + } + }, + pkgEnvInherit = pkgEnvInherit pkgEnv0 + } + where + pkgEnvConf = pkgEnvSavedConfig pkgEnv + mappendedConf = (pkgEnvSavedConfig pkgEnv0) `mappend` pkgEnvConf + pkgEnvConfigureFlags = savedConfigureFlags pkgEnvConf + pkgEnvInstallFlags = savedInstallFlags pkgEnvConf + +-- | Default values that get used if no value is given. Used here to include in +-- comments when we write out the initial package environment. 
+commentPackageEnvironment :: FilePath -> IO PackageEnvironment +commentPackageEnvironment sandboxDir = do + commentConf <- commentSavedConfig + let baseConf = commonPackageEnvironmentConfig sandboxDir + return $ mempty { + pkgEnvSavedConfig = commentConf `mappend` baseConf + } + +-- | If this package environment inherits from some other package environment, +-- return that package environment; otherwise return mempty. +inheritedPackageEnvironment :: Verbosity -> PackageEnvironment + -> IO PackageEnvironment +inheritedPackageEnvironment verbosity pkgEnv = do + case (pkgEnvInherit pkgEnv) of + NoFlag -> return mempty + confPathFlag@(Flag _) -> do + conf <- loadConfig verbosity confPathFlag NoFlag + return $ mempty { pkgEnvSavedConfig = conf } + +-- | Load the user package environment if it exists (the optional "cabal.config" +-- file). +userPackageEnvironment :: Verbosity -> FilePath -> IO PackageEnvironment +userPackageEnvironment verbosity pkgEnvDir = do + let path = pkgEnvDir userPackageEnvironmentFile + minp <- readPackageEnvironmentFile mempty path + case minp of + Nothing -> return mempty + Just (ParseOk warns parseResult) -> do + when (not $ null warns) $ warn verbosity $ + unlines (map (showPWarning path) warns) + return parseResult + Just (ParseFailed err) -> do + let (line, msg) = locatedErrorMsg err + warn verbosity $ "Error parsing user package environment file " ++ path + ++ maybe "" (\n -> ":" ++ show n) line ++ ":\n" ++ msg + return mempty + +-- | Same as @userPackageEnvironmentFile@, but returns a SavedConfig. +loadUserConfig :: Verbosity -> FilePath -> IO SavedConfig +loadUserConfig verbosity pkgEnvDir = fmap pkgEnvSavedConfig + $ userPackageEnvironment verbosity pkgEnvDir + +-- | Common error handling code used by 'tryLoadSandboxPackageEnvironment' and +-- 'updatePackageEnvironment'. 
+handleParseResult :: Verbosity -> FilePath + -> Maybe (ParseResult PackageEnvironment) + -> IO PackageEnvironment +handleParseResult verbosity path minp = + case minp of + Nothing -> die $ + "The package environment file '" ++ path ++ "' doesn't exist" + Just (ParseOk warns parseResult) -> do + when (not $ null warns) $ warn verbosity $ + unlines (map (showPWarning path) warns) + return parseResult + Just (ParseFailed err) -> do + let (line, msg) = locatedErrorMsg err + die $ "Error parsing package environment file " ++ path + ++ maybe "" (\n -> ":" ++ show n) line ++ ":\n" ++ msg + +-- | Try to load the given package environment file, exiting with error if it +-- doesn't exist. Also returns the path to the sandbox directory. The path +-- parameter should refer to an existing file. +tryLoadSandboxPackageEnvironmentFile :: Verbosity -> FilePath -> (Flag FilePath) + -> IO (FilePath, PackageEnvironment) +tryLoadSandboxPackageEnvironmentFile verbosity pkgEnvFile configFileFlag = do + let pkgEnvDir = takeDirectory pkgEnvFile + minp <- readPackageEnvironmentFile mempty pkgEnvFile + pkgEnv <- handleParseResult verbosity pkgEnvFile minp + + -- Get the saved sandbox directory. + -- TODO: Use substPathTemplate with + -- compilerTemplateEnv ++ platformTemplateEnv ++ abiTemplateEnv. + let sandboxDir = fromFlagOrDefault defaultSandboxLocation + . fmap fromPathTemplate . prefix . savedUserInstallDirs + . pkgEnvSavedConfig $ pkgEnv + + -- Do some sanity checks + dirExists <- doesDirectoryExist sandboxDir + -- TODO: Also check for an initialised package DB? + unless dirExists $ + die ("No sandbox exists at " ++ sandboxDir) + info verbosity $ "Using a sandbox located at " ++ sandboxDir + + let base = basePackageEnvironment + let common = commonPackageEnvironment sandboxDir + user <- userPackageEnvironment verbosity pkgEnvDir + inherited <- inheritedPackageEnvironment verbosity user + + -- Layer the package environment settings over settings from ~/.cabal/config. 
+ cabalConfig <- fmap unsetSymlinkBinDir $ + loadConfig verbosity configFileFlag NoFlag + return (sandboxDir, + updateInstallDirs $ + (base `mappend` (toPkgEnv cabalConfig) `mappend` + common `mappend` inherited `mappend` user) + `overrideSandboxSettings` pkgEnv) + where + toPkgEnv config = mempty { pkgEnvSavedConfig = config } + + updateInstallDirs pkgEnv = + let config = pkgEnvSavedConfig pkgEnv + configureFlags = savedConfigureFlags config + installDirs = savedUserInstallDirs config + in pkgEnv { + pkgEnvSavedConfig = config { + savedConfigureFlags = configureFlags { + configInstallDirs = installDirs + } + } + } + + -- We don't want to inherit the value of 'symlink-bindir' from + -- '~/.cabal/config'. See #1514. + unsetSymlinkBinDir config = + let installFlags = savedInstallFlags config + in config { + savedInstallFlags = installFlags { + installSymlinkBinDir = NoFlag + } + } + +-- | Should the generated package environment file include comments? +data IncludeComments = IncludeComments | NoComments + +-- | Create a new package environment file, replacing the existing one if it +-- exists. Note that the path parameters should point to existing directories. +createPackageEnvironmentFile :: Verbosity -> FilePath -> FilePath + -> IncludeComments + -> Compiler + -> Platform + -> IO () +createPackageEnvironmentFile verbosity sandboxDir pkgEnvFile incComments + compiler platform = do + notice verbosity $ "Writing a default package environment file to " + ++ pkgEnvFile + + commentPkgEnv <- commentPackageEnvironment sandboxDir + initialPkgEnv <- initialPackageEnvironment sandboxDir compiler platform + writePackageEnvironmentFile pkgEnvFile incComments commentPkgEnv initialPkgEnv + +-- | Descriptions of all fields in the package environment file. +pkgEnvFieldDescrs :: [FieldDescr PackageEnvironment] +pkgEnvFieldDescrs = [ + simpleField "inherit" + (fromFlagOrDefault Disp.empty . 
fmap Disp.text) (optional parseFilePathQ) + pkgEnvInherit (\v pkgEnv -> pkgEnv { pkgEnvInherit = v }) + + -- FIXME: Should we make these fields part of ~/.cabal/config ? + , commaNewLineListField "constraints" + Text.disp Text.parse + (configExConstraints . savedConfigureExFlags . pkgEnvSavedConfig) + (\v pkgEnv -> updateConfigureExFlags pkgEnv + (\flags -> flags { configExConstraints = v })) + + , commaListField "preferences" + Text.disp Text.parse + (configPreferences . savedConfigureExFlags . pkgEnvSavedConfig) + (\v pkgEnv -> updateConfigureExFlags pkgEnv + (\flags -> flags { configPreferences = v })) + ] + ++ map toPkgEnv configFieldDescriptions' + where + optional = Parse.option mempty . fmap toFlag + + configFieldDescriptions' :: [FieldDescr SavedConfig] + configFieldDescriptions' = filter + (\(FieldDescr name _ _) -> name /= "preference" && name /= "constraint") + configFieldDescriptions + + toPkgEnv :: FieldDescr SavedConfig -> FieldDescr PackageEnvironment + toPkgEnv fieldDescr = + liftField pkgEnvSavedConfig + (\savedConfig pkgEnv -> pkgEnv { pkgEnvSavedConfig = savedConfig}) + fieldDescr + + updateConfigureExFlags :: PackageEnvironment + -> (ConfigExFlags -> ConfigExFlags) + -> PackageEnvironment + updateConfigureExFlags pkgEnv f = pkgEnv { + pkgEnvSavedConfig = (pkgEnvSavedConfig pkgEnv) { + savedConfigureExFlags = f . savedConfigureExFlags . pkgEnvSavedConfig + $ pkgEnv + } + } + +-- | Read the package environment file. +readPackageEnvironmentFile :: PackageEnvironment -> FilePath + -> IO (Maybe (ParseResult PackageEnvironment)) +readPackageEnvironmentFile initial file = + handleNotExists $ + fmap (Just . parsePackageEnvironment initial) (readFile file) + where + handleNotExists action = catchIO action $ \ioe -> + if isDoesNotExistError ioe + then return Nothing + else ioError ioe + +-- | Parse the package environment file. 
+parsePackageEnvironment :: PackageEnvironment -> String + -> ParseResult PackageEnvironment +parsePackageEnvironment initial str = do + fields <- readFields str + let (knownSections, others) = partition isKnownSection fields + pkgEnv <- parse others + let config = pkgEnvSavedConfig pkgEnv + installDirs0 = savedUserInstallDirs config + (haddockFlags, installDirs, paths, args) <- + foldM parseSections + (savedHaddockFlags config, installDirs0, [], []) + knownSections + return pkgEnv { + pkgEnvSavedConfig = config { + savedConfigureFlags = (savedConfigureFlags config) { + configProgramPaths = paths, + configProgramArgs = args + }, + savedHaddockFlags = haddockFlags, + savedUserInstallDirs = installDirs, + savedGlobalInstallDirs = installDirs + } + } + + where + isKnownSection :: ParseUtils.Field -> Bool + isKnownSection (ParseUtils.Section _ "haddock" _ _) = True + isKnownSection (ParseUtils.Section _ "install-dirs" _ _) = True + isKnownSection (ParseUtils.Section _ "program-locations" _ _) = True + isKnownSection (ParseUtils.Section _ "program-default-options" _ _) = True + isKnownSection _ = False + + parse :: [ParseUtils.Field] -> ParseResult PackageEnvironment + parse = parseFields pkgEnvFieldDescrs initial + + parseSections :: SectionsAccum -> ParseUtils.Field + -> ParseResult SectionsAccum + parseSections accum@(h,d,p,a) + (ParseUtils.Section _ "haddock" name fs) + | name == "" = do h' <- parseFields haddockFlagsFields h fs + return (h', d, p, a) + | otherwise = do + warning "The 'haddock' section should be unnamed" + return accum + parseSections (h,d,p,a) + (ParseUtils.Section line "install-dirs" name fs) + | name == "" = do d' <- parseFields installDirsFields d fs + return (h, d',p,a) + | otherwise = + syntaxError line $ + "Named 'install-dirs' section: '" ++ name + ++ "'. Note that named 'install-dirs' sections are not allowed in the '" + ++ userPackageEnvironmentFile ++ "' file." 
+ parseSections accum@(h, d,p,a) + (ParseUtils.Section _ "program-locations" name fs) + | name == "" = do p' <- parseFields withProgramsFields p fs + return (h, d, p', a) + | otherwise = do + warning "The 'program-locations' section should be unnamed" + return accum + parseSections accum@(h, d, p, a) + (ParseUtils.Section _ "program-default-options" name fs) + | name == "" = do a' <- parseFields withProgramOptionsFields a fs + return (h, d, p, a') + | otherwise = do + warning "The 'program-default-options' section should be unnamed" + return accum + parseSections accum f = do + warning $ "Unrecognized stanza on line " ++ show (lineNo f) + return accum + +-- | Accumulator type for 'parseSections'. +type SectionsAccum = (HaddockFlags, InstallDirs (Flag PathTemplate) + , [(String, FilePath)], [(String, [String])]) + +-- | Write out the package environment file. +writePackageEnvironmentFile :: FilePath -> IncludeComments + -> PackageEnvironment -> PackageEnvironment + -> IO () +writePackageEnvironmentFile path incComments comments pkgEnv = do + let tmpPath = (path <.> "tmp") + writeFile tmpPath $ explanation ++ pkgEnvStr ++ "\n" + renameFile tmpPath path + where + pkgEnvStr = case incComments of + IncludeComments -> showPackageEnvironmentWithComments + (Just comments) pkgEnv + NoComments -> showPackageEnvironment pkgEnv + explanation = unlines + ["-- This is a Cabal package environment file." + ,"-- THIS FILE IS AUTO-GENERATED. DO NOT EDIT DIRECTLY." + ,"-- Please create a 'cabal.config' file in the same directory" + ,"-- if you want to change the default settings for this sandbox." + ,"","" + ] + +-- | Pretty-print the package environment. +showPackageEnvironment :: PackageEnvironment -> String +showPackageEnvironment pkgEnv = showPackageEnvironmentWithComments Nothing pkgEnv + +-- | Pretty-print the package environment with default values for empty fields +-- commented out (just like the default ~/.cabal/config). 
+showPackageEnvironmentWithComments :: (Maybe PackageEnvironment) + -> PackageEnvironment + -> String +showPackageEnvironmentWithComments mdefPkgEnv pkgEnv = Disp.render $ + ppFields pkgEnvFieldDescrs mdefPkgEnv pkgEnv + $+$ Disp.text "" + $+$ ppSection "install-dirs" "" installDirsFields + (fmap installDirsSection mdefPkgEnv) (installDirsSection pkgEnv) + where + installDirsSection = savedUserInstallDirs . pkgEnvSavedConfig diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Sandbox/Timestamp.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Sandbox/Timestamp.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Sandbox/Timestamp.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Sandbox/Timestamp.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,292 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.Sandbox.Timestamp +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- Timestamp file handling (for add-source dependencies). 
+-----------------------------------------------------------------------------
+
+module Distribution.Client.Sandbox.Timestamp (
+ AddSourceTimestamp,
+ withAddTimestamps,
+ withRemoveTimestamps,
+ withUpdateTimestamps,
+ maybeAddCompilerTimestampRecord,
+ listModifiedDeps,
+ ) where
+
+import Control.Exception (IOException)
+import Control.Monad (filterM, forM, when)
+import Data.Char (isSpace)
+import Data.List (partition)
+import System.Directory (renameFile)
+import System.FilePath ((<.>), (</>))
+import qualified Data.Map as M
+
+import Distribution.Compiler (CompilerId)
+import Distribution.Package (packageName)
+import Distribution.PackageDescription.Configuration (flattenPackageDescription)
+import Distribution.PackageDescription.Parse (readPackageDescription)
+import Distribution.Simple.Setup (Flag (..),
+ SDistFlags (..),
+ defaultSDistFlags,
+ sdistCommand)
+import Distribution.Simple.Utils (debug, die, warn)
+import Distribution.System (Platform)
+import Distribution.Text (display)
+import Distribution.Verbosity (Verbosity, lessVerbose,
+ normal)
+import Distribution.Version (Version (..),
+ orLaterVersion)
+
+import Distribution.Client.Sandbox.Index
+ (ListIgnoredBuildTreeRefs (ListIgnored), RefTypesToList(OnlyLinks)
+ ,listBuildTreeRefs)
+import Distribution.Client.SetupWrapper (SetupScriptOptions (..),
+ defaultSetupScriptOptions,
+ setupWrapper)
+import Distribution.Client.Utils
+ (inDir, removeExistingFile, tryCanonicalizePath, tryFindAddSourcePackageDesc)
+
+import Distribution.Compat.Exception (catchIO)
+import Distribution.Client.Compat.Time (EpochTime, getCurTime,
+ getModTime)
+
+
+-- | Timestamp of an add-source dependency.
+type AddSourceTimestamp = (FilePath, EpochTime)
+-- | Timestamp file record - a string identifying the compiler & platform plus a
+-- list of add-source timestamps. 
+type TimestampFileRecord = (String, [AddSourceTimestamp]) + +timestampRecordKey :: CompilerId -> Platform -> String +timestampRecordKey compId platform = display platform ++ "-" ++ display compId + +-- | The 'add-source-timestamps' file keeps the timestamps of all add-source +-- dependencies. It is initially populated by 'sandbox add-source' and kept +-- current by 'reinstallAddSourceDeps' and 'configure -w'. The user can install +-- add-source deps manually with 'cabal install' after having edited them, so we +-- can err on the side of caution sometimes. +-- FIXME: We should keep this info in the index file, together with build tree +-- refs. +timestampFileName :: FilePath +timestampFileName = "add-source-timestamps" + +-- | Read the timestamp file. Exits with error if the timestamp file is +-- corrupted. Returns an empty list if the file doesn't exist. +readTimestampFile :: FilePath -> IO [TimestampFileRecord] +readTimestampFile timestampFile = do + timestampString <- readFile timestampFile `catchIO` \_ -> return "[]" + case reads timestampString of + [(timestamps, s)] | all isSpace s -> return timestamps + _ -> + die $ "The timestamps file is corrupted. " + ++ "Please delete & recreate the sandbox." + +-- | Write the timestamp file, atomically. +writeTimestampFile :: FilePath -> [TimestampFileRecord] -> IO () +writeTimestampFile timestampFile timestamps = do + writeFile timestampTmpFile (show timestamps) + renameFile timestampTmpFile timestampFile + where + timestampTmpFile = timestampFile <.> "tmp" + +-- | Read, process and write the timestamp file in one go. 
+withTimestampFile :: FilePath
+ -> ([TimestampFileRecord] -> IO [TimestampFileRecord])
+ -> IO ()
+withTimestampFile sandboxDir process = do
+ let timestampFile = sandboxDir </> timestampFileName
+ timestampRecords <- readTimestampFile timestampFile >>= process
+ writeTimestampFile timestampFile timestampRecords
+
+-- | Given a list of 'AddSourceTimestamp's, a list of paths to add-source deps
+-- we've added and an initial timestamp, add an 'AddSourceTimestamp' to the list
+-- for each path. If a timestamp for a given path already exists in the list,
+-- update it.
+addTimestamps :: EpochTime -> [AddSourceTimestamp] -> [FilePath]
+ -> [AddSourceTimestamp]
+addTimestamps initial timestamps newPaths =
+ [ (p, initial) | p <- newPaths ] ++ oldTimestamps
+ where
+ (oldTimestamps, _toBeUpdated) =
+ partition (\(path, _) -> path `notElem` newPaths) timestamps
+
+-- | Given a list of 'AddSourceTimestamp's, a list of paths to add-source deps
+-- we've reinstalled and a new timestamp value, update the timestamp value for
+-- the deps in the list. If there are new paths in the list, ignore them.
+updateTimestamps :: [AddSourceTimestamp] -> [FilePath] -> EpochTime
+ -> [AddSourceTimestamp]
+updateTimestamps timestamps pathsToUpdate newTimestamp =
+ foldr updateTimestamp [] timestamps
+ where
+ updateTimestamp t@(path, _oldTimestamp) rest
+ | path `elem` pathsToUpdate = (path, newTimestamp) : rest
+ | otherwise = t : rest
+
+-- | Given a list of 'TimestampFileRecord's and a list of paths to add-source
+-- deps we've removed, remove those deps from the list.
+removeTimestamps :: [AddSourceTimestamp] -> [FilePath] -> [AddSourceTimestamp]
+removeTimestamps l pathsToRemove = foldr removeTimestamp [] l
+ where
+ removeTimestamp t@(path, _oldTimestamp) rest =
+ if path `elem` pathsToRemove
+ then rest
+ else t : rest
+
+-- | If a timestamp record for this compiler doesn't exist, add a new one. 
+maybeAddCompilerTimestampRecord :: Verbosity -> FilePath -> FilePath + -> CompilerId -> Platform + -> IO () +maybeAddCompilerTimestampRecord verbosity sandboxDir indexFile + compId platform = do + let key = timestampRecordKey compId platform + withTimestampFile sandboxDir $ \timestampRecords -> do + case lookup key timestampRecords of + Just _ -> return timestampRecords + Nothing -> do + buildTreeRefs <- listBuildTreeRefs verbosity ListIgnored OnlyLinks + indexFile + now <- getCurTime + let timestamps = map (\p -> (p, now)) buildTreeRefs + return $ (key, timestamps):timestampRecords + +-- | Given an IO action that returns a list of build tree refs, add those +-- build tree refs to the timestamps file (for all compilers). +withAddTimestamps :: FilePath -> IO [FilePath] -> IO () +withAddTimestamps sandboxDir act = do + let initialTimestamp = 0 + withActionOnAllTimestamps (addTimestamps initialTimestamp) sandboxDir act + +-- | Given an IO action that returns a list of build tree refs, remove those +-- build tree refs from the timestamps file (for all compilers). +withRemoveTimestamps :: FilePath -> IO [FilePath] -> IO () +withRemoveTimestamps = withActionOnAllTimestamps removeTimestamps + +-- | Given an IO action that returns a list of build tree refs, update the +-- timestamps of the returned build tree refs to the current time (only for the +-- given compiler & platform). +withUpdateTimestamps :: FilePath -> CompilerId -> Platform + ->([AddSourceTimestamp] -> IO [FilePath]) + -> IO () +withUpdateTimestamps = + withActionOnCompilerTimestamps updateTimestamps + +-- | Helper for implementing 'withAddTimestamps' and +-- 'withRemoveTimestamps'. Runs a given action on the list of +-- 'AddSourceTimestamp's for all compilers, applies 'f' to the result and then +-- updates the timestamp file. The IO action is run only once. 
+withActionOnAllTimestamps :: ([AddSourceTimestamp] -> [FilePath] + -> [AddSourceTimestamp]) + -> FilePath + -> IO [FilePath] + -> IO () +withActionOnAllTimestamps f sandboxDir act = + withTimestampFile sandboxDir $ \timestampRecords -> do + paths <- act + return [(key, f timestamps paths) | (key, timestamps) <- timestampRecords] + +-- | Helper for implementing 'withUpdateTimestamps'. Runs a given action on the +-- list of 'AddSourceTimestamp's for this compiler, applies 'f' to the result +-- and then updates the timestamp file record. The IO action is run only once. +withActionOnCompilerTimestamps :: ([AddSourceTimestamp] + -> [FilePath] -> EpochTime + -> [AddSourceTimestamp]) + -> FilePath + -> CompilerId + -> Platform + -> ([AddSourceTimestamp] -> IO [FilePath]) + -> IO () +withActionOnCompilerTimestamps f sandboxDir compId platform act = do + let needle = timestampRecordKey compId platform + withTimestampFile sandboxDir $ \timestampRecords -> do + timestampRecords' <- forM timestampRecords $ \r@(key, timestamps) -> + if key == needle + then do paths <- act timestamps + now <- getCurTime + return (key, f timestamps paths now) + else return r + return timestampRecords' + +-- | List all source files of a given add-source dependency. Exits with error if +-- something is wrong (e.g. there is no .cabal file in the given directory). +-- FIXME: This function is not thread-safe because of 'inDir'. +allPackageSourceFiles :: Verbosity -> FilePath -> IO [FilePath] +allPackageSourceFiles verbosity packageDir = inDir (Just packageDir) $ do + pkg <- do + let err = "Error reading source files of add-source dependency." 
+ desc <- tryFindAddSourcePackageDesc packageDir err + flattenPackageDescription `fmap` readPackageDescription verbosity desc + let file = "cabal-sdist-list-sources" + flags = defaultSDistFlags { + sDistVerbosity = Flag $ if verbosity == normal + then lessVerbose verbosity else verbosity, + sDistListSources = Flag file + } + setupOpts = defaultSetupScriptOptions { + -- 'sdist --list-sources' was introduced in Cabal 1.18. + useCabalVersion = orLaterVersion $ Version [1,18,0] [] + } + + doListSources :: IO [FilePath] + doListSources = do + setupWrapper verbosity setupOpts (Just pkg) sdistCommand (const flags) [] + srcs <- fmap lines . readFile $ file + mapM tryCanonicalizePath srcs + + onFailedListSources :: IOException -> IO () + onFailedListSources e = do + warn verbosity $ + "Could not list sources of the add-source dependency '" + ++ display (packageName pkg) ++ "'. Skipping the timestamp check." + debug verbosity $ + "Exception was: " ++ show e + + -- Run setup sdist --list-sources=TMPFILE + ret <- doListSources `catchIO` (\e -> onFailedListSources e >> return []) + removeExistingFile file + return ret + +-- | Has this dependency been modified since we have last looked at it? +isDepModified :: Verbosity -> EpochTime -> AddSourceTimestamp -> IO Bool +isDepModified verbosity now (packageDir, timestamp) = do + debug verbosity ("Checking whether the dependency is modified: " ++ packageDir) + depSources <- allPackageSourceFiles verbosity packageDir + go depSources + + where + go [] = return False + go (dep:rest) = do + -- FIXME: What if the clock jumps backwards at any point? For now we only + -- print a warning. + modTime <- getModTime dep + when (modTime > now) $ + warn verbosity $ "File '" ++ dep + ++ "' has a modification time that is in the future." + if modTime >= timestamp + then do + debug verbosity ("Dependency has a modified source file: " ++ dep) + return True + else go rest + +-- | List all modified dependencies. 
+listModifiedDeps :: Verbosity -> FilePath -> CompilerId -> Platform
+ -> M.Map FilePath a
+ -- ^ The set of all installed add-source deps.
+ -> IO [FilePath]
+listModifiedDeps verbosity sandboxDir compId platform installedDepsMap = do
+ timestampRecords <- readTimestampFile (sandboxDir </> timestampFileName)
+ let needle = timestampRecordKey compId platform
+ timestamps <- maybe noTimestampRecord return
+ (lookup needle timestampRecords)
+ now <- getCurTime
+ fmap (map fst) . filterM (isDepModified verbosity now)
+ . filter (\ts -> fst ts `M.member` installedDepsMap)
+ $ timestamps
+
+ where
+ noTimestampRecord = die $ "Couldn't find a timestamp record for the given "
+ ++ "compiler/platform pair. "
+ ++ "Please report this on the Cabal bug tracker: "
+ ++ "https://github.com/haskell/cabal/issues/new ."
diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Sandbox/Types.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Sandbox/Types.hs
--- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Sandbox/Types.hs 1970-01-01 00:00:00.000000000 +0000
+++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Sandbox/Types.hs 2016-06-02 07:15:38.000000000 +0000
@@ -0,0 +1,64 @@
+{-# LANGUAGE CPP #-}
+-----------------------------------------------------------------------------
+-- |
+-- Module : Distribution.Client.Sandbox.Types
+-- Maintainer : cabal-devel@haskell.org
+-- Portability : portable
+--
+-- Helpers for writing code that works both inside and outside a sandbox.
+-----------------------------------------------------------------------------
+
+module Distribution.Client.Sandbox.Types (
+ UseSandbox(..), isUseSandbox, whenUsingSandbox,
+ SandboxPackageInfo(..) 
+ ) where + +import qualified Distribution.Simple.PackageIndex as InstalledPackageIndex +import Distribution.Client.Types (SourcePackage) + +#if !MIN_VERSION_base(4,8,0) +import Data.Monoid +#endif +import qualified Data.Set as S + +-- | Are we using a sandbox? +data UseSandbox = UseSandbox FilePath | NoSandbox + +instance Monoid UseSandbox where + mempty = NoSandbox + + NoSandbox `mappend` s = s + u0@(UseSandbox _) `mappend` NoSandbox = u0 + (UseSandbox _) `mappend` u1@(UseSandbox _) = u1 + +-- | Convert a @UseSandbox@ value to a boolean. Useful in conjunction with +-- @when@. +isUseSandbox :: UseSandbox -> Bool +isUseSandbox (UseSandbox _) = True +isUseSandbox NoSandbox = False + +-- | Execute an action only if we're in a sandbox, feeding to it the path to the +-- sandbox directory. +whenUsingSandbox :: UseSandbox -> (FilePath -> IO ()) -> IO () +whenUsingSandbox NoSandbox _ = return () +whenUsingSandbox (UseSandbox sandboxDir) act = act sandboxDir + +-- | Data about the packages installed in the sandbox that is passed from +-- 'reinstallAddSourceDeps' to the solver. +data SandboxPackageInfo = SandboxPackageInfo { + modifiedAddSourceDependencies :: ![SourcePackage], + -- ^ Modified add-source deps that we want to reinstall. These are guaranteed + -- to be already installed in the sandbox. + + otherAddSourceDependencies :: ![SourcePackage], + -- ^ Remaining add-source deps. Some of these may be not installed in the + -- sandbox. + + otherInstalledSandboxPackages :: !InstalledPackageIndex.InstalledPackageIndex, + -- ^ All packages installed in the sandbox. Intersection with + -- 'modifiedAddSourceDependencies' and/or 'otherAddSourceDependencies' can be + -- non-empty. + + allAddSourceDependencies :: !(S.Set FilePath) + -- ^ A set of paths to all add-source dependencies, for convenience. 
+ } diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Sandbox.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Sandbox.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Sandbox.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Sandbox.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,766 @@ +{-# LANGUAGE CPP #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.Sandbox +-- Maintainer : cabal-devel@haskell.org +-- Portability : portable +-- +-- UI for the sandboxing functionality. +----------------------------------------------------------------------------- + +module Distribution.Client.Sandbox ( + sandboxInit, + sandboxDelete, + sandboxAddSource, + sandboxAddSourceSnapshot, + sandboxDeleteSource, + sandboxListSources, + sandboxHcPkg, + dumpPackageEnvironment, + withSandboxBinDirOnSearchPath, + + getSandboxConfigFilePath, + loadConfigOrSandboxConfig, + initPackageDBIfNeeded, + maybeWithSandboxDirOnSearchPath, + + WereDepsReinstalled(..), + reinstallAddSourceDeps, + maybeReinstallAddSourceDeps, + + SandboxPackageInfo(..), + maybeWithSandboxPackageInfo, + + tryGetIndexFilePath, + sandboxBuildDir, + getInstalledPackagesInSandbox, + updateSandboxConfigFileFlag, + + -- FIXME: move somewhere else + configPackageDB', configCompilerAux' + ) where + +import Distribution.Client.Setup + ( SandboxFlags(..), ConfigFlags(..), ConfigExFlags(..), InstallFlags(..) 
+ , GlobalFlags(..), defaultConfigExFlags, defaultInstallFlags + , defaultSandboxLocation, globalRepos ) +import Distribution.Client.Sandbox.Timestamp ( listModifiedDeps + , maybeAddCompilerTimestampRecord + , withAddTimestamps + , withRemoveTimestamps ) +import Distribution.Client.Config ( SavedConfig(..), loadConfig ) +import Distribution.Client.Dependency ( foldProgress ) +import Distribution.Client.IndexUtils ( BuildTreeRefType(..) ) +import Distribution.Client.Install ( InstallArgs, + makeInstallContext, + makeInstallPlan, + processInstallPlan ) +import Distribution.Utils.NubList ( fromNubList ) + +import Distribution.Client.Sandbox.PackageEnvironment + ( PackageEnvironment(..), IncludeComments(..), PackageEnvironmentType(..) + , createPackageEnvironmentFile, classifyPackageEnvironment + , tryLoadSandboxPackageEnvironmentFile, loadUserConfig + , commentPackageEnvironment, showPackageEnvironmentWithComments + , sandboxPackageEnvironmentFile, userPackageEnvironmentFile ) +import Distribution.Client.Sandbox.Types ( SandboxPackageInfo(..) + , UseSandbox(..) ) +import Distribution.Client.Types ( PackageLocation(..) + , SourcePackage(..) ) +import Distribution.Client.Utils ( inDir, tryCanonicalizePath + , tryFindAddSourcePackageDesc ) +import Distribution.PackageDescription.Configuration + ( flattenPackageDescription ) +import Distribution.PackageDescription.Parse ( readPackageDescription ) +import Distribution.Simple.Compiler ( Compiler(..), PackageDB(..) + , PackageDBStack ) +import Distribution.Simple.Configure ( configCompilerAuxEx + , interpretPackageDbFlags + , getPackageDBContents ) +import Distribution.Simple.PreProcess ( knownSuffixHandlers ) +import Distribution.Simple.Program ( ProgramConfiguration ) +import Distribution.Simple.Setup ( Flag(..), HaddockFlags(..) 
+ , fromFlagOrDefault )
+import Distribution.Simple.SrcDist ( prepareTree )
+import Distribution.Simple.Utils ( die, debug, notice, info, warn
+ , debugNoWrap, defaultPackageDesc
+ , intercalate, topHandlerWith
+ , createDirectoryIfMissingVerbose )
+import Distribution.Package ( Package(..) )
+import Distribution.System ( Platform )
+import Distribution.Text ( display )
+import Distribution.Verbosity ( Verbosity, lessVerbose )
+import Distribution.Client.Compat.Environment ( lookupEnv, setEnv )
+import Distribution.Client.Compat.FilePerms ( setFileHidden )
+import qualified Distribution.Client.Sandbox.Index as Index
+import Distribution.Simple.PackageIndex ( InstalledPackageIndex )
+import qualified Distribution.Simple.PackageIndex as InstalledPackageIndex
+import qualified Distribution.Simple.Register as Register
+import qualified Data.Map as M
+import qualified Data.Set as S
+import Control.Exception ( assert, bracket_ )
+import Control.Monad ( forM, liftM2, unless, when )
+import Data.Bits ( shiftL, shiftR, xor )
+import Data.Char ( ord )
+import Data.IORef ( newIORef, writeIORef, readIORef )
+import Data.List ( delete, foldl' )
+import Data.Maybe ( fromJust, fromMaybe )
+#if !MIN_VERSION_base(4,8,0)
+import Data.Monoid ( mempty, mappend )
+#endif
+import Data.Word ( Word32 )
+import Numeric ( showHex )
+import System.Directory ( createDirectory
+ , doesDirectoryExist
+ , doesFileExist
+ , getCurrentDirectory
+ , removeDirectoryRecursive
+ , removeFile
+ , renameDirectory )
+import System.FilePath ( (</>), getSearchPath
+ , searchPathSeparator
+ , takeDirectory )
+
+
+--
+-- * Constants
+--
+
+-- | The name of the sandbox subdirectory where we keep snapshots of add-source
+-- dependencies.
+snapshotDirectoryName :: FilePath
+snapshotDirectoryName = "snapshots"
+
+-- | Non-standard build dir that is used for building add-source deps instead of
+-- "dist". Fixes surprising behaviour in some cases (see issue #1281). 
+sandboxBuildDir :: FilePath -> FilePath
+sandboxBuildDir sandboxDir = "dist/dist-sandbox-" ++ showHex sandboxDirHash ""
+ where
+ sandboxDirHash = jenkins sandboxDir
+
+ -- See http://en.wikipedia.org/wiki/Jenkins_hash_function
+ jenkins :: String -> Word32
+ jenkins str = loop_finish $ foldl' loop 0 str
+ where
+ loop :: Word32 -> Char -> Word32
+ loop hash key_i' = hash'''
+ where
+ key_i = toEnum . ord $ key_i'
+ hash' = hash + key_i
+ hash'' = hash' + (shiftL hash' 10)
+ hash''' = hash'' `xor` (shiftR hash'' 6)
+
+ loop_finish :: Word32 -> Word32
+ loop_finish hash = hash'''
+ where
+ hash' = hash + (shiftL hash 3)
+ hash'' = hash' `xor` (shiftR hash' 11)
+ hash''' = hash'' + (shiftL hash'' 15)
+
+--
+-- * Basic sandbox functions.
+--
+
+-- | If @--sandbox-config-file@ wasn't given on the command-line, set it to the
+-- value of the @CABAL_SANDBOX_CONFIG@ environment variable, or else to
+-- 'NoFlag'.
+updateSandboxConfigFileFlag :: GlobalFlags -> IO GlobalFlags
+updateSandboxConfigFileFlag globalFlags =
+ case globalSandboxConfigFile globalFlags of
+ Flag _ -> return globalFlags
+ NoFlag -> do
+ f' <- fmap (fromMaybe NoFlag . fmap Flag) . lookupEnv
+ $ "CABAL_SANDBOX_CONFIG"
+ return globalFlags { globalSandboxConfigFile = f' }
+
+-- | Return the path to the sandbox config file - either the default or the one
+-- specified with @--sandbox-config-file@.
+getSandboxConfigFilePath :: GlobalFlags -> IO FilePath
+getSandboxConfigFilePath globalFlags = do
+ let sandboxConfigFileFlag = globalSandboxConfigFile globalFlags
+ case sandboxConfigFileFlag of
+ NoFlag -> do pkgEnvDir <- getCurrentDirectory
+ return (pkgEnvDir </> sandboxPackageEnvironmentFile)
+ Flag path -> return path
+
+-- | Load the @cabal.sandbox.config@ file (and possibly the optional
+-- @cabal.config@). In addition to a @PackageEnvironment@, also return a
+-- canonical path to the sandbox. Exit with error if the sandbox directory or
+-- the package environment file do not exist. 
+tryLoadSandboxConfig :: Verbosity -> GlobalFlags
+ -> IO (FilePath, PackageEnvironment)
+tryLoadSandboxConfig verbosity globalFlags = do
+ path <- getSandboxConfigFilePath globalFlags
+ tryLoadSandboxPackageEnvironmentFile verbosity path
+ (globalConfigFile globalFlags)
+
+-- | Return the name of the package index file for this package environment.
+tryGetIndexFilePath :: SavedConfig -> IO FilePath
+tryGetIndexFilePath config = tryGetIndexFilePath' (savedGlobalFlags config)
+
+-- | The same as 'tryGetIndexFilePath', but takes 'GlobalFlags' instead of
+-- 'SavedConfig'.
+tryGetIndexFilePath' :: GlobalFlags -> IO FilePath
+tryGetIndexFilePath' globalFlags = do
+ let paths = fromNubList $ globalLocalRepos globalFlags
+ case paths of
+ [] -> die $ "Distribution.Client.Sandbox.tryGetIndexFilePath: " ++
+ "no local repos found. " ++ checkConfiguration
+ _ -> return $ (last paths) </> Index.defaultIndexFileName
+ where
+ checkConfiguration = "Please check your configuration ('"
+ ++ userPackageEnvironmentFile ++ "')."
+
+-- | Try to extract a 'PackageDB' from 'ConfigFlags'. Gives a better error
+-- message than just pattern-matching.
+getSandboxPackageDB :: ConfigFlags -> IO PackageDB
+getSandboxPackageDB configFlags = do
+ case configPackageDBs configFlags of
+ [Just sandboxDB@(SpecificPackageDB _)] -> return sandboxDB
+ -- TODO: should we allow multiple package DBs (e.g. with 'inherit')?
+
+ [] ->
+ die $ "Sandbox package DB is not specified. " ++ sandboxConfigCorrupt
+ [_] ->
+ die $ "Unexpected contents of the 'package-db' field. "
+ ++ sandboxConfigCorrupt
+ _ ->
+ die $ "Too many package DBs provided. " ++ sandboxConfigCorrupt
+
+ where
+ sandboxConfigCorrupt = "Your 'cabal.sandbox.config' is probably corrupt."
+
+
+-- | Which packages are installed in the sandbox package DB? 
+getInstalledPackagesInSandbox :: Verbosity -> ConfigFlags + -> Compiler -> ProgramConfiguration + -> IO InstalledPackageIndex +getInstalledPackagesInSandbox verbosity configFlags comp conf = do + sandboxDB <- getSandboxPackageDB configFlags + getPackageDBContents verbosity comp sandboxDB conf + +-- | Temporarily add $SANDBOX_DIR/bin to $PATH. +withSandboxBinDirOnSearchPath :: FilePath -> IO a -> IO a +withSandboxBinDirOnSearchPath sandboxDir = bracket_ addBinDir rmBinDir + where + -- TODO: Instead of modifying the global process state, it'd be better to + -- set the environment individually for each subprocess invocation. This + -- will have to wait until the Shell monad is implemented; without it the + -- required changes are too intrusive. + addBinDir :: IO () + addBinDir = do + mbOldPath <- lookupEnv "PATH" + let newPath = maybe sandboxBin ((++) sandboxBin . (:) searchPathSeparator) + mbOldPath + setEnv "PATH" newPath + + rmBinDir :: IO () + rmBinDir = do + oldPath <- getSearchPath + let newPath = intercalate [searchPathSeparator] + (delete sandboxBin oldPath) + setEnv "PATH" newPath + + sandboxBin = sandboxDir "bin" + +-- | Initialise a package DB for this compiler if it doesn't exist. +initPackageDBIfNeeded :: Verbosity -> ConfigFlags + -> Compiler -> ProgramConfiguration + -> IO () +initPackageDBIfNeeded verbosity configFlags comp conf = do + SpecificPackageDB dbPath <- getSandboxPackageDB configFlags + packageDBExists <- doesDirectoryExist dbPath + unless packageDBExists $ + Register.initPackageDB verbosity comp conf dbPath + when packageDBExists $ + debug verbosity $ "The package database already exists: " ++ dbPath + +-- | Entry point for the 'cabal sandbox dump-pkgenv' command. +dumpPackageEnvironment :: Verbosity -> SandboxFlags -> GlobalFlags -> IO () +dumpPackageEnvironment verbosity _sandboxFlags globalFlags = do + (sandboxDir, pkgEnv) <- tryLoadSandboxConfig verbosity globalFlags + commentPkgEnv <- commentPackageEnvironment sandboxDir + putStrLn . 
showPackageEnvironmentWithComments (Just commentPkgEnv) $ pkgEnv + +-- | Entry point for the 'cabal sandbox init' command. +sandboxInit :: Verbosity -> SandboxFlags -> GlobalFlags -> IO () +sandboxInit verbosity sandboxFlags globalFlags = do + -- Warn if there's a 'cabal-dev' sandbox. + isCabalDevSandbox <- liftM2 (&&) (doesDirectoryExist "cabal-dev") + (doesFileExist $ "cabal-dev" "cabal.config") + when isCabalDevSandbox $ + warn verbosity $ + "You are apparently using a legacy (cabal-dev) sandbox. " + ++ "Legacy sandboxes may interact badly with native Cabal sandboxes. " + ++ "You may want to delete the 'cabal-dev' directory to prevent issues." + + -- Create the sandbox directory. + let sandboxDir' = fromFlagOrDefault defaultSandboxLocation + (sandboxLocation sandboxFlags) + createDirectoryIfMissingVerbose verbosity True sandboxDir' + sandboxDir <- tryCanonicalizePath sandboxDir' + setFileHidden sandboxDir + + -- Determine which compiler to use (using the value from ~/.cabal/config). + userConfig <- loadConfig verbosity (globalConfigFile globalFlags) NoFlag + (comp, platform, conf) <- configCompilerAuxEx (savedConfigureFlags userConfig) + + -- Create the package environment file. + pkgEnvFile <- getSandboxConfigFilePath globalFlags + createPackageEnvironmentFile verbosity sandboxDir pkgEnvFile + NoComments comp platform + (_sandboxDir, pkgEnv) <- tryLoadSandboxConfig verbosity globalFlags + let config = pkgEnvSavedConfig pkgEnv + configFlags = savedConfigureFlags config + + -- Create the index file if it doesn't exist. + indexFile <- tryGetIndexFilePath config + indexFileExists <- doesFileExist indexFile + if indexFileExists + then notice verbosity $ "Using an existing sandbox located at " ++ sandboxDir + else notice verbosity $ "Creating a new sandbox at " ++ sandboxDir + Index.createEmpty verbosity indexFile + + -- Create the package DB for the default compiler. 
+ initPackageDBIfNeeded verbosity configFlags comp conf + maybeAddCompilerTimestampRecord verbosity sandboxDir indexFile + (compilerId comp) platform + +-- | Entry point for the 'cabal sandbox delete' command. +sandboxDelete :: Verbosity -> SandboxFlags -> GlobalFlags -> IO () +sandboxDelete verbosity _sandboxFlags globalFlags = do + (useSandbox, _) <- loadConfigOrSandboxConfig + verbosity + globalFlags { globalRequireSandbox = Flag False } + mempty + case useSandbox of + NoSandbox -> warn verbosity "Not in a sandbox." + UseSandbox sandboxDir -> do + curDir <- getCurrentDirectory + pkgEnvFile <- getSandboxConfigFilePath globalFlags + + -- Remove the @cabal.sandbox.config@ file, unless it's in a non-standard + -- location. + let isNonDefaultConfigLocation = + pkgEnvFile /= (curDir sandboxPackageEnvironmentFile) + + if isNonDefaultConfigLocation + then warn verbosity $ "Sandbox config file is in non-default location: '" + ++ pkgEnvFile ++ "'.\n Please delete manually." + else removeFile pkgEnvFile + + -- Remove the sandbox directory, unless we're using a shared sandbox. + let isNonDefaultSandboxLocation = + sandboxDir /= (curDir defaultSandboxLocation) + + when isNonDefaultSandboxLocation $ + die $ "Non-default sandbox location used: '" ++ sandboxDir + ++ "'.\nAssuming a shared sandbox. Please delete '" + ++ sandboxDir ++ "' manually." + + notice verbosity $ "Deleting the sandbox located at " ++ sandboxDir + removeDirectoryRecursive sandboxDir + +-- Common implementation of 'sandboxAddSource' and 'sandboxAddSourceSnapshot'. +doAddSource :: Verbosity -> [FilePath] -> FilePath -> PackageEnvironment + -> BuildTreeRefType + -> IO () +doAddSource verbosity buildTreeRefs sandboxDir pkgEnv refType = do + let savedConfig = pkgEnvSavedConfig pkgEnv + indexFile <- tryGetIndexFilePath savedConfig + + -- If we're running 'sandbox add-source' for the first time for this compiler, + -- we need to create an initial timestamp record. + (comp, platform, _) <- configCompilerAuxEx . 
savedConfigureFlags $ savedConfig + maybeAddCompilerTimestampRecord verbosity sandboxDir indexFile + (compilerId comp) platform + + withAddTimestamps sandboxDir $ do + -- FIXME: path canonicalisation is done in addBuildTreeRefs, but we do it + -- twice because of the timestamps file. + buildTreeRefs' <- mapM tryCanonicalizePath buildTreeRefs + Index.addBuildTreeRefs verbosity indexFile buildTreeRefs' refType + return buildTreeRefs' + +-- | Entry point for the 'cabal sandbox add-source' command. +sandboxAddSource :: Verbosity -> [FilePath] -> SandboxFlags -> GlobalFlags + -> IO () +sandboxAddSource verbosity buildTreeRefs sandboxFlags globalFlags = do + (sandboxDir, pkgEnv) <- tryLoadSandboxConfig verbosity globalFlags + + if fromFlagOrDefault False (sandboxSnapshot sandboxFlags) + then sandboxAddSourceSnapshot verbosity buildTreeRefs sandboxDir pkgEnv + else doAddSource verbosity buildTreeRefs sandboxDir pkgEnv LinkRef + +-- | Entry point for the 'cabal sandbox add-source --snapshot' command. +sandboxAddSourceSnapshot :: Verbosity -> [FilePath] -> FilePath + -> PackageEnvironment + -> IO () +sandboxAddSourceSnapshot verbosity buildTreeRefs sandboxDir pkgEnv = do + let snapshotDir = sandboxDir snapshotDirectoryName + + -- Use 'D.S.SrcDist.prepareTree' to copy each package's files to our private + -- location. + createDirectoryIfMissingVerbose verbosity True snapshotDir + + -- Collect the package descriptions first, so that if some path does not refer + -- to a cabal package, we fail immediately. + pkgs <- forM buildTreeRefs $ \buildTreeRef -> + inDir (Just buildTreeRef) $ + return . flattenPackageDescription + =<< readPackageDescription verbosity + =<< defaultPackageDesc verbosity + + -- Copy the package sources to "snapshots/$PKGNAME-$VERSION-tmp". If + -- 'prepareTree' throws an error at any point, the old snapshots will still be + -- in consistent state. 
+ tmpDirs <- forM (zip buildTreeRefs pkgs) $ \(buildTreeRef, pkg) -> + inDir (Just buildTreeRef) $ do + let targetDir = snapshotDir (display . packageId $ pkg) + targetTmpDir = targetDir ++ "-tmp" + dirExists <- doesDirectoryExist targetTmpDir + when dirExists $ + removeDirectoryRecursive targetDir + createDirectory targetTmpDir + prepareTree verbosity pkg Nothing targetTmpDir knownSuffixHandlers + return (targetTmpDir, targetDir) + + -- Now rename the "snapshots/$PKGNAME-$VERSION-tmp" dirs to + -- "snapshots/$PKGNAME-$VERSION". + snapshots <- forM tmpDirs $ \(targetTmpDir, targetDir) -> do + dirExists <- doesDirectoryExist targetDir + when dirExists $ + removeDirectoryRecursive targetDir + renameDirectory targetTmpDir targetDir + return targetDir + + -- Once the packages are copied, just 'add-source' them as usual. + doAddSource verbosity snapshots sandboxDir pkgEnv SnapshotRef + +-- | Entry point for the 'cabal sandbox delete-source' command. +sandboxDeleteSource :: Verbosity -> [FilePath] -> SandboxFlags -> GlobalFlags + -> IO () +sandboxDeleteSource verbosity buildTreeRefs _sandboxFlags globalFlags = do + (sandboxDir, pkgEnv) <- tryLoadSandboxConfig verbosity globalFlags + indexFile <- tryGetIndexFilePath (pkgEnvSavedConfig pkgEnv) + + withRemoveTimestamps sandboxDir $ do + Index.removeBuildTreeRefs verbosity indexFile buildTreeRefs + + notice verbosity $ "Note: 'sandbox delete-source' only unregisters the " ++ + "source dependency, but does not remove the package " ++ + "from the sandbox package DB.\n\n" ++ + "Use 'sandbox hc-pkg -- unregister' to do that." + +-- | Entry point for the 'cabal sandbox list-sources' command. 
+sandboxListSources :: Verbosity -> SandboxFlags -> GlobalFlags + -> IO () +sandboxListSources verbosity _sandboxFlags globalFlags = do + (sandboxDir, pkgEnv) <- tryLoadSandboxConfig verbosity globalFlags + indexFile <- tryGetIndexFilePath (pkgEnvSavedConfig pkgEnv) + + refs <- Index.listBuildTreeRefs verbosity + Index.ListIgnored Index.LinksAndSnapshots indexFile + when (null refs) $ + notice verbosity $ "Index file '" ++ indexFile + ++ "' has no references to local build trees." + when (not . null $ refs) $ do + notice verbosity $ "Source dependencies registered " + ++ "in the current sandbox ('" ++ sandboxDir ++ "'):\n\n" + mapM_ putStrLn refs + notice verbosity $ "\nTo unregister source dependencies, " + ++ "use the 'sandbox delete-source' command." + +-- | Entry point for the 'cabal sandbox hc-pkg' command. Invokes the @hc-pkg@ +-- tool with provided arguments, restricted to the sandbox. +sandboxHcPkg :: Verbosity -> SandboxFlags -> GlobalFlags -> [String] -> IO () +sandboxHcPkg verbosity _sandboxFlags globalFlags extraArgs = do + (_sandboxDir, pkgEnv) <- tryLoadSandboxConfig verbosity globalFlags + let configFlags = savedConfigureFlags . pkgEnvSavedConfig $ pkgEnv + dbStack = configPackageDB' configFlags + (comp, _platform, conf) <- configCompilerAux' configFlags + + Register.invokeHcPkg verbosity comp conf dbStack extraArgs + +-- | Check which type of package environment we're in and return a +-- correctly-initialised @SavedConfig@ and a @UseSandbox@ value that indicates +-- whether we're working in a sandbox. +loadConfigOrSandboxConfig :: Verbosity + -> GlobalFlags -- ^ For @--config-file@ and + -- @--sandbox-config-file@. + -> Flag Bool -- ^ Ignored if we're in a sandbox. 
+ -> IO (UseSandbox, SavedConfig) +loadConfigOrSandboxConfig verbosity globalFlags userInstallFlag = do + let configFileFlag = globalConfigFile globalFlags + sandboxConfigFileFlag = globalSandboxConfigFile globalFlags + ignoreSandboxFlag = globalIgnoreSandbox globalFlags + + pkgEnvDir <- getPkgEnvDir sandboxConfigFileFlag + pkgEnvType <- classifyPackageEnvironment pkgEnvDir sandboxConfigFileFlag + ignoreSandboxFlag + case pkgEnvType of + -- A @cabal.sandbox.config@ file (and possibly @cabal.config@) is present. + SandboxPackageEnvironment -> do + (sandboxDir, pkgEnv) <- tryLoadSandboxConfig verbosity globalFlags + -- ^ Prints an error message and exits on error. + let config = pkgEnvSavedConfig pkgEnv + return (UseSandbox sandboxDir, config) + + -- Only @cabal.config@ is present. + UserPackageEnvironment -> do + config <- loadConfig verbosity configFileFlag userInstallFlag + userConfig <- loadUserConfig verbosity pkgEnvDir + let config' = config `mappend` userConfig + dieIfSandboxRequired config' + return (NoSandbox, config') + + -- Neither @cabal.sandbox.config@ nor @cabal.config@ are present. + AmbientPackageEnvironment -> do + config <- loadConfig verbosity configFileFlag userInstallFlag + dieIfSandboxRequired config + return (NoSandbox, config) + + where + -- Return the path to the package environment directory - either the + -- current directory or the one that @--sandbox-config-file@ resides in. + getPkgEnvDir :: (Flag FilePath) -> IO FilePath + getPkgEnvDir sandboxConfigFileFlag = do + case sandboxConfigFileFlag of + NoFlag -> getCurrentDirectory + Flag path -> tryCanonicalizePath . takeDirectory $ path + + -- Die if @--require-sandbox@ was specified and we're not inside a sandbox. + dieIfSandboxRequired :: SavedConfig -> IO () + dieIfSandboxRequired config = checkFlag flag + where + flag = (globalRequireSandbox . 
savedGlobalFlags $ config) + `mappend` (globalRequireSandbox globalFlags) + checkFlag (Flag True) = + die $ "'require-sandbox' is set to True, but no sandbox is present. " + ++ "Use '--no-require-sandbox' if you want to override " + ++ "'require-sandbox' temporarily." + checkFlag (Flag False) = return () + checkFlag (NoFlag) = return () + +-- | If we're in a sandbox, call @withSandboxBinDirOnSearchPath@, otherwise do +-- nothing. +maybeWithSandboxDirOnSearchPath :: UseSandbox -> IO a -> IO a +maybeWithSandboxDirOnSearchPath NoSandbox act = act +maybeWithSandboxDirOnSearchPath (UseSandbox sandboxDir) act = + withSandboxBinDirOnSearchPath sandboxDir $ act + +-- | Had reinstallAddSourceDeps actually reinstalled any dependencies? +data WereDepsReinstalled = ReinstalledSomeDeps | NoDepsReinstalled + +-- | Reinstall those add-source dependencies that have been modified since +-- we've last installed them. Assumes that we're working inside a sandbox. +reinstallAddSourceDeps :: Verbosity + -> ConfigFlags -> ConfigExFlags + -> InstallFlags -> GlobalFlags + -> FilePath + -> IO WereDepsReinstalled +reinstallAddSourceDeps verbosity configFlags' configExFlags + installFlags globalFlags sandboxDir = topHandler' $ do + let sandboxDistPref = sandboxBuildDir sandboxDir + configFlags = configFlags' + { configDistPref = Flag sandboxDistPref } + haddockFlags = mempty + { haddockDistPref = Flag sandboxDistPref } + (comp, platform, conf) <- configCompilerAux' configFlags + retVal <- newIORef NoDepsReinstalled + + withSandboxPackageInfo verbosity configFlags globalFlags + comp platform conf sandboxDir $ \sandboxPkgInfo -> + unless (null $ modifiedAddSourceDependencies sandboxPkgInfo) $ do + + let args :: InstallArgs + args = ((configPackageDB' configFlags) + ,(globalRepos globalFlags) + ,comp, platform, conf + ,UseSandbox sandboxDir, Just sandboxPkgInfo + ,globalFlags, configFlags, configExFlags, installFlags + ,haddockFlags) + + -- This can actually be replaced by a call to 'install', 
but we use a + -- lower-level API because of layer separation reasons. Additionally, we + -- might want to use some lower-level features this in the future. + withSandboxBinDirOnSearchPath sandboxDir $ do + installContext <- makeInstallContext verbosity args Nothing + installPlan <- foldProgress logMsg die' return =<< + makeInstallPlan verbosity args installContext + + processInstallPlan verbosity args installContext installPlan + writeIORef retVal ReinstalledSomeDeps + + readIORef retVal + + where + die' message = die (message ++ installFailedInSandbox) + -- TODO: use a better error message, remove duplication. + installFailedInSandbox = + "Note: when using a sandbox, all packages are required to have " + ++ "consistent dependencies. " + ++ "Try reinstalling/unregistering the offending packages or " + ++ "recreating the sandbox." + logMsg message rest = debugNoWrap verbosity message >> rest + + topHandler' = topHandlerWith $ \_ -> do + warn verbosity "Couldn't reinstall some add-source dependencies." + -- Here we can't know whether any deps have been reinstalled, so we have + -- to be conservative. + return ReinstalledSomeDeps + +-- | Produce a 'SandboxPackageInfo' and feed it to the given action. Note that +-- we don't update the timestamp file here - this is done in +-- 'postInstallActions'. +withSandboxPackageInfo :: Verbosity -> ConfigFlags -> GlobalFlags + -> Compiler -> Platform -> ProgramConfiguration + -> FilePath + -> (SandboxPackageInfo -> IO ()) + -> IO () +withSandboxPackageInfo verbosity configFlags globalFlags + comp platform conf sandboxDir cont = do + -- List all add-source deps. + indexFile <- tryGetIndexFilePath' globalFlags + buildTreeRefs <- Index.listBuildTreeRefs verbosity + Index.DontListIgnored Index.OnlyLinks indexFile + let allAddSourceDepsSet = S.fromList buildTreeRefs + + -- List all packages installed in the sandbox. 
+ installedPkgIndex <- getInstalledPackagesInSandbox verbosity + configFlags comp conf + let err = "Error reading sandbox package information." + -- Get the package descriptions for all add-source deps. + depsCabalFiles <- mapM (flip tryFindAddSourcePackageDesc err) buildTreeRefs + depsPkgDescs <- mapM (readPackageDescription verbosity) depsCabalFiles + let depsMap = M.fromList (zip buildTreeRefs depsPkgDescs) + isInstalled pkgid = not . null + . InstalledPackageIndex.lookupSourcePackageId installedPkgIndex $ pkgid + installedDepsMap = M.filter (isInstalled . packageId) depsMap + + -- Get the package ids of modified (and installed) add-source deps. + modifiedAddSourceDeps <- listModifiedDeps verbosity sandboxDir + (compilerId comp) platform installedDepsMap + -- 'fromJust' here is safe because 'modifiedAddSourceDeps' are guaranteed to + -- be a subset of the keys of 'depsMap'. + let modifiedDeps = [ (modDepPath, fromJust $ M.lookup modDepPath depsMap) + | modDepPath <- modifiedAddSourceDeps ] + modifiedDepsMap = M.fromList modifiedDeps + + assert (all (`S.member` allAddSourceDepsSet) modifiedAddSourceDeps) (return ()) + if (null modifiedDeps) + then info verbosity $ "Found no modified add-source deps." + else notice verbosity $ "Some add-source dependencies have been modified. " + ++ "They will be reinstalled..." + + -- Get the package ids of the remaining add-source deps (some are possibly not + -- installed). + let otherDeps = M.assocs (depsMap `M.difference` modifiedDepsMap) + + -- Finally, assemble a 'SandboxPackageInfo'. + cont $ SandboxPackageInfo (map toSourcePackage modifiedDeps) + (map toSourcePackage otherDeps) installedPkgIndex allAddSourceDepsSet + + where + toSourcePackage (path, pkgDesc) = SourcePackage + (packageId pkgDesc) pkgDesc (LocalUnpackedPackage path) Nothing + +-- | Same as 'withSandboxPackageInfo' if we're inside a sandbox and a no-op +-- otherwise. 
+maybeWithSandboxPackageInfo :: Verbosity -> ConfigFlags -> GlobalFlags + -> Compiler -> Platform -> ProgramConfiguration + -> UseSandbox + -> (Maybe SandboxPackageInfo -> IO ()) + -> IO () +maybeWithSandboxPackageInfo verbosity configFlags globalFlags + comp platform conf useSandbox cont = + case useSandbox of + NoSandbox -> cont Nothing + UseSandbox sandboxDir -> withSandboxPackageInfo verbosity + configFlags globalFlags + comp platform conf sandboxDir + (\spi -> cont (Just spi)) + +-- | Check if a sandbox is present and call @reinstallAddSourceDeps@ in that +-- case. +maybeReinstallAddSourceDeps :: Verbosity + -> Flag (Maybe Int) -- ^ The '-j' flag + -> ConfigFlags -- ^ Saved configure flags + -- (from dist/setup-config) + -> GlobalFlags + -> IO (UseSandbox, SavedConfig + ,WereDepsReinstalled) +maybeReinstallAddSourceDeps verbosity numJobsFlag configFlags' globalFlags' = do + (useSandbox, config) <- loadConfigOrSandboxConfig verbosity globalFlags' + (configUserInstall configFlags') + case useSandbox of + NoSandbox -> return (NoSandbox, config, NoDepsReinstalled) + UseSandbox sandboxDir -> do + -- Reinstall the modified add-source deps. + let configFlags = savedConfigureFlags config + `mappendSomeSavedFlags` + configFlags' + configExFlags = defaultConfigExFlags + `mappend` savedConfigureExFlags config + installFlags' = defaultInstallFlags + `mappend` savedInstallFlags config + installFlags = installFlags' { + installNumJobs = installNumJobs installFlags' + `mappend` numJobsFlag + } + globalFlags = savedGlobalFlags config + -- This makes it possible to override things like 'remote-repo-cache' + -- from the command line. These options are hidden, and are only + -- useful for debugging, so this should be fine. 
+ `mappend` globalFlags' + depsReinstalled <- reinstallAddSourceDeps verbosity + configFlags configExFlags installFlags globalFlags + sandboxDir + return (UseSandbox sandboxDir, config, depsReinstalled) + + where + + -- NOTE: we can't simply do @sandboxConfigFlags `mappend` savedFlags@ + -- because we don't want to auto-enable things like 'library-profiling' for + -- all add-source dependencies even if the user has passed + -- '--enable-library-profiling' to 'cabal configure'. These options are + -- supposed to be set in 'cabal.config'. + mappendSomeSavedFlags :: ConfigFlags -> ConfigFlags -> ConfigFlags + mappendSomeSavedFlags sandboxConfigFlags savedFlags = + sandboxConfigFlags { + configHcFlavor = configHcFlavor sandboxConfigFlags + `mappend` configHcFlavor savedFlags, + configHcPath = configHcPath sandboxConfigFlags + `mappend` configHcPath savedFlags, + configHcPkg = configHcPkg sandboxConfigFlags + `mappend` configHcPkg savedFlags, + configProgramPaths = configProgramPaths sandboxConfigFlags + `mappend` configProgramPaths savedFlags, + configProgramArgs = configProgramArgs sandboxConfigFlags + `mappend` configProgramArgs savedFlags, + -- NOTE: Unconditionally choosing the value from + -- 'dist/setup-config'. Sandbox package DB location may have been + -- changed by 'configure -w'. + configPackageDBs = configPackageDBs savedFlags + -- FIXME: Is this compatible with the 'inherit' feature? 
+ } + +-- +-- Utils (transitionary) +-- +-- FIXME: configPackageDB' and configCompilerAux' don't really belong in this +-- module +-- + +configPackageDB' :: ConfigFlags -> PackageDBStack +configPackageDB' cfg = + interpretPackageDbFlags userInstall (configPackageDBs cfg) + where + userInstall = fromFlagOrDefault True (configUserInstall cfg) + +configCompilerAux' :: ConfigFlags + -> IO (Compiler, Platform, ProgramConfiguration) +configCompilerAux' configFlags = + configCompilerAuxEx configFlags + --FIXME: make configCompilerAux use a sensible verbosity + { configVerbosity = fmap lessVerbose (configVerbosity configFlags) } diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Setup.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Setup.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Setup.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Setup.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,2155 @@ +{-# LANGUAGE CPP #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.Setup +-- Copyright : (c) David Himmelstrup 2005 +-- License : BSD-like +-- +-- Maintainer : lemmih@gmail.com +-- Stability : provisional +-- Portability : portable +-- +-- +----------------------------------------------------------------------------- +module Distribution.Client.Setup + ( globalCommand, GlobalFlags(..), defaultGlobalFlags, globalRepos + , configureCommand, ConfigFlags(..), filterConfigureFlags + , configureExCommand, ConfigExFlags(..), defaultConfigExFlags + , configureExOptions + , buildCommand, BuildFlags(..), BuildExFlags(..), SkipAddSourceDepsCheck(..) + , replCommand, testCommand, benchmarkCommand + , installCommand, InstallFlags(..), installOptions, defaultInstallFlags + , listCommand, ListFlags(..) 
+ , updateCommand + , upgradeCommand + , infoCommand, InfoFlags(..) + , fetchCommand, FetchFlags(..) + , freezeCommand, FreezeFlags(..) + , getCommand, unpackCommand, GetFlags(..) + , checkCommand + , formatCommand + , uploadCommand, UploadFlags(..) + , reportCommand, ReportFlags(..) + , runCommand + , initCommand, IT.InitFlags(..) + , sdistCommand, SDistFlags(..), SDistExFlags(..), ArchiveFormat(..) + , win32SelfUpgradeCommand, Win32SelfUpgradeFlags(..) + , sandboxCommand, defaultSandboxLocation, SandboxFlags(..) + , execCommand, ExecFlags(..) + , userConfigCommand, UserConfigFlags(..) + + , parsePackageArgs + --TODO: stop exporting these: + , showRepo + , parseRepo + ) where + +import Distribution.Client.Types + ( Username(..), Password(..), Repo(..), RemoteRepo(..), LocalRepo(..) ) +import Distribution.Client.BuildReports.Types + ( ReportLevel(..) ) +import Distribution.Client.Dependency.Types + ( AllowNewer(..), PreSolver(..) ) +import qualified Distribution.Client.Init.Types as IT + ( InitFlags(..), PackageType(..) ) +import Distribution.Client.Targets + ( UserConstraint, readUserConstraint ) +import Distribution.Utils.NubList + ( NubList, toNubList, fromNubList) + +import Distribution.Simple.Compiler (PackageDB) +import Distribution.Simple.Program + ( defaultProgramConfiguration ) +import Distribution.Simple.Command hiding (boolOpt, boolOpt') +import qualified Distribution.Simple.Command as Command +import qualified Distribution.Simple.Setup as Cabal +import Distribution.Simple.Setup + ( ConfigFlags(..), BuildFlags(..), ReplFlags + , TestFlags(..), BenchmarkFlags(..) + , SDistFlags(..), HaddockFlags(..) 
+ , readPackageDbList, showPackageDbList + , Flag(..), toFlag, fromFlag, flagToMaybe, flagToList + , optionVerbosity, boolOpt, boolOpt', trueArg, falseArg, optionNumJobs ) +import Distribution.Simple.InstallDirs + ( PathTemplate, InstallDirs(sysconfdir) + , toPathTemplate, fromPathTemplate ) +import Distribution.Version + ( Version(Version), anyVersion, thisVersion ) +import Distribution.Package + ( PackageIdentifier, packageName, packageVersion, Dependency(..) ) +import Distribution.PackageDescription + ( RepoKind(..) ) +import Distribution.Text + ( Text(..), display ) +import Distribution.ReadE + ( ReadE(..), readP_to_E, succeedReadE ) +import qualified Distribution.Compat.ReadP as Parse + ( ReadP, readP_to_S, readS_to_P, char, munch1, pfail, sepBy1, (+++) ) +import Distribution.Verbosity + ( Verbosity, normal ) +import Distribution.Simple.Utils + ( wrapText, wrapLine ) + +import Data.Char + ( isSpace, isAlphaNum ) +import Data.List + ( intercalate, delete, deleteFirstsBy ) +import Data.Maybe + ( listToMaybe, maybeToList, fromMaybe ) +#if !MIN_VERSION_base(4,8,0) +import Data.Monoid + ( Monoid(..) ) +#endif +import Control.Monad + ( liftM ) +import System.FilePath + ( () ) +import Network.URI + ( parseAbsoluteURI, uriToString ) + +-- ------------------------------------------------------------ +-- * Global flags +-- ------------------------------------------------------------ + +-- | Flags that apply at the top level, not to any sub-command. +data GlobalFlags = GlobalFlags { + globalVersion :: Flag Bool, + globalNumericVersion :: Flag Bool, + globalConfigFile :: Flag FilePath, + globalSandboxConfigFile :: Flag FilePath, + globalRemoteRepos :: NubList RemoteRepo, -- ^ Available Hackage servers. 
+ globalCacheDir :: Flag FilePath, + globalLocalRepos :: NubList FilePath, + globalLogsDir :: Flag FilePath, + globalWorldFile :: Flag FilePath, + globalRequireSandbox :: Flag Bool, + globalIgnoreSandbox :: Flag Bool + } + +defaultGlobalFlags :: GlobalFlags +defaultGlobalFlags = GlobalFlags { + globalVersion = Flag False, + globalNumericVersion = Flag False, + globalConfigFile = mempty, + globalSandboxConfigFile = mempty, + globalRemoteRepos = mempty, + globalCacheDir = mempty, + globalLocalRepos = mempty, + globalLogsDir = mempty, + globalWorldFile = mempty, + globalRequireSandbox = Flag False, + globalIgnoreSandbox = Flag False + } + +globalCommand :: [Command action] -> CommandUI GlobalFlags +globalCommand commands = CommandUI { + commandName = "", + commandSynopsis = + "Command line interface to the Haskell Cabal infrastructure.", + commandUsage = \pname -> + "See http://www.haskell.org/cabal/ for more information.\n" + ++ "\n" + ++ "Usage: " ++ pname ++ " [GLOBAL FLAGS] [COMMAND [FLAGS]]\n", + commandDescription = Just $ \pname -> + let + commands' = commands ++ [commandAddAction helpCommandUI undefined] + cmdDescs = getNormalCommandDescriptions commands' + -- if new commands are added, we want them to appear even if they + -- are not included in the custom listing below. Thus, we calculate + -- the `otherCmds` list and append it under the `other` category. + -- Alternatively, a new testcase could be added that ensures that + -- the set of commands listed here is equal to the set of commands + -- that are actually available. 
+ otherCmds = deleteFirstsBy (==) (map fst cmdDescs) + [ "help" + , "update" + , "install" + , "fetch" + , "list" + , "info" + , "user-config" + , "get" + , "init" + , "configure" + , "build" + , "clean" + , "run" + , "repl" + , "test" + , "bench" + , "check" + , "sdist" + , "upload" + , "report" + , "freeze" + , "haddock" + , "hscolour" + , "copy" + , "register" + , "sandbox" + , "exec" + ] + maxlen = maximum $ [length name | (name, _) <- cmdDescs] + align str = str ++ replicate (maxlen - length str) ' ' + startGroup n = " ["++n++"]" + par = "" + addCmd n = case lookup n cmdDescs of + Nothing -> "" + Just d -> " " ++ align n ++ " " ++ d + addCmdCustom n d = case lookup n cmdDescs of -- make sure that the + -- command still exists. + Nothing -> "" + Just _ -> " " ++ align n ++ " " ++ d + in + "Commands:\n" + ++ unlines ( + [ startGroup "global" + , addCmd "update" + , addCmd "install" + , par + , addCmd "help" + , addCmd "info" + , addCmd "list" + , addCmd "fetch" + , addCmd "user-config" + , par + , startGroup "package" + , addCmd "get" + , addCmd "init" + , par + , addCmd "configure" + , addCmd "build" + , addCmd "clean" + , par + , addCmd "run" + , addCmd "repl" + , addCmd "test" + , addCmd "bench" + , par + , addCmd "check" + , addCmd "sdist" + , addCmd "upload" + , addCmd "report" + , par + , addCmd "freeze" + , addCmd "haddock" + , addCmd "hscolour" + , addCmd "copy" + , addCmd "register" + , par + , startGroup "sandbox" + , addCmd "sandbox" + , addCmd "exec" + , addCmdCustom "repl" "Open interpreter with access to sandbox packages." 
+          ] ++ if null otherCmds then [] else par
+                                                :startGroup "other"
+                                                :[addCmd n | n <- otherCmds])
+      ++ "\n"
+      ++ "For more information about a command use:\n"
+      ++ "  " ++ pname ++ " COMMAND --help\n"
+      ++ "or " ++ pname ++ " help COMMAND\n"
+      ++ "\n"
+      ++ "To install Cabal packages from hackage use:\n"
+      ++ "  " ++ pname ++ " install foo [--dry-run]\n"
+      ++ "\n"
+      ++ "Occasionally you need to update the list of available packages:\n"
+      ++ "  " ++ pname ++ " update\n",
+  commandNotes = Nothing,
+  commandDefaultFlags = mempty,
+  commandOptions = \showOrParseArgs ->
+    (case showOrParseArgs of ShowArgs -> take 6; ParseArgs -> id)
+    [option ['V'] ["version"]
+       "Print version information"
+       globalVersion (\v flags -> flags { globalVersion = v })
+       trueArg
+
+    ,option [] ["numeric-version"]
+       "Print just the version number"
+       globalNumericVersion (\v flags -> flags { globalNumericVersion = v })
+       trueArg
+
+    ,option [] ["config-file"]
+       "Set an alternate location for the config file"
+       globalConfigFile (\v flags -> flags { globalConfigFile = v })
+       (reqArgFlag "FILE")
+
+    ,option [] ["sandbox-config-file"]
+       "Set an alternate location for the sandbox config file (default: './cabal.sandbox.config')"
+       globalSandboxConfigFile (\v flags -> flags { globalSandboxConfigFile = v }) {- FIX: getter was 'globalConfigFile', so the option read its current value from the wrong field; getter and setter must both use 'globalSandboxConfigFile' -}
+       (reqArgFlag "FILE")
+
+    ,option [] ["require-sandbox"]
+       "requiring the presence of a sandbox for sandbox-aware commands"
+       globalRequireSandbox (\v flags -> flags { globalRequireSandbox = v })
+       (boolOpt' ([], ["require-sandbox"]) ([], ["no-require-sandbox"]))
+
+    ,option [] ["ignore-sandbox"]
+       "Ignore any existing sandbox"
+       globalIgnoreSandbox (\v flags -> flags { globalIgnoreSandbox = v })
+       trueArg
+
+    ,option [] ["remote-repo"]
+       "The name and url for a remote repository"
+       globalRemoteRepos (\v flags -> flags { globalRemoteRepos = v })
+       (reqArg' "NAME:URL" (toNubList . maybeToList . readRepo) (map showRepo . 
fromNubList)) + + ,option [] ["remote-repo-cache"] + "The location where downloads from all remote repos are cached" + globalCacheDir (\v flags -> flags { globalCacheDir = v }) + (reqArgFlag "DIR") + + ,option [] ["local-repo"] + "The location of a local repository" + globalLocalRepos (\v flags -> flags { globalLocalRepos = v }) + (reqArg' "DIR" (\x -> toNubList [x]) fromNubList) + + ,option [] ["logs-dir"] + "The location to put log files" + globalLogsDir (\v flags -> flags { globalLogsDir = v }) + (reqArgFlag "DIR") + + ,option [] ["world-file"] + "The location of the world file" + globalWorldFile (\v flags -> flags { globalWorldFile = v }) + (reqArgFlag "FILE") + ] + } + +instance Monoid GlobalFlags where + mempty = GlobalFlags { + globalVersion = mempty, + globalNumericVersion = mempty, + globalConfigFile = mempty, + globalSandboxConfigFile = mempty, + globalRemoteRepos = mempty, + globalCacheDir = mempty, + globalLocalRepos = mempty, + globalLogsDir = mempty, + globalWorldFile = mempty, + globalRequireSandbox = mempty, + globalIgnoreSandbox = mempty + } + mappend a b = GlobalFlags { + globalVersion = combine globalVersion, + globalNumericVersion = combine globalNumericVersion, + globalConfigFile = combine globalConfigFile, + globalSandboxConfigFile = combine globalSandboxConfigFile, + globalRemoteRepos = combine globalRemoteRepos, + globalCacheDir = combine globalCacheDir, + globalLocalRepos = combine globalLocalRepos, + globalLogsDir = combine globalLogsDir, + globalWorldFile = combine globalWorldFile, + globalRequireSandbox = combine globalRequireSandbox, + globalIgnoreSandbox = combine globalIgnoreSandbox + } + where combine field = field a `mappend` field b + +globalRepos :: GlobalFlags -> [Repo] +globalRepos globalFlags = remoteRepos ++ localRepos + where + remoteRepos = + [ Repo (Left remote) cacheDir + | remote <- fromNubList $ globalRemoteRepos globalFlags + , let cacheDir = fromFlag (globalCacheDir globalFlags) + </> remoteRepoName remote ] + localRepos = + [ 
Repo (Right LocalRepo) local + | local <- fromNubList $ globalLocalRepos globalFlags ] + +-- ------------------------------------------------------------ +-- * Config flags +-- ------------------------------------------------------------ + +configureCommand :: CommandUI ConfigFlags +configureCommand = (Cabal.configureCommand defaultProgramConfiguration) { + commandDefaultFlags = mempty + } + +configureOptions :: ShowOrParseArgs -> [OptionField ConfigFlags] +configureOptions = commandOptions configureCommand + +filterConfigureFlags :: ConfigFlags -> Version -> ConfigFlags +filterConfigureFlags flags cabalLibVersion + | cabalLibVersion >= Version [1,22,0] [] = flags_latest + -- ^ NB: we expect the latest version to be the most common case. + | cabalLibVersion < Version [1,3,10] [] = flags_1_3_10 + | cabalLibVersion < Version [1,10,0] [] = flags_1_10_0 + | cabalLibVersion < Version [1,14,0] [] = flags_1_14_0 + | cabalLibVersion < Version [1,18,0] [] = flags_1_18_0 + | cabalLibVersion < Version [1,19,1] [] = flags_1_19_0 + | cabalLibVersion < Version [1,19,2] [] = flags_1_19_1 + | cabalLibVersion < Version [1,21,1] [] = flags_1_20_0 + | cabalLibVersion < Version [1,22,0] [] = flags_1_21_0 + | otherwise = flags_latest + where + -- Cabal >= 1.19.1 uses '--dependency' and does not need '--constraint'. + flags_latest = flags { configConstraints = [] } + + -- Cabal < 1.22 doesn't know about '--disable-debug-info'. + flags_1_21_0 = flags_latest { configDebugInfo = NoFlag } + + -- Cabal < 1.21.1 doesn't know about 'disable-relocatable' + -- Cabal < 1.21.1 doesn't know about 'enable-profiling' + flags_1_20_0 = + flags_1_21_0 { configRelocatable = NoFlag + , configProfExe = configProfExe flags + , configProfLib = configProfLib flags + , configCoverage = NoFlag + , configLibCoverage = configCoverage flags + -- HACK: See #2409. 
+ , configProgramPaths = + ("cabalConfProf", "/TRUE") `delete` configProgramPaths flags + } + -- Cabal < 1.19.2 doesn't know about '--exact-configuration' and + -- '--enable-library-stripping'. + flags_1_19_1 = flags_1_20_0 { configExactConfiguration = NoFlag + , configStripLibs = NoFlag } + -- Cabal < 1.19.1 uses '--constraint' instead of '--dependency'. + flags_1_19_0 = flags_1_19_1 { configDependencies = [] + , configConstraints = configConstraints flags } + -- Cabal < 1.18.0 doesn't know about --extra-prog-path and --sysconfdir. + flags_1_18_0 = flags_1_19_0 { configProgramPathExtra = toNubList [] + , configInstallDirs = configInstallDirs_1_18_0} + configInstallDirs_1_18_0 = (configInstallDirs flags) { sysconfdir = NoFlag } + -- Cabal < 1.14.0 doesn't know about '--disable-benchmarks'. + flags_1_14_0 = flags_1_18_0 { configBenchmarks = NoFlag } + -- Cabal < 1.10.0 doesn't know about '--disable-tests'. + flags_1_10_0 = flags_1_14_0 { configTests = NoFlag } + -- Cabal < 1.3.10 does not grok the '--constraints' flag. 
+ flags_1_3_10 = flags_1_10_0 { configConstraints = [] } + +-- ------------------------------------------------------------ +-- * Config extra flags +-- ------------------------------------------------------------ + +-- | cabal configure takes some extra flags beyond runghc Setup configure +-- +data ConfigExFlags = ConfigExFlags { + configCabalVersion :: Flag Version, + configExConstraints:: [UserConstraint], + configPreferences :: [Dependency], + configSolver :: Flag PreSolver, + configAllowNewer :: Flag AllowNewer + } + +defaultConfigExFlags :: ConfigExFlags +defaultConfigExFlags = mempty { configSolver = Flag defaultSolver + , configAllowNewer = Flag AllowNewerNone } + +configureExCommand :: CommandUI (ConfigFlags, ConfigExFlags) +configureExCommand = configureCommand { + commandDefaultFlags = (mempty, defaultConfigExFlags), + commandOptions = \showOrParseArgs -> + liftOptions fst setFst + (filter ((`notElem` ["constraint", "dependency", "exact-configuration"]) + . optionName) $ configureOptions showOrParseArgs) + ++ liftOptions snd setSnd (configureExOptions showOrParseArgs) + } + where + setFst a (_,b) = (a,b) + setSnd b (a,_) = (a,b) + +configureExOptions :: ShowOrParseArgs -> [OptionField ConfigExFlags] +configureExOptions _showOrParseArgs = + [ option [] ["cabal-lib-version"] + ("Select which version of the Cabal lib to use to build packages " + ++ "(useful for testing).") + configCabalVersion (\v flags -> flags { configCabalVersion = v }) + (reqArg "VERSION" (readP_to_E ("Cannot parse cabal lib version: "++) + (fmap toFlag parse)) + (map display . 
flagToList)) + , option [] ["constraint"] + "Specify constraints on a package (version, installed/source, flags)" + configExConstraints (\v flags -> flags { configExConstraints = v }) + (reqArg "CONSTRAINT" + (fmap (\x -> [x]) (ReadE readUserConstraint)) + (map display)) + + , option [] ["preference"] + "Specify preferences (soft constraints) on the version of a package" + configPreferences (\v flags -> flags { configPreferences = v }) + (reqArg "CONSTRAINT" + (readP_to_E (const "dependency expected") + (fmap (\x -> [x]) parse)) + (map display)) + + , optionSolver configSolver (\v flags -> flags { configSolver = v }) + + , option [] ["allow-newer"] + ("Ignore upper bounds in all dependencies or " ++ allowNewerArgument) + configAllowNewer (\v flags -> flags { configAllowNewer = v}) + (optArg allowNewerArgument + (fmap Flag allowNewerParser) (Flag AllowNewerAll) + allowNewerPrinter) + + ] + where allowNewerArgument = "DEPS" + +instance Monoid ConfigExFlags where + mempty = ConfigExFlags { + configCabalVersion = mempty, + configExConstraints= mempty, + configPreferences = mempty, + configSolver = mempty, + configAllowNewer = mempty + } + mappend a b = ConfigExFlags { + configCabalVersion = combine configCabalVersion, + configExConstraints= combine configExConstraints, + configPreferences = combine configPreferences, + configSolver = combine configSolver, + configAllowNewer = combine configAllowNewer + } + where combine field = field a `mappend` field b + +-- ------------------------------------------------------------ +-- * Build flags +-- ------------------------------------------------------------ + +data SkipAddSourceDepsCheck = + SkipAddSourceDepsCheck | DontSkipAddSourceDepsCheck + deriving Eq + +data BuildExFlags = BuildExFlags { + buildOnly :: Flag SkipAddSourceDepsCheck +} + +buildExOptions :: ShowOrParseArgs -> [OptionField BuildExFlags] +buildExOptions _showOrParseArgs = + option [] ["only"] + "Don't reinstall add-source dependencies (sandbox-only)" + 
buildOnly (\v flags -> flags { buildOnly = v }) + (noArg (Flag SkipAddSourceDepsCheck)) + + : [] + +buildCommand :: CommandUI (BuildFlags, BuildExFlags) +buildCommand = parent { + commandDefaultFlags = (commandDefaultFlags parent, mempty), + commandOptions = + \showOrParseArgs -> liftOptions fst setFst + (commandOptions parent showOrParseArgs) + ++ + liftOptions snd setSnd (buildExOptions showOrParseArgs) + } + where + setFst a (_,b) = (a,b) + setSnd b (a,_) = (a,b) + + parent = Cabal.buildCommand defaultProgramConfiguration + +instance Monoid BuildExFlags where + mempty = BuildExFlags { + buildOnly = mempty + } + mappend a b = BuildExFlags { + buildOnly = combine buildOnly + } + where combine field = field a `mappend` field b + +-- ------------------------------------------------------------ +-- * Repl command +-- ------------------------------------------------------------ + +replCommand :: CommandUI (ReplFlags, BuildExFlags) +replCommand = parent { + commandDefaultFlags = (commandDefaultFlags parent, mempty), + commandOptions = + \showOrParseArgs -> liftOptions fst setFst + (commandOptions parent showOrParseArgs) + ++ + liftOptions snd setSnd (buildExOptions showOrParseArgs) + } + where + setFst a (_,b) = (a,b) + setSnd b (a,_) = (a,b) + + parent = Cabal.replCommand defaultProgramConfiguration + +-- ------------------------------------------------------------ +-- * Test command +-- ------------------------------------------------------------ + +testCommand :: CommandUI (TestFlags, BuildFlags, BuildExFlags) +testCommand = parent { + commandDefaultFlags = (commandDefaultFlags parent, + Cabal.defaultBuildFlags, mempty), + commandOptions = + \showOrParseArgs -> liftOptions get1 set1 + (commandOptions parent showOrParseArgs) + ++ + liftOptions get2 set2 + (Cabal.buildOptions progConf showOrParseArgs) + ++ + liftOptions get3 set3 (buildExOptions showOrParseArgs) + } + where + get1 (a,_,_) = a; set1 a (_,b,c) = (a,b,c) + get2 (_,b,_) = b; set2 b (a,_,c) = (a,b,c) + 
get3 (_,_,c) = c; set3 c (a,b,_) = (a,b,c) + + parent = Cabal.testCommand + progConf = defaultProgramConfiguration + +-- ------------------------------------------------------------ +-- * Bench command +-- ------------------------------------------------------------ + +benchmarkCommand :: CommandUI (BenchmarkFlags, BuildFlags, BuildExFlags) +benchmarkCommand = parent { + commandDefaultFlags = (commandDefaultFlags parent, + Cabal.defaultBuildFlags, mempty), + commandOptions = + \showOrParseArgs -> liftOptions get1 set1 + (commandOptions parent showOrParseArgs) + ++ + liftOptions get2 set2 + (Cabal.buildOptions progConf showOrParseArgs) + ++ + liftOptions get3 set3 (buildExOptions showOrParseArgs) + } + where + get1 (a,_,_) = a; set1 a (_,b,c) = (a,b,c) + get2 (_,b,_) = b; set2 b (a,_,c) = (a,b,c) + get3 (_,_,c) = c; set3 c (a,b,_) = (a,b,c) + + parent = Cabal.benchmarkCommand + progConf = defaultProgramConfiguration + +-- ------------------------------------------------------------ +-- * Fetch command +-- ------------------------------------------------------------ + +data FetchFlags = FetchFlags { +-- fetchOutput :: Flag FilePath, + fetchDeps :: Flag Bool, + fetchDryRun :: Flag Bool, + fetchSolver :: Flag PreSolver, + fetchMaxBackjumps :: Flag Int, + fetchReorderGoals :: Flag Bool, + fetchIndependentGoals :: Flag Bool, + fetchShadowPkgs :: Flag Bool, + fetchStrongFlags :: Flag Bool, + fetchVerbosity :: Flag Verbosity + } + +defaultFetchFlags :: FetchFlags +defaultFetchFlags = FetchFlags { +-- fetchOutput = mempty, + fetchDeps = toFlag True, + fetchDryRun = toFlag False, + fetchSolver = Flag defaultSolver, + fetchMaxBackjumps = Flag defaultMaxBackjumps, + fetchReorderGoals = Flag False, + fetchIndependentGoals = Flag False, + fetchShadowPkgs = Flag False, + fetchStrongFlags = Flag False, + fetchVerbosity = toFlag normal + } + +fetchCommand :: CommandUI FetchFlags +fetchCommand = CommandUI { + commandName = "fetch", + commandSynopsis = "Downloads packages for later 
installation.", + commandUsage = usageAlternatives "fetch" [ "[FLAGS] PACKAGES" + ], + commandDescription = Just $ \_ -> + "Note that it currently is not possible to fetch the dependencies for a\n" + ++ "package in the current directory.\n", + commandNotes = Nothing, + commandDefaultFlags = defaultFetchFlags, + commandOptions = \ showOrParseArgs -> [ + optionVerbosity fetchVerbosity (\v flags -> flags { fetchVerbosity = v }) + +-- , option "o" ["output"] +-- "Put the package(s) somewhere specific rather than the usual cache." +-- fetchOutput (\v flags -> flags { fetchOutput = v }) +-- (reqArgFlag "PATH") + + , option [] ["dependencies", "deps"] + "Resolve and fetch dependencies (default)" + fetchDeps (\v flags -> flags { fetchDeps = v }) + trueArg + + , option [] ["no-dependencies", "no-deps"] + "Ignore dependencies" + fetchDeps (\v flags -> flags { fetchDeps = v }) + falseArg + + , option [] ["dry-run"] + "Do not install anything, only print what would be installed." + fetchDryRun (\v flags -> flags { fetchDryRun = v }) + trueArg + + ] ++ + + optionSolver fetchSolver (\v flags -> flags { fetchSolver = v }) : + optionSolverFlags showOrParseArgs + fetchMaxBackjumps (\v flags -> flags { fetchMaxBackjumps = v }) + fetchReorderGoals (\v flags -> flags { fetchReorderGoals = v }) + fetchIndependentGoals (\v flags -> flags { fetchIndependentGoals = v }) + fetchShadowPkgs (\v flags -> flags { fetchShadowPkgs = v }) + fetchStrongFlags (\v flags -> flags { fetchStrongFlags = v }) + + } + +-- ------------------------------------------------------------ +-- * Freeze command +-- ------------------------------------------------------------ + +data FreezeFlags = FreezeFlags { + freezeDryRun :: Flag Bool, + freezeTests :: Flag Bool, + freezeBenchmarks :: Flag Bool, + freezeSolver :: Flag PreSolver, + freezeMaxBackjumps :: Flag Int, + freezeReorderGoals :: Flag Bool, + freezeIndependentGoals :: Flag Bool, + freezeShadowPkgs :: Flag Bool, + freezeStrongFlags :: Flag Bool, + 
freezeVerbosity :: Flag Verbosity + } + +defaultFreezeFlags :: FreezeFlags +defaultFreezeFlags = FreezeFlags { + freezeDryRun = toFlag False, + freezeTests = toFlag False, + freezeBenchmarks = toFlag False, + freezeSolver = Flag defaultSolver, + freezeMaxBackjumps = Flag defaultMaxBackjumps, + freezeReorderGoals = Flag False, + freezeIndependentGoals = Flag False, + freezeShadowPkgs = Flag False, + freezeStrongFlags = Flag False, + freezeVerbosity = toFlag normal + } + +freezeCommand :: CommandUI FreezeFlags +freezeCommand = CommandUI { + commandName = "freeze", + commandSynopsis = "Freeze dependencies.", + commandDescription = Just $ \_ -> wrapText $ + "Calculates a valid set of dependencies and their exact versions. " + ++ "If successful, saves the result to the file `cabal.config`.\n" + ++ "\n" + ++ "The package versions specified in `cabal.config` will be used for " + ++ "any future installs.\n" + ++ "\n" + ++ "An existing `cabal.config` is ignored and overwritten.\n", + commandNotes = Nothing, + commandUsage = usageAlternatives "freeze" ["" + ,"PACKAGES" + ], + commandDefaultFlags = defaultFreezeFlags, + commandOptions = \ showOrParseArgs -> [ + optionVerbosity freezeVerbosity (\v flags -> flags { freezeVerbosity = v }) + + , option [] ["dry-run"] + "Do not freeze anything, only print what would be frozen" + freezeDryRun (\v flags -> flags { freezeDryRun = v }) + trueArg + + , option [] ["tests"] + "freezing of the dependencies of any tests suites in the package description file." + freezeTests (\v flags -> flags { freezeTests = v }) + (boolOpt [] []) + + , option [] ["benchmarks"] + "freezing of the dependencies of any benchmarks suites in the package description file." 
+ freezeBenchmarks (\v flags -> flags { freezeBenchmarks = v }) + (boolOpt [] []) + + ] ++ + + optionSolver freezeSolver (\v flags -> flags { freezeSolver = v }) : + optionSolverFlags showOrParseArgs + freezeMaxBackjumps (\v flags -> flags { freezeMaxBackjumps = v }) + freezeReorderGoals (\v flags -> flags { freezeReorderGoals = v }) + freezeIndependentGoals (\v flags -> flags { freezeIndependentGoals = v }) + freezeShadowPkgs (\v flags -> flags { freezeShadowPkgs = v }) + freezeStrongFlags (\v flags -> flags { freezeStrongFlags = v }) + + } + +-- ------------------------------------------------------------ +-- * Other commands +-- ------------------------------------------------------------ + +updateCommand :: CommandUI (Flag Verbosity) +updateCommand = CommandUI { + commandName = "update", + commandSynopsis = "Updates list of known packages.", + commandDescription = Just $ \_ -> + "For all known remote repositories, download the package list.\n", + commandNotes = Just $ \_ -> + relevantConfigValuesText ["remote-repo" + ,"remote-repo-cache" + ,"local-repo"], + commandUsage = usageFlags "update", + commandDefaultFlags = toFlag normal, + commandOptions = \_ -> [optionVerbosity id const] + } + +upgradeCommand :: CommandUI (ConfigFlags, ConfigExFlags, InstallFlags, HaddockFlags) +upgradeCommand = configureCommand { + commandName = "upgrade", + commandSynopsis = "(command disabled, use install instead)", + commandDescription = Nothing, + commandUsage = usageFlagsOrPackages "upgrade", + commandDefaultFlags = (mempty, mempty, mempty, mempty), + commandOptions = commandOptions installCommand + } + +{- +cleanCommand :: CommandUI () +cleanCommand = makeCommand name shortDesc longDesc emptyFlags options + where + name = "clean" + shortDesc = "Removes downloaded files" + longDesc = Nothing + emptyFlags = () + options _ = [] +-} + +checkCommand :: CommandUI (Flag Verbosity) +checkCommand = CommandUI { + commandName = "check", + commandSynopsis = "Check the package for common 
mistakes.", + commandDescription = Just $ \_ -> wrapText $ + "Expects a .cabal package file in the current directory.\n" + ++ "\n" + ++ "The checks correspond to the requirements to packages on Hackage. " + ++ "If no errors and warnings are reported, Hackage will accept this " + ++ "package.\n", + commandNotes = Nothing, + commandUsage = \pname -> "Usage: " ++ pname ++ " check\n", + commandDefaultFlags = toFlag normal, + commandOptions = \_ -> [] + } + +formatCommand :: CommandUI (Flag Verbosity) +formatCommand = CommandUI { + commandName = "format", + commandSynopsis = "Reformat the .cabal file using the standard style.", + commandDescription = Nothing, + commandNotes = Nothing, + commandUsage = usageAlternatives "format" ["[FILE]"], + commandDefaultFlags = toFlag normal, + commandOptions = \_ -> [] + } + +runCommand :: CommandUI (BuildFlags, BuildExFlags) +runCommand = CommandUI { + commandName = "run", + commandSynopsis = "Builds and runs an executable.", + commandDescription = Just $ \_ -> wrapText $ + "Builds and then runs the specified executable. 
If no executable is " + ++ "specified, but the package contains just one executable, that one " + ++ "is built and executed.\n", + commandNotes = Nothing, + commandUsage = usageAlternatives "run" + ["[FLAGS] [EXECUTABLE] [-- EXECUTABLE_FLAGS]"], + commandDefaultFlags = mempty, + commandOptions = + \showOrParseArgs -> liftOptions fst setFst + (commandOptions parent showOrParseArgs) + ++ + liftOptions snd setSnd + (buildExOptions showOrParseArgs) + } + where + setFst a (_,b) = (a,b) + setSnd b (a,_) = (a,b) + + parent = Cabal.buildCommand defaultProgramConfiguration + +-- ------------------------------------------------------------ +-- * Report flags +-- ------------------------------------------------------------ + +data ReportFlags = ReportFlags { + reportUsername :: Flag Username, + reportPassword :: Flag Password, + reportVerbosity :: Flag Verbosity + } + +defaultReportFlags :: ReportFlags +defaultReportFlags = ReportFlags { + reportUsername = mempty, + reportPassword = mempty, + reportVerbosity = toFlag normal + } + +reportCommand :: CommandUI ReportFlags +reportCommand = CommandUI { + commandName = "report", + commandSynopsis = "Upload build reports to a remote server.", + commandDescription = Nothing, + commandNotes = Just $ \_ -> + "You can store your Hackage login in the ~/.cabal/config file\n", + commandUsage = usageAlternatives "report" ["[FLAGS]"], + commandDefaultFlags = defaultReportFlags, + commandOptions = \_ -> + [optionVerbosity reportVerbosity (\v flags -> flags { reportVerbosity = v }) + + ,option ['u'] ["username"] + "Hackage username." + reportUsername (\v flags -> flags { reportUsername = v }) + (reqArg' "USERNAME" (toFlag . Username) + (flagToList . fmap unUsername)) + + ,option ['p'] ["password"] + "Hackage password." + reportPassword (\v flags -> flags { reportPassword = v }) + (reqArg' "PASSWORD" (toFlag . Password) + (flagToList . 
fmap unPassword)) + ] + } + +instance Monoid ReportFlags where + mempty = ReportFlags { + reportUsername = mempty, + reportPassword = mempty, + reportVerbosity = mempty + } + mappend a b = ReportFlags { + reportUsername = combine reportUsername, + reportPassword = combine reportPassword, + reportVerbosity = combine reportVerbosity + } + where combine field = field a `mappend` field b + +-- ------------------------------------------------------------ +-- * Get flags +-- ------------------------------------------------------------ + +data GetFlags = GetFlags { + getDestDir :: Flag FilePath, + getPristine :: Flag Bool, + getSourceRepository :: Flag (Maybe RepoKind), + getVerbosity :: Flag Verbosity + } + +defaultGetFlags :: GetFlags +defaultGetFlags = GetFlags { + getDestDir = mempty, + getPristine = mempty, + getSourceRepository = mempty, + getVerbosity = toFlag normal + } + +getCommand :: CommandUI GetFlags +getCommand = CommandUI { + commandName = "get", + commandSynopsis = "Download/Extract a package's source code (repository).", + commandDescription = Just $ \_ -> wrapText $ + "Creates a local copy of a package's source code. By default it gets " + ++ "the source\ntarball and unpacks it in a local subdirectory. " + ++ "Alternatively, with -s it will\nget the code from the source " + ++ "repository specified by the package.\n", + commandNotes = Nothing, + commandUsage = usagePackages "get", + commandDefaultFlags = defaultGetFlags, + commandOptions = \_ -> [ + optionVerbosity getVerbosity (\v flags -> flags { getVerbosity = v }) + + ,option "d" ["destdir"] + "Where to place the package source, defaults to the current directory." + getDestDir (\v flags -> flags { getDestDir = v }) + (reqArgFlag "PATH") + + ,option "s" ["source-repository"] + "Copy the package's source repository (ie git clone, darcs get, etc as appropriate)." 
+ getSourceRepository (\v flags -> flags { getSourceRepository = v }) + (optArg "[head|this|...]" (readP_to_E (const "invalid source-repository") + (fmap (toFlag . Just) parse)) + (Flag Nothing) + (map (fmap show) . flagToList)) + + , option [] ["pristine"] + ("Unpack the original pristine tarball, rather than updating the " + ++ ".cabal file with the latest revision from the package archive.") + getPristine (\v flags -> flags { getPristine = v }) + trueArg + ] + } + +-- 'cabal unpack' is a deprecated alias for 'cabal get'. +unpackCommand :: CommandUI GetFlags +unpackCommand = getCommand { + commandName = "unpack", + commandUsage = usagePackages "unpack" + } + +instance Monoid GetFlags where + mempty = GetFlags { + getDestDir = mempty, + getPristine = mempty, + getSourceRepository = mempty, + getVerbosity = mempty + } + mappend a b = GetFlags { + getDestDir = combine getDestDir, + getPristine = combine getPristine, + getSourceRepository = combine getSourceRepository, + getVerbosity = combine getVerbosity + } + where combine field = field a `mappend` field b + +-- ------------------------------------------------------------ +-- * List flags +-- ------------------------------------------------------------ + +data ListFlags = ListFlags { + listInstalled :: Flag Bool, + listSimpleOutput :: Flag Bool, + listVerbosity :: Flag Verbosity, + listPackageDBs :: [Maybe PackageDB] + } + +defaultListFlags :: ListFlags +defaultListFlags = ListFlags { + listInstalled = Flag False, + listSimpleOutput = Flag False, + listVerbosity = toFlag normal, + listPackageDBs = [] + } + +listCommand :: CommandUI ListFlags +listCommand = CommandUI { + commandName = "list", + commandSynopsis = "List packages matching a search string.", + commandDescription = Just $ \_ -> wrapText $ + "List all packages, or all packages matching one of the search" + ++ " strings.\n" + ++ "\n" + ++ "If there is a sandbox in the current directory and " + ++ "config:ignore-sandbox is False, use the sandbox package 
database. " + ++ "Otherwise, use the package database specified with --package-db. " + ++ "If not specified, use the user package database.\n", + commandNotes = Nothing, + commandUsage = usageAlternatives "list" [ "[FLAGS]" + , "[FLAGS] STRINGS"], + commandDefaultFlags = defaultListFlags, + commandOptions = \_ -> [ + optionVerbosity listVerbosity (\v flags -> flags { listVerbosity = v }) + + , option [] ["installed"] + "Only print installed packages" + listInstalled (\v flags -> flags { listInstalled = v }) + trueArg + + , option [] ["simple-output"] + "Print in a easy-to-parse format" + listSimpleOutput (\v flags -> flags { listSimpleOutput = v }) + trueArg + + , option "" ["package-db"] + "Use a given package database. May be a specific file, 'global', 'user' or 'clear'." + listPackageDBs (\v flags -> flags { listPackageDBs = v }) + (reqArg' "DB" readPackageDbList showPackageDbList) + + ] + } + +instance Monoid ListFlags where + mempty = ListFlags { + listInstalled = mempty, + listSimpleOutput = mempty, + listVerbosity = mempty, + listPackageDBs = mempty + } + mappend a b = ListFlags { + listInstalled = combine listInstalled, + listSimpleOutput = combine listSimpleOutput, + listVerbosity = combine listVerbosity, + listPackageDBs = combine listPackageDBs + } + where combine field = field a `mappend` field b + +-- ------------------------------------------------------------ +-- * Info flags +-- ------------------------------------------------------------ + +data InfoFlags = InfoFlags { + infoVerbosity :: Flag Verbosity, + infoPackageDBs :: [Maybe PackageDB] + } + +defaultInfoFlags :: InfoFlags +defaultInfoFlags = InfoFlags { + infoVerbosity = toFlag normal, + infoPackageDBs = [] + } + +infoCommand :: CommandUI InfoFlags +infoCommand = CommandUI { + commandName = "info", + commandSynopsis = "Display detailed information about a particular package.", + commandDescription = Just $ \_ -> wrapText $ + "If there is a sandbox in the current directory and " + ++ 
"config:ignore-sandbox is False, use the sandbox package database. " + ++ "Otherwise, use the package database specified with --package-db. " + ++ "If not specified, use the user package database.\n", + commandNotes = Nothing, + commandUsage = usageAlternatives "info" ["[FLAGS] PACKAGES"], + commandDefaultFlags = defaultInfoFlags, + commandOptions = \_ -> [ + optionVerbosity infoVerbosity (\v flags -> flags { infoVerbosity = v }) + + , option "" ["package-db"] + "Use a given package database. May be a specific file, 'global', 'user' or 'clear'." + infoPackageDBs (\v flags -> flags { infoPackageDBs = v }) + (reqArg' "DB" readPackageDbList showPackageDbList) + + ] + } + +instance Monoid InfoFlags where + mempty = InfoFlags { + infoVerbosity = mempty, + infoPackageDBs = mempty + } + mappend a b = InfoFlags { + infoVerbosity = combine infoVerbosity, + infoPackageDBs = combine infoPackageDBs + } + where combine field = field a `mappend` field b + +-- ------------------------------------------------------------ +-- * Install flags +-- ------------------------------------------------------------ + +-- | Install takes the same flags as configure along with a few extras. 
+-- +data InstallFlags = InstallFlags { + installDocumentation :: Flag Bool, + installHaddockIndex :: Flag PathTemplate, + installDryRun :: Flag Bool, + installMaxBackjumps :: Flag Int, + installReorderGoals :: Flag Bool, + installIndependentGoals :: Flag Bool, + installShadowPkgs :: Flag Bool, + installStrongFlags :: Flag Bool, + installReinstall :: Flag Bool, + installAvoidReinstalls :: Flag Bool, + installOverrideReinstall :: Flag Bool, + installUpgradeDeps :: Flag Bool, + installOnly :: Flag Bool, + installOnlyDeps :: Flag Bool, + installRootCmd :: Flag String, + installSummaryFile :: NubList PathTemplate, + installLogFile :: Flag PathTemplate, + installBuildReports :: Flag ReportLevel, + installReportPlanningFailure :: Flag Bool, + installSymlinkBinDir :: Flag FilePath, + installOneShot :: Flag Bool, + installNumJobs :: Flag (Maybe Int), + installRunTests :: Flag Bool + } + +defaultInstallFlags :: InstallFlags +defaultInstallFlags = InstallFlags { + installDocumentation = Flag False, + installHaddockIndex = Flag docIndexFile, + installDryRun = Flag False, + installMaxBackjumps = Flag defaultMaxBackjumps, + installReorderGoals = Flag False, + installIndependentGoals= Flag False, + installShadowPkgs = Flag False, + installStrongFlags = Flag False, + installReinstall = Flag False, + installAvoidReinstalls = Flag False, + installOverrideReinstall = Flag False, + installUpgradeDeps = Flag False, + installOnly = Flag False, + installOnlyDeps = Flag False, + installRootCmd = mempty, + installSummaryFile = mempty, + installLogFile = mempty, + installBuildReports = Flag NoReports, + installReportPlanningFailure = Flag False, + installSymlinkBinDir = mempty, + installOneShot = Flag False, + installNumJobs = mempty, + installRunTests = mempty + } + where + docIndexFile = toPathTemplate ("$datadir" "doc" + "$arch-$os-$compiler" "index.html") + +allowNewerParser :: ReadE AllowNewer +allowNewerParser = ReadE $ \s -> + case s of + "" -> Right AllowNewerNone + "False" -> 
Right AllowNewerNone + "True" -> Right AllowNewerAll + _ -> + case readPToMaybe pkgsParser s of + Just pkgs -> Right . AllowNewerSome $ pkgs + Nothing -> Left ("Cannot parse the list of packages: " ++ s) + where + pkgsParser = Parse.sepBy1 parse (Parse.char ',') + +allowNewerPrinter :: Flag AllowNewer -> [Maybe String] +allowNewerPrinter (Flag AllowNewerNone) = [Just "False"] +allowNewerPrinter (Flag AllowNewerAll) = [Just "True"] +allowNewerPrinter (Flag (AllowNewerSome pkgs)) = + [Just . intercalate "," . map display $ pkgs] +allowNewerPrinter NoFlag = [] + + +defaultMaxBackjumps :: Int +defaultMaxBackjumps = 2000 + +defaultSolver :: PreSolver +defaultSolver = Choose + +allSolvers :: String +allSolvers = intercalate ", " (map display ([minBound .. maxBound] :: [PreSolver])) + +installCommand :: CommandUI (ConfigFlags, ConfigExFlags, InstallFlags, HaddockFlags) +installCommand = CommandUI { + commandName = "install", + commandSynopsis = "Install packages.", + commandUsage = usageAlternatives "install" [ "[FLAGS]" + , "[FLAGS] PACKAGES" + ], + commandDescription = Just $ \_ -> wrapText $ + "Installs one or more packages. By default, the installed package" + ++ " will be registered in the user's package database or, if a sandbox" + ++ " is present in the current directory, inside the sandbox.\n" + ++ "\n" + ++ "If PACKAGES are specified, downloads and installs those packages." + ++ " Otherwise, install the package in the current directory (and/or its" + ++ " dependencies) (there must be exactly one .cabal file in the current" + ++ " directory).\n" + ++ "\n" + ++ "When using a sandbox, the flags for `install` only affect the" + ++ " current command and have no effect on future commands. (To achieve" + ++ " that, `configure` must be used.)\n" + ++ " In contrast, without a sandbox, the flags to `install` are saved and" + ++ " affect future commands such as `build` and `repl`. 
See the help for" + ++ " `configure` for a list of commands being affected.\n", + commandNotes = Just $ \pname -> + ( case commandNotes configureCommand of + Just desc -> desc pname ++ "\n" + Nothing -> "" ) + ++ "Examples:\n" + ++ " " ++ pname ++ " install " + ++ " Package in the current directory\n" + ++ " " ++ pname ++ " install foo " + ++ " Package from the hackage server\n" + ++ " " ++ pname ++ " install foo-1.0 " + ++ " Specific version of a package\n" + ++ " " ++ pname ++ " install 'foo < 2' " + ++ " Constrained package version\n", + commandDefaultFlags = (mempty, mempty, mempty, mempty), + commandOptions = \showOrParseArgs -> + liftOptions get1 set1 + (filter ((`notElem` ["constraint", "dependency" + , "exact-configuration"]) + . optionName) $ + configureOptions showOrParseArgs) + ++ liftOptions get2 set2 (configureExOptions showOrParseArgs) + ++ liftOptions get3 set3 (installOptions showOrParseArgs) + ++ liftOptions get4 set4 (haddockOptions showOrParseArgs) + } + where + get1 (a,_,_,_) = a; set1 a (_,b,c,d) = (a,b,c,d) + get2 (_,b,_,_) = b; set2 b (a,_,c,d) = (a,b,c,d) + get3 (_,_,c,_) = c; set3 c (a,b,_,d) = (a,b,c,d) + get4 (_,_,_,d) = d; set4 d (a,b,c,_) = (a,b,c,d) + +haddockOptions :: ShowOrParseArgs -> [OptionField HaddockFlags] +haddockOptions showOrParseArgs + = [ opt { optionName = "haddock-" ++ name, + optionDescr = [ fmapOptFlags (\(_, lflags) -> ([], map ("haddock-" ++) lflags)) descr + | descr <- optionDescr opt] } + | opt <- commandOptions Cabal.haddockCommand showOrParseArgs + , let name = optionName opt + , name `elem` ["hoogle", "html", "html-location" + ,"executables", "tests", "benchmarks", "all", "internal", "css" + ,"hyperlink-source", "hscolour-css" + ,"contents-location"] + ] + where + fmapOptFlags :: (OptFlags -> OptFlags) -> OptDescr a -> OptDescr a + fmapOptFlags modify (ReqArg d f p r w) = ReqArg d (modify f) p r w + fmapOptFlags modify (OptArg d f p r i w) = OptArg d (modify f) p r i w + fmapOptFlags modify (ChoiceOpt xs) = 
ChoiceOpt [(d, modify f, i, w) | (d, f, i, w) <- xs] + fmapOptFlags modify (BoolOpt d f1 f2 r w) = BoolOpt d (modify f1) (modify f2) r w + +installOptions :: ShowOrParseArgs -> [OptionField InstallFlags] +installOptions showOrParseArgs = + [ option "" ["documentation"] + "building of documentation" + installDocumentation (\v flags -> flags { installDocumentation = v }) + (boolOpt [] []) + + , option [] ["doc-index-file"] + "A central index of haddock API documentation (template cannot use $pkgid)" + installHaddockIndex (\v flags -> flags { installHaddockIndex = v }) + (reqArg' "TEMPLATE" (toFlag.toPathTemplate) + (flagToList . fmap fromPathTemplate)) + + , option [] ["dry-run"] + "Do not install anything, only print what would be installed." + installDryRun (\v flags -> flags { installDryRun = v }) + trueArg + ] ++ + + optionSolverFlags showOrParseArgs + installMaxBackjumps (\v flags -> flags { installMaxBackjumps = v }) + installReorderGoals (\v flags -> flags { installReorderGoals = v }) + installIndependentGoals (\v flags -> flags { installIndependentGoals = v }) + installShadowPkgs (\v flags -> flags { installShadowPkgs = v }) + installStrongFlags (\v flags -> flags { installStrongFlags = v }) ++ + + [ option [] ["reinstall"] + "Install even if it means installing the same version again." + installReinstall (\v flags -> flags { installReinstall = v }) + (yesNoOpt showOrParseArgs) + + , option [] ["avoid-reinstalls"] + "Do not select versions that would destructively overwrite installed packages." + installAvoidReinstalls (\v flags -> flags { installAvoidReinstalls = v }) + (yesNoOpt showOrParseArgs) + + , option [] ["force-reinstalls"] + "Reinstall packages even if they will most likely break other installed packages." 
+ installOverrideReinstall (\v flags -> flags { installOverrideReinstall = v }) + (yesNoOpt showOrParseArgs) + + , option [] ["upgrade-dependencies"] + "Pick the latest version for all dependencies, rather than trying to pick an installed version." + installUpgradeDeps (\v flags -> flags { installUpgradeDeps = v }) + (yesNoOpt showOrParseArgs) + + , option [] ["only-dependencies"] + "Install only the dependencies necessary to build the given packages" + installOnlyDeps (\v flags -> flags { installOnlyDeps = v }) + (yesNoOpt showOrParseArgs) + + , option [] ["dependencies-only"] + "A synonym for --only-dependencies" + installOnlyDeps (\v flags -> flags { installOnlyDeps = v }) + (yesNoOpt showOrParseArgs) + + , option [] ["root-cmd"] + "Command used to gain root privileges, when installing with --global." + installRootCmd (\v flags -> flags { installRootCmd = v }) + (reqArg' "COMMAND" toFlag flagToList) + + , option [] ["symlink-bindir"] + "Add symlinks to installed executables into this directory." + installSymlinkBinDir (\v flags -> flags { installSymlinkBinDir = v }) + (reqArgFlag "DIR") + + , option [] ["build-summary"] + "Save build summaries to file (name template can use $pkgid, $compiler, $os, $arch)" + installSummaryFile (\v flags -> flags { installSummaryFile = v }) + (reqArg' "TEMPLATE" (\x -> toNubList [toPathTemplate x]) (map fromPathTemplate . fromNubList)) + + , option [] ["build-log"] + "Log all builds to file (name template can use $pkgid, $compiler, $os, $arch)" + installLogFile (\v flags -> flags { installLogFile = v }) + (reqArg' "TEMPLATE" (toFlag.toPathTemplate) + (flagToList . fmap fromPathTemplate)) + + , option [] ["remote-build-reporting"] + "Generate build reports to send to a remote server (none, anonymous or detailed)." + installBuildReports (\v flags -> flags { installBuildReports = v }) + (reqArg "LEVEL" (readP_to_E (const $ "report level must be 'none', " + ++ "'anonymous' or 'detailed'") + (toFlag `fmap` parse)) + (flagToList . 
fmap display)) + + , option [] ["report-planning-failure"] + "Generate build reports when the dependency solver fails. This is used by the Hackage build bot." + installReportPlanningFailure (\v flags -> flags { installReportPlanningFailure = v }) + trueArg + + , option [] ["one-shot"] + "Do not record the packages in the world file." + installOneShot (\v flags -> flags { installOneShot = v }) + (yesNoOpt showOrParseArgs) + + , option [] ["run-tests"] + "Run package test suites during installation." + installRunTests (\v flags -> flags { installRunTests = v }) + trueArg + + , optionNumJobs + installNumJobs (\v flags -> flags { installNumJobs = v }) + + ] ++ case showOrParseArgs of -- TODO: remove when "cabal install" + -- avoids + ParseArgs -> + [ option [] ["only"] + "Only installs the package in the current directory." + installOnly (\v flags -> flags { installOnly = v }) + trueArg ] + _ -> [] + + +instance Monoid InstallFlags where + mempty = InstallFlags { + installDocumentation = mempty, + installHaddockIndex = mempty, + installDryRun = mempty, + installReinstall = mempty, + installAvoidReinstalls = mempty, + installOverrideReinstall = mempty, + installMaxBackjumps = mempty, + installUpgradeDeps = mempty, + installReorderGoals = mempty, + installIndependentGoals= mempty, + installShadowPkgs = mempty, + installStrongFlags = mempty, + installOnly = mempty, + installOnlyDeps = mempty, + installRootCmd = mempty, + installSummaryFile = mempty, + installLogFile = mempty, + installBuildReports = mempty, + installReportPlanningFailure = mempty, + installSymlinkBinDir = mempty, + installOneShot = mempty, + installNumJobs = mempty, + installRunTests = mempty + } + mappend a b = InstallFlags { + installDocumentation = combine installDocumentation, + installHaddockIndex = combine installHaddockIndex, + installDryRun = combine installDryRun, + installReinstall = combine installReinstall, + installAvoidReinstalls = combine installAvoidReinstalls, + installOverrideReinstall = 
combine installOverrideReinstall, + installMaxBackjumps = combine installMaxBackjumps, + installUpgradeDeps = combine installUpgradeDeps, + installReorderGoals = combine installReorderGoals, + installIndependentGoals= combine installIndependentGoals, + installShadowPkgs = combine installShadowPkgs, + installStrongFlags = combine installStrongFlags, + installOnly = combine installOnly, + installOnlyDeps = combine installOnlyDeps, + installRootCmd = combine installRootCmd, + installSummaryFile = combine installSummaryFile, + installLogFile = combine installLogFile, + installBuildReports = combine installBuildReports, + installReportPlanningFailure = combine installReportPlanningFailure, + installSymlinkBinDir = combine installSymlinkBinDir, + installOneShot = combine installOneShot, + installNumJobs = combine installNumJobs, + installRunTests = combine installRunTests + } + where combine field = field a `mappend` field b + +-- ------------------------------------------------------------ +-- * Upload flags +-- ------------------------------------------------------------ + +data UploadFlags = UploadFlags { + uploadCheck :: Flag Bool, + uploadUsername :: Flag Username, + uploadPassword :: Flag Password, + uploadVerbosity :: Flag Verbosity + } + +defaultUploadFlags :: UploadFlags +defaultUploadFlags = UploadFlags { + uploadCheck = toFlag False, + uploadUsername = mempty, + uploadPassword = mempty, + uploadVerbosity = toFlag normal + } + +uploadCommand :: CommandUI UploadFlags +uploadCommand = CommandUI { + commandName = "upload", + commandSynopsis = "Uploads source packages to Hackage.", + commandDescription = Nothing, + commandNotes = Just $ \_ -> + "You can store your Hackage login in the ~/.cabal/config file\n" + ++ relevantConfigValuesText ["username", "password"], + commandUsage = \pname -> + "Usage: " ++ pname ++ " upload [FLAGS] TARFILES\n", + commandDefaultFlags = defaultUploadFlags, + commandOptions = \_ -> + [optionVerbosity uploadVerbosity (\v flags -> flags { 
uploadVerbosity = v }) + + ,option ['c'] ["check"] + "Do not upload, just do QA checks." + uploadCheck (\v flags -> flags { uploadCheck = v }) + trueArg + + ,option ['u'] ["username"] + "Hackage username." + uploadUsername (\v flags -> flags { uploadUsername = v }) + (reqArg' "USERNAME" (toFlag . Username) + (flagToList . fmap unUsername)) + + ,option ['p'] ["password"] + "Hackage password." + uploadPassword (\v flags -> flags { uploadPassword = v }) + (reqArg' "PASSWORD" (toFlag . Password) + (flagToList . fmap unPassword)) + ] + } + +instance Monoid UploadFlags where + mempty = UploadFlags { + uploadCheck = mempty, + uploadUsername = mempty, + uploadPassword = mempty, + uploadVerbosity = mempty + } + mappend a b = UploadFlags { + uploadCheck = combine uploadCheck, + uploadUsername = combine uploadUsername, + uploadPassword = combine uploadPassword, + uploadVerbosity = combine uploadVerbosity + } + where combine field = field a `mappend` field b + +-- ------------------------------------------------------------ +-- * Init flags +-- ------------------------------------------------------------ + +emptyInitFlags :: IT.InitFlags +emptyInitFlags = mempty + +defaultInitFlags :: IT.InitFlags +defaultInitFlags = emptyInitFlags { IT.initVerbosity = toFlag normal } + +initCommand :: CommandUI IT.InitFlags +initCommand = CommandUI { + commandName = "init", + commandSynopsis = "Create a new .cabal package file (interactively).", + commandDescription = Just $ \_ -> wrapText $ + "Cabalise a project by creating a .cabal, Setup.hs, and " + ++ "optionally a LICENSE file.\n" + ++ "\n" + ++ "Calling init with no arguments (recommended) uses an " + ++ "interactive mode, which will try to guess as much as " + ++ "possible and prompt you for the rest. Command-line " + ++ "arguments are provided for scripting purposes. 
" + ++ "If you don't want interactive mode, be sure to pass " + ++ "the -n flag.\n", + commandNotes = Nothing, + commandUsage = \pname -> + "Usage: " ++ pname ++ " init [FLAGS]\n", + commandDefaultFlags = defaultInitFlags, + commandOptions = \_ -> + [ option ['n'] ["non-interactive"] + "Non-interactive mode." + IT.nonInteractive (\v flags -> flags { IT.nonInteractive = v }) + trueArg + + , option ['q'] ["quiet"] + "Do not generate log messages to stdout." + IT.quiet (\v flags -> flags { IT.quiet = v }) + trueArg + + , option [] ["no-comments"] + "Do not generate explanatory comments in the .cabal file." + IT.noComments (\v flags -> flags { IT.noComments = v }) + trueArg + + , option ['m'] ["minimal"] + "Generate a minimal .cabal file, that is, do not include extra empty fields. Also implies --no-comments." + IT.minimal (\v flags -> flags { IT.minimal = v }) + trueArg + + , option [] ["overwrite"] + "Overwrite any existing .cabal, LICENSE, or Setup.hs files without warning." + IT.overwrite (\v flags -> flags { IT.overwrite = v }) + trueArg + + , option [] ["package-dir"] + "Root directory of the package (default = current directory)." + IT.packageDir (\v flags -> flags { IT.packageDir = v }) + (reqArgFlag "DIRECTORY") + + , option ['p'] ["package-name"] + "Name of the Cabal package to create." + IT.packageName (\v flags -> flags { IT.packageName = v }) + (reqArg "PACKAGE" (readP_to_E ("Cannot parse package name: "++) + (toFlag `fmap` parse)) + (flagToList . fmap display)) + + , option [] ["version"] + "Initial version of the package." + IT.version (\v flags -> flags { IT.version = v }) + (reqArg "VERSION" (readP_to_E ("Cannot parse package version: "++) + (toFlag `fmap` parse)) + (flagToList . fmap display)) + + , option [] ["cabal-version"] + "Required version of the Cabal library." + IT.cabalVersion (\v flags -> flags { IT.cabalVersion = v }) + (reqArg "VERSION_RANGE" (readP_to_E ("Cannot parse Cabal version range: "++) + (toFlag `fmap` parse)) + (flagToList . 
fmap display)) + + , option ['l'] ["license"] + "Project license." + IT.license (\v flags -> flags { IT.license = v }) + (reqArg "LICENSE" (readP_to_E ("Cannot parse license: "++) + (toFlag `fmap` parse)) + (flagToList . fmap display)) + + , option ['a'] ["author"] + "Name of the project's author." + IT.author (\v flags -> flags { IT.author = v }) + (reqArgFlag "NAME") + + , option ['e'] ["email"] + "Email address of the maintainer." + IT.email (\v flags -> flags { IT.email = v }) + (reqArgFlag "EMAIL") + + , option ['u'] ["homepage"] + "Project homepage and/or repository." + IT.homepage (\v flags -> flags { IT.homepage = v }) + (reqArgFlag "URL") + + , option ['s'] ["synopsis"] + "Short project synopsis." + IT.synopsis (\v flags -> flags { IT.synopsis = v }) + (reqArgFlag "TEXT") + + , option ['c'] ["category"] + "Project category." + IT.category (\v flags -> flags { IT.category = v }) + (reqArg' "CATEGORY" (\s -> toFlag $ maybe (Left s) Right (readMaybe s)) + (flagToList . fmap (either id show))) + + , option ['x'] ["extra-source-file"] + "Extra source file to be distributed with tarball." + IT.extraSrc (\v flags -> flags { IT.extraSrc = v }) + (reqArg' "FILE" (Just . (:[])) + (fromMaybe [])) + + , option [] ["is-library"] + "Build a library." + IT.packageType (\v flags -> flags { IT.packageType = v }) + (noArg (Flag IT.Library)) + + , option [] ["is-executable"] + "Build an executable." + IT.packageType + (\v flags -> flags { IT.packageType = v }) + (noArg (Flag IT.Executable)) + + , option [] ["main-is"] + "Specify the main module." + IT.mainIs + (\v flags -> flags { IT.mainIs = v }) + (reqArgFlag "FILE") + + , option [] ["language"] + "Specify the default language." + IT.language + (\v flags -> flags { IT.language = v }) + (reqArg "LANGUAGE" (readP_to_E ("Cannot parse language: "++) + (toFlag `fmap` parse)) + (flagToList . fmap display)) + + , option ['o'] ["expose-module"] + "Export a module from the package." 
+ IT.exposedModules + (\v flags -> flags { IT.exposedModules = v }) + (reqArg "MODULE" (readP_to_E ("Cannot parse module name: "++) + ((Just . (:[])) `fmap` parse)) + (maybe [] (fmap display))) + + , option [] ["extension"] + "Use a LANGUAGE extension (in the other-extensions field)." + IT.otherExts + (\v flags -> flags { IT.otherExts = v }) + (reqArg "EXTENSION" (readP_to_E ("Cannot parse extension: "++) + ((Just . (:[])) `fmap` parse)) + (maybe [] (fmap display))) + + , option ['d'] ["dependency"] + "Package dependency." + IT.dependencies (\v flags -> flags { IT.dependencies = v }) + (reqArg "PACKAGE" (readP_to_E ("Cannot parse dependency: "++) + ((Just . (:[])) `fmap` parse)) + (maybe [] (fmap display))) + + , option [] ["source-dir"] + "Directory containing package source." + IT.sourceDirs (\v flags -> flags { IT.sourceDirs = v }) + (reqArg' "DIR" (Just . (:[])) + (fromMaybe [])) + + , option [] ["build-tool"] + "Required external build tool." + IT.buildTools (\v flags -> flags { IT.buildTools = v }) + (reqArg' "TOOL" (Just . (:[])) + (fromMaybe [])) + + , optionVerbosity IT.initVerbosity (\v flags -> flags { IT.initVerbosity = v }) + ] + } + where readMaybe s = case reads s of + [(x,"")] -> Just x + _ -> Nothing + +-- ------------------------------------------------------------ +-- * SDist flags +-- ------------------------------------------------------------ + +-- | Extra flags to @sdist@ beyond runghc Setup sdist +-- +data SDistExFlags = SDistExFlags { + sDistFormat :: Flag ArchiveFormat + } + deriving Show + +data ArchiveFormat = TargzFormat | ZipFormat -- | ... 
+ deriving (Show, Eq) + +defaultSDistExFlags :: SDistExFlags +defaultSDistExFlags = SDistExFlags { + sDistFormat = Flag TargzFormat + } + +sdistCommand :: CommandUI (SDistFlags, SDistExFlags) +sdistCommand = Cabal.sdistCommand { + commandDefaultFlags = (commandDefaultFlags Cabal.sdistCommand, defaultSDistExFlags), + commandOptions = \showOrParseArgs -> + liftOptions fst setFst (commandOptions Cabal.sdistCommand showOrParseArgs) + ++ liftOptions snd setSnd sdistExOptions + } + where + setFst a (_,b) = (a,b) + setSnd b (a,_) = (a,b) + + sdistExOptions = + [option [] ["archive-format"] "archive-format" + sDistFormat (\v flags -> flags { sDistFormat = v }) + (choiceOpt + [ (Flag TargzFormat, ([], ["targz"]), + "Produce a '.tar.gz' format archive (default and required for uploading to hackage)") + , (Flag ZipFormat, ([], ["zip"]), + "Produce a '.zip' format archive") + ]) + ] + +instance Monoid SDistExFlags where + mempty = SDistExFlags { + sDistFormat = mempty + } + mappend a b = SDistExFlags { + sDistFormat = combine sDistFormat + } + where + combine field = field a `mappend` field b + +-- ------------------------------------------------------------ +-- * Win32SelfUpgrade flags +-- ------------------------------------------------------------ + +data Win32SelfUpgradeFlags = Win32SelfUpgradeFlags { + win32SelfUpgradeVerbosity :: Flag Verbosity +} + +defaultWin32SelfUpgradeFlags :: Win32SelfUpgradeFlags +defaultWin32SelfUpgradeFlags = Win32SelfUpgradeFlags { + win32SelfUpgradeVerbosity = toFlag normal +} + +win32SelfUpgradeCommand :: CommandUI Win32SelfUpgradeFlags +win32SelfUpgradeCommand = CommandUI { + commandName = "win32selfupgrade", + commandSynopsis = "Self-upgrade the executable on Windows", + commandDescription = Nothing, + commandNotes = Nothing, + commandUsage = \pname -> + "Usage: " ++ pname ++ " win32selfupgrade PID PATH\n", + commandDefaultFlags = defaultWin32SelfUpgradeFlags, + commandOptions = \_ -> + [optionVerbosity win32SelfUpgradeVerbosity + (\v flags 
-> flags { win32SelfUpgradeVerbosity = v}) + ] +} + +instance Monoid Win32SelfUpgradeFlags where + mempty = Win32SelfUpgradeFlags { + win32SelfUpgradeVerbosity = mempty + } + mappend a b = Win32SelfUpgradeFlags { + win32SelfUpgradeVerbosity = combine win32SelfUpgradeVerbosity + } + where combine field = field a `mappend` field b + +-- ------------------------------------------------------------ +-- * Sandbox-related flags +-- ------------------------------------------------------------ + +data SandboxFlags = SandboxFlags { + sandboxVerbosity :: Flag Verbosity, + sandboxSnapshot :: Flag Bool, -- FIXME: this should be an 'add-source'-only + -- flag. + sandboxLocation :: Flag FilePath +} + +defaultSandboxLocation :: FilePath +defaultSandboxLocation = ".cabal-sandbox" + +defaultSandboxFlags :: SandboxFlags +defaultSandboxFlags = SandboxFlags { + sandboxVerbosity = toFlag normal, + sandboxSnapshot = toFlag False, + sandboxLocation = toFlag defaultSandboxLocation + } + +sandboxCommand :: CommandUI SandboxFlags +sandboxCommand = CommandUI { + commandName = "sandbox", + commandSynopsis = "Create/modify/delete a sandbox.", + commandDescription = Just $ \pname -> concat + [ paragraph $ "Sandboxes are isolated package databases that can be used" + ++ " to prevent dependency conflicts that arise when many different" + ++ " packages are installed in the same database (i.e. the user's" + ++ " database in the home directory)." + , paragraph $ "A sandbox in the current directory (created by" + ++ " `sandbox init`) will be used instead of the user's database for" + ++ " commands such as `install` and `build`. Note that (a directly" + ++ " invoked) GHC will not automatically be aware of sandboxes;" + ++ " only if called via appropriate " ++ pname + ++ " commands, e.g. `repl`, `build`, `exec`." 
+ , paragraph $ "Currently, " ++ pname ++ " will not search for a sandbox" + ++ " in folders above the current one, so cabal will not see the sandbox" + ++ " if you are in a subfolder of a sandboxes." + , paragraph "Subcommands:" + , headLine "init:" + , indentParagraph $ "Initialize a sandbox in the current directory." + ++ " An existing package database will not be modified, but settings" + ++ " (such as the location of the database) can be modified this way." + , headLine "delete:" + , indentParagraph $ "Remove the sandbox; deleting all the packages" + ++ " installed inside." + , headLine "add-source:" + , indentParagraph $ "Make one or more local package available in the" + ++ " sandbox. PATHS may be relative or absolute." + ++ " Typical usecase is when you need" + ++ " to make a (temporary) modification to a dependency: You download" + ++ " the package into a different directory, make the modification," + ++ " and add that directory to the sandbox with `add-source`." + , indentParagraph $ "Unless given `--snapshot`, any add-source'd" + ++ " dependency that was modified since the last build will be" + ++ " re-installed automatically." + , headLine "delete-source:" + , indentParagraph $ "Remove an add-source dependency; however, this will" + ++ " not delete the package(s) that have been installed in the sandbox" + ++ " from this dependency. You can either unregister the package(s) via" + ++ " `" ++ pname ++ " sandbox hc-pkg unregister` or re-create the" + ++ " sandbox (`sandbox delete; sandbox init`)." + , headLine "list-sources:" + , indentParagraph $ "List the directories of local packages made" + ++ " available via `" ++ pname ++ " add-source`." + , headLine "hc-pkg:" + , indentParagraph $ "Similar to `ghc-pkg`, but for the sandbox package" + ++ " database. Can be used to list specific/all packages that are" + ++ " installed in the sandbox. For subcommands, see the help for" + ++ " ghc-pkg. Affected by the compiler version specified by `configure`." 
+ ], + commandNotes = Just $ \_ -> + relevantConfigValuesText ["require-sandbox" + ,"ignore-sandbox"], + commandUsage = usageAlternatives "sandbox" + [ "init [FLAGS]" + , "delete [FLAGS]" + , "add-source [FLAGS] PATHS" + , "delete-source [FLAGS] PATHS" + , "list-sources [FLAGS]" + , "hc-pkg [FLAGS] [--] COMMAND [--] [ARGS]" + ], + + commandDefaultFlags = defaultSandboxFlags, + commandOptions = \_ -> + [ optionVerbosity sandboxVerbosity + (\v flags -> flags { sandboxVerbosity = v }) + + , option [] ["snapshot"] + "Take a snapshot instead of creating a link (only applies to 'add-source')" + sandboxSnapshot (\v flags -> flags { sandboxSnapshot = v }) + trueArg + + , option [] ["sandbox"] + "Sandbox location (default: './.cabal-sandbox')." + sandboxLocation (\v flags -> flags { sandboxLocation = v }) + (reqArgFlag "DIR") + ] + } + +instance Monoid SandboxFlags where + mempty = SandboxFlags { + sandboxVerbosity = mempty, + sandboxSnapshot = mempty, + sandboxLocation = mempty + } + mappend a b = SandboxFlags { + sandboxVerbosity = combine sandboxVerbosity, + sandboxSnapshot = combine sandboxSnapshot, + sandboxLocation = combine sandboxLocation + } + where combine field = field a `mappend` field b + +-- ------------------------------------------------------------ +-- * Exec Flags +-- ------------------------------------------------------------ + +data ExecFlags = ExecFlags { + execVerbosity :: Flag Verbosity +} + +defaultExecFlags :: ExecFlags +defaultExecFlags = ExecFlags { + execVerbosity = toFlag normal + } + +execCommand :: CommandUI ExecFlags +execCommand = CommandUI { + commandName = "exec", + commandSynopsis = "Give a command access to the sandbox package repository.", + commandDescription = Just $ \pname -> wrapText $ + -- TODO: this is too GHC-focused for my liking.. + "A directly invoked GHC will not automatically be aware of any" + ++ " sandboxes: the GHC_PACKAGE_PATH environment variable controls what" + ++ " GHC uses. 
`" ++ pname ++ " exec` can be used to modify this variable:" + ++ " COMMAND will be executed in a modified environment and thereby uses" + ++ " the sandbox package database.\n" + ++ "\n" + ++ "If there is no sandbox, behaves as identity (executing COMMAND).\n" + ++ "\n" + ++ "Note that other " ++ pname ++ " commands change the environment" + ++ " variable appropriately already, so there is no need to wrap those" + ++ " in `" ++ pname ++ " exec`. But with `" ++ pname ++ " exec`, the user" + ++ " has more control and can, for example, execute custom scripts which" + ++ " indirectly execute GHC.\n" + ++ "\n" + ++ "See `" ++ pname ++ " sandbox`.", + commandNotes = Just $ \pname -> + "Examples:\n" + ++ " Install the executable package pandoc into a sandbox and run it:\n" + ++ " " ++ pname ++ " sandbox init\n" + ++ " " ++ pname ++ " install pandoc\n" + ++ " " ++ pname ++ " exec pandoc foo.md\n\n" + ++ " Install the executable package hlint into the user package database\n" + ++ " and run it:\n" + ++ " " ++ pname ++ " install --user hlint\n" + ++ " " ++ pname ++ " exec hlint Foo.hs\n\n" + ++ " Execute runghc on Foo.hs with runghc configured to use the\n" + ++ " sandbox package database (if a sandbox is being used):\n" + ++ " " ++ pname ++ " exec runghc Foo.hs\n", + commandUsage = \pname -> + "Usage: " ++ pname ++ " exec [FLAGS] [--] COMMAND [--] [ARGS]\n", + + commandDefaultFlags = defaultExecFlags, + commandOptions = \_ -> + [ optionVerbosity execVerbosity + (\v flags -> flags { execVerbosity = v }) + ] + } + +instance Monoid ExecFlags where + mempty = ExecFlags { + execVerbosity = mempty + } + mappend a b = ExecFlags { + execVerbosity = combine execVerbosity + } + where combine field = field a `mappend` field b + +-- ------------------------------------------------------------ +-- * UserConfig flags +-- ------------------------------------------------------------ + +data UserConfigFlags = UserConfigFlags { + userConfigVerbosity :: Flag Verbosity +} + +instance Monoid 
UserConfigFlags where + mempty = UserConfigFlags { + userConfigVerbosity = toFlag normal + } + mappend a b = UserConfigFlags { + userConfigVerbosity = combine userConfigVerbosity + } + where combine field = field a `mappend` field b + +userConfigCommand :: CommandUI UserConfigFlags +userConfigCommand = CommandUI { + commandName = "user-config", + commandSynopsis = "Display and update the user's global cabal configuration.", + commandDescription = Just $ \_ -> wrapText $ + "When upgrading cabal, the set of configuration keys and their default" + ++ " values may change. This command provides means to merge the existing" + ++ " config in ~/.cabal/config" + ++ " (i.e. all bindings that are actually defined and not commented out)" + ++ " and the default config of the new version.\n" + ++ "\n" + ++ "diff: Shows a pseudo-diff of the user's ~/.cabal/config file and" + ++ " the default configuration that would be created by cabal if the" + ++ " config file did not exist.\n" + ++ "update: Applies the pseudo-diff to the configuration that would be" + ++ " created by default, and write the result back to ~/.cabal/config.", + + commandNotes = Nothing, + commandUsage = usageAlternatives "user-config" ["diff", "update"], + commandDefaultFlags = mempty, + commandOptions = \ _ -> [ + optionVerbosity userConfigVerbosity (\v flags -> flags { userConfigVerbosity = v }) + ] + } + +-- ------------------------------------------------------------ +-- * GetOpt Utils +-- ------------------------------------------------------------ + +reqArgFlag :: ArgPlaceHolder -> + MkOptDescr (b -> Flag String) (Flag String -> b -> b) b +reqArgFlag ad = reqArg ad (succeedReadE Flag) flagToList + +liftOptions :: (b -> a) -> (a -> b -> b) + -> [OptionField a] -> [OptionField b] +liftOptions get set = map (liftOption get set) + +yesNoOpt :: ShowOrParseArgs -> MkOptDescr (b -> Flag Bool) (Flag Bool -> b -> b) b +yesNoOpt ShowArgs sf lf = trueArg sf lf +yesNoOpt _ sf lf = Command.boolOpt' flagToMaybe Flag (sf, 
lf) ([], map ("no-" ++) lf) sf lf + +optionSolver :: (flags -> Flag PreSolver) + -> (Flag PreSolver -> flags -> flags) + -> OptionField flags +optionSolver get set = + option [] ["solver"] + ("Select dependency solver to use (default: " ++ display defaultSolver ++ "). Choices: " ++ allSolvers ++ ", where 'choose' chooses between 'topdown' and 'modular' based on compiler version.") + get set + (reqArg "SOLVER" (readP_to_E (const $ "solver must be one of: " ++ allSolvers) + (toFlag `fmap` parse)) + (flagToList . fmap display)) + +optionSolverFlags :: ShowOrParseArgs + -> (flags -> Flag Int ) -> (Flag Int -> flags -> flags) + -> (flags -> Flag Bool ) -> (Flag Bool -> flags -> flags) + -> (flags -> Flag Bool ) -> (Flag Bool -> flags -> flags) + -> (flags -> Flag Bool ) -> (Flag Bool -> flags -> flags) + -> (flags -> Flag Bool ) -> (Flag Bool -> flags -> flags) + -> [OptionField flags] +optionSolverFlags showOrParseArgs getmbj setmbj getrg setrg _getig _setig getsip setsip getstrfl setstrfl = + [ option [] ["max-backjumps"] + ("Maximum number of backjumps allowed while solving (default: " ++ show defaultMaxBackjumps ++ "). Use a negative number to enable unlimited backtracking. Use 0 to disable backtracking completely.") + getmbj setmbj + (reqArg "NUM" (readP_to_E ("Cannot parse number: "++) + (fmap toFlag (Parse.readS_to_P reads))) + (map show . flagToList)) + , option [] ["reorder-goals"] + "Try to reorder goals according to certain heuristics. Slows things down on average, but may make backtracking faster for some packages." + getrg setrg + (yesNoOpt showOrParseArgs) + -- TODO: Disabled for now because it does not work as advertised (yet). +{- + , option [] ["independent-goals"] + "Treat several goals on the command line as independent. If several goals depend on the same package, different versions can be chosen." 
+ getig setig + (yesNoOpt showOrParseArgs) +-} + , option [] ["shadow-installed-packages"] + "If multiple package instances of the same version are installed, treat all but one as shadowed." + getsip setsip + (yesNoOpt showOrParseArgs) + , option [] ["strong-flags"] + "Do not defer flag choices (this used to be the default in cabal-install <= 1.20)." + getstrfl setstrfl + (yesNoOpt showOrParseArgs) + ] + +usageFlagsOrPackages :: String -> String -> String +usageFlagsOrPackages name pname = + "Usage: " ++ pname ++ " " ++ name ++ " [FLAGS]\n" + ++ " or: " ++ pname ++ " " ++ name ++ " [PACKAGES]\n" + +usagePackages :: String -> String -> String +usagePackages name pname = + "Usage: " ++ pname ++ " " ++ name ++ " [PACKAGES]\n" + +usageFlags :: String -> String -> String +usageFlags name pname = + "Usage: " ++ pname ++ " " ++ name ++ " [FLAGS]\n" + +--TODO: do we want to allow per-package flags? +parsePackageArgs :: [String] -> Either String [Dependency] +parsePackageArgs = parsePkgArgs [] + where + parsePkgArgs ds [] = Right (reverse ds) + parsePkgArgs ds (arg:args) = + case readPToMaybe parseDependencyOrPackageId arg of + Just dep -> parsePkgArgs (dep:ds) args + Nothing -> Left $ + show arg ++ " is not valid syntax for a package name or" + ++ " package dependency." 
+ +readPToMaybe :: Parse.ReadP a a -> String -> Maybe a +readPToMaybe p str = listToMaybe [ r | (r,s) <- Parse.readP_to_S p str + , all isSpace s ] + +parseDependencyOrPackageId :: Parse.ReadP r Dependency +parseDependencyOrPackageId = parse Parse.+++ liftM pkgidToDependency parse + where + pkgidToDependency :: PackageIdentifier -> Dependency + pkgidToDependency p = case packageVersion p of + Version [] _ -> Dependency (packageName p) anyVersion + version -> Dependency (packageName p) (thisVersion version) + +showRepo :: RemoteRepo -> String +showRepo repo = remoteRepoName repo ++ ":" + ++ uriToString id (remoteRepoURI repo) [] + +readRepo :: String -> Maybe RemoteRepo +readRepo = readPToMaybe parseRepo + +parseRepo :: Parse.ReadP r RemoteRepo +parseRepo = do + name <- Parse.munch1 (\c -> isAlphaNum c || c `elem` "_-.") + _ <- Parse.char ':' + uriStr <- Parse.munch1 (\c -> isAlphaNum c || c `elem` "+-=._/*()@'$:;&!?~") + uri <- maybe Parse.pfail return (parseAbsoluteURI uriStr) + return $ RemoteRepo { + remoteRepoName = name, + remoteRepoURI = uri + } + +-- ------------------------------------------------------------ +-- * Helpers for Documentation +-- ------------------------------------------------------------ + +headLine :: String -> String +headLine = unlines + . map unwords + . wrapLine 79 + . words + +paragraph :: String -> String +paragraph = (++"\n") + . unlines + . map unwords + . wrapLine 79 + . words + +indentParagraph :: String -> String +indentParagraph = unlines + . map ((" "++).unwords) + . wrapLine 77 + . 
words + +relevantConfigValuesText :: [String] -> String +relevantConfigValuesText vs = + "Relevant global configuration keys:\n" + ++ concat [" " ++ v ++ "\n" |v <- vs] diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/SetupWrapper.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/SetupWrapper.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/SetupWrapper.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/SetupWrapper.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,579 @@ +{-# LANGUAGE CPP #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.SetupWrapper +-- Copyright : (c) The University of Glasgow 2006, +-- Duncan Coutts 2008 +-- +-- Maintainer : cabal-devel@haskell.org +-- Stability : alpha +-- Portability : portable +-- +-- An interface to building and installing Cabal packages. +-- If the @Built-Type@ field is specified as something other than +-- 'Custom', and the current version of Cabal is acceptable, this performs +-- setup actions directly. Otherwise it builds the setup script and +-- runs it with the given arguments. + +module Distribution.Client.SetupWrapper ( + setupWrapper, + SetupScriptOptions(..), + defaultSetupScriptOptions, + ) where + +import qualified Distribution.Make as Make +import qualified Distribution.Simple as Simple +import Distribution.Version + ( Version(..), VersionRange, anyVersion + , intersectVersionRanges, orLaterVersion + , withinRange ) +import Distribution.InstalledPackageInfo (installedPackageId) +import Distribution.Package + ( InstalledPackageId(..), PackageIdentifier(..), + PackageName(..), Package(..), packageName + , packageVersion, Dependency(..) 
) +import Distribution.PackageDescription + ( GenericPackageDescription(packageDescription) + , PackageDescription(..), specVersion + , BuildType(..), knownBuildTypes, defaultRenaming ) +import Distribution.PackageDescription.Parse + ( readPackageDescription ) +import Distribution.Simple.Configure + ( configCompilerEx ) +import Distribution.Compiler + ( buildCompilerId, CompilerFlavor(GHC, GHCJS) ) +import Distribution.Simple.Compiler + ( Compiler(compilerId), compilerFlavor, PackageDB(..), PackageDBStack ) +import Distribution.Simple.PreProcess + ( runSimplePreProcessor, ppUnlit ) +import Distribution.Simple.Program + ( ProgramConfiguration, emptyProgramConfiguration + , getProgramSearchPath, getDbProgramOutput, runDbProgram, ghcProgram + , ghcjsProgram ) +import Distribution.Simple.Program.Find + ( programSearchPathAsPATHVar ) +import Distribution.Simple.Program.Run + ( getEffectiveEnvironment ) +import qualified Distribution.Simple.Program.Strip as Strip +import Distribution.Simple.BuildPaths + ( defaultDistPref, exeExtension ) +import Distribution.Simple.Command + ( CommandUI(..), commandShowOptions ) +import Distribution.Simple.Program.GHC + ( GhcMode(..), GhcOptions(..), renderGhcOptions ) +import qualified Distribution.Simple.PackageIndex as PackageIndex +import Distribution.Simple.PackageIndex (InstalledPackageIndex) +import Distribution.Client.Config + ( defaultCabalDir ) +import Distribution.Client.IndexUtils + ( getInstalledPackages ) +import Distribution.Client.JobControl + ( Lock, criticalSection ) +import Distribution.Simple.Setup + ( Flag(..) 
) +import Distribution.Simple.Utils + ( die, debug, info, cabalVersion, tryFindPackageDesc, comparing + , createDirectoryIfMissingVerbose, installExecutableFile + , copyFileVerbose, rewriteFile, intercalate ) +import Distribution.Client.Utils + ( inDir, tryCanonicalizePath + , existsAndIsMoreRecentThan, moreRecentFile +#if mingw32_HOST_OS + , canonicalizePathNoThrow +#endif + ) +import Distribution.System ( Platform(..), buildPlatform ) +import Distribution.Text + ( display ) +import Distribution.Utils.NubList + ( toNubListR ) +import Distribution.Verbosity + ( Verbosity ) +import Distribution.Compat.Exception + ( catchIO ) + +import System.Directory ( doesFileExist ) +import System.FilePath ( (), (<.>) ) +import System.IO ( Handle, hPutStr ) +import System.Exit ( ExitCode(..), exitWith ) +import System.Process ( runProcess, waitForProcess ) +#if !MIN_VERSION_base(4,8,0) +import Control.Applicative ( (<$>), (<*>) ) +#endif +import Control.Monad ( when, unless ) +import Data.List ( foldl1' ) +import Data.Maybe ( fromMaybe, isJust ) +#if !MIN_VERSION_base(4,8,0) +import Data.Monoid ( mempty ) +#endif +import Data.Char ( isSpace ) + +#ifdef mingw32_HOST_OS +import Distribution.Simple.Utils + ( withTempDirectory ) + +import Control.Exception ( bracket ) +import System.FilePath ( equalFilePath, takeDirectory ) +import System.Directory ( doesDirectoryExist ) +import qualified System.Win32 as Win32 +#endif + +data SetupScriptOptions = SetupScriptOptions { + useCabalVersion :: VersionRange, + useCompiler :: Maybe Compiler, + usePlatform :: Maybe Platform, + usePackageDB :: PackageDBStack, + usePackageIndex :: Maybe InstalledPackageIndex, + useProgramConfig :: ProgramConfiguration, + useDistPref :: FilePath, + useLoggingHandle :: Maybe Handle, + useWorkingDir :: Maybe FilePath, + forceExternalSetupMethod :: Bool, + + -- Used only by 'cabal clean' on Windows. 
+ -- + -- Note: win32 clean hack + ------------------------- + -- On Windows, running './dist/setup/setup clean' doesn't work because the + -- setup script will try to delete itself (which causes it to fail horribly, + -- unlike on Linux). So we have to move the setup exe out of the way first + -- and then delete it manually. This applies only to the external setup + -- method. + useWin32CleanHack :: Bool, + + -- Used only when calling setupWrapper from parallel code to serialise + -- access to the setup cache; should be Nothing otherwise. + -- + -- Note: setup exe cache + ------------------------ + -- When we are installing in parallel, we always use the external setup + -- method. Since compiling the setup script each time adds noticeable + -- overhead, we use a shared setup script cache + -- ('~/.cabal/setup-exe-cache'). For each (compiler, platform, Cabal + -- version) combination the cache holds a compiled setup script + -- executable. This only affects the Simple build type; for the Custom, + -- Configure and Make build types we always compile the setup script anew. 
+ setupCacheLock :: Maybe Lock + } + +defaultSetupScriptOptions :: SetupScriptOptions +defaultSetupScriptOptions = SetupScriptOptions { + useCabalVersion = anyVersion, + useCompiler = Nothing, + usePlatform = Nothing, + usePackageDB = [GlobalPackageDB, UserPackageDB], + usePackageIndex = Nothing, + useProgramConfig = emptyProgramConfiguration, + useDistPref = defaultDistPref, + useLoggingHandle = Nothing, + useWorkingDir = Nothing, + useWin32CleanHack = False, + forceExternalSetupMethod = False, + setupCacheLock = Nothing + } + +setupWrapper :: Verbosity + -> SetupScriptOptions + -> Maybe PackageDescription + -> CommandUI flags + -> (Version -> flags) + -> [String] + -> IO () +setupWrapper verbosity options mpkg cmd flags extraArgs = do + pkg <- maybe getPkg return mpkg + let setupMethod = determineSetupMethod options' buildType' + options' = options { + useCabalVersion = intersectVersionRanges + (useCabalVersion options) + (orLaterVersion (specVersion pkg)) + } + buildType' = fromMaybe Custom (buildType pkg) + mkArgs cabalLibVersion = commandName cmd + : commandShowOptions cmd (flags cabalLibVersion) + ++ extraArgs + checkBuildType buildType' + setupMethod verbosity options' (packageId pkg) buildType' mkArgs + where + getPkg = tryFindPackageDesc (fromMaybe "." (useWorkingDir options)) + >>= readPackageDescription verbosity + >>= return . packageDescription + + checkBuildType (UnknownBuildType name) = + die $ "The build-type '" ++ name ++ "' is not known. Use one of: " + ++ intercalate ", " (map display knownBuildTypes) ++ "." + checkBuildType _ = return () + +-- | Decide if we're going to be able to do a direct internal call to the +-- entry point in the Cabal library or if we're going to have to compile +-- and execute an external Setup.hs script. 
+-- +determineSetupMethod :: SetupScriptOptions -> BuildType -> SetupMethod +determineSetupMethod options buildType' + | forceExternalSetupMethod options = externalSetupMethod + | isJust (useLoggingHandle options) + || buildType' == Custom = externalSetupMethod + | cabalVersion `withinRange` + useCabalVersion options = internalSetupMethod + | otherwise = externalSetupMethod + +type SetupMethod = Verbosity + -> SetupScriptOptions + -> PackageIdentifier + -> BuildType + -> (Version -> [String]) -> IO () + +-- ------------------------------------------------------------ +-- * Internal SetupMethod +-- ------------------------------------------------------------ + +internalSetupMethod :: SetupMethod +internalSetupMethod verbosity options _ bt mkargs = do + let args = mkargs cabalVersion + debug verbosity $ "Using internal setup method with build-type " ++ show bt + ++ " and args:\n " ++ show args + inDir (useWorkingDir options) $ + buildTypeAction bt args + +buildTypeAction :: BuildType -> ([String] -> IO ()) +buildTypeAction Simple = Simple.defaultMainArgs +buildTypeAction Configure = Simple.defaultMainWithHooksArgs + Simple.autoconfUserHooks +buildTypeAction Make = Make.defaultMainArgs +buildTypeAction Custom = error "buildTypeAction Custom" +buildTypeAction (UnknownBuildType _) = error "buildTypeAction UnknownBuildType" + +-- ------------------------------------------------------------ +-- * External SetupMethod +-- ------------------------------------------------------------ + +externalSetupMethod :: SetupMethod +externalSetupMethod verbosity options pkg bt mkargs = do + debug verbosity $ "Using external setup method with build-type " ++ show bt + createDirectoryIfMissingVerbose verbosity True setupDir + (cabalLibVersion, mCabalLibInstalledPkgId, options') <- cabalLibVersionToUse + debug verbosity $ "Using Cabal library version " ++ display cabalLibVersion + path <- if useCachedSetupExecutable + then getCachedSetupExecutable options' + cabalLibVersion 
mCabalLibInstalledPkgId + else compileSetupExecutable options' + cabalLibVersion mCabalLibInstalledPkgId False + invokeSetupScript options' path (mkargs cabalLibVersion) + + where + workingDir = case fromMaybe "" (useWorkingDir options) of + [] -> "." + dir -> dir + setupDir = workingDir useDistPref options "setup" + setupVersionFile = setupDir "setup" <.> "version" + setupHs = setupDir "setup" <.> "hs" + setupProgFile = setupDir "setup" <.> exeExtension + platform = fromMaybe buildPlatform (usePlatform options) + + useCachedSetupExecutable = (bt == Simple || bt == Configure || bt == Make) + + maybeGetInstalledPackages :: SetupScriptOptions -> Compiler + -> ProgramConfiguration -> IO InstalledPackageIndex + maybeGetInstalledPackages options' comp conf = + case usePackageIndex options' of + Just index -> return index + Nothing -> getInstalledPackages verbosity + comp (usePackageDB options') conf + + cabalLibVersionToUse :: IO (Version, (Maybe InstalledPackageId) + ,SetupScriptOptions) + cabalLibVersionToUse = do + savedVer <- savedVersion + case savedVer of + Just version | version `withinRange` useCabalVersion options + -> do updateSetupScript version bt + -- Does the previously compiled setup executable still exist and + -- is it up-to date? + useExisting <- canUseExistingSetup version + if useExisting + then return (version, Nothing, options) + else installedVersion + _ -> installedVersion + where + -- This check duplicates the checks in 'getCachedSetupExecutable' / + -- 'compileSetupExecutable'. Unfortunately, we have to perform it twice + -- because the selected Cabal version may change as a result of this + -- check. 
+ canUseExistingSetup :: Version -> IO Bool + canUseExistingSetup version = + if useCachedSetupExecutable + then do + (_, cachedSetupProgFile) <- cachedSetupDirAndProg options version + doesFileExist cachedSetupProgFile + else + (&&) <$> setupProgFile `existsAndIsMoreRecentThan` setupHs + <*> setupProgFile `existsAndIsMoreRecentThan` setupVersionFile + + installedVersion :: IO (Version, Maybe InstalledPackageId + ,SetupScriptOptions) + installedVersion = do + (comp, conf, options') <- configureCompiler options + (version, mipkgid, options'') <- installedCabalVersion options' comp conf + updateSetupScript version bt + writeFile setupVersionFile (show version ++ "\n") + return (version, mipkgid, options'') + + savedVersion :: IO (Maybe Version) + savedVersion = do + versionString <- readFile setupVersionFile `catchIO` \_ -> return "" + case reads versionString of + [(version,s)] | all isSpace s -> return (Just version) + _ -> return Nothing + + -- | Update a Setup.hs script, creating it if necessary. + updateSetupScript :: Version -> BuildType -> IO () + updateSetupScript _ Custom = do + useHs <- doesFileExist customSetupHs + useLhs <- doesFileExist customSetupLhs + unless (useHs || useLhs) $ die + "Using 'build-type: Custom' but there is no Setup.hs or Setup.lhs script." 
+ let src = (if useHs then customSetupHs else customSetupLhs) + srcNewer <- src `moreRecentFile` setupHs + when srcNewer $ if useHs + then copyFileVerbose verbosity src setupHs + else runSimplePreProcessor ppUnlit src setupHs verbosity + where + customSetupHs = workingDir "Setup.hs" + customSetupLhs = workingDir "Setup.lhs" + + updateSetupScript cabalLibVersion _ = + rewriteFile setupHs (buildTypeScript cabalLibVersion) + + buildTypeScript :: Version -> String + buildTypeScript cabalLibVersion = case bt of + Simple -> "import Distribution.Simple; main = defaultMain\n" + Configure -> "import Distribution.Simple; main = defaultMainWithHooks " + ++ if cabalLibVersion >= Version [1,3,10] [] + then "autoconfUserHooks\n" + else "defaultUserHooks\n" + Make -> "import Distribution.Make; main = defaultMain\n" + Custom -> error "buildTypeScript Custom" + UnknownBuildType _ -> error "buildTypeScript UnknownBuildType" + + installedCabalVersion :: SetupScriptOptions -> Compiler -> ProgramConfiguration + -> IO (Version, Maybe InstalledPackageId + ,SetupScriptOptions) + installedCabalVersion options' compiler conf = do + index <- maybeGetInstalledPackages options' compiler conf + let cabalDep = Dependency (PackageName "Cabal") (useCabalVersion options') + options'' = options' { usePackageIndex = Just index } + case PackageIndex.lookupDependency index cabalDep of + [] -> die $ "The package '" ++ display (packageName pkg) + ++ "' requires Cabal library version " + ++ display (useCabalVersion options) + ++ " but no suitable version is installed." + pkgs -> let ipkginfo = head . snd . bestVersion fst $ pkgs + in return (packageVersion ipkginfo + ,Just . installedPackageId $ ipkginfo, options'') + + bestVersion :: (a -> Version) -> [a] -> a + bestVersion f = firstMaximumBy (comparing (preference . f)) + where + -- Like maximumBy, but picks the first maximum element instead of the + -- last. In general, we expect the preferred version to go first in the + -- list. 
For the default case, this has the effect of choosing the version + -- installed in the user package DB instead of the global one. See #1463. + -- + -- Note: firstMaximumBy could be written as just + -- `maximumBy cmp . reverse`, but the problem is that the behaviour of + -- maximumBy is not fully specified in the case when there is not a single + -- greatest element. + firstMaximumBy :: (a -> a -> Ordering) -> [a] -> a + firstMaximumBy _ [] = + error "Distribution.Client.firstMaximumBy: empty list" + firstMaximumBy cmp xs = foldl1' maxBy xs + where + maxBy x y = case cmp x y of { GT -> x; EQ -> x; LT -> y; } + + preference version = (sameVersion, sameMajorVersion + ,stableVersion, latestVersion) + where + sameVersion = version == cabalVersion + sameMajorVersion = majorVersion version == majorVersion cabalVersion + majorVersion = take 2 . versionBranch + stableVersion = case versionBranch version of + (_:x:_) -> even x + _ -> False + latestVersion = version + + configureCompiler :: SetupScriptOptions + -> IO (Compiler, ProgramConfiguration, SetupScriptOptions) + configureCompiler options' = do + (comp, conf) <- case useCompiler options' of + Just comp -> return (comp, useProgramConfig options') + Nothing -> do (comp, _, conf) <- + configCompilerEx (Just GHC) Nothing Nothing + (useProgramConfig options') verbosity + return (comp, conf) + -- Whenever we need to call configureCompiler, we also need to access the + -- package index, so let's cache it in SetupScriptOptions. + index <- maybeGetInstalledPackages options' comp conf + return (comp, conf, options' { useCompiler = Just comp, + usePackageIndex = Just index, + useProgramConfig = conf }) + + -- | Path to the setup exe cache directory and path to the cached setup + -- executable. 
+ cachedSetupDirAndProg :: SetupScriptOptions -> Version + -> IO (FilePath, FilePath) + cachedSetupDirAndProg options' cabalLibVersion = do + cabalDir <- defaultCabalDir + let setupCacheDir = cabalDir "setup-exe-cache" + cachedSetupProgFile = setupCacheDir + ("setup-" ++ buildTypeString ++ "-" + ++ cabalVersionString ++ "-" + ++ platformString ++ "-" + ++ compilerVersionString) + <.> exeExtension + return (setupCacheDir, cachedSetupProgFile) + where + buildTypeString = show bt + cabalVersionString = "Cabal-" ++ (display cabalLibVersion) + compilerVersionString = display $ + fromMaybe buildCompilerId + (fmap compilerId . useCompiler $ options') + platformString = display platform + + -- | Look up the setup executable in the cache; update the cache if the setup + -- executable is not found. + getCachedSetupExecutable :: SetupScriptOptions + -> Version -> Maybe InstalledPackageId + -> IO FilePath + getCachedSetupExecutable options' cabalLibVersion + maybeCabalLibInstalledPkgId = do + (setupCacheDir, cachedSetupProgFile) <- + cachedSetupDirAndProg options' cabalLibVersion + cachedSetupExists <- doesFileExist cachedSetupProgFile + if cachedSetupExists + then debug verbosity $ + "Found cached setup executable: " ++ cachedSetupProgFile + else criticalSection' $ do + -- The cache may have been populated while we were waiting. + cachedSetupExists' <- doesFileExist cachedSetupProgFile + if cachedSetupExists' + then debug verbosity $ + "Found cached setup executable: " ++ cachedSetupProgFile + else do + debug verbosity $ "Setup executable not found in the cache." 
+ src <- compileSetupExecutable options' + cabalLibVersion maybeCabalLibInstalledPkgId True + createDirectoryIfMissingVerbose verbosity True setupCacheDir + installExecutableFile verbosity src cachedSetupProgFile + -- Do not strip if we're using GHCJS, since the result may be a script + when (maybe True ((/=GHCJS).compilerFlavor) $ useCompiler options') $ + Strip.stripExe verbosity platform (useProgramConfig options') + cachedSetupProgFile + return cachedSetupProgFile + where + criticalSection' = fromMaybe id + (fmap criticalSection $ setupCacheLock options') + + -- | If the Setup.hs is out of date wrt the executable then recompile it. + -- Currently this is GHC/GHCJS only. It should really be generalised. + -- + compileSetupExecutable :: SetupScriptOptions + -> Version -> Maybe InstalledPackageId -> Bool + -> IO FilePath + compileSetupExecutable options' cabalLibVersion maybeCabalLibInstalledPkgId + forceCompile = do + setupHsNewer <- setupHs `moreRecentFile` setupProgFile + cabalVersionNewer <- setupVersionFile `moreRecentFile` setupProgFile + let outOfDate = setupHsNewer || cabalVersionNewer + when (outOfDate || forceCompile) $ do + debug verbosity "Setup executable needs to be updated, compiling..." 
+ (compiler, conf, options'') <- configureCompiler options' + let cabalPkgid = PackageIdentifier (PackageName "Cabal") cabalLibVersion + (program, extraOpts) + = case compilerFlavor compiler of + GHCJS -> (ghcjsProgram, ["-build-runner"]) + _ -> (ghcProgram, ["-threaded"]) + ghcOptions = mempty { + ghcOptVerbosity = Flag verbosity + , ghcOptMode = Flag GhcModeMake + , ghcOptInputFiles = toNubListR [setupHs] + , ghcOptOutputFile = Flag setupProgFile + , ghcOptObjDir = Flag setupDir + , ghcOptHiDir = Flag setupDir + , ghcOptSourcePathClear = Flag True + , ghcOptSourcePath = case bt of + Custom -> toNubListR [workingDir] + _ -> mempty + , ghcOptPackageDBs = usePackageDB options'' + , ghcOptPackages = toNubListR $ + maybe [] (\ipkgid -> [(ipkgid, cabalPkgid, defaultRenaming)]) + maybeCabalLibInstalledPkgId + , ghcOptExtra = toNubListR extraOpts + } + let ghcCmdLine = renderGhcOptions compiler ghcOptions + case useLoggingHandle options of + Nothing -> runDbProgram verbosity program conf ghcCmdLine + + -- If build logging is enabled, redirect compiler output to the log file. + (Just logHandle) -> do output <- getDbProgramOutput verbosity program + conf ghcCmdLine + hPutStr logHandle output + return setupProgFile + + invokeSetupScript :: SetupScriptOptions -> FilePath -> [String] -> IO () + invokeSetupScript options' path args = do + info verbosity $ unwords (path : args) + case useLoggingHandle options' of + Nothing -> return () + Just logHandle -> info verbosity $ "Redirecting build log to " + ++ show logHandle + + -- Since useWorkingDir can change the relative path, the path argument must + -- be turned into an absolute path. On some systems, runProcess will take + -- path as relative to the new working directory instead of the current + -- working directory. + path' <- tryCanonicalizePath path + + -- See 'Note: win32 clean hack' above. 
+#if mingw32_HOST_OS + -- setupProgFile may not exist if we're using a cached program + setupProgFile' <- canonicalizePathNoThrow setupProgFile + let win32CleanHackNeeded = (useWin32CleanHack options') + -- Skip when a cached setup script is used. + && setupProgFile' `equalFilePath` path' + if win32CleanHackNeeded then doWin32CleanHack path' else doInvoke path' +#else + doInvoke path' +#endif + + where + doInvoke path' = do + searchpath <- programSearchPathAsPATHVar + (getProgramSearchPath (useProgramConfig options')) + env <- getEffectiveEnvironment [("PATH", Just searchpath)] + + process <- runProcess path' args + (useWorkingDir options') env Nothing + (useLoggingHandle options') (useLoggingHandle options') + exitCode <- waitForProcess process + unless (exitCode == ExitSuccess) $ exitWith exitCode + +#if mingw32_HOST_OS + doWin32CleanHack path' = do + info verbosity $ "Using the Win32 clean hack." + -- Recursively removes the temp dir on exit. + withTempDirectory verbosity workingDir "cabal-tmp" $ \tmpDir -> + bracket (moveOutOfTheWay tmpDir path') + (maybeRestore path') + doInvoke + + moveOutOfTheWay tmpDir path' = do + let newPath = tmpDir "setup" <.> exeExtension + Win32.moveFile path' newPath + return newPath + + maybeRestore oldPath path' = do + let oldPathDir = takeDirectory oldPath + oldPathDirExists <- doesDirectoryExist oldPathDir + -- 'setup clean' didn't complete, 'dist/setup' still exists. 
+ when oldPathDirExists $ + Win32.moveFile path' oldPath +#endif diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/SrcDist.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/SrcDist.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/SrcDist.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/SrcDist.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,139 @@ +-- Implements the \"@.\/cabal sdist@\" command, which creates a source +-- distribution for this package. That is, packs up the source code +-- into a tarball, making use of the corresponding Cabal module. +module Distribution.Client.SrcDist ( + sdist + ) where + + +import Distribution.Client.SetupWrapper + ( SetupScriptOptions(..), defaultSetupScriptOptions, setupWrapper ) +import Distribution.Client.Tar (createTarGzFile) + +import Distribution.Package + ( Package(..) ) +import Distribution.PackageDescription + ( PackageDescription ) +import Distribution.PackageDescription.Configuration + ( flattenPackageDescription ) +import Distribution.PackageDescription.Parse + ( readPackageDescription ) +import Distribution.Simple.Utils + ( createDirectoryIfMissingVerbose, defaultPackageDesc + , die, notice, withTempDirectory ) +import Distribution.Client.Setup + ( SDistFlags(..), SDistExFlags(..), ArchiveFormat(..) 
) +import Distribution.Simple.Setup + ( Flag(..), sdistCommand, flagToList, fromFlag, fromFlagOrDefault ) +import Distribution.Simple.BuildPaths ( srcPref) +import Distribution.Simple.Program (requireProgram, simpleProgram, programPath) +import Distribution.Simple.Program.Db (emptyProgramDb) +import Distribution.Text ( display ) +import Distribution.Verbosity (Verbosity) +import Distribution.Version (Version(..), orLaterVersion) + +import System.FilePath ((), (<.>)) +import Control.Monad (when, unless) +import System.Directory (doesFileExist, removeFile, canonicalizePath) +import System.Process (runProcess, waitForProcess) +import System.Exit (ExitCode(..)) + +-- |Create a source distribution. +sdist :: SDistFlags -> SDistExFlags -> IO () +sdist flags exflags = do + pkg <- return . flattenPackageDescription + =<< readPackageDescription verbosity + =<< defaultPackageDesc verbosity + let withDir = if not needMakeArchive then (\f -> f tmpTargetDir) + else withTempDirectory verbosity tmpTargetDir "sdist." + -- 'withTempDir' fails if we don't create 'tmpTargetDir'... + when needMakeArchive $ + createDirectoryIfMissingVerbose verbosity True tmpTargetDir + withDir $ \tmpDir -> do + let outDir = if isOutDirectory then tmpDir else tmpDir tarBallName pkg + flags' = (if not needMakeArchive then flags + else flags { sDistDirectory = Flag outDir }) + unless isListSources $ + createDirectoryIfMissingVerbose verbosity True outDir + + -- Run 'setup sdist --output-directory=tmpDir' (or + -- '--list-source'/'--output-directory=someOtherDir') in case we were passed + -- those options. + setupWrapper verbosity setupOpts (Just pkg) sdistCommand (const flags') [] + + -- Unless we were given --list-sources or --output-directory ourselves, + -- create an archive. 
+ when needMakeArchive $ + createArchive verbosity pkg tmpDir distPref + + when isOutDirectory $ + notice verbosity $ "Source directory created: " ++ tmpTargetDir + + when isListSources $ + notice verbosity $ "List of package sources written to file '" + ++ (fromFlag . sDistListSources $ flags) ++ "'" + + where + flagEnabled f = not . null . flagToList . f $ flags + + isListSources = flagEnabled sDistListSources + isOutDirectory = flagEnabled sDistDirectory + needMakeArchive = not (isListSources || isOutDirectory) + verbosity = fromFlag (sDistVerbosity flags) + distPref = fromFlag (sDistDistPref flags) + tmpTargetDir = fromFlagOrDefault (srcPref distPref) (sDistDirectory flags) + setupOpts = defaultSetupScriptOptions { + -- The '--output-directory' sdist flag was introduced in Cabal 1.12, and + -- '--list-sources' in 1.17. + useCabalVersion = if isListSources + then orLaterVersion $ Version [1,17,0] [] + else orLaterVersion $ Version [1,12,0] [] + } + format = fromFlag (sDistFormat exflags) + createArchive = case format of + TargzFormat -> createTarGzArchive + ZipFormat -> createZipArchive + +tarBallName :: PackageDescription -> String +tarBallName = display . packageId + +-- | Create a tar.gz archive from a tree of source files. +createTarGzArchive :: Verbosity -> PackageDescription -> FilePath -> FilePath + -> IO () +createTarGzArchive verbosity pkg tmpDir targetPref = do + createTarGzFile tarBallFilePath tmpDir (tarBallName pkg) + notice verbosity $ "Source tarball created: " ++ tarBallFilePath + where + tarBallFilePath = targetPref tarBallName pkg <.> "tar.gz" + +-- | Create a zip archive from a tree of source files. 
+createZipArchive :: Verbosity -> PackageDescription -> FilePath -> FilePath + -> IO () +createZipArchive verbosity pkg tmpDir targetPref = do + let dir = tarBallName pkg + zipfile = targetPref dir <.> "zip" + (zipProg, _) <- requireProgram verbosity zipProgram emptyProgramDb + + -- zip has an annoying habit of updating the target rather than creating + -- it from scratch. While that might sound like an optimisation, it doesn't + -- remove files already in the archive that are no longer present in the + -- uncompressed tree. + alreadyExists <- doesFileExist zipfile + when alreadyExists $ removeFile zipfile + + -- We call zip with a different CWD, so have to make the path + -- absolute. Can't just use 'canonicalizePath zipfile' since this function + -- requires its argument to refer to an existing file. + zipfileAbs <- fmap ( dir <.> "zip") . canonicalizePath $ targetPref + + --TODO: use runProgramInvocation, but has to be able to set CWD + hnd <- runProcess (programPath zipProg) ["-q", "-r", zipfileAbs, dir] + (Just tmpDir) + Nothing Nothing Nothing Nothing + exitCode <- waitForProcess hnd + unless (exitCode == ExitSuccess) $ + die $ "Generating the zip file failed " + ++ "(zip returned exit code " ++ show exitCode ++ ")" + notice verbosity $ "Source zip archive created: " ++ zipfile + where + zipProgram = simpleProgram "zip" diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Targets.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Targets.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Targets.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Targets.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,774 @@ +{-# LANGUAGE CPP #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.Targets +-- Copyright : (c) Duncan Coutts 2011 +-- License 
: BSD-like +-- +-- Maintainer : duncan@community.haskell.org +-- +-- Handling for user-specified targets +----------------------------------------------------------------------------- +module Distribution.Client.Targets ( + -- * User targets + UserTarget(..), + readUserTargets, + + -- * Package specifiers + PackageSpecifier(..), + pkgSpecifierTarget, + pkgSpecifierConstraints, + + -- * Resolving user targets to package specifiers + resolveUserTargets, + + -- ** Detailed interface + UserTargetProblem(..), + readUserTarget, + reportUserTargetProblems, + expandUserTarget, + + PackageTarget(..), + fetchPackageTarget, + readPackageTarget, + + PackageTargetProblem(..), + reportPackageTargetProblems, + + disambiguatePackageTargets, + disambiguatePackageName, + + -- * User constraints + UserConstraint(..), + readUserConstraint, + userToPackageConstraint + + ) where + +import Distribution.Package + ( Package(..), PackageName(..) + , PackageIdentifier(..), packageName, packageVersion + , Dependency(Dependency) ) +import Distribution.Client.Types + ( SourcePackage(..), PackageLocation(..), OptionalStanza(..) ) +import Distribution.Client.Dependency.Types + ( PackageConstraint(..) ) + +import qualified Distribution.Client.World as World +import Distribution.Client.PackageIndex (PackageIndex) +import qualified Distribution.Client.PackageIndex as PackageIndex +import qualified Distribution.Client.Tar as Tar +import Distribution.Client.FetchUtils +import Distribution.Client.Utils ( tryFindPackageDesc ) + +import Distribution.PackageDescription + ( GenericPackageDescription, FlagName(..), FlagAssignment ) +import Distribution.PackageDescription.Parse + ( readPackageDescription, parsePackageDescription, ParseResult(..) 
) +import Distribution.Version + ( Version(Version), thisVersion, anyVersion, isAnyVersion + , VersionRange ) +import Distribution.Text + ( Text(..), display ) +import Distribution.Verbosity (Verbosity) +import Distribution.Simple.Utils + ( die, warn, intercalate, fromUTF8, lowercase ) + +import Data.List + ( find, nub ) +import Data.Maybe + ( listToMaybe ) +import Data.Either + ( partitionEithers ) +#if !MIN_VERSION_base(4,8,0) +import Data.Monoid + ( Monoid(..) ) +#endif +import qualified Data.Map as Map +import qualified Data.ByteString.Lazy as BS +import qualified Data.ByteString.Lazy.Char8 as BS.Char8 +import qualified Distribution.Client.GZipUtils as GZipUtils +import Control.Monad (liftM) +import qualified Distribution.Compat.ReadP as Parse +import Distribution.Compat.ReadP + ( (+++), (<++) ) +import qualified Text.PrettyPrint as Disp +import Text.PrettyPrint + ( (<>), (<+>) ) +import Data.Char + ( isSpace, isAlphaNum ) +import System.FilePath + ( takeExtension, dropExtension, takeDirectory, splitPath ) +import System.Directory + ( doesFileExist, doesDirectoryExist ) +import Network.URI + ( URI(..), URIAuth(..), parseAbsoluteURI ) + +-- ------------------------------------------------------------ +-- * User targets +-- ------------------------------------------------------------ + +-- | Various ways that a user may specify a package or package collection. +-- +data UserTarget = + + -- | A partially specified package, identified by name and possibly with + -- an exact version or a version constraint. + -- + -- > cabal install foo + -- > cabal install foo-1.0 + -- > cabal install 'foo < 2' + -- + UserTargetNamed Dependency + + -- | A special virtual package that refers to the collection of packages + -- recorded in the world file that the user specifically installed. + -- + -- > cabal install world + -- + | UserTargetWorld + + -- | A specific package that is unpacked in a local directory, often the + -- current directory. + -- + -- > cabal install . 
+ -- > cabal install ../lib/other + -- + -- * Note: in future, if multiple @.cabal@ files are allowed in a single + -- directory then this will refer to the collection of packages. + -- + | UserTargetLocalDir FilePath + + -- | A specific local unpacked package, identified by its @.cabal@ file. + -- + -- > cabal install foo.cabal + -- > cabal install ../lib/other/bar.cabal + -- + | UserTargetLocalCabalFile FilePath + + -- | A specific package that is available as a local tarball file + -- + -- > cabal install dist/foo-1.0.tar.gz + -- > cabal install ../build/baz-1.0.tar.gz + -- + | UserTargetLocalTarball FilePath + + -- | A specific package that is available as a remote tarball file + -- + -- > cabal install http://code.haskell.org/~user/foo/foo-0.9.tar.gz + -- + | UserTargetRemoteTarball URI + deriving (Show,Eq) + + +-- ------------------------------------------------------------ +-- * Package specifier +-- ------------------------------------------------------------ + +-- | A fully or partially resolved reference to a package. +-- +data PackageSpecifier pkg = + + -- | A partially specified reference to a package (either source or + -- installed). It is specified by package name and optionally some + -- additional constraints. Use a dependency resolver to pick a specific + -- package satisfying these constraints. + -- + NamedPackage PackageName [PackageConstraint] + + -- | A fully specified source package. 
+ -- + | SpecificSourcePackage pkg + deriving Show + +pkgSpecifierTarget :: Package pkg => PackageSpecifier pkg -> PackageName +pkgSpecifierTarget (NamedPackage name _) = name +pkgSpecifierTarget (SpecificSourcePackage pkg) = packageName pkg + +pkgSpecifierConstraints :: Package pkg + => PackageSpecifier pkg -> [PackageConstraint] +pkgSpecifierConstraints (NamedPackage _ constraints) = constraints +pkgSpecifierConstraints (SpecificSourcePackage pkg) = + [PackageConstraintVersion (packageName pkg) + (thisVersion (packageVersion pkg))] + + +-- ------------------------------------------------------------ +-- * Parsing and checking user targets +-- ------------------------------------------------------------ + +readUserTargets :: Verbosity -> [String] -> IO [UserTarget] +readUserTargets _verbosity targetStrs = do + (problems, targets) <- liftM partitionEithers + (mapM readUserTarget targetStrs) + reportUserTargetProblems problems + return targets + + +data UserTargetProblem + = UserTargetUnexpectedFile String + | UserTargetNonexistantFile String + | UserTargetUnexpectedUriScheme String + | UserTargetUnrecognisedUri String + | UserTargetUnrecognised String + | UserTargetBadWorldPkg + deriving Show + +readUserTarget :: String -> IO (Either UserTargetProblem UserTarget) +readUserTarget targetstr = + case testNamedTargets targetstr of + Just (Dependency (PackageName "world") verrange) + | verrange == anyVersion -> return (Right UserTargetWorld) + | otherwise -> return (Left UserTargetBadWorldPkg) + Just dep -> return (Right (UserTargetNamed dep)) + Nothing -> do + fileTarget <- testFileTargets targetstr + case fileTarget of + Just target -> return target + Nothing -> + case testUriTargets targetstr of + Just target -> return target + Nothing -> return (Left (UserTargetUnrecognised targetstr)) + where + testNamedTargets = readPToMaybe parseDependencyOrPackageId + + testFileTargets filename = do + isDir <- doesDirectoryExist filename + isFile <- doesFileExist filename + 
parentDirExists <- case takeDirectory filename of + [] -> return False + dir -> doesDirectoryExist dir + let result + | isDir + = Just (Right (UserTargetLocalDir filename)) + + | isFile && extensionIsTarGz filename + = Just (Right (UserTargetLocalTarball filename)) + + | isFile && takeExtension filename == ".cabal" + = Just (Right (UserTargetLocalCabalFile filename)) + + | isFile + = Just (Left (UserTargetUnexpectedFile filename)) + + | parentDirExists + = Just (Left (UserTargetNonexistantFile filename)) + + | otherwise + = Nothing + return result + + testUriTargets str = + case parseAbsoluteURI str of + Just uri@URI { + uriScheme = scheme, + uriAuthority = Just URIAuth { uriRegName = host } + } + | scheme /= "http:" -> + Just (Left (UserTargetUnexpectedUriScheme targetstr)) + + | null host -> + Just (Left (UserTargetUnrecognisedUri targetstr)) + + | otherwise -> + Just (Right (UserTargetRemoteTarball uri)) + _ -> Nothing + + extensionIsTarGz f = takeExtension f == ".gz" + && takeExtension (dropExtension f) == ".tar" + + parseDependencyOrPackageId :: Parse.ReadP r Dependency + parseDependencyOrPackageId = parse + +++ liftM pkgidToDependency parse + where + pkgidToDependency :: PackageIdentifier -> Dependency + pkgidToDependency p = case packageVersion p of + Version [] _ -> Dependency (packageName p) anyVersion + version -> Dependency (packageName p) (thisVersion version) + +readPToMaybe :: Parse.ReadP a a -> String -> Maybe a +readPToMaybe p str = listToMaybe [ r | (r,s) <- Parse.readP_to_S p str + , all isSpace s ] + + +reportUserTargetProblems :: [UserTargetProblem] -> IO () +reportUserTargetProblems problems = do + case [ target | UserTargetUnrecognised target <- problems ] of + [] -> return () + target -> die + $ unlines + [ "Unrecognised target '" ++ name ++ "'." + | name <- target ] + ++ "Targets can be:\n" + ++ " - package names, e.g. 
'pkgname', 'pkgname-1.0.1', 'pkgname < 2.0'\n" + ++ " - the special 'world' target\n" + ++ " - cabal files 'pkgname.cabal' or package directories 'pkgname/'\n" + ++ " - package tarballs 'pkgname.tar.gz' or 'http://example.com/pkgname.tar.gz'" + + case [ () | UserTargetBadWorldPkg <- problems ] of + [] -> return () + _ -> die "The special 'world' target does not take any version." + + case [ target | UserTargetNonexistantFile target <- problems ] of + [] -> return () + target -> die + $ unlines + [ "The file does not exist '" ++ name ++ "'." + | name <- target ] + + case [ target | UserTargetUnexpectedFile target <- problems ] of + [] -> return () + target -> die + $ unlines + [ "Unrecognised file target '" ++ name ++ "'." + | name <- target ] + ++ "File targets can be either package tarballs 'pkgname.tar.gz' " + ++ "or cabal files 'pkgname.cabal'." + + case [ target | UserTargetUnexpectedUriScheme target <- problems ] of + [] -> return () + target -> die + $ unlines + [ "URL target not supported '" ++ name ++ "'." + | name <- target ] + ++ "Only 'http://' URLs are supported." + + case [ target | UserTargetUnrecognisedUri target <- problems ] of + [] -> return () + target -> die + $ unlines + [ "Unrecognise URL target '" ++ name ++ "'." + | name <- target ] + + +-- ------------------------------------------------------------ +-- * Resolving user targets to package specifiers +-- ------------------------------------------------------------ + +-- | Given a bunch of user-specified targets, try to resolve what it is they +-- refer to. They can either be specific packages (local dirs, tarballs etc) +-- or they can be named packages (with or without version info). 
+-- +resolveUserTargets :: Package pkg + => Verbosity + -> FilePath + -> PackageIndex pkg + -> [UserTarget] + -> IO [PackageSpecifier SourcePackage] +resolveUserTargets verbosity worldFile available userTargets = do + + -- given the user targets, get a list of fully or partially resolved + -- package references + packageTargets <- mapM (readPackageTarget verbosity) + =<< mapM (fetchPackageTarget verbosity) . concat + =<< mapM (expandUserTarget worldFile) userTargets + + -- users are allowed to give package names case-insensitively, so we must + -- disambiguate named package references + let (problems, packageSpecifiers) = + disambiguatePackageTargets available availableExtra packageTargets + + -- use any extra specific available packages to help us disambiguate + availableExtra = [ packageName pkg + | PackageTargetLocation pkg <- packageTargets ] + + reportPackageTargetProblems verbosity problems + + return packageSpecifiers + + +-- ------------------------------------------------------------ +-- * Package targets +-- ------------------------------------------------------------ + +-- | An intermediate between a 'UserTarget' and a resolved 'PackageSpecifier'. +-- Unlike a 'UserTarget', a 'PackageTarget' refers only to a single package. +-- +data PackageTarget pkg = + PackageTargetNamed PackageName [PackageConstraint] UserTarget + + -- | A package identified by name, but case insensitively, so it needs + -- to be resolved to the right case-sensitive name. + | PackageTargetNamedFuzzy PackageName [PackageConstraint] UserTarget + | PackageTargetLocation pkg + deriving Show + + +-- ------------------------------------------------------------ +-- * Converting user targets to package targets +-- ------------------------------------------------------------ + +-- | Given a user-specified target, expand it to a bunch of package targets +-- (each of which refers to only one package). 
+-- +expandUserTarget :: FilePath + -> UserTarget + -> IO [PackageTarget (PackageLocation ())] +expandUserTarget worldFile userTarget = case userTarget of + + UserTargetNamed (Dependency name vrange) -> + let constraints = [ PackageConstraintVersion name vrange + | not (isAnyVersion vrange) ] + in return [PackageTargetNamedFuzzy name constraints userTarget] + + UserTargetWorld -> do + worldPkgs <- World.getContents worldFile + --TODO: should we warn if there are no world targets? + return [ PackageTargetNamed name constraints userTarget + | World.WorldPkgInfo (Dependency name vrange) flags <- worldPkgs + , let constraints = [ PackageConstraintVersion name vrange + | not (isAnyVersion vrange) ] + ++ [ PackageConstraintFlags name flags + | not (null flags) ] ] + + UserTargetLocalDir dir -> + return [ PackageTargetLocation (LocalUnpackedPackage dir) ] + + UserTargetLocalCabalFile file -> do + let dir = takeDirectory file + _ <- tryFindPackageDesc dir (localPackageError dir) -- just as a check + return [ PackageTargetLocation (LocalUnpackedPackage dir) ] + + UserTargetLocalTarball tarballFile -> + return [ PackageTargetLocation (LocalTarballPackage tarballFile) ] + + UserTargetRemoteTarball tarballURL -> + return [ PackageTargetLocation (RemoteTarballPackage tarballURL ()) ] + +localPackageError :: FilePath -> String +localPackageError dir = + "Error reading local package.\nCouldn't find .cabal file in: " ++ dir + +-- ------------------------------------------------------------ +-- * Fetching and reading package targets +-- ------------------------------------------------------------ + + +-- | Fetch any remote targets so that they can be read. 
+-- +fetchPackageTarget :: Verbosity + -> PackageTarget (PackageLocation ()) + -> IO (PackageTarget (PackageLocation FilePath)) +fetchPackageTarget verbosity target = case target of + PackageTargetNamed n cs ut -> return (PackageTargetNamed n cs ut) + PackageTargetNamedFuzzy n cs ut -> return (PackageTargetNamedFuzzy n cs ut) + PackageTargetLocation location -> do + location' <- fetchPackage verbosity (fmap (const Nothing) location) + return (PackageTargetLocation location') + + +-- | Given a package target that has been fetched, read the .cabal file. +-- +-- This only affects targets given by location, named targets are unaffected. +-- +readPackageTarget :: Verbosity + -> PackageTarget (PackageLocation FilePath) + -> IO (PackageTarget SourcePackage) +readPackageTarget verbosity target = case target of + + PackageTargetNamed pkgname constraints userTarget -> + return (PackageTargetNamed pkgname constraints userTarget) + + PackageTargetNamedFuzzy pkgname constraints userTarget -> + return (PackageTargetNamedFuzzy pkgname constraints userTarget) + + PackageTargetLocation location -> case location of + + LocalUnpackedPackage dir -> do + pkg <- tryFindPackageDesc dir (localPackageError dir) >>= + readPackageDescription verbosity + return $ PackageTargetLocation $ + SourcePackage { + packageInfoId = packageId pkg, + packageDescription = pkg, + packageSource = fmap Just location, + packageDescrOverride = Nothing + } + + LocalTarballPackage tarballFile -> + readTarballPackageTarget location tarballFile tarballFile + + RemoteTarballPackage tarballURL tarballFile -> + readTarballPackageTarget location tarballFile (show tarballURL) + + RepoTarballPackage _repo _pkgid _ -> + error "TODO: readPackageTarget RepoTarballPackage" + -- For repo tarballs this info should be obtained from the index. 
+ + where + readTarballPackageTarget location tarballFile tarballOriginalLoc = do + (filename, content) <- extractTarballPackageCabalFile + tarballFile tarballOriginalLoc + case parsePackageDescription' content of + Nothing -> die $ "Could not parse the cabal file " + ++ filename ++ " in " ++ tarballFile + Just pkg -> + return $ PackageTargetLocation $ + SourcePackage { + packageInfoId = packageId pkg, + packageDescription = pkg, + packageSource = fmap Just location, + packageDescrOverride = Nothing + } + + extractTarballPackageCabalFile :: FilePath -> String + -> IO (FilePath, BS.ByteString) + extractTarballPackageCabalFile tarballFile tarballOriginalLoc = + either (die . formatErr) return + . check + . Tar.entriesIndex + . Tar.filterEntries isCabalFile + . Tar.read + . GZipUtils.maybeDecompress + =<< BS.readFile tarballFile + where + formatErr msg = "Error reading " ++ tarballOriginalLoc ++ ": " ++ msg + + check (Left e) = Left e + check (Right m) = case Map.elems m of + [] -> Left noCabalFile + [file] -> case Tar.entryContent file of + Tar.NormalFile content _ -> Right (Tar.entryPath file, content) + _ -> Left noCabalFile + _files -> Left multipleCabalFiles + where + noCabalFile = "No cabal file found" + multipleCabalFiles = "Multiple cabal files found" + + isCabalFile e = case splitPath (Tar.entryPath e) of + [ _dir, file] -> takeExtension file == ".cabal" + [".", _dir, file] -> takeExtension file == ".cabal" + _ -> False + + parsePackageDescription' :: BS.ByteString -> Maybe GenericPackageDescription + parsePackageDescription' content = + case parsePackageDescription . fromUTF8 . 
BS.Char8.unpack $ content of + ParseOk _ pkg -> Just pkg + _ -> Nothing + + +-- ------------------------------------------------------------ +-- * Checking package targets +-- ------------------------------------------------------------ + +data PackageTargetProblem + = PackageNameUnknown PackageName UserTarget + | PackageNameAmbigious PackageName [PackageName] UserTarget + deriving Show + + +-- | Users are allowed to give package names case-insensitively, so we must +-- disambiguate named package references. +-- +disambiguatePackageTargets :: Package pkg' + => PackageIndex pkg' + -> [PackageName] + -> [PackageTarget pkg] + -> ( [PackageTargetProblem] + , [PackageSpecifier pkg] ) +disambiguatePackageTargets availablePkgIndex availableExtra targets = + partitionEithers (map disambiguatePackageTarget targets) + where + disambiguatePackageTarget packageTarget = case packageTarget of + PackageTargetLocation pkg -> Right (SpecificSourcePackage pkg) + + PackageTargetNamed pkgname constraints userTarget + | null (PackageIndex.lookupPackageName availablePkgIndex pkgname) + -> Left (PackageNameUnknown pkgname userTarget) + | otherwise -> Right (NamedPackage pkgname constraints) + + PackageTargetNamedFuzzy pkgname constraints userTarget -> + case disambiguatePackageName packageNameEnv pkgname of + None -> Left (PackageNameUnknown + pkgname userTarget) + Ambiguous pkgnames -> Left (PackageNameAmbigious + pkgname pkgnames userTarget) + Unambiguous pkgname' -> Right (NamedPackage pkgname' constraints') + where + constraints' = map (renamePackageConstraint pkgname') constraints + + -- use any extra specific available packages to help us disambiguate + packageNameEnv :: PackageNameEnv + packageNameEnv = mappend (indexPackageNameEnv availablePkgIndex) + (extraPackageNameEnv availableExtra) + + +-- | Report problems to the user. That is, if there are any problems +-- then raise an exception. 
+reportPackageTargetProblems :: Verbosity + -> [PackageTargetProblem] -> IO () +reportPackageTargetProblems verbosity problems = do + case [ pkg | PackageNameUnknown pkg originalTarget <- problems + , not (isUserTagetWorld originalTarget) ] of + [] -> return () + pkgs -> die $ unlines + [ "There is no package named '" ++ display name ++ "'. " + | name <- pkgs ] + ++ "You may need to run 'cabal update' to get the latest " + ++ "list of available packages." + + case [ (pkg, matches) | PackageNameAmbigious pkg matches _ <- problems ] of + [] -> return () + ambiguities -> die $ unlines + [ "The package name '" ++ display name + ++ "' is ambigious. It could be: " + ++ intercalate ", " (map display matches) + | (name, matches) <- ambiguities ] + + case [ pkg | PackageNameUnknown pkg UserTargetWorld <- problems ] of + [] -> return () + pkgs -> warn verbosity $ + "The following 'world' packages will be ignored because " + ++ "they refer to packages that cannot be found: " + ++ intercalate ", " (map display pkgs) ++ "\n" + ++ "You can suppress this warning by correcting the world file." + where + isUserTagetWorld UserTargetWorld = True; isUserTagetWorld _ = False + + +-- ------------------------------------------------------------ +-- * Disambiguating package names +-- ------------------------------------------------------------ + +data MaybeAmbigious a = None | Unambiguous a | Ambiguous [a] + +-- | Given a package name and a list of matching names, figure out which one it +-- might be referring to. If there is an exact case-sensitive match then that's +-- ok. If it matches just one package case-insensitively then that's also ok. +-- The only problem is if it matches multiple packages case-insensitively, in +-- that case it is ambigious. 
+-- +disambiguatePackageName :: PackageNameEnv + -> PackageName + -> MaybeAmbigious PackageName +disambiguatePackageName (PackageNameEnv pkgNameLookup) name = + case nub (pkgNameLookup name) of + [] -> None + [name'] -> Unambiguous name' + names -> case find (name==) names of + Just name' -> Unambiguous name' + Nothing -> Ambiguous names + + +newtype PackageNameEnv = PackageNameEnv (PackageName -> [PackageName]) + +instance Monoid PackageNameEnv where + mempty = PackageNameEnv (const []) + mappend (PackageNameEnv lookupA) (PackageNameEnv lookupB) = + PackageNameEnv (\name -> lookupA name ++ lookupB name) + +indexPackageNameEnv :: Package pkg => PackageIndex pkg -> PackageNameEnv +indexPackageNameEnv pkgIndex = PackageNameEnv pkgNameLookup + where + pkgNameLookup (PackageName name) = + map fst (PackageIndex.searchByName pkgIndex name) + +extraPackageNameEnv :: [PackageName] -> PackageNameEnv +extraPackageNameEnv names = PackageNameEnv pkgNameLookup + where + pkgNameLookup (PackageName name) = + [ PackageName name' + | let lname = lowercase name + , PackageName name' <- names + , lowercase name' == lname ] + + +-- ------------------------------------------------------------ +-- * Package constraints +-- ------------------------------------------------------------ + +data UserConstraint = + UserConstraintVersion PackageName VersionRange + | UserConstraintInstalled PackageName + | UserConstraintSource PackageName + | UserConstraintFlags PackageName FlagAssignment + | UserConstraintStanzas PackageName [OptionalStanza] + deriving (Show,Eq) + + +userToPackageConstraint :: UserConstraint -> PackageConstraint +-- At the moment, the types happen to be directly equivalent +userToPackageConstraint uc = case uc of + UserConstraintVersion name ver -> PackageConstraintVersion name ver + UserConstraintInstalled name -> PackageConstraintInstalled name + UserConstraintSource name -> PackageConstraintSource name + UserConstraintFlags name flags -> PackageConstraintFlags name flags + 
UserConstraintStanzas name stanzas -> PackageConstraintStanzas name stanzas + +renamePackageConstraint :: PackageName -> PackageConstraint -> PackageConstraint +renamePackageConstraint name pc = case pc of + PackageConstraintVersion _ ver -> PackageConstraintVersion name ver + PackageConstraintInstalled _ -> PackageConstraintInstalled name + PackageConstraintSource _ -> PackageConstraintSource name + PackageConstraintFlags _ flags -> PackageConstraintFlags name flags + PackageConstraintStanzas _ stanzas -> PackageConstraintStanzas name stanzas + +readUserConstraint :: String -> Either String UserConstraint +readUserConstraint str = + case readPToMaybe parse str of + Nothing -> Left msgCannotParse + Just c -> Right c + where + msgCannotParse = + "expected a package name followed by a constraint, which is " + ++ "either a version range, 'installed', 'source' or flags" + +--FIXME: use Text instance for FlagName and FlagAssignment +instance Text UserConstraint where + disp (UserConstraintVersion pkgname verrange) = disp pkgname + <+> disp verrange + disp (UserConstraintInstalled pkgname) = disp pkgname + <+> Disp.text "installed" + disp (UserConstraintSource pkgname) = disp pkgname + <+> Disp.text "source" + disp (UserConstraintFlags pkgname flags) = disp pkgname + <+> dispFlagAssignment flags + where + dispFlagAssignment = Disp.hsep . map dispFlagValue + dispFlagValue (f, True) = Disp.char '+' <> dispFlagName f + dispFlagValue (f, False) = Disp.char '-' <> dispFlagName f + dispFlagName (FlagName f) = Disp.text f + + disp (UserConstraintStanzas pkgname stanzas) = disp pkgname + <+> dispStanzas stanzas + where + dispStanzas = Disp.hsep . map dispStanza + dispStanza TestStanzas = Disp.text "test" + dispStanza BenchStanzas = Disp.text "bench" + + parse = parse >>= parseConstraint + where + spaces = Parse.satisfy isSpace >> Parse.skipSpaces + + parseConstraint pkgname = + ((parse >>= return . 
UserConstraintVersion pkgname) + +++ (do spaces + _ <- Parse.string "installed" + return (UserConstraintInstalled pkgname)) + +++ (do spaces + _ <- Parse.string "source" + return (UserConstraintSource pkgname)) + +++ (do spaces + _ <- Parse.string "test" + return (UserConstraintStanzas pkgname [TestStanzas])) + +++ (do spaces + _ <- Parse.string "bench" + return (UserConstraintStanzas pkgname [BenchStanzas]))) + <++ (parseFlagAssignment >>= (return . UserConstraintFlags pkgname)) + + parseFlagAssignment = Parse.many1 (spaces >> parseFlagValue) + parseFlagValue = + (do Parse.optional (Parse.char '+') + f <- parseFlagName + return (f, True)) + +++ (do _ <- Parse.char '-' + f <- parseFlagName + return (f, False)) + parseFlagName = liftM FlagName ident + + ident :: Parse.ReadP r String + ident = Parse.munch1 identChar >>= \s -> check s >> return s + where + identChar c = isAlphaNum c || c == '_' || c == '-' + check ('-':_) = Parse.pfail + check _ = return () diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Tar.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Tar.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Tar.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Tar.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,951 @@ +{-# LANGUAGE DeriveFunctor #-} +{-# OPTIONS_GHC -fno-warn-unused-imports #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.Tar +-- Copyright : (c) 2007 Bjorn Bringert, +-- 2008 Andrea Vezzosi, +-- 2008-2009 Duncan Coutts +-- License : BSD3 +-- +-- Maintainer : duncan@community.haskell.org +-- Portability : portable +-- +-- Reading, writing and manipulating \"@.tar@\" archive files. 
+-- +----------------------------------------------------------------------------- +module Distribution.Client.Tar ( + -- * High level \"all in one\" operations + createTarGzFile, + extractTarGzFile, + + -- * Converting between internal and external representation + read, + write, + writeEntries, + + -- * Packing and unpacking files to\/from internal representation + pack, + unpack, + + -- * Tar entry and associated types + Entry(..), + entryPath, + EntryContent(..), + Ownership(..), + FileSize, + Permissions, + EpochTime, + DevMajor, + DevMinor, + TypeCode, + Format(..), + buildTreeRefTypeCode, + buildTreeSnapshotTypeCode, + isBuildTreeRefTypeCode, + entrySizeInBlocks, + entrySizeInBytes, + + -- * Constructing simple entry values + simpleEntry, + fileEntry, + directoryEntry, + + -- * TarPath type + TarPath, + toTarPath, + fromTarPath, + + -- ** Sequences of tar entries + Entries(..), + foldrEntries, + foldlEntries, + unfoldrEntries, + mapEntries, + filterEntries, + entriesIndex, + + ) where + +import Data.Char (ord) +import Data.Int (Int64) +import Data.Bits (Bits, shiftL, testBit) +import Data.List (foldl') +import Numeric (readOct, showOct) +import Control.Applicative (Applicative(..)) +import Control.Monad (MonadPlus(mplus), when, ap, liftM) +import qualified Data.Map as Map +import qualified Data.ByteString.Lazy as BS +import qualified Data.ByteString.Lazy.Char8 as BS.Char8 +import Data.ByteString.Lazy (ByteString) +import qualified Codec.Compression.GZip as GZip +import qualified Distribution.Client.GZipUtils as GZipUtils + +import System.FilePath + ( () ) +import qualified System.FilePath as FilePath.Native +import qualified System.FilePath.Windows as FilePath.Windows +import qualified System.FilePath.Posix as FilePath.Posix +import System.Directory + ( getDirectoryContents, doesDirectoryExist + , getPermissions, createDirectoryIfMissing, copyFile ) +import qualified System.Directory as Permissions + ( Permissions(executable) ) +import 
Distribution.Client.Compat.FilePerms + ( setFileExecutable ) +import System.Posix.Types + ( FileMode ) +import Distribution.Client.Compat.Time + ( EpochTime, getModTime ) +import System.IO + ( IOMode(ReadMode), openBinaryFile, hFileSize ) +import System.IO.Unsafe (unsafeInterleaveIO) + +import Prelude hiding (read) + + +-- +-- * High level operations +-- + +createTarGzFile :: FilePath -- ^ Full Tarball path + -> FilePath -- ^ Base directory + -> FilePath -- ^ Directory to archive, relative to base dir + -> IO () +createTarGzFile tar base dir = + BS.writeFile tar . GZip.compress . write =<< pack base [dir] + +extractTarGzFile :: FilePath -- ^ Destination directory + -> FilePath -- ^ Expected subdir (to check for tarbombs) + -> FilePath -- ^ Tarball + -> IO () +extractTarGzFile dir expected tar = + unpack dir . checkTarbomb expected . read + . GZipUtils.maybeDecompress =<< BS.readFile tar + +-- +-- * Entry type +-- + +type FileSize = Int64 +type DevMajor = Int +type DevMinor = Int +type TypeCode = Char +type Permissions = FileMode + +-- | Tar archive entry. +-- +data Entry = Entry { + + -- | The path of the file or directory within the archive. This is in a + -- tar-specific form. Use 'entryPath' to get a native 'FilePath'. + entryTarPath :: !TarPath, + + -- | The real content of the entry. For 'NormalFile' this includes the + -- file data. An entry usually contains a 'NormalFile' or a 'Directory'. + entryContent :: !EntryContent, + + -- | File permissions (Unix style file mode). + entryPermissions :: !Permissions, + + -- | The user and group to which this file belongs. + entryOwnership :: !Ownership, + + -- | The time the file was last modified. + entryTime :: !EpochTime, + + -- | The tar format the archive is using. + entryFormat :: !Format + } + +-- | Type code for the local build tree reference entry type. We don't use the +-- symbolic link entry type because it allows only 100 ASCII characters for the +-- path. 
+buildTreeRefTypeCode :: TypeCode +buildTreeRefTypeCode = 'C' + +-- | Type code for the local build tree snapshot entry type. +buildTreeSnapshotTypeCode :: TypeCode +buildTreeSnapshotTypeCode = 'S' + +-- | Is this a type code for a build tree reference? +isBuildTreeRefTypeCode :: TypeCode -> Bool +isBuildTreeRefTypeCode typeCode + | (typeCode == buildTreeRefTypeCode + || typeCode == buildTreeSnapshotTypeCode) = True + | otherwise = False + +-- | Native 'FilePath' of the file or directory within the archive. +-- +entryPath :: Entry -> FilePath +entryPath = fromTarPath . entryTarPath + +-- | Return the size of an entry in bytes. +entrySizeInBytes :: Entry -> FileSize +entrySizeInBytes = (*512) . fromIntegral . entrySizeInBlocks + +-- | Return the number of blocks in an entry. +entrySizeInBlocks :: Entry -> Int +entrySizeInBlocks entry = 1 + case entryContent entry of + NormalFile _ size -> bytesToBlocks size + OtherEntryType _ _ size -> bytesToBlocks size + _ -> 0 + where + bytesToBlocks s = 1 + ((fromIntegral s - 1) `div` 512) + +-- | The content of a tar archive entry, which depends on the type of entry. +-- +-- Portable archives should contain only 'NormalFile' and 'Directory'. +-- +data EntryContent = NormalFile ByteString !FileSize + | Directory + | SymbolicLink !LinkTarget + | HardLink !LinkTarget + | CharacterDevice !DevMajor !DevMinor + | BlockDevice !DevMajor !DevMinor + | NamedPipe + | OtherEntryType !TypeCode ByteString !FileSize + +data Ownership = Ownership { + -- | The owner user name. Should be set to @\"\"@ if unknown. + ownerName :: String, + + -- | The owner group name. Should be set to @\"\"@ if unknown. + groupName :: String, + + -- | Numeric owner user id. Should be set to @0@ if unknown. + ownerId :: !Int, + + -- | Numeric owner group id. Should be set to @0@ if unknown. + groupId :: !Int + } + +-- | There have been a number of extensions to the tar file format over the +-- years. 
They all share the basic entry fields and put more meta-data in +-- different extended headers. +-- +data Format = + + -- | This is the classic Unix V7 tar format. It does not support owner and + -- group names, just numeric Ids. It also does not support device numbers. + V7Format + + -- | The \"USTAR\" format is an extension of the classic V7 format. It was + -- later standardised by POSIX. It has some restrictions but is the most + -- portable format. + -- + | UstarFormat + + -- | The GNU tar implementation also extends the classic V7 format, though + -- in a slightly different way from the USTAR format. In general for new + -- archives the standard USTAR/POSIX should be used. + -- + | GnuFormat + deriving Eq + +-- | @rw-r--r--@ for normal files +ordinaryFilePermissions :: Permissions +ordinaryFilePermissions = 0o0644 + +-- | @rwxr-xr-x@ for executable files +executableFilePermissions :: Permissions +executableFilePermissions = 0o0755 + +-- | @rwxr-xr-x@ for directories +directoryPermissions :: Permissions +directoryPermissions = 0o0755 + +isExecutable :: Permissions -> Bool +isExecutable p = testBit p 0 || testBit p 6 -- user or other executable + +-- | An 'Entry' with all default values except for the file name and type. It +-- uses the portable USTAR/POSIX format (see 'UstarHeader'). +-- +-- You can use this as a basis and override specific fields, eg: +-- +-- > (emptyEntry name HardLink) { linkTarget = target } +-- +simpleEntry :: TarPath -> EntryContent -> Entry +simpleEntry tarpath content = Entry { + entryTarPath = tarpath, + entryContent = content, + entryPermissions = case content of + Directory -> directoryPermissions + _ -> ordinaryFilePermissions, + entryOwnership = Ownership "" "" 0 0, + entryTime = 0, + entryFormat = UstarFormat + } + +-- | A tar 'Entry' for a file. +-- +-- Entry fields such as file permissions and ownership have default values. +-- +-- You can use this as a basis and override specific fields. 
For example if you +-- need an executable file you could use: +-- +-- > (fileEntry name content) { fileMode = executableFileMode } +-- +fileEntry :: TarPath -> ByteString -> Entry +fileEntry name fileContent = + simpleEntry name (NormalFile fileContent (BS.length fileContent)) + +-- | A tar 'Entry' for a directory. +-- +-- Entry fields such as file permissions and ownership have default values. +-- +directoryEntry :: TarPath -> Entry +directoryEntry name = simpleEntry name Directory + +-- +-- * Tar paths +-- + +-- | The classic tar format allowed just 100 characters for the file name. The +-- USTAR format extended this with an extra 155 characters, however it uses a +-- complex method of splitting the name between the two sections. +-- +-- Instead of just putting any overflow into the extended area, it uses the +-- extended area as a prefix. The aggravating insane bit however is that the +-- prefix (if any) must only contain a directory prefix. That is the split +-- between the two areas must be on a directory separator boundary. So there is +-- no simple calculation to work out if a file name is too long. Instead we +-- have to try to find a valid split that makes the name fit in the two areas. +-- +-- The rationale presumably was to make it a bit more compatible with old tar +-- programs that only understand the classic format. A classic tar would be +-- able to extract the file name and possibly some dir prefix, but not the +-- full dir prefix. So the files would end up in the wrong place, but that's +-- probably better than ending up with the wrong names too. +-- +-- So it's understandable but rather annoying. +-- +-- * Tar paths use POSIX format (ie @\'/\'@ directory separators), irrespective +-- of the local path conventions. +-- +-- * The directory separator between the prefix and name is /not/ stored. +-- +data TarPath = TarPath FilePath -- path name, 100 characters max. + FilePath -- path prefix, 155 characters max. 
+ deriving (Eq, Ord) + +-- | Convert a 'TarPath' to a native 'FilePath'. +-- +-- The native 'FilePath' will use the native directory separator but it is not +-- otherwise checked for validity or sanity. In particular: +-- +-- * The tar path may be invalid as a native path, eg the filename @\"nul\"@ is +-- not valid on Windows. +-- +-- * The tar path may be an absolute path or may contain @\"..\"@ components. +-- For security reasons this should not usually be allowed, but it is your +-- responsibility to check for these conditions (eg using 'checkSecurity'). +-- +fromTarPath :: TarPath -> FilePath +fromTarPath (TarPath name prefix) = adjustDirectory $ + FilePath.Native.joinPath $ FilePath.Posix.splitDirectories prefix + ++ FilePath.Posix.splitDirectories name + where + adjustDirectory | FilePath.Posix.hasTrailingPathSeparator name + = FilePath.Native.addTrailingPathSeparator + | otherwise = id + +-- | Convert a native 'FilePath' to a 'TarPath'. +-- +-- The conversion may fail if the 'FilePath' is too long. See 'TarPath' for a +-- description of the problem with splitting long 'FilePath's. +-- +toTarPath :: Bool -- ^ Is the path for a directory? This is needed because for + -- directories a 'TarPath' must always use a trailing @\/@. + -> FilePath -> Either String TarPath +toTarPath isDir = splitLongPath + . addTrailingSep + . FilePath.Posix.joinPath + . FilePath.Native.splitDirectories + where + addTrailingSep | isDir = FilePath.Posix.addTrailingPathSeparator + | otherwise = id + +-- | Take a sanitized path, split on directory separators and try to pack it +-- into the 155 + 100 tar file name format. +-- +-- The strategy is this: take the name-directory components in reverse order +-- and try to fit as many components into the 100 long name area as possible. +-- If all the remaining components fit in the 155 name area then we win. 
+-- +splitLongPath :: FilePath -> Either String TarPath +splitLongPath path = + case packName nameMax (reverse (FilePath.Posix.splitPath path)) of + Left err -> Left err + Right (name, []) -> Right (TarPath name "") + Right (name, first:rest) -> case packName prefixMax remainder of + Left err -> Left err + Right (_ , _ : _) -> Left "File name too long (cannot split)" + Right (prefix, []) -> Right (TarPath name prefix) + where + -- drop the '/' between the name and prefix: + remainder = init first : rest + + where + nameMax, prefixMax :: Int + nameMax = 100 + prefixMax = 155 + + packName _ [] = Left "File name empty" + packName maxLen (c:cs) + | n > maxLen = Left "File name too long" + | otherwise = Right (packName' maxLen n [c] cs) + where n = length c + + packName' maxLen n ok (c:cs) + | n' <= maxLen = packName' maxLen n' (c:ok) cs + where n' = n + length c + packName' _ _ ok cs = (FilePath.Posix.joinPath ok, cs) + +-- | The tar format allows just 100 ASCII characters for the 'SymbolicLink' and +-- 'HardLink' entry types. +-- +newtype LinkTarget = LinkTarget FilePath + deriving (Eq, Ord) + +-- | Convert a tar 'LinkTarget' to a native 'FilePath'. +-- +fromLinkTarget :: LinkTarget -> FilePath +fromLinkTarget (LinkTarget path) = adjustDirectory $ + FilePath.Native.joinPath $ FilePath.Posix.splitDirectories path + where + adjustDirectory | FilePath.Posix.hasTrailingPathSeparator path + = FilePath.Native.addTrailingPathSeparator + | otherwise = id + +-- +-- * Entries type +-- + +-- | A tar archive is a sequence of entries. 
+data Entries = Next Entry Entries + | Done + | Fail String + +unfoldrEntries :: (a -> Either String (Maybe (Entry, a))) -> a -> Entries +unfoldrEntries f = unfold + where + unfold x = case f x of + Left err -> Fail err + Right Nothing -> Done + Right (Just (e, x')) -> Next e (unfold x') + +foldrEntries :: (Entry -> a -> a) -> a -> (String -> a) -> Entries -> a +foldrEntries next done fail' = fold + where + fold (Next e es) = next e (fold es) + fold Done = done + fold (Fail err) = fail' err + +foldlEntries :: (a -> Entry -> a) -> a -> Entries -> Either String a +foldlEntries f = fold + where + fold a (Next e es) = (fold $! f a e) es + fold a Done = Right a + fold _ (Fail err) = Left err + +mapEntries :: (Entry -> Entry) -> Entries -> Entries +mapEntries f = foldrEntries (Next . f) Done Fail + +filterEntries :: (Entry -> Bool) -> Entries -> Entries +filterEntries p = + foldrEntries + (\entry rest -> if p entry + then Next entry rest + else rest) + Done Fail + +checkEntries :: (Entry -> Maybe String) -> Entries -> Entries +checkEntries checkEntry = + foldrEntries + (\entry rest -> case checkEntry entry of + Nothing -> Next entry rest + Just err -> Fail err) + Done Fail + +entriesIndex :: Entries -> Either String (Map.Map TarPath Entry) +entriesIndex = foldlEntries (\m e -> Map.insert (entryTarPath e) e m) Map.empty + +-- +-- * Checking +-- + +-- | This function checks a sequence of tar entries for file name security +-- problems. It checks that: +-- +-- * file paths are not absolute +-- +-- * file paths do not contain any path components that are \"@..@\" +-- +-- * file names are valid +-- +-- These checks are from the perspective of the current OS. That means we check +-- for \"@C:\blah@\" files on Windows and \"\/blah\" files on Unix. For archive +-- entry types 'HardLink' and 'SymbolicLink' the same checks are done for the +-- link target. A failure in any entry terminates the sequence of entries with +-- an error. 
+-- +checkSecurity :: Entries -> Entries +checkSecurity = checkEntries checkEntrySecurity + +checkTarbomb :: FilePath -> Entries -> Entries +checkTarbomb expectedTopDir = checkEntries (checkEntryTarbomb expectedTopDir) + +checkEntrySecurity :: Entry -> Maybe String +checkEntrySecurity entry = case entryContent entry of + HardLink link -> check (entryPath entry) + `mplus` check (fromLinkTarget link) + SymbolicLink link -> check (entryPath entry) + `mplus` check (fromLinkTarget link) + _ -> check (entryPath entry) + + where + check name + | not (FilePath.Native.isRelative name) + = Just $ "Absolute file name in tar archive: " ++ show name + + | not (FilePath.Native.isValid name) + = Just $ "Invalid file name in tar archive: " ++ show name + + | ".." `elem` FilePath.Native.splitDirectories name + = Just $ "Invalid file name in tar archive: " ++ show name + + | otherwise = Nothing + +checkEntryTarbomb :: FilePath -> Entry -> Maybe String +checkEntryTarbomb _ entry | nonFilesystemEntry = Nothing + where + -- Ignore some special entries we will not unpack anyway + nonFilesystemEntry = + case entryContent entry of + OtherEntryType 'g' _ _ -> True --PAX global header + OtherEntryType 'x' _ _ -> True --PAX individual header + _ -> False + +checkEntryTarbomb expectedTopDir entry = + case FilePath.Native.splitDirectories (entryPath entry) of + (topDir:_) | topDir == expectedTopDir -> Nothing + s -> Just $ "File in tar archive is not in the expected directory. " + ++ "Expected: " ++ show expectedTopDir + ++ " but got the following hierarchy: " + ++ show s + + +-- +-- * Reading +-- + +read :: ByteString -> Entries +read = unfoldrEntries getEntry + +getEntry :: ByteString -> Either String (Maybe (Entry, ByteString)) +getEntry bs + | BS.length header < 512 = Left "truncated tar archive" + + -- Tar files end with at least two blocks of all '0'. Checking this serves + -- two purposes. 
It checks the format but also forces the tail of the data + -- which is necessary to close the file if it came from a lazily read file. + | BS.head bs == 0 = case BS.splitAt 1024 bs of + (end, trailing) + | BS.length end /= 1024 -> Left "short tar trailer" + | not (BS.all (== 0) end) -> Left "bad tar trailer" + | not (BS.all (== 0) trailing) -> Left "tar file has trailing junk" + | otherwise -> Right Nothing + + | otherwise = partial $ do + + case (chksum_, format_) of + (Ok chksum, _ ) | correctChecksum header chksum -> return () + (Ok _, Ok _) -> fail "tar checksum error" + _ -> fail "data is not in tar format" + + -- These fields are partial, have to check them + format <- format_; mode <- mode_; + uid <- uid_; gid <- gid_; + size <- size_; mtime <- mtime_; + devmajor <- devmajor_; devminor <- devminor_; + + let content = BS.take size (BS.drop 512 bs) + padding = (512 - size) `mod` 512 + bs' = BS.drop (512 + size + padding) bs + + entry = Entry { + entryTarPath = TarPath name prefix, + entryContent = case typecode of + '\0' -> NormalFile content size + '0' -> NormalFile content size + '1' -> HardLink (LinkTarget linkname) + '2' -> SymbolicLink (LinkTarget linkname) + '3' -> CharacterDevice devmajor devminor + '4' -> BlockDevice devmajor devminor + '5' -> Directory + '6' -> NamedPipe + '7' -> NormalFile content size + _ -> OtherEntryType typecode content size, + entryPermissions = mode, + entryOwnership = Ownership uname gname uid gid, + entryTime = mtime, + entryFormat = format + } + + return (Just (entry, bs')) + + where + header = BS.take 512 bs + + name = getString 0 100 header + mode_ = getOct 100 8 header + uid_ = getOct 108 8 header + gid_ = getOct 116 8 header + size_ = getOct 124 12 header + mtime_ = getOct 136 12 header + chksum_ = getOct 148 8 header + typecode = getByte 156 header + linkname = getString 157 100 header + magic = getChars 257 8 header + uname = getString 265 32 header + gname = getString 297 32 header + devmajor_ = getOct 329 8 header + 
devminor_ = getOct 337 8 header + prefix = getString 345 155 header +-- trailing = getBytes 500 12 header + + format_ = case magic of + "\0\0\0\0\0\0\0\0" -> return V7Format + "ustar\NUL00" -> return UstarFormat + "ustar \NUL" -> return GnuFormat + _ -> fail "tar entry not in a recognised format" + +correctChecksum :: ByteString -> Int -> Bool +correctChecksum header checksum = checksum == checksum' + where + -- sum of all 512 bytes in the header block, + -- treating each byte as an 8-bit unsigned value + checksum' = BS.Char8.foldl' (\x y -> x + ord y) 0 header' + -- treating the 8 bytes of chksum as blank characters. + header' = BS.concat [BS.take 148 header, + BS.Char8.replicate 8 ' ', + BS.drop 156 header] + +-- * TAR format primitive input + +getOct :: (Integral a, Bits a) => Int64 -> Int64 -> ByteString -> Partial a +getOct off len header + | BS.head bytes == 128 = parseBinInt (BS.unpack (BS.tail bytes)) + | null octstr = return 0 + | otherwise = case readOct octstr of + [(x,[])] -> return x + _ -> fail "tar header is malformed (bad numeric encoding)" + where + bytes = getBytes off len header + octstr = BS.Char8.unpack + . BS.Char8.takeWhile (\c -> c /= '\NUL' && c /= ' ') + . BS.Char8.dropWhile (== ' ') + $ bytes + + -- Some tar programs switch into a binary format when they try to represent + -- field values that will not fit in the required width when using the text + -- octal format. In particular, the UID/GID fields can only hold up to 2^21 + -- while in the binary format can hold up to 2^32. The binary format uses + -- '\128' as the header which leaves 7 bytes. Only the last 4 are used. + parseBinInt [0, 0, 0, byte3, byte2, byte1, byte0] = + return $! shiftL (fromIntegral byte3) 24 + + shiftL (fromIntegral byte2) 16 + + shiftL (fromIntegral byte1) 8 + + shiftL (fromIntegral byte0) 0 + parseBinInt _ = fail "tar header uses non-standard number encoding" + +getBytes :: Int64 -> Int64 -> ByteString -> ByteString +getBytes off len = BS.take len . 
BS.drop off + +getByte :: Int64 -> ByteString -> Char +getByte off bs = BS.Char8.index bs off + +getChars :: Int64 -> Int64 -> ByteString -> String +getChars off len = BS.Char8.unpack . getBytes off len + +getString :: Int64 -> Int64 -> ByteString -> String +getString off len = BS.Char8.unpack . BS.Char8.takeWhile (/='\0') + . getBytes off len + +data Partial a = Error String | Ok a + deriving Functor + +partial :: Partial a -> Either String a +partial (Error msg) = Left msg +partial (Ok x) = Right x + +instance Applicative Partial where + pure = return + (<*>) = ap + +instance Monad Partial where + return = Ok + Error m >>= _ = Error m + Ok x >>= k = k x + fail = Error + +-- +-- * Writing +-- + +-- | Create the external representation of a tar archive by serialising a list +-- of tar entries. +-- +-- * The conversion is done lazily. +-- +write :: [Entry] -> ByteString +write es = BS.concat $ map putEntry es ++ [BS.replicate (512*2) 0] + +-- | Same as 'write', but for 'Entries'. +writeEntries :: Entries -> ByteString +writeEntries entries = BS.concat $ foldrEntries (\e res -> putEntry e : res) + [BS.replicate (512*2) 0] error entries + +putEntry :: Entry -> ByteString +putEntry entry = case entryContent entry of + NormalFile content size -> BS.concat [ header, content, padding size ] + OtherEntryType _ content size -> BS.concat [ header, content, padding size ] + _ -> header + where + header = putHeader entry + padding size = BS.replicate paddingSize 0 + where paddingSize = fromIntegral (negate size `mod` 512) + +putHeader :: Entry -> ByteString +putHeader entry = + BS.concat [ BS.take 148 block + , BS.Char8.pack $ putOct 7 checksum + , BS.Char8.singleton ' ' + , BS.drop 156 block ] + where + -- putHeaderNoChkSum returns a String, so we convert it to the final + -- representation before calculating the checksum. + block = BS.Char8.pack . 
putHeaderNoChkSum $ entry + checksum = BS.Char8.foldl' (\x y -> x + ord y) 0 block + +putHeaderNoChkSum :: Entry -> String +putHeaderNoChkSum Entry { + entryTarPath = TarPath name prefix, + entryContent = content, + entryPermissions = permissions, + entryOwnership = ownership, + entryTime = modTime, + entryFormat = format + } = + + concat + [ putString 100 $ name + , putOct 8 $ permissions + , putOct 8 $ ownerId ownership + , putOct 8 $ groupId ownership + , putOct 12 $ contentSize + , putOct 12 $ modTime + , fill 8 $ ' ' -- dummy checksum + , putChar8 $ typeCode + , putString 100 $ linkTarget + ] ++ + case format of + V7Format -> + fill 255 '\NUL' + UstarFormat -> concat + [ putString 8 $ "ustar\NUL00" + , putString 32 $ ownerName ownership + , putString 32 $ groupName ownership + , putOct 8 $ deviceMajor + , putOct 8 $ deviceMinor + , putString 155 $ prefix + , fill 12 $ '\NUL' + ] + GnuFormat -> concat + [ putString 8 $ "ustar \NUL" + , putString 32 $ ownerName ownership + , putString 32 $ groupName ownership + , putGnuDev 8 $ deviceMajor + , putGnuDev 8 $ deviceMinor + , putString 155 $ prefix + , fill 12 $ '\NUL' + ] + where + (typeCode, contentSize, linkTarget, + deviceMajor, deviceMinor) = case content of + NormalFile _ size -> ('0' , size, [], 0, 0) + Directory -> ('5' , 0, [], 0, 0) + SymbolicLink (LinkTarget link) -> ('2' , 0, link, 0, 0) + HardLink (LinkTarget link) -> ('1' , 0, link, 0, 0) + CharacterDevice major minor -> ('3' , 0, [], major, minor) + BlockDevice major minor -> ('4' , 0, [], major, minor) + NamedPipe -> ('6' , 0, [], 0, 0) + OtherEntryType code _ size -> (code, size, [], 0, 0) + + putGnuDev w n = case content of + CharacterDevice _ _ -> putOct w n + BlockDevice _ _ -> putOct w n + _ -> replicate w '\NUL' + +-- * TAR format primitive output + +type FieldWidth = Int + +putString :: FieldWidth -> String -> String +putString n s = take n s ++ fill (n - length s) '\NUL' + +--TODO: check integer widths, eg for large file sizes +putOct :: 
(Show a, Integral a) => FieldWidth -> a -> String +putOct n x = + let octStr = take (n-1) $ showOct x "" + in fill (n - length octStr - 1) '0' + ++ octStr + ++ putChar8 '\NUL' + +putChar8 :: Char -> String +putChar8 c = [c] + +fill :: FieldWidth -> Char -> String +fill n c = replicate n c + +-- +-- * Unpacking +-- + +unpack :: FilePath -> Entries -> IO () +unpack baseDir entries = unpackEntries [] (checkSecurity entries) + >>= emulateLinks + + where + -- We're relying here on 'checkSecurity' to make sure we're not scribbling + -- files all over the place. + + unpackEntries _ (Fail err) = fail err + unpackEntries links Done = return links + unpackEntries links (Next entry es) = case entryContent entry of + NormalFile file _ -> extractFile entry path file + >> unpackEntries links es + Directory -> extractDir path + >> unpackEntries links es + HardLink link -> (unpackEntries $! saveLink path link links) es + SymbolicLink link -> (unpackEntries $! saveLink path link links) es + _ -> unpackEntries links es --ignore other file types + where + path = entryPath entry + + extractFile entry path content = do + -- Note that tar archives do not make sure each directory is created + -- before files they contain, indeed we may have to create several + -- levels of directory. 
+ createDirectoryIfMissing True absDir + BS.writeFile absPath content + when (isExecutable (entryPermissions entry)) + (setFileExecutable absPath) + where + absDir = baseDir FilePath.Native.takeDirectory path + absPath = baseDir path + + extractDir path = createDirectoryIfMissing True (baseDir path) + + saveLink path link links = seq (length path) + $ seq (length link') + $ (path, link'):links + where link' = fromLinkTarget link + + emulateLinks = mapM_ $ \(relPath, relLinkTarget) -> + let absPath = baseDir relPath + absTarget = FilePath.Native.takeDirectory absPath relLinkTarget + in copyFile absTarget absPath + +-- +-- * Packing +-- + +pack :: FilePath -- ^ Base directory + -> [FilePath] -- ^ Files and directories to pack, relative to the base dir + -> IO [Entry] +pack baseDir paths0 = preparePaths baseDir paths0 >>= packPaths baseDir + +preparePaths :: FilePath -> [FilePath] -> IO [FilePath] +preparePaths baseDir paths = + fmap concat $ interleave + [ do isDir <- doesDirectoryExist (baseDir path) + if isDir + then do entries <- getDirectoryContentsRecursive (baseDir path) + return (FilePath.Native.addTrailingPathSeparator path + : map (path ) entries) + else return [path] + | path <- paths ] + +packPaths :: FilePath -> [FilePath] -> IO [Entry] +packPaths baseDir paths = + interleave + [ do tarpath <- either fail return (toTarPath isDir relpath) + if isDir then packDirectoryEntry filepath tarpath + else packFileEntry filepath tarpath + | relpath <- paths + , let isDir = FilePath.Native.hasTrailingPathSeparator filepath + filepath = baseDir relpath ] + +interleave :: [IO a] -> IO [a] +interleave = unsafeInterleaveIO . 
go + where + go [] = return [] + go (x:xs) = do + x' <- x + xs' <- interleave xs + return (x':xs') + +packFileEntry :: FilePath -- ^ Full path to find the file on the local disk + -> TarPath -- ^ Path to use for the tar Entry in the archive + -> IO Entry +packFileEntry filepath tarpath = do + mtime <- getModTime filepath + perms <- getPermissions filepath + file <- openBinaryFile filepath ReadMode + size <- hFileSize file + content <- BS.hGetContents file + return (simpleEntry tarpath (NormalFile content (fromIntegral size))) { + entryPermissions = if Permissions.executable perms + then executableFilePermissions + else ordinaryFilePermissions, + entryTime = mtime + } + +packDirectoryEntry :: FilePath -- ^ Full path to find the file on the local disk + -> TarPath -- ^ Path to use for the tar Entry in the archive + -> IO Entry +packDirectoryEntry filepath tarpath = do + mtime <- getModTime filepath + return (directoryEntry tarpath) { + entryTime = mtime + } + +getDirectoryContentsRecursive :: FilePath -> IO [FilePath] +getDirectoryContentsRecursive dir0 = + fmap tail (recurseDirectories dir0 [""]) + +recurseDirectories :: FilePath -> [FilePath] -> IO [FilePath] +recurseDirectories _ [] = return [] +recurseDirectories base (dir:dirs) = unsafeInterleaveIO $ do + (files, dirs') <- collect [] [] =<< getDirectoryContents (base dir) + + files' <- recurseDirectories base (dirs' ++ dirs) + return (dir : files ++ files') + + where + collect files dirs' [] = return (reverse files, reverse dirs') + collect files dirs' (entry:entries) | ignore entry + = collect files dirs' entries + collect files dirs' (entry:entries) = do + let dirEntry = dir entry + dirEntry' = FilePath.Native.addTrailingPathSeparator dirEntry + isDirectory <- doesDirectoryExist (base dirEntry) + if isDirectory + then collect files (dirEntry':dirs') entries + else collect (dirEntry:files) dirs' entries + + ignore ['.'] = True + ignore ['.', '.'] = True + ignore _ = False diff -Nru 
cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Types.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Types.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Types.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Types.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,247 @@ +{-# LANGUAGE DeriveFunctor #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.Types +-- Copyright : (c) David Himmelstrup 2005 +-- Duncan Coutts 2011 +-- License : BSD-like +-- +-- Maintainer : cabal-devel@haskell.org +-- Stability : provisional +-- Portability : portable +-- +-- Various common data types for the entire cabal-install system +----------------------------------------------------------------------------- +module Distribution.Client.Types where + +import Distribution.Package + ( PackageName, PackageId, Package(..), PackageFixedDeps(..) + , mkPackageKey, PackageKey, InstalledPackageId(..) + , PackageInstalled(..) ) +import Distribution.InstalledPackageInfo + ( InstalledPackageInfo, packageKey ) +import Distribution.PackageDescription + ( Benchmark(..), GenericPackageDescription(..), FlagAssignment + , TestSuite(..) ) +import Distribution.PackageDescription.Configuration + ( mapTreeData ) +import Distribution.Client.PackageIndex + ( PackageIndex ) +import Distribution.Version + ( VersionRange ) +import Distribution.Simple.Compiler + ( Compiler, packageKeySupported ) +import Distribution.Text (display) + +import Data.Map (Map) +import Network.URI (URI) +import Data.ByteString.Lazy (ByteString) +import Control.Exception + ( SomeException ) + +newtype Username = Username { unUsername :: String } +newtype Password = Password { unPassword :: String } + +-- | This is the information we get from a @00-index.tar.gz@ hackage index. 
+-- +data SourcePackageDb = SourcePackageDb { + packageIndex :: PackageIndex SourcePackage, + packagePreferences :: Map PackageName VersionRange +} + +-- ------------------------------------------------------------ +-- * Various kinds of information about packages +-- ------------------------------------------------------------ + +-- | TODO: This is a hack to help us transition from Cabal-1.6 to 1.8. +-- What is new in 1.8 is that installed packages and dependencies between +-- installed packages are now identified by an opaque InstalledPackageId +-- rather than a source PackageId. +-- +-- We should use simply an 'InstalledPackageInfo' here but to ease the +-- transition we are temporarily using this variant where we pretend that +-- installed packages still specify their deps in terms of PackageIds. +-- +-- Crucially this means that 'InstalledPackage' can be an instance of +-- 'PackageFixedDeps' where as 'InstalledPackageInfo' is no longer an instance +-- of that class. This means we can make 'PackageIndex'es of InstalledPackage +-- where as the InstalledPackageInfo now has its own monomorphic index type. +-- +data InstalledPackage = InstalledPackage + InstalledPackageInfo + [PackageId] + +instance Package InstalledPackage where + packageId (InstalledPackage pkg _) = packageId pkg +instance PackageFixedDeps InstalledPackage where + depends (InstalledPackage _ deps) = deps +instance PackageInstalled InstalledPackage where + installedPackageId (InstalledPackage pkg _) = installedPackageId pkg + installedDepends (InstalledPackage pkg _) = installedDepends pkg + + +-- | In order to reuse the implementation of PackageIndex which relies on +-- 'InstalledPackageId', we need to be able to synthesize these IDs prior +-- to installation. Eventually, we'll move to a representation of +-- 'InstalledPackageId' which can be properly computed before compilation +-- (of course, it's a bit of a misnomer since the packages are not actually +-- installed yet.) 
In any case, we'll synthesize temporary installed package +-- IDs to use as keys during install planning. These should never be written +-- out! Additionally, they need to be guaranteed unique within the install +-- plan. +fakeInstalledPackageId :: PackageId -> InstalledPackageId +fakeInstalledPackageId = InstalledPackageId . (".fake."++) . display + +-- | A 'ConfiguredPackage' is a not-yet-installed package along with the +-- total configuration information. The configuration information is total in +-- the sense that it provides all the configuration information and so the +-- final configure process will be independent of the environment. +-- +data ConfiguredPackage = ConfiguredPackage + SourcePackage -- package info, including repo + FlagAssignment -- complete flag assignment for the package + [OptionalStanza] -- list of enabled optional stanzas for the package + [PackageId] -- set of exact dependencies. These must be + -- consistent with the 'buildDepends' in the + -- 'PackageDescription' that you'd get by applying + -- the flag assignment and optional stanzas. + deriving Show + +instance Package ConfiguredPackage where + packageId (ConfiguredPackage pkg _ _ _) = packageId pkg + +instance PackageFixedDeps ConfiguredPackage where + depends (ConfiguredPackage _ _ _ deps) = deps + +instance PackageInstalled ConfiguredPackage where + installedPackageId = fakeInstalledPackageId . packageId + installedDepends = map fakeInstalledPackageId . depends + +-- | Like 'ConfiguredPackage', but with all dependencies guaranteed to be +-- installed already, hence itself ready to be installed. +data ReadyPackage = ReadyPackage + SourcePackage -- see 'ConfiguredPackage'. + FlagAssignment -- + [OptionalStanza] -- + [InstalledPackageInfo] -- Installed dependencies. 
+ deriving Show + +instance Package ReadyPackage where + packageId (ReadyPackage pkg _ _ _) = packageId pkg + +instance PackageFixedDeps ReadyPackage where + depends (ReadyPackage _ _ _ deps) = map packageId deps + +instance PackageInstalled ReadyPackage where + installedPackageId = fakeInstalledPackageId . packageId + installedDepends (ReadyPackage _ _ _ ipis) = map installedPackageId ipis + +-- | Extracts a package key from ReadyPackage, a common operation needed +-- to calculate build paths. +readyPackageKey :: Compiler -> ReadyPackage -> PackageKey +readyPackageKey comp (ReadyPackage pkg _ _ deps) = + mkPackageKey (packageKeySupported comp) (packageId pkg) + (map packageKey deps) [] + + +-- | Sometimes we need to convert a 'ReadyPackage' back to a +-- 'ConfiguredPackage'. For example, a failed 'PlanPackage' can be *either* +-- Ready or Configured. +readyPackageToConfiguredPackage :: ReadyPackage -> ConfiguredPackage +readyPackageToConfiguredPackage (ReadyPackage srcpkg flags stanzas deps) = + ConfiguredPackage srcpkg flags stanzas (map packageId deps) + +-- | A package description along with the location of the package sources. +-- +data SourcePackage = SourcePackage { + packageInfoId :: PackageId, + packageDescription :: GenericPackageDescription, + packageSource :: PackageLocation (Maybe FilePath), + packageDescrOverride :: PackageDescriptionOverride + } + deriving Show + +-- | We sometimes need to override the .cabal file in the tarball with +-- the newer one from the package index. 
+type PackageDescriptionOverride = Maybe ByteString + +instance Package SourcePackage where packageId = packageInfoId + +data OptionalStanza + = TestStanzas + | BenchStanzas + deriving (Eq, Ord, Show) + +enableStanzas + :: [OptionalStanza] + -> GenericPackageDescription + -> GenericPackageDescription +enableStanzas stanzas gpkg = gpkg + { condBenchmarks = flagBenchmarks $ condBenchmarks gpkg + , condTestSuites = flagTests $ condTestSuites gpkg + } + where + enableTest t = t { testEnabled = TestStanzas `elem` stanzas } + enableBenchmark bm = bm { benchmarkEnabled = BenchStanzas `elem` stanzas } + flagBenchmarks = map (\(n, bm) -> (n, mapTreeData enableBenchmark bm)) + flagTests = map (\(n, t) -> (n, mapTreeData enableTest t)) + +-- ------------------------------------------------------------ +-- * Package locations and repositories +-- ------------------------------------------------------------ + +data PackageLocation local = + + -- | An unpacked package in the given dir, or current dir + LocalUnpackedPackage FilePath + + -- | A package as a tarball that's available as a local tarball + | LocalTarballPackage FilePath + + -- | A package as a tarball from a remote URI + | RemoteTarballPackage URI local + + -- | A package available as a tarball from a repository. + -- + -- It may be from a local repository or from a remote repository, with a + -- locally cached copy. 
ie a package available from hackage + | RepoTarballPackage Repo PackageId local + +--TODO: +-- * add support for darcs and other SCM style remote repos with a local cache +-- | ScmPackage + deriving (Show, Functor) + +data LocalRepo = LocalRepo + deriving (Show,Eq) + +data RemoteRepo = RemoteRepo { + remoteRepoName :: String, + remoteRepoURI :: URI + } + deriving (Show,Eq,Ord) + +data Repo = Repo { + repoKind :: Either RemoteRepo LocalRepo, + repoLocalDir :: FilePath + } + deriving (Show,Eq) + +-- ------------------------------------------------------------ +-- * Build results +-- ------------------------------------------------------------ + +type BuildResult = Either BuildFailure BuildSuccess +data BuildFailure = PlanningFailed + | DependentFailed PackageId + | DownloadFailed SomeException + | UnpackFailed SomeException + | ConfigureFailed SomeException + | BuildFailed SomeException + | TestsFailed SomeException + | InstallFailed SomeException +data BuildSuccess = BuildOk DocsResult TestsResult + (Maybe InstalledPackageInfo) + +data DocsResult = DocsNotTried | DocsFailed | DocsOk +data TestsResult = TestsNotTried | TestsOk diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Update.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Update.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Update.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Update.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,55 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.Update +-- Copyright : (c) David Himmelstrup 2005 +-- License : BSD-like +-- +-- Maintainer : lemmih@gmail.com +-- Stability : provisional +-- Portability : portable +-- +-- +----------------------------------------------------------------------------- +module Distribution.Client.Update + ( 
update + ) where + +import Distribution.Client.Types + ( Repo(..), RemoteRepo(..), LocalRepo(..) ) +import Distribution.Client.HttpUtils + ( DownloadResult(..) ) +import Distribution.Client.FetchUtils + ( downloadIndex ) +import Distribution.Client.IndexUtils + ( updateRepoIndexCache ) + +import Distribution.Simple.Utils + ( writeFileAtomic, warn, notice ) +import Distribution.Verbosity + ( Verbosity ) + +import qualified Data.ByteString.Lazy as BS +import Distribution.Client.GZipUtils (maybeDecompress) +import System.FilePath (dropExtension) + +-- | 'update' downloads the package list from all known servers +update :: Verbosity -> [Repo] -> IO () +update verbosity [] = + warn verbosity $ "No remote package servers have been specified. Usually " + ++ "you would have one specified in the config file." +update verbosity repos = do + mapM_ (updateRepo verbosity) repos + +updateRepo :: Verbosity -> Repo -> IO () +updateRepo verbosity repo = case repoKind repo of + Right LocalRepo -> return () + Left remoteRepo -> do + notice verbosity $ "Downloading the latest package list from " + ++ remoteRepoName remoteRepo + downloadResult <- downloadIndex verbosity remoteRepo (repoLocalDir repo) + case downloadResult of + FileAlreadyInCache -> return () + FileDownloaded indexPath -> do + writeFileAtomic (dropExtension indexPath) . maybeDecompress + =<< BS.readFile indexPath + updateRepoIndexCache verbosity repo diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Upload.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Upload.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Upload.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Upload.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,173 @@ +-- This is a quick hack for uploading packages to Hackage. 
+-- See http://hackage.haskell.org/trac/hackage/wiki/CabalUpload + +module Distribution.Client.Upload (check, upload, report) where + +import qualified Data.ByteString.Lazy.Char8 as B (concat, length, pack, readFile, unpack) +import Data.ByteString.Lazy.Char8 (ByteString) + +import Distribution.Client.Types (Username(..), Password(..),Repo(..),RemoteRepo(..)) +import Distribution.Client.HttpUtils (isOldHackageURI, cabalBrowse) + +import Distribution.Simple.Utils (debug, notice, warn, info) +import Distribution.Verbosity (Verbosity) +import Distribution.Text (display) +import Distribution.Client.Config + +import qualified Distribution.Client.BuildReports.Anonymous as BuildReport +import qualified Distribution.Client.BuildReports.Upload as BuildReport + +import Network.Browser + ( request ) +import Network.HTTP + ( Header(..), HeaderName(..), findHeader + , Request(..), RequestMethod(..), Response(..) ) +import Network.URI (URI(uriPath), parseURI) + +import Data.Char (intToDigit) +import Numeric (showHex) +import System.IO (hFlush, stdin, stdout, hGetEcho, hSetEcho) +import Control.Exception (bracket) +import System.Random (randomRIO) +import System.FilePath ((), takeExtension, takeFileName) +import qualified System.FilePath.Posix as FilePath.Posix (combine) +import System.Directory +import Control.Monad (forM_, when) + + +--FIXME: how do we find this path for an arbitrary hackage server? +-- is it always at some fixed location relative to the server root? 
+legacyUploadURI :: URI +Just legacyUploadURI = parseURI "http://hackage.haskell.org/cgi-bin/hackage-scripts/protected/upload-pkg" + +checkURI :: URI +Just checkURI = parseURI "http://hackage.haskell.org/cgi-bin/hackage-scripts/check-pkg" + + +upload :: Verbosity -> [Repo] -> Maybe Username -> Maybe Password -> [FilePath] -> IO () +upload verbosity repos mUsername mPassword paths = do + let uploadURI = if isOldHackageURI targetRepoURI + then legacyUploadURI + else targetRepoURI{uriPath = uriPath targetRepoURI `FilePath.Posix.combine` "upload"} + Username username <- maybe promptUsername return mUsername + Password password <- maybe promptPassword return mPassword + let auth = Just (username, password) + flip mapM_ paths $ \path -> do + notice verbosity $ "Uploading " ++ path ++ "... " + handlePackage verbosity uploadURI auth path + where + targetRepoURI = remoteRepoURI $ last [ remoteRepo | Left remoteRepo <- map repoKind repos ] --FIXME: better error message when no repos are given + +promptUsername :: IO Username +promptUsername = do + putStr "Hackage username: " + hFlush stdout + fmap Username getLine + +promptPassword :: IO Password +promptPassword = do + putStr "Hackage password: " + hFlush stdout + -- save/restore the terminal echoing status + passwd <- bracket (hGetEcho stdin) (hSetEcho stdin) $ \_ -> do + hSetEcho stdin False -- no echoing for entering the password + fmap Password getLine + putStrLn "" + return passwd + +report :: Verbosity -> [Repo] -> Maybe Username -> Maybe Password -> IO () +report verbosity repos mUsername mPassword = do + Username username <- maybe promptUsername return mUsername + Password password <- maybe promptPassword return mPassword + let auth = Just (username, password) + forM_ repos $ \repo -> case repoKind repo of + Left remoteRepo + -> do dotCabal <- defaultCabalDir + let srcDir = dotCabal "reports" remoteRepoName remoteRepo + -- We don't want to bomb out just because we haven't built any packages from this repo yet + 
srcExists <- doesDirectoryExist srcDir + when srcExists $ do + contents <- getDirectoryContents srcDir + forM_ (filter (\c -> takeExtension c == ".log") contents) $ \logFile -> + do inp <- readFile (srcDir logFile) + let (reportStr, buildLog) = read inp :: (String,String) + case BuildReport.parse reportStr of + Left errs -> do warn verbosity $ "Errors: " ++ errs -- FIXME + Right report' -> + do info verbosity $ "Uploading report for " ++ display (BuildReport.package report') + cabalBrowse verbosity auth $ BuildReport.uploadReports (remoteRepoURI remoteRepo) [(report', Just buildLog)] + return () + Right{} -> return () + +check :: Verbosity -> [FilePath] -> IO () +check verbosity paths = do + flip mapM_ paths $ \path -> do + notice verbosity $ "Checking " ++ path ++ "... " + handlePackage verbosity checkURI Nothing path + +handlePackage :: Verbosity -> URI -> Maybe (String, String) + -> FilePath -> IO () +handlePackage verbosity uri auth path = + do req <- mkRequest uri path + debug verbosity $ "\n" ++ show req + (_,resp) <- cabalBrowse verbosity auth $ request req + debug verbosity $ show resp + case rspCode resp of + (2,0,0) -> do notice verbosity "Ok" + (x,y,z) -> do notice verbosity $ "Error: " ++ path ++ ": " + ++ map intToDigit [x,y,z] ++ " " + ++ rspReason resp + case findHeader HdrContentType resp of + Just contenttype + | takeWhile (/= ';') contenttype == "text/plain" + -> notice verbosity $ B.unpack $ rspBody resp + _ -> debug verbosity $ B.unpack $ rspBody resp + +mkRequest :: URI -> FilePath -> IO (Request ByteString) +mkRequest uri path = + do pkg <- readBinaryFile path + boundary <- genBoundary + let body = printMultiPart (B.pack boundary) (mkFormData path pkg) + return $ Request { + rqURI = uri, + rqMethod = POST, + rqHeaders = [Header HdrContentType ("multipart/form-data; boundary="++boundary), + Header HdrContentLength (show (B.length body)), + Header HdrAccept ("text/plain")], + rqBody = body + } + +readBinaryFile :: FilePath -> IO ByteString 
+readBinaryFile = B.readFile + +genBoundary :: IO String +genBoundary = do i <- randomRIO (0x10000000000000,0xFFFFFFFFFFFFFF) :: IO Integer + return $ showHex i "" + +mkFormData :: FilePath -> ByteString -> [BodyPart] +mkFormData path pkg = + -- yes, web browsers are that stupid (re quoting) + [BodyPart [Header hdrContentDisposition $ + "form-data; name=package; filename=\""++takeFileName path++"\"", + Header HdrContentType "application/x-gzip"] + pkg] + +hdrContentDisposition :: HeaderName +hdrContentDisposition = HdrCustom "Content-disposition" + +-- * Multipart, partly stolen from the cgi package. + +data BodyPart = BodyPart [Header] ByteString + +printMultiPart :: ByteString -> [BodyPart] -> ByteString +printMultiPart boundary xs = + B.concat $ map (printBodyPart boundary) xs ++ [crlf, dd, boundary, dd, crlf] + +printBodyPart :: ByteString -> BodyPart -> ByteString +printBodyPart boundary (BodyPart hs c) = B.concat $ [crlf, dd, boundary, crlf] ++ map (B.pack . show) hs ++ [crlf, c] + +crlf :: ByteString +crlf = B.pack "\r\n" + +dd :: ByteString +dd = B.pack "--" diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Utils.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Utils.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Utils.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Utils.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,237 @@ +{-# LANGUAGE ForeignFunctionInterface, CPP #-} + +module Distribution.Client.Utils ( MergeResult(..) 
+ , mergeBy, duplicates, duplicatesBy + , inDir, determineNumJobs, numberOfProcessors + , removeExistingFile + , makeAbsoluteToCwd, filePathToByteString + , byteStringToFilePath, tryCanonicalizePath + , canonicalizePathNoThrow + , moreRecentFile, existsAndIsMoreRecentThan + , tryFindAddSourcePackageDesc + , tryFindPackageDesc + , relaxEncodingErrors) + where + +import Distribution.Compat.Exception ( catchIO ) +import Distribution.Client.Compat.Time ( getModTime ) +import Distribution.Simple.Setup ( Flag(..) ) +import Distribution.Simple.Utils ( die, findPackageDesc ) +import qualified Data.ByteString.Lazy as BS +import Control.Monad + ( when ) +import Data.Bits + ( (.|.), shiftL, shiftR ) +import Data.Char + ( ord, chr ) +import Data.List + ( isPrefixOf, sortBy, groupBy ) +import Data.Word + ( Word8, Word32) +import Foreign.C.Types ( CInt(..) ) +import qualified Control.Exception as Exception + ( finally ) +import System.Directory + ( canonicalizePath, doesFileExist, getCurrentDirectory + , removeFile, setCurrentDirectory ) +import System.FilePath + ( (), isAbsolute ) +import System.IO + ( Handle +#if MIN_VERSION_base(4,4,0) + , hGetEncoding, hSetEncoding +#endif + ) +import System.IO.Unsafe ( unsafePerformIO ) + +#if MIN_VERSION_base(4,4,0) +import GHC.IO.Encoding + ( recover, TextEncoding(TextEncoding) ) +import GHC.IO.Encoding.Failure + ( recoverEncode, CodingFailureMode(TransliterateCodingFailure) ) +#endif + +#if defined(mingw32_HOST_OS) +import Prelude hiding (ioError) +import Control.Monad (liftM2, unless) +import System.Directory (doesDirectoryExist) +import System.IO.Error (ioError, mkIOError, doesNotExistErrorType) +#endif + +-- | Generic merging utility. For sorted input lists this is a full outer join. +-- +-- * The result list never contains @(Nothing, Nothing)@. 
+-- +mergeBy :: (a -> b -> Ordering) -> [a] -> [b] -> [MergeResult a b] +mergeBy cmp = merge + where + merge [] ys = [ OnlyInRight y | y <- ys] + merge xs [] = [ OnlyInLeft x | x <- xs] + merge (x:xs) (y:ys) = + case x `cmp` y of + GT -> OnlyInRight y : merge (x:xs) ys + EQ -> InBoth x y : merge xs ys + LT -> OnlyInLeft x : merge xs (y:ys) + +data MergeResult a b = OnlyInLeft a | InBoth a b | OnlyInRight b + +duplicates :: Ord a => [a] -> [[a]] +duplicates = duplicatesBy compare + +duplicatesBy :: (a -> a -> Ordering) -> [a] -> [[a]] +duplicatesBy cmp = filter moreThanOne . groupBy eq . sortBy cmp + where + eq a b = case cmp a b of + EQ -> True + _ -> False + moreThanOne (_:_:_) = True + moreThanOne _ = False + +-- | Like 'removeFile', but does not throw an exception when the file does not +-- exist. +removeExistingFile :: FilePath -> IO () +removeExistingFile path = do + exists <- doesFileExist path + when exists $ + removeFile path + +-- | Executes the action in the specified directory. +inDir :: Maybe FilePath -> IO a -> IO a +inDir Nothing m = m +inDir (Just d) m = do + old <- getCurrentDirectory + setCurrentDirectory d + m `Exception.finally` setCurrentDirectory old + +foreign import ccall "getNumberOfProcessors" c_getNumberOfProcessors :: IO CInt + +-- The number of processors is not going to change during the duration of the +-- program, so unsafePerformIO is safe here. +numberOfProcessors :: Int +numberOfProcessors = fromEnum $ unsafePerformIO c_getNumberOfProcessors + +-- | Determine the number of jobs to use given the value of the '-j' flag. +determineNumJobs :: Flag (Maybe Int) -> Int +determineNumJobs numJobsFlag = + case numJobsFlag of + NoFlag -> 1 + Flag Nothing -> numberOfProcessors + Flag (Just n) -> n + +-- | Given a relative path, make it absolute relative to the current +-- directory. Absolute paths are returned unmodified. 
+makeAbsoluteToCwd :: FilePath -> IO FilePath +makeAbsoluteToCwd path | isAbsolute path = return path + | otherwise = do cwd <- getCurrentDirectory + return $! cwd path + +-- | Convert a 'FilePath' to a lazy 'ByteString'. Each 'Char' is +-- encoded as a little-endian 'Word32'. +filePathToByteString :: FilePath -> BS.ByteString +filePathToByteString p = + BS.pack $ foldr conv [] codepts + where + codepts :: [Word32] + codepts = map (fromIntegral . ord) p + + conv :: Word32 -> [Word8] -> [Word8] + conv w32 rest = b0:b1:b2:b3:rest + where + b0 = fromIntegral $ w32 + b1 = fromIntegral $ w32 `shiftR` 8 + b2 = fromIntegral $ w32 `shiftR` 16 + b3 = fromIntegral $ w32 `shiftR` 24 + +-- | Reverse operation to 'filePathToByteString'. +byteStringToFilePath :: BS.ByteString -> FilePath +byteStringToFilePath bs | bslen `mod` 4 /= 0 = unexpected + | otherwise = go 0 + where + unexpected = "Distribution.Client.Utils.byteStringToFilePath: unexpected" + bslen = BS.length bs + + go i | i == bslen = [] + | otherwise = (chr . fromIntegral $ w32) : go (i+4) + where + w32 :: Word32 + w32 = b0 .|. (b1 `shiftL` 8) .|. (b2 `shiftL` 16) .|. (b3 `shiftL` 24) + b0 = fromIntegral $ BS.index bs i + b1 = fromIntegral $ BS.index bs (i + 1) + b2 = fromIntegral $ BS.index bs (i + 2) + b3 = fromIntegral $ BS.index bs (i + 3) + +-- | Workaround for the inconsistent behaviour of 'canonicalizePath'. It throws +-- an error if the path refers to a non-existent file on *nix, but not on +-- Windows. +tryCanonicalizePath :: FilePath -> IO FilePath +tryCanonicalizePath path = do + ret <- canonicalizePath path +#if defined(mingw32_HOST_OS) + exists <- liftM2 (||) (doesFileExist ret) (doesDirectoryExist ret) + unless exists $ + ioError $ mkIOError doesNotExistErrorType "canonicalizePath" + Nothing (Just ret) +#endif + return ret + +-- | A non-throwing wrapper for 'canonicalizePath'. If 'canonicalizePath' throws +-- an exception, returns the path argument unmodified. 
+canonicalizePathNoThrow :: FilePath -> IO FilePath +canonicalizePathNoThrow path = do + canonicalizePath path `catchIO` (\_ -> return path) + +-------------------- +-- Modification time + +-- | Like Distribution.Simple.Utils.moreRecentFile, but uses getModTime instead +-- of getModificationTime for higher precision. We can't merge the two because +-- Distribution.Client.Time uses MIN_VERSION macros. +moreRecentFile :: FilePath -> FilePath -> IO Bool +moreRecentFile a b = do + exists <- doesFileExist b + if not exists + then return True + else do tb <- getModTime b + ta <- getModTime a + return (ta > tb) + +-- | Like 'moreRecentFile', but also checks that the first file exists. +existsAndIsMoreRecentThan :: FilePath -> FilePath -> IO Bool +existsAndIsMoreRecentThan a b = do + exists <- doesFileExist a + if not exists + then return False + else a `moreRecentFile` b + +-- | Sets the handler for encoding errors to one that transliterates invalid +-- characters into one present in the encoding (i.e., \'?\'). +-- This is opposed to the default behavior, which is to throw an exception on +-- error. This function will ignore file handles that have a Unicode encoding +-- set. It's a no-op for versions of `base` less than 4.4. +relaxEncodingErrors :: Handle -> IO () +relaxEncodingErrors handle = do +#if MIN_VERSION_base(4,4,0) + maybeEncoding <- hGetEncoding handle + case maybeEncoding of + Just (TextEncoding name decoder encoder) | not ("UTF" `isPrefixOf` name) -> + let relax x = x { recover = recoverEncode TransliterateCodingFailure } + in hSetEncoding handle (TextEncoding name decoder (fmap relax encoder)) + _ -> +#endif + return () + +-- |Like 'tryFindPackageDesc', but with error specific to add-source deps. 
+tryFindAddSourcePackageDesc :: FilePath -> String -> IO FilePath +tryFindAddSourcePackageDesc depPath err = tryFindPackageDesc depPath $ + err ++ "\n" ++ "Failed to read cabal file of add-source dependency: " + ++ depPath + +-- |Try to find a @.cabal@ file, in directory @depPath@. Fails if one cannot be +-- found, with @err@ prefixing the error message. This function simply allows +-- us to give a more descriptive error than that provided by @findPackageDesc@. +tryFindPackageDesc :: FilePath -> String -> IO FilePath +tryFindPackageDesc depPath err = do + errOrCabalFile <- findPackageDesc depPath + case errOrCabalFile of + Right file -> return file + Left _ -> die err diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Win32SelfUpgrade.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Win32SelfUpgrade.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/Win32SelfUpgrade.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/Win32SelfUpgrade.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,225 @@ +{-# LANGUAGE CPP, ForeignFunctionInterface #-} +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.Win32SelfUpgrade +-- Copyright : (c) Duncan Coutts 2008 +-- License : BSD-like +-- +-- Maintainer : cabal-devel@haskell.org +-- Stability : provisional +-- Portability : portable +-- +-- Support for self-upgrading executables on Windows platforms. +----------------------------------------------------------------------------- +module Distribution.Client.Win32SelfUpgrade ( +-- * Explanation +-- +-- | Windows inherited a design choice from DOS that while initially innocuous +-- has rather unfortunate consequences. It maintains the invariant that every +-- open file has a corresponding name on disk. 
One positive consequence of this +-- is that an executable can always find it's own executable file. The downside +-- is that a program cannot be deleted or upgraded while it is running without +-- hideous workarounds. This module implements one such hideous workaround. +-- +-- The basic idea is: +-- +-- * Move our own exe file to a new name +-- * Copy a new exe file to the previous name +-- * Run the new exe file, passing our own PID and new path +-- * Wait for the new process to start +-- * Close the new exe file +-- * Exit old process +-- +-- Then in the new process: +-- +-- * Inform the old process that we've started +-- * Wait for the old process to die +-- * Delete the old exe file +-- * Exit new process +-- + + possibleSelfUpgrade, + deleteOldExeFile, + ) where + +#if mingw32_HOST_OS + +import qualified System.Win32 as Win32 +import System.Win32 (DWORD, BOOL, HANDLE, LPCTSTR) +import Foreign.Ptr (Ptr, nullPtr) +import System.Process (runProcess) +import System.Directory (canonicalizePath) +import System.FilePath (takeBaseName, replaceBaseName, equalFilePath) + +import Distribution.Verbosity as Verbosity (Verbosity, showForCabal) +import Distribution.Simple.Utils (debug, info) + +import Prelude hiding (log) + +-- | If one of the given files is our own exe file then we arrange things such +-- that the nested action can replace our own exe file. +-- +-- We require that the new process accepts a command line invocation that +-- calls 'deleteOldExeFile', passing in the PID and exe file. +-- +possibleSelfUpgrade :: Verbosity + -> [FilePath] + -> IO a -> IO a +possibleSelfUpgrade verbosity newPaths action = do + dstPath <- canonicalizePath =<< Win32.getModuleFileName Win32.nullHANDLE + + newPaths' <- mapM canonicalizePath newPaths + let doingSelfUpgrade = any (equalFilePath dstPath) newPaths' + + if not doingSelfUpgrade + then action + else do + info verbosity $ "cabal-install does the replace-own-exe-file dance..." 
+ tmpPath <- moveOurExeOutOfTheWay verbosity + result <- action + scheduleOurDemise verbosity dstPath tmpPath + (\pid path -> ["win32selfupgrade", pid, path + ,"--verbose=" ++ Verbosity.showForCabal verbosity]) + return result + +-- | The name of a Win32 Event object that we use to synchronise between the +-- old and new processes. We need to synchronise to make sure that the old +-- process has not yet terminated by the time the new one starts up and looks +-- for the old process. Otherwise the old one might have already terminated +-- and we could not wait on it terminating reliably (eg the PID might get +-- re-used). +-- +syncEventName :: String +syncEventName = "Local\\cabal-install-upgrade" + +-- | The first part of allowing our exe file to be replaced is to move the +-- existing exe file out of the way. Although we cannot delete our exe file +-- while we're still running, fortunately we can rename it, at least within +-- the same directory. +-- +moveOurExeOutOfTheWay :: Verbosity -> IO FilePath +moveOurExeOutOfTheWay verbosity = do + ourPID <- getCurrentProcessId + dstPath <- Win32.getModuleFileName Win32.nullHANDLE + + let tmpPath = replaceBaseName dstPath (takeBaseName dstPath ++ show ourPID) + + debug verbosity $ "moving " ++ dstPath ++ " to " ++ tmpPath + Win32.moveFile dstPath tmpPath + return tmpPath + +-- | Assuming we've now installed the new exe file in the right place, we +-- launch it and ask it to delete our exe file when we eventually terminate. 
+-- +scheduleOurDemise :: Verbosity -> FilePath -> FilePath + -> (String -> FilePath -> [String]) -> IO () +scheduleOurDemise verbosity dstPath tmpPath mkArgs = do + ourPID <- getCurrentProcessId + event <- createEvent syncEventName + + let args = mkArgs (show ourPID) tmpPath + log $ "launching child " ++ unwords (dstPath : map show args) + _ <- runProcess dstPath args Nothing Nothing Nothing Nothing Nothing + + log $ "waiting for the child to start up" + waitForSingleObject event (10*1000) -- wait at most 10 sec + log $ "child started ok" + + where + log msg = debug verbosity ("Win32Reinstall.parent: " ++ msg) + +-- | Assuming we're now in the new child process, we've been asked by the old +-- process to wait for it to terminate and then we can remove the old exe file +-- that it renamed itself to. +-- +deleteOldExeFile :: Verbosity -> Int -> FilePath -> IO () +deleteOldExeFile verbosity oldPID tmpPath = do + log $ "process started. Will delete exe file of process " + ++ show oldPID ++ " at path " ++ tmpPath + + log $ "getting handle of parent process " ++ show oldPID + oldPHANDLE <- Win32.openProcess Win32.sYNCHORNIZE False (fromIntegral oldPID) + + log $ "synchronising with parent" + event <- openEvent syncEventName + setEvent event + + log $ "waiting for parent process to terminate" + waitForSingleObject oldPHANDLE Win32.iNFINITE + log $ "parent process terminated" + + log $ "deleting parent's old .exe file" + Win32.deleteFile tmpPath + + where + log msg = debug verbosity ("Win32Reinstall.child: " ++ msg) + +------------------------ +-- Win32 foreign imports +-- + +-- A bunch of functions sadly not provided by the Win32 package. 
+ +#ifdef x86_64_HOST_ARCH +#define CALLCONV ccall +#else +#define CALLCONV stdcall +#endif + +foreign import CALLCONV unsafe "windows.h GetCurrentProcessId" + getCurrentProcessId :: IO DWORD + +foreign import CALLCONV unsafe "windows.h WaitForSingleObject" + waitForSingleObject_ :: HANDLE -> DWORD -> IO DWORD + +waitForSingleObject :: HANDLE -> DWORD -> IO () +waitForSingleObject handle timeout = + Win32.failIf_ bad "WaitForSingleObject" $ + waitForSingleObject_ handle timeout + where + bad result = not (result == 0 || result == wAIT_TIMEOUT) + wAIT_TIMEOUT = 0x00000102 + +foreign import CALLCONV unsafe "windows.h CreateEventW" + createEvent_ :: Ptr () -> BOOL -> BOOL -> LPCTSTR -> IO HANDLE + +createEvent :: String -> IO HANDLE +createEvent name = do + Win32.failIfNull "CreateEvent" $ + Win32.withTString name $ + createEvent_ nullPtr False False + +foreign import CALLCONV unsafe "windows.h OpenEventW" + openEvent_ :: DWORD -> BOOL -> LPCTSTR -> IO HANDLE + +openEvent :: String -> IO HANDLE +openEvent name = do + Win32.failIfNull "OpenEvent" $ + Win32.withTString name $ + openEvent_ eVENT_MODIFY_STATE False + where + eVENT_MODIFY_STATE :: DWORD + eVENT_MODIFY_STATE = 0x0002 + +foreign import CALLCONV unsafe "windows.h SetEvent" + setEvent_ :: HANDLE -> IO BOOL + +setEvent :: HANDLE -> IO () +setEvent handle = + Win32.failIfFalse_ "SetEvent" $ + setEvent_ handle + +#else + +import Distribution.Verbosity (Verbosity) +import Distribution.Simple.Utils (die) + +possibleSelfUpgrade :: Verbosity + -> [FilePath] + -> IO a -> IO a +possibleSelfUpgrade _ _ action = action + +deleteOldExeFile :: Verbosity -> Int -> FilePath -> IO () +deleteOldExeFile _ _ _ = die "win32selfupgrade not needed except on win32" + +#endif diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/World.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/World.hs --- 
cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Distribution/Client/World.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Distribution/Client/World.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,172 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Distribution.Client.World +-- Copyright : (c) Peter Robinson 2009 +-- License : BSD-like +-- +-- Maintainer : thaldyron@gmail.com +-- Stability : provisional +-- Portability : portable +-- +-- Interface to the world-file that contains a list of explicitly +-- requested packages. Meant to be imported qualified. +-- +-- A world file entry stores the package-name, package-version, and +-- user flags. +-- For example, the entry generated by +-- # cabal install stm-io-hooks --flags="-debug" +-- looks like this: +-- # stm-io-hooks -any --flags="-debug" +-- To rebuild/upgrade the packages in world (e.g. when updating the compiler) +-- use +-- # cabal install world +-- +----------------------------------------------------------------------------- +module Distribution.Client.World ( + WorldPkgInfo(..), + insert, + delete, + getContents, + ) where + +import Distribution.Package + ( Dependency(..) 
) +import Distribution.PackageDescription + ( FlagAssignment, FlagName(FlagName) ) +import Distribution.Verbosity + ( Verbosity ) +import Distribution.Simple.Utils + ( die, info, chattyTry, writeFileAtomic ) +import Distribution.Text + ( Text(..), display, simpleParse ) +import qualified Distribution.Compat.ReadP as Parse +import Distribution.Compat.Exception ( catchIO ) +import qualified Text.PrettyPrint as Disp +import Text.PrettyPrint ( (<>), (<+>) ) + + +import Data.Char as Char + +import Data.List + ( unionBy, deleteFirstsBy, nubBy ) +import System.IO.Error + ( isDoesNotExistError ) +import qualified Data.ByteString.Lazy.Char8 as B +import Prelude hiding (getContents) + + +data WorldPkgInfo = WorldPkgInfo Dependency FlagAssignment + deriving (Show,Eq) + +-- | Adds packages to the world file; creates the file if it doesn't +-- exist yet. Version constraints and flag assignments for a package are +-- updated if already present. IO errors are non-fatal. +insert :: Verbosity -> FilePath -> [WorldPkgInfo] -> IO () +insert = modifyWorld $ unionBy equalUDep + +-- | Removes packages from the world file. +-- Note: Currently unused as there is no mechanism in Cabal (yet) to +-- handle uninstalls. IO errors are non-fatal. +delete :: Verbosity -> FilePath -> [WorldPkgInfo] -> IO () +delete = modifyWorld $ flip (deleteFirstsBy equalUDep) + +-- | WorldPkgInfo values are considered equal if they refer to +-- the same package, i.e., we don't care about differing versions or flags. +equalUDep :: WorldPkgInfo -> WorldPkgInfo -> Bool +equalUDep (WorldPkgInfo (Dependency pkg1 _) _) + (WorldPkgInfo (Dependency pkg2 _) _) = pkg1 == pkg2 + +-- | Modifies the world file by applying an update-function ('unionBy' +-- for 'insert', 'deleteFirstsBy' for 'delete') to the given list of +-- packages. IO errors are considered non-fatal. 
+modifyWorld :: ([WorldPkgInfo] -> [WorldPkgInfo] + -> [WorldPkgInfo]) + -- ^ Function that defines how + -- the list of user packages are merged with + -- existing world packages. + -> Verbosity + -> FilePath -- ^ Location of the world file + -> [WorldPkgInfo] -- ^ list of user supplied packages + -> IO () +modifyWorld _ _ _ [] = return () +modifyWorld f verbosity world pkgs = + chattyTry "Error while updating world-file. " $ do + pkgsOldWorld <- getContents world + -- Filter out packages that are not in the world file: + let pkgsNewWorld = nubBy equalUDep $ f pkgs pkgsOldWorld + -- 'Dependency' is not an Ord instance, so we need to check for + -- equivalence the awkward way: + if not (all (`elem` pkgsOldWorld) pkgsNewWorld && + all (`elem` pkgsNewWorld) pkgsOldWorld) + then do + info verbosity "Updating world file..." + writeFileAtomic world . B.pack $ unlines + [ (display pkg) | pkg <- pkgsNewWorld] + else + info verbosity "World file is already up to date." + + +-- | Returns the content of the world file as a list +getContents :: FilePath -> IO [WorldPkgInfo] +getContents world = do + content <- safelyReadFile world + let result = map simpleParse (lines $ B.unpack content) + case sequence result of + Nothing -> die "Could not parse world file." 
+ Just xs -> return xs + where + safelyReadFile :: FilePath -> IO B.ByteString + safelyReadFile file = B.readFile file `catchIO` handler + where + handler e | isDoesNotExistError e = return B.empty + | otherwise = ioError e + + +instance Text WorldPkgInfo where + disp (WorldPkgInfo dep flags) = disp dep <+> dispFlags flags + where + dispFlags [] = Disp.empty + dispFlags fs = Disp.text "--flags=" + <> Disp.doubleQuotes (flagAssToDoc fs) + flagAssToDoc = foldr (\(FlagName fname,val) flagAssDoc -> + (if not val then Disp.char '-' + else Disp.empty) + Disp.<> Disp.text fname + Disp.<+> flagAssDoc) + Disp.empty + parse = do + dep <- parse + Parse.skipSpaces + flagAss <- Parse.option [] parseFlagAssignment + return $ WorldPkgInfo dep flagAss + where + parseFlagAssignment :: Parse.ReadP r FlagAssignment + parseFlagAssignment = do + _ <- Parse.string "--flags" + Parse.skipSpaces + _ <- Parse.char '=' + Parse.skipSpaces + inDoubleQuotes $ Parse.many1 flag + where + inDoubleQuotes :: Parse.ReadP r a -> Parse.ReadP r a + inDoubleQuotes = Parse.between (Parse.char '"') (Parse.char '"') + + flag = do + Parse.skipSpaces + val <- negative Parse.+++ positive + name <- ident + Parse.skipSpaces + return (FlagName name,val) + negative = do + _ <- Parse.char '-' + return False + positive = return True + + ident :: Parse.ReadP r String + ident = do + -- First character must be a letter/digit to avoid flags + -- like "+-debug": + c <- Parse.satisfy Char.isAlphaNum + cs <- Parse.munch (\ch -> Char.isAlphaNum ch || ch == '_' + || ch == '-') + return (c:cs) diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/LICENSE cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/LICENSE --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/LICENSE 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,34 @@ +Copyright (c) 2003-2008, Isaac Jones, Simon Marlow, Martin Sjögren, + Bjorn 
Bringert, Krasimir Angelov, + Malcolm Wallace, Ross Patterson, + Lemmih, Paolo Martini, Don Stewart, + Duncan Coutts +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of Isaac Jones nor the names of other + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Main.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Main.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Main.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Main.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,1119 @@ +{-# LANGUAGE CPP #-} + +----------------------------------------------------------------------------- +-- | +-- Module : Main +-- Copyright : (c) David Himmelstrup 2005 +-- License : BSD-like +-- +-- Maintainer : lemmih@gmail.com +-- Stability : provisional +-- Portability : portable +-- +-- Entry point to the default cabal-install front-end. +----------------------------------------------------------------------------- + +module Main (main) where + +import Distribution.Client.Setup + ( GlobalFlags(..), globalCommand, globalRepos + , ConfigFlags(..) + , ConfigExFlags(..), defaultConfigExFlags, configureExCommand + , BuildFlags(..), BuildExFlags(..), SkipAddSourceDepsCheck(..) + , buildCommand, replCommand, testCommand, benchmarkCommand + , InstallFlags(..), defaultInstallFlags + , installCommand, upgradeCommand + , FetchFlags(..), fetchCommand + , FreezeFlags(..), freezeCommand + , GetFlags(..), getCommand, unpackCommand + , checkCommand + , formatCommand + , updateCommand + , ListFlags(..), listCommand + , InfoFlags(..), infoCommand + , UploadFlags(..), uploadCommand + , ReportFlags(..), reportCommand + , runCommand + , InitFlags(initVerbosity), initCommand + , SDistFlags(..), SDistExFlags(..), sdistCommand + , Win32SelfUpgradeFlags(..), win32SelfUpgradeCommand + , SandboxFlags(..), sandboxCommand + , ExecFlags(..), execCommand + , UserConfigFlags(..), userConfigCommand + , reportCommand + ) +import Distribution.Simple.Setup + ( HaddockFlags(..), haddockCommand, defaultHaddockFlags + , HscolourFlags(..), hscolourCommand + , ReplFlags(..) 
+ , CopyFlags(..), copyCommand + , RegisterFlags(..), registerCommand + , CleanFlags(..), cleanCommand + , TestFlags(..), BenchmarkFlags(..) + , Flag(..), fromFlag, fromFlagOrDefault, flagToMaybe, toFlag + , configAbsolutePaths + ) + +import Distribution.Client.SetupWrapper + ( setupWrapper, SetupScriptOptions(..), defaultSetupScriptOptions ) +import Distribution.Client.Config + ( SavedConfig(..), loadConfig, defaultConfigFile, userConfigDiff + , userConfigUpdate ) +import Distribution.Client.Targets + ( readUserTargets ) +import qualified Distribution.Client.List as List + ( list, info ) + +import Distribution.Client.Install (install) +import Distribution.Client.Configure (configure) +import Distribution.Client.Update (update) +import Distribution.Client.Exec (exec) +import Distribution.Client.Fetch (fetch) +import Distribution.Client.Freeze (freeze) +import Distribution.Client.Check as Check (check) +--import Distribution.Client.Clean (clean) +import Distribution.Client.Upload as Upload (upload, check, report) +import Distribution.Client.Run (run, splitRunArgs) +import Distribution.Client.SrcDist (sdist) +import Distribution.Client.Get (get) +import Distribution.Client.Sandbox (sandboxInit + ,sandboxAddSource + ,sandboxDelete + ,sandboxDeleteSource + ,sandboxListSources + ,sandboxHcPkg + ,dumpPackageEnvironment + + ,getSandboxConfigFilePath + ,loadConfigOrSandboxConfig + ,initPackageDBIfNeeded + ,maybeWithSandboxDirOnSearchPath + ,maybeWithSandboxPackageInfo + ,WereDepsReinstalled(..) 
+ ,maybeReinstallAddSourceDeps + ,tryGetIndexFilePath + ,sandboxBuildDir + ,updateSandboxConfigFileFlag + + ,configCompilerAux' + ,configPackageDB') +import Distribution.Client.Sandbox.PackageEnvironment + (setPackageDB + ,userPackageEnvironmentFile) +import Distribution.Client.Sandbox.Timestamp (maybeAddCompilerTimestampRecord) +import Distribution.Client.Sandbox.Types (UseSandbox(..), whenUsingSandbox) +import Distribution.Client.Init (initCabal) +import qualified Distribution.Client.Win32SelfUpgrade as Win32SelfUpgrade +import Distribution.Client.Utils (determineNumJobs +#if defined(mingw32_HOST_OS) + ,relaxEncodingErrors +#endif + ,existsAndIsMoreRecentThan) + +import Distribution.PackageDescription + ( Executable(..), benchmarkName, benchmarkBuildInfo, testName + , testBuildInfo, buildable ) +import Distribution.PackageDescription.Parse + ( readPackageDescription ) +import Distribution.PackageDescription.PrettyPrint + ( writeGenericPackageDescription ) +import Distribution.Simple.Build + ( startInterpreter ) +import Distribution.Simple.Command + ( CommandParse(..), CommandUI(..), Command + , commandsRun, commandAddAction, hiddenCommand ) +import Distribution.Simple.Compiler + ( Compiler(..) 
) +import Distribution.Simple.Configure + ( checkPersistBuildConfigOutdated, configCompilerAuxEx + , ConfigStateFileError(..), localBuildInfoFile + , getPersistBuildConfig, tryGetPersistBuildConfig ) +import qualified Distribution.Simple.LocalBuildInfo as LBI +import Distribution.Simple.Program (defaultProgramConfiguration + ,configureAllKnownPrograms) +import qualified Distribution.Simple.Setup as Cabal +import Distribution.Simple.Utils + ( cabalVersion, die, notice, info, topHandler + , findPackageDesc, tryFindPackageDesc ) +import Distribution.Text + ( display ) +import Distribution.Verbosity as Verbosity + ( Verbosity, normal ) +import Distribution.Version + ( Version(..), orLaterVersion ) +import qualified Paths_cabal_install (version) + +import System.Environment (getArgs, getProgName) +import System.Exit (exitFailure) +import System.FilePath (splitExtension, takeExtension) +import System.IO ( BufferMode(LineBuffering), hSetBuffering +#ifdef mingw32_HOST_OS + , stderr +#endif + , stdout ) +import System.Directory (doesFileExist, getCurrentDirectory) +import Data.List (intercalate) +import Data.Maybe (mapMaybe) +#if !MIN_VERSION_base(4,8,0) +import Data.Monoid (Monoid(..)) +#endif +import Control.Monad (when, unless) + +-- | Entry point +-- +main :: IO () +main = do + -- Enable line buffering so that we can get fast feedback even when piped. + -- This is especially important for CI and build systems. + hSetBuffering stdout LineBuffering + -- The default locale encoding for Windows CLI is not UTF-8 and printing + -- Unicode characters to it will fail unless we relax the handling of encoding + -- errors when writing to stderr and stdout. 
+#ifdef mingw32_HOST_OS + relaxEncodingErrors stdout + relaxEncodingErrors stderr +#endif + getArgs >>= mainWorker + +mainWorker :: [String] -> IO () +mainWorker args = topHandler $ + case commandsRun (globalCommand commands) commands args of + CommandHelp help -> printGlobalHelp help + CommandList opts -> printOptionsList opts + CommandErrors errs -> printErrors errs + CommandReadyToGo (globalFlags, commandParse) -> + case commandParse of + _ | fromFlagOrDefault False (globalVersion globalFlags) + -> printVersion + | fromFlagOrDefault False (globalNumericVersion globalFlags) + -> printNumericVersion + CommandHelp help -> printCommandHelp help + CommandList opts -> printOptionsList opts + CommandErrors errs -> printErrors errs + CommandReadyToGo action -> do + globalFlags' <- updateSandboxConfigFileFlag globalFlags + action globalFlags' + + where + printCommandHelp help = do + pname <- getProgName + putStr (help pname) + printGlobalHelp help = do + pname <- getProgName + configFile <- defaultConfigFile + putStr (help pname) + putStr $ "\nYou can edit the cabal configuration file to set defaults:\n" + ++ " " ++ configFile ++ "\n" + exists <- doesFileExist configFile + when (not exists) $ + putStrLn $ "This file will be generated with sensible " + ++ "defaults if you run 'cabal update'." + printOptionsList = putStr . 
unlines + printErrors errs = die $ intercalate "\n" errs + printNumericVersion = putStrLn $ display Paths_cabal_install.version + printVersion = putStrLn $ "cabal-install version " + ++ display Paths_cabal_install.version + ++ "\nusing version " + ++ display cabalVersion + ++ " of the Cabal library " + + commands = + [installCommand `commandAddAction` installAction + ,updateCommand `commandAddAction` updateAction + ,listCommand `commandAddAction` listAction + ,infoCommand `commandAddAction` infoAction + ,fetchCommand `commandAddAction` fetchAction + ,freezeCommand `commandAddAction` freezeAction + ,getCommand `commandAddAction` getAction + ,hiddenCommand $ + unpackCommand `commandAddAction` unpackAction + ,checkCommand `commandAddAction` checkAction + ,sdistCommand `commandAddAction` sdistAction + ,uploadCommand `commandAddAction` uploadAction + ,reportCommand `commandAddAction` reportAction + ,runCommand `commandAddAction` runAction + ,initCommand `commandAddAction` initAction + ,configureExCommand `commandAddAction` configureAction + ,buildCommand `commandAddAction` buildAction + ,replCommand `commandAddAction` replAction + ,sandboxCommand `commandAddAction` sandboxAction + ,haddockCommand `commandAddAction` haddockAction + ,execCommand `commandAddAction` execAction + ,userConfigCommand `commandAddAction` userConfigAction + ,cleanCommand `commandAddAction` cleanAction + ,wrapperAction copyCommand + copyVerbosity copyDistPref + ,wrapperAction hscolourCommand + hscolourVerbosity hscolourDistPref + ,wrapperAction registerCommand + regVerbosity regDistPref + ,testCommand `commandAddAction` testAction + ,benchmarkCommand `commandAddAction` benchmarkAction + ,hiddenCommand $ + formatCommand `commandAddAction` formatAction + ,hiddenCommand $ + upgradeCommand `commandAddAction` upgradeAction + ,hiddenCommand $ + win32SelfUpgradeCommand`commandAddAction` win32SelfUpgradeAction + ] + +wrapperAction :: Monoid flags + => CommandUI flags + -> (flags -> Flag Verbosity) + -> 
(flags -> Flag String) + -> Command (GlobalFlags -> IO ()) +wrapperAction command verbosityFlag distPrefFlag = + commandAddAction command + { commandDefaultFlags = mempty } $ \flags extraArgs _globalFlags -> do + let verbosity = fromFlagOrDefault normal (verbosityFlag flags) + setupScriptOptions = defaultSetupScriptOptions { + useDistPref = fromFlagOrDefault + (useDistPref defaultSetupScriptOptions) + (distPrefFlag flags) + } + setupWrapper verbosity setupScriptOptions Nothing + command (const flags) extraArgs + +configureAction :: (ConfigFlags, ConfigExFlags) + -> [String] -> GlobalFlags -> IO () +configureAction (configFlags, configExFlags) extraArgs globalFlags = do + let verbosity = fromFlagOrDefault normal (configVerbosity configFlags) + + (useSandbox, config) <- loadConfigOrSandboxConfig verbosity + globalFlags (configUserInstall configFlags) + let configFlags' = savedConfigureFlags config `mappend` configFlags + configExFlags' = savedConfigureExFlags config `mappend` configExFlags + globalFlags' = savedGlobalFlags config `mappend` globalFlags + (comp, platform, conf) <- configCompilerAuxEx configFlags' + + -- If we're working inside a sandbox and the user has set the -w option, we + -- may need to create a sandbox-local package DB for this compiler and add a + -- timestamp record for this compiler to the timestamp file. + let configFlags'' = case useSandbox of + NoSandbox -> configFlags' + (UseSandbox sandboxDir) -> setPackageDB sandboxDir + comp platform configFlags' + + whenUsingSandbox useSandbox $ \sandboxDir -> do + initPackageDBIfNeeded verbosity configFlags'' comp conf + -- NOTE: We do not write the new sandbox package DB location to + -- 'cabal.sandbox.config' here because 'configure -w' must not affect + -- subsequent 'install' (for UI compatibility with non-sandboxed mode). 
+ + indexFile <- tryGetIndexFilePath config + maybeAddCompilerTimestampRecord verbosity sandboxDir indexFile + (compilerId comp) platform + + maybeWithSandboxDirOnSearchPath useSandbox $ + configure verbosity + (configPackageDB' configFlags'') + (globalRepos globalFlags') + comp platform conf configFlags'' configExFlags' extraArgs + +buildAction :: (BuildFlags, BuildExFlags) -> [String] -> GlobalFlags -> IO () +buildAction (buildFlags, buildExFlags) extraArgs globalFlags = do + let distPref = fromFlagOrDefault (useDistPref defaultSetupScriptOptions) + (buildDistPref buildFlags) + verbosity = fromFlagOrDefault normal (buildVerbosity buildFlags) + noAddSource = fromFlagOrDefault DontSkipAddSourceDepsCheck + (buildOnly buildExFlags) + + -- Calls 'configureAction' to do the real work, so nothing special has to be + -- done to support sandboxes. + (useSandbox, config) <- reconfigure verbosity distPref + mempty [] globalFlags noAddSource + (buildNumJobs buildFlags) (const Nothing) + + maybeWithSandboxDirOnSearchPath useSandbox $ + build verbosity config distPref buildFlags extraArgs + + +-- | Actually do the work of building the package. This is separate from +-- 'buildAction' so that 'testAction' and 'benchmarkAction' do not invoke +-- 'reconfigure' twice. +build :: Verbosity -> SavedConfig -> FilePath -> BuildFlags -> [String] -> IO () +build verbosity config distPref buildFlags extraArgs = + setupWrapper verbosity setupOptions Nothing + (Cabal.buildCommand progConf) mkBuildFlags extraArgs + where + progConf = defaultProgramConfiguration + setupOptions = defaultSetupScriptOptions { useDistPref = distPref } + + mkBuildFlags version = filterBuildFlags version config buildFlags' + buildFlags' = buildFlags + { buildVerbosity = toFlag verbosity + , buildDistPref = toFlag distPref + } + +-- | Make sure that we don't pass new flags to setup scripts compiled against +-- old versions of Cabal. 
+filterBuildFlags :: Version -> SavedConfig -> BuildFlags -> BuildFlags +filterBuildFlags version config buildFlags + | version >= Version [1,19,1] [] = buildFlags_latest + -- Cabal < 1.19.1 doesn't support 'build -j'. + | otherwise = buildFlags_pre_1_19_1 + where + buildFlags_pre_1_19_1 = buildFlags { + buildNumJobs = NoFlag + } + buildFlags_latest = buildFlags { + -- Take the 'jobs' setting '~/.cabal/config' into account. + buildNumJobs = Flag . Just . determineNumJobs $ + (numJobsConfigFlag `mappend` numJobsCmdLineFlag) + } + numJobsConfigFlag = installNumJobs . savedInstallFlags $ config + numJobsCmdLineFlag = buildNumJobs buildFlags + + +replAction :: (ReplFlags, BuildExFlags) -> [String] -> GlobalFlags -> IO () +replAction (replFlags, buildExFlags) extraArgs globalFlags = do + cwd <- getCurrentDirectory + pkgDesc <- findPackageDesc cwd + either (const onNoPkgDesc) (const onPkgDesc) pkgDesc + where + verbosity = fromFlagOrDefault normal (replVerbosity replFlags) + + -- There is a .cabal file in the current directory: start a REPL and load + -- the project's modules. + onPkgDesc = do + let distPref = fromFlagOrDefault (useDistPref defaultSetupScriptOptions) + (replDistPref replFlags) + noAddSource = case replReload replFlags of + Flag True -> SkipAddSourceDepsCheck + _ -> fromFlagOrDefault DontSkipAddSourceDepsCheck + (buildOnly buildExFlags) + progConf = defaultProgramConfiguration + setupOptions = defaultSetupScriptOptions + { useCabalVersion = orLaterVersion $ Version [1,18,0] [] + , useDistPref = distPref + } + replFlags' = replFlags + { replVerbosity = toFlag verbosity + , replDistPref = toFlag distPref + } + -- Calls 'configureAction' to do the real work, so nothing special has to + -- be done to support sandboxes. 
+ (useSandbox, _config) <- reconfigure verbosity distPref + mempty [] globalFlags noAddSource NoFlag + (const Nothing) + + maybeWithSandboxDirOnSearchPath useSandbox $ + setupWrapper verbosity setupOptions Nothing + (Cabal.replCommand progConf) (const replFlags') extraArgs + + -- No .cabal file in the current directory: just start the REPL (possibly + -- using the sandbox package DB). + onNoPkgDesc = do + (_useSandbox, config) <- loadConfigOrSandboxConfig verbosity globalFlags + mempty + let configFlags = savedConfigureFlags config + (comp, _platform, programDb) <- configCompilerAux' configFlags + startInterpreter verbosity programDb comp (configPackageDB' configFlags) + +-- | Re-configure the package in the current directory if needed. Deciding +-- when to reconfigure and with which options is convoluted: +-- +-- If we are reconfiguring, we must always run @configure@ with the +-- verbosity option we are given; however, that a previous configuration +-- uses a different verbosity setting is not reason enough to reconfigure. +-- +-- The package should be configured to use the same \"dist\" prefix as +-- given to the @build@ command, otherwise the build will probably +-- fail. Not only does this determine the \"dist\" prefix setting if we +-- need to reconfigure anyway, but an existing configuration should be +-- invalidated if its \"dist\" prefix differs. +-- +-- If the package has never been configured (i.e., there is no +-- LocalBuildInfo), we must configure first, using the default options. +-- +-- If the package has been configured, there will be a 'LocalBuildInfo'. +-- If there no package description file, we assume that the +-- 'PackageDescription' is up to date, though the configuration may need +-- to be updated for other reasons (see above). If there is a package +-- description file, and it has been modified since the 'LocalBuildInfo' +-- was generated, then we need to reconfigure. 
+-- +-- The caller of this function may also have specific requirements +-- regarding the flags the last configuration used. For example, +-- 'testAction' requires that the package be configured with test suites +-- enabled. The caller may pass the required settings to this function +-- along with a function to check the validity of the saved 'ConfigFlags'; +-- these required settings will be checked first upon determining that +-- a previous configuration exists. +reconfigure :: Verbosity -- ^ Verbosity setting + -> FilePath -- ^ \"dist\" prefix + -> ConfigFlags -- ^ Additional config flags to set. These flags + -- will be 'mappend'ed to the last used or + -- default 'ConfigFlags' as appropriate, so + -- this value should be 'mempty' with only the + -- required flags set. The required verbosity + -- and \"dist\" prefix flags will be set + -- automatically because they are always + -- required; therefore, it is not necessary to + -- set them here. + -> [String] -- ^ Extra arguments + -> GlobalFlags -- ^ Global flags + -> SkipAddSourceDepsCheck + -- ^ Should we skip the timestamp check for modified + -- add-source dependencies? + -> Flag (Maybe Int) + -- ^ -j flag for reinstalling add-source deps. + -> (ConfigFlags -> Maybe String) + -- ^ Check that the required flags are set in + -- the last used 'ConfigFlags'. If the required + -- flags are not set, provide a message to the + -- user explaining the reason for + -- reconfiguration. Because the correct \"dist\" + -- prefix setting is always required, it is checked + -- automatically; this function need not check + -- for it. + -> IO (UseSandbox, SavedConfig) +reconfigure verbosity distPref addConfigFlags extraArgs globalFlags + skipAddSourceDepsCheck numJobsFlag checkFlags = do + eLbi <- tryGetPersistBuildConfig distPref + case eLbi of + Left err -> onNoBuildConfig err + Right lbi -> onBuildConfig lbi + + where + + -- We couldn't load the saved package config file. 
+ -- + -- If we're in a sandbox: add-source deps don't have to be reinstalled + -- (since we don't know the compiler & platform). + onNoBuildConfig :: ConfigStateFileError -> IO (UseSandbox, SavedConfig) + onNoBuildConfig err = do + let msg = case err of + ConfigStateFileMissing -> "Package has never been configured." + ConfigStateFileNoParse -> "Saved package config file seems " + ++ "to be corrupt." + _ -> show err + case err of + ConfigStateFileBadVersion _ _ _ -> info verbosity msg + _ -> do + notice verbosity + $ msg ++ " Configuring with default flags." ++ configureManually + configureAction (defaultFlags, defaultConfigExFlags) + extraArgs globalFlags + loadConfigOrSandboxConfig verbosity globalFlags mempty + + -- Package has been configured, but the configuration may be out of + -- date or required flags may not be set. + -- + -- If we're in a sandbox: reinstall the modified add-source deps and + -- force reconfigure if we did. + onBuildConfig :: LBI.LocalBuildInfo -> IO (UseSandbox, SavedConfig) + onBuildConfig lbi = do + let configFlags = LBI.configFlags lbi + flags = mconcat [configFlags, addConfigFlags, distVerbFlags] + + -- Was the sandbox created after the package was already configured? We + -- may need to skip reinstallation of add-source deps and force + -- reconfigure. + let buildConfig = localBuildInfoFile distPref + sandboxConfig <- getSandboxConfigFilePath globalFlags + isSandboxConfigNewer <- + sandboxConfig `existsAndIsMoreRecentThan` buildConfig + + let skipAddSourceDepsCheck' + | isSandboxConfigNewer = SkipAddSourceDepsCheck + | otherwise = skipAddSourceDepsCheck + + when (skipAddSourceDepsCheck' == SkipAddSourceDepsCheck) $ + info verbosity "Skipping add-source deps check..." 
+ + (useSandbox, config, depsReinstalled) <- + case skipAddSourceDepsCheck' of + DontSkipAddSourceDepsCheck -> + maybeReinstallAddSourceDeps verbosity numJobsFlag flags globalFlags + SkipAddSourceDepsCheck -> do + (useSandbox, config) <- loadConfigOrSandboxConfig verbosity + globalFlags (configUserInstall flags) + return (useSandbox, config, NoDepsReinstalled) + + -- Is the @cabal.config@ file newer than @dist/setup.config@? Then we need + -- to force reconfigure. Note that it's possible to use @cabal.config@ + -- even without sandboxes. + isUserPackageEnvironmentFileNewer <- + userPackageEnvironmentFile `existsAndIsMoreRecentThan` buildConfig + + -- Determine whether we need to reconfigure and which message to show to + -- the user if that is the case. + mMsg <- determineMessageToShow lbi configFlags depsReinstalled + isSandboxConfigNewer + isUserPackageEnvironmentFileNewer + case mMsg of + + -- No message for the user indicates that reconfiguration + -- is not required. + Nothing -> return (useSandbox, config) + + -- Show the message and reconfigure. + Just msg -> do + notice verbosity msg + configureAction (flags, defaultConfigExFlags) + extraArgs globalFlags + return (useSandbox, config) + + -- Determine what message, if any, to display to the user if reconfiguration + -- is required. + determineMessageToShow :: LBI.LocalBuildInfo -> ConfigFlags + -> WereDepsReinstalled -> Bool -> Bool + -> IO (Maybe String) + determineMessageToShow _ _ _ True _ = + -- The sandbox was created after the package was already configured. + return $! Just $! sandboxConfigNewerMessage + + determineMessageToShow _ _ _ False True = + -- The user package environment file was modified. + return $! Just $! 
userPackageEnvironmentFileModifiedMessage + + determineMessageToShow lbi configFlags depsReinstalled False False = do + let savedDistPref = fromFlagOrDefault + (useDistPref defaultSetupScriptOptions) + (configDistPref configFlags) + case depsReinstalled of + ReinstalledSomeDeps -> + -- Some add-source deps were reinstalled. + return $! Just $! reinstalledDepsMessage + NoDepsReinstalled -> + case checkFlags configFlags of + -- Flag required by the caller is not set. + Just msg -> return $! Just $! msg ++ configureManually + + Nothing + -- Required "dist" prefix is not set. + | savedDistPref /= distPref -> + return $! Just distPrefMessage + + -- All required flags are set, but the configuration + -- may be outdated. + | otherwise -> case LBI.pkgDescrFile lbi of + Nothing -> return Nothing + Just pdFile -> do + outdated <- checkPersistBuildConfigOutdated + distPref pdFile + return $! if outdated + then Just $! outdatedMessage pdFile + else Nothing + + defaultFlags = mappend addConfigFlags distVerbFlags + distVerbFlags = mempty + { configVerbosity = toFlag verbosity + , configDistPref = toFlag distPref + } + reconfiguringMostRecent = " Re-configuring with most recently used options." + configureManually = " If this fails, please run configure manually." + sandboxConfigNewerMessage = + "The sandbox was created after the package was already configured." + ++ reconfiguringMostRecent + ++ configureManually + userPackageEnvironmentFileModifiedMessage = + "The user package environment file ('" + ++ userPackageEnvironmentFile ++ "') was modified." + ++ reconfiguringMostRecent + ++ configureManually + distPrefMessage = + "Package previously configured with different \"dist\" prefix." + ++ reconfiguringMostRecent + ++ configureManually + outdatedMessage pdFile = + pdFile ++ " has been changed." + ++ reconfiguringMostRecent + ++ configureManually + reinstalledDepsMessage = + "Some add-source dependencies have been reinstalled." 
+ ++ reconfiguringMostRecent + ++ configureManually + +installAction :: (ConfigFlags, ConfigExFlags, InstallFlags, HaddockFlags) + -> [String] -> GlobalFlags -> IO () +installAction (configFlags, _, installFlags, _) _ _globalFlags + | fromFlagOrDefault False (installOnly installFlags) + = let verbosity = fromFlagOrDefault normal (configVerbosity configFlags) + in setupWrapper verbosity defaultSetupScriptOptions Nothing + installCommand (const mempty) [] + +installAction (configFlags, configExFlags, installFlags, haddockFlags) + extraArgs globalFlags = do + let verbosity = fromFlagOrDefault normal (configVerbosity configFlags) + (useSandbox, config) <- loadConfigOrSandboxConfig verbosity + globalFlags (configUserInstall configFlags) + targets <- readUserTargets verbosity extraArgs + + -- TODO: It'd be nice if 'cabal install' picked up the '-w' flag passed to + -- 'configure' when run inside a sandbox. Right now, running + -- + -- $ cabal sandbox init && cabal configure -w /path/to/ghc + -- && cabal build && cabal install + -- + -- performs the compilation twice unless you also pass -w to 'install'. + -- However, this is the same behaviour that 'cabal install' has in the normal + -- mode of operation, so we stick to it for consistency. + + let sandboxDistPref = case useSandbox of + NoSandbox -> NoFlag + UseSandbox sandboxDir -> Flag $ sandboxBuildDir sandboxDir + configFlags' = maybeForceTests installFlags' $ + savedConfigureFlags config `mappend` configFlags + configExFlags' = defaultConfigExFlags `mappend` + savedConfigureExFlags config `mappend` configExFlags + installFlags' = defaultInstallFlags `mappend` + savedInstallFlags config `mappend` installFlags + haddockFlags' = defaultHaddockFlags `mappend` + savedHaddockFlags config `mappend` haddockFlags + globalFlags' = savedGlobalFlags config `mappend` globalFlags + (comp, platform, conf) <- configCompilerAux' configFlags' + -- TODO: Redesign ProgramDB API to prevent such problems as #2241 in the future. 
+ conf' <- configureAllKnownPrograms verbosity conf + + -- If we're working inside a sandbox and the user has set the -w option, we + -- may need to create a sandbox-local package DB for this compiler and add a + -- timestamp record for this compiler to the timestamp file. + configFlags'' <- case useSandbox of + NoSandbox -> configAbsolutePaths $ configFlags' + (UseSandbox sandboxDir) -> + return $ (setPackageDB sandboxDir comp platform configFlags') { + configDistPref = sandboxDistPref + } + + whenUsingSandbox useSandbox $ \sandboxDir -> do + initPackageDBIfNeeded verbosity configFlags'' comp conf' + + indexFile <- tryGetIndexFilePath config + maybeAddCompilerTimestampRecord verbosity sandboxDir indexFile + (compilerId comp) platform + + -- FIXME: Passing 'SandboxPackageInfo' to install unconditionally here means + -- that 'cabal install some-package' inside a sandbox will sometimes reinstall + -- modified add-source deps, even if they are not among the dependencies of + -- 'some-package'. This can also prevent packages that depend on older + -- versions of add-source'd packages from building (see #1362). + maybeWithSandboxPackageInfo verbosity configFlags'' globalFlags' + comp platform conf useSandbox $ \mSandboxPkgInfo -> + maybeWithSandboxDirOnSearchPath useSandbox $ + install verbosity + (configPackageDB' configFlags'') + (globalRepos globalFlags') + comp platform conf' + useSandbox mSandboxPkgInfo + globalFlags' configFlags'' configExFlags' + installFlags' haddockFlags' + targets + + where + -- '--run-tests' implies '--enable-tests'. 
+ maybeForceTests installFlags' configFlags' = + if fromFlagOrDefault False (installRunTests installFlags') + then configFlags' { configTests = toFlag True } + else configFlags' + +testAction :: (TestFlags, BuildFlags, BuildExFlags) -> [String] -> GlobalFlags + -> IO () +testAction (testFlags, buildFlags, buildExFlags) extraArgs globalFlags = do + let verbosity = fromFlagOrDefault normal (testVerbosity testFlags) + distPref = fromFlagOrDefault (useDistPref defaultSetupScriptOptions) + (testDistPref testFlags) + setupOptions = defaultSetupScriptOptions { useDistPref = distPref } + buildFlags' = buildFlags { buildVerbosity = testVerbosity testFlags + , buildDistPref = testDistPref testFlags } + addConfigFlags = mempty { configTests = toFlag True } + checkFlags flags + | fromFlagOrDefault False (configTests flags) = Nothing + | otherwise = Just "Re-configuring with test suites enabled." + noAddSource = fromFlagOrDefault DontSkipAddSourceDepsCheck + (buildOnly buildExFlags) + + -- reconfigure also checks if we're in a sandbox and reinstalls add-source + -- deps if needed. + (useSandbox, config) <- reconfigure verbosity distPref addConfigFlags [] + globalFlags noAddSource + (buildNumJobs buildFlags') checkFlags + + -- the package was just configured, so the LBI must be available + lbi <- getPersistBuildConfig distPref + let pkgDescr = LBI.localPkgDescr lbi + nameTestsOnly = + LBI.foldComponent + (const Nothing) + (const Nothing) + (\t -> + if buildable (testBuildInfo t) + then Just (testName t) + else Nothing) + (const Nothing) + tests = mapMaybe nameTestsOnly $ LBI.pkgComponents pkgDescr + extraArgs' + | null extraArgs = tests + | otherwise = extraArgs + + if null tests + then notice verbosity "Package has no buildable test suites." 
+ else do + maybeWithSandboxDirOnSearchPath useSandbox $ + build verbosity config distPref buildFlags' extraArgs' + + maybeWithSandboxDirOnSearchPath useSandbox $ + setupWrapper verbosity setupOptions Nothing + Cabal.testCommand (const testFlags) extraArgs' + +benchmarkAction :: (BenchmarkFlags, BuildFlags, BuildExFlags) + -> [String] -> GlobalFlags + -> IO () +benchmarkAction (benchmarkFlags, buildFlags, buildExFlags) + extraArgs globalFlags = do + let verbosity = fromFlagOrDefault normal + (benchmarkVerbosity benchmarkFlags) + distPref = fromFlagOrDefault (useDistPref defaultSetupScriptOptions) + (benchmarkDistPref benchmarkFlags) + setupOptions = defaultSetupScriptOptions { useDistPref = distPref } + buildFlags' = buildFlags + { buildVerbosity = benchmarkVerbosity benchmarkFlags + , buildDistPref = benchmarkDistPref benchmarkFlags } + addConfigFlags = mempty { configBenchmarks = toFlag True } + checkFlags flags + | fromFlagOrDefault False (configBenchmarks flags) = Nothing + | otherwise = Just "Re-configuring with benchmarks enabled." + noAddSource = fromFlagOrDefault DontSkipAddSourceDepsCheck + (buildOnly buildExFlags) + + -- reconfigure also checks if we're in a sandbox and reinstalls add-source + -- deps if needed. + (useSandbox, config) <- reconfigure verbosity distPref addConfigFlags [] + globalFlags noAddSource (buildNumJobs buildFlags') + checkFlags + + -- the package was just configured, so the LBI must be available + lbi <- getPersistBuildConfig distPref + let pkgDescr = LBI.localPkgDescr lbi + nameBenchsOnly = + LBI.foldComponent + (const Nothing) + (const Nothing) + (const Nothing) + (\b -> + if buildable (benchmarkBuildInfo b) + then Just (benchmarkName b) + else Nothing) + benchs = mapMaybe nameBenchsOnly $ LBI.pkgComponents pkgDescr + extraArgs' + | null extraArgs = benchs + | otherwise = extraArgs + + if null benchs + then notice verbosity "Package has no buildable benchmarks." 
+ else do + maybeWithSandboxDirOnSearchPath useSandbox $ + build verbosity config distPref buildFlags' extraArgs' + + maybeWithSandboxDirOnSearchPath useSandbox $ + setupWrapper verbosity setupOptions Nothing + Cabal.benchmarkCommand (const benchmarkFlags) extraArgs' + +haddockAction :: HaddockFlags -> [String] -> GlobalFlags -> IO () +haddockAction haddockFlags extraArgs globalFlags = do + let verbosity = fromFlag (haddockVerbosity haddockFlags) + (_useSandbox, config) <- loadConfigOrSandboxConfig verbosity globalFlags mempty + let haddockFlags' = defaultHaddockFlags `mappend` + savedHaddockFlags config `mappend` haddockFlags + setupScriptOptions = defaultSetupScriptOptions { + useDistPref = fromFlagOrDefault + (useDistPref defaultSetupScriptOptions) + (haddockDistPref haddockFlags') + } + setupWrapper verbosity setupScriptOptions Nothing + haddockCommand (const haddockFlags') extraArgs + +cleanAction :: CleanFlags -> [String] -> GlobalFlags -> IO () +cleanAction cleanFlags extraArgs _globalFlags = + setupWrapper verbosity setupScriptOptions Nothing + cleanCommand (const cleanFlags) extraArgs + where + verbosity = fromFlagOrDefault normal (cleanVerbosity cleanFlags) + setupScriptOptions = defaultSetupScriptOptions { + useDistPref = fromFlagOrDefault + (useDistPref defaultSetupScriptOptions) + (cleanDistPref cleanFlags), + useWin32CleanHack = True + } + +listAction :: ListFlags -> [String] -> GlobalFlags -> IO () +listAction listFlags extraArgs globalFlags = do + let verbosity = fromFlag (listVerbosity listFlags) + (_useSandbox, config) <- loadConfigOrSandboxConfig verbosity globalFlags mempty + let configFlags' = savedConfigureFlags config + configFlags = configFlags' { + configPackageDBs = configPackageDBs configFlags' + `mappend` listPackageDBs listFlags + } + globalFlags' = savedGlobalFlags config `mappend` globalFlags + (comp, _, conf) <- configCompilerAux' configFlags + List.list verbosity + (configPackageDB' configFlags) + (globalRepos globalFlags') + comp + 
conf + listFlags + extraArgs + +infoAction :: InfoFlags -> [String] -> GlobalFlags -> IO () +infoAction infoFlags extraArgs globalFlags = do + let verbosity = fromFlag (infoVerbosity infoFlags) + targets <- readUserTargets verbosity extraArgs + (_useSandbox, config) <- loadConfigOrSandboxConfig verbosity globalFlags mempty + let configFlags' = savedConfigureFlags config + configFlags = configFlags' { + configPackageDBs = configPackageDBs configFlags' + `mappend` infoPackageDBs infoFlags + } + globalFlags' = savedGlobalFlags config `mappend` globalFlags + (comp, _, conf) <- configCompilerAuxEx configFlags + List.info verbosity + (configPackageDB' configFlags) + (globalRepos globalFlags') + comp + conf + globalFlags' + infoFlags + targets + +updateAction :: Flag Verbosity -> [String] -> GlobalFlags -> IO () +updateAction verbosityFlag extraArgs globalFlags = do + unless (null extraArgs) $ + die $ "'update' doesn't take any extra arguments: " ++ unwords extraArgs + let verbosity = fromFlag verbosityFlag + (_useSandbox, config) <- loadConfigOrSandboxConfig verbosity globalFlags NoFlag + let globalFlags' = savedGlobalFlags config `mappend` globalFlags + update verbosity (globalRepos globalFlags') + +upgradeAction :: (ConfigFlags, ConfigExFlags, InstallFlags, HaddockFlags) + -> [String] -> GlobalFlags -> IO () +upgradeAction _ _ _ = die $ + "Use the 'cabal install' command instead of 'cabal upgrade'.\n" + ++ "You can install the latest version of a package using 'cabal install'. " + ++ "The 'cabal upgrade' command has been removed because people found it " + ++ "confusing and it often led to broken packages.\n" + ++ "If you want the old upgrade behaviour then use the install command " + ++ "with the --upgrade-dependencies flag (but check first with --dry-run " + ++ "to see what would happen). This will try to pick the latest versions " + ++ "of all dependencies, rather than the usual behaviour of trying to pick " + ++ "installed versions of all dependencies. 
If you do use " + ++ "--upgrade-dependencies, it is recommended that you do not upgrade core " + ++ "packages (e.g. by using appropriate --constraint= flags)." + +fetchAction :: FetchFlags -> [String] -> GlobalFlags -> IO () +fetchAction fetchFlags extraArgs globalFlags = do + let verbosity = fromFlag (fetchVerbosity fetchFlags) + targets <- readUserTargets verbosity extraArgs + config <- loadConfig verbosity (globalConfigFile globalFlags) mempty + let configFlags = savedConfigureFlags config + globalFlags' = savedGlobalFlags config `mappend` globalFlags + (comp, platform, conf) <- configCompilerAux' configFlags + fetch verbosity + (configPackageDB' configFlags) + (globalRepos globalFlags') + comp platform conf globalFlags' fetchFlags + targets + +freezeAction :: FreezeFlags -> [String] -> GlobalFlags -> IO () +freezeAction freezeFlags _extraArgs globalFlags = do + let verbosity = fromFlag (freezeVerbosity freezeFlags) + (useSandbox, config) <- loadConfigOrSandboxConfig verbosity globalFlags mempty + let configFlags = savedConfigureFlags config + globalFlags' = savedGlobalFlags config `mappend` globalFlags + (comp, platform, conf) <- configCompilerAux' configFlags + + maybeWithSandboxPackageInfo verbosity configFlags globalFlags' + comp platform conf useSandbox $ \mSandboxPkgInfo -> + maybeWithSandboxDirOnSearchPath useSandbox $ + freeze verbosity + (configPackageDB' configFlags) + (globalRepos globalFlags') + comp platform conf + mSandboxPkgInfo + globalFlags' freezeFlags + +uploadAction :: UploadFlags -> [String] -> GlobalFlags -> IO () +uploadAction uploadFlags extraArgs globalFlags = do + let verbosity = fromFlag (uploadVerbosity uploadFlags) + config <- loadConfig verbosity (globalConfigFile globalFlags) mempty + let uploadFlags' = savedUploadFlags config `mappend` uploadFlags + globalFlags' = savedGlobalFlags config `mappend` globalFlags + tarfiles = extraArgs + checkTarFiles extraArgs + if fromFlag (uploadCheck uploadFlags') + then Upload.check verbosity 
tarfiles + else upload verbosity + (globalRepos globalFlags') + (flagToMaybe $ uploadUsername uploadFlags') + (flagToMaybe $ uploadPassword uploadFlags') + tarfiles + where + checkTarFiles tarfiles + | null tarfiles + = die "the 'upload' command expects one or more .tar.gz packages." + | not (null otherFiles) + = die $ "the 'upload' command expects only .tar.gz packages: " + ++ intercalate ", " otherFiles + | otherwise = sequence_ + [ do exists <- doesFileExist tarfile + unless exists $ die $ "file not found: " ++ tarfile + | tarfile <- tarfiles ] + + where otherFiles = filter (not . isTarGzFile) tarfiles + isTarGzFile file = case splitExtension file of + (file', ".gz") -> takeExtension file' == ".tar" + _ -> False + +checkAction :: Flag Verbosity -> [String] -> GlobalFlags -> IO () +checkAction verbosityFlag extraArgs _globalFlags = do + unless (null extraArgs) $ + die $ "'check' doesn't take any extra arguments: " ++ unwords extraArgs + allOk <- Check.check (fromFlag verbosityFlag) + unless allOk exitFailure + +formatAction :: Flag Verbosity -> [String] -> GlobalFlags -> IO () +formatAction verbosityFlag extraArgs _globalFlags = do + let verbosity = fromFlag verbosityFlag + path <- case extraArgs of + [] -> do cwd <- getCurrentDirectory + tryFindPackageDesc cwd + (p:_) -> return p + pkgDesc <- readPackageDescription verbosity path + -- Uses 'writeFileAtomic' under the hood. 
+ writeGenericPackageDescription path pkgDesc + +sdistAction :: (SDistFlags, SDistExFlags) -> [String] -> GlobalFlags -> IO () +sdistAction (sdistFlags, sdistExFlags) extraArgs _globalFlags = do + unless (null extraArgs) $ + die $ "'sdist' doesn't take any extra arguments: " ++ unwords extraArgs + sdist sdistFlags sdistExFlags + +reportAction :: ReportFlags -> [String] -> GlobalFlags -> IO () +reportAction reportFlags extraArgs globalFlags = do + unless (null extraArgs) $ + die $ "'report' doesn't take any extra arguments: " ++ unwords extraArgs + + let verbosity = fromFlag (reportVerbosity reportFlags) + config <- loadConfig verbosity (globalConfigFile globalFlags) mempty + let globalFlags' = savedGlobalFlags config `mappend` globalFlags + reportFlags' = savedReportFlags config `mappend` reportFlags + + Upload.report verbosity (globalRepos globalFlags') + (flagToMaybe $ reportUsername reportFlags') + (flagToMaybe $ reportPassword reportFlags') + +runAction :: (BuildFlags, BuildExFlags) -> [String] -> GlobalFlags -> IO () +runAction (buildFlags, buildExFlags) extraArgs globalFlags = do + let verbosity = fromFlagOrDefault normal (buildVerbosity buildFlags) + distPref = fromFlagOrDefault (useDistPref defaultSetupScriptOptions) + (buildDistPref buildFlags) + noAddSource = fromFlagOrDefault DontSkipAddSourceDepsCheck + (buildOnly buildExFlags) + + -- reconfigure also checks if we're in a sandbox and reinstalls add-source + -- deps if needed. 
+ (useSandbox, config) <- reconfigure verbosity distPref mempty [] + globalFlags noAddSource (buildNumJobs buildFlags) + (const Nothing) + + lbi <- getPersistBuildConfig distPref + (exe, exeArgs) <- splitRunArgs lbi extraArgs + + maybeWithSandboxDirOnSearchPath useSandbox $ + build verbosity config distPref buildFlags ["exe:" ++ exeName exe] + + maybeWithSandboxDirOnSearchPath useSandbox $ + run verbosity lbi exe exeArgs + +getAction :: GetFlags -> [String] -> GlobalFlags -> IO () +getAction getFlags extraArgs globalFlags = do + let verbosity = fromFlag (getVerbosity getFlags) + targets <- readUserTargets verbosity extraArgs + config <- loadConfig verbosity (globalConfigFile globalFlags) mempty + let globalFlags' = savedGlobalFlags config `mappend` globalFlags + get verbosity + (globalRepos (savedGlobalFlags config)) + globalFlags' + getFlags + targets + +unpackAction :: GetFlags -> [String] -> GlobalFlags -> IO () +unpackAction getFlags extraArgs globalFlags = do + getAction getFlags extraArgs globalFlags + +initAction :: InitFlags -> [String] -> GlobalFlags -> IO () +initAction initFlags _extraArgs globalFlags = do + let verbosity = fromFlag (initVerbosity initFlags) + (_useSandbox, config) <- loadConfigOrSandboxConfig verbosity globalFlags mempty + let configFlags = savedConfigureFlags config + (comp, _, conf) <- configCompilerAux' configFlags + initCabal verbosity + (configPackageDB' configFlags) + comp + conf + initFlags + +sandboxAction :: SandboxFlags -> [String] -> GlobalFlags -> IO () +sandboxAction sandboxFlags extraArgs globalFlags = do + let verbosity = fromFlag (sandboxVerbosity sandboxFlags) + case extraArgs of + -- Basic sandbox commands. 
+ ["init"] -> sandboxInit verbosity sandboxFlags globalFlags + ["delete"] -> sandboxDelete verbosity sandboxFlags globalFlags + ("add-source":extra) -> do + when (noExtraArgs extra) $ + die "The 'sandbox add-source' command expects at least one argument" + sandboxAddSource verbosity extra sandboxFlags globalFlags + ("delete-source":extra) -> do + when (noExtraArgs extra) $ + die "The 'sandbox delete-source' command expects \ + \at least one argument" + sandboxDeleteSource verbosity extra sandboxFlags globalFlags + ["list-sources"] -> sandboxListSources verbosity sandboxFlags globalFlags + + -- More advanced commands. + ("hc-pkg":extra) -> do + when (noExtraArgs extra) $ + die $ "The 'sandbox hc-pkg' command expects at least one argument" + sandboxHcPkg verbosity sandboxFlags globalFlags extra + ["buildopts"] -> die "Not implemented!" + + -- Hidden commands. + ["dump-pkgenv"] -> dumpPackageEnvironment verbosity sandboxFlags globalFlags + + -- Error handling. + [] -> die $ "Please specify a subcommand (see 'help sandbox')" + _ -> die $ "Unknown 'sandbox' subcommand: " ++ unwords extraArgs + + where + noExtraArgs = (<1) . length + +execAction :: ExecFlags -> [String] -> GlobalFlags -> IO () +execAction execFlags extraArgs globalFlags = do + let verbosity = fromFlag (execVerbosity execFlags) + (useSandbox, config) <- loadConfigOrSandboxConfig verbosity globalFlags + mempty + let configFlags = savedConfigureFlags config + (comp, platform, conf) <- configCompilerAux' configFlags + exec verbosity useSandbox comp platform conf extraArgs + +userConfigAction :: UserConfigFlags -> [String] -> GlobalFlags -> IO () +userConfigAction ucflags extraArgs globalFlags = do + let verbosity = fromFlag (userConfigVerbosity ucflags) + case extraArgs of + ("diff":_) -> mapM_ putStrLn =<< userConfigDiff globalFlags + ("update":_) -> userConfigUpdate verbosity globalFlags + -- Error handling. 
+ [] -> die $ "Please specify a subcommand (see 'help user-config')" + _ -> die $ "Unknown 'user-config' subcommand: " ++ unwords extraArgs + + +-- | See 'Distribution.Client.Install.withWin32SelfUpgrade' for details. +-- +win32SelfUpgradeAction :: Win32SelfUpgradeFlags -> [String] -> GlobalFlags + -> IO () +win32SelfUpgradeAction selfUpgradeFlags (pid:path:_extraArgs) _globalFlags = do + let verbosity = fromFlag (win32SelfUpgradeVerbosity selfUpgradeFlags) + Win32SelfUpgrade.deleteOldExeFile verbosity (read pid) path +win32SelfUpgradeAction _ _ _ = return () diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/README.md cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/README.md --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/README.md 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/README.md 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,155 @@ +The cabal-install package +========================= + +See the [Cabal web site] for more information. + +The `cabal-install` package provides a command line tool named `cabal`. +It uses the [Cabal] library and provides a user interface to the +Cabal/[Hackage] build automation and package management system. It can +build and install both local and remote packages, including +dependencies. + +[Cabal web site]: http://www.haskell.org/cabal/ +[Cabal]: ../Cabal/README.md + +Installing the `cabal` command-line tool +======================================== + +The `cabal-install` package requires a number of other packages, most of +which come with a standard GHC installation. It requires the [network] +package, which is sometimes packaged separately by Linux distributions; +for example, on Debian or Ubuntu, it is located in the +"libghc6-network-dev" package. + +`cabal` requires a few other Haskell packages that are not always +installed. The exact list is specified in the [.cabal] file or in the +[bootstrap.sh] file. 
All these packages are available from [Hackage]. + +Note that on some Unix systems you may need to install an additional +zlib development package using your system package manager; for example, +on Debian or Ubuntu, it is located in the "zlib1g-dev" package; on +Fedora, it is located in the "zlib-devel" package. It is required +because the Haskell zlib package uses the system zlib C library and +header files. + +The `cabal-install` package is now part of the [Haskell Platform], so you +do not usually need to install it separately. However, if you are +starting from a minimal GHC installation, you need to install +`cabal-install` manually. Since it is an ordinary Cabal package, +`cabal-install` can be built the standard way; to facilitate this, the +process has been partially automated. It is described below. + +[.cabal]: cabal-install.cabal +[network]: http://hackage.haskell.org/package/network +[Haskell Platform]: http://www.haskell.org/platform/ + +Quick start on Unix-like systems +-------------------------------- + +As a convenience for users on Unix-like systems, there is a +[bootstrap.sh] script that will download and install each of +`cabal-install`'s dependencies in turn. + + $ ./bootstrap.sh + +It will download and install the dependencies. The script will install the +library packages (vanilla, profiling and shared) into `$HOME/.cabal/` and the +`cabal` program into `$HOME/.cabal/bin/`. If you don't want to install profiling +and shared versions of the libraries, use + + $ EXTRA_CONFIGURE_OPTS="" ./bootstrap.sh + +You then have the choice either to place `$HOME/.cabal/bin` on your +`$PATH` or move the `cabal` program to somewhere on your `$PATH`. Next, +you can get the latest list of packages by running: + + $ cabal update + +This will also create a default configuration file, if it does not +already exist, at `$HOME/.cabal/config`. + +By default, `cabal` will install programs to `$HOME/.cabal/bin`. 
If you +do not want to add this directory to your `$PATH`, you can change +the setting in the config file; for example, you could use the +following: + + symlink-bindir: $HOME/bin + + +Quick start on Windows systems +------------------------------ + +For Windows users, a precompiled program ([cabal.exe]) is provided. +Download and put it somewhere on your `%PATH%` (for example, +`C:\Program Files\Haskell\bin`.) + +Next, you can get the latest list of packages by running: + + $ cabal update + +This will also create a default configuration file (if it does not +already exist) at +`C:\Documents and Settings\%USERNAME%\Application Data\cabal\config`. + +[cabal.exe]: http://www.haskell.org/cabal/release/cabal-install-latest/ + +Using `cabal` +============= + +There are two sets of commands: commands for working with a local +project build tree and those for working with packages distributed +from [Hackage]. + +For the list of the full set of commands and flags for each command, +run: + + $ cabal help + + +Commands for developers for local build trees +--------------------------------------------- + +The commands for local project build trees are almost the same as the +`runghc Setup` command-line interface you may already be familiar with. +In particular, it has the following commands: + + * `cabal configure` + * `cabal build` + * `cabal haddock` + * `cabal clean` + * `cabal sdist` + +The `install` command is somewhat different; it is an all-in-one +operation. If you run `cabal install` in your build tree, it will +configure, build, and install. It takes all the flags that `configure` +takes such as `--global` and `--prefix`. + +In addition, `cabal` will download and install any dependencies that are +not already installed. It can also rebuild packages to ensure a +consistent set of dependencies. 
+ + +Commands for released Hackage packages +-------------------------------------- + + $ cabal update + +This command gets the latest list of packages from the [Hackage] server. +On occasion, this command must be run manually--for instance, if you +want to install a newly released package. + + $ cabal install xmonad + +This command installs one or more named packages, and all their +dependencies, from Hackage. By default, it installs the latest available +version; however, you may specify exact versions or version ranges. For +example, `cabal install alex-2.2` or `cabal install parsec < 3`. + + $ cabal list xml + +This does a search of the installed and available packages. It does a +case-insensitive substring match on the package name. + + +[Hackage]: http://hackage.haskell.org +[bootstrap.sh]: bootstrap.sh diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Setup.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Setup.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/Setup.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/Setup.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,2 @@ +import Distribution.Simple +main = defaultMain diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/tests/PackageTests/Exec/Check.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/tests/PackageTests/Exec/Check.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/tests/PackageTests/Exec/Check.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/tests/PackageTests/Exec/Check.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,146 @@ +{-# LANGUAGE CPP #-} +module PackageTests.Exec.Check + ( tests + ) where + + +import PackageTests.PackageTester + +import Test.Framework as TF (Test) +import Test.Framework.Providers.HUnit (testCase) +import Test.HUnit (assertBool) + +#if !MIN_VERSION_base(4,8,0) +import Control.Applicative ((<$>)) +#endif 
+import Control.Monad (when) +import Data.List (intercalate, isInfixOf) +import System.FilePath (()) +import System.Directory (getDirectoryContents) + +dir :: FilePath +dir = packageTestsDirectory "Exec" + +tests :: TestsPaths -> [TF.Test] +tests paths = + [ testCase "exits with failure if given no argument" $ do + result <- cabal_exec paths dir [] + assertExecFailed result + + , testCase "prints error message if given no argument" $ do + result <- cabal_exec paths dir [] + assertExecFailed result + let output = outputText result + expected = "specify an executable to run" + errMsg = "should have requested an executable be specified\n" ++ + output + assertBool errMsg $ + expected `isInfixOf` (intercalate " " . lines $ output) + + , testCase "runs the given command" $ do + result <- cabal_exec paths dir ["echo", "this", "string"] + assertExecSucceeded result + let output = outputText result + expected = "this string" + errMsg = "should have ran the given command\n" ++ output + assertBool errMsg $ + expected `isInfixOf` (intercalate " " . lines $ output) + + , testCase "can run executables installed in the sandbox" $ do + -- Test that an executable installed into the sandbox can be found. + -- We do this by removing any existing sandbox. Checking that the + -- executable cannot be found. Creating a new sandbox. Installing + -- the executable and checking it can be run. + + cleanPreviousBuilds paths + assertMyExecutableNotFound paths + assertPackageInstall paths + + result <- cabal_exec paths dir ["my-executable"] + assertExecSucceeded result + let output = outputText result + expected = "This is my-executable" + errMsg = "should have found a my-executable\n" ++ output + assertBool errMsg $ + expected `isInfixOf` (intercalate " " . 
lines $ output) + + , testCase "adds the sandbox bin directory to the PATH" $ do + cleanPreviousBuilds paths + assertMyExecutableNotFound paths + assertPackageInstall paths + + result <- cabal_exec paths dir ["bash", "--", "-c", "my-executable"] + assertExecSucceeded result + let output = outputText result + expected = "This is my-executable" + errMsg = "should have found a my-executable\n" ++ output + assertBool errMsg $ + expected `isInfixOf` (intercalate " " . lines $ output) + + , testCase "configures GHC to use the sandbox" $ do + let libNameAndVersion = "my-0.1" + + cleanPreviousBuilds paths + assertPackageInstall paths + + assertMyLibIsNotAvailableOutsideofSandbox paths libNameAndVersion + + result <- cabal_exec paths dir ["ghc-pkg", "list"] + assertExecSucceeded result + let output = outputText result + errMsg = "my library should have been found" + assertBool errMsg $ + libNameAndVersion `isInfixOf` (intercalate " " . lines $ output) + + + -- , testCase "can find executables built from the package" $ do + + , testCase "configures cabal to use the sandbox" $ do + let libNameAndVersion = "my-0.1" + + cleanPreviousBuilds paths + assertPackageInstall paths + + assertMyLibIsNotAvailableOutsideofSandbox paths libNameAndVersion + + result <- cabal_exec paths dir ["bash", "--", "-c", "cd subdir ; cabal sandbox hc-pkg list"] + assertExecSucceeded result + let output = outputText result + errMsg = "my library should have been found" + assertBool errMsg $ + libNameAndVersion `isInfixOf` (intercalate " " . lines $ output) + ] + +cleanPreviousBuilds :: TestsPaths -> IO () +cleanPreviousBuilds paths = do + sandboxExists <- not . null . 
filter (== "cabal.sandbox.config") <$> + getDirectoryContents dir + assertCleanSucceeded =<< cabal_clean paths dir [] + when sandboxExists $ do + assertSandboxSucceeded =<< cabal_sandbox paths dir ["delete"] + + +assertPackageInstall :: TestsPaths -> IO () +assertPackageInstall paths = do + assertSandboxSucceeded =<< cabal_sandbox paths dir ["init"] + assertInstallSucceeded =<< cabal_install paths dir [] + + +assertMyExecutableNotFound :: TestsPaths -> IO () +assertMyExecutableNotFound paths = do + result <- cabal_exec paths dir ["my-executable"] + assertExecFailed result + let output = outputText result + expected = "cabal: The program 'my-executable' is required but it " ++ + "could not be found" + errMsg = "should not have found a my-executable\n" ++ output + assertBool errMsg $ + expected `isInfixOf` (intercalate " " . lines $ output) + + + +assertMyLibIsNotAvailableOutsideofSandbox :: TestsPaths -> String -> IO () +assertMyLibIsNotAvailableOutsideofSandbox paths libNameAndVersion = do + (_, _, output) <- run (Just $ dir) (ghcPkgPath paths) ["list"] + assertBool "my library should not have been found" $ not $ + libNameAndVersion `isInfixOf` (intercalate " " . 
lines $ output) diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/tests/PackageTests/Freeze/Check.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/tests/PackageTests/Freeze/Check.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/tests/PackageTests/Freeze/Check.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/tests/PackageTests/Freeze/Check.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,114 @@ +{-# LANGUAGE ScopedTypeVariables #-} + +module PackageTests.Freeze.Check + ( tests + ) where + +import PackageTests.PackageTester + +import Test.Framework as TF (Test) +import Test.Framework.Providers.HUnit (testCase) +import Test.HUnit (assertBool) + +import qualified Control.Exception.Extensible as E +import Data.List (intercalate, isInfixOf) +import System.Directory (doesFileExist, removeFile) +import System.FilePath (()) +import System.IO.Error (isDoesNotExistError) + +dir :: FilePath +dir = packageTestsDirectory "Freeze" + +tests :: TestsPaths -> [TF.Test] +tests paths = + [ testCase "runs without error" $ do + removeCabalConfig + result <- cabal_freeze paths dir [] + assertFreezeSucceeded result + + , testCase "freezes direct dependencies" $ do + removeCabalConfig + result <- cabal_freeze paths dir [] + assertFreezeSucceeded result + c <- readCabalConfig + assertBool ("should have frozen base\n" ++ c) $ + " base ==" `isInfixOf` (intercalate " " $ lines $ c) + + , testCase "freezes transitory dependencies" $ do + removeCabalConfig + result <- cabal_freeze paths dir [] + assertFreezeSucceeded result + c <- readCabalConfig + assertBool ("should have frozen ghc-prim\n" ++ c) $ + " ghc-prim ==" `isInfixOf` (intercalate " " $ lines $ c) + + , testCase "does not freeze packages which are not dependend upon" $ do + -- XXX Test this against a package installed in the sandbox but + -- not depended upon. 
+ removeCabalConfig + result <- cabal_freeze paths dir [] + assertFreezeSucceeded result + c <- readCabalConfig + assertBool ("should not have frozen exceptions\n" ++ c) $ not $ + " exceptions ==" `isInfixOf` (intercalate " " $ lines $ c) + + , testCase "does not include a constraint for the package being frozen" $ do + removeCabalConfig + result <- cabal_freeze paths dir [] + assertFreezeSucceeded result + c <- readCabalConfig + assertBool ("should not have frozen self\n" ++ c) $ not $ + " my ==" `isInfixOf` (intercalate " " $ lines $ c) + + , testCase "--dry-run does not modify the cabal.config file" $ do + removeCabalConfig + result <- cabal_freeze paths dir ["--dry-run"] + assertFreezeSucceeded result + c <- doesFileExist $ dir "cabal.config" + assertBool "cabal.config file should not have been created" (not c) + + , testCase "--enable-tests freezes test dependencies" $ do + removeCabalConfig + result <- cabal_freeze paths dir ["--enable-tests"] + assertFreezeSucceeded result + c <- readCabalConfig + assertBool ("should have frozen test-framework\n" ++ c) $ + " test-framework ==" `isInfixOf` (intercalate " " $ lines $ c) + + , testCase "--disable-tests does not freeze test dependencies" $ do + removeCabalConfig + result <- cabal_freeze paths dir ["--disable-tests"] + assertFreezeSucceeded result + c <- readCabalConfig + assertBool ("should not have frozen test-framework\n" ++ c) $ not $ + " test-framework ==" `isInfixOf` (intercalate " " $ lines $ c) + + , testCase "--enable-benchmarks freezes benchmark dependencies" $ do + removeCabalConfig + result <- cabal_freeze paths dir ["--disable-benchmarks"] + assertFreezeSucceeded result + c <- readCabalConfig + assertBool ("should not have frozen criterion\n" ++ c) $ not $ + " criterion ==" `isInfixOf` (intercalate " " $ lines $ c) + + , testCase "--disable-benchmarks does not freeze benchmark dependencies" $ do + removeCabalConfig + result <- cabal_freeze paths dir ["--disable-benchmarks"] + assertFreezeSucceeded 
result + c <- readCabalConfig + assertBool ("should not have frozen criterion\n" ++ c) $ not $ + " criterion ==" `isInfixOf` (intercalate " " $ lines $ c) + ] + +removeCabalConfig :: IO () +removeCabalConfig = do + removeFile (dir "cabal.config") + `E.catch` \ (e :: IOError) -> + if isDoesNotExistError e + then return () + else E.throw e + + +readCabalConfig :: IO String +readCabalConfig = do + readFile $ dir "cabal.config" diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/tests/PackageTests/Freeze/my.cabal cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/tests/PackageTests/Freeze/my.cabal --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/tests/PackageTests/Freeze/my.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/tests/PackageTests/Freeze/my.cabal 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,21 @@ +name: my +version: 0.1 +license: BSD3 +cabal-version: >= 1.20.0 +build-type: Simple + +library + exposed-modules: Foo + build-depends: base + +test-suite test-Foo + type: exitcode-stdio-1.0 + hs-source-dirs: tests + main-is: test-Foo.hs + build-depends: base, my, test-framework + +benchmark bench-Foo + type: exitcode-stdio-1.0 + hs-source-dirs: benchmarks + main-is: benchmark-Foo.hs + build-depends: base, my, criterion diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/tests/PackageTests/MultipleSource/Check.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/tests/PackageTests/MultipleSource/Check.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/tests/PackageTests/MultipleSource/Check.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/tests/PackageTests/MultipleSource/Check.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,28 @@ +module PackageTests.MultipleSource.Check + ( tests + ) where + + +import PackageTests.PackageTester + +import Test.Framework as TF (Test) +import Test.Framework.Providers.HUnit 
(testCase) + +import Control.Monad (void, when) +import System.Directory (doesDirectoryExist) +import System.FilePath (()) + +dir :: FilePath +dir = packageTestsDirectory "MultipleSource" + +tests :: TestsPaths -> [TF.Test] +tests paths = + [ testCase "finds second source of multiple source" $ do + sandboxExists <- doesDirectoryExist $ dir ".cabal-sandbox" + when sandboxExists $ + void $ cabal_sandbox paths dir ["delete"] + assertSandboxSucceeded =<< cabal_sandbox paths dir ["init"] + assertSandboxSucceeded =<< cabal_sandbox paths dir ["add-source", "p"] + assertSandboxSucceeded =<< cabal_sandbox paths dir ["add-source", "q"] + assertInstallSucceeded =<< cabal_install paths dir ["q"] + ] diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/tests/PackageTests/PackageTester.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/tests/PackageTests/PackageTester.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/tests/PackageTests/PackageTester.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/tests/PackageTests/PackageTester.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,232 @@ +{-# LANGUAGE ScopedTypeVariables #-} + +-- TODO This module was originally based on the PackageTests.PackageTester +-- module in Cabal, however it has a few differences. I suspect that as +-- this module ages the two modules will diverge further. As such, I have +-- not attempted to merge them into a single module nor to extract a common +-- module from them. Refactor this module and/or Cabal's +-- PackageTests.PackageTester to remove commonality. +-- 2014-05-15 Ben Armston + +-- | Routines for black-box testing cabal-install. +-- +-- Instead of driving the tests by making library calls into +-- Distribution.Simple.* or Distribution.Client.* this module only every +-- executes the `cabal-install` binary. 
+-- +-- You can set the following VERBOSE environment variable to control +-- the verbosity of the output generated by this module. +module PackageTests.PackageTester + ( TestsPaths(..) + , Result(..) + + , packageTestsDirectory + , packageTestsConfigFile + + -- * Running cabal commands + , cabal_clean + , cabal_exec + , cabal_freeze + , cabal_install + , cabal_sandbox + , run + + -- * Test helpers + , assertCleanSucceeded + , assertExecFailed + , assertExecSucceeded + , assertFreezeSucceeded + , assertInstallSucceeded + , assertSandboxSucceeded + ) where + +import qualified Control.Exception.Extensible as E +import Control.Monad (when, unless) +import Data.Maybe (fromMaybe) +import System.Directory (canonicalizePath, doesFileExist) +import System.Environment (getEnv) +import System.Exit (ExitCode(ExitSuccess)) +import System.FilePath ( (<.>) ) +import System.IO (hClose, hGetChar, hIsEOF) +import System.IO.Error (isDoesNotExistError) +import System.Process (runProcess, waitForProcess) +import Test.HUnit (Assertion, assertFailure) + +import Distribution.Simple.BuildPaths (exeExtension) +import Distribution.Simple.Utils (printRawCommandAndArgs) +import Distribution.Compat.CreatePipe (createPipe) +import Distribution.ReadE (readEOrFail) +import Distribution.Verbosity (Verbosity, flagToVerbosity, normal) + +data Success = Failure + -- | ConfigureSuccess + -- | BuildSuccess + -- | TestSuccess + -- | BenchSuccess + | CleanSuccess + | ExecSuccess + | FreezeSuccess + | InstallSuccess + | SandboxSuccess + deriving (Eq, Show) + +data TestsPaths = TestsPaths + { cabalPath :: FilePath -- ^ absolute path to cabal executable. + , ghcPkgPath :: FilePath -- ^ absolute path to ghc-pkg executable. + , configPath :: FilePath -- ^ absolute path of the default config file + -- to use for tests (tests are free to use + -- a different one). 
+ } + +data Result = Result + { successful :: Bool + , success :: Success + , outputText :: String + } deriving Show + +nullResult :: Result +nullResult = Result True Failure "" + +------------------------------------------------------------------------ +-- * Config + +packageTestsDirectory :: FilePath +packageTestsDirectory = "PackageTests" + +packageTestsConfigFile :: FilePath +packageTestsConfigFile = "cabal-config" + +------------------------------------------------------------------------ +-- * Running cabal commands + +recordRun :: (String, ExitCode, String) -> Success -> Result -> Result +recordRun (cmd, exitCode, exeOutput) thisSucc res = + res { successful = successful res && exitCode == ExitSuccess + , success = if exitCode == ExitSuccess then thisSucc + else success res + , outputText = + (if null $ outputText res then "" else outputText res ++ "\n") ++ + cmd ++ "\n" ++ exeOutput + } + +-- | Run the clean command and return its result. +cabal_clean :: TestsPaths -> FilePath -> [String] -> IO Result +cabal_clean paths dir args = do + res <- cabal paths dir (["clean"] ++ args) + return $ recordRun res CleanSuccess nullResult + +-- | Run the exec command and return its result. +cabal_exec :: TestsPaths -> FilePath -> [String] -> IO Result +cabal_exec paths dir args = do + res <- cabal paths dir (["exec"] ++ args) + return $ recordRun res ExecSuccess nullResult + +-- | Run the freeze command and return its result. +cabal_freeze :: TestsPaths -> FilePath -> [String] -> IO Result +cabal_freeze paths dir args = do + res <- cabal paths dir (["freeze"] ++ args) + return $ recordRun res FreezeSuccess nullResult + +-- | Run the install command and return its result. +cabal_install :: TestsPaths -> FilePath -> [String] -> IO Result +cabal_install paths dir args = do + res <- cabal paths dir (["install"] ++ args) + return $ recordRun res InstallSuccess nullResult + +-- | Run the sandbox command and return its result. 
+cabal_sandbox :: TestsPaths -> FilePath -> [String] -> IO Result +cabal_sandbox paths dir args = do + res <- cabal paths dir (["sandbox"] ++ args) + return $ recordRun res SandboxSuccess nullResult + +-- | Returns the command that was issued, the return code, and the output text. +cabal :: TestsPaths -> FilePath -> [String] -> IO (String, ExitCode, String) +cabal paths dir cabalArgs = do + run (Just dir) (cabalPath paths) args + where + args = configFileArg : cabalArgs + configFileArg = "--config-file=" ++ configPath paths + +-- | Returns the command that was issued, the return code, and the output text +run :: Maybe FilePath -> String -> [String] -> IO (String, ExitCode, String) +run cwd path args = do + verbosity <- getVerbosity + -- path is relative to the current directory; canonicalizePath makes it + -- absolute, so that runProcess will find it even when changing directory. + path' <- do pathExists <- doesFileExist path + canonicalizePath (if pathExists then path else path <.> exeExtension) + printRawCommandAndArgs verbosity path' args + (readh, writeh) <- createPipe + pid <- runProcess path' args cwd Nothing Nothing (Just writeh) (Just writeh) + + -- fork off a thread to start consuming the output + out <- suckH [] readh + hClose readh + + -- wait for the program to terminate + exitcode <- waitForProcess pid + let fullCmd = unwords (path' : args) + return ("\"" ++ fullCmd ++ "\" in " ++ fromMaybe "" cwd, exitcode, out) + where + suckH output h = do + eof <- hIsEOF h + if eof + then return (reverse output) + else do + c <- hGetChar h + suckH (c:output) h + +------------------------------------------------------------------------ +-- * Test helpers + +assertCleanSucceeded :: Result -> Assertion +assertCleanSucceeded result = unless (successful result) $ + assertFailure $ + "expected: \'cabal clean\' should succeed\n" ++ + " output: " ++ outputText result + +assertExecSucceeded :: Result -> Assertion +assertExecSucceeded result = unless (successful result) $ + 
assertFailure $ + "expected: \'cabal exec\' should succeed\n" ++ + " output: " ++ outputText result + +assertExecFailed :: Result -> Assertion +assertExecFailed result = when (successful result) $ + assertFailure $ + "expected: \'cabal exec\' should fail\n" ++ + " output: " ++ outputText result + +assertFreezeSucceeded :: Result -> Assertion +assertFreezeSucceeded result = unless (successful result) $ + assertFailure $ + "expected: \'cabal freeze\' should succeed\n" ++ + " output: " ++ outputText result + +assertInstallSucceeded :: Result -> Assertion +assertInstallSucceeded result = unless (successful result) $ + assertFailure $ + "expected: \'cabal install\' should succeed\n" ++ + " output: " ++ outputText result + +assertSandboxSucceeded :: Result -> Assertion +assertSandboxSucceeded result = unless (successful result) $ + assertFailure $ + "expected: \'cabal sandbox\' should succeed\n" ++ + " output: " ++ outputText result + +------------------------------------------------------------------------ +-- Verbosity + +lookupEnv :: String -> IO (Maybe String) +lookupEnv name = + (fmap Just $ getEnv name) + `E.catch` \ (e :: IOError) -> + if isDoesNotExistError e + then return Nothing + else E.throw e + +-- TODO: Convert to a "-v" flag instead. +getVerbosity :: IO Verbosity +getVerbosity = do + maybe normal (readEOrFail flagToVerbosity) `fmap` lookupEnv "VERBOSE" diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/tests/PackageTests.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/tests/PackageTests.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/tests/PackageTests.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/tests/PackageTests.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,88 @@ +-- | Groups black-box tests of cabal-install and configures them to test +-- the correct binary. 
+-- +-- This file should do nothing but import tests from other modules and run +-- them with the path to the correct cabal-install binary. +module Main + where + +-- Modules from Cabal. +import Distribution.Simple.Program.Builtin (ghcPkgProgram) +import Distribution.Simple.Program.Db + (defaultProgramDb, requireProgram, setProgramSearchPath) +import Distribution.Simple.Program.Find + (ProgramSearchPathEntry(ProgramSearchPathDir), defaultProgramSearchPath) +import Distribution.Simple.Program.Types + ( Program(..), simpleProgram, programPath) +import Distribution.Simple.Utils ( findProgramVersion ) +import Distribution.Verbosity (normal) + +-- Third party modules. +import qualified Control.Exception.Extensible as E +import System.Directory + ( canonicalizePath, getCurrentDirectory, setCurrentDirectory + , removeFile, doesFileExist ) +import System.FilePath (()) +import Test.Framework (Test, defaultMain, testGroup) +import Control.Monad ( when ) + +-- Module containing common test code. + +import PackageTests.PackageTester ( TestsPaths(..) + , packageTestsDirectory + , packageTestsConfigFile ) + +-- Modules containing the tests. +import qualified PackageTests.Exec.Check +import qualified PackageTests.Freeze.Check +import qualified PackageTests.MultipleSource.Check + +-- List of tests to run. Each test will be called with the path to the +-- cabal binary to use. 
+tests :: PackageTests.PackageTester.TestsPaths -> [Test] +tests paths = + [ testGroup "Freeze" $ PackageTests.Freeze.Check.tests paths + , testGroup "Exec" $ PackageTests.Exec.Check.tests paths + , testGroup "MultipleSource" $ PackageTests.MultipleSource.Check.tests paths + ] + +cabalProgram :: Program +cabalProgram = (simpleProgram "cabal") { + programFindVersion = findProgramVersion "--numeric-version" id + } + +main :: IO () +main = do + buildDir <- canonicalizePath "dist/build/cabal" + let programSearchPath = ProgramSearchPathDir buildDir : defaultProgramSearchPath + (cabal, _) <- requireProgram normal cabalProgram + (setProgramSearchPath programSearchPath defaultProgramDb) + (ghcPkg, _) <- requireProgram normal ghcPkgProgram defaultProgramDb + canonicalConfigPath <- canonicalizePath $ "tests" packageTestsDirectory + + let testsPaths = TestsPaths { + cabalPath = programPath cabal, + ghcPkgPath = programPath ghcPkg, + configPath = canonicalConfigPath packageTestsConfigFile + } + + putStrLn $ "Using cabal: " ++ cabalPath testsPaths + putStrLn $ "Using ghc-pkg: " ++ ghcPkgPath testsPaths + + cwd <- getCurrentDirectory + let confFile = packageTestsDirectory "cabal-config" + removeConf = do + b <- doesFileExist confFile + when b $ removeFile confFile + let runTests = do + setCurrentDirectory "tests" + removeConf -- assert that there is no existing config file + -- (we want deterministic testing with the default + -- config values) + defaultMain $ tests testsPaths + runTests `E.finally` do + -- remove the default config file that got created by the tests + removeConf + -- Change back to the old working directory so that the tests can be + -- repeatedly run in `cabal repl` via `:main`. 
+ setCurrentDirectory cwd diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/tests/UnitTests/Distribution/Client/Dependency/Modular/PSQ.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/tests/UnitTests/Distribution/Client/Dependency/Modular/PSQ.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/tests/UnitTests/Distribution/Client/Dependency/Modular/PSQ.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/tests/UnitTests/Distribution/Client/Dependency/Modular/PSQ.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,22 @@ +module UnitTests.Distribution.Client.Dependency.Modular.PSQ ( + tests + ) where + +import Distribution.Client.Dependency.Modular.PSQ + +import Test.Framework as TF (Test) +import Test.Framework.Providers.QuickCheck2 + +tests :: [TF.Test] +tests = [ testProperty "splitsAltImplementation" splitsTest + ] + +-- | Original splits implementation +splits' :: PSQ k a -> PSQ k (a, PSQ k a) +splits' xs = + casePSQ xs + (PSQ []) + (\ k v ys -> cons k (v, ys) (fmap (\ (w, zs) -> (w, cons k v zs)) (splits' ys))) + +splitsTest :: [(Int, Int)] -> Bool +splitsTest psq = splits' (PSQ psq) == splits (PSQ psq) diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/tests/UnitTests/Distribution/Client/Sandbox.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/tests/UnitTests/Distribution/Client/Sandbox.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/tests/UnitTests/Distribution/Client/Sandbox.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/tests/UnitTests/Distribution/Client/Sandbox.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,29 @@ +module UnitTests.Distribution.Client.Sandbox ( + tests + ) where + +import Distribution.Client.Sandbox (withSandboxBinDirOnSearchPath) + +import Test.Framework as TF (Test) +import Test.Framework.Providers.HUnit (testCase) +import Test.HUnit (Assertion, assertBool, assertEqual) + 
+import System.FilePath (getSearchPath, ()) + +tests :: [TF.Test] +tests = [ testCase "sandboxBinDirOnSearchPath" sandboxBinDirOnSearchPathTest + , testCase "oldSearchPathRestored" oldSearchPathRestoreTest + ] + +sandboxBinDirOnSearchPathTest :: Assertion +sandboxBinDirOnSearchPathTest = + withSandboxBinDirOnSearchPath "foo" $ do + r <- getSearchPath + assertBool "'foo/bin' not on search path" $ ("foo" "bin") `elem` r + +oldSearchPathRestoreTest :: Assertion +oldSearchPathRestoreTest = do + r <- getSearchPath + withSandboxBinDirOnSearchPath "foo" $ return () + r' <- getSearchPath + assertEqual "Old search path wasn't restored" r r' diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/tests/UnitTests/Distribution/Client/Targets.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/tests/UnitTests/Distribution/Client/Targets.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/tests/UnitTests/Distribution/Client/Targets.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/tests/UnitTests/Distribution/Client/Targets.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,59 @@ +module UnitTests.Distribution.Client.Targets ( + tests + ) where + +import Distribution.Client.Targets (UserConstraint (..), readUserConstraint) +import Distribution.Compat.ReadP (ReadP, readP_to_S) +import Distribution.Package (PackageName (..)) +import Distribution.ParseUtils (parseCommaList) +import Distribution.Text (parse) + +import Test.Framework as TF (Test) +import Test.Framework.Providers.HUnit (testCase) +import Test.HUnit (Assertion, assertEqual) + +import Data.Char (isSpace) + +tests :: [TF.Test] +tests = [ testCase "readUserConstraint" readUserConstraintTest + , testCase "parseUserConstraint" parseUserConstraintTest + , testCase "readUserConstraints" readUserConstraintsTest + ] + +readUserConstraintTest :: Assertion +readUserConstraintTest = + assertEqual ("Couldn't read constraint: '" ++ constr ++ "'") expected actual 
+ where + pkgName = "template-haskell" + constr = pkgName ++ " installed" + + expected = UserConstraintInstalled (PackageName pkgName) + actual = let (Right r) = readUserConstraint constr in r + +parseUserConstraintTest :: Assertion +parseUserConstraintTest = + assertEqual ("Couldn't parse constraint: '" ++ constr ++ "'") expected actual + where + pkgName = "template-haskell" + constr = pkgName ++ " installed" + + expected = [UserConstraintInstalled (PackageName pkgName)] + actual = [ x | (x, ys) <- readP_to_S parseUserConstraint constr + , all isSpace ys] + + parseUserConstraint :: ReadP r UserConstraint + parseUserConstraint = parse + +readUserConstraintsTest :: Assertion +readUserConstraintsTest = + assertEqual ("Couldn't read constraints: '" ++ constr ++ "'") expected actual + where + pkgName = "template-haskell" + constr = pkgName ++ " installed" + + expected = [[UserConstraintInstalled (PackageName pkgName)]] + actual = [ x | (x, ys) <- readP_to_S parseUserConstraints constr + , all isSpace ys] + + parseUserConstraints :: ReadP r [UserConstraint] + parseUserConstraints = parseCommaList parse diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/tests/UnitTests/Distribution/Client/UserConfig.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/tests/UnitTests/Distribution/Client/UserConfig.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/tests/UnitTests/Distribution/Client/UserConfig.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/tests/UnitTests/Distribution/Client/UserConfig.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,104 @@ +{-# LANGUAGE CPP #-} +module UnitTests.Distribution.Client.UserConfig + ( tests + ) where + +import Control.Exception (bracket) +import Data.List (sort, nub) +#if !MIN_VERSION_base(4,8,0) +import Data.Monoid +#endif +import System.Directory (getCurrentDirectory, removeDirectoryRecursive, createDirectoryIfMissing) +import System.FilePath (takeDirectory) 
+ +import Test.Framework as TF (Test) +import Test.Framework.Providers.HUnit (testCase) +import Test.HUnit (Assertion, assertBool) + +import Distribution.Client.Compat.Environment (lookupEnv, setEnv) +import Distribution.Client.Config +import Distribution.Utils.NubList (fromNubList) +import Distribution.Client.Setup (GlobalFlags (..), InstallFlags (..)) +import Distribution.Simple.Setup (ConfigFlags (..), fromFlag) +import Distribution.Verbosity (silent) + +tests :: [TF.Test] +tests = [ testCase "nullDiffOnCreate" nullDiffOnCreateTest + , testCase "canDetectDifference" canDetectDifference + , testCase "canUpdateConfig" canUpdateConfig + , testCase "doubleUpdateConfig" doubleUpdateConfig + ] + +nullDiffOnCreateTest :: Assertion +nullDiffOnCreateTest = bracketTest . const $ do + -- Create a new default config file in our test directory. + _ <- loadConfig silent mempty mempty + -- Now we read it in and compare it against the default. + diff <- userConfigDiff mempty + assertBool (unlines $ "Following diff should be empty:" : diff) $ null diff + + +canDetectDifference :: Assertion +canDetectDifference = bracketTest . const $ do + -- Create a new default config file in our test directory. + _ <- loadConfig silent mempty mempty + cabalFile <- defaultConfigFile + appendFile cabalFile "verbose: 0\n" + diff <- userConfigDiff mempty + assertBool (unlines $ "Should detect a difference:" : diff) $ + diff == [ "- verbose: 1", "+ verbose: 0" ] + + +canUpdateConfig :: Assertion +canUpdateConfig = bracketTest . const $ do + cabalFile <- defaultConfigFile + createDirectoryIfMissing True $ takeDirectory cabalFile + -- Write a trivial cabal file. + writeFile cabalFile "tests: True\n" + -- Update the config file. + userConfigUpdate silent mempty + -- Load it again. + updated <- loadConfig silent mempty mempty + assertBool ("Field 'tests' should be True") $ + fromFlag (configTests $ savedConfigureFlags updated) + + +doubleUpdateConfig :: Assertion +doubleUpdateConfig = bracketTest . 
const $ do + -- Create a new default config file in our test directory. + _ <- loadConfig silent mempty mempty + -- Update it. + userConfigUpdate silent mempty + userConfigUpdate silent mempty + -- Load it again. + updated <- loadConfig silent mempty mempty + + assertBool ("Field 'remote-repo' doesn't contain duplicates") $ + listUnique (map show . fromNubList . globalRemoteRepos $ savedGlobalFlags updated) + assertBool ("Field 'extra-prog-path' doesn't contain duplicates") $ + listUnique (map show . fromNubList . configProgramPathExtra $ savedConfigureFlags updated) + assertBool ("Field 'build-summary' doesn't contain duplicates") $ + listUnique (map show . fromNubList . installSummaryFile $ savedInstallFlags updated) + + +listUnique :: Ord a => [a] -> Bool +listUnique xs = + let sorted = sort xs + in nub sorted == xs + + +bracketTest :: ((FilePath, FilePath) -> IO ()) -> Assertion +bracketTest = + bracket testSetup testTearDown + where + testSetup :: IO (FilePath, FilePath) + testSetup = do + Just oldHome <- lookupEnv "HOME" + testdir <- fmap (++ "/test-user-config") getCurrentDirectory + setEnv "HOME" testdir + return (oldHome, testdir) + + testTearDown :: (FilePath, FilePath) -> IO () + testTearDown (oldHome, testdir) = do + setEnv "HOME" oldHome + removeDirectoryRecursive testdir diff -Nru cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/tests/UnitTests.hs cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/tests/UnitTests.hs --- cabal-install-1.22-1.22.6.0/src/cabal-install-1.22.9.0/tests/UnitTests.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/cabal-install-1.22.9.0/tests/UnitTests.hs 2016-06-02 07:15:38.000000000 +0000 @@ -0,0 +1,24 @@ +module Main + where + +import Test.Framework + +import qualified UnitTests.Distribution.Client.Sandbox +import qualified UnitTests.Distribution.Client.UserConfig +import qualified UnitTests.Distribution.Client.Targets +import qualified UnitTests.Distribution.Client.Dependency.Modular.PSQ 
+ +tests :: [Test] +tests = [ + testGroup "UnitTests.Distribution.Client.UserConfig" + UnitTests.Distribution.Client.UserConfig.tests + ,testGroup "Distribution.Client.Sandbox" + UnitTests.Distribution.Client.Sandbox.tests + ,testGroup "Distribution.Client.Targets" + UnitTests.Distribution.Client.Targets.tests + ,testGroup "UnitTests.Distribution.Client.Dependency.Modular.PSQ" + UnitTests.Distribution.Client.Dependency.Modular.PSQ.tests + ] + +main :: IO () +main = defaultMain tests diff -Nru cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/CHANGES cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/CHANGES --- cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/CHANGES 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/CHANGES 2016-02-09 22:34:53.000000000 +0000 @@ -0,0 +1,119 @@ + * If the URI contains "user:pass@" part, use it for Basic Authorization + * Add a test harness. + * Don't leak a socket when getHostAddr throws an exception. + * Send cookies in request format, not response format. + * Moved BrowserAction to be a StateT IO, with instances for + Applicative, MonadIO, MonadState. + * Add method to control size of connection pool. + * Consider both host and port when reusing connections. + * Handle response code 304 "not modified" properly. + * Fix digest authentication by fixing md5 output string rep. + * Make the default user agent string follow the package version. + * Document lack of HTTPS support and fail when clients try + to use it instead of silently falling back to HTTP. + * Add helper to set the request type and body. + +Version 4000.1.2: release 2011-08-11 + * Turn off buffering for the debug log. + * Update installation instructions. + * Bump base dependency to support GHC 7.2. + +Version 4000.1.1: release 2010-11-28 + * Be tolerant of LF (instead of CRLF which is the spec) in responses. + +Version 4000.1.0: release 2010-11-09 + * Retroactively fixed CHANGES to refer to 4000.x.x instead of + 4004.x.x. 
+ * Fix problem with close looping on certain URLs due to trying + to munch the rest of the stream even on EOF. Modified from + a fix by Daniel Wagner. + * This involves a new class member for HStream and is thus an + API change, but one that will only affect clients that + define their own payload type to replace String/ByteString. + * Applied patch by Antoine Latter to fix problem with 301 and 307 + redirects. + +Version 4000.0.10: release 2010-10-29 + * Bump base dependency to support GHC 7.0. + * Stop using 'fail' from the Either monad and instead build Left + values explicitly; the behaviour of fail is changing in GHC 7.0 + and this avoids being sensitive to the change. + +Version 4000.0.9: release 2009-12-20 + + * Export headerMap from Network.HTTP.Headers + (suggested by David Leuschner.) + * Fix Network.TCP.{isTCPConnectedTo,isConnectedTo} to be useful. + * Always delay closing non-persistent connections until we reach EOF. + Delaying it until then is vital when reading the response out as a + lazy ByteString; all of the I/O may not have happened by the time we + were returning the HTTP response. Bug manifested itself occasionally + with larger responses. Courtesy of Valery Vorotyntsev; both untiring bug + hunt and fix. + * drop unused type argument from Network.Browser.BrowserEvent; needlessly general. + (patch provided by Daniel Wagner.) + +Version 4000.0.8: release 2009-08-05 + + * Incorporated proxy setting lookup and parsing contribution + by Eric Kow; provided in Network.HTTP.Proxy + * Factor out HTTP Cookies and Auth handling into separate + modules Network.HTTP.Cookie, Network.HTTP.Auth + * new Network.Browser functionality for hooking up the + proxy detection code in Network.HTTP.Proxy: + + setCheckForProxy :: Bool -> BrowserAction t () + getCheckForProxy :: BrowserAction t Bool + + If you do 'setCheckForProxy True' within a browser + session, the proxy-checking code will be called upon. 
+ Use 'getCheckForProxy' to get the current setting for + this flag. + + * Network.Browser: if HTTP Basic Auth is allowed and + server doesn't 401-challenge with an WWW-Authenticate: + header, simply assume / realm and proceed. Preferable + than failing, even if server is the wrong. + +Version 4000.0.7: release 2009-05-22 + + * Minor release. + * Added + Network.TCP.openSocketStream :: (BufferType t) + => String {-host-} + -> Socket + -> IO (HandleStream t) + + for interfacing to pre-existing @Socket@s. Contributed and + suggested by . + +Version 4000.0.6: release 2009-04-21; changes from 4000.0.5 + + * Network.Browser: use HTTP.HandleStream.sendHTTP_notify, not HTTP.sendHTTP_notify + when issuing requests. The latter runs the risk of undoing request normalization. + * Network.HTTP.Base.normalizeRequest: when normalizing proxy-bound requests, + insert a Host: header if none present. Set it to the destination server authority, + not the proxy. + * Network.Browser: don't fail on seeing invalid cookie values, but report them + as errors and continue. + +Version 4000.0.5: release 2009-03-30; changes from 4000.0.4 + + * Get serious about comments and Haddock documentation. + * Cleaned up normalization of requests, fixing bugs and bringing together + previous disparate attempts at handling this. + * RequestMethod now supports custom verbs; use the (Custom String) constructor + * Beef up Network.HTTP.Base's support for normalizing requests and URIs: + + * added splitRequestURI which divides a URI into two; the Authority portion + (as a String) and the input URI sans the authority portion. Useful when + wanting to split up a request's URI into its Host: and abs_path pieces. + * added normalizeRequest :: Bool -> Request ty -> Request ty, which + fixes up a requests URI path and Host: info depending on whether it is + destined for a proxy or not (controlled by the Bool.) 
+ * moved defaultRequest, defaultRequest_, libUA from Network.Browser + to Network.HTTP.Base + * added mkRequest :: RequestMethod -> URI -> Bool -> Request ty + for constructing normalized&sane Request bases on top of which + you can add custom headers, body payload etc. + diff -Nru cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/HTTP.cabal cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/HTTP.cabal --- cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/HTTP.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/HTTP.cabal 2016-06-02 07:15:43.000000000 +0000 @@ -0,0 +1,169 @@ +Name: HTTP +Version: 4000.3.3 +Cabal-Version: >= 1.8 +Build-type: Simple +License: BSD3 +License-file: LICENSE +Author: Warrick Gray +Maintainer: Ganesh Sittampalam +Homepage: https://github.com/haskell/HTTP +Category: Network +Synopsis: A library for client-side HTTP +Description: + + The HTTP package supports client-side web programming in Haskell. It lets you set up + HTTP connections, transmitting requests and processing the responses coming back, all + from within the comforts of Haskell. It's dependent on the network package to operate, + but other than that, the implementation is all written in Haskell. + . + A basic API for issuing single HTTP requests + receiving responses is provided. On top + of that, a session-level abstraction is also on offer (the @BrowserAction@ monad); + it taking care of handling the management of persistent connections, proxies, + state (cookies) and authentication credentials required to handle multi-step + interactions with a web server. + . + The representation of the bytes flowing across is extensible via the use of a type class, + letting you pick the representation of requests and responses that best fits your use. + Some pre-packaged, common instances are provided for you (@ByteString@, @String@). + . + Here's an example use: + . 
+ > + > do + > rsp <- Network.HTTP.simpleHTTP (getRequest "http://www.haskell.org/") + > -- fetch document and return it (as a 'String'.) + > fmap (take 100) (getResponseBody rsp) + > + > do + > (_, rsp) + > <- Network.Browser.browse $ do + > setAllowRedirects True -- handle HTTP redirects + > request $ getRequest "http://www.haskell.org/" + > return (take 100 (rspBody rsp)) + +Extra-Source-Files: CHANGES + +Source-Repository head + type: git + location: https://github.com/haskell/HTTP.git + +Flag mtl1 + description: Use the old mtl version 1. + default: False + +Flag warn-as-error + default: False + description: Build with warnings-as-errors + manual: True + +Flag network23 + description: Use version 2.3.x or below of the network package + default: False + +Flag conduit10 + description: Use version 1.0.x or below of the conduit package (for the test suite) + default: False + +Flag warp-tests + description: Test against warp + default: True + manual: True + +flag network-uri + description: Get Network.URI from the network-uri package + default: True + +Library + Exposed-modules: + Network.BufferType, + Network.Stream, + Network.StreamDebugger, + Network.StreamSocket, + Network.TCP, + Network.HTTP, + Network.HTTP.Headers, + Network.HTTP.Base, + Network.HTTP.Stream, + Network.HTTP.Auth, + Network.HTTP.Cookie, + Network.HTTP.Proxy, + Network.HTTP.HandleStream, + Network.Browser + Other-modules: + Network.HTTP.Base64, + Network.HTTP.MD5Aux, + Network.HTTP.Utils + Paths_HTTP + GHC-options: -fwarn-missing-signatures -Wall + + -- note the test harness constraints should be kept in sync with these + -- where dependencies are shared + Build-depends: base >= 4.3.0.0 && < 4.10, parsec >= 2.0 && < 3.2 + Build-depends: array >= 0.3.0.2 && < 0.6, bytestring >= 0.9.1.5 && < 0.11 + Build-depends: time >= 1.1.2.3 && < 1.7 + + Extensions: FlexibleInstances + + if flag(mtl1) + Build-depends: mtl >= 1.1.1.0 && < 1.2 + CPP-Options: -DMTL1 + else + Build-depends: mtl >= 2.0 && < 2.3 + + 
if flag(network-uri) + Build-depends: network-uri == 2.6.*, network == 2.6.* + else + Build-depends: network >= 2.2.1.8 && < 2.6 + + if flag(warn-as-error) + ghc-options: -Werror + + if os(windows) + Build-depends: Win32 >= 2.2.0.0 && < 2.4 + +Test-Suite test + type: exitcode-stdio-1.0 + + hs-source-dirs: test + main-is: httpTests.hs + + other-modules: + Httpd + UnitTests + + -- note: version constraints for dependencies shared with the library + -- should be the same + build-depends: HTTP, + HUnit >= 1.2.0.1 && < 1.4, + httpd-shed >= 0.4 && < 0.5, + mtl >= 1.1.1.0 && < 2.3, + bytestring >= 0.9.1.5 && < 0.11, + deepseq >= 1.3.0.0 && < 1.5, + pureMD5 >= 0.2.4 && < 2.2, + base >= 4.3.0.0 && < 4.10, + split >= 0.1.3 && < 0.3, + test-framework >= 0.2.0 && < 0.9, + test-framework-hunit >= 0.3.0 && <0.4 + + if flag(network-uri) + Build-depends: network-uri == 2.6.*, network == 2.6.* + else + Build-depends: network >= 2.2.1.5 && < 2.6 + + if flag(warp-tests) + CPP-Options: -DWARP_TESTS + build-depends: + case-insensitive >= 0.4.0.1 && < 1.3, + http-types >= 0.8.0 && < 1.0, + wai >= 2.1.0 && < 3.3, + warp >= 2.1.0 && < 3.3 + + if flag(conduit10) + build-depends: + conduit >= 1.0.8 && < 1.1 + else + build-depends: + conduit >= 1.1 && < 1.3, + conduit-extra >= 1.1 && < 1.2 + + diff -Nru cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/LICENSE cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/LICENSE --- cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/LICENSE 2016-02-09 22:34:53.000000000 +0000 @@ -0,0 +1,46 @@ +Copyright (c) 2002, Warrick Gray +Copyright (c) 2002-2005, Ian Lynagh +Copyright (c) 2003-2006, Bjorn Bringert +Copyright (c) 2004, Andre Furtado +Copyright (c) 2004-2005, Dominic Steinitz +Copyright (c) 2007, Robin Bate Boerop +Copyright (c) 2008-2010, Sigbjorn Finne +Copyright (c) 2009, Eric Kow +Copyright (c) 2010, Antoine Latter +Copyright (c) 2004, 2010-2011, Ganesh Sittampalam 
+Copyright (c) 2011, Duncan Coutts +Copyright (c) 2011, Matthew Gruen +Copyright (c) 2011, Jeremy Yallop +Copyright (c) 2011, Eric Hesselink +Copyright (c) 2011, Yi Huang +Copyright (c) 2011, Tom Lokhorst + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * The names of contributors may not be used to endorse or promote + products derived from this software without specific prior + written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff -Nru cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Network/Browser.hs cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Network/Browser.hs --- cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Network/Browser.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Network/Browser.hs 2016-02-09 22:34:53.000000000 +0000 @@ -0,0 +1,1091 @@ +{-# LANGUAGE MultiParamTypeClasses, GeneralizedNewtypeDeriving, CPP, FlexibleContexts #-} +{- | + +Module : Network.Browser +Copyright : See LICENSE file +License : BSD + +Maintainer : Ganesh Sittampalam +Stability : experimental +Portability : non-portable (not tested) + +Session-level interactions over HTTP. + +The "Network.Browser" goes beyond the basic "Network.HTTP" functionality in +providing support for more involved, and real, request/response interactions over +HTTP. Additional features supported are: + +* HTTP Authentication handling + +* Transparent handling of redirects + +* Cookie stores + transmission. + +* Transaction logging + +* Proxy-mediated connections. + +Example use: + +> do +> (_, rsp) +> <- Network.Browser.browse $ do +> setAllowRedirects True -- handle HTTP redirects +> request $ getRequest "http://www.haskell.org/" +> return (take 100 (rspBody rsp)) + +-} +module Network.Browser + ( BrowserState + , BrowserAction -- browser monad, effectively a state monad. + , Proxy(..) + + , browse -- :: BrowserAction a -> IO a + , request -- :: Request -> BrowserAction Response + + , getBrowserState -- :: BrowserAction t (BrowserState t) + , withBrowserState -- :: BrowserState t -> BrowserAction t a -> BrowserAction t a + + , setAllowRedirects -- :: Bool -> BrowserAction t () + , getAllowRedirects -- :: BrowserAction t Bool + + , setMaxRedirects -- :: Int -> BrowserAction t () + , getMaxRedirects -- :: BrowserAction t (Maybe Int) + + , Authority(..) + , getAuthorities + , setAuthorities + , addAuthority + , Challenge(..) + , Qop(..) + , Algorithm(..) 
+ + , getAuthorityGen + , setAuthorityGen + , setAllowBasicAuth + , getAllowBasicAuth + + , setMaxErrorRetries -- :: Maybe Int -> BrowserAction t () + , getMaxErrorRetries -- :: BrowserAction t (Maybe Int) + + , setMaxPoolSize -- :: Int -> BrowserAction t () + , getMaxPoolSize -- :: BrowserAction t (Maybe Int) + + , setMaxAuthAttempts -- :: Maybe Int -> BrowserAction t () + , getMaxAuthAttempts -- :: BrowserAction t (Maybe Int) + + , setCookieFilter -- :: (URI -> Cookie -> IO Bool) -> BrowserAction t () + , getCookieFilter -- :: BrowserAction t (URI -> Cookie -> IO Bool) + , defaultCookieFilter -- :: URI -> Cookie -> IO Bool + , userCookieFilter -- :: URI -> Cookie -> IO Bool + + , Cookie(..) + , getCookies -- :: BrowserAction t [Cookie] + , setCookies -- :: [Cookie] -> BrowserAction t () + , addCookie -- :: Cookie -> BrowserAction t () + + , setErrHandler -- :: (String -> IO ()) -> BrowserAction t () + , setOutHandler -- :: (String -> IO ()) -> BrowserAction t () + + , setEventHandler -- :: (BrowserEvent -> BrowserAction t ()) -> BrowserAction t () + + , BrowserEvent(..) + , BrowserEventType(..) + , RequestID + + , setProxy -- :: Proxy -> BrowserAction t () + , getProxy -- :: BrowserAction t Proxy + + , setCheckForProxy -- :: Bool -> BrowserAction t () + , getCheckForProxy -- :: BrowserAction t Bool + + , setDebugLog -- :: Maybe String -> BrowserAction t () + + , getUserAgent -- :: BrowserAction t String + , setUserAgent -- :: String -> BrowserAction t () + + , out -- :: String -> BrowserAction t () + , err -- :: String -> BrowserAction t () + , ioAction -- :: IO a -> BrowserAction a + + , defaultGETRequest + , defaultGETRequest_ + + , formToRequest + , uriDefaultTo + + -- old and half-baked; don't use: + , Form(..) + , FormVar + ) where + +import Network.URI + ( URI(..) + , URIAuth(..) 
+ , parseURI, parseURIReference, relativeTo + ) +import Network.StreamDebugger (debugByteStream) +import Network.HTTP hiding ( sendHTTP_notify ) +import Network.HTTP.HandleStream ( sendHTTP_notify ) +import Network.HTTP.Auth +import Network.HTTP.Cookie +import Network.HTTP.Proxy + +import Network.Stream ( ConnError(..), Result ) +import Network.BufferType + +import Data.Char (toLower) +import Data.List (isPrefixOf) +import Data.Maybe (fromMaybe, listToMaybe, catMaybes ) +import Control.Applicative (Applicative (..), (<$>)) +#ifdef MTL1 +import Control.Monad (filterM, forM_, when, ap) +#else +import Control.Monad (filterM, forM_, when) +#endif +import Control.Monad.State (StateT (..), MonadIO (..), modify, gets, withStateT, evalStateT, MonadState (..)) + +import qualified System.IO + ( hSetBuffering, hPutStr, stdout, stdin, hGetChar + , BufferMode(NoBuffering, LineBuffering) + ) +import Data.Time.Clock ( UTCTime, getCurrentTime ) + + +------------------------------------------------------------------ +----------------------- Cookie Stuff ----------------------------- +------------------------------------------------------------------ + +-- | @defaultCookieFilter@ is the initial cookie acceptance filter. +-- It welcomes them all into the store @:-)@ +defaultCookieFilter :: URI -> Cookie -> IO Bool +defaultCookieFilter _url _cky = return True + +-- | @userCookieFilter@ is a handy acceptance filter, asking the +-- user if he/she is willing to accept an incoming cookie before +-- adding it to the store. 
+userCookieFilter :: URI -> Cookie -> IO Bool +userCookieFilter url cky = do + do putStrLn ("Set-Cookie received when requesting: " ++ show url) + case ckComment cky of + Nothing -> return () + Just x -> putStrLn ("Cookie Comment:\n" ++ x) + let pth = maybe "" ('/':) (ckPath cky) + putStrLn ("Domain/Path: " ++ ckDomain cky ++ pth) + putStrLn (ckName cky ++ '=' : ckValue cky) + System.IO.hSetBuffering System.IO.stdout System.IO.NoBuffering + System.IO.hSetBuffering System.IO.stdin System.IO.NoBuffering + System.IO.hPutStr System.IO.stdout "Accept [y/n]? " + x <- System.IO.hGetChar System.IO.stdin + System.IO.hSetBuffering System.IO.stdin System.IO.LineBuffering + System.IO.hSetBuffering System.IO.stdout System.IO.LineBuffering + return (toLower x == 'y') + +-- | @addCookie c@ adds a cookie to the browser state, removing duplicates. +addCookie :: Cookie -> BrowserAction t () +addCookie c = modify (\b -> b{bsCookies = c : filter (/=c) (bsCookies b) }) + +-- | @setCookies cookies@ replaces the set of cookies known to +-- the browser to @cookies@. Useful when wanting to restore cookies +-- used across 'browse' invocations. +setCookies :: [Cookie] -> BrowserAction t () +setCookies cs = modify (\b -> b { bsCookies=cs }) + +-- | @getCookies@ returns the current set of cookies known to +-- the browser. +getCookies :: BrowserAction t [Cookie] +getCookies = gets bsCookies + +-- ...get domain specific cookies... +-- ... this needs changing for consistency with rfc2109... +-- ... currently too broad. +getCookiesFor :: String -> String -> BrowserAction t [Cookie] +getCookiesFor dom path = + do cks <- getCookies + return (filter cookiematch cks) + where + cookiematch :: Cookie -> Bool + cookiematch = cookieMatch (dom,path) + + +-- | @setCookieFilter fn@ sets the cookie acceptance filter to @fn@. 
+setCookieFilter :: (URI -> Cookie -> IO Bool) -> BrowserAction t () +setCookieFilter f = modify (\b -> b { bsCookieFilter=f }) + +-- | @getCookieFilter@ returns the current cookie acceptance filter. +getCookieFilter :: BrowserAction t (URI -> Cookie -> IO Bool) +getCookieFilter = gets bsCookieFilter + +------------------------------------------------------------------ +----------------------- Authorisation Stuff ---------------------- +------------------------------------------------------------------ + +{- + +The browser handles 401 responses in the following manner: + 1) extract all WWW-Authenticate headers from a 401 response + 2) rewrite each as a Challenge object, using "headerToChallenge" + 3) pick a challenge to respond to, usually the strongest + challenge understood by the client, using "pickChallenge" + 4) generate a username/password combination using the browsers + "bsAuthorityGen" function (the default behaviour is to ask + the user) + 5) build an Authority object based upon the challenge and user + data, store this new Authority in the browser state + 6) convert the Authority to a request header and add this + to a request using "withAuthority" + 7) send the amended request + +Note that by default requests are annotated with authority headers +before the first sending, based upon previously generated Authority +objects (which contain domain information). Once a specific authority +is added to a rejected request this predictive annotation is suppressed. + +407 responses are handled in a similar manner, except + a) Authorities are not collected, only a single proxy authority + is kept by the browser + b) If the proxy used by the browser (type Proxy) is NoProxy, then + a 407 response will generate output on the "err" stream and + the response will be returned. 
+ + +Notes: + - digest authentication so far ignores qop, so fails to authenticate + properly with qop=auth-int challenges + - calculates a1 more than necessary + - doesn't reverse authenticate + - doesn't properly receive AuthenticationInfo headers, so fails + to use next-nonce etc + +-} + +-- | Return authorities for a given domain and path. +-- Assumes "dom" is lower case +getAuthFor :: String -> String -> BrowserAction t [Authority] +getAuthFor dom pth = getAuthorities >>= return . (filter match) + where + match :: Authority -> Bool + match au@AuthBasic{} = matchURI (auSite au) + match au@AuthDigest{} = or (map matchURI (auDomain au)) + + matchURI :: URI -> Bool + matchURI s = (uriToAuthorityString s == dom) && (uriPath s `isPrefixOf` pth) + + +-- | @getAuthorities@ return the current set of @Authority@s known +-- to the browser. +getAuthorities :: BrowserAction t [Authority] +getAuthorities = gets bsAuthorities + +-- @setAuthorities as@ replaces the Browser's known set +-- of 'Authority's to @as@. +setAuthorities :: [Authority] -> BrowserAction t () +setAuthorities as = modify (\b -> b { bsAuthorities=as }) + +-- @addAuthority a@ adds 'Authority' @a@ to the Browser's +-- set of known authorities. +addAuthority :: Authority -> BrowserAction t () +addAuthority a = modify (\b -> b { bsAuthorities=a:bsAuthorities b }) + +-- | @getAuthorityGen@ returns the current authority generator +getAuthorityGen :: BrowserAction t (URI -> String -> IO (Maybe (String,String))) +getAuthorityGen = gets bsAuthorityGen + +-- | @setAuthorityGen genAct@ sets the auth generator to @genAct@. +setAuthorityGen :: (URI -> String -> IO (Maybe (String,String))) -> BrowserAction t () +setAuthorityGen f = modify (\b -> b { bsAuthorityGen=f }) + +-- | @setAllowBasicAuth onOff@ enables\/disables HTTP Basic Authentication. 
+setAllowBasicAuth :: Bool -> BrowserAction t () +setAllowBasicAuth ba = modify (\b -> b { bsAllowBasicAuth=ba }) + +getAllowBasicAuth :: BrowserAction t Bool +getAllowBasicAuth = gets bsAllowBasicAuth + +-- | @setMaxAuthAttempts mbMax@ sets the maximum number of authentication attempts +-- to do. If @Nothing@, rever to default max. +setMaxAuthAttempts :: Maybe Int -> BrowserAction t () +setMaxAuthAttempts mb + | fromMaybe 0 mb < 0 = return () + | otherwise = modify (\ b -> b{bsMaxAuthAttempts=mb}) + +-- | @getMaxAuthAttempts@ returns the current max auth attempts. If @Nothing@, +-- the browser's default is used. +getMaxAuthAttempts :: BrowserAction t (Maybe Int) +getMaxAuthAttempts = gets bsMaxAuthAttempts + +-- | @setMaxErrorRetries mbMax@ sets the maximum number of attempts at +-- transmitting a request. If @Nothing@, rever to default max. +setMaxErrorRetries :: Maybe Int -> BrowserAction t () +setMaxErrorRetries mb + | fromMaybe 0 mb < 0 = return () + | otherwise = modify (\ b -> b{bsMaxErrorRetries=mb}) + +-- | @getMaxErrorRetries@ returns the current max number of error retries. +getMaxErrorRetries :: BrowserAction t (Maybe Int) +getMaxErrorRetries = gets bsMaxErrorRetries + +-- TO BE CHANGED!!! +pickChallenge :: Bool -> [Challenge] -> Maybe Challenge +pickChallenge allowBasic [] + | allowBasic = Just (ChalBasic "/") -- manufacture a challenge if one missing; more robust. +pickChallenge _ ls = listToMaybe ls + +-- | Retrieve a likely looking authority for a Request. 
+anticipateChallenge :: Request ty -> BrowserAction t (Maybe Authority) +anticipateChallenge rq = + let uri = rqURI rq in + do { authlist <- getAuthFor (uriAuthToString $ reqURIAuth rq) (uriPath uri) + ; return (listToMaybe authlist) + } + +-- | Asking the user to respond to a challenge +challengeToAuthority :: URI -> Challenge -> BrowserAction t (Maybe Authority) +challengeToAuthority uri ch + | not (answerable ch) = return Nothing + | otherwise = do + -- prompt user for authority + prompt <- getAuthorityGen + userdetails <- liftIO $ prompt uri (chRealm ch) + case userdetails of + Nothing -> return Nothing + Just (u,p) -> return (Just $ buildAuth ch u p) + where + answerable :: Challenge -> Bool + answerable ChalBasic{} = True + answerable chall = (chAlgorithm chall) == Just AlgMD5 + + buildAuth :: Challenge -> String -> String -> Authority + buildAuth (ChalBasic r) u p = + AuthBasic { auSite=uri + , auRealm=r + , auUsername=u + , auPassword=p + } + + -- note to self: this is a pretty stupid operation + -- to perform isn't it? ChalX and AuthX are so very + -- similar. + buildAuth (ChalDigest r d n o _stale a q) u p = + AuthDigest { auRealm=r + , auUsername=u + , auPassword=p + , auDomain=d + , auNonce=n + , auOpaque=o + , auAlgorithm=a + , auQop=q + } + + +------------------------------------------------------------------ +------------------ Browser State Actions ------------------------- +------------------------------------------------------------------ + + +-- | @BrowserState@ is the (large) record type tracking the current +-- settings of the browser. 
+data BrowserState connection + = BS { bsErr, bsOut :: String -> IO () + , bsCookies :: [Cookie] + , bsCookieFilter :: URI -> Cookie -> IO Bool + , bsAuthorityGen :: URI -> String -> IO (Maybe (String,String)) + , bsAuthorities :: [Authority] + , bsAllowRedirects :: Bool + , bsAllowBasicAuth :: Bool + , bsMaxRedirects :: Maybe Int + , bsMaxErrorRetries :: Maybe Int + , bsMaxAuthAttempts :: Maybe Int + , bsMaxPoolSize :: Maybe Int + , bsConnectionPool :: [connection] + , bsCheckProxy :: Bool + , bsProxy :: Proxy + , bsDebug :: Maybe String + , bsEvent :: Maybe (BrowserEvent -> BrowserAction connection ()) + , bsRequestID :: RequestID + , bsUserAgent :: Maybe String + } + +instance Show (BrowserState t) where + show bs = "BrowserState { " + ++ shows (bsCookies bs) ("\n" + {- ++ show (bsAuthorities bs) ++ "\n"-} + ++ "AllowRedirects: " ++ shows (bsAllowRedirects bs) "} ") + +-- | @BrowserAction@ is the IO monad, but carrying along a 'BrowserState'. +newtype BrowserAction conn a + = BA { unBA :: StateT (BrowserState conn) IO a } +#ifdef MTL1 + deriving (Functor, Monad, MonadIO, MonadState (BrowserState conn)) + +instance Applicative (BrowserAction conn) where + pure = return + (<*>) = ap +#else + deriving (Functor, Applicative, Monad, MonadIO, MonadState (BrowserState conn)) +#endif + +runBA :: BrowserState conn -> BrowserAction conn a -> IO a +runBA bs = flip evalStateT bs . unBA + +-- | @browse act@ is the toplevel action to perform a 'BrowserAction'. +-- Example use: @browse (request (getRequest yourURL))@. 
+browse :: BrowserAction conn a -> IO a +browse = runBA defaultBrowserState + +-- | The default browser state has the settings +defaultBrowserState :: BrowserState t +defaultBrowserState = res + where + res = BS + { bsErr = putStrLn + , bsOut = putStrLn + , bsCookies = [] + , bsCookieFilter = defaultCookieFilter + , bsAuthorityGen = \ _uri _realm -> do + bsErr res "No action for prompting/generating user+password credentials provided (use: setAuthorityGen); returning Nothing" + return Nothing + , bsAuthorities = [] + , bsAllowRedirects = True + , bsAllowBasicAuth = False + , bsMaxRedirects = Nothing + , bsMaxErrorRetries = Nothing + , bsMaxAuthAttempts = Nothing + , bsMaxPoolSize = Nothing + , bsConnectionPool = [] + , bsCheckProxy = defaultAutoProxyDetect + , bsProxy = noProxy + , bsDebug = Nothing + , bsEvent = Nothing + , bsRequestID = 0 + , bsUserAgent = Nothing + } + +{-# DEPRECATED getBrowserState "Use Control.Monad.State.get instead." #-} +-- | @getBrowserState@ returns the current browser config. Useful +-- for restoring state across 'BrowserAction's. +getBrowserState :: BrowserAction t (BrowserState t) +getBrowserState = get + +-- | @withBrowserAction st act@ performs @act@ with 'BrowserState' @st@. +withBrowserState :: BrowserState t -> BrowserAction t a -> BrowserAction t a +withBrowserState bs = BA . withStateT (const bs) . unBA + +-- | @nextRequest act@ performs the browser action @act@ as +-- the next request, i.e., setting up a new request context +-- before doing so. +nextRequest :: BrowserAction t a -> BrowserAction t a +nextRequest act = do + let updReqID st = + let + rid = succ (bsRequestID st) + in + rid `seq` st{bsRequestID=rid} + modify updReqID + act + +-- | Lifts an IO action into the 'BrowserAction' monad. +{-# DEPRECATED ioAction "Use Control.Monad.Trans.liftIO instead." #-} +ioAction :: IO a -> BrowserAction t a +ioAction = liftIO + +-- | @setErrHandler@ sets the IO action to call when +-- the browser reports running errors. 
To disable any +-- such, set it to @const (return ())@. +setErrHandler :: (String -> IO ()) -> BrowserAction t () +setErrHandler h = modify (\b -> b { bsErr=h }) + +-- | @setOutHandler@ sets the IO action to call when +-- the browser chatters info on its running. To disable any +-- such, set it to @const (return ())@. +setOutHandler :: (String -> IO ()) -> BrowserAction t () +setOutHandler h = modify (\b -> b { bsOut=h }) + +out, err :: String -> BrowserAction t () +out s = do { f <- gets bsOut ; liftIO $ f s } +err s = do { f <- gets bsErr ; liftIO $ f s } + +-- | @setAllowRedirects onOff@ toggles the willingness to +-- follow redirects (HTTP responses with 3xx status codes). +setAllowRedirects :: Bool -> BrowserAction t () +setAllowRedirects bl = modify (\b -> b {bsAllowRedirects=bl}) + +-- | @getAllowRedirects@ returns current setting of the do-chase-redirects flag. +getAllowRedirects :: BrowserAction t Bool +getAllowRedirects = gets bsAllowRedirects + +-- | @setMaxRedirects maxCount@ sets the maxiumum number of forwarding hops +-- we are willing to jump through. A no-op if the count is negative; if zero, +-- the max is set to whatever default applies. Notice that setting the max +-- redirects count does /not/ enable following of redirects itself; use +-- 'setAllowRedirects' to do so. +setMaxRedirects :: Maybe Int -> BrowserAction t () +setMaxRedirects c + | fromMaybe 0 c < 0 = return () + | otherwise = modify (\b -> b{bsMaxRedirects=c}) + +-- | @getMaxRedirects@ returns the current setting for the max-redirect count. +-- If @Nothing@, the "Network.Browser"'s default is used. 
+getMaxRedirects :: BrowserAction t (Maybe Int) +getMaxRedirects = gets bsMaxRedirects + +-- | @setMaxPoolSize maxCount@ sets the maximum size of the connection pool +-- that is used to cache connections between requests +setMaxPoolSize :: Maybe Int -> BrowserAction t () +setMaxPoolSize c = modify (\b -> b{bsMaxPoolSize=c}) + +-- | @getMaxPoolSize@ gets the maximum size of the connection pool +-- that is used to cache connections between requests. +-- If @Nothing@, the "Network.Browser"'s default is used. +getMaxPoolSize :: BrowserAction t (Maybe Int) +getMaxPoolSize = gets bsMaxPoolSize + +-- | @setProxy p@ will disable proxy usage if @p@ is @NoProxy@. +-- If @p@ is @Proxy proxyURL mbAuth@, then @proxyURL@ is interpreted +-- as the URL of the proxy to use, possibly authenticating via +-- 'Authority' information in @mbAuth@. +setProxy :: Proxy -> BrowserAction t () +setProxy p = + -- Note: if user _explicitly_ sets the proxy, we turn + -- off any auto-detection of proxies. + modify (\b -> b {bsProxy = p, bsCheckProxy=False}) + +-- | @getProxy@ returns the current proxy settings. If +-- the auto-proxy flag is set to @True@, @getProxy@ will +-- perform the necessary +getProxy :: BrowserAction t Proxy +getProxy = do + p <- gets bsProxy + case p of + -- Note: if there is a proxy, no need to perform any auto-detect. + -- Presumably this is the user's explicit and preferred proxy server. + Proxy{} -> return p + NoProxy{} -> do + flg <- gets bsCheckProxy + if not flg + then return p + else do + np <- liftIO $ fetchProxy True{-issue warning on stderr if ill-formed...-} + -- note: this resets the check-proxy flag; a one-off affair. + setProxy np + return np + +-- | @setCheckForProxy flg@ sets the one-time check for proxy +-- flag to @flg@. If @True@, the session will try to determine +-- the proxy server is locally configured. See 'Network.HTTP.Proxy.fetchProxy' +-- for details of how this done. 
+setCheckForProxy :: Bool -> BrowserAction t () +setCheckForProxy flg = modify (\ b -> b{bsCheckProxy=flg}) + +-- | @getCheckForProxy@ returns the current check-proxy setting. +-- Notice that this may not be equal to @True@ if the session has +-- set it to that via 'setCheckForProxy' and subsequently performed +-- some HTTP protocol interactions. i.e., the flag return represents +-- whether a proxy will be checked for again before any future protocol +-- interactions. +getCheckForProxy :: BrowserAction t Bool +getCheckForProxy = gets bsCheckProxy + +-- | @setDebugLog mbFile@ turns off debug logging iff @mbFile@ +-- is @Nothing@. If set to @Just fStem@, logs of browser activity +-- is appended to files of the form @fStem-url-authority@, i.e., +-- @fStem@ is just the prefix for a set of log files, one per host/authority. +setDebugLog :: Maybe String -> BrowserAction t () +setDebugLog v = modify (\b -> b {bsDebug=v}) + +-- | @setUserAgent ua@ sets the current @User-Agent:@ string to @ua@. It +-- will be used if no explicit user agent header is found in subsequent requests. +-- +-- A common form of user agent string is @\"name\/version (details)\"@. For +-- example @\"cabal-install/0.10.2 (HTTP 4000.1.2)\"@. Including the version +-- of this HTTP package can be helpful if you ever need to track down HTTP +-- compatability quirks. This version is available via 'httpPackageVersion'. +-- For more info see . +-- +setUserAgent :: String -> BrowserAction t () +setUserAgent ua = modify (\b -> b{bsUserAgent=Just ua}) + +-- | @getUserAgent@ returns the current @User-Agent:@ default string. +getUserAgent :: BrowserAction t String +getUserAgent = do + n <- gets bsUserAgent + return (maybe defaultUserAgent id n) + +-- | @RequestState@ is an internal tallying type keeping track of various +-- per-connection counters, like the number of authorization attempts and +-- forwards we've gone through. 
+data RequestState + = RequestState + { reqDenies :: Int -- ^ number of 401 responses so far + , reqRedirects :: Int -- ^ number of redirects so far + , reqRetries :: Int -- ^ number of retries so far + , reqStopOnDeny :: Bool -- ^ whether to pre-empt 401 response + } + +type RequestID = Int -- yeah, it will wrap around. + +nullRequestState :: RequestState +nullRequestState = RequestState + { reqDenies = 0 + , reqRedirects = 0 + , reqRetries = 0 + , reqStopOnDeny = True + } + +-- | @BrowserEvent@ is the event record type that a user-defined handler, set +-- via 'setEventHandler', will be passed. It indicates various state changes +-- encountered in the processing of a given 'RequestID', along with timestamps +-- at which they occurred. +data BrowserEvent + = BrowserEvent + { browserTimestamp :: UTCTime + , browserRequestID :: RequestID + , browserRequestURI :: {-URI-}String + , browserEventType :: BrowserEventType + } + +-- | 'BrowserEventType' is the enumerated list of events that the browser +-- internals will report to a user-defined event handler. +data BrowserEventType + = OpenConnection + | ReuseConnection + | RequestSent + | ResponseEnd ResponseData + | ResponseFinish +{- not yet, you will have to determine these via the ResponseEnd event. + | Redirect + | AuthChallenge + | AuthResponse +-} + +-- | @setEventHandler onBrowserEvent@ configures event handling. +-- If @onBrowserEvent@ is @Nothing@, event handling is turned off; +-- setting it to @Just onEv@ causes the @onEv@ IO action to be +-- notified of browser events during the processing of a request +-- by the Browser pipeline. 
+setEventHandler :: Maybe (BrowserEvent -> BrowserAction ty ()) -> BrowserAction ty () +setEventHandler mbH = modify (\b -> b { bsEvent=mbH}) + +buildBrowserEvent :: BrowserEventType -> {-URI-}String -> RequestID -> IO BrowserEvent +buildBrowserEvent bt uri reqID = do + ct <- getCurrentTime + return BrowserEvent + { browserTimestamp = ct + , browserRequestID = reqID + , browserRequestURI = uri + , browserEventType = bt + } + +reportEvent :: BrowserEventType -> {-URI-}String -> BrowserAction t () +reportEvent bt uri = do + st <- get + case bsEvent st of + Nothing -> return () + Just evH -> do + evt <- liftIO $ buildBrowserEvent bt uri (bsRequestID st) + evH evt -- if it fails, we fail. + +-- | The default number of hops we are willing not to go beyond for +-- request forwardings. +defaultMaxRetries :: Int +defaultMaxRetries = 4 + +-- | The default number of error retries we are willing to perform. +defaultMaxErrorRetries :: Int +defaultMaxErrorRetries = 4 + +-- | The default maximum HTTP Authentication attempts we will make for +-- a single request. +defaultMaxAuthAttempts :: Int +defaultMaxAuthAttempts = 2 + +-- | The default setting for auto-proxy detection. +-- You may change this within a session via 'setAutoProxyDetect'. +-- To avoid initial backwards compatibility issues, leave this as @False@. +defaultAutoProxyDetect :: Bool +defaultAutoProxyDetect = False + +-- | @request httpRequest@ tries to submit the 'Request' @httpRequest@ +-- to some HTTP server (possibly going via a /proxy/, see 'setProxy'.) +-- Upon successful delivery, the URL where the response was fetched from +-- is returned along with the 'Response' itself. 
+request :: HStream ty + => Request ty + -> BrowserAction (HandleStream ty) (URI,Response ty) +request req = nextRequest $ do + res <- request' nullVal initialState req + reportEvent ResponseFinish (show (rqURI req)) + case res of + Right r -> return r + Left e -> do + let errStr = ("Network.Browser.request: Error raised " ++ show e) + err errStr + fail errStr + where + initialState = nullRequestState + nullVal = buf_empty bufferOps + +-- | Internal helper function, explicitly carrying along per-request +-- counts. +request' :: HStream ty + => ty + -> RequestState + -> Request ty + -> BrowserAction (HandleStream ty) (Result (URI,Response ty)) +request' nullVal rqState rq = do + let uri = rqURI rq + failHTTPS uri + let uria = reqURIAuth rq + -- add cookies to request + cookies <- getCookiesFor (uriAuthToString uria) (uriPath uri) +{- Not for now: + (case uriUserInfo uria of + "" -> id + xs -> + case chopAtDelim ':' xs of + (_,[]) -> id + (usr,pwd) -> withAuth + AuthBasic{ auUserName = usr + , auPassword = pwd + , auRealm = "/" + , auSite = uri + }) $ do +-} + when (not $ null cookies) + (out $ "Adding cookies to request. 
Cookie names: " ++ unwords (map ckName cookies)) + -- add credentials to request + rq' <- + if not (reqStopOnDeny rqState) + then return rq + else do + auth <- anticipateChallenge rq + case auth of + Nothing -> return rq + Just x -> return (insertHeader HdrAuthorization (withAuthority x rq) rq) + let rq'' = if not $ null cookies then insertHeaders [cookiesToHeader cookies] rq' else rq' + p <- getProxy + def_ua <- gets bsUserAgent + let defaultOpts = + case p of + NoProxy -> defaultNormalizeRequestOptions{normUserAgent=def_ua} + Proxy _ ath -> + defaultNormalizeRequestOptions + { normForProxy = True + , normUserAgent = def_ua + , normCustoms = + maybe [] + (\ authS -> [\ _ r -> insertHeader HdrProxyAuthorization (withAuthority authS r) r]) + ath + } + let final_req = normalizeRequest defaultOpts rq'' + out ("Sending:\n" ++ show final_req) + e_rsp <- + case p of + NoProxy -> dorequest (reqURIAuth rq'') final_req + Proxy str _ath -> do + let notURI + | null pt || null hst = + URIAuth{ uriUserInfo = "" + , uriRegName = str + , uriPort = "" + } + | otherwise = + URIAuth{ uriUserInfo = "" + , uriRegName = hst + , uriPort = pt + } + -- If the ':' is dropped from port below, dorequest will assume port 80. Leave it! + where (hst, pt) = span (':'/=) str + -- Proxy can take multiple forms - look for http://host:port first, + -- then host:port. Fall back to just the string given (probably a host name). 
+ let proxyURIAuth = + maybe notURI + (\parsed -> maybe notURI id (uriAuthority parsed)) + (parseURI str) + + out $ "proxy uri host: " ++ uriRegName proxyURIAuth ++ ", port: " ++ uriPort proxyURIAuth + dorequest proxyURIAuth final_req + mbMx <- getMaxErrorRetries + case e_rsp of + Left v + | (reqRetries rqState < fromMaybe defaultMaxErrorRetries mbMx) && + (v == ErrorReset || v == ErrorClosed) -> do + --empty connnection pool in case connection has become invalid + modify (\b -> b { bsConnectionPool=[] }) + request' nullVal rqState{reqRetries=succ (reqRetries rqState)} rq + | otherwise -> + return (Left v) + Right rsp -> do + out ("Received:\n" ++ show rsp) + -- add new cookies to browser state + handleCookies uri (uriAuthToString $ reqURIAuth rq) + (retrieveHeaders HdrSetCookie rsp) + -- Deal with "Connection: close" in response. + handleConnectionClose (reqURIAuth rq) (retrieveHeaders HdrConnection rsp) + mbMxAuths <- getMaxAuthAttempts + case rspCode rsp of + (4,0,1) -- Credentials not sent or refused. + | reqDenies rqState > fromMaybe defaultMaxAuthAttempts mbMxAuths -> do + out "401 - credentials again refused; exceeded retry count (2)" + return (Right (uri,rsp)) + | otherwise -> do + out "401 - credentials not supplied or refused; retrying.." 
+ let hdrs = retrieveHeaders HdrWWWAuthenticate rsp + flg <- getAllowBasicAuth + case pickChallenge flg (catMaybes $ map (headerToChallenge uri) hdrs) of + Nothing -> do + out "no challenge" + return (Right (uri,rsp)) {- do nothing -} + Just x -> do + au <- challengeToAuthority uri x + case au of + Nothing -> do + out "no auth" + return (Right (uri,rsp)) {- do nothing -} + Just au' -> do + out "Retrying request with new credentials" + request' nullVal + rqState{ reqDenies = succ(reqDenies rqState) + , reqStopOnDeny = False + } + (insertHeader HdrAuthorization (withAuthority au' rq) rq) + + (4,0,7) -- Proxy Authentication required + | reqDenies rqState > fromMaybe defaultMaxAuthAttempts mbMxAuths -> do + out "407 - proxy authentication required; max deny count exceeeded (2)" + return (Right (uri,rsp)) + | otherwise -> do + out "407 - proxy authentication required" + let hdrs = retrieveHeaders HdrProxyAuthenticate rsp + flg <- getAllowBasicAuth + case pickChallenge flg (catMaybes $ map (headerToChallenge uri) hdrs) of + Nothing -> return (Right (uri,rsp)) {- do nothing -} + Just x -> do + au <- challengeToAuthority uri x + case au of + Nothing -> return (Right (uri,rsp)) {- do nothing -} + Just au' -> do + pxy <- gets bsProxy + case pxy of + NoProxy -> do + err "Proxy authentication required without proxy!" 
+ return (Right (uri,rsp)) + Proxy px _ -> do + out "Retrying with proxy authentication" + setProxy (Proxy px (Just au')) + request' nullVal + rqState{ reqDenies = succ(reqDenies rqState) + , reqStopOnDeny = False + } + rq + + (3,0,x) | x `elem` [2,3,1,7] -> do + out ("30" ++ show x ++ " - redirect") + allow_redirs <- allowRedirect rqState + case allow_redirs of + False -> return (Right (uri,rsp)) + _ -> do + case retrieveHeaders HdrLocation rsp of + [] -> do + err "No Location: header in redirect response" + return (Right (uri,rsp)) + (Header _ u:_) -> + case parseURIReference u of + Nothing -> do + err ("Parse of Location: header in a redirect response failed: " ++ u) + return (Right (uri,rsp)) + Just newURI + | {-uriScheme newURI_abs /= uriScheme uri && -}(not (supportedScheme newURI_abs)) -> do + err ("Unable to handle redirect, unsupported scheme: " ++ show newURI_abs) + return (Right (uri, rsp)) + | otherwise -> do + out ("Redirecting to " ++ show newURI_abs ++ " ...") + + -- Redirect using GET request method, depending on + -- response code. + let toGet = x `elem` [2,3] + method = if toGet then GET else rqMethod rq + rq1 = rq { rqMethod=method, rqURI=newURI_abs } + rq2 = if toGet then (replaceHeader HdrContentLength "0") (rq1 {rqBody = nullVal}) else rq1 + + request' nullVal + rqState{ reqDenies = 0 + , reqRedirects = succ(reqRedirects rqState) + , reqStopOnDeny = True + } + rq2 + where + newURI_abs = uriDefaultTo newURI uri + + (3,0,5) -> + case retrieveHeaders HdrLocation rsp of + [] -> do + err "No Location header in proxy redirect response." 
+ return (Right (uri,rsp)) + (Header _ u:_) -> + case parseURIReference u of + Nothing -> do + err ("Parse of Location header in a proxy redirect response failed: " ++ u) + return (Right (uri,rsp)) + Just newuri -> do + out ("Retrying with proxy " ++ show newuri ++ "...") + setProxy (Proxy (uriToAuthorityString newuri) Nothing) + request' nullVal rqState{ reqDenies = 0 + , reqRedirects = 0 + , reqRetries = succ (reqRetries rqState) + , reqStopOnDeny = True + } + rq + _ -> return (Right (uri,rsp)) + +-- | The internal request handling state machine. +dorequest :: (HStream ty) + => URIAuth + -> Request ty + -> BrowserAction (HandleStream ty) + (Result (Response ty)) +dorequest hst rqst = do + pool <- gets bsConnectionPool + let uPort = uriAuthPort Nothing{-ToDo: feed in complete URL-} hst + conn <- liftIO $ filterM (\c -> c `isTCPConnectedTo` EndPoint (uriRegName hst) uPort) pool + rsp <- + case conn of + [] -> do + out ("Creating new connection to " ++ uriAuthToString hst) + reportEvent OpenConnection (show (rqURI rqst)) + c <- liftIO $ openStream (uriRegName hst) uPort + updateConnectionPool c + dorequest2 c rqst + (c:_) -> do + out ("Recovering connection to " ++ uriAuthToString hst) + reportEvent ReuseConnection (show (rqURI rqst)) + dorequest2 c rqst + case rsp of + Right (Response a b c _) -> + reportEvent (ResponseEnd (a,b,c)) (show (rqURI rqst)) ; _ -> return () + return rsp + where + dorequest2 c r = do + dbg <- gets bsDebug + st <- get + let + onSendComplete = + maybe (return ()) + (\evh -> do + x <- buildBrowserEvent RequestSent (show (rqURI r)) (bsRequestID st) + runBA st (evh x) + return ()) + (bsEvent st) + liftIO $ + maybe (sendHTTP_notify c r onSendComplete) + (\ f -> do + c' <- debugByteStream (f++'-': uriAuthToString hst) c + sendHTTP_notify c' r onSendComplete) + dbg + +updateConnectionPool :: HStream hTy + => HandleStream hTy + -> BrowserAction (HandleStream hTy) () +updateConnectionPool c = do + pool <- gets bsConnectionPool + let len_pool = 
length pool + maxPoolSize <- fromMaybe defaultMaxPoolSize <$> gets bsMaxPoolSize + when (len_pool > maxPoolSize) + (liftIO $ close (last pool)) + let pool' + | len_pool > maxPoolSize = init pool + | otherwise = pool + when (maxPoolSize > 0) $ modify (\b -> b { bsConnectionPool=c:pool' }) + return () + +-- | Default maximum number of open connections we are willing to have active. +defaultMaxPoolSize :: Int +defaultMaxPoolSize = 5 + +cleanConnectionPool :: HStream hTy + => URIAuth -> BrowserAction (HandleStream hTy) () +cleanConnectionPool uri = do + let ep = EndPoint (uriRegName uri) (uriAuthPort Nothing uri) + pool <- gets bsConnectionPool + bad <- liftIO $ mapM (\c -> c `isTCPConnectedTo` ep) pool + let tmp = zip bad pool + newpool = map snd $ filter (not . fst) tmp + toclose = map snd $ filter fst tmp + liftIO $ forM_ toclose close + modify (\b -> b { bsConnectionPool = newpool }) + +handleCookies :: URI -> String -> [Header] -> BrowserAction t () +handleCookies _ _ [] = return () -- cut short the silliness. 
+handleCookies uri dom cookieHeaders = do + when (not $ null errs) + (err $ unlines ("Errors parsing these cookie values: ":errs)) + when (not $ null newCookies) + (out $ foldl (\x y -> x ++ "\n " ++ show y) "Cookies received:" newCookies) + filterfn <- getCookieFilter + newCookies' <- liftIO (filterM (filterfn uri) newCookies) + when (not $ null newCookies') + (out $ "Accepting cookies with names: " ++ unwords (map ckName newCookies')) + mapM_ addCookie newCookies' + where + (errs, newCookies) = processCookieHeaders dom cookieHeaders + +handleConnectionClose :: HStream hTy + => URIAuth -> [Header] + -> BrowserAction (HandleStream hTy) () +handleConnectionClose _ [] = return () +handleConnectionClose uri headers = do + let doClose = any (== "close") $ map headerToConnType headers + when doClose $ cleanConnectionPool uri + where headerToConnType (Header _ t) = map toLower t + +------------------------------------------------------------------ +----------------------- Miscellaneous ---------------------------- +------------------------------------------------------------------ + +allowRedirect :: RequestState -> BrowserAction t Bool +allowRedirect rqState = do + rd <- getAllowRedirects + mbMxRetries <- getMaxRedirects + return (rd && (reqRedirects rqState <= fromMaybe defaultMaxRetries mbMxRetries)) + +-- | Return @True@ iff the package is able to handle requests and responses +-- over it. +supportedScheme :: URI -> Bool +supportedScheme u = uriScheme u == "http:" + +-- | @uriDefaultTo a b@ returns a URI that is consistent with the first +-- argument URI @a@ when read in the context of the second URI @b@. +-- If the second argument is not sufficient context for determining +-- a full URI then anarchy reins. +uriDefaultTo :: URI -> URI -> URI +#if MIN_VERSION_network(2,4,0) +uriDefaultTo a b = a `relativeTo` b +#else +uriDefaultTo a b = maybe a id (a `relativeTo` b) +#endif + + +-- This form junk is completely untested... 
+ +type FormVar = (String,String) + +data Form = Form RequestMethod URI [FormVar] + +formToRequest :: Form -> Request_String +formToRequest (Form m u vs) = + let enc = urlEncodeVars vs + in case m of + GET -> Request { rqMethod=GET + , rqHeaders=[ Header HdrContentLength "0" ] + , rqBody="" + , rqURI=u { uriQuery= '?' : enc } -- What about old query? + } + POST -> Request { rqMethod=POST + , rqHeaders=[ Header HdrContentType "application/x-www-form-urlencoded", + Header HdrContentLength (show $ length enc) ] + , rqBody=enc + , rqURI=u + } + _ -> error ("unexpected request: " ++ show m) + + diff -Nru cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Network/BufferType.hs cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Network/BufferType.hs --- cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Network/BufferType.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Network/BufferType.hs 2016-02-09 22:34:53.000000000 +0000 @@ -0,0 +1,164 @@ +{-# LANGUAGE TypeSynonymInstances #-} +----------------------------------------------------------------------------- +-- | +-- Module : Network.BufferType +-- Description : Abstract representation of request and response buffer types. +-- Copyright : See LICENSE file +-- License : BSD +-- +-- Maintainer : Ganesh Sittampalam +-- Stability : experimental +-- Portability : non-portable (not tested) +-- +-- In order to give the user freedom in how request and response content +-- is represented, a sufficiently abstract representation is needed of +-- these internally. The "Network.BufferType" module provides this, defining +-- the 'BufferType' class and its ad-hoc representation of buffer operations +-- via the 'BufferOp' record. +-- +-- This module provides definitions for the standard buffer types that the +-- package supports, i.e., for @String@ and @ByteString@ (strict and lazy.) 
+-- +----------------------------------------------------------------------------- +module Network.BufferType + ( + BufferType(..) + + , BufferOp(..) + , strictBufferOp + , lazyBufferOp + , stringBufferOp + ) where + + +import qualified Data.ByteString as Strict hiding ( unpack, pack, span ) +import qualified Data.ByteString.Char8 as Strict ( unpack, pack, span ) +import qualified Data.ByteString.Lazy as Lazy hiding ( pack, unpack,span ) +import qualified Data.ByteString.Lazy.Char8 as Lazy ( pack, unpack, span ) +import System.IO ( Handle ) +import Data.Word ( Word8 ) + +import Network.HTTP.Utils ( crlf, lf ) + +-- | The @BufferType@ class encodes, in a mixed-mode way, the interface +-- that the library requires to operate over data embedded in HTTP +-- requests and responses. That is, we use explicit dictionaries +-- for the operations, but overload the name of the dicts themselves. +-- +class BufferType bufType where + bufferOps :: BufferOp bufType + +instance BufferType Lazy.ByteString where + bufferOps = lazyBufferOp + +instance BufferType Strict.ByteString where + bufferOps = strictBufferOp + +instance BufferType String where + bufferOps = stringBufferOp + +-- | @BufferOp@ encodes the I/O operations of the underlying buffer over +-- a Handle in an (explicit) dictionary type. May not be needed, but gives +-- us flexibility in explicit overriding and wrapping up of these methods. +-- +-- Along with IO operations is an ad-hoc collection of functions for working +-- with these abstract buffers, as needed by the internals of the code +-- that processes requests and responses. +-- +-- We supply three default @BufferOp@ values, for @String@ along with the +-- strict and lazy versions of @ByteString@. 
To add others, provide @BufferOp@ +-- definitions for +data BufferOp a + = BufferOp + { buf_hGet :: Handle -> Int -> IO a + , buf_hGetContents :: Handle -> IO a + , buf_hPut :: Handle -> a -> IO () + , buf_hGetLine :: Handle -> IO a + , buf_empty :: a + , buf_append :: a -> a -> a + , buf_concat :: [a] -> a + , buf_fromStr :: String -> a + , buf_toStr :: a -> String + , buf_snoc :: a -> Word8 -> a + , buf_splitAt :: Int -> a -> (a,a) + , buf_span :: (Char -> Bool) -> a -> (a,a) + , buf_isLineTerm :: a -> Bool + , buf_isEmpty :: a -> Bool + } + +instance Eq (BufferOp a) where + _ == _ = False + +-- | @strictBufferOp@ is the 'BufferOp' definition over @ByteString@s, +-- the non-lazy kind. +strictBufferOp :: BufferOp Strict.ByteString +strictBufferOp = + BufferOp + { buf_hGet = Strict.hGet + , buf_hGetContents = Strict.hGetContents + , buf_hPut = Strict.hPut + , buf_hGetLine = Strict.hGetLine + , buf_append = Strict.append + , buf_concat = Strict.concat + , buf_fromStr = Strict.pack + , buf_toStr = Strict.unpack + , buf_snoc = Strict.snoc + , buf_splitAt = Strict.splitAt + , buf_span = Strict.span + , buf_empty = Strict.empty + , buf_isLineTerm = \ b -> Strict.length b == 2 && p_crlf == b || + Strict.length b == 1 && p_lf == b + , buf_isEmpty = Strict.null + } + where + p_crlf = Strict.pack crlf + p_lf = Strict.pack lf + +-- | @lazyBufferOp@ is the 'BufferOp' definition over @ByteString@s, +-- the non-strict kind. 
+lazyBufferOp :: BufferOp Lazy.ByteString +lazyBufferOp = + BufferOp + { buf_hGet = Lazy.hGet + , buf_hGetContents = Lazy.hGetContents + , buf_hPut = Lazy.hPut + , buf_hGetLine = \ h -> Strict.hGetLine h >>= \ l -> return (Lazy.fromChunks [l]) + , buf_append = Lazy.append + , buf_concat = Lazy.concat + , buf_fromStr = Lazy.pack + , buf_toStr = Lazy.unpack + , buf_snoc = Lazy.snoc + , buf_splitAt = \ i x -> Lazy.splitAt (fromIntegral i) x + , buf_span = Lazy.span + , buf_empty = Lazy.empty + , buf_isLineTerm = \ b -> Lazy.length b == 2 && p_crlf == b || + Lazy.length b == 1 && p_lf == b + , buf_isEmpty = Lazy.null + } + where + p_crlf = Lazy.pack crlf + p_lf = Lazy.pack lf + +-- | @stringBufferOp@ is the 'BufferOp' definition over @String@s. +-- It is defined in terms of @strictBufferOp@ operations, +-- unpacking/converting to @String@ when needed. +stringBufferOp :: BufferOp String +stringBufferOp =BufferOp + { buf_hGet = \ h n -> buf_hGet strictBufferOp h n >>= return . Strict.unpack + , buf_hGetContents = \ h -> buf_hGetContents strictBufferOp h >>= return . Strict.unpack + , buf_hPut = \ h s -> buf_hPut strictBufferOp h (Strict.pack s) + , buf_hGetLine = \ h -> buf_hGetLine strictBufferOp h >>= return . 
Strict.unpack + , buf_append = (++) + , buf_concat = concat + , buf_fromStr = id + , buf_toStr = id + , buf_snoc = \ a x -> a ++ [toEnum (fromIntegral x)] + , buf_splitAt = splitAt + , buf_span = \ p a -> + case Strict.span p (Strict.pack a) of + (x,y) -> (Strict.unpack x, Strict.unpack y) + , buf_empty = [] + , buf_isLineTerm = \ b -> b == crlf || b == lf + , buf_isEmpty = null + } + diff -Nru cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Network/HTTP/Auth.hs cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Network/HTTP/Auth.hs --- cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Network/HTTP/Auth.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Network/HTTP/Auth.hs 2016-02-09 22:34:53.000000000 +0000 @@ -0,0 +1,221 @@ +{-# LANGUAGE CPP #-} +----------------------------------------------------------------------------- +-- | +-- Module : Network.HTTP.Auth +-- Copyright : See LICENSE file +-- License : BSD +-- +-- Maintainer : Ganesh Sittampalam +-- Stability : experimental +-- Portability : non-portable (not tested) +-- +-- Representing HTTP Auth values in Haskell. +-- Right now, it contains mostly functionality needed by 'Network.Browser'. +-- +----------------------------------------------------------------------------- +module Network.HTTP.Auth + ( Authority(..) + , Algorithm(..) + , Challenge(..) + , Qop(..) + + , headerToChallenge -- :: URI -> Header -> Maybe Challenge + , withAuthority -- :: Authority -> Request ty -> String + ) where + +import Network.URI +import Network.HTTP.Base +import Network.HTTP.Utils +import Network.HTTP.Headers ( Header(..) 
) +import qualified Network.HTTP.MD5Aux as MD5 (md5s, Str(Str)) +import qualified Network.HTTP.Base64 as Base64 (encode) +import Text.ParserCombinators.Parsec + ( Parser, char, many, many1, satisfy, parse, spaces, sepBy1 ) + +import Data.Char +import Data.Maybe +import Data.Word ( Word8 ) + +-- | @Authority@ specifies the HTTP Authentication method to use for +-- a given domain/realm; @Basic@ or @Digest@. +data Authority + = AuthBasic { auRealm :: String + , auUsername :: String + , auPassword :: String + , auSite :: URI + } + | AuthDigest{ auRealm :: String + , auUsername :: String + , auPassword :: String + , auNonce :: String + , auAlgorithm :: Maybe Algorithm + , auDomain :: [URI] + , auOpaque :: Maybe String + , auQop :: [Qop] + } + + +data Challenge + = ChalBasic { chRealm :: String } + | ChalDigest { chRealm :: String + , chDomain :: [URI] + , chNonce :: String + , chOpaque :: Maybe String + , chStale :: Bool + , chAlgorithm ::Maybe Algorithm + , chQop :: [Qop] + } + +-- | @Algorithm@ controls the digest algorithm to, @MD5@ or @MD5Session@. +data Algorithm = AlgMD5 | AlgMD5sess + deriving(Eq) + +instance Show Algorithm where + show AlgMD5 = "md5" + show AlgMD5sess = "md5-sess" + +-- | +data Qop = QopAuth | QopAuthInt + deriving(Eq,Show) + +-- | @withAuthority auth req@ generates a credentials value from the @auth@ 'Authority', +-- in the context of the given request. +-- +-- If a client nonce was to be used then this function might need to be of type ... 
-> BrowserAction String +withAuthority :: Authority -> Request ty -> String +withAuthority a rq = case a of + AuthBasic{} -> "Basic " ++ base64encode (auUsername a ++ ':' : auPassword a) + AuthDigest{} -> + "Digest " ++ + concat [ "username=" ++ quo (auUsername a) + , ",realm=" ++ quo (auRealm a) + , ",nonce=" ++ quo (auNonce a) + , ",uri=" ++ quo digesturi + , ",response=" ++ quo rspdigest + -- plus optional stuff: + , fromMaybe "" (fmap (\ alg -> ",algorithm=" ++ quo (show alg)) (auAlgorithm a)) + , fromMaybe "" (fmap (\ o -> ",opaque=" ++ quo o) (auOpaque a)) + , if null (auQop a) then "" else ",qop=auth" + ] + where + quo s = '"':s ++ "\"" + + rspdigest = map toLower (kd (md5 a1) (noncevalue ++ ":" ++ md5 a2)) + + a1, a2 :: String + a1 = auUsername a ++ ":" ++ auRealm a ++ ":" ++ auPassword a + + {- + If the "qop" directive's value is "auth" or is unspecified, then A2 + is: + A2 = Method ":" digest-uri-value + If the "qop" value is "auth-int", then A2 is: + A2 = Method ":" digest-uri-value ":" H(entity-body) + -} + a2 = show (rqMethod rq) ++ ":" ++ digesturi + + digesturi = show (rqURI rq) + noncevalue = auNonce a + +type Octet = Word8 + +-- FIXME: these probably only work right for latin-1 strings +stringToOctets :: String -> [Octet] +stringToOctets = map (fromIntegral . fromEnum) + +base64encode :: String -> String +base64encode = Base64.encode . stringToOctets + +md5 :: String -> String +md5 = MD5.md5s . MD5.Str + +kd :: String -> String -> String +kd a b = md5 (a ++ ":" ++ b) + + + + +-- | @headerToChallenge base www_auth@ tries to convert the @WWW-Authenticate@ header +-- @www_auth@ into a 'Challenge' value. 
+headerToChallenge :: URI -> Header -> Maybe Challenge +headerToChallenge baseURI (Header _ str) = + case parse challenge "" str of + Left{} -> Nothing + Right (name,props) -> case name of + "basic" -> mkBasic props + "digest" -> mkDigest props + _ -> Nothing + where + challenge :: Parser (String,[(String,String)]) + challenge = + do { nme <- word + ; spaces + ; pps <- cprops + ; return (map toLower nme,pps) + } + + cprops = sepBy1 cprop comma + + comma = do { spaces ; _ <- char ',' ; spaces } + + cprop = + do { nm <- word + ; _ <- char '=' + ; val <- quotedstring + ; return (map toLower nm,val) + } + + mkBasic, mkDigest :: [(String,String)] -> Maybe Challenge + + mkBasic params = fmap ChalBasic (lookup "realm" params) + + mkDigest params = + -- with Maybe monad + do { r <- lookup "realm" params + ; n <- lookup "nonce" params + ; return $ + ChalDigest { chRealm = r + , chDomain = (annotateURIs + $ map parseURI + $ words + $ fromMaybe [] + $ lookup "domain" params) + , chNonce = n + , chOpaque = lookup "opaque" params + , chStale = "true" == (map toLower + $ fromMaybe "" (lookup "stale" params)) + , chAlgorithm= readAlgorithm (fromMaybe "MD5" $ lookup "algorithm" params) + , chQop = readQop (fromMaybe "" $ lookup "qop" params) + } + } + + annotateURIs :: [Maybe URI] -> [URI] +#if MIN_VERSION_network(2,4,0) + annotateURIs = map (`relativeTo` baseURI) . catMaybes +#else + annotateURIs = (map (\u -> fromMaybe u (u `relativeTo` baseURI))) . catMaybes +#endif + + -- Change These: + readQop :: String -> [Qop] + readQop = catMaybes . (map strToQop) . (splitBy ',') + + strToQop qs = case map toLower (trim qs) of + "auth" -> Just QopAuth + "auth-int" -> Just QopAuthInt + _ -> Nothing + + readAlgorithm astr = case map toLower (trim astr) of + "md5" -> Just AlgMD5 + "md5-sess" -> Just AlgMD5sess + _ -> Nothing + +word, quotedstring :: Parser String +quotedstring = + do { _ <- char '"' -- " + ; str <- many (satisfy $ not . 
(=='"')) + ; _ <- char '"' + ; return str + } + +word = many1 (satisfy (\x -> isAlphaNum x || x=='_' || x=='.' || x=='-' || x==':')) diff -Nru cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Network/HTTP/Base64.hs cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Network/HTTP/Base64.hs --- cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Network/HTTP/Base64.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Network/HTTP/Base64.hs 2016-02-09 22:34:53.000000000 +0000 @@ -0,0 +1,282 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Codec.Binary.Base64 +-- Copyright : (c) Dominic Steinitz 2005, Warrick Gray 2002 +-- License : BSD-style (see the file ReadMe.tex) +-- +-- Maintainer : dominic.steinitz@blueyonder.co.uk +-- Stability : experimental +-- Portability : portable +-- +-- Base64 encoding and decoding functions provided by Warwick Gray. +-- See +-- and . +-- +----------------------------------------------------------------------------- + +module Network.HTTP.Base64 + ( encode + , decode + , chop72 + , Octet + ) where + +{------------------------------------------------------------------------ +This is what RFC2045 had to say: + +6.8. Base64 Content-Transfer-Encoding + + The Base64 Content-Transfer-Encoding is designed to represent + arbitrary sequences of octets in a form that need not be humanly + readable. The encoding and decoding algorithms are simple, but the + encoded data are consistently only about 33 percent larger than the + unencoded data. This encoding is virtually identical to the one used + in Privacy Enhanced Mail (PEM) applications, as defined in RFC 1421. + + A 65-character subset of US-ASCII is used, enabling 6 bits to be + represented per printable character. (The extra 65th character, "=", + is used to signify a special processing function.) 
+ + NOTE: This subset has the important property that it is represented + identically in all versions of ISO 646, including US-ASCII, and all + characters in the subset are also represented identically in all + versions of EBCDIC. Other popular encodings, such as the encoding + used by the uuencode utility, Macintosh binhex 4.0 [RFC-1741], and + the base85 encoding specified as part of Level 2 PostScript, do not + share these properties, and thus do not fulfill the portability + requirements a binary transport encoding for mail must meet. + + The encoding process represents 24-bit groups of input bits as output + strings of 4 encoded characters. Proceeding from left to right, a + 24-bit input group is formed by concatenating 3 8bit input groups. + These 24 bits are then treated as 4 concatenated 6-bit groups, each + of which is translated into a single digit in the base64 alphabet. + When encoding a bit stream via the base64 encoding, the bit stream + must be presumed to be ordered with the most-significant-bit first. + That is, the first bit in the stream will be the high-order bit in + the first 8bit byte, and the eighth bit will be the low-order bit in + the first 8bit byte, and so on. + + Each 6-bit group is used as an index into an array of 64 printable + characters. The character referenced by the index is placed in the + output string. These characters, identified in Table 1, below, are + selected so as to be universally representable, and the set excludes + characters with particular significance to SMTP (e.g., ".", CR, LF) + and to the multipart boundary delimiters defined in RFC 2046 (e.g., + "-"). 
+ + + + Table 1: The Base64 Alphabet + + Value Encoding Value Encoding Value Encoding Value Encoding + 0 A 17 R 34 i 51 z + 1 B 18 S 35 j 52 0 + 2 C 19 T 36 k 53 1 + 3 D 20 U 37 l 54 2 + 4 E 21 V 38 m 55 3 + 5 F 22 W 39 n 56 4 + 6 G 23 X 40 o 57 5 + 7 H 24 Y 41 p 58 6 + 8 I 25 Z 42 q 59 7 + 9 J 26 a 43 r 60 8 + 10 K 27 b 44 s 61 9 + 11 L 28 c 45 t 62 + + 12 M 29 d 46 u 63 / + 13 N 30 e 47 v + 14 O 31 f 48 w (pad) = + 15 P 32 g 49 x + 16 Q 33 h 50 y + + The encoded output stream must be represented in lines of no more + than 76 characters each. All line breaks or other characters not + found in Table 1 must be ignored by decoding software. In base64 + data, characters other than those in Table 1, line breaks, and other + white space probably indicate a transmission error, about which a + warning message or even a message rejection might be appropriate + under some circumstances. + + Special processing is performed if fewer than 24 bits are available + at the end of the data being encoded. A full encoding quantum is + always completed at the end of a body. When fewer than 24 input bits + are available in an input group, zero bits are added (on the right) + to form an integral number of 6-bit groups. Padding at the end of + the data is performed using the "=" character. Since all base64 + input is an integral number of octets, only the following cases can + arise: (1) the final quantum of encoding input is an integral + multiple of 24 bits; here, the final unit of encoded output will be + an integral multiple of 4 characters with no "=" padding, (2) the + final quantum of encoding input is exactly 8 bits; here, the final + unit of encoded output will be two characters followed by two "=" + padding characters, or (3) the final quantum of encoding input is + exactly 16 bits; here, the final unit of encoded output will be three + characters followed by one "=" padding character. 
+ + Because it is used only for padding at the end of the data, the + occurrence of any "=" characters may be taken as evidence that the + end of the data has been reached (without truncation in transit). No + such assurance is possible, however, when the number of octets + transmitted was a multiple of three and no "=" characters are + present. + + Any characters outside of the base64 alphabet are to be ignored in + base64-encoded data. + + Care must be taken to use the proper octets for line breaks if base64 + encoding is applied directly to text material that has not been + converted to canonical form. In particular, text line breaks must be + converted into CRLF sequences prior to base64 encoding. The + important thing to note is that this may be done directly by the + encoder rather than in a prior canonicalization step in some + implementations. + + NOTE: There is no need to worry about quoting potential boundary + delimiters within base64-encoded bodies within multipart entities + because no hyphen characters are used in the base64 encoding. + +----------------------------------------------------------------------------} + +{- + +The following properties should hold: + + decode . encode = id + decode . chop72 . encode = id + +I.E. Both "encode" and "chop72 . encode" are valid methods of encoding input, +the second variation corresponds better with the RFC above, but outside of +MIME applications might be undesireable. + + +But: The Haskell98 Char type is at least 16bits (and often 32), these implementations assume only + 8 significant bits, which is more than enough for US-ASCII. 
+-} + + +import Data.Array (Array, array, (!)) +import Data.Bits (shiftL, shiftR, (.&.), (.|.)) +import Data.Char (chr, ord) +import Data.Word (Word8) + +type Octet = Word8 + +encodeArray :: Array Int Char +encodeArray = array (0,64) + [ (0,'A'), (1,'B'), (2,'C'), (3,'D'), (4,'E'), (5,'F') + , (6,'G'), (7,'H'), (8,'I'), (9,'J'), (10,'K'), (11,'L') + , (12,'M'), (13,'N'), (14,'O'), (15,'P'), (16,'Q'), (17,'R') + , (18,'S'), (19,'T'), (20,'U'), (21,'V'), (22,'W'), (23,'X') + , (24,'Y'), (25,'Z'), (26,'a'), (27,'b'), (28,'c'), (29,'d') + , (30,'e'), (31,'f'), (32,'g'), (33,'h'), (34,'i'), (35,'j') + , (36,'k'), (37,'l'), (38,'m'), (39,'n'), (40,'o'), (41,'p') + , (42,'q'), (43,'r'), (44,'s'), (45,'t'), (46,'u'), (47,'v') + , (48,'w'), (49,'x'), (50,'y'), (51,'z'), (52,'0'), (53,'1') + , (54,'2'), (55,'3'), (56,'4'), (57,'5'), (58,'6'), (59,'7') + , (60,'8'), (61,'9'), (62,'+'), (63,'/') ] + + +-- Convert between 4 base64 (6bits ea) integers and 1 ordinary integer (32 bits) +-- clearly the upmost/leftmost 8 bits of the answer are 0. +-- Hack Alert: In the last entry of the answer, the upper 8 bits encode +-- the integer number of 6bit groups encoded in that integer, ie 1, 2, 3. +-- 0 represents a 4 :( +int4_char3 :: [Int] -> [Char] +int4_char3 (a:b:c:d:t) = + let n = (a `shiftL` 18 .|. b `shiftL` 12 .|. c `shiftL` 6 .|. d) + in (chr (n `shiftR` 16 .&. 0xff)) + : (chr (n `shiftR` 8 .&. 0xff)) + : (chr (n .&. 0xff)) : int4_char3 t + +int4_char3 [a,b,c] = + let n = (a `shiftL` 18 .|. b `shiftL` 12 .|. c `shiftL` 6) + in [ (chr (n `shiftR` 16 .&. 0xff)) + , (chr (n `shiftR` 8 .&. 0xff)) ] + +int4_char3 [a,b] = + let n = (a `shiftL` 18 .|. b `shiftL` 12) + in [ (chr (n `shiftR` 16 .&. 0xff)) ] + +int4_char3 [_] = error "Network.HTTP.Base64.int4_char3: impossible number of Ints." + +int4_char3 [] = [] + + + + +-- Convert triplets of characters to +-- 4 base64 integers. 
The last entries +-- in the list may not produce 4 integers, +-- a trailing 2 character group gives 3 integers, +-- while a trailing single character gives 2 integers. +char3_int4 :: [Char] -> [Int] +char3_int4 (a:b:c:t) + = let n = (ord a `shiftL` 16 .|. ord b `shiftL` 8 .|. ord c) + in (n `shiftR` 18 .&. 0x3f) : (n `shiftR` 12 .&. 0x3f) : (n `shiftR` 6 .&. 0x3f) : (n .&. 0x3f) : char3_int4 t + +char3_int4 [a,b] + = let n = (ord a `shiftL` 16 .|. ord b `shiftL` 8) + in [ (n `shiftR` 18 .&. 0x3f) + , (n `shiftR` 12 .&. 0x3f) + , (n `shiftR` 6 .&. 0x3f) ] + +char3_int4 [a] + = let n = (ord a `shiftL` 16) + in [(n `shiftR` 18 .&. 0x3f),(n `shiftR` 12 .&. 0x3f)] + +char3_int4 [] = [] + + +-- Retrieve base64 char, given an array index integer in the range [0..63] +enc1 :: Int -> Char +enc1 ch = encodeArray!ch + + +-- | Cut up a string into 72 char lines, each line terminated by CRLF. + +chop72 :: String -> String +chop72 str = let (bgn,end) = splitAt 70 str + in if null end then bgn else "\r\n" ++ chop72 end + + +-- Pads a base64 code to a multiple of 4 characters, using the special +-- '=' character. +quadruplets :: [Char] -> [Char] +quadruplets (a:b:c:d:t) = a:b:c:d:quadruplets t +quadruplets [a,b,c] = [a,b,c,'='] -- 16bit tail unit +quadruplets [a,b] = [a,b,'=','='] -- 8bit tail unit +quadruplets [_] = error "Network.HTTP.Base64.quadruplets: impossible number of characters." +quadruplets [] = [] -- 24bit tail unit + + +enc :: [Int] -> [Char] +enc = quadruplets . map enc1 + + +dcd :: String -> [Int] +dcd [] = [] +dcd (h:t) + | h <= 'Z' && h >= 'A' = ord h - ord 'A' : dcd t + | h >= '0' && h <= '9' = ord h - ord '0' + 52 : dcd t + | h >= 'a' && h <= 'z' = ord h - ord 'a' + 26 : dcd t + | h == '+' = 62 : dcd t + | h == '/' = 63 : dcd t + | h == '=' = [] -- terminate data stream + | otherwise = dcd t + + +-- Principal encoding and decoding functions. + +encode :: [Octet] -> String +encode = enc . char3_int4 . 
(map (chr .fromIntegral)) + +{- +prop_base64 os = + os == (f . g . h) os + where types = (os :: [Word8]) + f = map (fromIntegral. ord) + g = decode . encode + h = map (chr . fromIntegral) +-} + +decode :: String -> [Octet] +decode = (map (fromIntegral . ord)) . int4_char3 . dcd diff -Nru cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Network/HTTP/Base.hs cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Network/HTTP/Base.hs --- cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Network/HTTP/Base.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Network/HTTP/Base.hs 2016-02-09 22:34:53.000000000 +0000 @@ -0,0 +1,994 @@ +{-# LANGUAGE ScopedTypeVariables #-} +----------------------------------------------------------------------------- +-- | +-- Module : Network.HTTP.Base +-- Copyright : See LICENSE file +-- License : BSD +-- +-- Maintainer : Ganesh Sittampalam +-- Stability : experimental +-- Portability : non-portable (not tested) +-- +-- Definitions of @Request@ and @Response@ types along with functions +-- for normalizing them. It is assumed to be an internal module; user +-- code should, if possible, import @Network.HTTP@ to access the functionality +-- that this module provides. +-- +-- Additionally, the module exports internal functions for working with URLs, +-- and for handling the processing of requests and responses coming back. +-- +----------------------------------------------------------------------------- +module Network.HTTP.Base + ( + -- ** Constants + httpVersion -- :: String + + -- ** HTTP + , Request(..) + , Response(..) + , RequestMethod(..) + + , Request_String + , Response_String + , HTTPRequest + , HTTPResponse + + -- ** URL Encoding + , urlEncode + , urlDecode + , urlEncodeVars + + -- ** URI authority parsing + , URIAuthority(..) 
+ , parseURIAuthority + + -- internal + , uriToAuthorityString -- :: URI -> String + , uriAuthToString -- :: URIAuth -> String + , uriAuthPort -- :: Maybe URI -> URIAuth -> Int + , reqURIAuth -- :: Request ty -> URIAuth + + , parseResponseHead -- :: [String] -> Result ResponseData + , parseRequestHead -- :: [String] -> Result RequestData + + , ResponseNextStep(..) + , matchResponse + , ResponseData + , ResponseCode + , RequestData + + , NormalizeRequestOptions(..) + , defaultNormalizeRequestOptions -- :: NormalizeRequestOptions ty + , RequestNormalizer + + , normalizeRequest -- :: NormalizeRequestOptions ty -> Request ty -> Request ty + + , splitRequestURI + + , getAuth + , normalizeRequestURI + , normalizeHostHeader + , findConnClose + + -- internal export (for the use by Network.HTTP.{Stream,ByteStream} ) + , linearTransfer + , hopefulTransfer + , chunkedTransfer + , uglyDeathTransfer + , readTillEmpty1 + , readTillEmpty2 + + , defaultGETRequest + , defaultGETRequest_ + , mkRequest + , setRequestBody + + , defaultUserAgent + , httpPackageVersion + , libUA {- backwards compatibility, will disappear..soon -} + + , catchIO + , catchIO_ + , responseParseError + + , getRequestVersion + , getResponseVersion + , setRequestVersion + , setResponseVersion + + , failHTTPS + + ) where + +import Network.URI + ( URI(uriAuthority, uriPath, uriScheme) + , URIAuth(URIAuth, uriUserInfo, uriRegName, uriPort) + , parseURIReference + ) + +import Control.Monad ( guard ) +import Control.Monad.Error () +import Data.Bits ( (.&.), (.|.), shiftL, shiftR ) +import Data.Word ( Word8 ) +import Data.Char ( digitToInt, intToDigit, toLower, isDigit, + isAscii, isAlphaNum, ord, chr ) +import Data.List ( partition, find ) +import Data.Maybe ( listToMaybe, fromMaybe ) +import Numeric ( readHex ) + +import Network.Stream +import Network.BufferType ( BufferOp(..), BufferType(..) 
) +import Network.HTTP.Headers +import Network.HTTP.Utils ( trim, crlf, sp, readsOne ) +import qualified Network.HTTP.Base64 as Base64 (encode) + +import Text.Read.Lex (readDecP) +import Text.ParserCombinators.ReadP + ( ReadP, readP_to_S, char, (<++), look, munch, munch1 ) + +import Control.Exception as Exception (catch, IOException) + +import qualified Paths_HTTP as Self (version) +import Data.Version (showVersion) + +----------------------------------------------------------------- +------------------ URI Authority parsing ------------------------ +----------------------------------------------------------------- + +data URIAuthority = URIAuthority { user :: Maybe String, + password :: Maybe String, + host :: String, + port :: Maybe Int + } deriving (Eq,Show) + +-- | Parse the authority part of a URL. +-- +-- > RFC 1732, section 3.1: +-- > +-- > //:@:/ +-- > Some or all of the parts ":@", ":", +-- > ":", and "/" may be excluded. +parseURIAuthority :: String -> Maybe URIAuthority +parseURIAuthority s = listToMaybe (map fst (readP_to_S pURIAuthority s)) + + +pURIAuthority :: ReadP URIAuthority +pURIAuthority = do + (u,pw) <- (pUserInfo `before` char '@') + <++ return (Nothing, Nothing) + h <- rfc2732host <++ munch (/=':') + p <- orNothing (char ':' >> readDecP) + look >>= guard . null + return URIAuthority{ user=u, password=pw, host=h, port=p } + +-- RFC2732 adds support for '[literal-ipv6-address]' in the host part of a URL +rfc2732host :: ReadP String +rfc2732host = do + _ <- char '[' + res <- munch1 (/=']') + _ <- char ']' + return res + +pUserInfo :: ReadP (Maybe String, Maybe String) +pUserInfo = do + u <- orNothing (munch (`notElem` ":@")) + p <- orNothing (char ':' >> munch (/='@')) + return (u,p) + +before :: Monad m => m a -> m b -> m a +before a b = a >>= \x -> b >> return x + +orNothing :: ReadP a -> ReadP (Maybe a) +orNothing p = fmap Just p <++ return Nothing + +-- This function duplicates old Network.URI.authority behaviour. 
+uriToAuthorityString :: URI -> String +uriToAuthorityString u = maybe "" uriAuthToString (uriAuthority u) + +uriAuthToString :: URIAuth -> String +uriAuthToString ua = + concat [ uriUserInfo ua + , uriRegName ua + , uriPort ua + ] + +uriAuthPort :: Maybe URI -> URIAuth -> Int +uriAuthPort mbURI u = + case uriPort u of + (':':s) -> readsOne id (default_port mbURI) s + _ -> default_port mbURI + where + default_port Nothing = default_http + default_port (Just url) = + case map toLower $ uriScheme url of + "http:" -> default_http + "https:" -> default_https + -- todo: refine + _ -> default_http + + default_http = 80 + default_https = 443 + +failHTTPS :: Monad m => URI -> m () +failHTTPS uri + | map toLower (uriScheme uri) == "https:" = fail "https not supported" + | otherwise = return () + +-- Fish out the authority from a possibly normalized Request, i.e., +-- the information may either be in the request's URI or inside +-- the Host: header. +reqURIAuth :: Request ty -> URIAuth +reqURIAuth req = + case uriAuthority (rqURI req) of + Just ua -> ua + _ -> case lookupHeader HdrHost (rqHeaders req) of + Nothing -> error ("reqURIAuth: no URI authority for: " ++ show req) + Just h -> + case toHostPort h of + (ht,p) -> URIAuth { uriUserInfo = "" + , uriRegName = ht + , uriPort = p + } + where + -- Note: just in case you're wondering..the convention is to include the ':' + -- in the port part.. + toHostPort h = break (==':') h + +----------------------------------------------------------------- +------------------ HTTP Messages -------------------------------- +----------------------------------------------------------------- + + +-- Protocol version +httpVersion :: String +httpVersion = "HTTP/1.1" + + +-- | The HTTP request method, to be used in the 'Request' object. +-- We are missing a few of the stranger methods, but these are +-- not really necessary until we add full TLS. 
+data RequestMethod = HEAD | PUT | GET | POST | DELETE | OPTIONS | TRACE | CONNECT | Custom String + deriving(Eq) + +instance Show RequestMethod where + show x = + case x of + HEAD -> "HEAD" + PUT -> "PUT" + GET -> "GET" + POST -> "POST" + DELETE -> "DELETE" + OPTIONS -> "OPTIONS" + TRACE -> "TRACE" + CONNECT -> "CONNECT" + Custom c -> c + +rqMethodMap :: [(String, RequestMethod)] +rqMethodMap = [("HEAD", HEAD), + ("PUT", PUT), + ("GET", GET), + ("POST", POST), + ("DELETE", DELETE), + ("OPTIONS", OPTIONS), + ("TRACE", TRACE), + ("CONNECT", CONNECT)] + +-- +-- for backwards-ish compatibility; suggest +-- migrating to new Req/Resp by adding type param. +-- +type Request_String = Request String +type Response_String = Response String + +-- Hmm..I really want to use these for the record +-- type, but it will upset codebases wanting to +-- migrate (and live with using pre-HTTPbis versions.) +type HTTPRequest a = Request a +type HTTPResponse a = Response a + +-- | An HTTP Request. +-- The 'Show' instance of this type is used for message serialisation, +-- which means no body data is output. +data Request a = + Request { rqURI :: URI -- ^ might need changing in future + -- 1) to support '*' uri in OPTIONS request + -- 2) transparent support for both relative + -- & absolute uris, although this should + -- already work (leave scheme & host parts empty). + , rqMethod :: RequestMethod + , rqHeaders :: [Header] + , rqBody :: a + } + +-- Notice that request body is not included, +-- this show function is used to serialise +-- a request for the transport link, we send +-- the body separately where possible. 
+instance Show (Request a) where + show req@(Request u m h _) = + show m ++ sp ++ alt_uri ++ sp ++ ver ++ crlf + ++ foldr (++) [] (map show (dropHttpVersion h)) ++ crlf + where + ver = fromMaybe httpVersion (getRequestVersion req) + alt_uri = show $ if null (uriPath u) || head (uriPath u) /= '/' + then u { uriPath = '/' : uriPath u } + else u + +instance HasHeaders (Request a) where + getHeaders = rqHeaders + setHeaders rq hdrs = rq { rqHeaders=hdrs } + +-- | For easy pattern matching, HTTP response codes @xyz@ are +-- represented as @(x,y,z)@. +type ResponseCode = (Int,Int,Int) + +-- | @ResponseData@ contains the head of a response payload; +-- HTTP response code, accompanying text description + header +-- fields. +type ResponseData = (ResponseCode,String,[Header]) + +-- | @RequestData@ contains the head of a HTTP request; method, +-- its URL along with the auxillary/supporting header data. +type RequestData = (RequestMethod,URI,[Header]) + +-- | An HTTP Response. +-- The 'Show' instance of this type is used for message serialisation, +-- which means no body data is output, additionally the output will +-- show an HTTP version of 1.1 instead of the actual version returned +-- by a server. 
+data Response a = + Response { rspCode :: ResponseCode + , rspReason :: String + , rspHeaders :: [Header] + , rspBody :: a + } + +-- This is an invalid representation of a received response, +-- since we have made the assumption that all responses are HTTP/1.1 +instance Show (Response a) where + show rsp@(Response (a,b,c) reason headers _) = + ver ++ ' ' : map intToDigit [a,b,c] ++ ' ' : reason ++ crlf + ++ foldr (++) [] (map show (dropHttpVersion headers)) ++ crlf + where + ver = fromMaybe httpVersion (getResponseVersion rsp) + +instance HasHeaders (Response a) where + getHeaders = rspHeaders + setHeaders rsp hdrs = rsp { rspHeaders=hdrs } + + +------------------------------------------------------------------ +------------------ Request Building ------------------------------ +------------------------------------------------------------------ + +-- | Deprecated. Use 'defaultUserAgent' +libUA :: String +libUA = "hs-HTTP-4000.0.9" +{-# DEPRECATED libUA "Use defaultUserAgent instead (but note the user agent name change)" #-} + +-- | A default user agent string. The string is @\"haskell-HTTP/$version\"@ +-- where @$version@ is the version of this HTTP package. +-- +defaultUserAgent :: String +defaultUserAgent = "haskell-HTTP/" ++ httpPackageVersion + +-- | The version of this HTTP package as a string, e.g. @\"4000.1.2\"@. This +-- may be useful to include in a user agent string so that you can determine +-- from server logs what version of this package HTTP clients are using. +-- This can be useful for tracking down HTTP compatibility quirks. +-- +httpPackageVersion :: String +httpPackageVersion = showVersion Self.version + +defaultGETRequest :: URI -> Request_String +defaultGETRequest uri = defaultGETRequest_ uri + +defaultGETRequest_ :: BufferType a => URI -> Request a +defaultGETRequest_ uri = mkRequest GET uri + +-- | 'mkRequest method uri' constructs a well formed +-- request for the given HTTP method and URI. 
It does not +-- normalize the URI for the request _nor_ add the required +-- Host: header. That is done either explicitly by the user +-- or when requests are normalized prior to transmission. +mkRequest :: BufferType ty => RequestMethod -> URI -> Request ty +mkRequest meth uri = req + where + req = + Request { rqURI = uri + , rqBody = empty + , rqHeaders = [ Header HdrContentLength "0" + , Header HdrUserAgent defaultUserAgent + ] + , rqMethod = meth + } + + empty = buf_empty (toBufOps req) + +-- set rqBody, Content-Type and Content-Length headers. +setRequestBody :: Request_String -> (String, String) -> Request_String +setRequestBody req (typ, body) = req' { rqBody=body } + where + req' = replaceHeader HdrContentType typ . + replaceHeader HdrContentLength (show $ length body) $ + req + +{- + -- stub out the user info. + updAuth = fmap (\ x -> x{uriUserInfo=""}) (uriAuthority uri) + + withHost = + case uriToAuthorityString uri{uriAuthority=updAuth} of + "" -> id + h -> ((Header HdrHost h):) + + uri_req + | forProxy = uri + | otherwise = snd (splitRequestURI uri) +-} + + +toBufOps :: BufferType a => Request a -> BufferOp a +toBufOps _ = bufferOps + +----------------------------------------------------------------- +------------------ Parsing -------------------------------------- +----------------------------------------------------------------- + +-- Parsing a request +parseRequestHead :: [String] -> Result RequestData +parseRequestHead [] = Left ErrorClosed +parseRequestHead (com:hdrs) = do + (version,rqm,uri) <- requestCommand com (words com) + hdrs' <- parseHeaders hdrs + return (rqm,uri,withVer version hdrs') + where + withVer [] hs = hs + withVer (h:_) hs = withVersion h hs + + requestCommand l _yes@(rqm:uri:version) = + case (parseURIReference uri, lookup rqm rqMethodMap) of + (Just u, Just r) -> return (version,r,u) + (Just u, Nothing) -> return (version,Custom rqm,u) + _ -> parse_err l + requestCommand l _ + | null l = failWith ErrorClosed + | otherwise = 
parse_err l + + parse_err l = responseParseError "parseRequestHead" + ("Request command line parse failure: " ++ l) + +-- Parsing a response +parseResponseHead :: [String] -> Result ResponseData +parseResponseHead [] = failWith ErrorClosed +parseResponseHead (sts:hdrs) = do + (version,code,reason) <- responseStatus sts (words sts) + hdrs' <- parseHeaders hdrs + return (code,reason, withVersion version hdrs') + where + responseStatus _l _yes@(version:code:reason) = + return (version,match code,concatMap (++" ") reason) + responseStatus l _no + | null l = failWith ErrorClosed -- an assumption + | otherwise = parse_err l + + parse_err l = + responseParseError + "parseResponseHead" + ("Response status line parse failure: " ++ l) + + match [a,b,c] = (digitToInt a, + digitToInt b, + digitToInt c) + match _ = (-1,-1,-1) -- will create appropriate behaviour + +-- To avoid changing the @RequestData@ and @ResponseData@ types +-- just for this (and the upstream backwards compat. woes that +-- will result in), encode version info as a custom header. +-- Used by 'parseResponseData' and 'parseRequestData'. +-- +-- Note: the Request and Response types do not currently represent +-- the version info explicitly in their record types. You have to use +-- {get,set}{Request,Response}Version for that. +withVersion :: String -> [Header] -> [Header] +withVersion v hs + | v == httpVersion = hs -- don't bother adding it if the default. + | otherwise = (Header (HdrCustom "X-HTTP-Version") v) : hs + +-- | @getRequestVersion req@ returns the HTTP protocol version of +-- the request @req@. If @Nothing@, the default 'httpVersion' can be assumed. +getRequestVersion :: Request a -> Maybe String +getRequestVersion r = getHttpVersion r + +-- | @setRequestVersion v req@ returns a new request, identical to +-- @req@, but with its HTTP version set to @v@. 
+setRequestVersion :: String -> Request a -> Request a +setRequestVersion s r = setHttpVersion r s + + +-- | @getResponseVersion rsp@ returns the HTTP protocol version of +-- the response @rsp@. If @Nothing@, the default 'httpVersion' can be +-- assumed. +getResponseVersion :: Response a -> Maybe String +getResponseVersion r = getHttpVersion r + +-- | @setResponseVersion v rsp@ returns a new response, identical to +-- @rsp@, but with its HTTP version set to @v@. +setResponseVersion :: String -> Response a -> Response a +setResponseVersion s r = setHttpVersion r s + +-- internal functions for accessing HTTP-version info in +-- requests and responses. Not exported as it exposes ho +-- version info is represented internally. + +getHttpVersion :: HasHeaders a => a -> Maybe String +getHttpVersion r = + fmap toVersion $ + find isHttpVersion $ + getHeaders r + where + toVersion (Header _ x) = x + +setHttpVersion :: HasHeaders a => a -> String -> a +setHttpVersion r v = + setHeaders r $ + withVersion v $ + dropHttpVersion $ + getHeaders r + +dropHttpVersion :: [Header] -> [Header] +dropHttpVersion hs = filter (not.isHttpVersion) hs + +isHttpVersion :: Header -> Bool +isHttpVersion (Header (HdrCustom "X-HTTP-Version") _) = True +isHttpVersion _ = False + + + +----------------------------------------------------------------- +------------------ HTTP Send / Recv ---------------------------------- +----------------------------------------------------------------- + +data ResponseNextStep + = Continue + | Retry + | Done + | ExpectEntity + | DieHorribly String + +matchResponse :: RequestMethod -> ResponseCode -> ResponseNextStep +matchResponse rqst rsp = + case rsp of + (1,0,0) -> Continue + (1,0,1) -> Done -- upgrade to TLS + (1,_,_) -> Continue -- default + (2,0,4) -> Done + (2,0,5) -> Done + (2,_,_) -> ans + (3,0,4) -> Done + (3,0,5) -> Done + (3,_,_) -> ans + (4,1,7) -> Retry -- Expectation failed + (4,_,_) -> ans + (5,_,_) -> ans + (a,b,c) -> DieHorribly ("Response code " 
++ map intToDigit [a,b,c] ++ " not recognised") + where + ans | rqst == HEAD = Done + | otherwise = ExpectEntity + + + +----------------------------------------------------------------- +------------------ A little friendly funtionality --------------- +----------------------------------------------------------------- + + +{- + I had a quick look around but couldn't find any RFC about + the encoding of data on the query string. I did find an + IETF memo, however, so this is how I justify the urlEncode + and urlDecode methods. + + Doc name: draft-tiwari-appl-wxxx-forms-01.txt (look on www.ietf.org) + + Reserved chars: ";", "/", "?", ":", "@", "&", "=", "+", ",", and "$" are reserved. + Unwise: "{" | "}" | "|" | "\" | "^" | "[" | "]" | "`" + URI delims: "<" | ">" | "#" | "%" | <"> + Unallowed ASCII: + + Also unallowed: any non-us-ascii character + + Escape method: char -> '%' a b where a, b :: Hex digits +-} + +replacement_character :: Char +replacement_character = '\xfffd' + +-- | Encode a single Haskell Char to a list of Word8 values, in UTF8 format. +-- +-- Shamelessly stolen from utf-8string-0.3.7 +encodeChar :: Char -> [Word8] +encodeChar = map fromIntegral . go . ord + where + go oc + | oc <= 0x7f = [oc] + + | oc <= 0x7ff = [ 0xc0 + (oc `shiftR` 6) + , 0x80 + oc .&. 0x3f + ] + + | oc <= 0xffff = [ 0xe0 + (oc `shiftR` 12) + , 0x80 + ((oc `shiftR` 6) .&. 0x3f) + , 0x80 + oc .&. 0x3f + ] + | otherwise = [ 0xf0 + (oc `shiftR` 18) + , 0x80 + ((oc `shiftR` 12) .&. 0x3f) + , 0x80 + ((oc `shiftR` 6) .&. 0x3f) + , 0x80 + oc .&. 
0x3f + ] + +-- | Decode a UTF8 string packed into a list of Word8 values, directly to String +-- +-- Shamelessly stolen from utf-8string-0.3.7 +decode :: [Word8] -> String +decode [ ] = "" +decode (c:cs) + | c < 0x80 = chr (fromEnum c) : decode cs + | c < 0xc0 = replacement_character : decode cs + | c < 0xe0 = multi1 + | c < 0xf0 = multi_byte 2 0xf 0x800 + | c < 0xf8 = multi_byte 3 0x7 0x10000 + | c < 0xfc = multi_byte 4 0x3 0x200000 + | c < 0xfe = multi_byte 5 0x1 0x4000000 + | otherwise = replacement_character : decode cs + where + multi1 = case cs of + c1 : ds | c1 .&. 0xc0 == 0x80 -> + let d = ((fromEnum c .&. 0x1f) `shiftL` 6) .|. fromEnum (c1 .&. 0x3f) + in if d >= 0x000080 then toEnum d : decode ds + else replacement_character : decode ds + _ -> replacement_character : decode cs + + multi_byte :: Int -> Word8 -> Int -> [Char] + multi_byte i mask overlong = aux i cs (fromEnum (c .&. mask)) + where + aux 0 rs acc + | overlong <= acc && acc <= 0x10ffff && + (acc < 0xd800 || 0xdfff < acc) && + (acc < 0xfffe || 0xffff < acc) = chr acc : decode rs + | otherwise = replacement_character : decode rs + + aux n (r:rs) acc + | r .&. 0xc0 == 0x80 = aux (n-1) rs + $ shiftL acc 6 .|. fromEnum (r .&. 
0x3f) + + aux _ rs _ = replacement_character : decode rs + + +-- This function is a bit funny because potentially the input String could contain some actual Unicode +-- characters (though this shouldn't happen for most use cases), so we have to preserve those characters +-- while simultaneously decoding any UTF-8 data +urlDecode :: String -> String +urlDecode = go [] + where + go bs ('%':a:b:rest) = go (fromIntegral (16 * digitToInt a + digitToInt b) : bs) rest + go bs (h:t) | fromEnum h < 256 = go (fromIntegral (fromEnum h) : bs) t -- Treat ASCII as just another byte of UTF-8 + go [] [] = [] + go [] (h:t) = h : go [] t -- h >= 256, so can't be part of any UTF-8 byte sequence + go bs rest = decode (reverse bs) ++ go [] rest + + +urlEncode :: String -> String +urlEncode [] = [] +urlEncode (ch:t) + | (isAscii ch && isAlphaNum ch) || ch `elem` "-_.~" = ch : urlEncode t + | not (isAscii ch) = foldr escape (urlEncode t) (encodeChar ch) + | otherwise = escape (fromIntegral (fromEnum ch)) (urlEncode t) + where + escape b rs = '%':showH (b `div` 16) (showH (b `mod` 16) rs) + + showH :: Word8 -> String -> String + showH x xs + | x <= 9 = to (o_0 + x) : xs + | otherwise = to (o_A + (x-10)) : xs + where + to = toEnum . fromIntegral + fro = fromIntegral . fromEnum + + o_0 = fro '0' + o_A = fro 'A' + +-- Encode form variables, useable in either the +-- query part of a URI, or the body of a POST request. +-- I have no source for this information except experience, +-- this sort of encoding worked fine in CGI programming. +urlEncodeVars :: [(String,String)] -> String +urlEncodeVars ((n,v):t) = + let (same,diff) = partition ((==n) . fst) t + in urlEncode n ++ '=' : foldl (\x y -> x ++ ',' : urlEncode y) (urlEncode $ v) (map snd same) + ++ urlEncodeRest diff + where urlEncodeRest [] = [] + urlEncodeRest diff = '&' : urlEncodeVars diff +urlEncodeVars [] = [] + +-- | @getAuth req@ fishes out the authority portion of the URL in a request's @Host@ +-- header. 
+getAuth :: Monad m => Request ty -> m URIAuthority +getAuth r = + -- ToDo: verify that Network.URI functionality doesn't take care of this (now.) + case parseURIAuthority auth of + Just x -> return x + Nothing -> fail $ "Network.HTTP.Base.getAuth: Error parsing URI authority '" ++ auth ++ "'" + where + auth = maybe (uriToAuthorityString uri) id (findHeader HdrHost r) + uri = rqURI r + +{-# DEPRECATED normalizeRequestURI "Please use Network.HTTP.Base.normalizeRequest instead" #-} +normalizeRequestURI :: Bool{-do close-} -> {-URI-}String -> Request ty -> Request ty +normalizeRequestURI doClose h r = + (if doClose then replaceHeader HdrConnection "close" else id) $ + insertHeaderIfMissing HdrHost h $ + r { rqURI = (rqURI r){ uriScheme = "" + , uriAuthority = Nothing + }} + +-- | @NormalizeRequestOptions@ brings together the various defaulting\/normalization options +-- over 'Request's. Use 'defaultNormalizeRequestOptions' for the standard selection of option +data NormalizeRequestOptions ty + = NormalizeRequestOptions + { normDoClose :: Bool + , normForProxy :: Bool + , normUserAgent :: Maybe String + , normCustoms :: [RequestNormalizer ty] + } + +-- | @RequestNormalizer@ is the shape of a (pure) function that rewrites +-- a request into some normalized form. +type RequestNormalizer ty = NormalizeRequestOptions ty -> Request ty -> Request ty + +defaultNormalizeRequestOptions :: NormalizeRequestOptions ty +defaultNormalizeRequestOptions = NormalizeRequestOptions + { normDoClose = False + , normForProxy = False + , normUserAgent = Just defaultUserAgent + , normCustoms = [] + } + +-- | @normalizeRequest opts req@ is the entry point to use to normalize your +-- request prior to transmission (or other use.) Normalization is controlled +-- via the @NormalizeRequestOptions@ record. 
+normalizeRequest :: NormalizeRequestOptions ty + -> Request ty + -> Request ty +normalizeRequest opts req = foldr (\ f -> f opts) req normalizers + where + --normalizers :: [RequestNormalizer ty] + normalizers = + ( normalizeHostURI + : normalizeBasicAuth + : normalizeConnectionClose + : normalizeUserAgent + : normCustoms opts + ) + +-- | @normalizeUserAgent ua x req@ augments the request @req@ with +-- a @User-Agent: ua@ header if @req@ doesn't already have a +-- a @User-Agent:@ set. +normalizeUserAgent :: RequestNormalizer ty +normalizeUserAgent opts req = + case normUserAgent opts of + Nothing -> req + Just ua -> + case findHeader HdrUserAgent req of + Just u | u /= defaultUserAgent -> req + _ -> replaceHeader HdrUserAgent ua req + +-- | @normalizeConnectionClose opts req@ sets the header @Connection: close@ +-- to indicate one-shot behavior iff @normDoClose@ is @True@. i.e., it then +-- _replaces_ any an existing @Connection:@ header in @req@. +normalizeConnectionClose :: RequestNormalizer ty +normalizeConnectionClose opts req + | normDoClose opts = replaceHeader HdrConnection "close" req + | otherwise = req + +-- | @normalizeBasicAuth opts req@ sets the header @Authorization: Basic...@ +-- if the "user:pass@" part is present in the "http://user:pass@host/path" +-- of the URI. If Authorization header was present already it is not replaced. +normalizeBasicAuth :: RequestNormalizer ty +normalizeBasicAuth _ req = + case getAuth req of + Just uriauth -> + case (user uriauth, password uriauth) of + (Just u, Just p) -> + insertHeaderIfMissing HdrAuthorization astr req + where + astr = "Basic " ++ base64encode (u ++ ":" ++ p) + base64encode = Base64.encode . stringToOctets :: String -> String + stringToOctets = map (fromIntegral . fromEnum) :: String -> [Word8] + (_, _) -> req + Nothing ->req + +-- | @normalizeHostURI forProxy req@ rewrites your request to have it +-- follow the expected formats by the receiving party (proxy or server.) 
+-- +normalizeHostURI :: RequestNormalizer ty +normalizeHostURI opts req = + case splitRequestURI uri of + ("",_uri_abs) + | forProxy -> + case findHeader HdrHost req of + Nothing -> req -- no host/authority in sight..not much we can do. + Just h -> req{rqURI=uri{ uriAuthority=Just URIAuth{uriUserInfo="", uriRegName=hst, uriPort=pNum} + , uriScheme=if (null (uriScheme uri)) then "http" else uriScheme uri + }} + where + hst = case span (/='@') user_hst of + (as,'@':bs) -> + case span (/=':') as of + (_,_:_) -> bs + _ -> user_hst + _ -> user_hst + + (user_hst, pNum) = + case span isDigit (reverse h) of + (ds,':':bs) -> (reverse bs, ':':reverse ds) + _ -> (h,"") + | otherwise -> + case findHeader HdrHost req of + Nothing -> req -- no host/authority in sight..not much we can do...complain? + Just{} -> req + (h,uri_abs) + | forProxy -> insertHeaderIfMissing HdrHost h req + | otherwise -> replaceHeader HdrHost h req{rqURI=uri_abs} -- Note: _not_ stubbing out user:pass + where + uri0 = rqURI req + -- stub out the user:pass + uri = uri0{uriAuthority=fmap (\ x -> x{uriUserInfo=""}) (uriAuthority uri0)} + + forProxy = normForProxy opts + +{- Comments re: above rewriting: + RFC 2616, section 5.1.2: + "The most common form of Request-URI is that used to identify a + resource on an origin server or gateway. In this case the absolute + path of the URI MUST be transmitted (see section 3.2.1, abs_path) as + the Request-URI, and the network location of the URI (authority) MUST + be transmitted in a Host header field." + We assume that this is the case, so we take the host name from + the Host header if there is one, otherwise from the request-URI. + Then we make the request-URI an abs_path and make sure that there + is a Host header. +-} + +splitRequestURI :: URI -> ({-authority-}String, URI) +splitRequestURI uri = (uriToAuthorityString uri, uri{uriScheme="", uriAuthority=Nothing}) + +-- Adds a Host header if one is NOT ALREADY PRESENT.. 
+{-# DEPRECATED normalizeHostHeader "Please use Network.HTTP.Base.normalizeRequest instead" #-} +normalizeHostHeader :: Request ty -> Request ty +normalizeHostHeader rq = + insertHeaderIfMissing HdrHost + (uriToAuthorityString $ rqURI rq) + rq + +-- Looks for a "Connection" header with the value "close". +-- Returns True when this is found. +findConnClose :: [Header] -> Bool +findConnClose hdrs = + maybe False + (\ x -> map toLower (trim x) == "close") + (lookupHeader HdrConnection hdrs) + +-- | Used when we know exactly how many bytes to expect. +linearTransfer :: (Int -> IO (Result a)) -> Int -> IO (Result ([Header],a)) +linearTransfer readBlk n = fmapE (\str -> Right ([],str)) (readBlk n) + +-- | Used when nothing about data is known, +-- Unfortunately waiting for a socket closure +-- causes bad behaviour. Here we just +-- take data once and give up the rest. +hopefulTransfer :: BufferOp a + -> IO (Result a) + -> [a] + -> IO (Result ([Header],a)) +hopefulTransfer bufOps readL strs + = readL >>= + either (\v -> return $ Left v) + (\more -> if (buf_isEmpty bufOps more) + then return (Right ([], buf_concat bufOps $ reverse strs)) + else hopefulTransfer bufOps readL (more:strs)) + +-- | A necessary feature of HTTP\/1.1 +-- Also the only transfer variety likely to +-- return any footers. +chunkedTransfer :: BufferOp a + -> IO (Result a) + -> (Int -> IO (Result a)) + -> IO (Result ([Header], a)) +chunkedTransfer bufOps readL readBlk = chunkedTransferC bufOps readL readBlk [] 0 + +chunkedTransferC :: BufferOp a + -> IO (Result a) + -> (Int -> IO (Result a)) + -> [a] + -> Int + -> IO (Result ([Header], a)) +chunkedTransferC bufOps readL readBlk acc n = do + v <- readL + case v of + Left e -> return (Left e) + Right line + | size == 0 -> + -- last chunk read; look for trailing headers.. + fmapE (\ strs -> do + ftrs <- parseHeaders (map (buf_toStr bufOps) strs) + -- insert (computed) Content-Length header. 
+ let ftrs' = Header HdrContentLength (show n) : ftrs + return (ftrs',buf_concat bufOps (reverse acc))) + + (readTillEmpty2 bufOps readL []) + + | otherwise -> do + some <- readBlk size + case some of + Left e -> return (Left e) + Right cdata -> do + _ <- readL -- CRLF is mandated after the chunk block; ToDo: check that the line is empty.? + chunkedTransferC bufOps readL readBlk (cdata:acc) (n+size) + where + size + | buf_isEmpty bufOps line = 0 + | otherwise = + case readHex (buf_toStr bufOps line) of + (hx,_):_ -> hx + _ -> 0 + +-- | Maybe in the future we will have a sensible thing +-- to do here, at that time we might want to change +-- the name. +uglyDeathTransfer :: String -> IO (Result ([Header],a)) +uglyDeathTransfer loc = return (responseParseError loc "Unknown Transfer-Encoding") + +-- | Remove leading crlfs then call readTillEmpty2 (not required by RFC) +readTillEmpty1 :: BufferOp a + -> IO (Result a) + -> IO (Result [a]) +readTillEmpty1 bufOps readL = + readL >>= + either (return . Left) + (\ s -> + if buf_isLineTerm bufOps s + then readTillEmpty1 bufOps readL + else readTillEmpty2 bufOps readL [s]) + +-- | Read lines until an empty line (CRLF), +-- also accepts a connection close as end of +-- input, which is not an HTTP\/1.1 compliant +-- thing to do - so probably indicates an +-- error condition. +readTillEmpty2 :: BufferOp a + -> IO (Result a) + -> [a] + -> IO (Result [a]) +readTillEmpty2 bufOps readL list = + readL >>= + either (return . Left) + (\ s -> + if buf_isLineTerm bufOps s || buf_isEmpty bufOps s + then return (Right $ reverse (s:list)) + else readTillEmpty2 bufOps readL (s:list)) + +-- +-- Misc +-- + +-- | @catchIO a h@ handles IO action exceptions throughout codebase; version-specific +-- tweaks better go here. 
+catchIO :: IO a -> (IOException -> IO a) -> IO a +catchIO a h = Exception.catch a h + +catchIO_ :: IO a -> IO a -> IO a +catchIO_ a h = Exception.catch a (\(_ :: IOException) -> h) + +responseParseError :: String -> String -> Result a +responseParseError loc v = failWith (ErrorParse (loc ++ ' ':v)) diff -Nru cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Network/HTTP/Cookie.hs cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Network/HTTP/Cookie.hs --- cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Network/HTTP/Cookie.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Network/HTTP/Cookie.hs 2016-02-09 22:34:53.000000000 +0000 @@ -0,0 +1,141 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Network.HTTP.Cookie +-- Copyright : See LICENSE file +-- License : BSD +-- +-- Maintainer : Ganesh Sittampalam +-- Stability : experimental +-- Portability : non-portable (not tested) +-- +-- This module provides the data types and functions for working with HTTP cookies. +-- Right now, it contains mostly functionality needed by 'Network.Browser'. +-- +----------------------------------------------------------------------------- +module Network.HTTP.Cookie + ( Cookie(..) + , cookieMatch -- :: (String,String) -> Cookie -> Bool + + -- functions for translating cookies and headers. + , cookiesToHeader -- :: [Cookie] -> Header + , processCookieHeaders -- :: String -> [Header] -> ([String], [Cookie]) + ) where + +import Network.HTTP.Headers + +import Data.Char +import Data.List +import Data.Maybe + +import Text.ParserCombinators.Parsec + ( Parser, char, many, many1, satisfy, parse, option, try + , (<|>), sepBy1 + ) + +------------------------------------------------------------------ +----------------------- Cookie Stuff ----------------------------- +------------------------------------------------------------------ + +-- | @Cookie@ is the Haskell representation of HTTP cookie values. 
+-- See its relevant specs for authoritative details. +data Cookie + = MkCookie + { ckDomain :: String + , ckName :: String + , ckValue :: String + , ckPath :: Maybe String + , ckComment :: Maybe String + , ckVersion :: Maybe String + } + deriving(Show,Read) + +instance Eq Cookie where + a == b = ckDomain a == ckDomain b + && ckName a == ckName b + && ckPath a == ckPath b + +-- | @cookieToHeaders ck@ serialises @Cookie@s to an HTTP request header. +cookiesToHeader :: [Cookie] -> Header +cookiesToHeader cs = Header HdrCookie (mkCookieHeaderValue cs) + +-- | Turn a list of cookies into a key=value pair list, separated by +-- semicolons. +mkCookieHeaderValue :: [Cookie] -> String +mkCookieHeaderValue = intercalate "; " . map mkCookieHeaderValue1 + where + mkCookieHeaderValue1 c = ckName c ++ "=" ++ ckValue c + +-- | @cookieMatch (domain,path) ck@ performs the standard cookie +-- match wrt the given domain and path. +cookieMatch :: (String, String) -> Cookie -> Bool +cookieMatch (dom,path) ck = + ckDomain ck `isSuffixOf` dom && + case ckPath ck of + Nothing -> True + Just p -> p `isPrefixOf` path + + +-- | @processCookieHeaders dom hdrs@ +processCookieHeaders :: String -> [Header] -> ([String], [Cookie]) +processCookieHeaders dom hdrs = foldr (headerToCookies dom) ([],[]) hdrs + +-- | @headerToCookies dom hdr acc@ +headerToCookies :: String -> Header -> ([String], [Cookie]) -> ([String], [Cookie]) +headerToCookies dom (Header HdrSetCookie val) (accErr, accCookie) = + case parse cookies "" val of + Left{} -> (val:accErr, accCookie) + Right x -> (accErr, x ++ accCookie) + where + cookies :: Parser [Cookie] + cookies = sepBy1 cookie (char ',') + + cookie :: Parser Cookie + cookie = + do name <- word + _ <- spaces_l + _ <- char '=' + _ <- spaces_l + val1 <- cvalue + args <- cdetail + return $ mkCookie name val1 args + + cvalue :: Parser String + + spaces_l = many (satisfy isSpace) + + cvalue = quotedstring <|> many1 (satisfy $ not . 
(==';')) <|> return "" + + -- all keys in the result list MUST be in lower case + cdetail :: Parser [(String,String)] + cdetail = many $ + try (do _ <- spaces_l + _ <- char ';' + _ <- spaces_l + s1 <- word + _ <- spaces_l + s2 <- option "" (char '=' >> spaces_l >> cvalue) + return (map toLower s1,s2) + ) + + mkCookie :: String -> String -> [(String,String)] -> Cookie + mkCookie nm cval more = + MkCookie { ckName = nm + , ckValue = cval + , ckDomain = map toLower (fromMaybe dom (lookup "domain" more)) + , ckPath = lookup "path" more + , ckVersion = lookup "version" more + , ckComment = lookup "comment" more + } +headerToCookies _ _ acc = acc + + + + +word, quotedstring :: Parser String +quotedstring = + do _ <- char '"' -- " + str <- many (satisfy $ not . (=='"')) + _ <- char '"' + return str + +word = many1 (satisfy (\x -> isAlphaNum x || x=='_' || x=='.' || x=='-' || x==':')) diff -Nru cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Network/HTTP/HandleStream.hs cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Network/HTTP/HandleStream.hs --- cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Network/HTTP/HandleStream.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Network/HTTP/HandleStream.hs 2016-02-09 22:34:53.000000000 +0000 @@ -0,0 +1,252 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Network.HTTP.HandleStream +-- Copyright : See LICENSE file +-- License : BSD +-- +-- Maintainer : Ganesh Sittampalam +-- Stability : experimental +-- Portability : non-portable (not tested) +-- +-- A 'HandleStream'-based version of "Network.HTTP" interface. +-- +-- For more detailed information about what the individual exports do, please consult +-- the documentation for "Network.HTTP". 
/Notice/ however that the functions here do +-- not perform any kind of normalization prior to transmission (or receipt); you are +-- responsible for doing any such yourself, or, if you prefer, just switch to using +-- "Network.HTTP" function instead. +-- +----------------------------------------------------------------------------- +module Network.HTTP.HandleStream + ( simpleHTTP -- :: Request ty -> IO (Result (Response ty)) + , simpleHTTP_ -- :: HStream ty => HandleStream ty -> Request ty -> IO (Result (Response ty)) + , sendHTTP -- :: HStream ty => HandleStream ty -> Request ty -> IO (Result (Response ty)) + , sendHTTP_notify -- :: HStream ty => HandleStream ty -> Request ty -> IO () -> IO (Result (Response ty)) + , receiveHTTP -- :: HStream ty => HandleStream ty -> IO (Result (Request ty)) + , respondHTTP -- :: HStream ty => HandleStream ty -> Response ty -> IO () + + , simpleHTTP_debug -- :: FilePath -> Request DebugString -> IO (Response DebugString) + ) where + +----------------------------------------------------------------- +------------------ Imports -------------------------------------- +----------------------------------------------------------------- + +import Network.BufferType +import Network.Stream ( fmapE, Result ) +import Network.StreamDebugger ( debugByteStream ) +import Network.TCP (HStream(..), HandleStream ) + +import Network.HTTP.Base +import Network.HTTP.Headers +import Network.HTTP.Utils ( trim, readsOne ) + +import Data.Char (toLower) +import Data.Maybe (fromMaybe) +import Control.Exception (onException) +import Control.Monad (when) + +----------------------------------------------------------------- +------------------ Misc ----------------------------------------- +----------------------------------------------------------------- + +-- | @simpleHTTP@ transmits a resource across a non-persistent connection. 
+simpleHTTP :: HStream ty => Request ty -> IO (Result (Response ty)) +simpleHTTP r = do + auth <- getAuth r + failHTTPS (rqURI r) + c <- openStream (host auth) (fromMaybe 80 (port auth)) + simpleHTTP_ c r + +-- | @simpleHTTP_debug debugFile req@ behaves like 'simpleHTTP', but logs +-- the HTTP operation via the debug file @debugFile@. +simpleHTTP_debug :: HStream ty => FilePath -> Request ty -> IO (Result (Response ty)) +simpleHTTP_debug httpLogFile r = do + auth <- getAuth r + failHTTPS (rqURI r) + c0 <- openStream (host auth) (fromMaybe 80 (port auth)) + c <- debugByteStream httpLogFile c0 + simpleHTTP_ c r + +-- | Like 'simpleHTTP', but acting on an already opened stream. +simpleHTTP_ :: HStream ty => HandleStream ty -> Request ty -> IO (Result (Response ty)) +simpleHTTP_ s r = sendHTTP s r + +-- | @sendHTTP hStream httpRequest@ transmits @httpRequest@ over +-- @hStream@, but does not alter the status of the connection, nor request it to be +-- closed upon receiving the response. +sendHTTP :: HStream ty => HandleStream ty -> Request ty -> IO (Result (Response ty)) +sendHTTP conn rq = sendHTTP_notify conn rq (return ()) + +-- | @sendHTTP_notify hStream httpRequest action@ behaves like 'sendHTTP', but +-- lets you supply an IO @action@ to execute once the request has been successfully +-- transmitted over the connection. Useful when you want to set up tracing of +-- request transmission and its performance. 
+sendHTTP_notify :: HStream ty + => HandleStream ty + -> Request ty + -> IO () + -> IO (Result (Response ty)) +sendHTTP_notify conn rq onSendComplete = do + when providedClose $ (closeOnEnd conn True) + onException (sendMain conn rq onSendComplete) + (close conn) + where + providedClose = findConnClose (rqHeaders rq) + +-- From RFC 2616, section 8.2.3: +-- 'Because of the presence of older implementations, the protocol allows +-- ambiguous situations in which a client may send "Expect: 100- +-- continue" without receiving either a 417 (Expectation Failed) status +-- or a 100 (Continue) status. Therefore, when a client sends this +-- header field to an origin server (possibly via a proxy) from which it +-- has never seen a 100 (Continue) status, the client SHOULD NOT wait +-- for an indefinite period before sending the request body.' +-- +-- Since we would wait forever, I have disabled use of 100-continue for now. +sendMain :: HStream ty + => HandleStream ty + -> Request ty + -> (IO ()) + -> IO (Result (Response ty)) +sendMain conn rqst onSendComplete = do + --let str = if null (rqBody rqst) + -- then show rqst + -- else show (insertHeader HdrExpect "100-continue" rqst) + -- TODO review throwing away of result + _ <- writeBlock conn (buf_fromStr bufferOps $ show rqst) + -- write body immediately, don't wait for 100 CONTINUE + -- TODO review throwing away of result + _ <- writeBlock conn (rqBody rqst) + onSendComplete + rsp <- getResponseHead conn + switchResponse conn True False rsp rqst + + -- Hmmm, this could go bad if we keep getting "100 Continue" + -- responses... Except this should never happen according + -- to the RFC. + +switchResponse :: HStream ty + => HandleStream ty + -> Bool {- allow retry? -} + -> Bool {- is body sent? -} + -> Result ResponseData + -> Request ty + -> IO (Result (Response ty)) +switchResponse _ _ _ (Left e) _ = return (Left e) + -- retry on connreset? 
+ -- if we attempt to use the same socket then there is an excellent + -- chance that the socket is not in a completely closed state. + +switchResponse conn allow_retry bdy_sent (Right (cd,rn,hdrs)) rqst = + case matchResponse (rqMethod rqst) cd of + Continue + | not bdy_sent -> do {- Time to send the body -} + writeBlock conn (rqBody rqst) >>= either (return . Left) + (\ _ -> do + rsp <- getResponseHead conn + switchResponse conn allow_retry True rsp rqst) + | otherwise -> do {- keep waiting -} + rsp <- getResponseHead conn + switchResponse conn allow_retry bdy_sent rsp rqst + + Retry -> do {- Request with "Expect" header failed. + Trouble is the request contains Expects + other than "100-Continue" -} + -- TODO review throwing away of result + _ <- writeBlock conn ((buf_append bufferOps) + (buf_fromStr bufferOps (show rqst)) + (rqBody rqst)) + rsp <- getResponseHead conn + switchResponse conn False bdy_sent rsp rqst + + Done -> do + when (findConnClose hdrs) + (closeOnEnd conn True) + return (Right $ Response cd rn hdrs (buf_empty bufferOps)) + + DieHorribly str -> do + close conn + return (responseParseError "Invalid response:" str) + ExpectEntity -> do + r <- fmapE (\ (ftrs,bdy) -> Right (Response cd rn (hdrs++ftrs) bdy)) $ + maybe (maybe (hopefulTransfer bo (readLine conn) []) + (\ x -> + readsOne (linearTransfer (readBlock conn)) + (return$responseParseError "unrecognized content-length value" x) + x) + cl) + (ifChunked (chunkedTransfer bo (readLine conn) (readBlock conn)) + (uglyDeathTransfer "sendHTTP")) + tc + case r of + Left{} -> do + close conn + return r + Right (Response _ _ hs _) -> do + when (findConnClose hs) + (closeOnEnd conn True) + return r + + where + tc = lookupHeader HdrTransferEncoding hdrs + cl = lookupHeader HdrContentLength hdrs + bo = bufferOps + +-- reads and parses headers +getResponseHead :: HStream ty => HandleStream ty -> IO (Result ResponseData) +getResponseHead conn = + fmapE (\es -> parseResponseHead (map (buf_toStr bufferOps) 
es)) + (readTillEmpty1 bufferOps (readLine conn)) + +-- | @receiveHTTP hStream@ reads a 'Request' from the 'HandleStream' @hStream@ +receiveHTTP :: HStream bufTy => HandleStream bufTy -> IO (Result (Request bufTy)) +receiveHTTP conn = getRequestHead >>= either (return . Left) processRequest + where + -- reads and parses headers + getRequestHead :: IO (Result RequestData) + getRequestHead = do + fmapE (\es -> parseRequestHead (map (buf_toStr bufferOps) es)) + (readTillEmpty1 bufferOps (readLine conn)) + + processRequest (rm,uri,hdrs) = + fmapE (\ (ftrs,bdy) -> Right (Request uri rm (hdrs++ftrs) bdy)) $ + maybe + (maybe (return (Right ([], buf_empty bo))) -- hopefulTransfer "" + (\ x -> readsOne (linearTransfer (readBlock conn)) + (return$responseParseError "unrecognized Content-Length value" x) + x) + + cl) + (ifChunked (chunkedTransfer bo (readLine conn) (readBlock conn)) + (uglyDeathTransfer "receiveHTTP")) + tc + where + -- FIXME : Also handle 100-continue. + tc = lookupHeader HdrTransferEncoding hdrs + cl = lookupHeader HdrContentLength hdrs + bo = bufferOps + +-- | @respondHTTP hStream httpResponse@ transmits an HTTP 'Response' over +-- the 'HandleStream' @hStream@. It could be used to implement simple web +-- server interactions, performing the dual role to 'sendHTTP'. 
+respondHTTP :: HStream ty => HandleStream ty -> Response ty -> IO () +respondHTTP conn rsp = do + -- TODO: review throwing away of result + _ <- writeBlock conn (buf_fromStr bufferOps $ show rsp) + -- write body immediately, don't wait for 100 CONTINUE + -- TODO: review throwing away of result + _ <- writeBlock conn (rspBody rsp) + return () + +------------------------------------------------------------------------------ + +headerName :: String -> String +headerName x = map toLower (trim x) + +ifChunked :: a -> a -> String -> a +ifChunked a b s = + case headerName s of + "chunked" -> a + _ -> b + diff -Nru cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Network/HTTP/Headers.hs cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Network/HTTP/Headers.hs --- cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Network/HTTP/Headers.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Network/HTTP/Headers.hs 2016-02-09 22:34:53.000000000 +0000 @@ -0,0 +1,306 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Network.HTTP.Headers +-- Copyright : See LICENSE file +-- License : BSD +-- +-- Maintainer : Ganesh Sittampalam +-- Stability : experimental +-- Portability : non-portable (not tested) +-- +-- This module provides the data types for representing HTTP headers, and +-- operations for looking up header values and working with sequences of +-- header values in 'Request's and 'Response's. To avoid having to provide +-- separate set of operations for doing so, we introduce a type class 'HasHeaders' +-- to facilitate writing such processing using overloading instead. +-- +----------------------------------------------------------------------------- +module Network.HTTP.Headers + ( HasHeaders(..) -- type class + + , Header(..) + , mkHeader -- :: HeaderName -> String -> Header + , hdrName -- :: Header -> HeaderName + , hdrValue -- :: Header -> String + + , HeaderName(..) 
+ + , insertHeader -- :: HasHeaders a => HeaderName -> String -> a -> a + , insertHeaderIfMissing -- :: HasHeaders a => HeaderName -> String -> a -> a + , insertHeaders -- :: HasHeaders a => [Header] -> a -> a + , retrieveHeaders -- :: HasHeaders a => HeaderName -> a -> [Header] + , replaceHeader -- :: HasHeaders a => HeaderName -> String -> a -> a + , findHeader -- :: HasHeaders a => HeaderName -> a -> Maybe String + , lookupHeader -- :: HeaderName -> [Header] -> Maybe String + + , parseHeader -- :: parseHeader :: String -> Result Header + , parseHeaders -- :: [String] -> Result [Header] + + , headerMap -- :: [(String, HeaderName)] + + , HeaderSetter + ) where + +import Data.Char (toLower) +import Network.Stream (Result, failParse) +import Network.HTTP.Utils ( trim, split, crlf ) + +-- | The @Header@ data type pairs header names & values. +data Header = Header HeaderName String + +hdrName :: Header -> HeaderName +hdrName (Header h _) = h + +hdrValue :: Header -> String +hdrValue (Header _ v) = v + +-- | Header constructor as a function, hiding above rep. +mkHeader :: HeaderName -> String -> Header +mkHeader = Header + +instance Show Header where + show (Header key value) = shows key (':':' ':value ++ crlf) + +-- | HTTP @HeaderName@ type, a Haskell data constructor for each +-- specification-defined header, prefixed with @Hdr@ and CamelCased, +-- (i.e., eliding the @-@ in the process.) Should you require using +-- a custom header, there's the @HdrCustom@ constructor which takes +-- a @String@ argument. +-- +-- Encoding HTTP header names differently, as Strings perhaps, is an +-- equally fine choice..no decidedly clear winner, but let's stick +-- with data constructors here. 
+-- +data HeaderName + -- Generic Headers -- + = HdrCacheControl + | HdrConnection + | HdrDate + | HdrPragma + | HdrTransferEncoding + | HdrUpgrade + | HdrVia + -- Request Headers -- + | HdrAccept + | HdrAcceptCharset + | HdrAcceptEncoding + | HdrAcceptLanguage + | HdrAuthorization + | HdrCookie + | HdrExpect + | HdrFrom + | HdrHost + | HdrIfModifiedSince + | HdrIfMatch + | HdrIfNoneMatch + | HdrIfRange + | HdrIfUnmodifiedSince + | HdrMaxForwards + | HdrProxyAuthorization + | HdrRange + | HdrReferer + | HdrUserAgent + -- Response Headers + | HdrAge + | HdrLocation + | HdrProxyAuthenticate + | HdrPublic + | HdrRetryAfter + | HdrServer + | HdrSetCookie + | HdrTE + | HdrTrailer + | HdrVary + | HdrWarning + | HdrWWWAuthenticate + -- Entity Headers + | HdrAllow + | HdrContentBase + | HdrContentEncoding + | HdrContentLanguage + | HdrContentLength + | HdrContentLocation + | HdrContentMD5 + | HdrContentRange + | HdrContentType + | HdrETag + | HdrExpires + | HdrLastModified + -- | MIME entity headers (for sub-parts) + | HdrContentTransferEncoding + -- | Allows for unrecognised or experimental headers. + | HdrCustom String -- not in header map below. + deriving(Eq) + +-- | @headerMap@ is a straight assoc list for translating between header names +-- and values. 
+headerMap :: [ (String,HeaderName) ] +headerMap = + [ p "Cache-Control" HdrCacheControl + , p "Connection" HdrConnection + , p "Date" HdrDate + , p "Pragma" HdrPragma + , p "Transfer-Encoding" HdrTransferEncoding + , p "Upgrade" HdrUpgrade + , p "Via" HdrVia + , p "Accept" HdrAccept + , p "Accept-Charset" HdrAcceptCharset + , p "Accept-Encoding" HdrAcceptEncoding + , p "Accept-Language" HdrAcceptLanguage + , p "Authorization" HdrAuthorization + , p "Cookie" HdrCookie + , p "Expect" HdrExpect + , p "From" HdrFrom + , p "Host" HdrHost + , p "If-Modified-Since" HdrIfModifiedSince + , p "If-Match" HdrIfMatch + , p "If-None-Match" HdrIfNoneMatch + , p "If-Range" HdrIfRange + , p "If-Unmodified-Since" HdrIfUnmodifiedSince + , p "Max-Forwards" HdrMaxForwards + , p "Proxy-Authorization" HdrProxyAuthorization + , p "Range" HdrRange + , p "Referer" HdrReferer + , p "User-Agent" HdrUserAgent + , p "Age" HdrAge + , p "Location" HdrLocation + , p "Proxy-Authenticate" HdrProxyAuthenticate + , p "Public" HdrPublic + , p "Retry-After" HdrRetryAfter + , p "Server" HdrServer + , p "Set-Cookie" HdrSetCookie + , p "TE" HdrTE + , p "Trailer" HdrTrailer + , p "Vary" HdrVary + , p "Warning" HdrWarning + , p "WWW-Authenticate" HdrWWWAuthenticate + , p "Allow" HdrAllow + , p "Content-Base" HdrContentBase + , p "Content-Encoding" HdrContentEncoding + , p "Content-Language" HdrContentLanguage + , p "Content-Length" HdrContentLength + , p "Content-Location" HdrContentLocation + , p "Content-MD5" HdrContentMD5 + , p "Content-Range" HdrContentRange + , p "Content-Type" HdrContentType + , p "ETag" HdrETag + , p "Expires" HdrExpires + , p "Last-Modified" HdrLastModified + , p "Content-Transfer-Encoding" HdrContentTransferEncoding + ] + where + p a b = (a,b) + +instance Show HeaderName where + show (HdrCustom s) = s + show x = case filter ((==x).snd) headerMap of + [] -> error "headerMap incomplete" + (h:_) -> fst h + +-- | @HasHeaders@ is a type class for types containing HTTP headers, allowing 
+-- you to write overloaded header manipulation functions +-- for both 'Request' and 'Response' data types, for instance. +class HasHeaders x where + getHeaders :: x -> [Header] + setHeaders :: x -> [Header] -> x + +-- Header manipulation functions + +type HeaderSetter a = HeaderName -> String -> a -> a + +-- | @insertHeader hdr val x@ inserts a header with the given header name +-- and value. Does not check for existing headers with same name, allowing +-- duplicates to be introduce (use 'replaceHeader' if you want to avoid this.) +insertHeader :: HasHeaders a => HeaderSetter a +insertHeader name value x = setHeaders x newHeaders + where + newHeaders = (Header name value) : getHeaders x + +-- | @insertHeaderIfMissing hdr val x@ adds the new header only if no previous +-- header with name @hdr@ exists in @x@. +insertHeaderIfMissing :: HasHeaders a => HeaderSetter a +insertHeaderIfMissing name value x = setHeaders x (newHeaders $ getHeaders x) + where + newHeaders list@(h@(Header n _): rest) + | n == name = list + | otherwise = h : newHeaders rest + newHeaders [] = [Header name value] + +-- | @replaceHeader hdr val o@ replaces the header @hdr@ with the +-- value @val@, dropping any existing +replaceHeader :: HasHeaders a => HeaderSetter a +replaceHeader name value h = setHeaders h newHeaders + where + newHeaders = Header name value : [ x | x@(Header n _) <- getHeaders h, name /= n ] + +-- | @insertHeaders hdrs x@ appends multiple headers to @x@'s existing +-- set. +insertHeaders :: HasHeaders a => [Header] -> a -> a +insertHeaders hdrs x = setHeaders x (getHeaders x ++ hdrs) + +-- | @retrieveHeaders hdrNm x@ gets a list of headers with 'HeaderName' @hdrNm@. +retrieveHeaders :: HasHeaders a => HeaderName -> a -> [Header] +retrieveHeaders name x = filter matchname (getHeaders x) + where + matchname (Header n _) = n == name + +-- | @findHeader hdrNm x@ looks up @hdrNm@ in @x@, returning the first +-- header that matches, if any. 
+findHeader :: HasHeaders a => HeaderName -> a -> Maybe String +findHeader n x = lookupHeader n (getHeaders x) + +-- | @lookupHeader hdr hdrs@ locates the first header matching @hdr@ in the +-- list @hdrs@. +lookupHeader :: HeaderName -> [Header] -> Maybe String +lookupHeader _ [] = Nothing +lookupHeader v (Header n s:t) + | v == n = Just s + | otherwise = lookupHeader v t + +-- | @parseHeader headerNameAndValueString@ tries to unscramble a +-- @header: value@ pairing and returning it as a 'Header'. +parseHeader :: String -> Result Header +parseHeader str = + case split ':' str of + Nothing -> failParse ("Unable to parse header: " ++ str) + Just (k,v) -> return $ Header (fn k) (trim $ drop 1 v) + where + fn k = case map snd $ filter (match k . fst) headerMap of + [] -> (HdrCustom k) + (h:_) -> h + + match :: String -> String -> Bool + match s1 s2 = map toLower s1 == map toLower s2 + +-- | @parseHeaders hdrs@ takes a sequence of strings holding header +-- information and parses them into a set of headers (preserving their +-- order in the input argument.) Handles header values split up over +-- multiple lines. +parseHeaders :: [String] -> Result [Header] +parseHeaders = catRslts [] . + map (parseHeader . clean) . + joinExtended "" + where + -- Joins consecutive lines where the second line + -- begins with ' ' or '\t'. + joinExtended old [] = [old] + joinExtended old (h : t) + | isLineExtension h = joinExtended (old ++ ' ' : tail h) t + | otherwise = old : joinExtended h t + + isLineExtension (x:_) = x == ' ' || x == '\t' + isLineExtension _ = False + + clean [] = [] + clean (h:t) | h `elem` "\t\r\n" = ' ' : clean t + | otherwise = h : clean t + + -- tolerant of errors? should parse + -- errors here be reported or ignored? + -- currently ignored. 
+ catRslts :: [a] -> [Result a] -> Result [a] + catRslts list (h:t) = + case h of + Left _ -> catRslts list t + Right v -> catRslts (v:list) t + catRslts list [] = Right $ reverse list diff -Nru cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Network/HTTP/MD5Aux.hs cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Network/HTTP/MD5Aux.hs --- cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Network/HTTP/MD5Aux.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Network/HTTP/MD5Aux.hs 2016-02-09 22:34:53.000000000 +0000 @@ -0,0 +1,343 @@ +module Network.HTTP.MD5Aux + (md5, md5s, md5i, + MD5(..), ABCD(..), + Zord64, Str(..), BoolList(..), WordList(..)) where + +import Data.Char (ord, chr) +import Data.Bits (rotateL, shiftL, shiftR, (.&.), (.|.), xor, complement) +import Data.Word (Word32, Word64) + +rotL :: Word32 -> Int -> Word32 +rotL x = rotateL x + +type Zord64 = Word64 + +-- ===================== TYPES AND CLASS DEFINTIONS ======================== + + +type XYZ = (Word32, Word32, Word32) +type Rotation = Int +newtype ABCD = ABCD (Word32, Word32, Word32, Word32) deriving (Eq, Show) +newtype Str = Str String +newtype BoolList = BoolList [Bool] +newtype WordList = WordList ([Word32], Word64) + +-- Anything we want to work out the MD5 of must be an instance of class MD5 + +class MD5 a where + get_next :: a -> ([Word32], Int, a) -- get the next blocks worth + -- \ \ \------ the rest of the input + -- \ \--------- the number of bits returned + -- \--------------- the bits returned in 32bit words + len_pad :: Word64 -> a -> a -- append the padding and length + finished :: a -> Bool -- Have we run out of input yet? 
+ + +-- Mainly exists because it's fairly easy to do MD5s on input where the +-- length is not a multiple of 8 + +instance MD5 BoolList where + get_next (BoolList s) = (bools_to_word32s ys, length ys, BoolList zs) + where (ys, zs) = splitAt 512 s + len_pad l (BoolList bs) + = BoolList (bs ++ [True] + ++ replicate (fromIntegral $ (447 - l) .&. 511) False + ++ [l .&. (shiftL 1 x) > 0 | x <- (mangle [0..63])] + ) + where mangle [] = [] + mangle xs = reverse ys ++ mangle zs + where (ys, zs) = splitAt 8 xs + finished (BoolList s) = s == [] + + +-- The string instance is fairly straightforward + +instance MD5 Str where + get_next (Str s) = (string_to_word32s ys, 8 * length ys, Str zs) + where (ys, zs) = splitAt 64 s + len_pad c64 (Str s) = Str (s ++ padding ++ l) + where padding = '\128':replicate (fromIntegral zeros) '\000' + zeros = shiftR ((440 - c64) .&. 511) 3 + l = length_to_chars 8 c64 + finished (Str s) = s == "" + + +-- YA instance that is believed will be useful + +instance MD5 WordList where + get_next (WordList (ws, l)) = (xs, fromIntegral taken, WordList (ys, l - taken)) + where (xs, ys) = splitAt 16 ws + taken = if l > 511 then 512 else l .&. 511 + len_pad c64 (WordList (ws, l)) = WordList (beginning ++ nextish ++ blanks ++ size, newlen) + where beginning = if length ws > 0 then start ++ lastone' else [] + start = init ws + lastone = last ws + offset = c64 .&. 31 + lastone' = [if offset > 0 then lastone + theone else lastone] + theone = shiftL (shiftR 128 (fromIntegral $ offset .&. 7)) + (fromIntegral $ offset .&. (31 - 7)) + nextish = if offset == 0 then [128] else [] + c64' = c64 + (32 - offset) + num_blanks = (fromIntegral $ shiftR ((448 - c64') .&. 511) 5) + blanks = replicate num_blanks 0 + lowsize = fromIntegral $ c64 .&. (shiftL 1 32 - 1) + topsize = fromIntegral $ shiftR c64 32 + size = [lowsize, topsize] + newlen = l .&. (complement 511) + + if c64 .&. 
511 >= 448 then 1024 else 512 + finished (WordList (_, z)) = z == 0 + + +instance Num ABCD where + ABCD (a1, b1, c1, d1) + ABCD (a2, b2, c2, d2) = ABCD (a1 + a2, b1 + b2, c1 + c2, d1 + d2) + + (-) = error "(-){ABCD}: no instance method defined" + (*) = error "(*){ABCD}: no instance method defined" + signum = error "signum{ABCD}: no instance method defined" + fromInteger = error "fromInteger{ABCD}: no instance method defined" + abs = error "abs{ABCD}: no instance method defined" +-- ===================== EXPORTED FUNCTIONS ======================== + + +-- The simplest function, gives you the MD5 of a string as 4-tuple of +-- 32bit words. + +md5 :: (MD5 a) => a -> ABCD +md5 m = md5_main False 0 magic_numbers m + + +-- Returns a hex number ala the md5sum program + +md5s :: (MD5 a) => a -> String +md5s = abcd_to_string . md5 + + +-- Returns an integer equivalent to the above hex number + +md5i :: (MD5 a) => a -> Integer +md5i = abcd_to_integer . md5 + + +-- ===================== THE CORE ALGORITHM ======================== + + +-- Decides what to do. The first argument indicates if padding has been +-- added. The second is the length mod 2^64 so far. Then we have the +-- starting state, the rest of the string and the final state. + +md5_main :: (MD5 a) => + Bool -- Have we added padding yet? 
+ -> Word64 -- The length so far mod 2^64 + -> ABCD -- The initial state + -> a -- The non-processed portion of the message + -> ABCD -- The resulting state +md5_main padded ilen abcd m + = if finished m && padded + then abcd + else md5_main padded' (ilen + 512) (abcd + abcd') m'' + where (m16, l, m') = get_next m + len' = ilen + fromIntegral l + ((m16', _, m''), padded') = if not padded && l < 512 + then (get_next $ len_pad len' m, True) + else ((m16, l, m'), padded) + abcd' = md5_do_block abcd m16' + + +-- md5_do_block processes a 512 bit block by calling md5_round 4 times to +-- apply each round with the correct constants and permutations of the +-- block + +md5_do_block :: ABCD -- Initial state + -> [Word32] -- The block to be processed - 16 32bit words + -> ABCD -- Resulting state +md5_do_block abcd0 w = abcd4 + where (r1, r2, r3, r4) = rounds + {- + map (\x -> w !! x) [1,6,11,0,5,10,15,4,9,14,3,8,13,2,7,12] + -- [(5 * x + 1) `mod` 16 | x <- [0..15]] + map (\x -> w !! x) [5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2] + -- [(3 * x + 5) `mod` 16 | x <- [0..15]] + map (\x -> w !! x) [0,7,14,5,12,3,10,1,8,15,6,13,4,11,2,9] + -- [(7 * x) `mod` 16 | x <- [0..15]] + -} + perm5 [c0,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15] + = [c1,c6,c11,c0,c5,c10,c15,c4,c9,c14,c3,c8,c13,c2,c7,c12] + perm5 _ = error "broke at perm5" + perm3 [c0,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15] + = [c5,c8,c11,c14,c1,c4,c7,c10,c13,c0,c3,c6,c9,c12,c15,c2] + perm3 _ = error "broke at perm3" + perm7 [c0,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15] + = [c0,c7,c14,c5,c12,c3,c10,c1,c8,c15,c6,c13,c4,c11,c2,c9] + perm7 _ = error "broke at perm7" + abcd1 = md5_round md5_f abcd0 w r1 + abcd2 = md5_round md5_g abcd1 (perm5 w) r2 + abcd3 = md5_round md5_h abcd2 (perm3 w) r3 + abcd4 = md5_round md5_i abcd3 (perm7 w) r4 + + +-- md5_round does one of the rounds. 
It takes an auxiliary function and foldls +-- (md5_inner_function f) to repeatedly apply it to the initial state with the +-- correct constants + +md5_round :: (XYZ -> Word32) -- Auxiliary function (F, G, H or I + -- for those of you with a copy of + -- the prayer book^W^WRFC) + -> ABCD -- Initial state + -> [Word32] -- The 16 32bit words of input + -> [(Rotation, Word32)] -- The list of 16 rotations and + -- additive constants + -> ABCD -- Resulting state +md5_round f abcd s ns = foldl (md5_inner_function f) abcd ns' + where ns' = zipWith (\x (y, z) -> (y, x + z)) s ns + + +-- Apply one of the functions md5_[fghi] and put the new ABCD together + +md5_inner_function :: (XYZ -> Word32) -- Auxiliary function + -> ABCD -- Initial state + -> (Rotation, Word32) -- The rotation and additive + -- constant (X[i] + T[j]) + -> ABCD -- Resulting state +md5_inner_function f (ABCD (a, b, c, d)) (s, ki) = ABCD (d, a', b, c) + where mid_a = a + f(b,c,d) + ki + rot_a = rotL mid_a s + a' = b + rot_a + + +-- The 4 auxiliary functions + +md5_f :: XYZ -> Word32 +md5_f (x, y, z) = z `xor` (x .&. (y `xor` z)) +{- optimised version of: (x .&. y) .|. ((complement x) .&. z) -} + +md5_g :: XYZ -> Word32 +md5_g (x, y, z) = md5_f (z, x, y) +{- was: (x .&. z) .|. (y .&. (complement z)) -} + +md5_h :: XYZ -> Word32 +md5_h (x, y, z) = x `xor` y `xor` z + +md5_i :: XYZ -> Word32 +md5_i (x, y, z) = y `xor` (x .|. (complement z)) + + +-- The magic numbers from the RFC. 
+ +magic_numbers :: ABCD +magic_numbers = ABCD (0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476) + + +-- The 4 lists of (rotation, additive constant) tuples, one for each round + +rounds :: ([(Rotation, Word32)], + [(Rotation, Word32)], + [(Rotation, Word32)], + [(Rotation, Word32)]) +rounds = (r1, r2, r3, r4) + where r1 = [(s11, 0xd76aa478), (s12, 0xe8c7b756), (s13, 0x242070db), + (s14, 0xc1bdceee), (s11, 0xf57c0faf), (s12, 0x4787c62a), + (s13, 0xa8304613), (s14, 0xfd469501), (s11, 0x698098d8), + (s12, 0x8b44f7af), (s13, 0xffff5bb1), (s14, 0x895cd7be), + (s11, 0x6b901122), (s12, 0xfd987193), (s13, 0xa679438e), + (s14, 0x49b40821)] + r2 = [(s21, 0xf61e2562), (s22, 0xc040b340), (s23, 0x265e5a51), + (s24, 0xe9b6c7aa), (s21, 0xd62f105d), (s22, 0x2441453), + (s23, 0xd8a1e681), (s24, 0xe7d3fbc8), (s21, 0x21e1cde6), + (s22, 0xc33707d6), (s23, 0xf4d50d87), (s24, 0x455a14ed), + (s21, 0xa9e3e905), (s22, 0xfcefa3f8), (s23, 0x676f02d9), + (s24, 0x8d2a4c8a)] + r3 = [(s31, 0xfffa3942), (s32, 0x8771f681), (s33, 0x6d9d6122), + (s34, 0xfde5380c), (s31, 0xa4beea44), (s32, 0x4bdecfa9), + (s33, 0xf6bb4b60), (s34, 0xbebfbc70), (s31, 0x289b7ec6), + (s32, 0xeaa127fa), (s33, 0xd4ef3085), (s34, 0x4881d05), + (s31, 0xd9d4d039), (s32, 0xe6db99e5), (s33, 0x1fa27cf8), + (s34, 0xc4ac5665)] + r4 = [(s41, 0xf4292244), (s42, 0x432aff97), (s43, 0xab9423a7), + (s44, 0xfc93a039), (s41, 0x655b59c3), (s42, 0x8f0ccc92), + (s43, 0xffeff47d), (s44, 0x85845dd1), (s41, 0x6fa87e4f), + (s42, 0xfe2ce6e0), (s43, 0xa3014314), (s44, 0x4e0811a1), + (s41, 0xf7537e82), (s42, 0xbd3af235), (s43, 0x2ad7d2bb), + (s44, 0xeb86d391)] + s11 = 7 + s12 = 12 + s13 = 17 + s14 = 22 + s21 = 5 + s22 = 9 + s23 = 14 + s24 = 20 + s31 = 4 + s32 = 11 + s33 = 16 + s34 = 23 + s41 = 6 + s42 = 10 + s43 = 15 + s44 = 21 + + +-- ===================== CONVERSION FUNCTIONS ======================== + + +-- Turn the 4 32 bit words into a string representing the hex number they +-- represent. 
+ +abcd_to_string :: ABCD -> String +abcd_to_string (ABCD (a,b,c,d)) = concat $ map display_32bits_as_hex [a,b,c,d] + + +-- Split the 32 bit word up, swap the chunks over and convert the numbers +-- to their hex equivalents. + +display_32bits_as_hex :: Word32 -> String +display_32bits_as_hex w = swap_pairs cs + where cs = map (\x -> getc $ (shiftR w (4*x)) .&. 15) [0..7] + getc n = (['0'..'9'] ++ ['a'..'f']) !! (fromIntegral n) + swap_pairs (x1:x2:xs) = x2:x1:swap_pairs xs + swap_pairs _ = [] + +-- Convert to an integer, performing endianness magic as we go + +abcd_to_integer :: ABCD -> Integer +abcd_to_integer (ABCD (a,b,c,d)) = rev_num a * 2^(96 :: Int) + + rev_num b * 2^(64 :: Int) + + rev_num c * 2^(32 :: Int) + + rev_num d + +rev_num :: Word32 -> Integer +rev_num i = toInteger j `mod` (2^(32 :: Int)) + -- NHC's fault ~~~~~~~~~~~~~~~~~~~~~ + where j = foldl (\so_far next -> shiftL so_far 8 + (shiftR i next .&. 255)) + 0 [0,8,16,24] + +-- Used to convert a 64 byte string to 16 32bit words + +string_to_word32s :: String -> [Word32] +string_to_word32s "" = [] +string_to_word32s ss = this:string_to_word32s ss' + where (s, ss') = splitAt 4 ss + this = foldr (\c w -> shiftL w 8 + (fromIntegral.ord) c) 0 s + + +-- Used to convert a list of 512 bools to 16 32bit words + +bools_to_word32s :: [Bool] -> [Word32] +bools_to_word32s [] = [] +bools_to_word32s bs = this:bools_to_word32s rest + where (bs1, bs1') = splitAt 8 bs + (bs2, bs2') = splitAt 8 bs1' + (bs3, bs3') = splitAt 8 bs2' + (bs4, rest) = splitAt 8 bs3' + this = boolss_to_word32 [bs1, bs2, bs3, bs4] + bools_to_word8 = foldl (\w b -> shiftL w 1 + if b then 1 else 0) 0 + boolss_to_word32 = foldr (\w8 w -> shiftL w 8 + bools_to_word8 w8) 0 + + +-- Convert the size into a list of characters used by the len_pad function +-- for strings + +length_to_chars :: Int -> Word64 -> String +length_to_chars 0 _ = [] +length_to_chars p n = this:length_to_chars (p-1) (shiftR n 8) + where this = chr $ fromIntegral $ n .&. 
255 + diff -Nru cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Network/HTTP/Proxy.hs cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Network/HTTP/Proxy.hs --- cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Network/HTTP/Proxy.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Network/HTTP/Proxy.hs 2016-02-09 22:34:53.000000000 +0000 @@ -0,0 +1,213 @@ +{-# LANGUAGE CPP #-} +----------------------------------------------------------------------------- +-- | +-- Module : Network.HTTP.Proxy +-- Copyright : See LICENSE file +-- License : BSD +-- +-- Maintainer : Ganesh Sittampalam +-- Stability : experimental +-- Portability : non-portable (not tested) +-- +-- Handling proxy server settings and their resolution. +-- +----------------------------------------------------------------------------- +module Network.HTTP.Proxy + ( Proxy(..) + , noProxy -- :: Proxy + , fetchProxy -- :: Bool -> IO Proxy + , parseProxy -- :: String -> Maybe Proxy + ) where + +{- +#if !defined(WIN32) && defined(mingw32_HOST_OS) +#define WIN32 1 +#endif +-} + +import Control.Monad ( when, mplus, join, liftM2 ) + +#if defined(WIN32) +import Network.HTTP.Base ( catchIO ) +import Control.Monad ( liftM ) +import Data.List ( isPrefixOf ) +#endif +import Network.HTTP.Utils ( dropWhileTail, chopAtDelim ) +import Network.HTTP.Auth +import Network.URI + ( URI(..), URIAuth(..), parseAbsoluteURI, unEscapeString ) +import System.IO ( hPutStrLn, stderr ) +import System.Environment + +{- +#if !defined(WIN32) && defined(mingw32_HOST_OS) +#define WIN32 1 +#endif +-} + +#if defined(WIN32) +import System.Win32.Types ( DWORD, HKEY ) +import System.Win32.Registry( hKEY_CURRENT_USER, regOpenKey, regCloseKey, regQueryValue, regQueryValueEx ) +import Control.Exception ( bracket ) +import Foreign ( toBool, Storable(peek, sizeOf), castPtr, alloca ) +#endif + +-- | HTTP proxies (or not) are represented via 'Proxy', specifying if a +-- proxy should be used for the request (see 
'Network.Browser.setProxy') +data Proxy + = NoProxy -- ^ Don't use a proxy. + | Proxy String + (Maybe Authority) -- ^ Use the proxy given. Should be of the + -- form "http:\/\/host:port", "host", "host:port", or "http:\/\/host". + -- Additionally, an optional 'Authority' for authentication with the proxy. + + +noProxy :: Proxy +noProxy = NoProxy + +-- | @envProxyString@ locates proxy server settings by looking +-- up env variable @HTTP_PROXY@ (or its lower-case equivalent.) +-- If no mapping found, returns @Nothing@. +envProxyString :: IO (Maybe String) +envProxyString = do + env <- getEnvironment + return (lookup "http_proxy" env `mplus` lookup "HTTP_PROXY" env) + +-- | @proxyString@ tries to locate the user's proxy server setting. +-- Consults environment variable, and in case of Windows, by querying +-- the Registry (cf. @registryProxyString@.) +proxyString :: IO (Maybe String) +proxyString = liftM2 mplus envProxyString windowsProxyString + +windowsProxyString :: IO (Maybe String) +#if !defined(WIN32) +windowsProxyString = return Nothing +#else +windowsProxyString = liftM (>>= parseWindowsProxy) registryProxyString + +registryProxyLoc :: (HKEY,String) +registryProxyLoc = (hive, path) + where + -- some sources say proxy settings should be at + -- HKEY_LOCAL_MACHINE\SOFTWARE\Policies\Microsoft\Windows + -- \CurrentVersion\Internet Settings\ProxyServer + -- but if the user sets them with IE connection panel they seem to + -- end up in the following place: + hive = hKEY_CURRENT_USER + path = "Software\\Microsoft\\Windows\\CurrentVersion\\Internet Settings" + +-- read proxy settings from the windows registry; this is just a best +-- effort and may not work on all setups. 
+registryProxyString :: IO (Maybe String) +registryProxyString = catchIO + (bracket (uncurry regOpenKey registryProxyLoc) regCloseKey $ \hkey -> do + enable <- fmap toBool $ regQueryValueDWORD hkey "ProxyEnable" + if enable + then fmap Just $ regQueryValue hkey (Just "ProxyServer") + else return Nothing) + (\_ -> return Nothing) + +-- the proxy string is in the format "http=x.x.x.x:yyyy;https=...;ftp=...;socks=..." +-- even though the following article indicates otherwise +-- https://support.microsoft.com/en-us/kb/819961 +-- +-- to be sure, parse strings where each entry in the ';'-separated list above is +-- either in the format "protocol=..." or "protocol://..." +-- +-- only return the first "http" of them, if it exists +parseWindowsProxy :: String -> Maybe String +parseWindowsProxy s = + case proxies of + x:_ -> Just x + _ -> Nothing + where + parts = split ';' s + pr x = case break (== '=') x of + (p, []) -> p -- might be in format http:// + (p, u) -> p ++ "://" ++ drop 1 u + + proxies = filter (isPrefixOf "http://") . map pr $ parts + + split :: Eq a => a -> [a] -> [[a]] + split _ [] = [] + split a xs = case break (a ==) xs of + (ys, []) -> [ys] + (ys, _:zs) -> ys:split a zs + +#endif + +-- | @fetchProxy flg@ gets the local proxy settings and parse the string +-- into a @Proxy@ value. If you want to be informed of ill-formed proxy +-- configuration strings, supply @True@ for @flg@. +-- Proxy settings are sourced from the @HTTP_PROXY@ environment variable, +-- and in the case of Windows platforms, by consulting IE/WinInet's proxy +-- setting in the Registry. 
+fetchProxy :: Bool -> IO Proxy +fetchProxy warnIfIllformed = do + mstr <- proxyString + case mstr of + Nothing -> return NoProxy + Just str -> case parseProxy str of + Just p -> return p + Nothing -> do + when warnIfIllformed $ System.IO.hPutStrLn System.IO.stderr $ unlines + [ "invalid http proxy uri: " ++ show str + , "proxy uri must be http with a hostname" + , "ignoring http proxy, trying a direct connection" + ] + return NoProxy + +-- | @parseProxy str@ translates a proxy server string into a @Proxy@ value; +-- returns @Nothing@ if not well-formed. +parseProxy :: String -> Maybe Proxy +parseProxy str = join + . fmap uri2proxy + $ parseHttpURI str + `mplus` parseHttpURI ("http://" ++ str) + where + parseHttpURI str' = + case parseAbsoluteURI str' of + Just uri@URI{uriAuthority = Just{}} -> Just (fixUserInfo uri) + _ -> Nothing + + -- Note: we need to be able to parse non-URIs like @\"wwwcache.example.com:80\"@ + -- which lack the @\"http://\"@ URI scheme. The problem is that + -- @\"wwwcache.example.com:80\"@ is in fact a valid URI but with scheme + -- @\"wwwcache.example.com:\"@, no authority part and a path of @\"80\"@. + -- + -- So our strategy is to try parsing as normal uri first and if it lacks the + -- 'uriAuthority' then we try parsing again with a @\"http://\"@ prefix. + -- + +-- | tidy up user portion, don't want the trailing "\@". 
+fixUserInfo :: URI -> URI +fixUserInfo uri = uri{ uriAuthority = f `fmap` uriAuthority uri } + where + f a@URIAuth{uriUserInfo=s} = a{uriUserInfo=dropWhileTail (=='@') s} + +-- +uri2proxy :: URI -> Maybe Proxy +uri2proxy uri@URI{ uriScheme = "http:" + , uriAuthority = Just (URIAuth auth' hst prt) + } = + Just (Proxy (hst ++ prt) auth) + where + auth = + case auth' of + [] -> Nothing + as -> Just (AuthBasic "" (unEscapeString usr) (unEscapeString pwd) uri) + where + (usr,pwd) = chopAtDelim ':' as + +uri2proxy _ = Nothing + +-- utilities +#if defined(WIN32) +regQueryValueDWORD :: HKEY -> String -> IO DWORD +regQueryValueDWORD hkey name = alloca $ \ptr -> do + -- TODO: this throws away the key type returned by regQueryValueEx + -- we should check it's what we expect instead + _ <- regQueryValueEx hkey name (castPtr ptr) (sizeOf (undefined :: DWORD)) + peek ptr + +#endif diff -Nru cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Network/HTTP/Stream.hs cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Network/HTTP/Stream.hs --- cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Network/HTTP/Stream.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Network/HTTP/Stream.hs 2016-02-09 22:34:53.000000000 +0000 @@ -0,0 +1,236 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Network.HTTP.Stream +-- Copyright : See LICENSE file +-- License : BSD +-- +-- Maintainer : Ganesh Sittampalam +-- Stability : experimental +-- Portability : non-portable (not tested) +-- +-- Transmitting HTTP requests and responses holding @String@ in their payload bodies. +-- This is one of the implementation modules for the "Network.HTTP" interface, representing +-- request and response content as @String@s and transmitting them in non-packed form +-- (cf. "Network.HTTP.HandleStream" and its use of @ByteString@s.) over 'Stream' handles. 
+-- It is mostly here for backwards compatibility, representing how requests and responses +-- were transmitted up until the 4.x releases of the HTTP package. +-- +-- For more detailed information about what the individual exports do, please consult +-- the documentation for "Network.HTTP". /Notice/ however that the functions here do +-- not perform any kind of normalization prior to transmission (or receipt); you are +-- responsible for doing any such yourself, or, if you prefer, just switch to using +-- "Network.HTTP" function instead. +-- +----------------------------------------------------------------------------- +module Network.HTTP.Stream + ( module Network.Stream + + , simpleHTTP -- :: Request_String -> IO (Result Response_String) + , simpleHTTP_ -- :: Stream s => s -> Request_String -> IO (Result Response_String) + , sendHTTP -- :: Stream s => s -> Request_String -> IO (Result Response_String) + , sendHTTP_notify -- :: Stream s => s -> Request_String -> IO () -> IO (Result Response_String) + , receiveHTTP -- :: Stream s => s -> IO (Result Request_String) + , respondHTTP -- :: Stream s => s -> Response_String -> IO () + + ) where + +----------------------------------------------------------------- +------------------ Imports -------------------------------------- +----------------------------------------------------------------- + +import Network.Stream +import Network.StreamDebugger (debugStream) +import Network.TCP (openTCPPort) +import Network.BufferType ( stringBufferOp ) + +import Network.HTTP.Base +import Network.HTTP.Headers +import Network.HTTP.Utils ( trim ) + +import Data.Char (toLower) +import Data.Maybe (fromMaybe) +import Control.Exception (onException) +import Control.Monad (when) + + +-- Turn on to enable HTTP traffic logging +debug :: Bool +debug = False + +-- File that HTTP traffic logs go to +httpLogFile :: String +httpLogFile = "http-debug.log" + +----------------------------------------------------------------- +------------------ Misc 
----------------------------------------- +----------------------------------------------------------------- + + +-- | Simple way to transmit a resource across a non-persistent connection. +simpleHTTP :: Request_String -> IO (Result Response_String) +simpleHTTP r = do + auth <- getAuth r + c <- openTCPPort (host auth) (fromMaybe 80 (port auth)) + simpleHTTP_ c r + +-- | Like 'simpleHTTP', but acting on an already opened stream. +simpleHTTP_ :: Stream s => s -> Request_String -> IO (Result Response_String) +simpleHTTP_ s r + | not debug = sendHTTP s r + | otherwise = do + s' <- debugStream httpLogFile s + sendHTTP s' r + +sendHTTP :: Stream s => s -> Request_String -> IO (Result Response_String) +sendHTTP conn rq = sendHTTP_notify conn rq (return ()) + +sendHTTP_notify :: Stream s => s -> Request_String -> IO () -> IO (Result Response_String) +sendHTTP_notify conn rq onSendComplete = do + when providedClose $ (closeOnEnd conn True) + onException (sendMain conn rq onSendComplete) + (close conn) + where + providedClose = findConnClose (rqHeaders rq) + +-- From RFC 2616, section 8.2.3: +-- 'Because of the presence of older implementations, the protocol allows +-- ambiguous situations in which a client may send "Expect: 100- +-- continue" without receiving either a 417 (Expectation Failed) status +-- or a 100 (Continue) status. Therefore, when a client sends this +-- header field to an origin server (possibly via a proxy) from which it +-- has never seen a 100 (Continue) status, the client SHOULD NOT wait +-- for an indefinite period before sending the request body.' +-- +-- Since we would wait forever, I have disabled use of 100-continue for now. 
+sendMain :: Stream s => s -> Request_String -> IO () -> IO (Result Response_String) +sendMain conn rqst onSendComplete = do + --let str = if null (rqBody rqst) + -- then show rqst + -- else show (insertHeader HdrExpect "100-continue" rqst) + -- TODO review throwing away of result + _ <- writeBlock conn (show rqst) + -- write body immediately, don't wait for 100 CONTINUE + -- TODO review throwing away of result + _ <- writeBlock conn (rqBody rqst) + onSendComplete + rsp <- getResponseHead conn + switchResponse conn True False rsp rqst + +-- reads and parses headers +getResponseHead :: Stream s => s -> IO (Result ResponseData) +getResponseHead conn = do + lor <- readTillEmpty1 stringBufferOp (readLine conn) + return $ lor >>= parseResponseHead + +-- Hmmm, this could go bad if we keep getting "100 Continue" +-- responses... Except this should never happen according +-- to the RFC. +switchResponse :: Stream s + => s + -> Bool {- allow retry? -} + -> Bool {- is body sent? -} + -> Result ResponseData + -> Request_String + -> IO (Result Response_String) +switchResponse _ _ _ (Left e) _ = return (Left e) + -- retry on connreset? + -- if we attempt to use the same socket then there is an excellent + -- chance that the socket is not in a completely closed state. +switchResponse conn allow_retry bdy_sent (Right (cd,rn,hdrs)) rqst = + case matchResponse (rqMethod rqst) cd of + Continue + | not bdy_sent -> {- Time to send the body -} + do { val <- writeBlock conn (rqBody rqst) + ; case val of + Left e -> return (Left e) + Right _ -> + do { rsp <- getResponseHead conn + ; switchResponse conn allow_retry True rsp rqst + } + } + | otherwise -> {- keep waiting -} + do { rsp <- getResponseHead conn + ; switchResponse conn allow_retry bdy_sent rsp rqst + } + + Retry -> {- Request with "Expect" header failed. 
+ Trouble is the request contains Expects + other than "100-Continue" -} + do { -- TODO review throwing away of result + _ <- writeBlock conn (show rqst ++ rqBody rqst) + ; rsp <- getResponseHead conn + ; switchResponse conn False bdy_sent rsp rqst + } + + Done -> do + when (findConnClose hdrs) + (closeOnEnd conn True) + return (Right $ Response cd rn hdrs "") + + DieHorribly str -> do + close conn + return $ responseParseError "sendHTTP" ("Invalid response: " ++ str) + + ExpectEntity -> + let tc = lookupHeader HdrTransferEncoding hdrs + cl = lookupHeader HdrContentLength hdrs + in + do { rslt <- case tc of + Nothing -> + case cl of + Just x -> linearTransfer (readBlock conn) (read x :: Int) + Nothing -> hopefulTransfer stringBufferOp {-null (++) []-} (readLine conn) [] + Just x -> + case map toLower (trim x) of + "chunked" -> chunkedTransfer stringBufferOp + (readLine conn) (readBlock conn) + _ -> uglyDeathTransfer "sendHTTP" + ; case rslt of + Left e -> close conn >> return (Left e) + Right (ftrs,bdy) -> do + when (findConnClose (hdrs++ftrs)) + (closeOnEnd conn True) + return (Right (Response cd rn (hdrs++ftrs) bdy)) + } + +-- | Receive and parse a HTTP request from the given Stream. Should be used +-- for server side interactions. +receiveHTTP :: Stream s => s -> IO (Result Request_String) +receiveHTTP conn = getRequestHead >>= processRequest + where + -- reads and parses headers + getRequestHead :: IO (Result RequestData) + getRequestHead = + do { lor <- readTillEmpty1 stringBufferOp (readLine conn) + ; return $ lor >>= parseRequestHead + } + + processRequest (Left e) = return $ Left e + processRequest (Right (rm,uri,hdrs)) = + do -- FIXME : Also handle 100-continue. 
+ let tc = lookupHeader HdrTransferEncoding hdrs + cl = lookupHeader HdrContentLength hdrs + rslt <- case tc of + Nothing -> + case cl of + Just x -> linearTransfer (readBlock conn) (read x :: Int) + Nothing -> return (Right ([], "")) -- hopefulTransfer "" + Just x -> + case map toLower (trim x) of + "chunked" -> chunkedTransfer stringBufferOp + (readLine conn) (readBlock conn) + _ -> uglyDeathTransfer "receiveHTTP" + + return $ do + (ftrs,bdy) <- rslt + return (Request uri rm (hdrs++ftrs) bdy) + +-- | Very simple function, send a HTTP response over the given stream. This +-- could be improved on to use different transfer types. +respondHTTP :: Stream s => s -> Response_String -> IO () +respondHTTP conn rsp = do -- TODO review throwing away of result + _ <- writeBlock conn (show rsp) + -- write body immediately, don't wait for 100 CONTINUE + -- TODO review throwing away of result + _ <- writeBlock conn (rspBody rsp) + return () diff -Nru cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Network/HTTP/Utils.hs cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Network/HTTP/Utils.hs --- cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Network/HTTP/Utils.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Network/HTTP/Utils.hs 2016-02-09 22:34:53.000000000 +0000 @@ -0,0 +1,111 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Network.HTTP.Utils +-- Copyright : See LICENSE file +-- License : BSD +-- +-- Maintainer : Ganesh Sittampalam +-- Stability : experimental +-- Portability : non-portable (not tested) +-- +-- Set of utility functions and definitions used by package modules. 
+-- +module Network.HTTP.Utils + ( trim -- :: String -> String + , trimL -- :: String -> String + , trimR -- :: String -> String + + , crlf -- :: String + , lf -- :: String + , sp -- :: String + + , split -- :: Eq a => a -> [a] -> Maybe ([a],[a]) + , splitBy -- :: Eq a => a -> [a] -> [[a]] + + , readsOne -- :: Read a => (a -> b) -> b -> String -> b + + , dropWhileTail -- :: (a -> Bool) -> [a] -> [a] + , chopAtDelim -- :: Eq a => a -> [a] -> ([a],[a]) + + ) where + +import Data.Char +import Data.List ( elemIndex ) +import Data.Maybe ( fromMaybe ) + +-- | @crlf@ is our beloved two-char line terminator. +crlf :: String +crlf = "\r\n" + +-- | @lf@ is a tolerated line terminator, per RFC 2616 section 19.3. +lf :: String +lf = "\n" + +-- | @sp@ lets you save typing one character. +sp :: String +sp = " " + +-- | @split delim ls@ splits a list into two parts, the @delim@ occurring +-- at the head of the second list. If @delim@ isn't in @ls@, @Nothing@ is +-- returned. +split :: Eq a => a -> [a] -> Maybe ([a],[a]) +split delim list = case delim `elemIndex` list of + Nothing -> Nothing + Just x -> Just $ splitAt x list + +-- | @trim str@ removes leading and trailing whitespace from @str@. +trim :: String -> String +trim xs = trimR (trimL xs) + +-- | @trimL str@ removes leading whitespace (as defined by 'Data.Char.isSpace') +-- from @str@. +trimL :: String -> String +trimL xs = dropWhile isSpace xs + +-- | @trimL str@ removes trailing whitespace (as defined by 'Data.Char.isSpace') +-- from @str@. +trimR :: String -> String +trimR str = fromMaybe "" $ foldr trimIt Nothing str + where + trimIt x (Just xs) = Just (x:xs) + trimIt x Nothing + | isSpace x = Nothing + | otherwise = Just [x] + +-- | @splitMany delim ls@ removes the delimiter @delim@ from @ls@. 
+splitBy :: Eq a => a -> [a] -> [[a]] +splitBy _ [] = [] +splitBy c xs = + case break (==c) xs of + (_,[]) -> [xs] + (as,_:bs) -> as : splitBy c bs + +-- | @readsOne f def str@ tries to 'read' @str@, taking +-- the first result and passing it to @f@. If the 'read' +-- doesn't succeed, return @def@. +readsOne :: Read a => (a -> b) -> b -> String -> b +readsOne f n str = + case reads str of + ((v,_):_) -> f v + _ -> n + + +-- | @dropWhileTail p ls@ chops off trailing elements from @ls@ +-- until @p@ returns @False@. +dropWhileTail :: (a -> Bool) -> [a] -> [a] +dropWhileTail f ls = + case foldr chop Nothing ls of { Just xs -> xs; Nothing -> [] } + where + chop x (Just xs) = Just (x:xs) + chop x _ + | f x = Nothing + | otherwise = Just [x] + +-- | @chopAtDelim elt ls@ breaks up @ls@ into two at first occurrence +-- of @elt@; @elt@ is elided too. If @elt@ does not occur, the second +-- list is empty and the first is equal to @ls@. +chopAtDelim :: Eq a => a -> [a] -> ([a],[a]) +chopAtDelim elt xs = + case break (==elt) xs of + (_,[]) -> (xs,[]) + (as,_:bs) -> (as,bs) diff -Nru cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Network/HTTP.hs cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Network/HTTP.hs --- cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Network/HTTP.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Network/HTTP.hs 2016-02-09 22:34:53.000000000 +0000 @@ -0,0 +1,265 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Network.HTTP +-- Copyright : See LICENSE file +-- License : BSD +-- +-- Maintainer : Ganesh Sittampalam +-- Stability : experimental +-- Portability : non-portable (not tested) +-- +-- The 'Network.HTTP' module provides a simple interface for sending and +-- receiving content over HTTP in Haskell. Here's how to fetch a document from +-- a URL and return it as a String: +-- +-- > +-- > simpleHTTP (getRequest "http://www.haskell.org/") >>= fmap (take 100) . 
getResponseBody +-- > -- fetch document and return it (as a 'String'.) +-- +-- Other functions let you control the submission and transfer of HTTP +-- 'Request's and 'Response's more carefully, letting you integrate the use +-- of 'Network.HTTP' functionality into your application. +-- +-- The module also exports the main types of the package, 'Request' and 'Response', +-- along with 'Header' and functions for working with these. +-- +-- The actual functionality is implemented by modules in the @Network.HTTP.*@ +-- namespace, letting you either use the default implementation here +-- by importing @Network.HTTP@ or, for more specific uses, selectively +-- import the modules in @Network.HTTP.*@. To wit, more than one kind of +-- representation of the bulk data that flows across a HTTP connection is +-- supported. (see "Network.HTTP.HandleStream".) +-- +-- /NOTE:/ The 'Request' send actions will normalize the @Request@ prior to transmission. +-- Normalization such as having the request path be in the expected form and, possibly, +-- introduce a default @Host:@ header if one isn't already present. +-- Normalization also takes the @"user:pass\@"@ portion out of the the URI, +-- if it was supplied, and converts it into @Authorization: Basic$ header. +-- If you do not +-- want the requests tampered with, but sent as-is, please import and use the +-- the "Network.HTTP.HandleStream" or "Network.HTTP.Stream" modules instead. They +-- export the same functions, but leaves construction and any normalization of +-- @Request@s to the user. +-- +-- /NOTE:/ This package only supports HTTP; it does not support HTTPS. +-- Attempts to use HTTPS result in an error. 
+----------------------------------------------------------------------------- +module Network.HTTP + ( module Network.HTTP.Base + , module Network.HTTP.Headers + + {- the functionality that the implementation modules, + Network.HTTP.HandleStream and Network.HTTP.Stream, + exposes: + -} + , simpleHTTP -- :: Request -> IO (Result Response) + , simpleHTTP_ -- :: Stream s => s -> Request -> IO (Result Response) + , sendHTTP -- :: Stream s => s -> Request -> IO (Result Response) + , sendHTTP_notify -- :: Stream s => s -> Request -> IO () -> IO (Result Response) + , receiveHTTP -- :: Stream s => s -> IO (Result Request) + , respondHTTP -- :: Stream s => s -> Response -> IO () + + , module Network.TCP + + , getRequest -- :: String -> Request_String + , headRequest -- :: String -> Request_String + , postRequest -- :: String -> Request_String + , postRequestWithBody -- :: String -> String -> String -> Request_String + + , getResponseBody -- :: Result (Request ty) -> IO ty + , getResponseCode -- :: Result (Request ty) -> IO ResponseCode + ) where + +----------------------------------------------------------------- +------------------ Imports -------------------------------------- +----------------------------------------------------------------- + +import Network.HTTP.Headers +import Network.HTTP.Base +import qualified Network.HTTP.HandleStream as S +-- old implementation: import Network.HTTP.Stream +import Network.TCP +import Network.Stream ( Result ) +import Network.URI ( parseURI ) + +import Data.Maybe ( fromMaybe ) + +{- + Note: if you switch over/back to using Network.HTTP.Stream here, you'll + have to wrap the results from 'openStream' as Connections via 'hstreamToConnection' + prior to delegating to the Network.HTTP.Stream functions. +-} + +-- | @simpleHTTP req@ transmits the 'Request' @req@ by opening a /direct/, non-persistent +-- connection to the HTTP server that @req@ is destined for, followed by transmitting +-- it and gathering up the response as a 'Result'. 
Prior to sending the request, +-- it is normalized (via 'normalizeRequest'). If you have to mediate the request +-- via an HTTP proxy, you will have to normalize the request yourself. Or switch to +-- using 'Network.Browser' instead. +-- +-- Examples: +-- +-- > simpleHTTP (getRequest "http://hackage.haskell.org/") +-- > simpleHTTP (getRequest "http://hackage.haskell.org:8012/") + +simpleHTTP :: (HStream ty) => Request ty -> IO (Result (Response ty)) +simpleHTTP r = do + auth <- getAuth r + failHTTPS (rqURI r) + c <- openStream (host auth) (fromMaybe 80 (port auth)) + let norm_r = normalizeRequest defaultNormalizeRequestOptions{normDoClose=True} r + simpleHTTP_ c norm_r + +-- | Identical to 'simpleHTTP', but acting on an already opened stream. +simpleHTTP_ :: HStream ty => HandleStream ty -> Request ty -> IO (Result (Response ty)) +simpleHTTP_ s r = do + let norm_r = normalizeRequest defaultNormalizeRequestOptions{normDoClose=True} r + S.sendHTTP s norm_r + +-- | @sendHTTP hStream httpRequest@ transmits @httpRequest@ (after normalization) over +-- @hStream@, but does not alter the status of the connection, nor request it to be +-- closed upon receiving the response. +sendHTTP :: HStream ty => HandleStream ty -> Request ty -> IO (Result (Response ty)) +sendHTTP conn rq = do + let norm_r = normalizeRequest defaultNormalizeRequestOptions rq + S.sendHTTP conn norm_r + +-- | @sendHTTP_notify hStream httpRequest action@ behaves like 'sendHTTP', but +-- lets you supply an IO @action@ to execute once the request has been successfully +-- transmitted over the connection. Useful when you want to set up tracing of +-- request transmission and its performance. 
+sendHTTP_notify :: HStream ty + => HandleStream ty + -> Request ty + -> IO () + -> IO (Result (Response ty)) +sendHTTP_notify conn rq onSendComplete = do + let norm_r = normalizeRequest defaultNormalizeRequestOptions rq + S.sendHTTP_notify conn norm_r onSendComplete + +-- | @receiveHTTP hStream@ reads a 'Request' from the 'HandleStream' @hStream@ +receiveHTTP :: HStream ty => HandleStream ty -> IO (Result (Request ty)) +receiveHTTP conn = S.receiveHTTP conn + +-- | @respondHTTP hStream httpResponse@ transmits an HTTP 'Response' over +-- the 'HandleStream' @hStream@. It could be used to implement simple web +-- server interactions, performing the dual role to 'sendHTTP'. +respondHTTP :: HStream ty => HandleStream ty -> Response ty -> IO () +respondHTTP conn rsp = S.respondHTTP conn rsp + + +-- | A convenience constructor for a GET 'Request'. +-- +-- If the URL isn\'t syntactically valid, the function raises an error. +getRequest + :: String -- ^URL to fetch + -> Request_String -- ^The constructed request +getRequest urlString = + case parseURI urlString of + Nothing -> error ("getRequest: Not a valid URL - " ++ urlString) + Just u -> mkRequest GET u + +-- | A convenience constructor for a HEAD 'Request'. +-- +-- If the URL isn\'t syntactically valid, the function raises an error. +headRequest + :: String -- ^URL to fetch + -> Request_String -- ^The constructed request +headRequest urlString = + case parseURI urlString of + Nothing -> error ("headRequest: Not a valid URL - " ++ urlString) + Just u -> mkRequest HEAD u + +-- | A convenience constructor for a POST 'Request'. +-- +-- If the URL isn\'t syntactically valid, the function raises an error. +postRequest + :: String -- ^URL to POST to + -> Request_String -- ^The constructed request +postRequest urlString = + case parseURI urlString of + Nothing -> error ("postRequest: Not a valid URL - " ++ urlString) + Just u -> mkRequest POST u + +-- | A convenience constructor for a POST 'Request'. 
+-- +-- It constructs a request and sets the body as well as +-- the Content-Type and Content-Length headers. The contents of the body +-- are forced to calculate the value for the Content-Length header. +-- +-- If the URL isn\'t syntactically valid, the function raises an error. +postRequestWithBody + :: String -- ^URL to POST to + -> String -- ^Content-Type of body + -> String -- ^The body of the request + -> Request_String -- ^The constructed request +postRequestWithBody urlString typ body = + case parseURI urlString of + Nothing -> error ("postRequestWithBody: Not a valid URL - " ++ urlString) + Just u -> setRequestBody (mkRequest POST u) (typ, body) + +-- | @getResponseBody response@ takes the response of a HTTP requesting action and +-- tries to extricate the body of the 'Response' @response@. If the request action +-- returned an error, an IO exception is raised. +getResponseBody :: Result (Response ty) -> IO ty +getResponseBody (Left err) = fail (show err) +getResponseBody (Right r) = return (rspBody r) + +-- | @getResponseBody response@ takes the response of a HTTP requesting action and +-- tries to extricate the status code of the 'Response' @response@. If the request action +-- returned an error, an IO exception is raised. +getResponseCode :: Result (Response ty) -> IO ResponseCode +getResponseCode (Left err) = fail (show err) +getResponseCode (Right r) = return (rspCode r) + + +-- +-- * TODO +-- - request pipelining +-- - https upgrade (includes full TLS, i.e. SSL, implementation) +-- - use of Stream classes will pay off +-- - consider C implementation of encryption\/decryption +-- - comm timeouts +-- - MIME & entity stuff (happening in separate module) +-- - support \"*\" uri-request-string for OPTIONS request method +-- +-- +-- * Header notes: +-- +-- [@Host@] +-- Required by HTTP\/1.1, if not supplied as part +-- of a request a default Host value is extracted +-- from the request-uri. 
+-- +-- [@Connection@] +-- If this header is present in any request or +-- response, and it's value is "close", then +-- the current request\/response is the last +-- to be allowed on that connection. +-- +-- [@Expect@] +-- Should a request contain a body, an Expect +-- header will be added to the request. The added +-- header has the value \"100-continue\". After +-- a 417 \"Expectation Failed\" response the request +-- is attempted again without this added Expect +-- header. +-- +-- [@TransferEncoding,ContentLength,...@] +-- if request is inconsistent with any of these +-- header values then you may not receive any response +-- or will generate an error response (probably 4xx). +-- +-- +-- * Response code notes +-- Some response codes induce special behaviour: +-- +-- [@1xx@] \"100 Continue\" will cause any unsent request body to be sent. +-- \"101 Upgrade\" will be returned. +-- Other 1xx responses are ignored. +-- +-- [@417@] The reason for this code is \"Expectation failed\", indicating +-- that the server did not like the Expect \"100-continue\" header +-- added to a request. Receipt of 417 will induce another +-- request attempt (without Expect header), unless no Expect header +-- had been added (in which case 417 response is returned). diff -Nru cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Network/StreamDebugger.hs cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Network/StreamDebugger.hs --- cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Network/StreamDebugger.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Network/StreamDebugger.hs 2016-02-09 22:34:53.000000000 +0000 @@ -0,0 +1,103 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Network.StreamDebugger +-- Copyright : See LICENSE file +-- License : BSD +-- +-- Maintainer : Ganesh Sittampalam +-- Stability : experimental +-- Portability : non-portable (not tested) +-- +-- Implements debugging of @Stream@s. 
Originally part of Gray's\/Bringert's +-- HTTP module. +-- +-- * Changes by Robin Bate Boerop : +-- - Created. Made minor formatting changes. +-- +----------------------------------------------------------------------------- +module Network.StreamDebugger + ( StreamDebugger + , debugStream + , debugByteStream + ) where + +import Network.Stream (Stream(..)) +import System.IO + ( Handle, hFlush, hPutStrLn, IOMode(AppendMode), hClose, openFile, + hSetBuffering, BufferMode(NoBuffering) + ) +import Network.TCP ( HandleStream, HStream, + StreamHooks(..), setStreamHooks, getStreamHooks ) + +-- | Allows stream logging. Refer to 'debugStream' below. +data StreamDebugger x + = Dbg Handle x + +instance (Stream x) => Stream (StreamDebugger x) where + readBlock (Dbg h x) n = + do val <- readBlock x n + hPutStrLn h ("--readBlock " ++ show n) + hPutStrLn h (show val) + return val + readLine (Dbg h x) = + do val <- readLine x + hPutStrLn h ("--readLine") + hPutStrLn h (show val) + return val + writeBlock (Dbg h x) str = + do val <- writeBlock x str + hPutStrLn h ("--writeBlock" ++ show str) + hPutStrLn h (show val) + return val + close (Dbg h x) = + do hPutStrLn h "--closing..." + hFlush h + close x + hPutStrLn h "--closed." + hClose h + closeOnEnd (Dbg h x) f = + do hPutStrLn h ("--close-on-end.." ++ show f) + hFlush h + closeOnEnd x f + +-- | Wraps a stream with logging I\/O. +-- The first argument is a filename which is opened in @AppendMode@. +debugStream :: (Stream a) => FilePath -> a -> IO (StreamDebugger a) +debugStream file stream = + do h <- openFile file AppendMode + hPutStrLn h ("File \"" ++ file ++ "\" opened for appending.") + return (Dbg h stream) + +debugByteStream :: HStream ty => FilePath -> HandleStream ty -> IO (HandleStream ty) +debugByteStream file stream = do + sh <- getStreamHooks stream + case sh of + Just h + | hook_name h == file -> return stream -- reuse the stream hooks. 
+ _ -> do + h <- openFile file AppendMode + hSetBuffering h NoBuffering + hPutStrLn h ("File \"" ++ file ++ "\" opened for appending.") + setStreamHooks stream (debugStreamHooks h file) + return stream + +debugStreamHooks :: HStream ty => Handle -> String -> StreamHooks ty +debugStreamHooks h nm = + StreamHooks + { hook_readBlock = \ toStr n val -> do + let eval = case val of { Left e -> Left e ; Right v -> Right $ toStr v} + hPutStrLn h ("--readBlock " ++ show n) + hPutStrLn h (either show show eval) + , hook_readLine = \ toStr val -> do + let eval = case val of { Left e -> Left e ; Right v -> Right $ toStr v} + hPutStrLn h ("--readLine") + hPutStrLn h (either show show eval) + , hook_writeBlock = \ toStr str val -> do + hPutStrLn h ("--writeBlock " ++ show val) + hPutStrLn h (toStr str) + , hook_close = do + hPutStrLn h "--closing..." + hFlush h + hClose h + , hook_name = nm + } diff -Nru cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Network/Stream.hs cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Network/Stream.hs --- cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Network/Stream.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Network/Stream.hs 2016-02-09 22:34:53.000000000 +0000 @@ -0,0 +1,91 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Network.Stream +-- Copyright : See LICENSE file +-- License : BSD +-- +-- Maintainer : Ganesh Sittampalam +-- Stability : experimental +-- Portability : non-portable (not tested) +-- +-- An library for creating abstract streams. Originally part of Gray's\/Bringert's +-- HTTP module. +-- +-- * Changes by Robin Bate Boerop : +-- - Removed unnecessary import statements. +-- - Moved Debug code to StreamDebugger.hs +-- - Moved Socket-related code to StreamSocket.hs. 
+-- +-- * Changes by Simon Foster: +-- - Split Network.HTTPmodule up into to separate +-- Network.[Stream,TCP,HTTP] modules +----------------------------------------------------------------------------- +module Network.Stream + ( Stream(..) + , ConnError(..) + , Result + , bindE + , fmapE + + , failParse -- :: String -> Result a + , failWith -- :: ConnError -> Result a + , failMisc -- :: String -> Result a + ) where + +import Control.Monad.Error + +data ConnError + = ErrorReset + | ErrorClosed + | ErrorParse String + | ErrorMisc String + deriving(Show,Eq) + +instance Error ConnError where + noMsg = strMsg "unknown error" + strMsg x = ErrorMisc x + +-- in GHC 7.0 the Monad instance for Error no longer +-- uses fail x = Left (strMsg x). failMisc is therefore +-- used instead. +failMisc :: String -> Result a +failMisc x = failWith (strMsg x) + +failParse :: String -> Result a +failParse x = failWith (ErrorParse x) + +failWith :: ConnError -> Result a +failWith x = Left x + +bindE :: Result a -> (a -> Result b) -> Result b +bindE (Left e) _ = Left e +bindE (Right v) f = f v + +fmapE :: (a -> Result b) -> IO (Result a) -> IO (Result b) +fmapE f a = do + x <- a + case x of + Left e -> return (Left e) + Right r -> return (f r) + +-- | This is the type returned by many exported network functions. +type Result a = Either ConnError {- error -} + a {- result -} + +-- | Streams should make layering of TLS protocol easier in future, +-- they allow reading/writing to files etc for debugging, +-- they allow use of protocols other than TCP/IP +-- and they allow customisation. +-- +-- Instances of this class should not trim +-- the input in any way, e.g. leave LF on line +-- endings etc. 
Unless that is exactly the behaviour +-- you want from your twisted instances ;) +class Stream x where + readLine :: x -> IO (Result String) + readBlock :: x -> Int -> IO (Result String) + writeBlock :: x -> String -> IO (Result ()) + close :: x -> IO () + closeOnEnd :: x -> Bool -> IO () + -- ^ True => shutdown the connection when response has been read / end-of-stream + -- has been reached. diff -Nru cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Network/StreamSocket.hs cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Network/StreamSocket.hs --- cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Network/StreamSocket.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Network/StreamSocket.hs 2016-02-09 22:34:53.000000000 +0000 @@ -0,0 +1,93 @@ +{-# OPTIONS_GHC -fno-warn-orphans #-} +----------------------------------------------------------------------------- +-- | +-- Module : Network.StreamSocket +-- Copyright : See LICENSE file +-- License : BSD +-- +-- Maintainer : Ganesh Sittampalam +-- Stability : experimental +-- Portability : non-portable (not tested) +-- +-- Socket Stream instance. Originally part of Gray's\/Bringert's HTTP module. +-- +-- * Changes by Robin Bate Boerop : +-- - Made dependencies explicit in import statements. +-- - Removed false dependencies in import statements. +-- - Created separate module for instance Stream Socket. 
+-- +-- * Changes by Simon Foster: +-- - Split module up into to sepearate Network.[Stream,TCP,HTTP] modules +-- +----------------------------------------------------------------------------- +module Network.StreamSocket + ( handleSocketError + , myrecv + ) where + +import Network.Stream + ( Stream(..), ConnError(ErrorReset, ErrorMisc), Result + ) +import Network.Socket + ( Socket, getSocketOption, shutdown, send, recv, sClose + , ShutdownCmd(ShutdownBoth), SocketOption(SoError) + ) + +import Network.HTTP.Base ( catchIO ) +import Control.Monad (liftM) +import Control.Exception as Exception (IOException) +import System.IO.Error (isEOFError) + +-- | Exception handler for socket operations. +handleSocketError :: Socket -> IOException -> IO (Result a) +handleSocketError sk e = + do se <- getSocketOption sk SoError + case se of + 0 -> ioError e + 10054 -> return $ Left ErrorReset -- reset + _ -> return $ Left $ ErrorMisc $ show se + +myrecv :: Socket -> Int -> IO String +myrecv sock len = + let handler e = if isEOFError e then return [] else ioError e + in catchIO (recv sock len) handler + +instance Stream Socket where + readBlock sk n = readBlockSocket sk n + readLine sk = readLineSocket sk + writeBlock sk str = writeBlockSocket sk str + close sk = do + -- This slams closed the connection (which is considered rude for TCP\/IP) + shutdown sk ShutdownBoth + sClose sk + closeOnEnd _sk _ = return () -- can't really deal with this, so do run the risk of leaking sockets here. + +readBlockSocket :: Socket -> Int -> IO (Result String) +readBlockSocket sk n = (liftM Right $ fn n) `catchIO` (handleSocketError sk) + where + fn x = do { str <- myrecv sk x + ; let len = length str + ; if len < x + then ( fn (x-len) >>= \more -> return (str++more) ) + else return str + } + +-- Use of the following function is discouraged. +-- The function reads in one character at a time, +-- which causes many calls to the kernel recv() +-- hence causes many context switches. 
+readLineSocket :: Socket -> IO (Result String) +readLineSocket sk = (liftM Right $ fn "") `catchIO` (handleSocketError sk) + where + fn str = do + c <- myrecv sk 1 -- like eating through a straw. + if null c || c == "\n" + then return (reverse str++c) + else fn (head c:str) + +writeBlockSocket :: Socket -> String -> IO (Result ()) +writeBlockSocket sk str = (liftM Right $ fn str) `catchIO` (handleSocketError sk) + where + fn [] = return () + fn x = send sk x >>= \i -> fn (drop i x) + diff -Nru cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Network/TCP.hs cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Network/TCP.hs --- cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Network/TCP.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Network/TCP.hs 2016-02-09 22:34:53.000000000 +0000 @@ -0,0 +1,414 @@ +{-# LANGUAGE TypeSynonymInstances #-} +----------------------------------------------------------------------------- +-- | +-- Module : Network.TCP +-- Copyright : See LICENSE file +-- License : BSD +-- +-- Maintainer : Ganesh Sittampalam +-- Stability : experimental +-- Portability : non-portable (not tested) +-- +-- Some utility functions for working with the Haskell @network@ package. Mostly +-- for internal use by the @Network.HTTP@ code. +-- +----------------------------------------------------------------------------- +module Network.TCP + ( Connection + , EndPoint(..) + , openTCPPort + , isConnectedTo + + , openTCPConnection + , socketConnection + , isTCPConnectedTo + + , HandleStream + , HStream(..) + + , StreamHooks(..) + , nullHooks + , setStreamHooks + , getStreamHooks + , hstreamToConnection + + ) where + +import Network.Socket + ( Socket, SocketOption(KeepAlive) + , SocketType(Stream), connect + , shutdown, ShutdownCmd(..) 
+ , sClose, setSocketOption, getPeerName + , socket, Family(AF_UNSPEC), defaultProtocol, getAddrInfo + , defaultHints, addrFamily, withSocketsDo + , addrSocketType, addrAddress + ) +import qualified Network.Stream as Stream + ( Stream(readBlock, readLine, writeBlock, close, closeOnEnd) ) +import Network.Stream + ( ConnError(..) + , Result + , failWith + , failMisc + ) +import Network.BufferType + +import Network.HTTP.Base ( catchIO ) +import Network.Socket ( socketToHandle ) + +import Data.Char ( toLower ) +import Data.Word ( Word8 ) +import Control.Concurrent +import Control.Exception ( onException ) +import Control.Monad ( liftM, when ) +import System.IO ( Handle, hFlush, IOMode(..), hClose ) +import System.IO.Error ( isEOFError ) + +import qualified Data.ByteString as Strict +import qualified Data.ByteString.Lazy as Lazy + +----------------------------------------------------------------- +------------------ TCP Connections ------------------------------ +----------------------------------------------------------------- + +-- | The 'Connection' newtype is a wrapper that allows us to make +-- connections an instance of the Stream class, without GHC extensions. +-- While this looks sort of like a generic reference to the transport +-- layer it is actually TCP specific, which can be seen in the +-- implementation of the 'Stream Connection' instance. +newtype Connection = Connection (HandleStream String) + +newtype HandleStream a = HandleStream {getRef :: MVar (Conn a)} + +data EndPoint = EndPoint { epHost :: String, epPort :: Int } + +instance Eq EndPoint where + EndPoint host1 port1 == EndPoint host2 port2 = + map toLower host1 == map toLower host2 && port1 == port2 + +data Conn a + = MkConn { connSock :: ! Socket + , connHandle :: Handle + , connBuffer :: BufferOp a + , connInput :: Maybe a + , connEndPoint :: EndPoint + , connHooks :: Maybe (StreamHooks a) + , connCloseEOF :: Bool -- True => close socket upon reaching end-of-stream. 
+ } + | ConnClosed + deriving(Eq) + +hstreamToConnection :: HandleStream String -> Connection +hstreamToConnection h = Connection h + +connHooks' :: Conn a -> Maybe (StreamHooks a) +connHooks' ConnClosed{} = Nothing +connHooks' x = connHooks x + +-- all of these are post-op hooks +data StreamHooks ty + = StreamHooks + { hook_readLine :: (ty -> String) -> Result ty -> IO () + , hook_readBlock :: (ty -> String) -> Int -> Result ty -> IO () + , hook_writeBlock :: (ty -> String) -> ty -> Result () -> IO () + , hook_close :: IO () + , hook_name :: String -- hack alert: name of the hook itself. + } + +instance Eq ty => Eq (StreamHooks ty) where + (==) _ _ = True + +nullHooks :: StreamHooks ty +nullHooks = StreamHooks + { hook_readLine = \ _ _ -> return () + , hook_readBlock = \ _ _ _ -> return () + , hook_writeBlock = \ _ _ _ -> return () + , hook_close = return () + , hook_name = "" + } + +setStreamHooks :: HandleStream ty -> StreamHooks ty -> IO () +setStreamHooks h sh = modifyMVar_ (getRef h) (\ c -> return c{connHooks=Just sh}) + +getStreamHooks :: HandleStream ty -> IO (Maybe (StreamHooks ty)) +getStreamHooks h = readMVar (getRef h) >>= return.connHooks + +-- | @HStream@ overloads the use of 'HandleStream's, letting you +-- overload the handle operations over the type that is communicated +-- across the handle. It comes in handy for @Network.HTTP@ 'Request' +-- and 'Response's as the payload representation isn't fixed, but overloaded. +-- +-- The library comes with instances for @ByteString@s and @String@, but +-- should you want to plug in your own payload representation, defining +-- your own @HStream@ instance _should_ be all that it takes. 
+-- +class BufferType bufType => HStream bufType where + openStream :: String -> Int -> IO (HandleStream bufType) + openSocketStream :: String -> Int -> Socket -> IO (HandleStream bufType) + readLine :: HandleStream bufType -> IO (Result bufType) + readBlock :: HandleStream bufType -> Int -> IO (Result bufType) + writeBlock :: HandleStream bufType -> bufType -> IO (Result ()) + close :: HandleStream bufType -> IO () + closeQuick :: HandleStream bufType -> IO () + closeOnEnd :: HandleStream bufType -> Bool -> IO () + +instance HStream Strict.ByteString where + openStream = openTCPConnection + openSocketStream = socketConnection + readBlock c n = readBlockBS c n + readLine c = readLineBS c + writeBlock c str = writeBlockBS c str + close c = closeIt c Strict.null True + closeQuick c = closeIt c Strict.null False + closeOnEnd c f = closeEOF c f + +instance HStream Lazy.ByteString where + openStream = \ a b -> openTCPConnection_ a b True + openSocketStream = \ a b c -> socketConnection_ a b c True + readBlock c n = readBlockBS c n + readLine c = readLineBS c + writeBlock c str = writeBlockBS c str + close c = closeIt c Lazy.null True + closeQuick c = closeIt c Lazy.null False + closeOnEnd c f = closeEOF c f + +instance Stream.Stream Connection where + readBlock (Connection c) = Network.TCP.readBlock c + readLine (Connection c) = Network.TCP.readLine c + writeBlock (Connection c) = Network.TCP.writeBlock c + close (Connection c) = Network.TCP.close c + closeOnEnd (Connection c) f = Network.TCP.closeEOF c f + +instance HStream String where + openStream = openTCPConnection + openSocketStream = socketConnection + readBlock ref n = readBlockBS ref n + + -- This function uses a buffer, at this time the buffer is just 1000 characters. + -- (however many bytes this is is left to the user to decypher) + readLine ref = readLineBS ref + -- The 'Connection' object allows no outward buffering, + -- since in general messages are serialised in their entirety. 
+ writeBlock ref str = writeBlockBS ref str -- (stringToBuf str) + + -- Closes a Connection. Connection will no longer + -- allow any of the other Stream functions. Notice that a Connection may close + -- at any time before a call to this function. This function is idempotent. + -- (I think the behaviour here is TCP specific) + close c = closeIt c null True + + -- Closes a Connection without munching the rest of the stream. + closeQuick c = closeIt c null False + + closeOnEnd c f = closeEOF c f + +-- | @openTCPPort uri port@ establishes a connection to a remote +-- host, using 'getHostByName' which possibly queries the DNS system, hence +-- may trigger a network connection. +openTCPPort :: String -> Int -> IO Connection +openTCPPort uri port = openTCPConnection uri port >>= return.Connection + +-- Add a "persistent" option? Current persistent is default. +-- Use "Result" type for synchronous exception reporting? +openTCPConnection :: BufferType ty => String -> Int -> IO (HandleStream ty) +openTCPConnection uri port = openTCPConnection_ uri port False + +openTCPConnection_ :: BufferType ty => String -> Int -> Bool -> IO (HandleStream ty) +openTCPConnection_ uri port stashInput = do + -- HACK: uri is sometimes obtained by calling Network.URI.uriRegName, and this includes + -- the surrounding square brackets for an RFC 2732 host like [::1]. It's not clear whether + -- it should, or whether all call sites should be using something different instead, but + -- the simplest short-term fix is to strip any surrounding square brackets here. + -- It shouldn't affect any as this is the only situation they can occur - see RFC 3986. 
+ let fixedUri = + case uri of + '[':(rest@(c:_)) | last rest == ']' + -> if c == 'v' || c == 'V' + then error $ "Unsupported post-IPv6 address " ++ uri + else init rest + _ -> uri + + + -- use withSocketsDo here in case the caller hasn't used it, which would make getAddrInfo fail on Windows + -- although withSocketsDo is supposed to wrap the entire program, in practice it is safe to use it locally + -- like this as it just does a once-only installation of a shutdown handler to run at program exit, + -- rather than actually shutting down after the action + addrinfos <- withSocketsDo $ getAddrInfo (Just $ defaultHints { addrFamily = AF_UNSPEC, addrSocketType = Stream }) (Just fixedUri) (Just . show $ port) + case addrinfos of + [] -> fail "openTCPConnection: getAddrInfo returned no address information" + (a:_) -> do + s <- socket (addrFamily a) Stream defaultProtocol + onException (do + setSocketOption s KeepAlive 1 + connect s (addrAddress a) + socketConnection_ fixedUri port s stashInput + ) (sClose s) + +-- | @socketConnection@, like @openConnection@ but using a pre-existing 'Socket'. +socketConnection :: BufferType ty + => String + -> Int + -> Socket + -> IO (HandleStream ty) +socketConnection hst port sock = socketConnection_ hst port sock False + +-- Internal function used to control the on-demand streaming of input +-- for /lazy/ streams. 
+socketConnection_ :: BufferType ty + => String + -> Int + -> Socket + -> Bool + -> IO (HandleStream ty) +socketConnection_ hst port sock stashInput = do + h <- socketToHandle sock ReadWriteMode + mb <- case stashInput of { True -> liftM Just $ buf_hGetContents bufferOps h; _ -> return Nothing } + let conn = MkConn + { connSock = sock + , connHandle = h + , connBuffer = bufferOps + , connInput = mb + , connEndPoint = EndPoint hst port + , connHooks = Nothing + , connCloseEOF = False + } + v <- newMVar conn + return (HandleStream v) + +closeConnection :: HStream a => HandleStream a -> IO Bool -> IO () +closeConnection ref readL = do + -- won't hold onto the lock for the duration + -- we are draining it...ToDo: have Connection + -- into a shutting-down state so that other + -- threads will simply back off if/when attempting + -- to also close it. + c <- readMVar (getRef ref) + closeConn c `catchIO` (\_ -> return ()) + modifyMVar_ (getRef ref) (\ _ -> return ConnClosed) + where + -- Be kind to peer & close gracefully. + closeConn ConnClosed = return () + closeConn conn = do + let sk = connSock conn + hFlush (connHandle conn) + shutdown sk ShutdownSend + suck readL + hClose (connHandle conn) + shutdown sk ShutdownReceive + sClose sk + + suck :: IO Bool -> IO () + suck rd = do + f <- rd + if f then return () else suck rd + +-- | Checks both that the underlying Socket is connected +-- and that the connection peer matches the given +-- host name (which is recorded locally). 
+isConnectedTo :: Connection -> EndPoint -> IO Bool +isConnectedTo (Connection conn) endPoint = isTCPConnectedTo conn endPoint + +isTCPConnectedTo :: HandleStream ty -> EndPoint -> IO Bool +isTCPConnectedTo conn endPoint = do + v <- readMVar (getRef conn) + case v of + ConnClosed -> return False + _ + | connEndPoint v == endPoint -> + catchIO (getPeerName (connSock v) >> return True) (const $ return False) + | otherwise -> return False + +readBlockBS :: HStream a => HandleStream a -> Int -> IO (Result a) +readBlockBS ref n = onNonClosedDo ref $ \ conn -> do + x <- bufferGetBlock ref n + maybe (return ()) + (\ h -> hook_readBlock h (buf_toStr $ connBuffer conn) n x) + (connHooks' conn) + return x + +-- This function uses a buffer, at this time the buffer is just 1000 characters. +-- (however many bytes this is is left for the user to decipher) +readLineBS :: HStream a => HandleStream a -> IO (Result a) +readLineBS ref = onNonClosedDo ref $ \ conn -> do + x <- bufferReadLine ref + maybe (return ()) + (\ h -> hook_readLine h (buf_toStr $ connBuffer conn) x) + (connHooks' conn) + return x + +-- The 'Connection' object allows no outward buffering, +-- since in general messages are serialised in their entirety. 
+writeBlockBS :: HandleStream a -> a -> IO (Result ()) +writeBlockBS ref b = onNonClosedDo ref $ \ conn -> do + x <- bufferPutBlock (connBuffer conn) (connHandle conn) b + maybe (return ()) + (\ h -> hook_writeBlock h (buf_toStr $ connBuffer conn) b x) + (connHooks' conn) + return x + +closeIt :: HStream ty => HandleStream ty -> (ty -> Bool) -> Bool -> IO () +closeIt c p b = do + closeConnection c (if b + then readLineBS c >>= \ x -> case x of { Right xs -> return (p xs); _ -> return True} + else return True) + conn <- readMVar (getRef c) + maybe (return ()) + (hook_close) + (connHooks' conn) + +closeEOF :: HandleStream ty -> Bool -> IO () +closeEOF c flg = modifyMVar_ (getRef c) (\ co -> return co{connCloseEOF=flg}) + +bufferGetBlock :: HStream a => HandleStream a -> Int -> IO (Result a) +bufferGetBlock ref n = onNonClosedDo ref $ \ conn -> do + case connInput conn of + Just c -> do + let (a,b) = buf_splitAt (connBuffer conn) n c + modifyMVar_ (getRef ref) (\ co -> return co{connInput=Just b}) + return (return a) + _ -> do + catchIO (buf_hGet (connBuffer conn) (connHandle conn) n >>= return.return) + (\ e -> + if isEOFError e + then do + when (connCloseEOF conn) $ catchIO (closeQuick ref) (\ _ -> return ()) + return (return (buf_empty (connBuffer conn))) + else return (failMisc (show e))) + +bufferPutBlock :: BufferOp a -> Handle -> a -> IO (Result ()) +bufferPutBlock ops h b = + catchIO (buf_hPut ops h b >> hFlush h >> return (return ())) + (\ e -> return (failMisc (show e))) + +bufferReadLine :: HStream a => HandleStream a -> IO (Result a) +bufferReadLine ref = onNonClosedDo ref $ \ conn -> do + case connInput conn of + Just c -> do + let (a,b0) = buf_span (connBuffer conn) (/='\n') c + let (newl,b1) = buf_splitAt (connBuffer conn) 1 b0 + modifyMVar_ (getRef ref) (\ co -> return co{connInput=Just b1}) + return (return (buf_append (connBuffer conn) a newl)) + _ -> catchIO + (buf_hGetLine (connBuffer conn) (connHandle conn) >>= + return . return . 
appendNL (connBuffer conn)) + (\ e -> + if isEOFError e + then do + when (connCloseEOF conn) $ catchIO (closeQuick ref) (\ _ -> return ()) + return (return (buf_empty (connBuffer conn))) + else return (failMisc (show e))) + where + -- yes, this s**ks.. _may_ have to be addressed if perf + -- suggests worthiness. + appendNL ops b = buf_snoc ops b nl + + nl :: Word8 + nl = fromIntegral (fromEnum '\n') + +onNonClosedDo :: HandleStream a -> (Conn a -> IO (Result b)) -> IO (Result b) +onNonClosedDo h act = do + x <- readMVar (getRef h) + case x of + ConnClosed{} -> return (failWith ErrorClosed) + _ -> act x + diff -Nru cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Setup.lhs cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Setup.lhs --- cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/Setup.lhs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/Setup.lhs 2016-02-09 22:34:53.000000000 +0000 @@ -0,0 +1,8 @@ +#!/usr/bin/env runghc + +> module Main where + +> import Distribution.Simple + +> main :: IO () +> main = defaultMain diff -Nru cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/test/Httpd.hs cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/test/Httpd.hs --- cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/test/Httpd.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/test/Httpd.hs 2016-02-09 22:34:53.000000000 +0000 @@ -0,0 +1,158 @@ +{-# LANGUAGE CPP #-} + +module Httpd + ( Request, Response, Server + , mkResponse + , reqMethod, reqURI, reqHeaders, reqBody + , shed +#ifdef WARP_TESTS + , warp +#endif + ) + where + +import Control.Applicative +import Control.Arrow ( (***) ) +import Control.DeepSeq +import Control.Monad +import Control.Monad.Trans ( liftIO ) +import qualified Data.ByteString as B +import qualified Data.ByteString.Lazy as BL +import qualified Data.ByteString.Char8 as BC +import qualified Data.ByteString.Lazy.Char8 as BLC +#ifdef WARP_TESTS +import qualified Data.CaseInsensitive as CI +#endif 
+import Data.Maybe ( fromJust ) +import Network.URI ( URI, parseRelativeReference ) + +import Network.Socket + ( getAddrInfo, AddrInfo, defaultHints, addrAddress, addrFamily + , addrFlags, addrSocketType, AddrInfoFlag(AI_PASSIVE), socket, Family(AF_UNSPEC,AF_INET6) + , defaultProtocol, SocketType(Stream), listen, setSocketOption, SocketOption(ReuseAddr) + ) +#ifdef WARP_TESTS +#if MIN_VERSION_network(2,4,0) +import Network.Socket ( bind ) +#else +import Network.Socket ( bindSocket, Socket, SockAddr ) +#endif +#endif + +import qualified Network.Shed.Httpd as Shed + ( Request, Response(Response), initServer + , reqMethod, reqURI, reqHeaders, reqBody + ) +#ifdef WARP_TESTS +#if !MIN_VERSION_wai(3,0,0) +import qualified Data.Conduit.Lazy as Warp +#endif + +import qualified Network.HTTP.Types as Warp + ( Status(..) ) +import qualified Network.Wai as Warp +import qualified Network.Wai.Handler.Warp as Warp + ( runSettingsSocket, defaultSettings, setPort ) +#endif + +data Request = Request + { + reqMethod :: String, + reqURI :: URI, + reqHeaders :: [(String, String)], + reqBody :: String + } + +data Response = Response + { + respStatus :: Int, + respHeaders :: [(String, String)], + respBody :: String + } + +mkResponse :: Int -> [(String, String)] -> String -> Response +mkResponse = Response + +type Server = Int -> (Request -> IO Response) -> IO () + +shed :: Server +shed port handler = + () <$ Shed.initServer + port + (liftM responseToShed . handler . requestFromShed) + where + responseToShed (Response status hdrs body) = + Shed.Response status hdrs body + chomp = reverse . strip '\r' . reverse + strip c (c':str) | c == c' = str + strip c str = str + requestFromShed request = + Request + { + reqMethod = Shed.reqMethod request, + reqURI = Shed.reqURI request, + reqHeaders = map (id *** chomp) $ Shed.reqHeaders request, + reqBody = Shed.reqBody request + } + +#if !MIN_VERSION_bytestring(0,10,0) +instance NFData B.ByteString where + rnf = rnf . 
B.length +#endif + +#ifdef WARP_TESTS +#if !MIN_VERSION_network(2,4,0) +bind :: Socket -> SockAddr -> IO () +bind = bindSocket +#endif + +warp :: Bool -> Server +warp ipv6 port handler = do + addrinfos <- getAddrInfo (Just $ defaultHints { addrFamily = AF_UNSPEC, addrSocketType = Stream }) + (Just $ if ipv6 then "::1" else "127.0.0.1") + (Just . show $ port) + case addrinfos of + [] -> fail "Couldn't obtain address information in warp" + (addri:_) -> do + sock <- socket (addrFamily addri) Stream defaultProtocol + setSocketOption sock ReuseAddr 1 + bind sock (addrAddress addri) + listen sock 5 +#if MIN_VERSION_wai(3,0,0) + Warp.runSettingsSocket (Warp.setPort port Warp.defaultSettings) sock $ \warpRequest warpRespond -> do + request <- requestFromWarp warpRequest + response <- handler request + warpRespond (responseToWarp response) +#else + Warp.runSettingsSocket (Warp.setPort port Warp.defaultSettings) sock $ \warpRequest -> do + request <- requestFromWarp warpRequest + response <- handler request + return (responseToWarp response) +#endif + where + responseToWarp (Response status hdrs body) = + Warp.responseLBS + (Warp.Status status B.empty) + (map headerToWarp hdrs) + (BLC.pack body) + headerToWarp (name, value) = (CI.mk (BC.pack name), BC.pack value) + headerFromWarp (name, value) = + (BC.unpack (CI.original name), BC.unpack value) + requestFromWarp request = do +#if MIN_VERSION_wai(3,0,1) + body <- fmap BLC.unpack $ Warp.strictRequestBody request +#else + body <- fmap BLC.unpack $ Warp.lazyRequestBody request + body `deepseq` return () +#endif + return $ + Request + { + reqMethod = BC.unpack (Warp.requestMethod request), + reqURI = fromJust . parseRelativeReference . + BC.unpack . 
Warp.rawPathInfo $ + request, + reqHeaders = map headerFromWarp (Warp.requestHeaders request), + reqBody = body + } +#endif diff -Nru cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/test/httpTests.hs cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/test/httpTests.hs --- cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/test/httpTests.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/test/httpTests.hs 2016-02-09 22:34:53.000000000 +0000 @@ -0,0 +1,668 @@ +{-# LANGUAGE ImplicitParams, ViewPatterns, NoMonomorphismRestriction, CPP #-} +import Control.Concurrent + +import Control.Applicative ((<$)) +import Control.Concurrent (threadDelay) +import Control.Exception (try) +import qualified Data.ByteString.Lazy.Char8 as BL (pack) +import Data.Char (isSpace) +import qualified Data.Digest.Pure.MD5 as MD5 (md5) +import Data.List.Split (splitOn) +import Data.Maybe (fromJust) +import System.IO.Error (userError) + +import qualified Httpd +import qualified UnitTests + +import Network.Browser +import Network.HTTP +import Network.HTTP.Base +import Network.HTTP.Auth +import Network.HTTP.Headers +import Network.Stream (Result) +import Network.URI (uriPath, parseURI) + +import System.Environment (getArgs) +import System.Info (os) +import System.IO (getChar) + +import Test.Framework (defaultMainWithArgs, testGroup) +import Test.Framework.Providers.HUnit +import Test.HUnit + + +basicGetRequest :: (?testUrl :: ServerAddress) => Assertion +basicGetRequest = do + response <- simpleHTTP (getRequest (?testUrl "/basic/get")) + code <- getResponseCode response + assertEqual "HTTP status code" (2, 0, 0) code + body <- getResponseBody response + assertEqual "Receiving expected response" "It works." 
body + +basicGetRequestLBS :: (?testUrl :: ServerAddress) => Assertion +basicGetRequestLBS = do + response <- simpleHTTP (mkRequest GET (fromJust (parseURI (?testUrl ("/basic/get"))))) + code <- getResponseCode response + assertEqual "HTTP status code" (2, 0, 0) code + body <- getResponseBody response + assertEqual "Receiving expected response" (BL.pack "It works.") body + +basicHeadRequest :: (?testUrl :: ServerAddress) => Assertion +basicHeadRequest = do + response <- simpleHTTP (headRequest (?testUrl "/basic/head")) + code <- getResponseCode response + assertEqual "HTTP status code" (2, 0, 0) code + body <- getResponseBody response + -- the body should be empty, since this is a HEAD request + assertEqual "Receiving expected response" "" body + +basicExample :: (?testUrl :: ServerAddress) => Assertion +basicExample = do + result <- + -- sample code from Network.HTTP haddock, with URL changed + -- Note there's also a copy of the example in the .cabal file + simpleHTTP (getRequest (?testUrl "/basic/example")) >>= fmap (take 100) . 
getResponseBody + assertEqual "Receiving expected response" (take 100 haskellOrgText) result + +secureGetRequest :: (?secureTestUrl :: ServerAddress) => Assertion +secureGetRequest = do + response <- try $ simpleHTTP (getRequest (?secureTestUrl "/anything")) + assertEqual "Threw expected exception" + (Left (userError "https not supported")) + (fmap show response) -- fmap show because Response isn't in Eq + +basicPostRequest :: (?testUrl :: ServerAddress) => Assertion +basicPostRequest = do + let sendBody = "body" + response <- simpleHTTP $ postRequestWithBody (?testUrl "/basic/post") + "text/plain" + sendBody + code <- getResponseCode response + assertEqual "HTTP status code" (2, 0, 0) code + body <- getResponseBody response + assertEqual "Receiving expected response" + (show (Just "text/plain", Just "4", sendBody)) + body + +userpwAuthFailure :: (?baduserpwUrl :: ServerAddress) => Assertion +userpwAuthFailure = do + response <- simpleHTTP (getRequest (?baduserpwUrl "/auth/basic")) + code <- getResponseCode response + body <- getResponseBody response + assertEqual "HTTP status code" ((4, 0, 1), + "Just \"Basic dGVzdDp3cm9uZ3B3ZA==\"") (code, body) + -- in case of 401, the server returns the contents of the Authz header + +userpwAuthSuccess :: (?userpwUrl :: ServerAddress) => Assertion +userpwAuthSuccess = do + response <- simpleHTTP (getRequest (?userpwUrl "/auth/basic")) + code <- getResponseCode response + body <- getResponseBody response + assertEqual "Receiving expected response" ((2, 0, 0), "Here's the secret") (code, body) + +basicAuthFailure :: (?testUrl :: ServerAddress) => Assertion +basicAuthFailure = do + response <- simpleHTTP (getRequest (?testUrl "/auth/basic")) + code <- getResponseCode response + body <- getResponseBody response + assertEqual "HTTP status code" ((4, 0, 1), "Nothing") (code, body) + +credentialsBasic :: (?testUrl :: ServerAddress) => Authority +credentialsBasic = AuthBasic "Testing realm" "test" "password" + (fromJust . parseURI . 
?testUrl $ "/auth/basic") + +basicAuthSuccess :: (?testUrl :: ServerAddress) => Assertion +basicAuthSuccess = do + let req = getRequest (?testUrl "/auth/basic") + let authString = withAuthority credentialsBasic req + let reqWithAuth = req { rqHeaders = mkHeader HdrAuthorization authString:rqHeaders req } + response <- simpleHTTP reqWithAuth + code <- getResponseCode response + body <- getResponseBody response + assertEqual "Receiving expected response" ((2, 0, 0), "Here's the secret") (code, body) + +utf8URLEncode :: Assertion +utf8URLEncode = do + assertEqual "Normal URL" (urlEncode "what-a_mess.com") "what-a_mess.com" + assertEqual "Chinese URL" (urlEncode "好") "%E5%A5%BD" + assertEqual "Russian URL" (urlEncode "ололо") "%D0%BE%D0%BB%D0%BE%D0%BB%D0%BE" + +utf8URLDecode :: Assertion +utf8URLDecode = do + assertEqual "Normal URL" (urlDecode "what-a_mess.com") "what-a_mess.com" + assertEqual "Mixed URL" (urlDecode "UTFin进入-wow") "UTFin进入-wow" + assertEqual "Chinese URL" (urlDecode "%E5%A5%BD") "好" + assertEqual "Russian URL" (urlDecode "%D0%BE%D0%BB%D0%BE%D0%BB%D0%BE") "ололо" + +browserExample :: (?testUrl :: ServerAddress) => Assertion +browserExample = do + result <- + -- sample code from Network.Browser haddock, with URL changed + -- Note there's also a copy of the example in the .cabal file + do + (_, rsp) + <- Network.Browser.browse $ do + setAllowRedirects True -- handle HTTP redirects + request $ getRequest (?testUrl "/browser/example") + return (take 100 (rspBody rsp)) + assertEqual "Receiving expected response" (take 100 haskellOrgText) result + +-- A vanilla HTTP request using Browser shouln't send a cookie header +browserNoCookie :: (?testUrl :: ServerAddress) => Assertion +browserNoCookie = do + (_, response) <- browse $ do + setOutHandler (const $ return ()) + request $ getRequest (?testUrl "/browser/no-cookie") + let code = rspCode response + assertEqual "HTTP status code" (2, 0, 0) code + + +-- Regression test +-- * Browser sends vanilla request to 
server +-- * Server sets one cookie "hello=world" +-- * Browser sends a second request +-- +-- Expected: Server gets single cookie with "hello=world" +-- Actual: Server gets 3 extra cookies, which are actually cookie attributes: +-- "$Version=0;hello=world;$Domain=localhost:8080\r" +browserOneCookie :: (?testUrl :: ServerAddress) => Assertion +browserOneCookie = do + (_, response) <- browse $ do + setOutHandler (const $ return ()) + -- This first requests returns a single Set-Cookie: hello=world + _ <- request $ getRequest (?testUrl "/browser/one-cookie/1") + + -- This second request should send a single Cookie: hello=world + request $ getRequest (?testUrl "/browser/one-cookie/2") + let body = rspBody response + assertEqual "Receiving expected response" "" body + let code = rspCode response + assertEqual "HTTP status code" (2, 0, 0) code + +browserTwoCookies :: (?testUrl :: ServerAddress) => Assertion +browserTwoCookies = do + (_, response) <- browse $ do + setOutHandler (const $ return ()) + -- This first request returns two cookies + _ <- request $ getRequest (?testUrl "/browser/two-cookies/1") + + -- This second request should send them back + request $ getRequest (?testUrl "/browser/two-cookies/2") + let body = rspBody response + assertEqual "Receiving expected response" "" body + let code = rspCode response + assertEqual "HTTP status code" (2, 0, 0) code + + +browserFollowsRedirect :: (?testUrl :: ServerAddress) => Int -> Assertion +browserFollowsRedirect n = do + (_, response) <- browse $ do + setOutHandler (const $ return ()) + request $ getRequest (?testUrl "/browser/redirect/relative/" ++ show n ++ "/basic/get") + assertEqual "Receiving expected response from server" + ((2, 0, 0), "It works.") + (rspCode response, rspBody response) + +browserReturnsRedirect :: (?testUrl :: ServerAddress) => Int -> Assertion +browserReturnsRedirect n = do + (_, response) <- browse $ do + setOutHandler (const $ return ()) + request $ getRequest (?testUrl 
"/browser/redirect/relative/" ++ show n ++ "/basic/get") + assertEqual "Receiving expected response from server" + ((n `div` 100, n `mod` 100 `div` 10, n `mod` 10), "") + (rspCode response, rspBody response) + +authGenBasic _ "Testing realm" = return $ Just ("test", "password") +authGenBasic _ realm = fail $ "Unexpected realm " ++ realm + +browserBasicAuth :: (?testUrl :: ServerAddress) => Assertion +browserBasicAuth = do + (_, response) <- browse $ do + setOutHandler (const $ return ()) + + setAuthorityGen authGenBasic + + request $ getRequest (?testUrl "/auth/basic") + + assertEqual "Receiving expected response from server" + ((2, 0, 0), "Here's the secret") + (rspCode response, rspBody response) + +authGenDigest _ "Digest testing realm" = return $ Just ("test", "digestpassword") +authGenDigest _ realm = fail $ "Unexpected digest realm " ++ realm + +browserDigestAuth :: (?testUrl :: ServerAddress) => Assertion +browserDigestAuth = do + (_, response) <- browse $ do + setOutHandler (const $ return ()) + + setAuthorityGen authGenDigest + + request $ getRequest (?testUrl "/auth/digest") + + assertEqual "Receiving expected response from server" + ((2, 0, 0), "Here's the digest secret") + (rspCode response, rspBody response) + + + +browserAlt :: (?altTestUrl :: ServerAddress) => Assertion +browserAlt = do + (response) <- browse $ do + + setOutHandler (const $ return ()) + + (_, response1) <- request $ getRequest (?altTestUrl "/basic/get") + + return response1 + + assertEqual "Receiving expected response from alternate server" + ((2, 0, 0), "This is the alternate server.") + (rspCode response, rspBody response) + +-- test that requests to multiple servers on the same host +-- don't get confused with each other +browserBoth :: (?testUrl :: ServerAddress, ?altTestUrl :: ServerAddress) => Assertion +browserBoth = do + (response1, response2) <- browse $ do + setOutHandler (const $ return ()) + + (_, response1) <- request $ getRequest (?testUrl "/basic/get") + (_, response2) 
<- request $ getRequest (?altTestUrl "/basic/get") + + return (response1, response2) + + assertEqual "Receiving expected response from main server" + ((2, 0, 0), "It works.") + (rspCode response1, rspBody response1) + + assertEqual "Receiving expected response from alternate server" + ((2, 0, 0), "This is the alternate server.") + (rspCode response2, rspBody response2) + +-- test that requests to multiple servers on the same host +-- don't get confused with each other +browserBothReversed :: (?testUrl :: ServerAddress, ?altTestUrl :: ServerAddress) => Assertion +browserBothReversed = do + (response1, response2) <- browse $ do + setOutHandler (const $ return ()) + + (_, response2) <- request $ getRequest (?altTestUrl "/basic/get") + (_, response1) <- request $ getRequest (?testUrl "/basic/get") + + return (response1, response2) + + assertEqual "Receiving expected response from main server" + ((2, 0, 0), "It works.") + (rspCode response1, rspBody response1) + + assertEqual "Receiving expected response from alternate server" + ((2, 0, 0), "This is the alternate server.") + (rspCode response2, rspBody response2) + +browserSecureRequest :: (?secureTestUrl :: ServerAddress) => Assertion +browserSecureRequest = do + res <- try $ browse $ do + setOutHandler (const $ return ()) + + request $ getRequest (?secureTestUrl "/anything") + + assertEqual "Threw expected exception" + (Left (userError "https not supported")) + (fmap show res) -- fmap show because Response isn't in Eq + +-- in case it tries to reuse the connection +browserSecureRequestAfterInsecure :: (?testUrl :: ServerAddress, ?secureTestUrl :: ServerAddress) => Assertion +browserSecureRequestAfterInsecure = do + res <- try $ browse $ do + setOutHandler (const $ return ()) + + request $ getRequest (?testUrl "/basic/get") + request $ getRequest (?secureTestUrl "/anything") + + assertEqual "Threw expected exception" + (Left (userError "https not supported")) + (fmap show res) -- fmap show because Response isn't in Eq 
+ +browserRedirectToSecure :: (?testUrl :: ServerAddress, ?secureTestUrl :: ServerAddress) => Assertion +browserRedirectToSecure = do + res <- try $ browse $ do + setOutHandler (const $ return ()) + setErrHandler fail + + request $ getRequest (?testUrl "/browser/redirect/secure/301/anything") + + assertEqual "Threw expected exception" + (Left (userError $ "Unable to handle redirect, unsupported scheme: " ++ ?secureTestUrl "/anything")) + (fmap show res) -- fmap show because Response isn't in Eq + +browserTwoRequests :: (?testUrl :: ServerAddress) => Assertion +browserTwoRequests = do + (response1, response2) <- browse $ do + setOutHandler (const $ return ()) + + (_, response1) <- request $ getRequest (?testUrl "/basic/get") + (_, response2) <- request $ getRequest (?testUrl "/basic/get2") + + return (response1, response2) + + assertEqual "Receiving expected response from main server" + ((2, 0, 0), "It works.") + (rspCode response1, rspBody response1) + + assertEqual "Receiving expected response from main server" + ((2, 0, 0), "It works (2).") + (rspCode response2, rspBody response2) + + +browserTwoRequestsAlt :: (?altTestUrl :: ServerAddress) => Assertion +browserTwoRequestsAlt = do + (response1, response2) <- browse $ do + + setOutHandler (const $ return ()) + + (_, response1) <- request $ getRequest (?altTestUrl "/basic/get") + (_, response2) <- request $ getRequest (?altTestUrl "/basic/get2") + + return (response1, response2) + + assertEqual "Receiving expected response from alternate server" + ((2, 0, 0), "This is the alternate server.") + (rspCode response1, rspBody response1) + + assertEqual "Receiving expected response from alternate server" + ((2, 0, 0), "This is the alternate server (2).") + (rspCode response2, rspBody response2) + +browserTwoRequestsBoth :: (?testUrl :: ServerAddress, ?altTestUrl :: ServerAddress) => Assertion +browserTwoRequestsBoth = do + (response1, response2, response3, response4) <- browse $ do + setOutHandler (const $ return ()) + + 
(_, response1) <- request $ getRequest (?testUrl "/basic/get") + (_, response2) <- request $ getRequest (?altTestUrl "/basic/get") + (_, response3) <- request $ getRequest (?testUrl "/basic/get2") + (_, response4) <- request $ getRequest (?altTestUrl "/basic/get2") + + return (response1, response2, response3, response4) + + assertEqual "Receiving expected response from main server" + ((2, 0, 0), "It works.") + (rspCode response1, rspBody response1) + + assertEqual "Receiving expected response from alternate server" + ((2, 0, 0), "This is the alternate server.") + (rspCode response2, rspBody response2) + + assertEqual "Receiving expected response from main server" + ((2, 0, 0), "It works (2).") + (rspCode response3, rspBody response3) + + assertEqual "Receiving expected response from alternate server" + ((2, 0, 0), "This is the alternate server (2).") + (rspCode response4, rspBody response4) + +hasPrefix :: String -> String -> Maybe String +hasPrefix [] ys = Just ys +hasPrefix (x:xs) (y:ys) | x == y = hasPrefix xs ys +hasPrefix _ _ = Nothing + +maybeRead :: Read a => String -> Maybe a +maybeRead s = + case reads s of + [(v, "")] -> Just v + _ -> Nothing + +splitFields = map (toPair '=' . trim isSpace) . splitOn "," + +toPair c str = case break (==c) str of + (left, _:right) -> (left, right) + _ -> error $ "No " ++ show c ++ " in " ++ str +trim f = dropWhile f . reverse . dropWhile f . reverse + +isSubsetOf xs ys = all (`elem` ys) xs + +-- first bits of result text from haskell.org (just to give some representative text) +haskellOrgText = + "\ +\\t\ +\\t\ +\\t\t\ +\\t\t\t\t" + +digestMatch + username realm password + nonce opaque + method relativeURI makeAbsolute + headers + = + common `isSubsetOf` headers && (relative `isSubsetOf` headers || absolute `isSubsetOf` headers) + where + common = [("username", show username), ("realm", show realm), ("nonce", show nonce), + ("opaque", show opaque)] + md5 = show . MD5.md5 . 
BL.pack + ha1 = md5 (username++":"++realm++":"++password) + ha2 uri = md5 (method++":"++uri) + response uri = md5 (ha1 ++ ":" ++ nonce ++ ":" ++ ha2 uri) + mkUncommon uri hash = [("uri", show uri), ("response", show hash)] + relative = mkUncommon relativeURI (response relativeURI) + absoluteURI = makeAbsolute relativeURI + absolute = mkUncommon absoluteURI (response absoluteURI) + +processRequest :: (?testUrl :: ServerAddress, ?secureTestUrl :: ServerAddress) + => Httpd.Request + -> IO Httpd.Response +processRequest req = do + case (Httpd.reqMethod req, Network.URI.uriPath (Httpd.reqURI req)) of + ("GET", "/basic/get") -> return $ Httpd.mkResponse 200 [] "It works." + ("GET", "/basic/get2") -> return $ Httpd.mkResponse 200 [] "It works (2)." + ("GET", "/basic/head") -> return $ Httpd.mkResponse 200 [] "Body for /basic/head." + ("HEAD", "/basic/head") -> return $ Httpd.mkResponse 200 [] "Body for /basic/head." + ("POST", "/basic/post") -> + let typ = lookup "Content-Type" (Httpd.reqHeaders req) + len = lookup "Content-Length" (Httpd.reqHeaders req) + body = Httpd.reqBody req + in return $ Httpd.mkResponse 200 [] (show (typ, len, body)) + + ("GET", "/basic/example") -> + return $ Httpd.mkResponse 200 [] haskellOrgText + + ("GET", "/auth/basic") -> + case lookup "Authorization" (Httpd.reqHeaders req) of + Just "Basic dGVzdDpwYXNzd29yZA==" -> return $ Httpd.mkResponse 200 [] "Here's the secret" + x -> return $ Httpd.mkResponse 401 [("WWW-Authenticate", "Basic realm=\"Testing realm\"")] (show x) + + ("GET", "/auth/digest") -> + case lookup "Authorization" (Httpd.reqHeaders req) of + Just (hasPrefix "Digest " -> Just (splitFields -> items)) + | digestMatch "test" "Digest testing realm" "digestpassword" + "87e4" "057d" + "GET" "/auth/digest" ?testUrl + items + -> return $ Httpd.mkResponse 200 [] "Here's the digest secret" + x -> return $ Httpd.mkResponse + 401 + [("WWW-Authenticate", + "Digest realm=\"Digest testing realm\", opaque=\"057d\", nonce=\"87e4\"")] + (show x) + 
+ ("GET", "/browser/example") -> + return $ Httpd.mkResponse 200 [] haskellOrgText + ("GET", "/browser/no-cookie") -> + case lookup "Cookie" (Httpd.reqHeaders req) of + Nothing -> return $ Httpd.mkResponse 200 [] "" + Just s -> return $ Httpd.mkResponse 500 [] s + ("GET", "/browser/one-cookie/1") -> + return $ Httpd.mkResponse 200 [("Set-Cookie", "hello=world")] "" + ("GET", "/browser/one-cookie/2") -> + case lookup "Cookie" (Httpd.reqHeaders req) of + Just "hello=world" -> return $ Httpd.mkResponse 200 [] "" + Just s -> return $ Httpd.mkResponse 500 [] s + Nothing -> return $ Httpd.mkResponse 500 [] (show $ Httpd.reqHeaders req) + ("GET", "/browser/two-cookies/1") -> + return $ Httpd.mkResponse 200 + [("Set-Cookie", "hello=world") + ,("Set-Cookie", "goodbye=cruelworld")] + "" + ("GET", "/browser/two-cookies/2") -> + case lookup "Cookie" (Httpd.reqHeaders req) of + -- TODO generalise the cookie parsing to allow for whitespace/ordering variations + Just "goodbye=cruelworld; hello=world" -> return $ Httpd.mkResponse 200 [] "" + Just s -> return $ Httpd.mkResponse 500 [] s + Nothing -> return $ Httpd.mkResponse 500 [] (show $ Httpd.reqHeaders req) + ("GET", hasPrefix "/browser/redirect/relative/" -> Just (break (=='/') -> (maybeRead -> Just n, rest))) -> + return $ Httpd.mkResponse n [("Location", rest)] "" + ("GET", hasPrefix "/browser/redirect/absolute/" -> Just (break (=='/') -> (maybeRead -> Just n, rest))) -> + return $ Httpd.mkResponse n [("Location", ?testUrl rest)] "" + ("GET", hasPrefix "/browser/redirect/secure/" -> Just (break (=='/') -> (maybeRead -> Just n, rest))) -> + return $ Httpd.mkResponse n [("Location", ?secureTestUrl rest)] "" + _ -> return $ Httpd.mkResponse 500 [] "Unknown request" + +altProcessRequest :: Httpd.Request -> IO Httpd.Response +altProcessRequest req = do + case (Httpd.reqMethod req, Network.URI.uriPath (Httpd.reqURI req)) of + ("GET", "/basic/get") -> return $ Httpd.mkResponse 200 [] "This is the alternate server." 
+ ("GET", "/basic/get2") -> return $ Httpd.mkResponse 200 [] "This is the alternate server (2)." + _ -> return $ Httpd.mkResponse 500 [] "Unknown request" + +maybeTestGroup True name xs = testGroup name xs +maybeTestGroup False name _ = testGroup name [] + +basicTests = + testGroup "Basic tests" + [ testCase "Basic GET request" basicGetRequest + , testCase "Basic GET request (lazy bytestring)" basicGetRequestLBS + , testCase "Network.HTTP example code" basicExample + , testCase "Secure GET request" secureGetRequest + , testCase "Basic POST request" basicPostRequest + , testCase "Basic HEAD request" basicHeadRequest + , testCase "URI user:pass Auth failure" userpwAuthFailure + , testCase "URI user:pass Auth success" userpwAuthSuccess + , testCase "Basic Auth failure" basicAuthFailure + , testCase "Basic Auth success" basicAuthSuccess + , testCase "UTF-8 urlEncode" utf8URLEncode + , testCase "UTF-8 urlDecode" utf8URLDecode + ] + +browserTests = + testGroup "Browser tests" + [ testGroup "Basic" + [ + testCase "Network.Browser example code" browserExample + , testCase "Two requests" browserTwoRequests + ] + , testGroup "Secure" + [ + testCase "Secure request" browserSecureRequest + , testCase "After insecure" browserSecureRequestAfterInsecure + , testCase "Redirection" browserRedirectToSecure + ] + , testGroup "Cookies" + [ testCase "No cookie header" browserNoCookie + , testCase "One cookie" browserOneCookie + , testCase "Two cookies" browserTwoCookies + ] + , testGroup "Redirection" + [ -- See http://en.wikipedia.org/wiki/List_of_HTTP_status_codes#3xx_Redirection + -- 300 Multiple Choices: client has to handle this + testCase "300" (browserReturnsRedirect 300) + -- 301 Moved Permanently: should follow + , testCase "301" (browserFollowsRedirect 301) + -- 302 Found: should follow + , testCase "302" (browserFollowsRedirect 302) + -- 303 See Other: should follow (directly for GETs) + , testCase "303" (browserFollowsRedirect 303) + -- 304 Not Modified: maybe Browser could 
do something intelligent based on + -- being given locally cached content and sending If-Modified-Since, but it + -- doesn't at the moment + , testCase "304" (browserReturnsRedirect 304) + -- 305 Use Proxy: test harness doesn't have a proxy (yet) + -- 306 Switch Proxy: obsolete + -- 307 Temporary Redirect: should follow + , testCase "307" (browserFollowsRedirect 307) + -- 308 Resume Incomplete: no support for Resumable HTTP so client has to handle this + , testCase "308" (browserReturnsRedirect 308) + ] + , testGroup "Authentication" + [ testCase "Basic" browserBasicAuth + , testCase "Digest" browserDigestAuth + ] + ] + +port80Tests = + testGroup "Multiple servers" + [ testCase "Alternate server" browserAlt + , testCase "Both servers" browserBoth + , testCase "Both servers (reversed)" browserBothReversed + , testCase "Two requests - alternate server" browserTwoRequestsAlt + , testCase "Two requests - both servers" browserTwoRequestsBoth + ] + +data InetFamily = IPv4 | IPv6 + +familyToLocalhost :: InetFamily -> String +familyToLocalhost IPv4 = "127.0.0.1" +familyToLocalhost IPv6 = "[::1]" + +urlRoot :: InetFamily -> String -> Int -> String +urlRoot fam userpw 80 = "http://" ++ userpw ++ familyToLocalhost fam +urlRoot fam userpw n = "http://" ++ userpw ++ familyToLocalhost fam ++ ":" ++ show n + +secureRoot :: InetFamily -> String -> Int -> String +secureRoot fam userpw 443 = "https://" ++ userpw ++ familyToLocalhost fam +secureRoot fam userpw n = "https://" ++ userpw ++ familyToLocalhost fam ++ ":" ++ show n + +type ServerAddress = String -> String + +httpAddress, httpsAddress :: InetFamily -> String -> Int -> ServerAddress +httpAddress fam userpw port p = urlRoot fam userpw port ++ p +httpsAddress fam userpw port p = secureRoot fam userpw port ++ p + +main :: IO () +main = do + args <- getArgs + + let servers = + [ ("httpd-shed", Httpd.shed, IPv4) +#ifdef WARP_TESTS + , ("warp.v6", Httpd.warp True, IPv6) + , ("warp.v4", Httpd.warp False, IPv4) +#endif + ] + 
basePortNum, altPortNum :: Int + basePortNum = 5812 + altPortNum = 80 + numberedServers = zip [basePortNum..] servers + + let setupNormalTests = do + flip mapM numberedServers $ \(portNum, (serverName, server, family)) -> do + let ?testUrl = httpAddress family "" portNum + ?userpwUrl = httpAddress family "test:password@" portNum + ?baduserpwUrl = httpAddress family "test:wrongpwd@" portNum + ?secureTestUrl = httpsAddress family "" portNum + _ <- forkIO $ server portNum processRequest + return $ testGroup serverName [basicTests, browserTests] + + let setupAltTests = do + let (portNum, (_, server,family)) = head numberedServers + let ?testUrl = httpAddress family "" portNum + ?altTestUrl = httpAddress family "" altPortNum + _ <- forkIO $ server altPortNum altProcessRequest + return port80Tests + + case args of + ["server"] -> do -- run only the harness servers for diagnostic/debug purposes + -- halt on any keypress + _ <- setupNormalTests + _ <- setupAltTests + _ <- getChar + return () + ("--withport80":args) -> do + normalTests <- setupNormalTests + altTests <- setupAltTests + _ <- threadDelay 1000000 -- Give the server time to start :-( + defaultMainWithArgs (UnitTests.unitTests ++ normalTests ++ [altTests]) args + args -> do -- run the test harness as normal + normalTests <- setupNormalTests + _ <- threadDelay 1000000 -- Give the server time to start :-( + defaultMainWithArgs (UnitTests.unitTests ++ normalTests) args diff -Nru cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/test/UnitTests.hs cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/test/UnitTests.hs --- cabal-install-1.22-1.22.6.0/src/HTTP-4000.3.3/test/UnitTests.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/HTTP-4000.3.3/test/UnitTests.hs 2016-02-09 22:34:53.000000000 +0000 @@ -0,0 +1,32 @@ +module UnitTests ( unitTests ) where + +import Network.HTTP.Base +import Network.URI + +import Data.Maybe ( fromJust ) + +import Test.Framework ( testGroup ) +import 
Test.Framework.Providers.HUnit +import Test.HUnit + +parseIPv4Address :: Assertion +parseIPv4Address = + assertEqual "127.0.0.1 address is recognised" + (Just (URIAuthority {user = Nothing, password = Nothing, host = "127.0.0.1", port = Just 5313})) + (parseURIAuthority (uriToAuthorityString (fromJust (parseURI "http://127.0.0.1:5313/foo")))) + + +parseIPv6Address :: Assertion +parseIPv6Address = + assertEqual "::1 address" + (Just (URIAuthority {user = Nothing, password = Nothing, host = "::1", port = Just 5313})) + (parseURIAuthority (uriToAuthorityString (fromJust (parseURI "http://[::1]:5313/foo")))) + +unitTests = + [testGroup "Unit tests" + [ testGroup "URI parsing" + [ testCase "Parse IPv4 address" parseIPv4Address + , testCase "Parse IPv6 address" parseIPv6Address + ] + ] + ] diff -Nru cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/CHANGELOG.markdown cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/CHANGELOG.markdown --- cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/CHANGELOG.markdown 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/CHANGELOG.markdown 2014-06-02 02:22:37.000000000 +0000 @@ -0,0 +1,24 @@ +2.2.1 +------- +* Provide MINIMAL pragmas for `MonadState`, `MonadWriter`, `MonadReader` +* Added a cyclic definition of `ask` in terms of `reader` for consistency with `get`/`put` vs. `state` and `tell` vs. `writer` +* Fix deprecation warnings caused by `transformers` 0.4 deprecating `ErrorT`. +* Added `Control.Monad.Except` in the style of the other `mtl` re-export modules + +2.2.0.1 +------- +* Fixed a bug caused by the change in how `transformers` 0.4 exports its data types. We will now export `runFooT` for each transformer again! + +2.2 +--- +* `transformers` 0.4 support +* Added instances for `ExceptT` +* Added `modify'` to `Control.Monad.State.*` + +2.1.3.1 +------- +* Avoid importing `Control.Monad.Instances` on GHC 7.8 to build without deprecation warnings. 
+ +2.1.3 +----- +* Removed the now-irrelevant `Error` constraint from the `MonadError` instance for `Either e`. diff -Nru cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/Cont/Class.hs cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/Cont/Class.hs --- cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/Cont/Class.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/Cont/Class.hs 2014-06-02 02:22:37.000000000 +0000 @@ -0,0 +1,134 @@ +{- | +Module : Control.Monad.Cont.Class +Copyright : (c) The University of Glasgow 2001, + (c) Jeff Newbern 2003-2007, + (c) Andriy Palamarchuk 2007 +License : BSD-style (see the file LICENSE) + +Maintainer : libraries@haskell.org +Stability : experimental +Portability : portable + +[Computation type:] Computations which can be interrupted and resumed. + +[Binding strategy:] Binding a function to a monadic value creates +a new continuation which uses the function as the continuation of the monadic +computation. + +[Useful for:] Complex control structures, error handling, +and creating co-routines. + +[Zero and plus:] None. + +[Example type:] @'Cont' r a@ + +The Continuation monad represents computations in continuation-passing style +(CPS). +In continuation-passing style function result is not returned, +but instead is passed to another function, +received as a parameter (continuation). +Computations are built up from sequences +of nested continuations, terminated by a final continuation (often @id@) +which produces the final result. +Since continuations are functions which represent the future of a computation, +manipulation of the continuation functions can achieve complex manipulations +of the future of the computation, +such as interrupting a computation in the middle, aborting a portion +of a computation, restarting a computation, and interleaving execution of +computations. +The Continuation monad adapts CPS to the structure of a monad. 
+ +Before using the Continuation monad, be sure that you have +a firm understanding of continuation-passing style +and that continuations represent the best solution to your particular +design problem. +Many algorithms which require continuations in other languages do not require +them in Haskell, due to Haskell's lazy semantics. +Abuse of the Continuation monad can produce code that is impossible +to understand and maintain. +-} + +module Control.Monad.Cont.Class ( + MonadCont(..), + ) where + +import Control.Monad.Trans.Cont (ContT) +import qualified Control.Monad.Trans.Cont as ContT +import Control.Monad.Trans.Error as Error +import Control.Monad.Trans.Except as Except +import Control.Monad.Trans.Identity as Identity +import Control.Monad.Trans.List as List +import Control.Monad.Trans.Maybe as Maybe +import Control.Monad.Trans.Reader as Reader +import Control.Monad.Trans.RWS.Lazy as LazyRWS +import Control.Monad.Trans.RWS.Strict as StrictRWS +import Control.Monad.Trans.State.Lazy as LazyState +import Control.Monad.Trans.State.Strict as StrictState +import Control.Monad.Trans.Writer.Lazy as LazyWriter +import Control.Monad.Trans.Writer.Strict as StrictWriter + +import Control.Monad +import Data.Monoid + +class Monad m => MonadCont m where + {- | @callCC@ (call-with-current-continuation) + calls a function with the current continuation as its argument. + Provides an escape continuation mechanism for use with Continuation monads. + Escape continuations allow to abort the current computation and return + a value immediately. + They achieve a similar effect to 'Control.Monad.Error.throwError' + and 'Control.Monad.Error.catchError' + within an 'Control.Monad.Error.Error' monad. + Advantage of this function over calling @return@ is that it makes + the continuation explicit, + allowing more flexibility and better control + (see examples in "Control.Monad.Cont"). + + The standard idiom used with @callCC@ is to provide a lambda-expression + to name the continuation. 
Then calling the named continuation anywhere + within its scope will escape from the computation, + even if it is many layers deep within nested computations. + -} + callCC :: ((a -> m b) -> m a) -> m a + +instance MonadCont (ContT r m) where + callCC = ContT.callCC + +-- --------------------------------------------------------------------------- +-- Instances for other mtl transformers + +instance (Error e, MonadCont m) => MonadCont (ErrorT e m) where + callCC = Error.liftCallCC callCC + +instance MonadCont m => MonadCont (ExceptT e m) where + callCC = Except.liftCallCC callCC + +instance MonadCont m => MonadCont (IdentityT m) where + callCC = Identity.liftCallCC callCC + +instance MonadCont m => MonadCont (ListT m) where + callCC = List.liftCallCC callCC + +instance MonadCont m => MonadCont (MaybeT m) where + callCC = Maybe.liftCallCC callCC + +instance MonadCont m => MonadCont (ReaderT r m) where + callCC = Reader.liftCallCC callCC + +instance (Monoid w, MonadCont m) => MonadCont (LazyRWS.RWST r w s m) where + callCC = LazyRWS.liftCallCC' callCC + +instance (Monoid w, MonadCont m) => MonadCont (StrictRWS.RWST r w s m) where + callCC = StrictRWS.liftCallCC' callCC + +instance MonadCont m => MonadCont (LazyState.StateT s m) where + callCC = LazyState.liftCallCC' callCC + +instance MonadCont m => MonadCont (StrictState.StateT s m) where + callCC = StrictState.liftCallCC' callCC + +instance (Monoid w, MonadCont m) => MonadCont (LazyWriter.WriterT w m) where + callCC = LazyWriter.liftCallCC callCC + +instance (Monoid w, MonadCont m) => MonadCont (StrictWriter.WriterT w m) where + callCC = StrictWriter.liftCallCC callCC diff -Nru cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/Cont.hs cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/Cont.hs --- cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/Cont.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/Cont.hs 2014-06-02 02:22:37.000000000 +0000 @@ -0,0 
+1,169 @@ +{- | +Module : Control.Monad.Cont +Copyright : (c) The University of Glasgow 2001, + (c) Jeff Newbern 2003-2007, + (c) Andriy Palamarchuk 2007 +License : BSD-style (see the file LICENSE) + +Maintainer : libraries@haskell.org +Stability : experimental +Portability : portable + +[Computation type:] Computations which can be interrupted and resumed. + +[Binding strategy:] Binding a function to a monadic value creates +a new continuation which uses the function as the continuation of the monadic +computation. + +[Useful for:] Complex control structures, error handling, +and creating co-routines. + +[Zero and plus:] None. + +[Example type:] @'Cont' r a@ + +The Continuation monad represents computations in continuation-passing style +(CPS). +In continuation-passing style function result is not returned, +but instead is passed to another function, +received as a parameter (continuation). +Computations are built up from sequences +of nested continuations, terminated by a final continuation (often @id@) +which produces the final result. +Since continuations are functions which represent the future of a computation, +manipulation of the continuation functions can achieve complex manipulations +of the future of the computation, +such as interrupting a computation in the middle, aborting a portion +of a computation, restarting a computation, and interleaving execution of +computations. +The Continuation monad adapts CPS to the structure of a monad. + +Before using the Continuation monad, be sure that you have +a firm understanding of continuation-passing style +and that continuations represent the best solution to your particular +design problem. +Many algorithms which require continuations in other languages do not require +them in Haskell, due to Haskell's lazy semantics. +Abuse of the Continuation monad can produce code that is impossible +to understand and maintain. 
+-} + +module Control.Monad.Cont ( + -- * MonadCont class + MonadCont(..), + -- * The Cont monad + Cont, + cont, + runCont, + mapCont, + withCont, + -- * The ContT monad transformer + ContT(ContT), + runContT, + mapContT, + withContT, + module Control.Monad, + module Control.Monad.Trans, + -- * Example 1: Simple Continuation Usage + -- $simpleContExample + + -- * Example 2: Using @callCC@ + -- $callCCExample + + -- * Example 3: Using @ContT@ Monad Transformer + -- $ContTExample + ) where + +import Control.Monad.Cont.Class + +import Control.Monad.Trans +import Control.Monad.Trans.Cont + +import Control.Monad + +{- $simpleContExample +Calculating length of a list continuation-style: + +>calculateLength :: [a] -> Cont r Int +>calculateLength l = return (length l) + +Here we use @calculateLength@ by making it to pass its result to @print@: + +>main = do +> runCont (calculateLength "123") print +> -- result: 3 + +It is possible to chain 'Cont' blocks with @>>=@. + +>double :: Int -> Cont r Int +>double n = return (n * 2) +> +>main = do +> runCont (calculateLength "123" >>= double) print +> -- result: 6 +-} + +{- $callCCExample +This example gives a taste of how escape continuations work, shows a typical +pattern for their usage. + +>-- Returns a string depending on the length of the name parameter. +>-- If the provided string is empty, returns an error. +>-- Otherwise, returns a welcome message. +>whatsYourName :: String -> String +>whatsYourName name = +> (`runCont` id) $ do -- 1 +> response <- callCC $ \exit -> do -- 2 +> validateName name exit -- 3 +> return $ "Welcome, " ++ name ++ "!" -- 4 +> return response -- 5 +> +>validateName name exit = do +> when (null name) (exit "You forgot to tell me your name!") + +Here is what this example does: + +(1) Runs an anonymous 'Cont' block and extracts value from it with +@(\`runCont\` id)@. Here @id@ is the continuation, passed to the @Cont@ block. 
+ +(1) Binds @response@ to the result of the following 'Control.Monad.Cont.Class.callCC' block, +binds @exit@ to the continuation. + +(1) Validates @name@. +This approach illustrates advantage of using 'Control.Monad.Cont.Class.callCC' over @return@. +We pass the continuation to @validateName@, +and interrupt execution of the @Cont@ block from /inside/ of @validateName@. + +(1) Returns the welcome message from the 'Control.Monad.Cont.Class.callCC' block. +This line is not executed if @validateName@ fails. + +(1) Returns from the @Cont@ block. +-} + +{-$ContTExample +'ContT' can be used to add continuation handling to other monads. +Here is an example how to combine it with @IO@ monad: + +>import Control.Monad.Cont +>import System.IO +> +>main = do +> hSetBuffering stdout NoBuffering +> runContT (callCC askString) reportResult +> +>askString :: (String -> ContT () IO String) -> ContT () IO String +>askString next = do +> liftIO $ putStrLn "Please enter a string" +> s <- liftIO $ getLine +> next s +> +>reportResult :: String -> IO () +>reportResult s = do +> putStrLn ("You entered: " ++ s) + +Action @askString@ requests user to enter a string, +and passes it to the continuation. +@askString@ takes as a parameter a continuation taking a string parameter, +and returning @IO ()@. +Compare its signature to 'runContT' definition. 
+-} diff -Nru cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/Error/Class.hs cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/Error/Class.hs --- cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/Error/Class.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/Error/Class.hs 2014-06-02 02:22:37.000000000 +0000 @@ -0,0 +1,164 @@ +{-# LANGUAGE CPP #-} +{-# LANGUAGE UndecidableInstances #-} + +{- | +Module : Control.Monad.Error.Class +Copyright : (c) Michael Weber 2001, + (c) Jeff Newbern 2003-2006, + (c) Andriy Palamarchuk 2006 + (c) Edward Kmett 2012 +License : BSD-style (see the file LICENSE) + +Maintainer : libraries@haskell.org +Stability : experimental +Portability : non-portable (multi-parameter type classes) + +[Computation type:] Computations which may fail or throw exceptions. + +[Binding strategy:] Failure records information about the cause\/location +of the failure. Failure values bypass the bound function, +other values are used as inputs to the bound function. + +[Useful for:] Building computations from sequences of functions that may fail +or using exception handling to structure error handling. + +[Zero and plus:] Zero is represented by an empty error and the plus operation +executes its second argument if the first fails. + +[Example type:] @'Either' 'String' a@ + +The Error monad (also called the Exception monad). 
+-} + +{- + Rendered by Michael Weber , + inspired by the Haskell Monad Template Library from + Andy Gill () +-} +module Control.Monad.Error.Class ( + Error(..), + MonadError(..), + ) where + +import Control.Monad.Trans.Except (Except, ExceptT) +import Control.Monad.Trans.Error (Error(..), ErrorT) +import qualified Control.Monad.Trans.Except as ExceptT (throwE, catchE) +import qualified Control.Monad.Trans.Error as ErrorT (throwError, catchError) +import Control.Monad.Trans.Identity as Identity +import Control.Monad.Trans.List as List +import Control.Monad.Trans.Maybe as Maybe +import Control.Monad.Trans.Reader as Reader +import Control.Monad.Trans.RWS.Lazy as LazyRWS +import Control.Monad.Trans.RWS.Strict as StrictRWS +import Control.Monad.Trans.State.Lazy as LazyState +import Control.Monad.Trans.State.Strict as StrictState +import Control.Monad.Trans.Writer.Lazy as LazyWriter +import Control.Monad.Trans.Writer.Strict as StrictWriter + +import Control.Monad.Trans.Class (lift) +import Control.Exception (IOException, catch, ioError) +import Control.Monad + +#if defined(__GLASGOW_HASKELL__) && __GLASGOW_HASKELL__ < 707 +import Control.Monad.Instances () +#endif + +import Data.Monoid +import Prelude (Either(..), (.), IO) + +{- | +The strategy of combining computations that can throw exceptions +by bypassing bound functions +from the point an exception is thrown to the point that it is handled. + +Is parameterized over the type of error information and +the monad type constructor. +It is common to use @'Data.Either' String@ as the monad type constructor +for an error monad in which error descriptions take the form of strings. +In that case and many other common cases the resulting monad is already defined +as an instance of the 'MonadError' class. +You can also define your own error type and\/or use a monad type constructor +other than @'Either' 'String'@ or @'Either' 'IOError'@. 
+In these cases you will have to explicitly define instances of the 'Error' +and\/or 'MonadError' classes. +-} +class (Monad m) => MonadError e m | m -> e where + -- | Is used within a monadic computation to begin exception processing. + throwError :: e -> m a + + {- | + A handler function to handle previous errors and return to normal execution. + A common idiom is: + + > do { action1; action2; action3 } `catchError` handler + + where the @action@ functions can call 'throwError'. + Note that @handler@ and the do-block must have the same return type. + -} + catchError :: m a -> (e -> m a) -> m a + +instance MonadError IOException IO where + throwError = ioError + catchError = catch + +-- --------------------------------------------------------------------------- +-- Our parameterizable error monad + +instance MonadError e (Either e) where + throwError = Left + Left l `catchError` h = h l + Right r `catchError` _ = Right r + +instance (Monad m, Error e) => MonadError e (ErrorT e m) where + throwError = ErrorT.throwError + catchError = ErrorT.catchError + +instance Monad m => MonadError e (ExceptT e m) where + throwError = ExceptT.throwE + catchError = ExceptT.catchE + +-- --------------------------------------------------------------------------- +-- Instances for other mtl transformers +-- +-- All of these instances need UndecidableInstances, +-- because they do not satisfy the coverage condition. + +instance MonadError e m => MonadError e (IdentityT m) where + throwError = lift . throwError + catchError = Identity.liftCatch catchError + +instance MonadError e m => MonadError e (ListT m) where + throwError = lift . throwError + catchError = List.liftCatch catchError + +instance MonadError e m => MonadError e (MaybeT m) where + throwError = lift . throwError + catchError = Maybe.liftCatch catchError + +instance MonadError e m => MonadError e (ReaderT r m) where + throwError = lift . 
throwError + catchError = Reader.liftCatch catchError + +instance (Monoid w, MonadError e m) => MonadError e (LazyRWS.RWST r w s m) where + throwError = lift . throwError + catchError = LazyRWS.liftCatch catchError + +instance (Monoid w, MonadError e m) => MonadError e (StrictRWS.RWST r w s m) where + throwError = lift . throwError + catchError = StrictRWS.liftCatch catchError + +instance MonadError e m => MonadError e (LazyState.StateT s m) where + throwError = lift . throwError + catchError = LazyState.liftCatch catchError + +instance MonadError e m => MonadError e (StrictState.StateT s m) where + throwError = lift . throwError + catchError = StrictState.liftCatch catchError + +instance (Monoid w, MonadError e m) => MonadError e (LazyWriter.WriterT w m) where + throwError = lift . throwError + catchError = LazyWriter.liftCatch catchError + +instance (Monoid w, MonadError e m) => MonadError e (StrictWriter.WriterT w m) where + throwError = lift . throwError + catchError = StrictWriter.liftCatch catchError diff -Nru cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/Error.hs cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/Error.hs --- cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/Error.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/Error.hs 2014-06-02 02:22:37.000000000 +0000 @@ -0,0 +1,151 @@ +{-# LANGUAGE CPP #-} +{- | +Module : Control.Monad.Error +Copyright : (c) Michael Weber 2001, + (c) Jeff Newbern 2003-2006, + (c) Andriy Palamarchuk 2006 +License : BSD-style (see the file LICENSE) + +Maintainer : libraries@haskell.org +Stability : experimental +Portability : non-portable (multi-parameter type classes) + +[Computation type:] Computations which may fail or throw exceptions. + +[Binding strategy:] Failure records information about the cause\/location +of the failure. Failure values bypass the bound function, +other values are used as inputs to the bound function. 
+ +[Useful for:] Building computations from sequences of functions that may fail +or using exception handling to structure error handling. + +[Zero and plus:] Zero is represented by an empty error and the plus operation +executes its second argument if the first fails. + +[Example type:] @'Data.Either' String a@ + +The Error monad (also called the Exception monad). +-} + +{- + Rendered by Michael Weber , + inspired by the Haskell Monad Template Library from + Andy Gill () +-} +module Control.Monad.Error + {-# DEPRECATED "Use Control.Monad.Except instead" #-} ( + -- * Monads with error handling + MonadError(..), + Error(..), + -- * The ErrorT monad transformer + ErrorT(ErrorT), + runErrorT, + mapErrorT, + module Control.Monad, + module Control.Monad.Fix, + module Control.Monad.Trans, + -- * Example 1: Custom Error Data Type + -- $customErrorExample + + -- * Example 2: Using ErrorT Monad Transformer + -- $ErrorTExample + ) where + +import Control.Monad.Error.Class +import Control.Monad.Trans +import Control.Monad.Trans.Error (ErrorT(ErrorT), runErrorT, mapErrorT) + +import Control.Monad +import Control.Monad.Fix + +#if defined(__GLASGOW_HASKELL__) && __GLASGOW_HASKELL__ < 707 +import Control.Monad.Instances () +#endif + +{- $customErrorExample +Here is an example that demonstrates the use of a custom 'Error' data type with +the 'throwError' and 'catchError' exception mechanism from 'MonadError'. +The example throws an exception if the user enters an empty string +or a string longer than 5 characters. Otherwise it prints length of the string. + +>-- This is the type to represent length calculation error. +>data LengthError = EmptyString -- Entered string was empty. +> | StringTooLong Int -- A string is longer than 5 characters. +> -- Records a length of the string. +> | OtherError String -- Other error, stores the problem description. +> +>-- We make LengthError an instance of the Error class +>-- to be able to throw it as an exception. 
+>instance Error LengthError where +> noMsg = OtherError "A String Error!" +> strMsg s = OtherError s +> +>-- Converts LengthError to a readable message. +>instance Show LengthError where +> show EmptyString = "The string was empty!" +> show (StringTooLong len) = +> "The length of the string (" ++ (show len) ++ ") is bigger than 5!" +> show (OtherError msg) = msg +> +>-- For our monad type constructor, we use Either LengthError +>-- which represents failure using Left LengthError +>-- or a successful result of type a using Right a. +>type LengthMonad = Either LengthError +> +>main = do +> putStrLn "Please enter a string:" +> s <- getLine +> reportResult (calculateLength s) +> +>-- Wraps length calculation to catch the errors. +>-- Returns either length of the string or an error. +>calculateLength :: String -> LengthMonad Int +>calculateLength s = (calculateLengthOrFail s) `catchError` Left +> +>-- Attempts to calculate length and throws an error if the provided string is +>-- empty or longer than 5 characters. +>-- The processing is done in Either monad. +>calculateLengthOrFail :: String -> LengthMonad Int +>calculateLengthOrFail [] = throwError EmptyString +>calculateLengthOrFail s | len > 5 = throwError (StringTooLong len) +> | otherwise = return len +> where len = length s +> +>-- Prints result of the string length calculation. +>reportResult :: LengthMonad Int -> IO () +>reportResult (Right len) = putStrLn ("The length of the string is " ++ (show len)) +>reportResult (Left e) = putStrLn ("Length calculation failed with error: " ++ (show e)) +-} + +{- $ErrorTExample +@'ErrorT'@ monad transformer can be used to add error handling to another monad. +Here is an example how to combine it with an @IO@ monad: + +>import Control.Monad.Error +> +>-- An IO monad which can return String failure. +>-- It is convenient to define the monad type of the combined monad, +>-- especially if we combine more monad transformers. 
+>type LengthMonad = ErrorT String IO +> +>main = do +> -- runErrorT removes the ErrorT wrapper +> r <- runErrorT calculateLength +> reportResult r +> +>-- Asks user for a non-empty string and returns its length. +>-- Throws an error if user enters an empty string. +>calculateLength :: LengthMonad Int +>calculateLength = do +> -- all the IO operations have to be lifted to the IO monad in the monad stack +> liftIO $ putStrLn "Please enter a non-empty string: " +> s <- liftIO getLine +> if null s +> then throwError "The string was empty!" +> else return $ length s +> +>-- Prints result of the string length calculation. +>reportResult :: Either String Int -> IO () +>reportResult (Right len) = putStrLn ("The length of the string is " ++ (show len)) +>reportResult (Left e) = putStrLn ("Length calculation failed with error: " ++ (show e)) +-} diff -Nru cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/Except.hs cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/Except.hs --- cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/Except.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/Except.hs 2014-06-02 02:22:37.000000000 +0000 @@ -0,0 +1,153 @@ +{-# LANGUAGE CPP #-} +{- | +Module : Control.Monad.Error +Copyright : (c) Michael Weber 2001, + (c) Jeff Newbern 2003-2006, + (c) Andriy Palamarchuk 2006 +License : BSD-style (see the file LICENSE) + +Maintainer : libraries@haskell.org +Stability : experimental +Portability : non-portable (multi-parameter type classes) + +[Computation type:] Computations which may fail or throw exceptions. + +[Binding strategy:] Failure records information about the cause\/location +of the failure. Failure values bypass the bound function, +other values are used as inputs to the bound function. + +[Useful for:] Building computations from sequences of functions that may fail +or using exception handling to structure error handling. 
+ +[Example type:] @'Data.Either' String a@ + +The Error monad (also called the Exception monad). +-} + +{- + Rendered by Michael Weber , + inspired by the Haskell Monad Template Library from + Andy Gill () +-} +module Control.Monad.Except + ( + -- * Monads with error handling + MonadError(..), + -- * The ErrorT monad transformer + ExceptT(ExceptT), + Except, + + runExceptT, + mapExceptT, + withExceptT, + runExcept, + mapExcept, + withExcept, + + module Control.Monad, + module Control.Monad.Fix, + module Control.Monad.Trans, + -- * Example 1: Custom Error Data Type + -- $customErrorExample + + -- * Example 2: Using ExceptT Monad Transformer + -- $ExceptTExample + ) where + +import Control.Monad.Error.Class +import Control.Monad.Trans +import Control.Monad.Trans.Except + ( ExceptT(ExceptT), Except, except + , runExcept, runExceptT + , mapExcept, mapExceptT + , withExcept, withExceptT + ) + +import Control.Monad +import Control.Monad.Fix + +#if defined(__GLASGOW_HASKELL__) && __GLASGOW_HASKELL__ < 707 +import Control.Monad.Instances () +#endif + +{- $customErrorExample +Here is an example that demonstrates the use of a custom error data type with +the 'throwError' and 'catchError' exception mechanism from 'MonadError'. +The example throws an exception if the user enters an empty string +or a string longer than 5 characters. Otherwise it prints length of the string. + +>-- This is the type to represent length calculation error. +>data LengthError = EmptyString -- Entered string was empty. +> | StringTooLong Int -- A string is longer than 5 characters. +> -- Records a length of the string. +> | OtherError String -- Other error, stores the problem description. +> +>-- Converts LengthError to a readable message. +>instance Show LengthError where +> show EmptyString = "The string was empty!" +> show (StringTooLong len) = +> "The length of the string (" ++ (show len) ++ ") is bigger than 5!" 
+> show (OtherError msg) = msg +> +>-- For our monad type constructor, we use Either LengthError +>-- which represents failure using Left LengthError +>-- or a successful result of type a using Right a. +>type LengthMonad = Either LengthError +> +>main = do +> putStrLn "Please enter a string:" +> s <- getLine +> reportResult (calculateLength s) +> +>-- Wraps length calculation to catch the errors. +>-- Returns either length of the string or an error. +>calculateLength :: String -> LengthMonad Int +>calculateLength s = (calculateLengthOrFail s) `catchError` Left +> +>-- Attempts to calculate length and throws an error if the provided string is +>-- empty or longer than 5 characters. +>-- The processing is done in Either monad. +>calculateLengthOrFail :: String -> LengthMonad Int +>calculateLengthOrFail [] = throwError EmptyString +>calculateLengthOrFail s | len > 5 = throwError (StringTooLong len) +> | otherwise = return len +> where len = length s +> +>-- Prints result of the string length calculation. +>reportResult :: LengthMonad Int -> IO () +>reportResult (Right len) = putStrLn ("The length of the string is " ++ (show len)) +>reportResult (Left e) = putStrLn ("Length calculation failed with error: " ++ (show e)) +-} + +{- $ExceptTExample +@'ExceptT'@ monad transformer can be used to add error handling to another monad. +Here is an example how to combine it with an @IO@ monad: + +>import Control.Monad.Except +> +>-- An IO monad which can return String failure. +>-- It is convenient to define the monad type of the combined monad, +>-- especially if we combine more monad transformers. +>type LengthMonad = ExceptT String IO +> +>main = do +> -- runExceptT removes the ExceptT wrapper +> r <- runExceptT calculateLength +> reportResult r +> +>-- Asks user for a non-empty string and returns its length. +>-- Throws an error if user enters an empty string. 
+>calculateLength :: LengthMonad Int +>calculateLength = do +> -- all the IO operations have to be lifted to the IO monad in the monad stack +> liftIO $ putStrLn "Please enter a non-empty string: " +> s <- liftIO getLine +> if null s +> then throwError "The string was empty!" +> else return $ length s +> +>-- Prints result of the string length calculation. +>reportResult :: Either String Int -> IO () +>reportResult (Right len) = putStrLn ("The length of the string is " ++ (show len)) +>reportResult (Left e) = putStrLn ("Length calculation failed with error: " ++ (show e)) +-} diff -Nru cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/Identity.hs cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/Identity.hs --- cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/Identity.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/Identity.hs 2014-06-02 02:22:37.000000000 +0000 @@ -0,0 +1,44 @@ +{- | +Module : Control.Monad.Identity +Copyright : (c) Andy Gill 2001, + (c) Oregon Graduate Institute of Science and Technology 2001, + (c) Jeff Newbern 2003-2006, + (c) Andriy Palamarchuk 2006 +License : BSD-style (see the file LICENSE) + +Maintainer : libraries@haskell.org +Stability : experimental +Portability : portable + +[Computation type:] Simple function application. + +[Binding strategy:] The bound function is applied to the input value. +@'Identity' x >>= f == 'Identity' (f x)@ + +[Useful for:] Monads can be derived from monad transformers applied to the +'Identity' monad. + +[Zero and plus:] None. + +[Example type:] @'Identity' a@ + +The @Identity@ monad is a monad that does not embody any computational strategy. +It simply applies the bound function to its input without any modification. +Computationally, there is no reason to use the @Identity@ monad +instead of the much simpler act of simply applying functions to their arguments. 
+The purpose of the @Identity@ monad is its fundamental role in the theory +of monad transformers. +Any monad transformer applied to the @Identity@ monad yields a non-transformer +version of that monad. +-} + +module Control.Monad.Identity ( + module Data.Functor.Identity, + + module Control.Monad, + module Control.Monad.Fix, + ) where + +import Control.Monad +import Control.Monad.Fix +import Data.Functor.Identity diff -Nru cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/List.hs cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/List.hs --- cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/List.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/List.hs 2014-06-02 02:22:37.000000000 +0000 @@ -0,0 +1,25 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Control.Monad.List +-- Copyright : (c) Andy Gill 2001, +-- (c) Oregon Graduate Institute of Science and Technology, 2001 +-- License : BSD-style (see the file LICENSE) +-- +-- Maintainer : libraries@haskell.org +-- Stability : experimental +-- Portability : portable +-- +-- The List monad. 
+-- +----------------------------------------------------------------------------- + +module Control.Monad.List ( + ListT(..), + mapListT, + module Control.Monad, + module Control.Monad.Trans, + ) where + +import Control.Monad +import Control.Monad.Trans +import Control.Monad.Trans.List diff -Nru cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/Reader/Class.hs cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/Reader/Class.hs --- cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/Reader/Class.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/Reader/Class.hs 2014-06-02 02:22:37.000000000 +0000 @@ -0,0 +1,175 @@ +{-# LANGUAGE CPP #-} +{-# LANGUAGE UndecidableInstances #-} +-- Search for UndecidableInstances to see why this is needed +{- | +Module : Control.Monad.Reader.Class +Copyright : (c) Andy Gill 2001, + (c) Oregon Graduate Institute of Science and Technology 2001, + (c) Jeff Newbern 2003-2007, + (c) Andriy Palamarchuk 2007 +License : BSD-style (see the file LICENSE) + +Maintainer : libraries@haskell.org +Stability : experimental +Portability : non-portable (multi-param classes, functional dependencies) + +[Computation type:] Computations which read values from a shared environment. + +[Binding strategy:] Monad values are functions from the environment to a value. +The bound function is applied to the bound value, and both have access +to the shared environment. + +[Useful for:] Maintaining variable bindings, or other shared environment. + +[Zero and plus:] None. + +[Example type:] @'Reader' [(String,Value)] a@ + +The 'Reader' monad (also called the Environment monad). +Represents a computation, which can read values from +a shared environment, pass values from function to function, +and execute sub-computations in a modified environment. +Using 'Reader' monad for such computations is often clearer and easier +than using the 'Control.Monad.State.State' monad. 
+ + Inspired by the paper + /Functional Programming with Overloading and Higher-Order Polymorphism/, + Mark P Jones () + Advanced School of Functional Programming, 1995. +-} + +module Control.Monad.Reader.Class ( + MonadReader(..), + asks, + ) where + +import Control.Monad.Trans.Cont as Cont +import Control.Monad.Trans.Except +import Control.Monad.Trans.Error +import Control.Monad.Trans.Identity +import Control.Monad.Trans.List +import Control.Monad.Trans.Maybe +import Control.Monad.Trans.Reader (ReaderT) +import qualified Control.Monad.Trans.Reader as ReaderT (ask, local, reader) +import qualified Control.Monad.Trans.RWS.Lazy as LazyRWS (RWST, ask, local, reader) +import qualified Control.Monad.Trans.RWS.Strict as StrictRWS (RWST, ask, local, reader) +import Control.Monad.Trans.State.Lazy as Lazy +import Control.Monad.Trans.State.Strict as Strict +import Control.Monad.Trans.Writer.Lazy as Lazy +import Control.Monad.Trans.Writer.Strict as Strict + +import Control.Monad.Trans.Class (lift) +import Control.Monad +import Data.Monoid + +-- ---------------------------------------------------------------------------- +-- class MonadReader +-- asks for the internal (non-mutable) state. + +-- | See examples in "Control.Monad.Reader". +-- Note, the partially applied function type @(->) r@ is a simple reader monad. +-- See the @instance@ declaration below. +class Monad m => MonadReader r m | m -> r where +#if defined(__GLASGOW_HASKELL__) && __GLASGOW_HASKELL__ >= 707 + {-# MINIMAL (ask | reader), local #-} +#endif + -- | Retrieves the monad environment. + ask :: m r + ask = reader id + + -- | Executes a computation in a modified environment. + local :: (r -> r) -- ^ The function to modify the environment. + -> m a -- ^ @Reader@ to run in the modified environment. + -> m a + + -- | Retrieves a function of the current environment. + reader :: (r -> a) -- ^ The selector function to apply to the environment. 
+ -> m a + reader f = do + r <- ask + return (f r) + +-- | Retrieves a function of the current environment. +asks :: MonadReader r m + => (r -> a) -- ^ The selector function to apply to the environment. + -> m a +asks = reader + +-- ---------------------------------------------------------------------------- +-- The partially applied function type is a simple reader monad + +instance MonadReader r ((->) r) where + ask = id + local f m = m . f + reader = id + +instance Monad m => MonadReader r (ReaderT r m) where + ask = ReaderT.ask + local = ReaderT.local + reader = ReaderT.reader + +instance (Monad m, Monoid w) => MonadReader r (LazyRWS.RWST r w s m) where + ask = LazyRWS.ask + local = LazyRWS.local + reader = LazyRWS.reader + +instance (Monad m, Monoid w) => MonadReader r (StrictRWS.RWST r w s m) where + ask = StrictRWS.ask + local = StrictRWS.local + reader = StrictRWS.reader + +-- --------------------------------------------------------------------------- +-- Instances for other mtl transformers +-- +-- All of these instances need UndecidableInstances, +-- because they do not satisfy the coverage condition. + +instance MonadReader r' m => MonadReader r' (ContT r m) where + ask = lift ask + local = Cont.liftLocal ask local + reader = lift . reader + +instance (Error e, MonadReader r m) => MonadReader r (ErrorT e m) where + ask = lift ask + local = mapErrorT . local + reader = lift . reader + +instance MonadReader r m => MonadReader r (ExceptT e m) where + ask = lift ask + local = mapExceptT . local + reader = lift . reader + +instance MonadReader r m => MonadReader r (IdentityT m) where + ask = lift ask + local = mapIdentityT . local + reader = lift . reader + +instance MonadReader r m => MonadReader r (ListT m) where + ask = lift ask + local = mapListT . local + reader = lift . reader + +instance MonadReader r m => MonadReader r (MaybeT m) where + ask = lift ask + local = mapMaybeT . local + reader = lift . 
reader + +instance MonadReader r m => MonadReader r (Lazy.StateT s m) where + ask = lift ask + local = Lazy.mapStateT . local + reader = lift . reader + +instance MonadReader r m => MonadReader r (Strict.StateT s m) where + ask = lift ask + local = Strict.mapStateT . local + reader = lift . reader + +instance (Monoid w, MonadReader r m) => MonadReader r (Lazy.WriterT w m) where + ask = lift ask + local = Lazy.mapWriterT . local + reader = lift . reader + +instance (Monoid w, MonadReader r m) => MonadReader r (Strict.WriterT w m) where + ask = lift ask + local = Strict.mapWriterT . local + reader = lift . reader diff -Nru cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/Reader.hs cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/Reader.hs --- cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/Reader.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/Reader.hs 2014-06-02 02:22:37.000000000 +0000 @@ -0,0 +1,144 @@ +{- | +Module : Control.Monad.Reader +Copyright : (c) Andy Gill 2001, + (c) Oregon Graduate Institute of Science and Technology 2001, + (c) Jeff Newbern 2003-2007, + (c) Andriy Palamarchuk 2007 +License : BSD-style (see the file LICENSE) + +Maintainer : libraries@haskell.org +Stability : experimental +Portability : non-portable (multi-param classes, functional dependencies) + +[Computation type:] Computations which read values from a shared environment. + +[Binding strategy:] Monad values are functions from the environment to a value. +The bound function is applied to the bound value, and both have access +to the shared environment. + +[Useful for:] Maintaining variable bindings, or other shared environment. + +[Zero and plus:] None. + +[Example type:] @'Reader' [(String,Value)] a@ + +The 'Reader' monad (also called the Environment monad). 
+Represents a computation, which can read values from +a shared environment, pass values from function to function, +and execute sub-computations in a modified environment. +Using 'Reader' monad for such computations is often clearer and easier +than using the 'Control.Monad.State.State' monad. + + Inspired by the paper + /Functional Programming with Overloading and Higher-Order Polymorphism/, + Mark P Jones () + Advanced School of Functional Programming, 1995. +-} + +module Control.Monad.Reader ( + -- * MonadReader class + MonadReader(..), + asks, + -- * The Reader monad + Reader, + runReader, + mapReader, + withReader, + -- * The ReaderT monad transformer + ReaderT(ReaderT), + runReaderT, + mapReaderT, + withReaderT, + module Control.Monad, + module Control.Monad.Fix, + module Control.Monad.Trans, + -- * Example 1: Simple Reader Usage + -- $simpleReaderExample + + -- * Example 2: Modifying Reader Content With @local@ + -- $localExample + + -- * Example 3: @ReaderT@ Monad Transformer + -- $ReaderTExample + ) where + +import Control.Monad.Reader.Class + +import Control.Monad.Trans.Reader ( + Reader, runReader, mapReader, withReader, + ReaderT(ReaderT), runReaderT, mapReaderT, withReaderT) +import Control.Monad.Trans + +import Control.Monad +import Control.Monad.Fix + +{- $simpleReaderExample + +In this example the @Reader@ monad provides access to variable bindings. +Bindings are a @Map@ of integer variables. +The variable @count@ contains number of variables in the bindings. +You can see how to run a Reader monad and retrieve data from it +with 'runReader', how to access the Reader data with 'ask' and 'asks'. + +> type Bindings = Map String Int; +> +>-- Returns True if the "count" variable contains correct bindings size. +>isCountCorrect :: Bindings -> Bool +>isCountCorrect bindings = runReader calc_isCountCorrect bindings +> +>-- The Reader monad, which implements this complicated check. 
+>calc_isCountCorrect :: Reader Bindings Bool +>calc_isCountCorrect = do +> count <- asks (lookupVar "count") +> bindings <- ask +> return (count == (Map.size bindings)) +> +>-- The selector function to use with 'asks'. +>-- Returns value of the variable with specified name. +>lookupVar :: String -> Bindings -> Int +>lookupVar name bindings = fromJust (Map.lookup name bindings) +> +>sampleBindings = Map.fromList [("count",3), ("1",1), ("b",2)] +> +>main = do +> putStr $ "Count is correct for bindings " ++ (show sampleBindings) ++ ": "; +> putStrLn $ show (isCountCorrect sampleBindings); +-} + +{- $localExample + +Shows how to modify Reader content with 'local'. + +>calculateContentLen :: Reader String Int +>calculateContentLen = do +> content <- ask +> return (length content); +> +>-- Calls calculateContentLen after adding a prefix to the Reader content. +>calculateModifiedContentLen :: Reader String Int +>calculateModifiedContentLen = local ("Prefix " ++) calculateContentLen +> +>main = do +> let s = "12345"; +> let modifiedLen = runReader calculateModifiedContentLen s +> let len = runReader calculateContentLen s +> putStrLn $ "Modified 's' length: " ++ (show modifiedLen) +> putStrLn $ "Original 's' length: " ++ (show len) +-} + +{- $ReaderTExample + +Now you are thinking: 'Wow, what a great monad! I wish I could use +Reader functionality in MyFavoriteComplexMonad!'. Don't worry. +This can be easy done with the 'ReaderT' monad transformer. +This example shows how to combine @ReaderT@ with the IO monad. + +>-- The Reader/IO combined monad, where Reader stores a string. 
+>printReaderContent :: ReaderT String IO () +>printReaderContent = do +> content <- ask +> liftIO $ putStrLn ("The Reader Content: " ++ content) +> +>main = do +> runReaderT printReaderContent "Some Content" +-} diff -Nru cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/RWS/Class.hs cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/RWS/Class.hs --- cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/RWS/Class.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/RWS/Class.hs 2014-06-02 02:22:37.000000000 +0000 @@ -0,0 +1,60 @@ +{-# LANGUAGE UndecidableInstances #-} +-- Search for UndecidableInstances to see why this is needed + +----------------------------------------------------------------------------- +-- | +-- Module : Control.Monad.RWS.Class +-- Copyright : (c) Andy Gill 2001, +-- (c) Oregon Graduate Institute of Science and Technology, 2001 +-- License : BSD-style (see the file LICENSE) +-- +-- Maintainer : libraries@haskell.org +-- Stability : experimental +-- Portability : non-portable (multi-param classes, functional dependencies) +-- +-- Declaration of the MonadRWS class. +-- +-- Inspired by the paper +-- /Functional Programming with Overloading and Higher-Order Polymorphism/, +-- Mark P Jones () +-- Advanced School of Functional Programming, 1995. 
+----------------------------------------------------------------------------- + +module Control.Monad.RWS.Class ( + MonadRWS, + module Control.Monad.Reader.Class, + module Control.Monad.State.Class, + module Control.Monad.Writer.Class, + ) where + +import Control.Monad.Reader.Class +import Control.Monad.State.Class +import Control.Monad.Writer.Class + +import Control.Monad.Trans.Class +import Control.Monad.Trans.Error(Error, ErrorT) +import Control.Monad.Trans.Except(ExceptT) +import Control.Monad.Trans.Maybe(MaybeT) +import Control.Monad.Trans.Identity(IdentityT) +import Control.Monad.Trans.RWS.Lazy as Lazy (RWST) +import qualified Control.Monad.Trans.RWS.Strict as Strict (RWST) + +import Data.Monoid + +class (Monoid w, MonadReader r m, MonadWriter w m, MonadState s m) + => MonadRWS r w s m | m -> r, m -> w, m -> s + +instance (Monoid w, Monad m) => MonadRWS r w s (Lazy.RWST r w s m) + +instance (Monoid w, Monad m) => MonadRWS r w s (Strict.RWST r w s m) + +--------------------------------------------------------------------------- +-- Instances for other mtl transformers +-- +-- All of these instances need UndecidableInstances, +-- because they do not satisfy the coverage condition. 
+ +instance MonadRWS r w s m => MonadRWS r w s (ExceptT e m) +instance (Error e, MonadRWS r w s m) => MonadRWS r w s (ErrorT e m) +instance MonadRWS r w s m => MonadRWS r w s (IdentityT m) +instance MonadRWS r w s m => MonadRWS r w s (MaybeT m) diff -Nru cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/RWS/Lazy.hs cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/RWS/Lazy.hs --- cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/RWS/Lazy.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/RWS/Lazy.hs 2014-06-02 02:22:37.000000000 +0000 @@ -0,0 +1,53 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Control.Monad.RWS.Lazy +-- Copyright : (c) Andy Gill 2001, +-- (c) Oregon Graduate Institute of Science and Technology, 2001 +-- License : BSD-style (see the file LICENSE) +-- +-- Maintainer : libraries@haskell.org +-- Stability : experimental +-- Portability : non-portable (multi-param classes, functional dependencies) +-- +-- Lazy RWS monad. +-- +-- Inspired by the paper +-- /Functional Programming with Overloading and Higher-Order Polymorphism/, +-- Mark P Jones () +-- Advanced School of Functional Programming, 1995. 
+----------------------------------------------------------------------------- + +module Control.Monad.RWS.Lazy ( + -- * The RWS monad + RWS, + rws, + runRWS, + evalRWS, + execRWS, + mapRWS, + withRWS, + -- * The RWST monad transformer + RWST(RWST), + runRWST, + evalRWST, + execRWST, + mapRWST, + withRWST, + -- * Lazy Reader-writer-state monads + module Control.Monad.RWS.Class, + module Control.Monad, + module Control.Monad.Fix, + module Control.Monad.Trans, + module Data.Monoid, + ) where + +import Control.Monad.RWS.Class + +import Control.Monad.Trans +import Control.Monad.Trans.RWS.Lazy ( + RWS, rws, runRWS, evalRWS, execRWS, mapRWS, withRWS, + RWST(RWST), runRWST, evalRWST, execRWST, mapRWST, withRWST) + +import Control.Monad +import Control.Monad.Fix +import Data.Monoid diff -Nru cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/RWS/Strict.hs cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/RWS/Strict.hs --- cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/RWS/Strict.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/RWS/Strict.hs 2014-06-02 02:22:37.000000000 +0000 @@ -0,0 +1,53 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Control.Monad.RWS.Strict +-- Copyright : (c) Andy Gill 2001, +-- (c) Oregon Graduate Institute of Science and Technology, 2001 +-- License : BSD-style (see the file LICENSE) +-- +-- Maintainer : libraries@haskell.org +-- Stability : experimental +-- Portability : non-portable (multi-param classes, functional dependencies) +-- +-- Strict RWS monad. +-- +-- Inspired by the paper +-- /Functional Programming with Overloading and Higher-Order Polymorphism/, +-- Mark P Jones () +-- Advanced School of Functional Programming, 1995. 
+----------------------------------------------------------------------------- + +module Control.Monad.RWS.Strict ( + -- * The RWS monad + RWS, + rws, + runRWS, + evalRWS, + execRWS, + mapRWS, + withRWS, + -- * The RWST monad transformer + RWST(RWST), + runRWST, + evalRWST, + execRWST, + mapRWST, + withRWST, + -- * Strict Reader-writer-state monads + module Control.Monad.RWS.Class, + module Control.Monad, + module Control.Monad.Fix, + module Control.Monad.Trans, + module Data.Monoid, + ) where + +import Control.Monad.RWS.Class + +import Control.Monad.Trans +import Control.Monad.Trans.RWS.Strict ( + RWS, rws, runRWS, evalRWS, execRWS, mapRWS, withRWS, + RWST(RWST), runRWST, evalRWST, execRWST, mapRWST, withRWST) + +import Control.Monad +import Control.Monad.Fix +import Data.Monoid diff -Nru cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/RWS.hs cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/RWS.hs --- cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/RWS.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/RWS.hs 2014-06-02 02:22:37.000000000 +0000 @@ -0,0 +1,24 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Control.Monad.RWS +-- Copyright : (c) Andy Gill 2001, +-- (c) Oregon Graduate Institute of Science and Technology, 2001 +-- License : BSD-style (see the file LICENSE) +-- +-- Maintainer : libraries@haskell.org +-- Stability : experimental +-- Portability : non-portable (multi-param classes, functional dependencies) +-- +-- Declaration of the MonadRWS class. +-- +-- Inspired by the paper +-- /Functional Programming with Overloading and Higher-Order Polymorphism/, +-- Mark P Jones () +-- Advanced School of Functional Programming, 1995. 
+----------------------------------------------------------------------------- + +module Control.Monad.RWS ( + module Control.Monad.RWS.Lazy + ) where + +import Control.Monad.RWS.Lazy diff -Nru cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/State/Class.hs cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/State/Class.hs --- cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/State/Class.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/State/Class.hs 2014-06-02 02:22:37.000000000 +0000 @@ -0,0 +1,168 @@ +{-# LANGUAGE CPP #-} +{-# LANGUAGE UndecidableInstances #-} +-- Search for UndecidableInstances to see why this is needed + +----------------------------------------------------------------------------- +-- | +-- Module : Control.Monad.State.Class +-- Copyright : (c) Andy Gill 2001, +-- (c) Oregon Graduate Institute of Science and Technology, 2001 +-- License : BSD-style (see the file LICENSE) +-- +-- Maintainer : libraries@haskell.org +-- Stability : experimental +-- Portability : non-portable (multi-param classes, functional dependencies) +-- +-- MonadState class. +-- +-- This module is inspired by the paper +-- /Functional Programming with Overloading and Higher-Order Polymorphism/, +-- Mark P Jones () +-- Advanced School of Functional Programming, 1995. 
+ +----------------------------------------------------------------------------- + +module Control.Monad.State.Class ( + MonadState(..), + modify, + modify', + gets + ) where + +import Control.Monad.Trans.Cont +import Control.Monad.Trans.Error +import Control.Monad.Trans.Except +import Control.Monad.Trans.Identity +import Control.Monad.Trans.List +import Control.Monad.Trans.Maybe +import Control.Monad.Trans.Reader +import qualified Control.Monad.Trans.RWS.Lazy as LazyRWS (RWST, get, put, state) +import qualified Control.Monad.Trans.RWS.Strict as StrictRWS (RWST, get, put, state) +import qualified Control.Monad.Trans.State.Lazy as Lazy (StateT, get, put, state) +import qualified Control.Monad.Trans.State.Strict as Strict (StateT, get, put, state) +import Control.Monad.Trans.Writer.Lazy as Lazy +import Control.Monad.Trans.Writer.Strict as Strict + +import Control.Monad.Trans.Class (lift) +import Control.Monad +import Data.Monoid + +-- --------------------------------------------------------------------------- + +-- | Minimal definition is either both of @get@ and @put@ or just @state@ +class Monad m => MonadState s m | m -> s where + -- | Return the state from the internals of the monad. + get :: m s + get = state (\s -> (s, s)) + + -- | Replace the state inside the monad. + put :: s -> m () + put s = state (\_ -> ((), s)) + + -- | Embed a simple state action into the monad. + state :: (s -> (a, s)) -> m a + state f = do + s <- get + let ~(a, s') = f s + put s' + return a +#if defined(__GLASGOW_HASKELL__) && __GLASGOW_HASKELL__ >= 707 + {-# MINIMAL state | get, put #-} +#endif + +-- | Monadic state transformer. +-- +-- Maps an old state to a new state inside a state monad. +-- The old state is thrown away. +-- +-- > Main> :t modify ((+1) :: Int -> Int) +-- > modify (...) :: (MonadState Int a) => a () +-- +-- This says that @modify (+1)@ acts over any +-- Monad that is a member of the @MonadState@ class, +-- with an @Int@ state. 
+modify :: MonadState s m => (s -> s) -> m () +modify f = state (\s -> ((), f s)) + +-- | A variant of 'modify' in which the computation is strict in the +-- new state. +modify' :: MonadState s m => (s -> s) -> m () +modify' f = state (\s -> let s' = f s in s' `seq` ((), s')) + +-- | Gets specific component of the state, using a projection function +-- supplied. +gets :: MonadState s m => (s -> a) -> m a +gets f = do + s <- get + return (f s) + +instance Monad m => MonadState s (Lazy.StateT s m) where + get = Lazy.get + put = Lazy.put + state = Lazy.state + +instance Monad m => MonadState s (Strict.StateT s m) where + get = Strict.get + put = Strict.put + state = Strict.state + +instance (Monad m, Monoid w) => MonadState s (LazyRWS.RWST r w s m) where + get = LazyRWS.get + put = LazyRWS.put + state = LazyRWS.state + +instance (Monad m, Monoid w) => MonadState s (StrictRWS.RWST r w s m) where + get = StrictRWS.get + put = StrictRWS.put + state = StrictRWS.state + +-- --------------------------------------------------------------------------- +-- Instances for other mtl transformers +-- +-- All of these instances need UndecidableInstances, +-- because they do not satisfy the coverage condition. + +instance MonadState s m => MonadState s (ContT r m) where + get = lift get + put = lift . put + state = lift . state + +instance (Error e, MonadState s m) => MonadState s (ErrorT e m) where + get = lift get + put = lift . put + state = lift . state + +instance MonadState s m => MonadState s (ExceptT e m) where + get = lift get + put = lift . put + state = lift . state + +instance MonadState s m => MonadState s (IdentityT m) where + get = lift get + put = lift . put + state = lift . state + +instance MonadState s m => MonadState s (ListT m) where + get = lift get + put = lift . put + state = lift . state + +instance MonadState s m => MonadState s (MaybeT m) where + get = lift get + put = lift . put + state = lift . 
state + +instance MonadState s m => MonadState s (ReaderT r m) where + get = lift get + put = lift . put + state = lift . state + +instance (Monoid w, MonadState s m) => MonadState s (Lazy.WriterT w m) where + get = lift get + put = lift . put + state = lift . state + +instance (Monoid w, MonadState s m) => MonadState s (Strict.WriterT w m) where + get = lift get + put = lift . put + state = lift . state diff -Nru cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/State/Lazy.hs cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/State/Lazy.hs --- cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/State/Lazy.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/State/Lazy.hs 2014-06-02 02:22:37.000000000 +0000 @@ -0,0 +1,130 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Control.Monad.State.Lazy +-- Copyright : (c) Andy Gill 2001, +-- (c) Oregon Graduate Institute of Science and Technology, 2001 +-- License : BSD-style (see the file LICENSE) +-- +-- Maintainer : libraries@haskell.org +-- Stability : experimental +-- Portability : non-portable (multi-param classes, functional dependencies) +-- +-- Lazy state monads. +-- +-- This module is inspired by the paper +-- /Functional Programming with Overloading and Higher-Order Polymorphism/, +-- Mark P Jones () +-- Advanced School of Functional Programming, 1995. 
+ +----------------------------------------------------------------------------- + +module Control.Monad.State.Lazy ( + -- * MonadState class + MonadState(..), + modify, + modify', + gets, + -- * The State monad + State, + runState, + evalState, + execState, + mapState, + withState, + -- * The StateT monad transformer + StateT(StateT), + runStateT, + evalStateT, + execStateT, + mapStateT, + withStateT, + module Control.Monad, + module Control.Monad.Fix, + module Control.Monad.Trans, + -- * Examples + -- $examples + ) where + +import Control.Monad.State.Class + +import Control.Monad.Trans +import Control.Monad.Trans.State.Lazy + (State, runState, evalState, execState, mapState, withState, + StateT(StateT), runStateT, evalStateT, execStateT, mapStateT, withStateT) + +import Control.Monad +import Control.Monad.Fix + +-- --------------------------------------------------------------------------- +-- $examples +-- A function to increment a counter. Taken from the paper +-- /Generalising Monads to Arrows/, John +-- Hughes (), November 1998: +-- +-- > tick :: State Int Int +-- > tick = do n <- get +-- > put (n+1) +-- > return n +-- +-- Add one to the given number using the state monad: +-- +-- > plusOne :: Int -> Int +-- > plusOne n = execState tick n +-- +-- A contrived addition example. Works only with positive numbers: +-- +-- > plus :: Int -> Int -> Int +-- > plus n x = execState (sequence $ replicate n tick) x +-- +-- An example from /The Craft of Functional Programming/, Simon +-- Thompson (), +-- Addison-Wesley 1999: \"Given an arbitrary tree, transform it to a +-- tree of integers in which the original elements are replaced by +-- natural numbers, starting from 0. 
The same element has to be +-- replaced by the same number at every occurrence, and when we meet +-- an as-yet-unvisited element we have to find a \'new\' number to match +-- it with:\" +-- +-- > data Tree a = Nil | Node a (Tree a) (Tree a) deriving (Show, Eq) +-- > type Table a = [a] +-- +-- > numberTree :: Eq a => Tree a -> State (Table a) (Tree Int) +-- > numberTree Nil = return Nil +-- > numberTree (Node x t1 t2) +-- > = do num <- numberNode x +-- > nt1 <- numberTree t1 +-- > nt2 <- numberTree t2 +-- > return (Node num nt1 nt2) +-- > where +-- > numberNode :: Eq a => a -> State (Table a) Int +-- > numberNode x +-- > = do table <- get +-- > (newTable, newPos) <- return (nNode x table) +-- > put newTable +-- > return newPos +-- > nNode:: (Eq a) => a -> Table a -> (Table a, Int) +-- > nNode x table +-- > = case (findIndexInList (== x) table) of +-- > Nothing -> (table ++ [x], length table) +-- > Just i -> (table, i) +-- > findIndexInList :: (a -> Bool) -> [a] -> Maybe Int +-- > findIndexInList = findIndexInListHelp 0 +-- > findIndexInListHelp _ _ [] = Nothing +-- > findIndexInListHelp count f (h:t) +-- > = if (f h) +-- > then Just count +-- > else findIndexInListHelp (count+1) f t +-- +-- numTree applies numberTree with an initial state: +-- +-- > numTree :: (Eq a) => Tree a -> Tree Int +-- > numTree t = evalState (numberTree t) [] +-- +-- > testTree = Node "Zero" (Node "One" (Node "Two" Nil Nil) (Node "One" (Node "Zero" Nil Nil) Nil)) Nil +-- > numTree testTree => Node 0 (Node 1 (Node 2 Nil Nil) (Node 1 (Node 0 Nil Nil) Nil)) Nil +-- +-- sumTree is a little helper function that does not use the State monad: +-- +-- > sumTree :: (Num a) => Tree a -> a +-- > sumTree Nil = 0 +-- > sumTree (Node e t1 t2) = e + (sumTree t1) + (sumTree t2) diff -Nru cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/State/Strict.hs cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/State/Strict.hs --- cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/State/Strict.hs 
1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/State/Strict.hs 2014-06-02 02:22:37.000000000 +0000 @@ -0,0 +1,130 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Control.Monad.State.Strict +-- Copyright : (c) Andy Gill 2001, +-- (c) Oregon Graduate Institute of Science and Technology, 2001 +-- License : BSD-style (see the file LICENSE) +-- +-- Maintainer : libraries@haskell.org +-- Stability : experimental +-- Portability : non-portable (multi-param classes, functional dependencies) +-- +-- Strict state monads. +-- +-- This module is inspired by the paper +-- /Functional Programming with Overloading and Higher-Order Polymorphism/, +-- Mark P Jones () +-- Advanced School of Functional Programming, 1995. + +----------------------------------------------------------------------------- + +module Control.Monad.State.Strict ( + -- * MonadState class + MonadState(..), + modify, + modify', + gets, + -- * The State monad + State, + runState, + evalState, + execState, + mapState, + withState, + -- * The StateT monad transformer + StateT(StateT), + runStateT, + evalStateT, + execStateT, + mapStateT, + withStateT, + module Control.Monad, + module Control.Monad.Fix, + module Control.Monad.Trans, + -- * Examples + -- $examples + ) where + +import Control.Monad.State.Class + +import Control.Monad.Trans +import Control.Monad.Trans.State.Strict + (State, runState, evalState, execState, mapState, withState, + StateT(StateT), runStateT, evalStateT, execStateT, mapStateT, withStateT) + +import Control.Monad +import Control.Monad.Fix + +-- --------------------------------------------------------------------------- +-- $examples +-- A function to increment a counter. 
Taken from the paper +-- /Generalising Monads to Arrows/, John +-- Hughes (), November 1998: +-- +-- > tick :: State Int Int +-- > tick = do n <- get +-- > put (n+1) +-- > return n +-- +-- Add one to the given number using the state monad: +-- +-- > plusOne :: Int -> Int +-- > plusOne n = execState tick n +-- +-- A contrived addition example. Works only with positive numbers: +-- +-- > plus :: Int -> Int -> Int +-- > plus n x = execState (sequence $ replicate n tick) x +-- +-- An example from /The Craft of Functional Programming/, Simon +-- Thompson (), +-- Addison-Wesley 1999: \"Given an arbitrary tree, transform it to a +-- tree of integers in which the original elements are replaced by +-- natural numbers, starting from 0. The same element has to be +-- replaced by the same number at every occurrence, and when we meet +-- an as-yet-unvisited element we have to find a \'new\' number to match +-- it with:\" +-- +-- > data Tree a = Nil | Node a (Tree a) (Tree a) deriving (Show, Eq) +-- > type Table a = [a] +-- +-- > numberTree :: Eq a => Tree a -> State (Table a) (Tree Int) +-- > numberTree Nil = return Nil +-- > numberTree (Node x t1 t2) +-- > = do num <- numberNode x +-- > nt1 <- numberTree t1 +-- > nt2 <- numberTree t2 +-- > return (Node num nt1 nt2) +-- > where +-- > numberNode :: Eq a => a -> State (Table a) Int +-- > numberNode x +-- > = do table <- get +-- > (newTable, newPos) <- return (nNode x table) +-- > put newTable +-- > return newPos +-- > nNode:: (Eq a) => a -> Table a -> (Table a, Int) +-- > nNode x table +-- > = case (findIndexInList (== x) table) of +-- > Nothing -> (table ++ [x], length table) +-- > Just i -> (table, i) +-- > findIndexInList :: (a -> Bool) -> [a] -> Maybe Int +-- > findIndexInList = findIndexInListHelp 0 +-- > findIndexInListHelp _ _ [] = Nothing +-- > findIndexInListHelp count f (h:t) +-- > = if (f h) +-- > then Just count +-- > else findIndexInListHelp (count+1) f t +-- +-- numTree applies numberTree with an initial state: +-- 
+-- > numTree :: (Eq a) => Tree a -> Tree Int +-- > numTree t = evalState (numberTree t) [] +-- +-- > testTree = Node "Zero" (Node "One" (Node "Two" Nil Nil) (Node "One" (Node "Zero" Nil Nil) Nil)) Nil +-- > numTree testTree => Node 0 (Node 1 (Node 2 Nil Nil) (Node 1 (Node 0 Nil Nil) Nil)) Nil +-- +-- sumTree is a little helper function that does not use the State monad: +-- +-- > sumTree :: (Num a) => Tree a -> a +-- > sumTree Nil = 0 +-- > sumTree (Node e t1 t2) = e + (sumTree t1) + (sumTree t2) diff -Nru cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/State.hs cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/State.hs --- cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/State.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/State.hs 2014-06-02 02:22:37.000000000 +0000 @@ -0,0 +1,25 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Control.Monad.State +-- Copyright : (c) Andy Gill 2001, +-- (c) Oregon Graduate Institute of Science and Technology, 2001 +-- License : BSD-style (see the file LICENSE) +-- +-- Maintainer : libraries@haskell.org +-- Stability : experimental +-- Portability : non-portable (multi-param classes, functional dependencies) +-- +-- State monads. +-- +-- This module is inspired by the paper +-- /Functional Programming with Overloading and Higher-Order Polymorphism/, +-- Mark P Jones () +-- Advanced School of Functional Programming, 1995. 
+ +----------------------------------------------------------------------------- + +module Control.Monad.State ( + module Control.Monad.State.Lazy + ) where + +import Control.Monad.State.Lazy diff -Nru cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/Trans.hs cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/Trans.hs --- cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/Trans.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/Trans.hs 2014-06-02 02:22:37.000000000 +0000 @@ -0,0 +1,34 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Control.Monad.Trans +-- Copyright : (c) Andy Gill 2001, +-- (c) Oregon Graduate Institute of Science and Technology, 2001 +-- License : BSD-style (see the file LICENSE) +-- +-- Maintainer : libraries@haskell.org +-- Stability : experimental +-- Portability : portable +-- +-- Classes for monad transformers. +-- +-- A monad transformer makes new monad out of an existing monad, such +-- that computations of the old monad may be embedded in the new one. +-- To construct a monad with a desired set of features, one typically +-- starts with a base monad, such as @Identity@, @[]@ or 'IO', and +-- applies a sequence of monad transformers. +-- +-- Most monad transformer modules include the special case of applying the +-- transformer to @Identity@. For example, @State s@ is an abbreviation +-- for @StateT s Identity@. +-- +-- Each monad transformer also comes with an operation @run@/XXX/ to +-- unwrap the transformer, exposing a computation of the inner monad. 
+----------------------------------------------------------------------------- + +module Control.Monad.Trans ( + module Control.Monad.Trans.Class, + module Control.Monad.IO.Class + ) where + +import Control.Monad.IO.Class +import Control.Monad.Trans.Class diff -Nru cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/Writer/Class.hs cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/Writer/Class.hs --- cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/Writer/Class.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/Writer/Class.hs 2014-06-02 02:22:37.000000000 +0000 @@ -0,0 +1,173 @@ +{-# LANGUAGE CPP #-} +{-# LANGUAGE UndecidableInstances #-} +-- Search for UndecidableInstances to see why this is needed + +----------------------------------------------------------------------------- +-- | +-- Module : Control.Monad.Writer.Class +-- Copyright : (c) Andy Gill 2001, +-- (c) Oregon Graduate Institute of Science and Technology, 2001 +-- License : BSD-style (see the file LICENSE) +-- +-- Maintainer : libraries@haskell.org +-- Stability : experimental +-- Portability : non-portable (multi-param classes, functional dependencies) +-- +-- The MonadWriter class. +-- +-- Inspired by the paper +-- /Functional Programming with Overloading and Higher-Order Polymorphism/, +-- Mark P Jones () +-- Advanced School of Functional Programming, 1995. 
+----------------------------------------------------------------------------- + +module Control.Monad.Writer.Class ( + MonadWriter(..), + listens, + censor, + ) where + +import Control.Monad.Trans.Error as Error +import Control.Monad.Trans.Except as Except +import Control.Monad.Trans.Identity as Identity +import Control.Monad.Trans.Maybe as Maybe +import Control.Monad.Trans.Reader +import qualified Control.Monad.Trans.RWS.Lazy as LazyRWS ( + RWST, writer, tell, listen, pass) +import qualified Control.Monad.Trans.RWS.Strict as StrictRWS ( + RWST, writer, tell, listen, pass) +import Control.Monad.Trans.State.Lazy as Lazy +import Control.Monad.Trans.State.Strict as Strict +import qualified Control.Monad.Trans.Writer.Lazy as Lazy ( + WriterT, writer, tell, listen, pass) +import qualified Control.Monad.Trans.Writer.Strict as Strict ( + WriterT, writer, tell, listen, pass) + +import Control.Monad.Trans.Class (lift) +import Control.Monad +import Data.Monoid + +-- --------------------------------------------------------------------------- +-- MonadWriter class +-- +-- tell is like tell on the MUD's it shouts to monad +-- what you want to be heard. The monad carries this 'packet' +-- upwards, merging it if needed (hence the Monoid requirement). +-- +-- listen listens to a monad acting, and returns what the monad "said". +-- +-- pass lets you provide a writer transformer which changes internals of +-- the written object. + +class (Monoid w, Monad m) => MonadWriter w m | m -> w where +#if defined(__GLASGOW_HASKELL__) && __GLASGOW_HASKELL__ >= 707 + {-# MINIMAL (writer | tell), listen, pass #-} +#endif + -- | @'writer' (a,w)@ embeds a simple writer action. + writer :: (a,w) -> m a + writer ~(a, w) = do + tell w + return a + + -- | @'tell' w@ is an action that produces the output @w@. + tell :: w -> m () + tell w = writer ((),w) + + -- | @'listen' m@ is an action that executes the action @m@ and adds + -- its output to the value of the computation. 
+ listen :: m a -> m (a, w) + -- | @'pass' m@ is an action that executes the action @m@, which + -- returns a value and a function, and returns the value, applying + -- the function to the output. + pass :: m (a, w -> w) -> m a + +-- | @'listens' f m@ is an action that executes the action @m@ and adds +-- the result of applying @f@ to the output to the value of the computation. +-- +-- * @'listens' f m = 'liftM' (id *** f) ('listen' m)@ +listens :: MonadWriter w m => (w -> b) -> m a -> m (a, b) +listens f m = do + ~(a, w) <- listen m + return (a, f w) + +-- | @'censor' f m@ is an action that executes the action @m@ and +-- applies the function @f@ to its output, leaving the return value +-- unchanged. +-- +-- * @'censor' f m = 'pass' ('liftM' (\\x -> (x,f)) m)@ +censor :: MonadWriter w m => (w -> w) -> m a -> m a +censor f m = pass $ do + a <- m + return (a, f) + +instance (Monoid w, Monad m) => MonadWriter w (Lazy.WriterT w m) where + writer = Lazy.writer + tell = Lazy.tell + listen = Lazy.listen + pass = Lazy.pass + +instance (Monoid w, Monad m) => MonadWriter w (Strict.WriterT w m) where + writer = Strict.writer + tell = Strict.tell + listen = Strict.listen + pass = Strict.pass + +instance (Monoid w, Monad m) => MonadWriter w (LazyRWS.RWST r w s m) where + writer = LazyRWS.writer + tell = LazyRWS.tell + listen = LazyRWS.listen + pass = LazyRWS.pass + +instance (Monoid w, Monad m) => MonadWriter w (StrictRWS.RWST r w s m) where + writer = StrictRWS.writer + tell = StrictRWS.tell + listen = StrictRWS.listen + pass = StrictRWS.pass + +-- --------------------------------------------------------------------------- +-- Instances for other mtl transformers +-- +-- All of these instances need UndecidableInstances, +-- because they do not satisfy the coverage condition. + +instance (Error e, MonadWriter w m) => MonadWriter w (ErrorT e m) where + writer = lift . writer + tell = lift . 
tell + listen = Error.liftListen listen + pass = Error.liftPass pass + +instance MonadWriter w m => MonadWriter w (ExceptT e m) where + writer = lift . writer + tell = lift . tell + listen = Except.liftListen listen + pass = Except.liftPass pass + +instance MonadWriter w m => MonadWriter w (IdentityT m) where + writer = lift . writer + tell = lift . tell + listen = Identity.mapIdentityT listen + pass = Identity.mapIdentityT pass + +instance MonadWriter w m => MonadWriter w (MaybeT m) where + writer = lift . writer + tell = lift . tell + listen = Maybe.liftListen listen + pass = Maybe.liftPass pass + +instance MonadWriter w m => MonadWriter w (ReaderT r m) where + writer = lift . writer + tell = lift . tell + listen = mapReaderT listen + pass = mapReaderT pass + +instance MonadWriter w m => MonadWriter w (Lazy.StateT s m) where + writer = lift . writer + tell = lift . tell + listen = Lazy.liftListen listen + pass = Lazy.liftPass pass + +instance MonadWriter w m => MonadWriter w (Strict.StateT s m) where + writer = lift . writer + tell = lift . tell + listen = Strict.liftListen listen + pass = Strict.liftPass pass diff -Nru cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/Writer/Lazy.hs cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/Writer/Lazy.hs --- cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/Writer/Lazy.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/Writer/Lazy.hs 2014-06-02 02:22:37.000000000 +0000 @@ -0,0 +1,50 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Control.Monad.Writer.Lazy +-- Copyright : (c) Andy Gill 2001, +-- (c) Oregon Graduate Institute of Science and Technology, 2001 +-- License : BSD-style (see the file LICENSE) +-- +-- Maintainer : libraries@haskell.org +-- Stability : experimental +-- Portability : non-portable (multi-param classes, functional dependencies) +-- +-- Lazy writer monads. 
+-- +-- Inspired by the paper +-- /Functional Programming with Overloading and Higher-Order Polymorphism/, +-- Mark P Jones () +-- Advanced School of Functional Programming, 1995. +----------------------------------------------------------------------------- + +module Control.Monad.Writer.Lazy ( + -- * MonadWriter class + MonadWriter(..), + listens, + censor, + -- * The Writer monad + Writer, + runWriter, + execWriter, + mapWriter, + -- * The WriterT monad transformer + WriterT(WriterT), + runWriterT, + execWriterT, + mapWriterT, + module Control.Monad, + module Control.Monad.Fix, + module Control.Monad.Trans, + module Data.Monoid, + ) where + +import Control.Monad.Writer.Class + +import Control.Monad.Trans +import Control.Monad.Trans.Writer.Lazy ( + Writer, runWriter, execWriter, mapWriter, + WriterT(WriterT), runWriterT, execWriterT, mapWriterT) + +import Control.Monad +import Control.Monad.Fix +import Data.Monoid diff -Nru cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/Writer/Strict.hs cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/Writer/Strict.hs --- cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/Writer/Strict.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/Writer/Strict.hs 2014-06-02 02:22:37.000000000 +0000 @@ -0,0 +1,49 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Control.Monad.Writer.Strict +-- Copyright : (c) Andy Gill 2001, +-- (c) Oregon Graduate Institute of Science and Technology, 2001 +-- License : BSD-style (see the file LICENSE) +-- +-- Maintainer : libraries@haskell.org +-- Stability : experimental +-- Portability : non-portable (multi-param classes, functional dependencies) +-- +-- Strict writer monads. +-- +-- Inspired by the paper +-- /Functional Programming with Overloading and Higher-Order Polymorphism/, +-- Mark P Jones () +-- Advanced School of Functional Programming, 1995. 
+----------------------------------------------------------------------------- + +module Control.Monad.Writer.Strict ( + -- * MonadWriter class + MonadWriter(..), + listens, + censor, + -- * The Writer monad + Writer, + runWriter, + execWriter, + mapWriter, + -- * The WriterT monad transformer + WriterT(..), + execWriterT, + mapWriterT, + module Control.Monad, + module Control.Monad.Fix, + module Control.Monad.Trans, + module Data.Monoid, + ) where + +import Control.Monad.Writer.Class + +import Control.Monad.Trans +import Control.Monad.Trans.Writer.Strict ( + Writer, runWriter, execWriter, mapWriter, + WriterT(..), execWriterT, mapWriterT) + +import Control.Monad +import Control.Monad.Fix +import Data.Monoid diff -Nru cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/Writer.hs cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/Writer.hs --- cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Control/Monad/Writer.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Control/Monad/Writer.hs 2014-06-02 02:22:37.000000000 +0000 @@ -0,0 +1,24 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Control.Monad.Writer +-- Copyright : (c) Andy Gill 2001, +-- (c) Oregon Graduate Institute of Science and Technology, 2001 +-- License : BSD-style (see the file LICENSE) +-- +-- Maintainer : libraries@haskell.org +-- Stability : experimental +-- Portability : non-portable (multi-param classes, functional dependencies) +-- +-- The MonadWriter class. +-- +-- Inspired by the paper +-- /Functional Programming with Overloading and Higher-Order Polymorphism/, +-- Mark P Jones () +-- Advanced School of Functional Programming, 1995. 
+----------------------------------------------------------------------------- + +module Control.Monad.Writer ( + module Control.Monad.Writer.Lazy + ) where + +import Control.Monad.Writer.Lazy diff -Nru cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/LICENSE cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/LICENSE --- cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/LICENSE 2014-06-02 02:22:37.000000000 +0000 @@ -0,0 +1,31 @@ +The Glasgow Haskell Compiler License + +Copyright 2004, The University Court of the University of Glasgow. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +- Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +- Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +- Neither name of the University nor the names of its contributors may be +used to endorse or promote products derived from this software without +specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY COURT OF THE UNIVERSITY OF +GLASGOW AND THE CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +UNIVERSITY COURT OF THE UNIVERSITY OF GLASGOW OR THE CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. diff -Nru cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/mtl.cabal cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/mtl.cabal --- cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/mtl.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/mtl.cabal 2016-06-02 07:15:39.000000000 +0000 @@ -0,0 +1,55 @@ +name: mtl +version: 2.2.1 +x-revision: 1 +cabal-version: >= 1.6 +license: BSD3 +license-file: LICENSE +author: Andy Gill +maintainer: Edward Kmett +category: Control +synopsis: Monad classes, using functional dependencies +homepage: http://github.com/ekmett/mtl +bug-reports: http://github.com/ekmett/mtl/issues +description: + Monad classes using functional dependencies, with instances + for various monad transformers, inspired by the paper + /Functional Programming with Overloading and Higher-Order Polymorphism/, + by Mark P Jones, in /Advanced School of Functional Programming/, 1995 + (). 
+build-type: Simple +extra-source-files: CHANGELOG.markdown + +source-repository head + type: git + location: git://github.com/ekmett/mtl.git + +Library + exposed-modules: + Control.Monad.Cont + Control.Monad.Cont.Class + Control.Monad.Error + Control.Monad.Error.Class + Control.Monad.Except + Control.Monad.Identity + Control.Monad.List + Control.Monad.RWS + Control.Monad.RWS.Class + Control.Monad.RWS.Lazy + Control.Monad.RWS.Strict + Control.Monad.Reader + Control.Monad.Reader.Class + Control.Monad.State + Control.Monad.State.Class + Control.Monad.State.Lazy + Control.Monad.State.Strict + Control.Monad.Trans + Control.Monad.Writer + Control.Monad.Writer.Class + Control.Monad.Writer.Lazy + Control.Monad.Writer.Strict + build-depends: base < 6, transformers >= 0.4 && < 0.6 + extensions: + MultiParamTypeClasses + FunctionalDependencies + FlexibleInstances + ghc-options: -Wall -fno-warn-unused-imports -fno-warn-warnings-deprecations diff -Nru cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Setup.hs cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Setup.hs --- cabal-install-1.22-1.22.6.0/src/mtl-2.2.1/Setup.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/mtl-2.2.1/Setup.hs 2014-06-02 02:22:37.000000000 +0000 @@ -0,0 +1,2 @@ +import Distribution.Simple +main = defaultMain diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/cbits/ancilData.c cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/cbits/ancilData.c --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/cbits/ancilData.c 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/cbits/ancilData.c 2015-07-07 18:40:52.000000000 +0000 @@ -0,0 +1,132 @@ +/* + * Copyright(c), 2002 The GHC Team. + */ + +#ifdef aix_HOST_OS +#define _LINUX_SOURCE_COMPAT +// Required to get CMSG_SPACE/CMSG_LEN macros. See #265. +// Alternative is to #define COMPAT_43 and use the +// HAVE_STRUCT_MSGHDR_MSG_ACCRIGHTS code instead, but that means +// fiddling with the configure script too. 
+#endif + +#include "HsNet.h" +#include + +#if HAVE_STRUCT_MSGHDR_MSG_CONTROL || HAVE_STRUCT_MSGHDR_MSG_ACCRIGHTS /* until end */ + +/* + * Support for transmitting file descriptors. + * + * + */ + + +/* + * sendmsg() and recvmsg() wrappers for transmitting + * ancillary socket data. + * + * Doesn't provide the full generality of either, specifically: + * + * - no support for scattered read/writes. + * - only possible to send one ancillary chunk of data at a time. + */ + +int +sendFd(int sock, + int outfd) +{ + struct msghdr msg = {0}; + struct iovec iov[1]; + char buf[2]; +#if HAVE_STRUCT_MSGHDR_MSG_ACCRIGHTS + msg.msg_accrights = (void*)&outfd; + msg.msg_accrightslen = sizeof(int); +#else + struct cmsghdr *cmsg; + char ancBuffer[CMSG_SPACE(sizeof(int))]; + char* dPtr; + + msg.msg_control = ancBuffer; + msg.msg_controllen = sizeof(ancBuffer); + + cmsg = CMSG_FIRSTHDR(&msg); + cmsg->cmsg_level = SOL_SOCKET; + cmsg->cmsg_type = SCM_RIGHTS; + cmsg->cmsg_len = CMSG_LEN(sizeof(int)); + dPtr = (char*)CMSG_DATA(cmsg); + + *(int*)dPtr = outfd; + msg.msg_controllen = cmsg->cmsg_len; +#endif + + buf[0] = 0; buf[1] = '\0'; + iov[0].iov_base = buf; + iov[0].iov_len = 2; + + msg.msg_iov = iov; + msg.msg_iovlen = 1; + + return sendmsg(sock,&msg,0); +} + +int +recvFd(int sock) +{ + struct msghdr msg = {0}; + char duffBuf[10]; + int rc; + int len = sizeof(int); + struct iovec iov[1]; +#if HAVE_STRUCT_MSGHDR_MSG_CONTROL + struct cmsghdr *cmsg = NULL; + struct cmsghdr *cptr; +#else + int* fdBuffer; +#endif + int fd; + + iov[0].iov_base = duffBuf; + iov[0].iov_len = sizeof(duffBuf); + msg.msg_iov = iov; + msg.msg_iovlen = 1; + +#if HAVE_STRUCT_MSGHDR_MSG_CONTROL + cmsg = (struct cmsghdr*)malloc(CMSG_SPACE(len)); + if (cmsg==NULL) { + return -1; + } + + msg.msg_control = (void *)cmsg; + msg.msg_controllen = CMSG_LEN(len); +#else + fdBuffer = (int*)malloc(len); + if (fdBuffer) { + msg.msg_accrights = (void *)fdBuffer; + } else { + return -1; + } + msg.msg_accrightslen = len; +#endif + 
+ if ((rc = recvmsg(sock,&msg,0)) < 0) { +#if HAVE_STRUCT_MSGHDR_MSG_CONTROL + free(cmsg); +#else + free(fdBuffer); +#endif + return rc; + } + +#if HAVE_STRUCT_MSGHDR_MSG_CONTROL + cptr = (struct cmsghdr*)CMSG_FIRSTHDR(&msg); + fd = *(int*)CMSG_DATA(cptr); + free(cmsg); +#else + fd = *(int*)fdBuffer; + free(fdBuffer); +#endif + return fd; +} + +#endif diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/cbits/asyncAccept.c cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/cbits/asyncAccept.c --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/cbits/asyncAccept.c 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/cbits/asyncAccept.c 2015-07-07 18:40:52.000000000 +0000 @@ -0,0 +1,72 @@ +/* + * (c) sof, 2003. + */ + +#include "HsNet.h" +#include "HsFFI.h" + +#if defined(HAVE_WINSOCK2_H) && !defined(__CYGWIN__) + +/* all the way to the end */ + +/* + * To support non-blocking accept()s with WinSock, we use the asyncDoProc# + * primop, which lets a Haskell thread call an external routine without + * blocking the progress of other threads. + * + * As can readily be seen, this is a low-level mechanism. + * + */ + +typedef struct AcceptData { + int fdSock; + int newSock; + void* sockAddr; + int size; +} AcceptData; + +/* + * Fill in parameter block that's passed along when the RTS invokes the + * accept()-calling proc below (acceptDoProc()) + */ +void* +newAcceptParams(int sock, + int sz, + void* sockaddr) +{ + AcceptData* data = (AcceptData*)malloc(sizeof(AcceptData)); + if (!data) return NULL; + data->fdSock = sock; + data->newSock = 0; + data->sockAddr = sockaddr; + data->size = sz; + + return data; +} + +/* Accessors for return code and accept()'s socket result. 
*/ + +int +acceptNewSock(void* d) +{ + return (((AcceptData*)d)->newSock); +} + +/* Routine invoked by an RTS worker thread */ +int +acceptDoProc(void* param) +{ + SOCKET s; + + AcceptData* data = (AcceptData*)param; + s = accept( data->fdSock, + data->sockAddr, + &data->size); + data->newSock = s; + if ( s == INVALID_SOCKET ) { + return GetLastError(); + } else { + return 0; + } +} +#endif diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/cbits/HsNet.c cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/cbits/HsNet.c --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/cbits/HsNet.c 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/cbits/HsNet.c 2015-07-07 18:40:52.000000000 +0000 @@ -0,0 +1,8 @@ +/* ----------------------------------------------------------------------------- + * (c) The University of Glasgow 2002 + * + * static versions of the inline functions from HsNet.h + * -------------------------------------------------------------------------- */ + +#define INLINE +#include "HsNet.h" diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/cbits/initWinSock.c cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/cbits/initWinSock.c --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/cbits/initWinSock.c 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/cbits/initWinSock.c 2015-07-07 18:40:52.000000000 +0000 @@ -0,0 +1,43 @@ +#include "HsNet.h" +#include "HsFFI.h" + +#if defined(HAVE_WINSOCK2_H) && !defined(__CYGWIN__) + +static int winsock_inited = 0; + +static void +shutdownHandler(void) +{ + WSACleanup(); +} + +/* Initialising WinSock... 
*/ +int +initWinSock () +{ + WORD wVersionRequested; + WSADATA wsaData; + int err; + + if (!winsock_inited) { + wVersionRequested = MAKEWORD( 2, 2 ); + + err = WSAStartup ( wVersionRequested, &wsaData ); + + if ( err != 0 ) { + return err; + } + + if ( LOBYTE( wsaData.wVersion ) != 2 || + HIBYTE( wsaData.wVersion ) != 2 ) { + WSACleanup(); + return (-1); + } + + atexit(shutdownHandler); + winsock_inited = 1; + } + return 0; +} + +#endif diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/cbits/winSockErr.c cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/cbits/winSockErr.c --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/cbits/winSockErr.c 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/cbits/winSockErr.c 2015-07-07 18:40:52.000000000 +0000 @@ -0,0 +1,76 @@ +#include "HsNet.h" +#include "HsFFI.h" + +#if defined(HAVE_WINSOCK2_H) && !defined(__CYGWIN__) +#include + +/* to the end */ + +const char* +getWSErrorDescr(int err) +{ + static char otherErrMsg[256]; + + switch (err) { + case WSAEINTR: return "Interrupted function call (WSAEINTR)"; + case WSAEBADF: return "bad socket descriptor (WSAEBADF)"; + case WSAEACCES: return "Permission denied (WSAEACCESS)"; + case WSAEFAULT: return "Bad address (WSAEFAULT)"; + case WSAEINVAL: return "Invalid argument (WSAEINVAL)"; + case WSAEMFILE: return "Too many open files (WSAEMFILE)"; + case WSAEWOULDBLOCK: return "Resource temporarily unavailable (WSAEWOULDBLOCK)"; + case WSAEINPROGRESS: return "Operation now in progress (WSAEINPROGRESS)"; + case WSAEALREADY: return "Operation already in progress (WSAEALREADY)"; + case WSAENOTSOCK: return "Socket operation on non-socket (WSAENOTSOCK)"; + case WSAEDESTADDRREQ: return "Destination address required (WSAEDESTADDRREQ)"; + case WSAEMSGSIZE: return "Message too long (WSAEMSGSIZE)"; + case WSAEPROTOTYPE: return "Protocol wrong type for socket (WSAEPROTOTYPE)"; + case WSAENOPROTOOPT: return "Bad protocol option (WSAENOPROTOOPT)"; + case 
WSAEPROTONOSUPPORT: return "Protocol not supported (WSAEPROTONOSUPPORT)"; + case WSAESOCKTNOSUPPORT: return "Socket type not supported (WSAESOCKTNOSUPPORT)"; + case WSAEOPNOTSUPP: return "Operation not supported (WSAEOPNOTSUPP)"; + case WSAEPFNOSUPPORT: return "Protocol family not supported (WSAEPFNOSUPPORT)"; + case WSAEAFNOSUPPORT: return "Address family not supported by protocol family (WSAEAFNOSUPPORT)"; + case WSAEADDRINUSE: return "Address already in use (WSAEADDRINUSE)"; + case WSAEADDRNOTAVAIL: return "Cannot assign requested address (WSAEADDRNOTAVAIL)"; + case WSAENETDOWN: return "Network is down (WSAENETDOWN)"; + case WSAENETUNREACH: return "Network is unreachable (WSAENETUNREACH)"; + case WSAENETRESET: return "Network dropped connection on reset (WSAENETRESET)"; + case WSAECONNABORTED: return "Software caused connection abort (WSAECONNABORTED)"; + case WSAECONNRESET: return "Connection reset by peer (WSAECONNRESET)"; + case WSAENOBUFS: return "No buffer space available (WSAENOBUFS)"; + case WSAEISCONN: return "Socket is already connected (WSAEISCONN)"; + case WSAENOTCONN: return "Socket is not connected (WSAENOTCONN)"; + case WSAESHUTDOWN: return "Cannot send after socket shutdown (WSAESHUTDOWN)"; + case WSAETOOMANYREFS: return "Too many references (WSAETOOMANYREFS)"; + case WSAETIMEDOUT: return "Connection timed out (WSAETIMEDOUT)"; + case WSAECONNREFUSED: return "Connection refused (WSAECONNREFUSED)"; + case WSAELOOP: return "Too many levels of symbolic links (WSAELOOP)"; + case WSAENAMETOOLONG: return "Filename too long (WSAENAMETOOLONG)"; + case WSAEHOSTDOWN: return "Host is down (WSAEHOSTDOWN)"; + case WSAEHOSTUNREACH: return "Host is unreachable (WSAEHOSTUNREACH)"; + case WSAENOTEMPTY: return "Resource not empty (WSAENOTEMPTY)"; + case WSAEPROCLIM: return "Too many processes (WSAEPROCLIM)"; + case WSAEUSERS: return "Too many users (WSAEUSERS)"; + case WSAEDQUOT: return "Disk quota exceeded (WSAEDQUOT)"; + case WSAESTALE: return "Stale NFS file 
handle (WSAESTALE)"; + case WSAEREMOTE: return "Too many levels of remote in path (WSAEREMOTE)"; + case WSAEDISCON: return "Graceful shutdown in progress (WSAEDISCON)"; + case WSASYSNOTREADY: return "Network subsystem is unavailable (WSASYSNOTREADY)"; + case WSAVERNOTSUPPORTED: return "Winsock.dll version out of range (WSAVERNOTSUPPORTED)"; + case WSANOTINITIALISED: return "Successful WSAStartup not yet performed (WSANOTINITIALISED)"; +#ifdef WSATYPE_NOT_FOUND + case WSATYPE_NOT_FOUND: return "Class type not found (WSATYPE_NOT_FOUND)"; +#endif + case WSAHOST_NOT_FOUND: return "Host not found (WSAHOST_NOT_FOUND)"; + case WSATRY_AGAIN: return "Nonauthoritative host not found (WSATRY_AGAIN)"; + case WSANO_RECOVERY: return "This is a nonrecoverable error (WSANO_RECOVERY)"; + case WSANO_DATA: return "Valid name, no data record of requested type (WSANO_DATA)"; + default: + sprintf(otherErrMsg, "Unknown WinSock error: %u", err); + return otherErrMsg; + } +} + +#endif + diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/CHANGELOG.md cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/CHANGELOG.md --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/CHANGELOG.md 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/CHANGELOG.md 2015-07-07 18:40:52.000000000 +0000 @@ -0,0 +1,14 @@ +## Version 2.6.2.1 + + * Regenerate configure and HsNetworkConfig.h.in. + + * Better detection of CAN sockets. + +## Version 2.6.2.0 + + * Add support for TCP_USER_TIMEOUT. + + * Don't conditionally export the SockAddr constructors. + + * Add isSupportSockAddr to allow checking for supported address types + at runtime. 
diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/config.guess cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/config.guess --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/config.guess 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/config.guess 2015-07-07 18:40:52.000000000 +0000 @@ -0,0 +1,1420 @@ +#! /bin/sh +# Attempt to guess a canonical system name. +# Copyright 1992-2014 Free Software Foundation, Inc. + +timestamp='2014-03-23' + +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see . +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that +# program. This Exception is an additional permission under section 7 +# of the GNU General Public License, version 3 ("GPLv3"). +# +# Originally written by Per Bothner. +# +# You can get the latest version of this script from: +# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD +# +# Please send patches with a ChangeLog entry to config-patches@gnu.org. + + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] + +Output the configuration name of the system \`$me' is run on. 
+ +Operation modes: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.guess ($timestamp) + +Originally written by Per Bothner. +Copyright 1992-2014 Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" >&2 + exit 1 ;; + * ) + break ;; + esac +done + +if test $# != 0; then + echo "$me: too many arguments$help" >&2 + exit 1 +fi + +trap 'exit 1' 1 2 15 + +# CC_FOR_BUILD -- compiler used by this script. Note that the use of a +# compiler to aid in system detection is discouraged as it requires +# temporary files to be created and, as you can see below, it is a +# headache to deal with in a portable fashion. + +# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still +# use `HOST_CC' if defined, but it is deprecated. + +# Portable tmp directory creation inspired by the Autoconf team. 
+ +set_cc_for_build=' +trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; +trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; +: ${TMPDIR=/tmp} ; + { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || + { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || + { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || + { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; +dummy=$tmp/dummy ; +tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; +case $CC_FOR_BUILD,$HOST_CC,$CC in + ,,) echo "int x;" > $dummy.c ; + for c in cc gcc c89 c99 ; do + if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then + CC_FOR_BUILD="$c"; break ; + fi ; + done ; + if test x"$CC_FOR_BUILD" = x ; then + CC_FOR_BUILD=no_compiler_found ; + fi + ;; + ,,*) CC_FOR_BUILD=$CC ;; + ,*,*) CC_FOR_BUILD=$HOST_CC ;; +esac ; set_cc_for_build= ;' + +# This is needed to find uname on a Pyramid OSx when run in the BSD universe. +# (ghazi@noc.rutgers.edu 1994-08-24) +if (test -f /.attbin/uname) >/dev/null 2>&1 ; then + PATH=$PATH:/.attbin ; export PATH +fi + +UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown +UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown +UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown +UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown + +case "${UNAME_SYSTEM}" in +Linux|GNU|GNU/*) + # If the system lacks a compiler, then just pick glibc. + # We could probably try harder. 
+ LIBC=gnu + + eval $set_cc_for_build + cat <<-EOF > $dummy.c + #include + #if defined(__UCLIBC__) + LIBC=uclibc + #elif defined(__dietlibc__) + LIBC=dietlibc + #else + LIBC=gnu + #endif + EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC' | sed 's, ,,g'` + ;; +esac + +# Note: order is significant - the case branches are not exclusive. + +case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in + *:NetBSD:*:*) + # NetBSD (nbsd) targets should (where applicable) match one or + # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, + # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently + # switched to ELF, *-*-netbsd* would select the old + # object file format. This provides both forward + # compatibility and a consistent mechanism for selecting the + # object file format. + # + # Note: NetBSD doesn't particularly care about the vendor + # portion of the name. We always set it to "unknown". + sysctl="sysctl -n hw.machine_arch" + UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \ + /usr/sbin/$sysctl 2>/dev/null || echo unknown)` + case "${UNAME_MACHINE_ARCH}" in + armeb) machine=armeb-unknown ;; + arm*) machine=arm-unknown ;; + sh3el) machine=shl-unknown ;; + sh3eb) machine=sh-unknown ;; + sh5el) machine=sh5le-unknown ;; + *) machine=${UNAME_MACHINE_ARCH}-unknown ;; + esac + # The Operating System including object format, if it has switched + # to ELF recently, or will in the future. + case "${UNAME_MACHINE_ARCH}" in + arm*|i386|m68k|ns32k|sh3*|sparc|vax) + eval $set_cc_for_build + if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ELF__ + then + # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). + # Return netbsd for either. FIX? + os=netbsd + else + os=netbsdelf + fi + ;; + *) + os=netbsd + ;; + esac + # The OS release + # Debian GNU/NetBSD machines have a different userland, and + # thus, need a distinct triplet. 
However, they do not need + # kernel version information, so it can be replaced with a + # suitable tag, in the style of linux-gnu. + case "${UNAME_VERSION}" in + Debian*) + release='-gnu' + ;; + *) + release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'` + ;; + esac + # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: + # contains redundant information, the shorter form: + # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. + echo "${machine}-${os}${release}" + exit ;; + *:Bitrig:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'` + echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE} + exit ;; + *:OpenBSD:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` + echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE} + exit ;; + *:ekkoBSD:*:*) + echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} + exit ;; + *:SolidBSD:*:*) + echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE} + exit ;; + macppc:MirBSD:*:*) + echo powerpc-unknown-mirbsd${UNAME_RELEASE} + exit ;; + *:MirBSD:*:*) + echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} + exit ;; + alpha:OSF1:*:*) + case $UNAME_RELEASE in + *4.0) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` + ;; + *5.*) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` + ;; + esac + # According to Compaq, /usr/sbin/psrinfo has been available on + # OSF/1 and Tru64 systems produced since 1995. I hope that + # covers most systems running today. This code pipes the CPU + # types through head -n 1, so we only detect the type of CPU 0. 
+ ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` + case "$ALPHA_CPU_TYPE" in + "EV4 (21064)") + UNAME_MACHINE="alpha" ;; + "EV4.5 (21064)") + UNAME_MACHINE="alpha" ;; + "LCA4 (21066/21068)") + UNAME_MACHINE="alpha" ;; + "EV5 (21164)") + UNAME_MACHINE="alphaev5" ;; + "EV5.6 (21164A)") + UNAME_MACHINE="alphaev56" ;; + "EV5.6 (21164PC)") + UNAME_MACHINE="alphapca56" ;; + "EV5.7 (21164PC)") + UNAME_MACHINE="alphapca57" ;; + "EV6 (21264)") + UNAME_MACHINE="alphaev6" ;; + "EV6.7 (21264A)") + UNAME_MACHINE="alphaev67" ;; + "EV6.8CB (21264C)") + UNAME_MACHINE="alphaev68" ;; + "EV6.8AL (21264B)") + UNAME_MACHINE="alphaev68" ;; + "EV6.8CX (21264D)") + UNAME_MACHINE="alphaev68" ;; + "EV6.9A (21264/EV69A)") + UNAME_MACHINE="alphaev69" ;; + "EV7 (21364)") + UNAME_MACHINE="alphaev7" ;; + "EV7.9 (21364A)") + UNAME_MACHINE="alphaev79" ;; + esac + # A Pn.n version is a patched version. + # A Vn.n version is a released version. + # A Tn.n version is a released field test version. + # A Xn.n version is an unreleased experimental baselevel. + # 1.2 uses "1.2" for uname -r. + echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` + # Reset EXIT trap before exiting to avoid spurious non-zero exit code. + exitcode=$? + trap '' 0 + exit $exitcode ;; + Alpha\ *:Windows_NT*:*) + # How do we know it's Interix rather than the generic POSIX subsystem? + # Should we change UNAME_MACHINE based on the output of uname instead + # of the specific Alpha model? 
+ echo alpha-pc-interix + exit ;; + 21064:Windows_NT:50:3) + echo alpha-dec-winnt3.5 + exit ;; + Amiga*:UNIX_System_V:4.0:*) + echo m68k-unknown-sysv4 + exit ;; + *:[Aa]miga[Oo][Ss]:*:*) + echo ${UNAME_MACHINE}-unknown-amigaos + exit ;; + *:[Mm]orph[Oo][Ss]:*:*) + echo ${UNAME_MACHINE}-unknown-morphos + exit ;; + *:OS/390:*:*) + echo i370-ibm-openedition + exit ;; + *:z/VM:*:*) + echo s390-ibm-zvmoe + exit ;; + *:OS400:*:*) + echo powerpc-ibm-os400 + exit ;; + arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) + echo arm-acorn-riscix${UNAME_RELEASE} + exit ;; + arm*:riscos:*:*|arm*:RISCOS:*:*) + echo arm-unknown-riscos + exit ;; + SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) + echo hppa1.1-hitachi-hiuxmpp + exit ;; + Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) + # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. + if test "`(/bin/universe) 2>/dev/null`" = att ; then + echo pyramid-pyramid-sysv3 + else + echo pyramid-pyramid-bsd + fi + exit ;; + NILE*:*:*:dcosx) + echo pyramid-pyramid-svr4 + exit ;; + DRS?6000:unix:4.0:6*) + echo sparc-icl-nx6 + exit ;; + DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) + case `/usr/bin/uname -p` in + sparc) echo sparc-icl-nx7; exit ;; + esac ;; + s390x:SunOS:*:*) + echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4H:SunOS:5.*:*) + echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) + echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) + echo i386-pc-auroraux${UNAME_RELEASE} + exit ;; + i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) + eval $set_cc_for_build + SUN_ARCH="i386" + # If there is a compiler, see if it is configured for 64-bit objects. + # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. + # This test works for both compilers. 
+ if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then + if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + SUN_ARCH="x86_64" + fi + fi + echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:6*:*) + # According to config.sub, this is the proper way to canonicalize + # SunOS6. Hard to guess exactly what SunOS6 will be like, but + # it's likely to be more like Solaris than SunOS4. + echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:*:*) + case "`/usr/bin/arch -k`" in + Series*|S4*) + UNAME_RELEASE=`uname -v` + ;; + esac + # Japanese Language versions have a version number like `4.1.3-JL'. + echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` + exit ;; + sun3*:SunOS:*:*) + echo m68k-sun-sunos${UNAME_RELEASE} + exit ;; + sun*:*:4.2BSD:*) + UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` + test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3 + case "`/bin/arch`" in + sun3) + echo m68k-sun-sunos${UNAME_RELEASE} + ;; + sun4) + echo sparc-sun-sunos${UNAME_RELEASE} + ;; + esac + exit ;; + aushp:SunOS:*:*) + echo sparc-auspex-sunos${UNAME_RELEASE} + exit ;; + # The situation for MiNT is a little confusing. The machine name + # can be virtually everything (everything which is not + # "atarist" or "atariste" at least should have a processor + # > m68000). The system name ranges from "MiNT" over "FreeMiNT" + # to the lowercase version "mint" (or "freemint"). Finally + # the system name "TOS" denotes a system which is actually not + # MiNT. But MiNT is downward compatible to TOS, so this should + # be no problem. 
+ atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) + echo m68k-milan-mint${UNAME_RELEASE} + exit ;; + hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) + echo m68k-hades-mint${UNAME_RELEASE} + exit ;; + *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) + echo m68k-unknown-mint${UNAME_RELEASE} + exit ;; + m68k:machten:*:*) + echo m68k-apple-machten${UNAME_RELEASE} + exit ;; + powerpc:machten:*:*) + echo powerpc-apple-machten${UNAME_RELEASE} + exit ;; + RISC*:Mach:*:*) + echo mips-dec-mach_bsd4.3 + exit ;; + RISC*:ULTRIX:*:*) + echo mips-dec-ultrix${UNAME_RELEASE} + exit ;; + VAX*:ULTRIX*:*:*) + echo vax-dec-ultrix${UNAME_RELEASE} + exit ;; + 2020:CLIX:*:* | 2430:CLIX:*:*) + echo clipper-intergraph-clix${UNAME_RELEASE} + exit ;; + mips:*:*:UMIPS | mips:*:*:RISCos) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c +#ifdef __cplusplus +#include /* for printf() prototype */ + int main (int argc, char *argv[]) { +#else + int main (argc, argv) int argc; char *argv[]; { +#endif + #if defined (host_mips) && defined (MIPSEB) + #if defined (SYSTYPE_SYSV) + printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_SVR4) + printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) + printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); + #endif + #endif + exit (-1); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c && + dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` && + SYSTEM_NAME=`$dummy $dummyarg` && + { echo "$SYSTEM_NAME"; exit; } + echo mips-mips-riscos${UNAME_RELEASE} + exit ;; + Motorola:PowerMAX_OS:*:*) + echo 
powerpc-motorola-powermax + exit ;; + Motorola:*:4.3:PL8-*) + echo powerpc-harris-powermax + exit ;; + Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) + echo powerpc-harris-powermax + exit ;; + Night_Hawk:Power_UNIX:*:*) + echo powerpc-harris-powerunix + exit ;; + m88k:CX/UX:7*:*) + echo m88k-harris-cxux7 + exit ;; + m88k:*:4*:R4*) + echo m88k-motorola-sysv4 + exit ;; + m88k:*:3*:R3*) + echo m88k-motorola-sysv3 + exit ;; + AViiON:dgux:*:*) + # DG/UX returns AViiON for all architectures + UNAME_PROCESSOR=`/usr/bin/uname -p` + if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ] + then + if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ + [ ${TARGET_BINARY_INTERFACE}x = x ] + then + echo m88k-dg-dgux${UNAME_RELEASE} + else + echo m88k-dg-dguxbcs${UNAME_RELEASE} + fi + else + echo i586-dg-dgux${UNAME_RELEASE} + fi + exit ;; + M88*:DolphinOS:*:*) # DolphinOS (SVR3) + echo m88k-dolphin-sysv3 + exit ;; + M88*:*:R3*:*) + # Delta 88k system running SVR3 + echo m88k-motorola-sysv3 + exit ;; + XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) + echo m88k-tektronix-sysv3 + exit ;; + Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) + echo m68k-tektronix-bsd + exit ;; + *:IRIX*:*:*) + echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` + exit ;; + ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. 
+ echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id + exit ;; # Note that: echo "'`uname -s`'" gives 'AIX ' + i*86:AIX:*:*) + echo i386-ibm-aix + exit ;; + ia64:AIX:*:*) + if [ -x /usr/bin/oslevel ] ; then + IBM_REV=`/usr/bin/oslevel` + else + IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + fi + echo ${UNAME_MACHINE}-ibm-aix${IBM_REV} + exit ;; + *:AIX:2:3) + if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #include + + main() + { + if (!__power_pc()) + exit(1); + puts("powerpc-ibm-aix3.2.5"); + exit(0); + } +EOF + if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` + then + echo "$SYSTEM_NAME" + else + echo rs6000-ibm-aix3.2.5 + fi + elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then + echo rs6000-ibm-aix3.2.4 + else + echo rs6000-ibm-aix3.2 + fi + exit ;; + *:AIX:*:[4567]) + IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` + if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then + IBM_ARCH=rs6000 + else + IBM_ARCH=powerpc + fi + if [ -x /usr/bin/oslevel ] ; then + IBM_REV=`/usr/bin/oslevel` + else + IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + fi + echo ${IBM_ARCH}-ibm-aix${IBM_REV} + exit ;; + *:AIX:*:*) + echo rs6000-ibm-aix + exit ;; + ibmrt:4.4BSD:*|romp-ibm:BSD:*) + echo romp-ibm-bsd4.4 + exit ;; + ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and + echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to + exit ;; # report: romp-ibm BSD 4.3 + *:BOSX:*:*) + echo rs6000-bull-bosx + exit ;; + DPX/2?00:B.O.S.:*:*) + echo m68k-bull-sysv3 + exit ;; + 9000/[34]??:4.3bsd:1.*:*) + echo m68k-hp-bsd + exit ;; + hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) + echo m68k-hp-bsd4.4 + exit ;; + 9000/[34678]??:HP-UX:*:*) + HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` + case "${UNAME_MACHINE}" in + 9000/31? ) HP_ARCH=m68000 ;; + 9000/[34]?? 
) HP_ARCH=m68k ;; + 9000/[678][0-9][0-9]) + if [ -x /usr/bin/getconf ]; then + sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` + sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` + case "${sc_cpu_version}" in + 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0 + 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1 + 532) # CPU_PA_RISC2_0 + case "${sc_kernel_bits}" in + 32) HP_ARCH="hppa2.0n" ;; + 64) HP_ARCH="hppa2.0w" ;; + '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20 + esac ;; + esac + fi + if [ "${HP_ARCH}" = "" ]; then + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + + #define _HPUX_SOURCE + #include + #include + + int main () + { + #if defined(_SC_KERNEL_BITS) + long bits = sysconf(_SC_KERNEL_BITS); + #endif + long cpu = sysconf (_SC_CPU_VERSION); + + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1"); break; + case CPU_PA_RISC2_0: + #if defined(_SC_KERNEL_BITS) + switch (bits) + { + case 64: puts ("hppa2.0w"); break; + case 32: puts ("hppa2.0n"); break; + default: puts ("hppa2.0"); break; + } break; + #else /* !defined(_SC_KERNEL_BITS) */ + puts ("hppa2.0"); break; + #endif + default: puts ("hppa1.0"); break; + } + exit (0); + } +EOF + (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` + test -z "$HP_ARCH" && HP_ARCH=hppa + fi ;; + esac + if [ ${HP_ARCH} = "hppa2.0w" ] + then + eval $set_cc_for_build + + # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating + # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler + # generating 64-bit code. 
GNU and HP use different nomenclature: + # + # $ CC_FOR_BUILD=cc ./config.guess + # => hppa2.0w-hp-hpux11.23 + # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess + # => hppa64-hp-hpux11.23 + + if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | + grep -q __LP64__ + then + HP_ARCH="hppa2.0w" + else + HP_ARCH="hppa64" + fi + fi + echo ${HP_ARCH}-hp-hpux${HPUX_REV} + exit ;; + ia64:HP-UX:*:*) + HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` + echo ia64-hp-hpux${HPUX_REV} + exit ;; + 3050*:HI-UX:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #include + int + main () + { + long cpu = sysconf (_SC_CPU_VERSION); + /* The order matters, because CPU_IS_HP_MC68K erroneously returns + true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct + results, however. */ + if (CPU_IS_PA_RISC (cpu)) + { + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; + case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; + default: puts ("hppa-hitachi-hiuxwe2"); break; + } + } + else if (CPU_IS_HP_MC68K (cpu)) + puts ("m68k-hitachi-hiuxwe2"); + else puts ("unknown-hitachi-hiuxwe2"); + exit (0); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` && + { echo "$SYSTEM_NAME"; exit; } + echo unknown-hitachi-hiuxwe2 + exit ;; + 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) + echo hppa1.1-hp-bsd + exit ;; + 9000/8??:4.3bsd:*:*) + echo hppa1.0-hp-bsd + exit ;; + *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) + echo hppa1.0-hp-mpeix + exit ;; + hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) + echo hppa1.1-hp-osf + exit ;; + hp8??:OSF1:*:*) + echo hppa1.0-hp-osf + exit ;; + i*86:OSF1:*:*) + if [ -x /usr/sbin/sysversion ] ; then + echo ${UNAME_MACHINE}-unknown-osf1mk + else + echo ${UNAME_MACHINE}-unknown-osf1 + fi + exit ;; + parisc*:Lites*:*:*) + echo hppa1.1-hp-lites + exit ;; + C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) + echo c1-convex-bsd + exit ;; + C2*:ConvexOS:*:* | 
convex:ConvexOS:C2*:*) + if getsysinfo -f scalar_acc + then echo c32-convex-bsd + else echo c2-convex-bsd + fi + exit ;; + C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) + echo c34-convex-bsd + exit ;; + C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) + echo c38-convex-bsd + exit ;; + C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) + echo c4-convex-bsd + exit ;; + CRAY*Y-MP:*:*:*) + echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*[A-Z]90:*:*:*) + echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ + | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ + -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ + -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*TS:*:*:*) + echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*T3E:*:*:*) + echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*SV1:*:*:*) + echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + *:UNICOS/mp:*:*) + echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) + FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` + FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` + FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` + echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit ;; + 5000:UNIX_System_V:4.*:*) + FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` + FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'` + echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit ;; + i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) + echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} + exit ;; + sparc*:BSD/OS:*:*) + echo sparc-unknown-bsdi${UNAME_RELEASE} + exit ;; + *:BSD/OS:*:*) + echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} 
+ exit ;; + *:FreeBSD:*:*) + UNAME_PROCESSOR=`/usr/bin/uname -p` + case ${UNAME_PROCESSOR} in + amd64) + echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; + *) + echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; + esac + exit ;; + i*:CYGWIN*:*) + echo ${UNAME_MACHINE}-pc-cygwin + exit ;; + *:MINGW64*:*) + echo ${UNAME_MACHINE}-pc-mingw64 + exit ;; + *:MINGW*:*) + echo ${UNAME_MACHINE}-pc-mingw32 + exit ;; + *:MSYS*:*) + echo ${UNAME_MACHINE}-pc-msys + exit ;; + i*:windows32*:*) + # uname -m includes "-pc" on this system. + echo ${UNAME_MACHINE}-mingw32 + exit ;; + i*:PW*:*) + echo ${UNAME_MACHINE}-pc-pw32 + exit ;; + *:Interix*:*) + case ${UNAME_MACHINE} in + x86) + echo i586-pc-interix${UNAME_RELEASE} + exit ;; + authenticamd | genuineintel | EM64T) + echo x86_64-unknown-interix${UNAME_RELEASE} + exit ;; + IA64) + echo ia64-unknown-interix${UNAME_RELEASE} + exit ;; + esac ;; + [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) + echo i${UNAME_MACHINE}-pc-mks + exit ;; + 8664:Windows_NT:*) + echo x86_64-pc-mks + exit ;; + i*:Windows_NT*:* | Pentium*:Windows_NT*:*) + # How do we know it's Interix rather than the generic POSIX subsystem? + # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we + # UNAME_MACHINE based on the output of uname instead of i386? 
+ echo i586-pc-interix + exit ;; + i*:UWIN*:*) + echo ${UNAME_MACHINE}-pc-uwin + exit ;; + amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) + echo x86_64-unknown-cygwin + exit ;; + p*:CYGWIN*:*) + echo powerpcle-unknown-cygwin + exit ;; + prep*:SunOS:5.*:*) + echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + *:GNU:*:*) + # the GNU system + echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-${LIBC}`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` + exit ;; + *:GNU/*:*:*) + # other systems with GNU libc and userland + echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC} + exit ;; + i*86:Minix:*:*) + echo ${UNAME_MACHINE}-pc-minix + exit ;; + aarch64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + aarch64_be:Linux:*:*) + UNAME_MACHINE=aarch64_be + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + alpha:Linux:*:*) + case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in + EV5) UNAME_MACHINE=alphaev5 ;; + EV56) UNAME_MACHINE=alphaev56 ;; + PCA56) UNAME_MACHINE=alphapca56 ;; + PCA57) UNAME_MACHINE=alphapca56 ;; + EV6) UNAME_MACHINE=alphaev6 ;; + EV67) UNAME_MACHINE=alphaev67 ;; + EV68*) UNAME_MACHINE=alphaev68 ;; + esac + objdump --private-headers /bin/sh | grep -q ld.so.1 + if test "$?" 
= 0 ; then LIBC="gnulibc1" ; fi + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + arc:Linux:*:* | arceb:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + arm*:Linux:*:*) + eval $set_cc_for_build + if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_EABI__ + then + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + else + if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_PCS_VFP + then + echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabi + else + echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabihf + fi + fi + exit ;; + avr32*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + cris:Linux:*:*) + echo ${UNAME_MACHINE}-axis-linux-${LIBC} + exit ;; + crisv32:Linux:*:*) + echo ${UNAME_MACHINE}-axis-linux-${LIBC} + exit ;; + frv:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + hexagon:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + i*86:Linux:*:*) + echo ${UNAME_MACHINE}-pc-linux-${LIBC} + exit ;; + ia64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + m32r*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + m68*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + mips:Linux:*:* | mips64:Linux:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #undef CPU + #undef ${UNAME_MACHINE} + #undef ${UNAME_MACHINE}el + #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) + CPU=${UNAME_MACHINE}el + #else + #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) + CPU=${UNAME_MACHINE} + #else + CPU= + #endif + #endif +EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'` + test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; } + ;; + openrisc*:Linux:*:*) + echo or1k-unknown-linux-${LIBC} + exit ;; + or32:Linux:*:* | or1k*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + padre:Linux:*:*) + echo 
sparc-unknown-linux-${LIBC} + exit ;; + parisc64:Linux:*:* | hppa64:Linux:*:*) + echo hppa64-unknown-linux-${LIBC} + exit ;; + parisc:Linux:*:* | hppa:Linux:*:*) + # Look for CPU level + case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in + PA7*) echo hppa1.1-unknown-linux-${LIBC} ;; + PA8*) echo hppa2.0-unknown-linux-${LIBC} ;; + *) echo hppa-unknown-linux-${LIBC} ;; + esac + exit ;; + ppc64:Linux:*:*) + echo powerpc64-unknown-linux-${LIBC} + exit ;; + ppc:Linux:*:*) + echo powerpc-unknown-linux-${LIBC} + exit ;; + ppc64le:Linux:*:*) + echo powerpc64le-unknown-linux-${LIBC} + exit ;; + ppcle:Linux:*:*) + echo powerpcle-unknown-linux-${LIBC} + exit ;; + s390:Linux:*:* | s390x:Linux:*:*) + echo ${UNAME_MACHINE}-ibm-linux-${LIBC} + exit ;; + sh64*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + sh*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + sparc:Linux:*:* | sparc64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + tile*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + vax:Linux:*:*) + echo ${UNAME_MACHINE}-dec-linux-${LIBC} + exit ;; + x86_64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + xtensa*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + i*86:DYNIX/ptx:4*:*) + # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. + # earlier versions are messed up and put the nodename in both + # sysname and nodename. + echo i386-sequent-sysv4 + exit ;; + i*86:UNIX_SV:4.2MP:2.*) + # Unixware is an offshoot of SVR4, but it has its own version + # number series starting with 2... + # I am not positive that other SVR4 systems won't match this, + # I just have to hope. -- rms. + # Use sysv4.2uw... so that sysv4* matches it. + echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION} + exit ;; + i*86:OS/2:*:*) + # If we were able to find `uname', then EMX Unix compatibility + # is probably installed. 
+ echo ${UNAME_MACHINE}-pc-os2-emx + exit ;; + i*86:XTS-300:*:STOP) + echo ${UNAME_MACHINE}-unknown-stop + exit ;; + i*86:atheos:*:*) + echo ${UNAME_MACHINE}-unknown-atheos + exit ;; + i*86:syllable:*:*) + echo ${UNAME_MACHINE}-pc-syllable + exit ;; + i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) + echo i386-unknown-lynxos${UNAME_RELEASE} + exit ;; + i*86:*DOS:*:*) + echo ${UNAME_MACHINE}-pc-msdosdjgpp + exit ;; + i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*) + UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'` + if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then + echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL} + else + echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL} + fi + exit ;; + i*86:*:5:[678]*) + # UnixWare 7.x, OpenUNIX and OpenServer 6. + case `/bin/uname -X | grep "^Machine"` in + *486*) UNAME_MACHINE=i486 ;; + *Pentium) UNAME_MACHINE=i586 ;; + *Pent*|*Celeron) UNAME_MACHINE=i686 ;; + esac + echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} + exit ;; + i*86:*:3.2:*) + if test -f /usr/options/cb.name; then + UNAME_REL=`sed -n 's/.*Version //p' /dev/null >/dev/null ; then + UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` + (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 + (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ + && UNAME_MACHINE=i586 + (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ + && UNAME_MACHINE=i686 + (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ + && UNAME_MACHINE=i686 + echo ${UNAME_MACHINE}-pc-sco$UNAME_REL + else + echo ${UNAME_MACHINE}-pc-sysv32 + fi + exit ;; + pc:*:*:*) + # Left here for compatibility: + # uname -m prints for DJGPP always 'pc', but it prints nothing about + # the processor, so we play safe by assuming i586. + # Note: whatever this is, it MUST be the same as what config.sub + # prints for the "djgpp" host, or else GDB configury will decide that + # this is a cross-build. 
+ echo i586-pc-msdosdjgpp + exit ;; + Intel:Mach:3*:*) + echo i386-pc-mach3 + exit ;; + paragon:*:*:*) + echo i860-intel-osf1 + exit ;; + i860:*:4.*:*) # i860-SVR4 + if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then + echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4 + else # Add other i860-SVR4 vendors below as they are discovered. + echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 + fi + exit ;; + mini*:CTIX:SYS*5:*) + # "miniframe" + echo m68010-convergent-sysv + exit ;; + mc68k:UNIX:SYSTEM5:3.51m) + echo m68k-convergent-sysv + exit ;; + M680?0:D-NIX:5.3:*) + echo m68k-diab-dnix + exit ;; + M68*:*:R3V[5678]*:*) + test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; + 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) + OS_REL='' + test -r /etc/.relid \ + && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; + 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4; exit; } ;; + NCR*:*:4.2:* | MPRAS*:*:4.2:*) + OS_REL='.3' + test -r /etc/.relid \ + && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; + m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) + echo m68k-unknown-lynxos${UNAME_RELEASE} + exit ;; + mc68030:UNIX_System_V:4.*:*) + echo m68k-atari-sysv4 + exit ;; + 
TSUNAMI:LynxOS:2.*:*) + echo sparc-unknown-lynxos${UNAME_RELEASE} + exit ;; + rs6000:LynxOS:2.*:*) + echo rs6000-unknown-lynxos${UNAME_RELEASE} + exit ;; + PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) + echo powerpc-unknown-lynxos${UNAME_RELEASE} + exit ;; + SM[BE]S:UNIX_SV:*:*) + echo mips-dde-sysv${UNAME_RELEASE} + exit ;; + RM*:ReliantUNIX-*:*:*) + echo mips-sni-sysv4 + exit ;; + RM*:SINIX-*:*:*) + echo mips-sni-sysv4 + exit ;; + *:SINIX-*:*:*) + if uname -p 2>/dev/null >/dev/null ; then + UNAME_MACHINE=`(uname -p) 2>/dev/null` + echo ${UNAME_MACHINE}-sni-sysv4 + else + echo ns32k-sni-sysv + fi + exit ;; + PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort + # says + echo i586-unisys-sysv4 + exit ;; + *:UNIX_System_V:4*:FTX*) + # From Gerald Hewes . + # How about differentiating between stratus architectures? -djm + echo hppa1.1-stratus-sysv4 + exit ;; + *:*:*:FTX*) + # From seanf@swdc.stratus.com. + echo i860-stratus-sysv4 + exit ;; + i*86:VOS:*:*) + # From Paul.Green@stratus.com. + echo ${UNAME_MACHINE}-stratus-vos + exit ;; + *:VOS:*:*) + # From Paul.Green@stratus.com. + echo hppa1.1-stratus-vos + exit ;; + mc68*:A/UX:*:*) + echo m68k-apple-aux${UNAME_RELEASE} + exit ;; + news*:NEWS-OS:6*:*) + echo mips-sony-newsos6 + exit ;; + R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) + if [ -d /usr/nec ]; then + echo mips-nec-sysv${UNAME_RELEASE} + else + echo mips-unknown-sysv${UNAME_RELEASE} + fi + exit ;; + BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. + echo powerpc-be-beos + exit ;; + BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. + echo powerpc-apple-beos + exit ;; + BePC:BeOS:*:*) # BeOS running on Intel PC compatible. + echo i586-pc-beos + exit ;; + BePC:Haiku:*:*) # Haiku running on Intel PC compatible. 
+ echo i586-pc-haiku + exit ;; + x86_64:Haiku:*:*) + echo x86_64-unknown-haiku + exit ;; + SX-4:SUPER-UX:*:*) + echo sx4-nec-superux${UNAME_RELEASE} + exit ;; + SX-5:SUPER-UX:*:*) + echo sx5-nec-superux${UNAME_RELEASE} + exit ;; + SX-6:SUPER-UX:*:*) + echo sx6-nec-superux${UNAME_RELEASE} + exit ;; + SX-7:SUPER-UX:*:*) + echo sx7-nec-superux${UNAME_RELEASE} + exit ;; + SX-8:SUPER-UX:*:*) + echo sx8-nec-superux${UNAME_RELEASE} + exit ;; + SX-8R:SUPER-UX:*:*) + echo sx8r-nec-superux${UNAME_RELEASE} + exit ;; + Power*:Rhapsody:*:*) + echo powerpc-apple-rhapsody${UNAME_RELEASE} + exit ;; + *:Rhapsody:*:*) + echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} + exit ;; + *:Darwin:*:*) + UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown + eval $set_cc_for_build + if test "$UNAME_PROCESSOR" = unknown ; then + UNAME_PROCESSOR=powerpc + fi + if test `echo "$UNAME_RELEASE" | sed -e 's/\..*//'` -le 10 ; then + if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then + if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + case $UNAME_PROCESSOR in + i386) UNAME_PROCESSOR=x86_64 ;; + powerpc) UNAME_PROCESSOR=powerpc64 ;; + esac + fi + fi + elif test "$UNAME_PROCESSOR" = i386 ; then + # Avoid executing cc on OS X 10.9, as it ships with a stub + # that puts up a graphical alert prompting to install + # developer tools. Any system running Mac OS X 10.7 or + # later (Darwin 11 and later) is required to have a 64-bit + # processor. This is not true of the ARM version of Darwin + # that Apple uses in portable devices. 
+ UNAME_PROCESSOR=x86_64 + fi + echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} + exit ;; + *:procnto*:*:* | *:QNX:[0123456789]*:*) + UNAME_PROCESSOR=`uname -p` + if test "$UNAME_PROCESSOR" = "x86"; then + UNAME_PROCESSOR=i386 + UNAME_MACHINE=pc + fi + echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE} + exit ;; + *:QNX:*:4*) + echo i386-pc-qnx + exit ;; + NEO-?:NONSTOP_KERNEL:*:*) + echo neo-tandem-nsk${UNAME_RELEASE} + exit ;; + NSE-*:NONSTOP_KERNEL:*:*) + echo nse-tandem-nsk${UNAME_RELEASE} + exit ;; + NSR-?:NONSTOP_KERNEL:*:*) + echo nsr-tandem-nsk${UNAME_RELEASE} + exit ;; + *:NonStop-UX:*:*) + echo mips-compaq-nonstopux + exit ;; + BS2000:POSIX*:*:*) + echo bs2000-siemens-sysv + exit ;; + DS/*:UNIX_System_V:*:*) + echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE} + exit ;; + *:Plan9:*:*) + # "uname -m" is not consistent, so use $cputype instead. 386 + # is converted to i386 for consistency with other x86 + # operating systems. + if test "$cputype" = "386"; then + UNAME_MACHINE=i386 + else + UNAME_MACHINE="$cputype" + fi + echo ${UNAME_MACHINE}-unknown-plan9 + exit ;; + *:TOPS-10:*:*) + echo pdp10-unknown-tops10 + exit ;; + *:TENEX:*:*) + echo pdp10-unknown-tenex + exit ;; + KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) + echo pdp10-dec-tops20 + exit ;; + XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) + echo pdp10-xkl-tops20 + exit ;; + *:TOPS-20:*:*) + echo pdp10-unknown-tops20 + exit ;; + *:ITS:*:*) + echo pdp10-unknown-its + exit ;; + SEI:*:*:SEIUX) + echo mips-sei-seiux${UNAME_RELEASE} + exit ;; + *:DragonFly:*:*) + echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` + exit ;; + *:*VMS:*:*) + UNAME_MACHINE=`(uname -p) 2>/dev/null` + case "${UNAME_MACHINE}" in + A*) echo alpha-dec-vms ; exit ;; + I*) echo ia64-dec-vms ; exit ;; + V*) echo vax-dec-vms ; exit ;; + esac ;; + *:XENIX:*:SysV) + echo i386-pc-xenix + exit ;; + i*86:skyos:*:*) + echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 
's/ .*$//' + exit ;; + i*86:rdos:*:*) + echo ${UNAME_MACHINE}-pc-rdos + exit ;; + i*86:AROS:*:*) + echo ${UNAME_MACHINE}-pc-aros + exit ;; + x86_64:VMkernel:*:*) + echo ${UNAME_MACHINE}-unknown-esx + exit ;; +esac + +cat >&2 < in order to provide the needed +information to handle your system. + +config.guess timestamp = $timestamp + +uname -m = `(uname -m) 2>/dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null` + +hostinfo = `(hostinfo) 2>/dev/null` +/bin/universe = `(/bin/universe) 2>/dev/null` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` +/bin/arch = `(/bin/arch) 2>/dev/null` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` + +UNAME_MACHINE = ${UNAME_MACHINE} +UNAME_RELEASE = ${UNAME_RELEASE} +UNAME_SYSTEM = ${UNAME_SYSTEM} +UNAME_VERSION = ${UNAME_VERSION} +EOF + +exit 1 + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/config.sub cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/config.sub --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/config.sub 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/config.sub 2015-07-07 18:40:52.000000000 +0000 @@ -0,0 +1,1790 @@ +#! /bin/sh +# Configuration validation subroutine script. +# Copyright 1992-2014 Free Software Foundation, Inc. 
+ +timestamp='2014-04-03' + +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see . +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that +# program. This Exception is an additional permission under section 7 +# of the GNU General Public License, version 3 ("GPLv3"). + + +# Please send patches with a ChangeLog entry to config-patches@gnu.org. +# +# Configuration subroutine to validate and canonicalize a configuration type. +# Supply the specified configuration type as an argument. +# If it is invalid, we print an error message on stderr and exit with code 1. +# Otherwise, we print the canonical config type on stdout and succeed. + +# You can get the latest version of this script from: +# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD + +# This file is supposed to be the same for all GNU packages +# and recognize all the CPU types, system types and aliases +# that are meaningful with *any* GNU software. +# Each package is responsible for reporting which valid configurations +# it does not support. The user should be able to distinguish +# a failure to support a valid configuration from a meaningless +# configuration. 
+ +# The goal of this file is to map all the various variations of a given +# machine specification into a single specification in the form: +# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM +# or in some cases, the newer four-part form: +# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM +# It is wrong to echo any other type of specification. + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] CPU-MFR-OPSYS + $0 [OPTION] ALIAS + +Canonicalize a configuration name. + +Operation modes: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.sub ($timestamp) + +Copyright 1992-2014 Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" + exit 1 ;; + + *local*) + # First pass through any local machine types. + echo $1 + exit ;; + + * ) + break ;; + esac +done + +case $# in + 0) echo "$me: missing argument$help" >&2 + exit 1;; + 1) ;; + *) echo "$me: too many arguments$help" >&2 + exit 1;; +esac + +# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). +# Here we must recognize all the valid KERNEL-OS combinations. 
+maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` +case $maybe_os in + nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \ + linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \ + knetbsd*-gnu* | netbsd*-gnu* | \ + kopensolaris*-gnu* | \ + storm-chaos* | os2-emx* | rtmk-nova*) + os=-$maybe_os + basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` + ;; + android-linux) + os=-linux-android + basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown + ;; + *) + basic_machine=`echo $1 | sed 's/-[^-]*$//'` + if [ $basic_machine != $1 ] + then os=`echo $1 | sed 's/.*-/-/'` + else os=; fi + ;; +esac + +### Let's recognize common machines as not being operating systems so +### that things like config.sub decstation-3100 work. We also +### recognize some manufacturers as not being operating systems, so we +### can provide default operating systems below. +case $os in + -sun*os*) + # Prevent following clause from handling this invalid input. 
+ ;; + -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ + -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \ + -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \ + -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ + -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ + -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ + -apple | -axis | -knuth | -cray | -microblaze*) + os= + basic_machine=$1 + ;; + -bluegene*) + os=-cnk + ;; + -sim | -cisco | -oki | -wec | -winbond) + os= + basic_machine=$1 + ;; + -scout) + ;; + -wrs) + os=-vxworks + basic_machine=$1 + ;; + -chorusos*) + os=-chorusos + basic_machine=$1 + ;; + -chorusrdb) + os=-chorusrdb + basic_machine=$1 + ;; + -hiux*) + os=-hiuxwe2 + ;; + -sco6) + os=-sco5v6 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco5) + os=-sco3.2v5 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco4) + os=-sco3.2v4 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco3.2.[4-9]*) + os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco3.2v[4-9]*) + # Don't forget version if it is 3.2v4 or newer. + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco5v6*) + # Don't forget version if it is 3.2v4 or newer. 
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco*) + os=-sco3.2v2 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -udk*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -isc) + os=-isc2.2 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -clix*) + basic_machine=clipper-intergraph + ;; + -isc*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -lynx*178) + os=-lynxos178 + ;; + -lynx*5) + os=-lynxos5 + ;; + -lynx*) + os=-lynxos + ;; + -ptx*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'` + ;; + -windowsnt*) + os=`echo $os | sed -e 's/windowsnt/winnt/'` + ;; + -psos*) + os=-psos + ;; + -mint | -mint[0-9]*) + basic_machine=m68k-atari + os=-mint + ;; +esac + +# Decode aliases for certain CPU-COMPANY combinations. +case $basic_machine in + # Recognize the basic CPU types without company name. + # Some are omitted here because they have special meanings below. + 1750a | 580 \ + | a29k \ + | aarch64 | aarch64_be \ + | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ + | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ + | am33_2.0 \ + | arc | arceb \ + | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \ + | avr | avr32 \ + | be32 | be64 \ + | bfin \ + | c4x | c8051 | clipper \ + | d10v | d30v | dlx | dsp16xx \ + | epiphany \ + | fido | fr30 | frv \ + | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ + | hexagon \ + | i370 | i860 | i960 | ia64 \ + | ip2k | iq2000 \ + | k1om \ + | le32 | le64 \ + | lm32 \ + | m32c | m32r | m32rle | m68000 | m68k | m88k \ + | maxq | mb | microblaze | microblazeel | mcore | mep | metag \ + | mips | mipsbe | mipseb | mipsel | mipsle \ + | mips16 \ + | mips64 | mips64el \ + | mips64octeon | mips64octeonel \ + | mips64orion | mips64orionel \ + | mips64r5900 | mips64r5900el \ + | mips64vr | mips64vrel \ + | mips64vr4100 | mips64vr4100el \ + | mips64vr4300 | mips64vr4300el \ + | mips64vr5000 | 
mips64vr5000el \ + | mips64vr5900 | mips64vr5900el \ + | mipsisa32 | mipsisa32el \ + | mipsisa32r2 | mipsisa32r2el \ + | mipsisa64 | mipsisa64el \ + | mipsisa64r2 | mipsisa64r2el \ + | mipsisa64sb1 | mipsisa64sb1el \ + | mipsisa64sr71k | mipsisa64sr71kel \ + | mipsr5900 | mipsr5900el \ + | mipstx39 | mipstx39el \ + | mn10200 | mn10300 \ + | moxie \ + | mt \ + | msp430 \ + | nds32 | nds32le | nds32be \ + | nios | nios2 | nios2eb | nios2el \ + | ns16k | ns32k \ + | open8 | or1k | or1knd | or32 \ + | pdp10 | pdp11 | pj | pjl \ + | powerpc | powerpc64 | powerpc64le | powerpcle \ + | pyramid \ + | rl78 | rx \ + | score \ + | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ + | sh64 | sh64le \ + | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \ + | sparcv8 | sparcv9 | sparcv9b | sparcv9v \ + | spu \ + | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \ + | ubicom32 \ + | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \ + | we32k \ + | x86 | xc16x | xstormy16 | xtensa \ + | z8k | z80) + basic_machine=$basic_machine-unknown + ;; + c54x) + basic_machine=tic54x-unknown + ;; + c55x) + basic_machine=tic55x-unknown + ;; + c6x) + basic_machine=tic6x-unknown + ;; + m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip) + basic_machine=$basic_machine-unknown + os=-none + ;; + m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k) + ;; + ms1) + basic_machine=mt-unknown + ;; + + strongarm | thumb | xscale) + basic_machine=arm-unknown + ;; + xgate) + basic_machine=$basic_machine-unknown + os=-none + ;; + xscaleeb) + basic_machine=armeb-unknown + ;; + + xscaleel) + basic_machine=armel-unknown + ;; + + # We use `pc' rather than `unknown' + # because (1) that's what they normally are, and + # (2) the word "unknown" tends to confuse beginning users. + i*86 | x86_64) + basic_machine=$basic_machine-pc + ;; + # Object if more than one company name word. 
+ *-*-*) + echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + exit 1 + ;; + # Recognize the basic CPU types with company name. + 580-* \ + | a29k-* \ + | aarch64-* | aarch64_be-* \ + | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ + | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ + | alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \ + | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ + | avr-* | avr32-* \ + | be32-* | be64-* \ + | bfin-* | bs2000-* \ + | c[123]* | c30-* | [cjt]90-* | c4x-* \ + | c8051-* | clipper-* | craynv-* | cydra-* \ + | d10v-* | d30v-* | dlx-* \ + | elxsi-* \ + | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ + | h8300-* | h8500-* \ + | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ + | hexagon-* \ + | i*86-* | i860-* | i960-* | ia64-* \ + | ip2k-* | iq2000-* \ + | k1om-* \ + | le32-* | le64-* \ + | lm32-* \ + | m32c-* | m32r-* | m32rle-* \ + | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ + | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \ + | microblaze-* | microblazeel-* \ + | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ + | mips16-* \ + | mips64-* | mips64el-* \ + | mips64octeon-* | mips64octeonel-* \ + | mips64orion-* | mips64orionel-* \ + | mips64r5900-* | mips64r5900el-* \ + | mips64vr-* | mips64vrel-* \ + | mips64vr4100-* | mips64vr4100el-* \ + | mips64vr4300-* | mips64vr4300el-* \ + | mips64vr5000-* | mips64vr5000el-* \ + | mips64vr5900-* | mips64vr5900el-* \ + | mipsisa32-* | mipsisa32el-* \ + | mipsisa32r2-* | mipsisa32r2el-* \ + | mipsisa64-* | mipsisa64el-* \ + | mipsisa64r2-* | mipsisa64r2el-* \ + | mipsisa64sb1-* | mipsisa64sb1el-* \ + | mipsisa64sr71k-* | mipsisa64sr71kel-* \ + | mipsr5900-* | mipsr5900el-* \ + | mipstx39-* | mipstx39el-* \ + | mmix-* \ + | mt-* \ + | msp430-* \ + | nds32-* | nds32le-* | nds32be-* \ + | nios-* | nios2-* | nios2eb-* | nios2el-* \ + | none-* | np1-* | ns16k-* | ns32k-* \ + | 
open8-* \ + | or1k*-* \ + | orion-* \ + | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ + | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \ + | pyramid-* \ + | rl78-* | romp-* | rs6000-* | rx-* \ + | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \ + | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ + | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \ + | sparclite-* \ + | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx?-* \ + | tahoe-* \ + | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ + | tile*-* \ + | tron-* \ + | ubicom32-* \ + | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \ + | vax-* \ + | we32k-* \ + | x86-* | x86_64-* | xc16x-* | xps100-* \ + | xstormy16-* | xtensa*-* \ + | ymp-* \ + | z8k-* | z80-*) + ;; + # Recognize the basic CPU types without company name, with glob match. + xtensa*) + basic_machine=$basic_machine-unknown + ;; + # Recognize the various machine names and aliases which stand + # for a CPU type and a company and sometimes even an OS. 
+ 386bsd) + basic_machine=i386-unknown + os=-bsd + ;; + 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) + basic_machine=m68000-att + ;; + 3b*) + basic_machine=we32k-att + ;; + a29khif) + basic_machine=a29k-amd + os=-udi + ;; + abacus) + basic_machine=abacus-unknown + ;; + adobe68k) + basic_machine=m68010-adobe + os=-scout + ;; + alliant | fx80) + basic_machine=fx80-alliant + ;; + altos | altos3068) + basic_machine=m68k-altos + ;; + am29k) + basic_machine=a29k-none + os=-bsd + ;; + amd64) + basic_machine=x86_64-pc + ;; + amd64-*) + basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + amdahl) + basic_machine=580-amdahl + os=-sysv + ;; + amiga | amiga-*) + basic_machine=m68k-unknown + ;; + amigaos | amigados) + basic_machine=m68k-unknown + os=-amigaos + ;; + amigaunix | amix) + basic_machine=m68k-unknown + os=-sysv4 + ;; + apollo68) + basic_machine=m68k-apollo + os=-sysv + ;; + apollo68bsd) + basic_machine=m68k-apollo + os=-bsd + ;; + aros) + basic_machine=i386-pc + os=-aros + ;; + aux) + basic_machine=m68k-apple + os=-aux + ;; + balance) + basic_machine=ns32k-sequent + os=-dynix + ;; + blackfin) + basic_machine=bfin-unknown + os=-linux + ;; + blackfin-*) + basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + bluegene*) + basic_machine=powerpc-ibm + os=-cnk + ;; + c54x-*) + basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c55x-*) + basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c6x-*) + basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c90) + basic_machine=c90-cray + os=-unicos + ;; + cegcc) + basic_machine=arm-unknown + os=-cegcc + ;; + convex-c1) + basic_machine=c1-convex + os=-bsd + ;; + convex-c2) + basic_machine=c2-convex + os=-bsd + ;; + convex-c32) + basic_machine=c32-convex + os=-bsd + ;; + convex-c34) + basic_machine=c34-convex + os=-bsd + ;; + convex-c38) + basic_machine=c38-convex + os=-bsd + ;; + cray | j90) + basic_machine=j90-cray + 
os=-unicos + ;; + craynv) + basic_machine=craynv-cray + os=-unicosmp + ;; + cr16 | cr16-*) + basic_machine=cr16-unknown + os=-elf + ;; + crds | unos) + basic_machine=m68k-crds + ;; + crisv32 | crisv32-* | etraxfs*) + basic_machine=crisv32-axis + ;; + cris | cris-* | etrax*) + basic_machine=cris-axis + ;; + crx) + basic_machine=crx-unknown + os=-elf + ;; + da30 | da30-*) + basic_machine=m68k-da30 + ;; + decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn) + basic_machine=mips-dec + ;; + decsystem10* | dec10*) + basic_machine=pdp10-dec + os=-tops10 + ;; + decsystem20* | dec20*) + basic_machine=pdp10-dec + os=-tops20 + ;; + delta | 3300 | motorola-3300 | motorola-delta \ + | 3300-motorola | delta-motorola) + basic_machine=m68k-motorola + ;; + delta88) + basic_machine=m88k-motorola + os=-sysv3 + ;; + dicos) + basic_machine=i686-pc + os=-dicos + ;; + djgpp) + basic_machine=i586-pc + os=-msdosdjgpp + ;; + dpx20 | dpx20-*) + basic_machine=rs6000-bull + os=-bosx + ;; + dpx2* | dpx2*-bull) + basic_machine=m68k-bull + os=-sysv3 + ;; + ebmon29k) + basic_machine=a29k-amd + os=-ebmon + ;; + elxsi) + basic_machine=elxsi-elxsi + os=-bsd + ;; + encore | umax | mmax) + basic_machine=ns32k-encore + ;; + es1800 | OSE68k | ose68k | ose | OSE) + basic_machine=m68k-ericsson + os=-ose + ;; + fx2800) + basic_machine=i860-alliant + ;; + genix) + basic_machine=ns32k-ns + ;; + gmicro) + basic_machine=tron-gmicro + os=-sysv + ;; + go32) + basic_machine=i386-pc + os=-go32 + ;; + h3050r* | hiux*) + basic_machine=hppa1.1-hitachi + os=-hiuxwe2 + ;; + h8300hms) + basic_machine=h8300-hitachi + os=-hms + ;; + h8300xray) + basic_machine=h8300-hitachi + os=-xray + ;; + h8500hms) + basic_machine=h8500-hitachi + os=-hms + ;; + harris) + basic_machine=m88k-harris + os=-sysv3 + ;; + hp300-*) + basic_machine=m68k-hp + ;; + hp300bsd) + basic_machine=m68k-hp + os=-bsd + ;; + hp300hpux) + basic_machine=m68k-hp + os=-hpux + ;; + hp3k9[0-9][0-9] | hp9[0-9][0-9]) + basic_machine=hppa1.0-hp + 
;; + hp9k2[0-9][0-9] | hp9k31[0-9]) + basic_machine=m68000-hp + ;; + hp9k3[2-9][0-9]) + basic_machine=m68k-hp + ;; + hp9k6[0-9][0-9] | hp6[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hp9k7[0-79][0-9] | hp7[0-79][0-9]) + basic_machine=hppa1.1-hp + ;; + hp9k78[0-9] | hp78[0-9]) + # FIXME: really hppa2.0-hp + basic_machine=hppa1.1-hp + ;; + hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) + # FIXME: really hppa2.0-hp + basic_machine=hppa1.1-hp + ;; + hp9k8[0-9][13679] | hp8[0-9][13679]) + basic_machine=hppa1.1-hp + ;; + hp9k8[0-9][0-9] | hp8[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hppa-next) + os=-nextstep3 + ;; + hppaosf) + basic_machine=hppa1.1-hp + os=-osf + ;; + hppro) + basic_machine=hppa1.1-hp + os=-proelf + ;; + i370-ibm* | ibm*) + basic_machine=i370-ibm + ;; + i*86v32) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv32 + ;; + i*86v4*) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv4 + ;; + i*86v) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv + ;; + i*86sol2) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-solaris2 + ;; + i386mach) + basic_machine=i386-mach + os=-mach + ;; + i386-vsta | vsta) + basic_machine=i386-unknown + os=-vsta + ;; + iris | iris4d) + basic_machine=mips-sgi + case $os in + -irix*) + ;; + *) + os=-irix4 + ;; + esac + ;; + isi68 | isi) + basic_machine=m68k-isi + os=-sysv + ;; + m68knommu) + basic_machine=m68k-unknown + os=-linux + ;; + m68knommu-*) + basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + m88k-omron*) + basic_machine=m88k-omron + ;; + magnum | m3230) + basic_machine=mips-mips + os=-sysv + ;; + merlin) + basic_machine=ns32k-utek + os=-sysv + ;; + microblaze*) + basic_machine=microblaze-xilinx + ;; + mingw64) + basic_machine=x86_64-pc + os=-mingw64 + ;; + mingw32) + basic_machine=i686-pc + os=-mingw32 + ;; + mingw32ce) + basic_machine=arm-unknown + os=-mingw32ce + ;; + miniframe) + 
basic_machine=m68000-convergent + ;; + *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) + basic_machine=m68k-atari + os=-mint + ;; + mips3*-*) + basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'` + ;; + mips3*) + basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown + ;; + monitor) + basic_machine=m68k-rom68k + os=-coff + ;; + morphos) + basic_machine=powerpc-unknown + os=-morphos + ;; + msdos) + basic_machine=i386-pc + os=-msdos + ;; + ms1-*) + basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'` + ;; + msys) + basic_machine=i686-pc + os=-msys + ;; + mvs) + basic_machine=i370-ibm + os=-mvs + ;; + nacl) + basic_machine=le32-unknown + os=-nacl + ;; + ncr3000) + basic_machine=i486-ncr + os=-sysv4 + ;; + netbsd386) + basic_machine=i386-unknown + os=-netbsd + ;; + netwinder) + basic_machine=armv4l-rebel + os=-linux + ;; + news | news700 | news800 | news900) + basic_machine=m68k-sony + os=-newsos + ;; + news1000) + basic_machine=m68030-sony + os=-newsos + ;; + news-3600 | risc-news) + basic_machine=mips-sony + os=-newsos + ;; + necv70) + basic_machine=v70-nec + os=-sysv + ;; + next | m*-next ) + basic_machine=m68k-next + case $os in + -nextstep* ) + ;; + -ns2*) + os=-nextstep2 + ;; + *) + os=-nextstep3 + ;; + esac + ;; + nh3000) + basic_machine=m68k-harris + os=-cxux + ;; + nh[45]000) + basic_machine=m88k-harris + os=-cxux + ;; + nindy960) + basic_machine=i960-intel + os=-nindy + ;; + mon960) + basic_machine=i960-intel + os=-mon960 + ;; + nonstopux) + basic_machine=mips-compaq + os=-nonstopux + ;; + np1) + basic_machine=np1-gould + ;; + neo-tandem) + basic_machine=neo-tandem + ;; + nse-tandem) + basic_machine=nse-tandem + ;; + nsr-tandem) + basic_machine=nsr-tandem + ;; + op50n-* | op60c-*) + basic_machine=hppa1.1-oki + os=-proelf + ;; + openrisc | openrisc-*) + basic_machine=or32-unknown + ;; + os400) + basic_machine=powerpc-ibm + os=-os400 + ;; + OSE68000 | ose68000) + basic_machine=m68000-ericsson + os=-ose + ;; + os68k) + 
basic_machine=m68k-none + os=-os68k + ;; + pa-hitachi) + basic_machine=hppa1.1-hitachi + os=-hiuxwe2 + ;; + paragon) + basic_machine=i860-intel + os=-osf + ;; + parisc) + basic_machine=hppa-unknown + os=-linux + ;; + parisc-*) + basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + pbd) + basic_machine=sparc-tti + ;; + pbb) + basic_machine=m68k-tti + ;; + pc532 | pc532-*) + basic_machine=ns32k-pc532 + ;; + pc98) + basic_machine=i386-pc + ;; + pc98-*) + basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentium | p5 | k5 | k6 | nexgen | viac3) + basic_machine=i586-pc + ;; + pentiumpro | p6 | 6x86 | athlon | athlon_*) + basic_machine=i686-pc + ;; + pentiumii | pentium2 | pentiumiii | pentium3) + basic_machine=i686-pc + ;; + pentium4) + basic_machine=i786-pc + ;; + pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) + basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentiumpro-* | p6-* | 6x86-* | athlon-*) + basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) + basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentium4-*) + basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pn) + basic_machine=pn-gould + ;; + power) basic_machine=power-ibm + ;; + ppc | ppcbe) basic_machine=powerpc-unknown + ;; + ppc-* | ppcbe-*) + basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppcle | powerpclittle | ppc-le | powerpc-little) + basic_machine=powerpcle-unknown + ;; + ppcle-* | powerpclittle-*) + basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppc64) basic_machine=powerpc64-unknown + ;; + ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppc64le | powerpc64little | ppc64-le | powerpc64-little) + basic_machine=powerpc64le-unknown + ;; + ppc64le-* | powerpc64little-*) + basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + 
ps2) + basic_machine=i386-ibm + ;; + pw32) + basic_machine=i586-unknown + os=-pw32 + ;; + rdos | rdos64) + basic_machine=x86_64-pc + os=-rdos + ;; + rdos32) + basic_machine=i386-pc + os=-rdos + ;; + rom68k) + basic_machine=m68k-rom68k + os=-coff + ;; + rm[46]00) + basic_machine=mips-siemens + ;; + rtpc | rtpc-*) + basic_machine=romp-ibm + ;; + s390 | s390-*) + basic_machine=s390-ibm + ;; + s390x | s390x-*) + basic_machine=s390x-ibm + ;; + sa29200) + basic_machine=a29k-amd + os=-udi + ;; + sb1) + basic_machine=mipsisa64sb1-unknown + ;; + sb1el) + basic_machine=mipsisa64sb1el-unknown + ;; + sde) + basic_machine=mipsisa32-sde + os=-elf + ;; + sei) + basic_machine=mips-sei + os=-seiux + ;; + sequent) + basic_machine=i386-sequent + ;; + sh) + basic_machine=sh-hitachi + os=-hms + ;; + sh5el) + basic_machine=sh5le-unknown + ;; + sh64) + basic_machine=sh64-unknown + ;; + sparclite-wrs | simso-wrs) + basic_machine=sparclite-wrs + os=-vxworks + ;; + sps7) + basic_machine=m68k-bull + os=-sysv2 + ;; + spur) + basic_machine=spur-unknown + ;; + st2000) + basic_machine=m68k-tandem + ;; + stratus) + basic_machine=i860-stratus + os=-sysv4 + ;; + strongarm-* | thumb-*) + basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + sun2) + basic_machine=m68000-sun + ;; + sun2os3) + basic_machine=m68000-sun + os=-sunos3 + ;; + sun2os4) + basic_machine=m68000-sun + os=-sunos4 + ;; + sun3os3) + basic_machine=m68k-sun + os=-sunos3 + ;; + sun3os4) + basic_machine=m68k-sun + os=-sunos4 + ;; + sun4os3) + basic_machine=sparc-sun + os=-sunos3 + ;; + sun4os4) + basic_machine=sparc-sun + os=-sunos4 + ;; + sun4sol2) + basic_machine=sparc-sun + os=-solaris2 + ;; + sun3 | sun3-*) + basic_machine=m68k-sun + ;; + sun4) + basic_machine=sparc-sun + ;; + sun386 | sun386i | roadrunner) + basic_machine=i386-sun + ;; + sv1) + basic_machine=sv1-cray + os=-unicos + ;; + symmetry) + basic_machine=i386-sequent + os=-dynix + ;; + t3e) + basic_machine=alphaev5-cray + os=-unicos + ;; + t90) + 
basic_machine=t90-cray + os=-unicos + ;; + tile*) + basic_machine=$basic_machine-unknown + os=-linux-gnu + ;; + tx39) + basic_machine=mipstx39-unknown + ;; + tx39el) + basic_machine=mipstx39el-unknown + ;; + toad1) + basic_machine=pdp10-xkl + os=-tops20 + ;; + tower | tower-32) + basic_machine=m68k-ncr + ;; + tpf) + basic_machine=s390x-ibm + os=-tpf + ;; + udi29k) + basic_machine=a29k-amd + os=-udi + ;; + ultra3) + basic_machine=a29k-nyu + os=-sym1 + ;; + v810 | necv810) + basic_machine=v810-nec + os=-none + ;; + vaxv) + basic_machine=vax-dec + os=-sysv + ;; + vms) + basic_machine=vax-dec + os=-vms + ;; + vpp*|vx|vx-*) + basic_machine=f301-fujitsu + ;; + vxworks960) + basic_machine=i960-wrs + os=-vxworks + ;; + vxworks68) + basic_machine=m68k-wrs + os=-vxworks + ;; + vxworks29k) + basic_machine=a29k-wrs + os=-vxworks + ;; + w65*) + basic_machine=w65-wdc + os=-none + ;; + w89k-*) + basic_machine=hppa1.1-winbond + os=-proelf + ;; + xbox) + basic_machine=i686-pc + os=-mingw32 + ;; + xps | xps100) + basic_machine=xps100-honeywell + ;; + xscale-* | xscalee[bl]-*) + basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'` + ;; + ymp) + basic_machine=ymp-cray + os=-unicos + ;; + z8k-*-coff) + basic_machine=z8k-unknown + os=-sim + ;; + z80-*-coff) + basic_machine=z80-unknown + os=-sim + ;; + none) + basic_machine=none-none + os=-none + ;; + +# Here we handle the default manufacturer of certain CPU types. It is in +# some cases the only manufacturer, in others, it is the most popular. 
+ w89k) + basic_machine=hppa1.1-winbond + ;; + op50n) + basic_machine=hppa1.1-oki + ;; + op60c) + basic_machine=hppa1.1-oki + ;; + romp) + basic_machine=romp-ibm + ;; + mmix) + basic_machine=mmix-knuth + ;; + rs6000) + basic_machine=rs6000-ibm + ;; + vax) + basic_machine=vax-dec + ;; + pdp10) + # there are many clones, so DEC is not a safe bet + basic_machine=pdp10-unknown + ;; + pdp11) + basic_machine=pdp11-dec + ;; + we32k) + basic_machine=we32k-att + ;; + sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele) + basic_machine=sh-unknown + ;; + sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v) + basic_machine=sparc-sun + ;; + cydra) + basic_machine=cydra-cydrome + ;; + orion) + basic_machine=orion-highlevel + ;; + orion105) + basic_machine=clipper-highlevel + ;; + mac | mpw | mac-mpw) + basic_machine=m68k-apple + ;; + pmac | pmac-mpw) + basic_machine=powerpc-apple + ;; + *-unknown) + # Make sure to match an already-canonicalized machine name. + ;; + *) + echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + exit 1 + ;; +esac + +# Here we canonicalize certain aliases for manufacturers. +case $basic_machine in + *-digital*) + basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'` + ;; + *-commodore*) + basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'` + ;; + *) + ;; +esac + +# Decode manufacturer-specific aliases for certain operating systems. + +if [ x"$os" != x"" ] +then +case $os in + # First match some system type aliases + # that might get confused with valid system types. + # -solaris* is a basic system type, with this one exception. + -auroraux) + os=-auroraux + ;; + -solaris1 | -solaris1.*) + os=`echo $os | sed -e 's|solaris1|sunos4|'` + ;; + -solaris) + os=-solaris2 + ;; + -svr4*) + os=-sysv4 + ;; + -unixware*) + os=-sysv4.2uw + ;; + -gnu/linux*) + os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` + ;; + # First accept the basic system types. + # The portable systems comes first. 
+ # Each alternative MUST END IN A *, to match a version number. + # -sysv* is not here because it comes later, after sysvr4. + -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ + | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\ + | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \ + | -sym* | -kopensolaris* | -plan9* \ + | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ + | -aos* | -aros* \ + | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ + | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ + | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \ + | -bitrig* | -openbsd* | -solidbsd* \ + | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ + | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ + | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ + | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ + | -chorusos* | -chorusrdb* | -cegcc* \ + | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ + | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \ + | -linux-newlib* | -linux-musl* | -linux-uclibc* \ + | -uxpv* | -beos* | -mpeix* | -udk* \ + | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ + | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ + | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ + | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ + | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ + | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \ + | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* | -tirtos*) + # Remember, each alternative MUST END IN *, to match a version number. 
+ ;; + -qnx*) + case $basic_machine in + x86-* | i*86-*) + ;; + *) + os=-nto$os + ;; + esac + ;; + -nto-qnx*) + ;; + -nto*) + os=`echo $os | sed -e 's|nto|nto-qnx|'` + ;; + -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \ + | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \ + | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) + ;; + -mac*) + os=`echo $os | sed -e 's|mac|macos|'` + ;; + -linux-dietlibc) + os=-linux-dietlibc + ;; + -linux*) + os=`echo $os | sed -e 's|linux|linux-gnu|'` + ;; + -sunos5*) + os=`echo $os | sed -e 's|sunos5|solaris2|'` + ;; + -sunos6*) + os=`echo $os | sed -e 's|sunos6|solaris3|'` + ;; + -opened*) + os=-openedition + ;; + -os400*) + os=-os400 + ;; + -wince*) + os=-wince + ;; + -osfrose*) + os=-osfrose + ;; + -osf*) + os=-osf + ;; + -utek*) + os=-bsd + ;; + -dynix*) + os=-bsd + ;; + -acis*) + os=-aos + ;; + -atheos*) + os=-atheos + ;; + -syllable*) + os=-syllable + ;; + -386bsd) + os=-bsd + ;; + -ctix* | -uts*) + os=-sysv + ;; + -nova*) + os=-rtmk-nova + ;; + -ns2 ) + os=-nextstep2 + ;; + -nsk*) + os=-nsk + ;; + # Preserve the version number of sinix5. + -sinix5.*) + os=`echo $os | sed -e 's|sinix|sysv|'` + ;; + -sinix*) + os=-sysv4 + ;; + -tpf*) + os=-tpf + ;; + -triton*) + os=-sysv3 + ;; + -oss*) + os=-sysv3 + ;; + -svr4) + os=-sysv4 + ;; + -svr3) + os=-sysv3 + ;; + -sysvr4) + os=-sysv4 + ;; + # This must come after -sysvr4. + -sysv*) + ;; + -ose*) + os=-ose + ;; + -es1800*) + os=-ose + ;; + -xenix) + os=-xenix + ;; + -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + os=-mint + ;; + -aros*) + os=-aros + ;; + -zvmoe) + os=-zvmoe + ;; + -dicos*) + os=-dicos + ;; + -nacl*) + ;; + -none) + ;; + *) + # Get rid of the `-' at the beginning of $os. + os=`echo $os | sed 's/[^-]*-//'` + echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2 + exit 1 + ;; +esac +else + +# Here we handle the default operating systems that come with various machines. 
+# The value should be what the vendor currently ships out the door with their +# machine or put another way, the most popular os provided with the machine. + +# Note that if you're going to try to match "-MANUFACTURER" here (say, +# "-sun"), then you have to tell the case statement up towards the top +# that MANUFACTURER isn't an operating system. Otherwise, code above +# will signal an error saying that MANUFACTURER isn't an operating +# system, and we'll never get to this point. + +case $basic_machine in + score-*) + os=-elf + ;; + spu-*) + os=-elf + ;; + *-acorn) + os=-riscix1.2 + ;; + arm*-rebel) + os=-linux + ;; + arm*-semi) + os=-aout + ;; + c4x-* | tic4x-*) + os=-coff + ;; + c8051-*) + os=-elf + ;; + hexagon-*) + os=-elf + ;; + tic54x-*) + os=-coff + ;; + tic55x-*) + os=-coff + ;; + tic6x-*) + os=-coff + ;; + # This must come before the *-dec entry. + pdp10-*) + os=-tops20 + ;; + pdp11-*) + os=-none + ;; + *-dec | vax-*) + os=-ultrix4.2 + ;; + m68*-apollo) + os=-domain + ;; + i386-sun) + os=-sunos4.0.2 + ;; + m68000-sun) + os=-sunos3 + ;; + m68*-cisco) + os=-aout + ;; + mep-*) + os=-elf + ;; + mips*-cisco) + os=-elf + ;; + mips*-*) + os=-elf + ;; + or32-*) + os=-coff + ;; + *-tti) # must be before sparc entry or we get the wrong os. 
+ os=-sysv3 + ;; + sparc-* | *-sun) + os=-sunos4.1.1 + ;; + *-be) + os=-beos + ;; + *-haiku) + os=-haiku + ;; + *-ibm) + os=-aix + ;; + *-knuth) + os=-mmixware + ;; + *-wec) + os=-proelf + ;; + *-winbond) + os=-proelf + ;; + *-oki) + os=-proelf + ;; + *-hp) + os=-hpux + ;; + *-hitachi) + os=-hiux + ;; + i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) + os=-sysv + ;; + *-cbm) + os=-amigaos + ;; + *-dg) + os=-dgux + ;; + *-dolphin) + os=-sysv3 + ;; + m68k-ccur) + os=-rtu + ;; + m88k-omron*) + os=-luna + ;; + *-next ) + os=-nextstep + ;; + *-sequent) + os=-ptx + ;; + *-crds) + os=-unos + ;; + *-ns) + os=-genix + ;; + i370-*) + os=-mvs + ;; + *-next) + os=-nextstep3 + ;; + *-gould) + os=-sysv + ;; + *-highlevel) + os=-bsd + ;; + *-encore) + os=-bsd + ;; + *-sgi) + os=-irix + ;; + *-siemens) + os=-sysv4 + ;; + *-masscomp) + os=-rtu + ;; + f30[01]-fujitsu | f700-fujitsu) + os=-uxpv + ;; + *-rom68k) + os=-coff + ;; + *-*bug) + os=-coff + ;; + *-apple) + os=-macos + ;; + *-atari*) + os=-mint + ;; + *) + os=-none + ;; +esac +fi + +# Here we handle the case where we know the os, and the CPU type, but not the +# manufacturer. We pick the logical manufacturer. 
+vendor=unknown +case $basic_machine in + *-unknown) + case $os in + -riscix*) + vendor=acorn + ;; + -sunos*) + vendor=sun + ;; + -cnk*|-aix*) + vendor=ibm + ;; + -beos*) + vendor=be + ;; + -hpux*) + vendor=hp + ;; + -mpeix*) + vendor=hp + ;; + -hiux*) + vendor=hitachi + ;; + -unos*) + vendor=crds + ;; + -dgux*) + vendor=dg + ;; + -luna*) + vendor=omron + ;; + -genix*) + vendor=ns + ;; + -mvs* | -opened*) + vendor=ibm + ;; + -os400*) + vendor=ibm + ;; + -ptx*) + vendor=sequent + ;; + -tpf*) + vendor=ibm + ;; + -vxsim* | -vxworks* | -windiss*) + vendor=wrs + ;; + -aux*) + vendor=apple + ;; + -hms*) + vendor=hitachi + ;; + -mpw* | -macos*) + vendor=apple + ;; + -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + vendor=atari + ;; + -vos*) + vendor=stratus + ;; + esac + basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"` + ;; +esac + +echo $basic_machine$os +exit + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/configure cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/configure --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/configure 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/configure 2015-07-07 18:40:52.000000000 +0000 @@ -0,0 +1,5377 @@ +#! /bin/sh +# Guess values for system-dependent variables and create Makefiles. +# Generated by GNU Autoconf 2.69 for Haskell network package 2.3.0.14. +# +# Report bugs to . +# +# +# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. +# +# +# This configure script is free software; the Free Software Foundation +# gives unlimited permission to copy, distribute and modify it. +## -------------------- ## +## M4sh Initialization. 
## +## -------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi + + +as_nl=' +' +export as_nl +# Printing a long string crashes Solaris 7 /usr/bin/printf. +as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo +# Prefer a ksh shell builtin over an external printf program on Solaris, +# but without wasting forks for bash or zsh. +if test -z "$BASH_VERSION$ZSH_VERSION" \ + && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='print -r --' + as_echo_n='print -rn --' +elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='printf %s\n' + as_echo_n='printf %s' +else + if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then + as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' + as_echo_n='/usr/ucb/echo -n' + else + as_echo_body='eval expr "X$1" : "X\\(.*\\)"' + as_echo_n_body='eval + arg=$1; + case $arg in #( + *"$as_nl"*) + expr "X$arg" : "X\\(.*\\)$as_nl"; + arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; + esac; + expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" + ' + export as_echo_n_body + as_echo_n='sh -c $as_echo_n_body as_echo' + fi + export as_echo_body + as_echo='sh -c $as_echo_body as_echo' +fi + +# The user is always right. 
+if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +IFS=" "" $as_nl" + +# Find who we are. Look in the path if we contain no directory separator. +as_myself= +case $0 in #(( + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break + done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + exit 1 +fi + +# Unset variables that we do not need and which cause bugs (e.g. in +# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" +# suppresses any "Segmentation fault" message there. '((' could +# trigger a bug in pdksh 5.2.14. +for as_var in BASH_ENV ENV MAIL MAILPATH +do eval test x\${$as_var+set} = xset \ + && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +done +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# CDPATH. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +# Use a proper internal environment variable to ensure we don't fall + # into an infinite loop, continuously re-executing ourselves. 
+ if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then + _as_can_reexec=no; export _as_can_reexec; + # We cannot yet assume a decent shell, so we have to provide a +# neutralization value for shells without unset; and this also +# works around shells that cannot unset nonexistent variables. +# Preserve -v and -x to the replacement shell. +BASH_ENV=/dev/null +ENV=/dev/null +(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV +case $- in # (((( + *v*x* | *x*v* ) as_opts=-vx ;; + *v* ) as_opts=-v ;; + *x* ) as_opts=-x ;; + * ) as_opts= ;; +esac +exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} +# Admittedly, this is quite paranoid, since all the known shells bail +# out after a failed `exec'. +$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 +as_fn_exit 255 + fi + # We don't want this to propagate to other subprocesses. + { _as_can_reexec=; unset _as_can_reexec;} +if test "x$CONFIG_SHELL" = x; then + as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which + # is contrary to our usage. Disable this feature. + alias -g '\${1+\"\$@\"}'='\"\$@\"' + setopt NO_GLOB_SUBST +else + case \`(set -o) 2>/dev/null\` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi +" + as_required="as_fn_return () { (exit \$1); } +as_fn_success () { as_fn_return 0; } +as_fn_failure () { as_fn_return 1; } +as_fn_ret_success () { return 0; } +as_fn_ret_failure () { return 1; } + +exitcode=0 +as_fn_success || { exitcode=1; echo as_fn_success failed.; } +as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } +as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } +as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } +if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : + +else + exitcode=1; echo positional parameters were not saved. 
+fi +test x\$exitcode = x0 || exit 1 +test -x / || exit 1" + as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO + as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO + eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && + test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 +test \$(( 1 + 1 )) = 2 || exit 1" + if (eval "$as_required") 2>/dev/null; then : + as_have_required=yes +else + as_have_required=no +fi + if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : + +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +as_found=false +for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + as_found=: + case $as_dir in #( + /*) + for as_base in sh bash ksh sh5; do + # Try only shells that exist, to save several forks. + as_shell=$as_dir/$as_base + if { test -f "$as_shell" || test -f "$as_shell.exe"; } && + { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : + CONFIG_SHELL=$as_shell as_have_required=yes + if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : + break 2 +fi +fi + done;; + esac + as_found=false +done +$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && + { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : + CONFIG_SHELL=$SHELL as_have_required=yes +fi; } +IFS=$as_save_IFS + + + if test "x$CONFIG_SHELL" != x; then : + export CONFIG_SHELL + # We cannot yet assume a decent shell, so we have to provide a +# neutralization value for shells without unset; and this also +# works around shells that cannot unset nonexistent variables. +# Preserve -v and -x to the replacement shell. 
+BASH_ENV=/dev/null +ENV=/dev/null +(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV +case $- in # (((( + *v*x* | *x*v* ) as_opts=-vx ;; + *v* ) as_opts=-v ;; + *x* ) as_opts=-x ;; + * ) as_opts= ;; +esac +exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} +# Admittedly, this is quite paranoid, since all the known shells bail +# out after a failed `exec'. +$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 +exit 255 +fi + + if test x$as_have_required = xno; then : + $as_echo "$0: This script requires a shell more modern than all" + $as_echo "$0: the shells that I found on your system." + if test x${ZSH_VERSION+set} = xset ; then + $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" + $as_echo "$0: be upgraded to zsh 4.3.4 or later." + else + $as_echo "$0: Please tell bug-autoconf@gnu.org and +$0: libraries@haskell.org about your system, including any +$0: error possibly output before this message. Then install +$0: a modern shell, or manually run the script under such a +$0: shell if you do have one." + fi + exit 1 +fi +fi +fi +SHELL=${CONFIG_SHELL-/bin/sh} +export SHELL +# Unset more variables known to interfere with behavior of common tools. +CLICOLOR_FORCE= GREP_OPTIONS= +unset CLICOLOR_FORCE GREP_OPTIONS + +## --------------------- ## +## M4sh Shell Functions. ## +## --------------------- ## +# as_fn_unset VAR +# --------------- +# Portably unset VAR. +as_fn_unset () +{ + { eval $1=; unset $1;} +} +as_unset=as_fn_unset + +# as_fn_set_status STATUS +# ----------------------- +# Set $? to STATUS, without forking. +as_fn_set_status () +{ + return $1 +} # as_fn_set_status + +# as_fn_exit STATUS +# ----------------- +# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. +as_fn_exit () +{ + set +e + as_fn_set_status $1 + exit $1 +} # as_fn_exit + +# as_fn_mkdir_p +# ------------- +# Create "$as_dir" as a directory, including parents if necessary. 
+as_fn_mkdir_p () +{ + + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || eval $as_mkdir_p || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" + + +} # as_fn_mkdir_p + +# as_fn_executable_p FILE +# ----------------------- +# Test if FILE is an executable regular file. +as_fn_executable_p () +{ + test -f "$1" && test -x "$1" +} # as_fn_executable_p +# as_fn_append VAR VALUE +# ---------------------- +# Append the text in VALUE to the end of the definition contained in VAR. Take +# advantage of any shell optimizations that allow amortized linear growth over +# repeated appends, instead of the typical quadratic growth present in naive +# implementations. +if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : + eval 'as_fn_append () + { + eval $1+=\$2 + }' +else + as_fn_append () + { + eval $1=\$$1\$2 + } +fi # as_fn_append + +# as_fn_arith ARG... +# ------------------ +# Perform arithmetic evaluation on the ARGs, and store the result in the +# global $as_val. Take advantage of shells that can avoid forks. The arguments +# must be portable across $(()) and expr. +if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : + eval 'as_fn_arith () + { + as_val=$(( $* )) + }' +else + as_fn_arith () + { + as_val=`expr "$@" || test $? 
-eq 1` + } +fi # as_fn_arith + + +# as_fn_error STATUS ERROR [LINENO LOG_FD] +# ---------------------------------------- +# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are +# provided, also output the error to LOG_FD, referencing LINENO. Then exit the +# script with STATUS, using 1 if that was 0. +as_fn_error () +{ + as_status=$1; test $as_status -eq 0 && as_status=1 + if test "$4"; then + as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + fi + $as_echo "$as_me: error: $2" >&2 + as_fn_exit $as_status +} # as_fn_error + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + + + as_lineno_1=$LINENO as_lineno_1a=$LINENO + as_lineno_2=$LINENO as_lineno_2a=$LINENO + eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && + test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { + # Blame Lee E. McMahon (1931-1989) for sed's syntax. 
:-) + sed -n ' + p + /[$]LINENO/= + ' <$as_myself | + sed ' + s/[$]LINENO.*/&-/ + t lineno + b + :lineno + N + :loop + s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ + t loop + s/-\n.*// + ' >$as_me.lineno && + chmod +x "$as_me.lineno" || + { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } + + # If we had to re-execute with $CONFIG_SHELL, we're ensured to have + # already done that, so ensure we don't try to do so again and fall + # in an infinite loop. This has already happened in practice. + _as_can_reexec=no; export _as_can_reexec + # Don't try to exec as it changes $[0], causing all sort of problems + # (the dirname of $[0] is not the place where we might find the + # original and so on. Autoconf is especially sensitive to this). + . "./$as_me.lineno" + # Exit status is that of the last command. + exit +} + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in #((((( +-n*) + case `echo 'xy\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. + xy) ECHO_C='\c';; + *) echo `echo ksh88 bug on AIX 6.1` > /dev/null + ECHO_T=' ';; + esac;; +*) + ECHO_N='-n';; +esac + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir 2>/dev/null +fi +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -pR'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -pR' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln + else + as_ln_s='cp -pR' + fi +else + as_ln_s='cp -pR' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + +if mkdir -p . 
2>/dev/null; then + as_mkdir_p='mkdir -p "$as_dir"' +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + +as_test_x='test -x' +as_executable_p=as_fn_executable_p + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + +test -n "$DJDIR" || exec 7<&0 &1 + +# Name of the host. +# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, +# so uname gets run too. +ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` + +# +# Initializations. +# +ac_default_prefix=/usr/local +ac_clean_files= +ac_config_libobj_dir=. +LIBOBJS= +cross_compiling=no +subdirs= +MFLAGS= +MAKEFLAGS= + +# Identity of this package. +PACKAGE_NAME='Haskell network package' +PACKAGE_TARNAME='network' +PACKAGE_VERSION='2.3.0.14' +PACKAGE_STRING='Haskell network package 2.3.0.14' +PACKAGE_BUGREPORT='libraries@haskell.org' +PACKAGE_URL='' + +ac_unique_file="include/HsNet.h" +# Factoring default headers for most tests. 
+ac_includes_default="\ +#include +#ifdef HAVE_SYS_TYPES_H +# include +#endif +#ifdef HAVE_SYS_STAT_H +# include +#endif +#ifdef STDC_HEADERS +# include +# include +#else +# ifdef HAVE_STDLIB_H +# include +# endif +#endif +#ifdef HAVE_STRING_H +# if !defined STDC_HEADERS && defined HAVE_MEMORY_H +# include +# endif +# include +#endif +#ifdef HAVE_STRINGS_H +# include +#endif +#ifdef HAVE_INTTYPES_H +# include +#endif +#ifdef HAVE_STDINT_H +# include +#endif +#ifdef HAVE_UNISTD_H +# include +#endif" + +ac_subst_vars='LTLIBOBJS +LIBOBJS +EXTRA_SRCS +EXTRA_LIBS +EXTRA_CPPFLAGS +CALLCONV +EGREP +GREP +CPP +OBJEXT +EXEEXT +ac_ct_CC +CPPFLAGS +LDFLAGS +CFLAGS +CC +host_os +host_vendor +host_cpu +host +build_os +build_vendor +build_cpu +build +target_alias +host_alias +build_alias +LIBS +ECHO_T +ECHO_N +ECHO_C +DEFS +mandir +localedir +libdir +psdir +pdfdir +dvidir +htmldir +infodir +docdir +oldincludedir +includedir +localstatedir +sharedstatedir +sysconfdir +datadir +datarootdir +libexecdir +sbindir +bindir +program_transform_name +prefix +exec_prefix +PACKAGE_URL +PACKAGE_BUGREPORT +PACKAGE_STRING +PACKAGE_VERSION +PACKAGE_TARNAME +PACKAGE_NAME +PATH_SEPARATOR +SHELL' +ac_subst_files='' +ac_user_opts=' +enable_option_checking +with_cc +' + ac_precious_vars='build_alias +host_alias +target_alias +CC +CFLAGS +LDFLAGS +LIBS +CPPFLAGS +CPP' + + +# Initialize some variables set by options. +ac_init_help= +ac_init_version=false +ac_unrecognized_opts= +ac_unrecognized_sep= +# The variables have the same names as the options, with +# dashes changed to underlines. +cache_file=/dev/null +exec_prefix=NONE +no_create= +no_recursion= +prefix=NONE +program_prefix=NONE +program_suffix=NONE +program_transform_name=s,x,x, +silent= +site= +srcdir= +verbose= +x_includes=NONE +x_libraries=NONE + +# Installation directory options. 
+# These are left unexpanded so users can "make install exec_prefix=/foo" +# and all the variables that are supposed to be based on exec_prefix +# by default will actually change. +# Use braces instead of parens because sh, perl, etc. also accept them. +# (The list follows the same order as the GNU Coding Standards.) +bindir='${exec_prefix}/bin' +sbindir='${exec_prefix}/sbin' +libexecdir='${exec_prefix}/libexec' +datarootdir='${prefix}/share' +datadir='${datarootdir}' +sysconfdir='${prefix}/etc' +sharedstatedir='${prefix}/com' +localstatedir='${prefix}/var' +includedir='${prefix}/include' +oldincludedir='/usr/include' +docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' +infodir='${datarootdir}/info' +htmldir='${docdir}' +dvidir='${docdir}' +pdfdir='${docdir}' +psdir='${docdir}' +libdir='${exec_prefix}/lib' +localedir='${datarootdir}/locale' +mandir='${datarootdir}/man' + +ac_prev= +ac_dashdash= +for ac_option +do + # If the previous option needs an argument, assign it. + if test -n "$ac_prev"; then + eval $ac_prev=\$ac_option + ac_prev= + continue + fi + + case $ac_option in + *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; + *=) ac_optarg= ;; + *) ac_optarg=yes ;; + esac + + # Accept the important Cygnus configure options, so we can diagnose typos. 
+ + case $ac_dashdash$ac_option in + --) + ac_dashdash=yes ;; + + -bindir | --bindir | --bindi | --bind | --bin | --bi) + ac_prev=bindir ;; + -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) + bindir=$ac_optarg ;; + + -build | --build | --buil | --bui | --bu) + ac_prev=build_alias ;; + -build=* | --build=* | --buil=* | --bui=* | --bu=*) + build_alias=$ac_optarg ;; + + -cache-file | --cache-file | --cache-fil | --cache-fi \ + | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) + ac_prev=cache_file ;; + -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ + | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) + cache_file=$ac_optarg ;; + + --config-cache | -C) + cache_file=config.cache ;; + + -datadir | --datadir | --datadi | --datad) + ac_prev=datadir ;; + -datadir=* | --datadir=* | --datadi=* | --datad=*) + datadir=$ac_optarg ;; + + -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ + | --dataroo | --dataro | --datar) + ac_prev=datarootdir ;; + -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ + | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) + datarootdir=$ac_optarg ;; + + -disable-* | --disable-*) + ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? 
"invalid feature name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"enable_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval enable_$ac_useropt=no ;; + + -docdir | --docdir | --docdi | --doc | --do) + ac_prev=docdir ;; + -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) + docdir=$ac_optarg ;; + + -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) + ac_prev=dvidir ;; + -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) + dvidir=$ac_optarg ;; + + -enable-* | --enable-*) + ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid feature name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"enable_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval enable_$ac_useropt=\$ac_optarg ;; + + -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ + | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ + | --exec | --exe | --ex) + ac_prev=exec_prefix ;; + -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ + | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ + | --exec=* | --exe=* | --ex=*) + exec_prefix=$ac_optarg ;; + + -gas | --gas | --ga | --g) + # Obsolete; use --with-gas. 
+ with_gas=yes ;; + + -help | --help | --hel | --he | -h) + ac_init_help=long ;; + -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) + ac_init_help=recursive ;; + -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) + ac_init_help=short ;; + + -host | --host | --hos | --ho) + ac_prev=host_alias ;; + -host=* | --host=* | --hos=* | --ho=*) + host_alias=$ac_optarg ;; + + -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) + ac_prev=htmldir ;; + -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ + | --ht=*) + htmldir=$ac_optarg ;; + + -includedir | --includedir | --includedi | --included | --include \ + | --includ | --inclu | --incl | --inc) + ac_prev=includedir ;; + -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ + | --includ=* | --inclu=* | --incl=* | --inc=*) + includedir=$ac_optarg ;; + + -infodir | --infodir | --infodi | --infod | --info | --inf) + ac_prev=infodir ;; + -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) + infodir=$ac_optarg ;; + + -libdir | --libdir | --libdi | --libd) + ac_prev=libdir ;; + -libdir=* | --libdir=* | --libdi=* | --libd=*) + libdir=$ac_optarg ;; + + -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ + | --libexe | --libex | --libe) + ac_prev=libexecdir ;; + -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ + | --libexe=* | --libex=* | --libe=*) + libexecdir=$ac_optarg ;; + + -localedir | --localedir | --localedi | --localed | --locale) + ac_prev=localedir ;; + -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) + localedir=$ac_optarg ;; + + -localstatedir | --localstatedir | --localstatedi | --localstated \ + | --localstate | --localstat | --localsta | --localst | --locals) + ac_prev=localstatedir ;; + -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ + | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) + localstatedir=$ac_optarg ;; + + 
-mandir | --mandir | --mandi | --mand | --man | --ma | --m) + ac_prev=mandir ;; + -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) + mandir=$ac_optarg ;; + + -nfp | --nfp | --nf) + # Obsolete; use --without-fp. + with_fp=no ;; + + -no-create | --no-create | --no-creat | --no-crea | --no-cre \ + | --no-cr | --no-c | -n) + no_create=yes ;; + + -no-recursion | --no-recursion | --no-recursio | --no-recursi \ + | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) + no_recursion=yes ;; + + -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ + | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ + | --oldin | --oldi | --old | --ol | --o) + ac_prev=oldincludedir ;; + -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ + | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ + | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) + oldincludedir=$ac_optarg ;; + + -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) + ac_prev=prefix ;; + -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) + prefix=$ac_optarg ;; + + -program-prefix | --program-prefix | --program-prefi | --program-pref \ + | --program-pre | --program-pr | --program-p) + ac_prev=program_prefix ;; + -program-prefix=* | --program-prefix=* | --program-prefi=* \ + | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) + program_prefix=$ac_optarg ;; + + -program-suffix | --program-suffix | --program-suffi | --program-suff \ + | --program-suf | --program-su | --program-s) + ac_prev=program_suffix ;; + -program-suffix=* | --program-suffix=* | --program-suffi=* \ + | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) + program_suffix=$ac_optarg ;; + + -program-transform-name | --program-transform-name \ + | --program-transform-nam | --program-transform-na \ + | --program-transform-n | --program-transform- \ + | --program-transform | 
--program-transfor \ + | --program-transfo | --program-transf \ + | --program-trans | --program-tran \ + | --progr-tra | --program-tr | --program-t) + ac_prev=program_transform_name ;; + -program-transform-name=* | --program-transform-name=* \ + | --program-transform-nam=* | --program-transform-na=* \ + | --program-transform-n=* | --program-transform-=* \ + | --program-transform=* | --program-transfor=* \ + | --program-transfo=* | --program-transf=* \ + | --program-trans=* | --program-tran=* \ + | --progr-tra=* | --program-tr=* | --program-t=*) + program_transform_name=$ac_optarg ;; + + -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) + ac_prev=pdfdir ;; + -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) + pdfdir=$ac_optarg ;; + + -psdir | --psdir | --psdi | --psd | --ps) + ac_prev=psdir ;; + -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) + psdir=$ac_optarg ;; + + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + silent=yes ;; + + -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) + ac_prev=sbindir ;; + -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ + | --sbi=* | --sb=*) + sbindir=$ac_optarg ;; + + -sharedstatedir | --sharedstatedir | --sharedstatedi \ + | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ + | --sharedst | --shareds | --shared | --share | --shar \ + | --sha | --sh) + ac_prev=sharedstatedir ;; + -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ + | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ + | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ + | --sha=* | --sh=*) + sharedstatedir=$ac_optarg ;; + + -site | --site | --sit) + ac_prev=site ;; + -site=* | --site=* | --sit=*) + site=$ac_optarg ;; + + -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) + ac_prev=srcdir ;; + -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) + srcdir=$ac_optarg ;; + + 
-sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ + | --syscon | --sysco | --sysc | --sys | --sy) + ac_prev=sysconfdir ;; + -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ + | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) + sysconfdir=$ac_optarg ;; + + -target | --target | --targe | --targ | --tar | --ta | --t) + ac_prev=target_alias ;; + -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) + target_alias=$ac_optarg ;; + + -v | -verbose | --verbose | --verbos | --verbo | --verb) + verbose=yes ;; + + -version | --version | --versio | --versi | --vers | -V) + ac_init_version=: ;; + + -with-* | --with-*) + ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid package name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"with_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval with_$ac_useropt=\$ac_optarg ;; + + -without-* | --without-*) + ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid package name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"with_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval with_$ac_useropt=no ;; + + --x) + # Obsolete; use --with-x. 
+ with_x=yes ;; + + -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ + | --x-incl | --x-inc | --x-in | --x-i) + ac_prev=x_includes ;; + -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ + | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) + x_includes=$ac_optarg ;; + + -x-libraries | --x-libraries | --x-librarie | --x-librari \ + | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) + ac_prev=x_libraries ;; + -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ + | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) + x_libraries=$ac_optarg ;; + + -*) as_fn_error $? "unrecognized option: \`$ac_option' +Try \`$0 --help' for more information" + ;; + + *=*) + ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` + # Reject names that are not valid shell variable names. + case $ac_envvar in #( + '' | [0-9]* | *[!_$as_cr_alnum]* ) + as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; + esac + eval $ac_envvar=\$ac_optarg + export $ac_envvar ;; + + *) + # FIXME: should be removed in autoconf 3.0. + $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 + expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && + $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 + : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" + ;; + + esac +done + +if test -n "$ac_prev"; then + ac_option=--`echo $ac_prev | sed 's/_/-/g'` + as_fn_error $? "missing argument to $ac_option" +fi + +if test -n "$ac_unrecognized_opts"; then + case $enable_option_checking in + no) ;; + fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; + *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; + esac +fi + +# Check all directory arguments for consistency. 
+for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ + datadir sysconfdir sharedstatedir localstatedir includedir \ + oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ + libdir localedir mandir +do + eval ac_val=\$$ac_var + # Remove trailing slashes. + case $ac_val in + */ ) + ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` + eval $ac_var=\$ac_val;; + esac + # Be sure to have absolute directory names. + case $ac_val in + [\\/$]* | ?:[\\/]* ) continue;; + NONE | '' ) case $ac_var in *prefix ) continue;; esac;; + esac + as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" +done + +# There might be people who depend on the old broken behavior: `$host' +# used to hold the argument of --host etc. +# FIXME: To remove some day. +build=$build_alias +host=$host_alias +target=$target_alias + +# FIXME: To remove some day. +if test "x$host_alias" != x; then + if test "x$build_alias" = x; then + cross_compiling=maybe + elif test "x$build_alias" != "x$host_alias"; then + cross_compiling=yes + fi +fi + +ac_tool_prefix= +test -n "$host_alias" && ac_tool_prefix=$host_alias- + +test "$silent" = yes && exec 6>/dev/null + + +ac_pwd=`pwd` && test -n "$ac_pwd" && +ac_ls_di=`ls -di .` && +ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || + as_fn_error $? "working directory cannot be determined" +test "X$ac_ls_di" = "X$ac_pwd_ls_di" || + as_fn_error $? "pwd does not report name of working directory" + + +# Find the source files, if location was not specified. +if test -z "$srcdir"; then + ac_srcdir_defaulted=yes + # Try the directory containing this script, then the parent directory. + ac_confdir=`$as_dirname -- "$as_myself" || +$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_myself" : 'X\(//\)[^/]' \| \ + X"$as_myself" : 'X\(//\)$' \| \ + X"$as_myself" : 'X\(/\)' \| . 
2>/dev/null || +$as_echo X"$as_myself" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + srcdir=$ac_confdir + if test ! -r "$srcdir/$ac_unique_file"; then + srcdir=.. + fi +else + ac_srcdir_defaulted=no +fi +if test ! -r "$srcdir/$ac_unique_file"; then + test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." + as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir" +fi +ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" +ac_abs_confdir=`( + cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" + pwd)` +# When building in place, set srcdir=. +if test "$ac_abs_confdir" = "$ac_pwd"; then + srcdir=. +fi +# Remove unnecessary trailing slashes from srcdir. +# Double slashes in file names in object file debugging info +# mess up M-x gdb in Emacs. +case $srcdir in +*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; +esac +for ac_var in $ac_precious_vars; do + eval ac_env_${ac_var}_set=\${${ac_var}+set} + eval ac_env_${ac_var}_value=\$${ac_var} + eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} + eval ac_cv_env_${ac_var}_value=\$${ac_var} +done + +# +# Report the --help message. +# +if test "$ac_init_help" = "long"; then + # Omit some internal or obsolete options to make the list less imposing. + # This message is too long to be a string in the A/UX 3.1 sh. + cat <<_ACEOF +\`configure' configures Haskell network package 2.3.0.14 to adapt to many kinds of systems. + +Usage: $0 [OPTION]... [VAR=VALUE]... + +To assign environment variables (e.g., CC, CFLAGS...), specify them as +VAR=VALUE. See below for descriptions of some of the useful variables. + +Defaults for the options are specified in brackets. 
+ +Configuration: + -h, --help display this help and exit + --help=short display options specific to this package + --help=recursive display the short help of all the included packages + -V, --version display version information and exit + -q, --quiet, --silent do not print \`checking ...' messages + --cache-file=FILE cache test results in FILE [disabled] + -C, --config-cache alias for \`--cache-file=config.cache' + -n, --no-create do not create output files + --srcdir=DIR find the sources in DIR [configure dir or \`..'] + +Installation directories: + --prefix=PREFIX install architecture-independent files in PREFIX + [$ac_default_prefix] + --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX + [PREFIX] + +By default, \`make install' will install all the files in +\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify +an installation prefix other than \`$ac_default_prefix' using \`--prefix', +for instance \`--prefix=\$HOME'. + +For better control, use the options below. 
+ +Fine tuning of the installation directories: + --bindir=DIR user executables [EPREFIX/bin] + --sbindir=DIR system admin executables [EPREFIX/sbin] + --libexecdir=DIR program executables [EPREFIX/libexec] + --sysconfdir=DIR read-only single-machine data [PREFIX/etc] + --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] + --localstatedir=DIR modifiable single-machine data [PREFIX/var] + --libdir=DIR object code libraries [EPREFIX/lib] + --includedir=DIR C header files [PREFIX/include] + --oldincludedir=DIR C header files for non-gcc [/usr/include] + --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] + --datadir=DIR read-only architecture-independent data [DATAROOTDIR] + --infodir=DIR info documentation [DATAROOTDIR/info] + --localedir=DIR locale-dependent data [DATAROOTDIR/locale] + --mandir=DIR man documentation [DATAROOTDIR/man] + --docdir=DIR documentation root [DATAROOTDIR/doc/network] + --htmldir=DIR html documentation [DOCDIR] + --dvidir=DIR dvi documentation [DOCDIR] + --pdfdir=DIR pdf documentation [DOCDIR] + --psdir=DIR ps documentation [DOCDIR] +_ACEOF + + cat <<\_ACEOF + +System types: + --build=BUILD configure for building on BUILD [guessed] + --host=HOST cross-compile to build programs to run on HOST [BUILD] +_ACEOF +fi + +if test -n "$ac_init_help"; then + case $ac_init_help in + short | recursive ) echo "Configuration of Haskell network package 2.3.0.14:";; + esac + cat <<\_ACEOF + +Optional Packages: + --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] + --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) +C compiler + +Some influential environment variables: + CC C compiler command + CFLAGS C compiler flags + LDFLAGS linker flags, e.g. -L if you have libraries in a + nonstandard directory + LIBS libraries to pass to the linker, e.g. -l + CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. 
-I if + you have headers in a nonstandard directory + CPP C preprocessor + +Use these variables to override the choices made by `configure' or to help +it to find libraries and programs with nonstandard names/locations. + +Report bugs to . +_ACEOF +ac_status=$? +fi + +if test "$ac_init_help" = "recursive"; then + # If there are subdirs, report their specific --help. + for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue + test -d "$ac_dir" || + { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || + continue + ac_builddir=. + +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` + # A ".." for each directory in $ac_dir_suffix. + ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix + +case $srcdir in + .) # We are building in place. + ac_srcdir=. + ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. + ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + cd "$ac_dir" || { ac_status=$?; continue; } + # Check for guested configure. 
+ if test -f "$ac_srcdir/configure.gnu"; then + echo && + $SHELL "$ac_srcdir/configure.gnu" --help=recursive + elif test -f "$ac_srcdir/configure"; then + echo && + $SHELL "$ac_srcdir/configure" --help=recursive + else + $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 + fi || ac_status=$? + cd "$ac_pwd" || { ac_status=$?; break; } + done +fi + +test -n "$ac_init_help" && exit $ac_status +if $ac_init_version; then + cat <<\_ACEOF +Haskell network package configure 2.3.0.14 +generated by GNU Autoconf 2.69 + +Copyright (C) 2012 Free Software Foundation, Inc. +This configure script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it. +_ACEOF + exit +fi + +## ------------------------ ## +## Autoconf initialization. ## +## ------------------------ ## + +# ac_fn_c_try_compile LINENO +# -------------------------- +# Try to compile conftest.$ac_ext, and return whether this succeeded. +ac_fn_c_try_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext + if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_c_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest.$ac_objext; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_compile + +# ac_fn_c_try_cpp LINENO +# ---------------------- +# Try to preprocess conftest.$ac_ext, and return whether this succeeded. +ac_fn_c_try_cpp () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if { { ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } > conftest.i && { + test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || + test ! -s conftest.err + }; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_cpp + +# ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES +# ------------------------------------------------------- +# Tests whether HEADER exists, giving a warning if it cannot be compiled using +# the include files in INCLUDES and setting the cache variable VAR +# accordingly. +ac_fn_c_check_header_mongrel () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if eval \${$3+:} false; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... 
" >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +else + # Is the header compilable? +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 +$as_echo_n "checking $2 usability... " >&6; } +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +#include <$2> +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_header_compiler=yes +else + ac_header_compiler=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 +$as_echo "$ac_header_compiler" >&6; } + +# Is the header present? +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 +$as_echo_n "checking $2 presence... " >&6; } +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include <$2> +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + ac_header_preproc=yes +else + ac_header_preproc=no +fi +rm -f conftest.err conftest.i conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 +$as_echo "$ac_header_preproc" >&6; } + +# So? What about this header? +case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #(( + yes:no: ) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 +$as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 +$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} + ;; + no:yes:* ) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 +$as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" 
>&5 +$as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 +$as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 +$as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 +$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} +( $as_echo "## ------------------------------------ ## +## Report this to libraries@haskell.org ## +## ------------------------------------ ##" + ) | sed "s/^/$as_me: WARNING: /" >&2 + ;; +esac + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + eval "$3=\$ac_header_compiler" +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_header_mongrel + +# ac_fn_c_try_run LINENO +# ---------------------- +# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes +# that executables *can* be run. +ac_fn_c_try_run () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' + { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; }; then : + ac_retval=0 +else + $as_echo "$as_me: program exited with status $ac_status" >&5 + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=$ac_status +fi + rm -rf conftest.dSYM conftest_ipa8_conftest.oo + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_run + +# ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES +# ------------------------------------------------------- +# Tests whether HEADER exists and can be compiled using the include files in +# INCLUDES, setting the cache variable VAR accordingly. +ac_fn_c_check_header_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +#include <$2> +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + eval "$3=yes" +else + eval "$3=no" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_header_compile + +# ac_fn_c_try_link LINENO +# ----------------------- +# Try to link conftest.$ac_ext, and return whether this succeeded. 
+ac_fn_c_try_link () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext conftest$ac_exeext + if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + test -x conftest$ac_exeext + }; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information + # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would + # interfere with the next link command; also delete a directory that is + # left behind by Apple's compiler. We do this before executing the actions. + rm -rf conftest.dSYM conftest_ipa8_conftest.oo + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_link + +# ac_fn_c_check_func LINENO FUNC VAR +# ---------------------------------- +# Tests whether FUNC exists, setting the cache variable VAR accordingly +ac_fn_c_check_func () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +/* Define $2 to an innocuous variant, in case declares $2. 
+ For example, HP-UX 11i declares gettimeofday. */ +#define $2 innocuous_$2 + +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char $2 (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ + +#ifdef __STDC__ +# include +#else +# include +#endif + +#undef $2 + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char $2 (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined __stub_$2 || defined __stub___$2 +choke me +#endif + +int +main () +{ +return $2 (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + eval "$3=yes" +else + eval "$3=no" +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_func + +# ac_fn_c_check_member LINENO AGGR MEMBER VAR INCLUDES +# ---------------------------------------------------- +# Tries to find if the field MEMBER exists in type AGGR, after including +# INCLUDES, setting cache variable VAR accordingly. +ac_fn_c_check_member () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2.$3" >&5 +$as_echo_n "checking for $2.$3... " >&6; } +if eval \${$4+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +$5 +int +main () +{ +static $2 ac_aggr; +if (ac_aggr.$3) +return 0; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + eval "$4=yes" +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$5 +int +main () +{ +static $2 ac_aggr; +if (sizeof ac_aggr.$3) +return 0; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + eval "$4=yes" +else + eval "$4=no" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +eval ac_res=\$$4 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_member + +# ac_fn_c_check_decl LINENO SYMBOL VAR INCLUDES +# --------------------------------------------- +# Tests whether SYMBOL is declared in INCLUDES, setting cache variable VAR +# accordingly. +ac_fn_c_check_decl () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + as_decl_name=`echo $2|sed 's/ *(.*//'` + as_decl_use=`echo $2|sed -e 's/(/((/' -e 's/)/) 0&/' -e 's/,/) 0& (/g'` + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $as_decl_name is declared" >&5 +$as_echo_n "checking whether $as_decl_name is declared... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +$4 +int +main () +{ +#ifndef $as_decl_name +#ifdef __cplusplus + (void) $as_decl_use; +#else + (void) $as_decl_name; +#endif +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + eval "$3=yes" +else + eval "$3=no" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_decl +cat >config.log <<_ACEOF +This file contains any messages produced by compilers while +running configure, to aid debugging if configure makes a mistake. + +It was created by Haskell network package $as_me 2.3.0.14, which was +generated by GNU Autoconf 2.69. Invocation command line was + + $ $0 $@ + +_ACEOF +exec 5>>config.log +{ +cat <<_ASUNAME +## --------- ## +## Platform. ## +## --------- ## + +hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` +uname -m = `(uname -m) 2>/dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` + +/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` +/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` +/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` +/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` + +_ASUNAME + +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ $as_echo "PATH: $as_dir" + done +IFS=$as_save_IFS + +} >&5 + +cat >&5 <<_ACEOF + + +## ----------- ## +## Core tests. ## +## ----------- ## + +_ACEOF + + +# Keep a trace of the command line. +# Strip out --no-create and --no-recursion so they do not pile up. +# Strip out --silent because we don't want to record it for future runs. +# Also quote any args containing shell meta-characters. +# Make two passes to allow for proper duplicate-argument suppression. +ac_configure_args= +ac_configure_args0= +ac_configure_args1= +ac_must_keep_next=false +for ac_pass in 1 2 +do + for ac_arg + do + case $ac_arg in + -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + continue ;; + *\'*) + ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + case $ac_pass in + 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; + 2) + as_fn_append ac_configure_args1 " '$ac_arg'" + if test $ac_must_keep_next = true; then + ac_must_keep_next=false # Got value, back to normal. + else + case $ac_arg in + *=* | --config-cache | -C | -disable-* | --disable-* \ + | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ + | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ + | -with-* | --with-* | -without-* | --without-* | --x) + case "$ac_configure_args0 " in + "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; + esac + ;; + -* ) ac_must_keep_next=true ;; + esac + fi + as_fn_append ac_configure_args " '$ac_arg'" + ;; + esac + done +done +{ ac_configure_args0=; unset ac_configure_args0;} +{ ac_configure_args1=; unset ac_configure_args1;} + +# When interrupted or exit'd, cleanup temporary files, and complete +# config.log. We remove comments because anyway the quotes in there +# would cause problems or look ugly. +# WARNING: Use '\'' to represent an apostrophe within the trap. +# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. 
+trap 'exit_status=$? + # Save into config.log some information that might help in debugging. + { + echo + + $as_echo "## ---------------- ## +## Cache variables. ## +## ---------------- ##" + echo + # The following way of writing the cache mishandles newlines in values, +( + for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do + eval ac_val=\$$ac_var + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( + *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 +$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( + BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( + *) { eval $ac_var=; unset $ac_var;} ;; + esac ;; + esac + done + (set) 2>&1 | + case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( + *${as_nl}ac_space=\ *) + sed -n \ + "s/'\''/'\''\\\\'\'''\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" + ;; #( + *) + sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" + ;; + esac | + sort +) + echo + + $as_echo "## ----------------- ## +## Output variables. ## +## ----------------- ##" + echo + for ac_var in $ac_subst_vars + do + eval ac_val=\$$ac_var + case $ac_val in + *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + esac + $as_echo "$ac_var='\''$ac_val'\''" + done | sort + echo + + if test -n "$ac_subst_files"; then + $as_echo "## ------------------- ## +## File substitutions. ## +## ------------------- ##" + echo + for ac_var in $ac_subst_files + do + eval ac_val=\$$ac_var + case $ac_val in + *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + esac + $as_echo "$ac_var='\''$ac_val'\''" + done | sort + echo + fi + + if test -s confdefs.h; then + $as_echo "## ----------- ## +## confdefs.h. 
## +## ----------- ##" + echo + cat confdefs.h + echo + fi + test "$ac_signal" != 0 && + $as_echo "$as_me: caught signal $ac_signal" + $as_echo "$as_me: exit $exit_status" + } >&5 + rm -f core *.core core.conftest.* && + rm -f -r conftest* confdefs* conf$$* $ac_clean_files && + exit $exit_status +' 0 +for ac_signal in 1 2 13 15; do + trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal +done +ac_signal=0 + +# confdefs.h avoids OS command line length limits that DEFS can exceed. +rm -f -r conftest* confdefs.h + +$as_echo "/* confdefs.h */" > confdefs.h + +# Predefined preprocessor variables. + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_NAME "$PACKAGE_NAME" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_TARNAME "$PACKAGE_TARNAME" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_VERSION "$PACKAGE_VERSION" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_STRING "$PACKAGE_STRING" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_URL "$PACKAGE_URL" +_ACEOF + + +# Let the site file select an alternate cache file if it wants to. +# Prefer an explicitly selected file to automatically selected ones. +ac_site_file1=NONE +ac_site_file2=NONE +if test -n "$CONFIG_SITE"; then + # We do not want a PATH search for config.site. 
+ case $CONFIG_SITE in #(( + -*) ac_site_file1=./$CONFIG_SITE;; + */*) ac_site_file1=$CONFIG_SITE;; + *) ac_site_file1=./$CONFIG_SITE;; + esac +elif test "x$prefix" != xNONE; then + ac_site_file1=$prefix/share/config.site + ac_site_file2=$prefix/etc/config.site +else + ac_site_file1=$ac_default_prefix/share/config.site + ac_site_file2=$ac_default_prefix/etc/config.site +fi +for ac_site_file in "$ac_site_file1" "$ac_site_file2" +do + test "x$ac_site_file" = xNONE && continue + if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 +$as_echo "$as_me: loading site script $ac_site_file" >&6;} + sed 's/^/| /' "$ac_site_file" >&5 + . "$ac_site_file" \ + || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "failed to load site script $ac_site_file +See \`config.log' for more details" "$LINENO" 5; } + fi +done + +if test -r "$cache_file"; then + # Some versions of bash will fail to source /dev/null (special files + # actually), so we avoid doing that. DJGPP emulates it as a regular file. + if test /dev/null != "$cache_file" && test -f "$cache_file"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 +$as_echo "$as_me: loading cache $cache_file" >&6;} + case $cache_file in + [\\/]* | ?:[\\/]* ) . "$cache_file";; + *) . "./$cache_file";; + esac + fi +else + { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 +$as_echo "$as_me: creating cache $cache_file" >&6;} + >$cache_file +fi + +# Check that the precious variables saved in the cache have kept the same +# value. 
+ac_cache_corrupted=false +for ac_var in $ac_precious_vars; do + eval ac_old_set=\$ac_cv_env_${ac_var}_set + eval ac_new_set=\$ac_env_${ac_var}_set + eval ac_old_val=\$ac_cv_env_${ac_var}_value + eval ac_new_val=\$ac_env_${ac_var}_value + case $ac_old_set,$ac_new_set in + set,) + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 +$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,set) + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 +$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,);; + *) + if test "x$ac_old_val" != "x$ac_new_val"; then + # differences in whitespace do not lead to failure. + ac_old_val_w=`echo x $ac_old_val` + ac_new_val_w=`echo x $ac_new_val` + if test "$ac_old_val_w" != "$ac_new_val_w"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 +$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} + ac_cache_corrupted=: + else + { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 +$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} + eval $ac_var=\$ac_old_val + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 +$as_echo "$as_me: former value: \`$ac_old_val'" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 +$as_echo "$as_me: current value: \`$ac_new_val'" >&2;} + fi;; + esac + # Pass precious variables to config.status. + if test "$ac_new_set" = set; then + case $ac_new_val in + *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; + *) ac_arg=$ac_var=$ac_new_val ;; + esac + case " $ac_configure_args " in + *" '$ac_arg' "*) ;; # Avoid dups. 
Use of quotes ensures accuracy. + *) as_fn_append ac_configure_args " '$ac_arg'" ;; + esac + fi +done +if $ac_cache_corrupted; then + { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 +$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} + as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 +fi +## -------------------- ## +## Main body of script. ## +## -------------------- ## + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + +ac_includes_default="$ac_includes_default +#ifdef HAVE_SYS_SOCKET_H +# include +#endif +#ifdef HAVE_NETINET_IN_H +# include +#endif +#ifdef HAVE_NETDB_H +# include +#endif +#ifdef HAVE_WINSOCK2_H +# include +#endif +#ifdef HAVE_WS2TCPIP_H +# include +// fix for MingW not defining IPV6_V6ONLY +# define IPV6_V6ONLY 27 +#endif" + +# Safety check: Ensure that we are in the correct source directory. + + +ac_config_headers="$ac_config_headers include/HsNetworkConfig.h" + + +ac_aux_dir= +for ac_dir in "$srcdir" "$srcdir/.." "$srcdir/../.."; do + if test -f "$ac_dir/install-sh"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install-sh -c" + break + elif test -f "$ac_dir/install.sh"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install.sh -c" + break + elif test -f "$ac_dir/shtool"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/shtool install -c" + break + fi +done +if test -z "$ac_aux_dir"; then + as_fn_error $? 
"cannot find install-sh, install.sh, or shtool in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" "$LINENO" 5 +fi + +# These three variables are undocumented and unsupported, +# and are intended to be withdrawn in a future Autoconf release. +# They can cause serious problems if a builder's source tree is in a directory +# whose full name contains unusual characters. +ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. +ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. +ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. + + +# Make sure we can run config.sub. +$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || + as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5 + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5 +$as_echo_n "checking build system type... " >&6; } +if ${ac_cv_build+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_build_alias=$build_alias +test "x$ac_build_alias" = x && + ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` +test "x$ac_build_alias" = x && + as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5 +ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || + as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5 + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5 +$as_echo "$ac_cv_build" >&6; } +case $ac_cv_build in +*-*-*) ;; +*) as_fn_error $? "invalid value of canonical build" "$LINENO" 5;; +esac +build=$ac_cv_build +ac_save_IFS=$IFS; IFS='-' +set x $ac_cv_build +shift +build_cpu=$1 +build_vendor=$2 +shift; shift +# Remember, the first character of IFS is used to create $*, +# except with old shells: +build_os=$* +IFS=$ac_save_IFS +case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5 +$as_echo_n "checking host system type... 
" >&6; } +if ${ac_cv_host+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "x$host_alias" = x; then + ac_cv_host=$ac_cv_build +else + ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || + as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5 +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5 +$as_echo "$ac_cv_host" >&6; } +case $ac_cv_host in +*-*-*) ;; +*) as_fn_error $? "invalid value of canonical host" "$LINENO" 5;; +esac +host=$ac_cv_host +ac_save_IFS=$IFS; IFS='-' +set x $ac_cv_host +shift +host_cpu=$1 +host_vendor=$2 +shift; shift +# Remember, the first character of IFS is used to create $*, +# except with old shells: +host_os=$* +IFS=$ac_save_IFS +case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac + + + + +# Check whether --with-cc was given. +if test "${with_cc+set}" = set; then : + withval=$with_cc; CC=$withval +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. +set dummy ${ac_tool_prefix}gcc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="${ac_tool_prefix}gcc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_CC"; then + ac_ct_CC=$CC + # Extract the first word of "gcc", so it can be a program name with args. +set dummy gcc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CC="gcc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +$as_echo "$ac_ct_CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +else + CC="$ac_cv_prog_CC" +fi + +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. +set dummy ${ac_tool_prefix}cc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="${ac_tool_prefix}cc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + fi +fi +if test -z "$CC"; then + # Extract the first word of "cc", so it can be a program name with args. +set dummy cc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else + ac_prog_rejected=no +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then + ac_prog_rejected=yes + continue + fi + ac_cv_prog_CC="cc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +if test $ac_prog_rejected = yes; then + # We found a bogon in the path, so make sure we never use it. + set dummy $ac_cv_prog_CC + shift + if test $# != 0; then + # We chose a different compiler from the bogus one. + # However, it has the same basename, so the bogon will be chosen + # first if we set CC to just the basename; use the full file name. 
+ shift + ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" + fi +fi +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + for ac_prog in cl.exe + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$CC" && break + done +fi +if test -z "$CC"; then + ac_ct_CC=$CC + for ac_prog in cl.exe +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CC="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +$as_echo "$ac_ct_CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_CC" && break +done + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +fi + +fi + + +test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "no acceptable C compiler found in \$PATH +See \`config.log' for more details" "$LINENO" 5; } + +# Provide some information about the compiler. +$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 +set X $ac_compile +ac_compiler=$2 +for ac_option in --version -v -V -qversion; do + { { ac_try="$ac_compiler $ac_option >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compiler $ac_option >&5") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + sed '10a\ +... rest of stderr output deleted ... 
+ 10q' conftest.err >conftest.er1 + cat conftest.er1 >&5 + fi + rm -f conftest.er1 conftest.err + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } +done + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" +# Try to create an executable without -o first, disregard a.out. +# It will help us diagnose broken compilers, and finding out an intuition +# of exeext. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 +$as_echo_n "checking whether the C compiler works... " >&6; } +ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` + +# The possible output files: +ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" + +ac_rmfiles= +for ac_file in $ac_files +do + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; + * ) ac_rmfiles="$ac_rmfiles $ac_file";; + esac +done +rm -f $ac_rmfiles + +if { { ac_try="$ac_link_default" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link_default") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then : + # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. +# So ignore a value of `no', otherwise this would lead to `EXEEXT = no' +# in a Makefile. We should not override ac_cv_exeext if it was cached, +# so that the user can short-circuit this test for compilers unknown to +# Autoconf. 
+for ac_file in $ac_files '' +do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) + ;; + [ab].out ) + # We found the default executable, but exeext='' is most + # certainly right. + break;; + *.* ) + if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; + then :; else + ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + fi + # We set ac_cv_exeext here because the later test for it is not + # safe: cross compilers may not add the suffix if given an `-o' + # argument, so we may need to know it at that point already. + # Even if this section looks crufty: it has the advantage of + # actually working. + break;; + * ) + break;; + esac +done +test "$ac_cv_exeext" = no && ac_cv_exeext= + +else + ac_file='' +fi +if test -z "$ac_file"; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +$as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error 77 "C compiler cannot create executables +See \`config.log' for more details" "$LINENO" 5; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 +$as_echo_n "checking for C compiler default output file name... " >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 +$as_echo "$ac_file" >&6; } +ac_exeext=$ac_cv_exeext + +rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out +ac_clean_files=$ac_clean_files_save +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 +$as_echo_n "checking for suffix of executables... 
" >&6; } +if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then : + # If both `conftest.exe' and `conftest' are `present' (well, observable) +# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will +# work properly (i.e., refer to `conftest.exe'), while it won't with +# `rm'. +for ac_file in conftest.exe conftest conftest.*; do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; + *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + break;; + * ) break;; + esac +done +else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot compute suffix of executables: cannot compile and link +See \`config.log' for more details" "$LINENO" 5; } +fi +rm -f conftest conftest$ac_cv_exeext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 +$as_echo "$ac_cv_exeext" >&6; } + +rm -f conftest.$ac_ext +EXEEXT=$ac_cv_exeext +ac_exeext=$EXEEXT +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ +FILE *f = fopen ("conftest.out", "w"); + return ferror (f) || fclose (f) != 0; + + ; + return 0; +} +_ACEOF +ac_clean_files="$ac_clean_files conftest.out" +# Check that the compiler produces executables we can run. If not, either +# the compiler is broken, or we cross compile. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 +$as_echo_n "checking whether we are cross compiling... 
" >&6; } +if test "$cross_compiling" != yes; then + { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + if { ac_try='./conftest$ac_cv_exeext' + { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; }; then + cross_compiling=no + else + if test "$cross_compiling" = maybe; then + cross_compiling=yes + else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot run C compiled programs. +If you meant to cross compile, use \`--host'. +See \`config.log' for more details" "$LINENO" 5; } + fi + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 +$as_echo "$cross_compiling" >&6; } + +rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out +ac_clean_files=$ac_clean_files_save +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 +$as_echo_n "checking for suffix of object files... " >&6; } +if ${ac_cv_objext+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.o conftest.obj +if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>&5 + ac_status=$? 
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then : + for ac_file in conftest.o conftest.obj conftest.*; do + test -f "$ac_file" || continue; + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; + *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` + break;; + esac +done +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot compute suffix of object files: cannot compile +See \`config.log' for more details" "$LINENO" 5; } +fi +rm -f conftest.$ac_cv_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 +$as_echo "$ac_cv_objext" >&6; } +OBJEXT=$ac_cv_objext +ac_objext=$OBJEXT +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 +$as_echo_n "checking whether we are using the GNU C compiler... " >&6; } +if ${ac_cv_c_compiler_gnu+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ +#ifndef __GNUC__ + choke me +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_compiler_gnu=yes +else + ac_compiler_gnu=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_c_compiler_gnu=$ac_compiler_gnu + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 +$as_echo "$ac_cv_c_compiler_gnu" >&6; } +if test $ac_compiler_gnu = yes; then + GCC=yes +else + GCC= +fi +ac_test_CFLAGS=${CFLAGS+set} +ac_save_CFLAGS=$CFLAGS +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 +$as_echo_n "checking whether $CC accepts -g... 
" >&6; } +if ${ac_cv_prog_cc_g+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_save_c_werror_flag=$ac_c_werror_flag + ac_c_werror_flag=yes + ac_cv_prog_cc_g=no + CFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_g=yes +else + CFLAGS="" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + +else + ac_c_werror_flag=$ac_save_c_werror_flag + CFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_g=yes +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_c_werror_flag=$ac_save_c_werror_flag +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 +$as_echo "$ac_cv_prog_cc_g" >&6; } +if test "$ac_test_CFLAGS" = set; then + CFLAGS=$ac_save_CFLAGS +elif test $ac_cv_prog_cc_g = yes; then + if test "$GCC" = yes; then + CFLAGS="-g -O2" + else + CFLAGS="-g" + fi +else + if test "$GCC" = yes; then + CFLAGS="-O2" + else + CFLAGS= + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 +$as_echo_n "checking for $CC option to accept ISO C89... " >&6; } +if ${ac_cv_prog_cc_c89+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_cv_prog_cc_c89=no +ac_save_CC=$CC +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +struct stat; +/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. 
*/ +struct buf { int x; }; +FILE * (*rcsopen) (struct buf *, struct stat *, int); +static char *e (p, i) + char **p; + int i; +{ + return p[i]; +} +static char *f (char * (*g) (char **, int), char **p, ...) +{ + char *s; + va_list v; + va_start (v,p); + s = g (p, va_arg (v,int)); + va_end (v); + return s; +} + +/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has + function prototypes and stuff, but not '\xHH' hex character constants. + These don't provoke an error unfortunately, instead are silently treated + as 'x'. The following induces an error, until -std is added to get + proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an + array size at least. It's necessary to write '\x00'==0 to get something + that's true only with -std. */ +int osf4_cc_array ['\x00' == 0 ? 1 : -1]; + +/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters + inside strings and character constants. */ +#define FOO(x) 'x' +int xlc6_cc_array[FOO(a) == 'x' ? 
1 : -1]; + +int test (int i, double x); +struct s1 {int (*f) (int a);}; +struct s2 {int (*f) (double a);}; +int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); +int argc; +char **argv; +int +main () +{ +return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; + ; + return 0; +} +_ACEOF +for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ + -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" +do + CC="$ac_save_CC $ac_arg" + if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_c89=$ac_arg +fi +rm -f core conftest.err conftest.$ac_objext + test "x$ac_cv_prog_cc_c89" != "xno" && break +done +rm -f conftest.$ac_ext +CC=$ac_save_CC + +fi +# AC_CACHE_VAL +case "x$ac_cv_prog_cc_c89" in + x) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +$as_echo "none needed" >&6; } ;; + xno) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +$as_echo "unsupported" >&6; } ;; + *) + CC="$CC $ac_cv_prog_cc_c89" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 +$as_echo "$ac_cv_prog_cc_c89" >&6; } ;; +esac +if test "x$ac_cv_prog_cc_c89" != xno; then : + +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for an ANSI C-conforming const" >&5 +$as_echo_n "checking for an ANSI C-conforming const... " >&6; } +if ${ac_cv_c_const+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + +#ifndef __cplusplus + /* Ultrix mips cc rejects this sort of thing. */ + typedef int charset[2]; + const charset cs = { 0, 0 }; + /* SunOS 4.1.1 cc rejects this. */ + char const *const *pcpcc; + char **ppc; + /* NEC SVR4.0.2 mips cc rejects this. 
*/ + struct point {int x, y;}; + static struct point const zero = {0,0}; + /* AIX XL C 1.02.0.0 rejects this. + It does not let you subtract one const X* pointer from another in + an arm of an if-expression whose if-part is not a constant + expression */ + const char *g = "string"; + pcpcc = &g + (g ? g-g : 0); + /* HPUX 7.0 cc rejects these. */ + ++pcpcc; + ppc = (char**) pcpcc; + pcpcc = (char const *const *) ppc; + { /* SCO 3.2v4 cc rejects this sort of thing. */ + char tx; + char *t = &tx; + char const *s = 0 ? (char *) 0 : (char const *) 0; + + *t++ = 0; + if (s) return 0; + } + { /* Someone thinks the Sun supposedly-ANSI compiler will reject this. */ + int x[] = {25, 17}; + const int *foo = &x[0]; + ++foo; + } + { /* Sun SC1.0 ANSI compiler rejects this -- but not the above. */ + typedef const int *iptr; + iptr p = 0; + ++p; + } + { /* AIX XL C 1.02.0.0 rejects this sort of thing, saying + "k.c", line 2.27: 1506-025 (S) Operand must be a modifiable lvalue. */ + struct s { int j; const int *ap[3]; } bx; + struct s *b = &bx; b->j = 5; + } + { /* ULTRIX-32 V3.1 (Rev 9) vcc rejects this */ + const int foo = 10; + if (!foo) return 0; + } + return !cs[0] && !zero.x; +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_c_const=yes +else + ac_cv_c_const=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_const" >&5 +$as_echo "$ac_cv_c_const" >&6; } +if test $ac_cv_c_const = no; then + +$as_echo "#define const /**/" >>confdefs.h + +fi + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 +$as_echo_n "checking how to run the C preprocessor... " >&6; } +# On Suns, sometimes $CPP names a directory. 
+if test -n "$CPP" && test -d "$CPP"; then + CPP= +fi +if test -z "$CPP"; then + if ${ac_cv_prog_CPP+:} false; then : + $as_echo_n "(cached) " >&6 +else + # Double quotes because CPP needs to be expanded + for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" + do + ac_preproc_ok=false +for ac_c_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + +else + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + # Broken: success on invalid input. +continue +else + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok; then : + break +fi + + done + ac_cv_prog_CPP=$CPP + +fi + CPP=$ac_cv_prog_CPP +else + ac_cv_prog_CPP=$CPP +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 +$as_echo "$CPP" >&6; } +ac_preproc_ok=false +for ac_c_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. 
+ # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + +else + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + # Broken: success on invalid input. +continue +else + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok; then : + +else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "C preprocessor \"$CPP\" fails sanity check +See \`config.log' for more details" "$LINENO" 5; } +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 +$as_echo_n "checking for grep that handles long lines and -e... " >&6; } +if ${ac_cv_path_GREP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -z "$GREP"; then + ac_path_GREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_prog in grep ggrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_GREP" || continue +# Check for GNU ac_path_GREP and select it if it is found. + # Check for GNU $ac_path_GREP +case `"$ac_path_GREP" --version 2>&1` in +*GNU*) + ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo 'GREP' >> "conftest.nl" + "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_GREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_GREP="$ac_path_GREP" + ac_path_GREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_GREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_GREP"; then + as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi +else + ac_cv_path_GREP=$GREP +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 +$as_echo "$ac_cv_path_GREP" >&6; } + GREP="$ac_cv_path_GREP" + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 +$as_echo_n "checking for egrep... 
" >&6; } +if ${ac_cv_path_EGREP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 + then ac_cv_path_EGREP="$GREP -E" + else + if test -z "$EGREP"; then + ac_path_EGREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in egrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_EGREP" || continue +# Check for GNU ac_path_EGREP and select it if it is found. + # Check for GNU $ac_path_EGREP +case `"$ac_path_EGREP" --version 2>&1` in +*GNU*) + ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo 'EGREP' >> "conftest.nl" + "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_EGREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_EGREP="$ac_path_EGREP" + ac_path_EGREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_EGREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_EGREP"; then + as_fn_error $? 
"no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi +else + ac_cv_path_EGREP=$EGREP +fi + + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 +$as_echo "$ac_cv_path_EGREP" >&6; } + EGREP="$ac_cv_path_EGREP" + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 +$as_echo_n "checking for ANSI C header files... " >&6; } +if ${ac_cv_header_stdc+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +#include +#include + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_header_stdc=yes +else + ac_cv_header_stdc=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +if test $ac_cv_header_stdc = yes; then + # SunOS 4.x string.h does not declare mem*, contrary to ANSI. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "memchr" >/dev/null 2>&1; then : + +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi + +if test $ac_cv_header_stdc = yes; then + # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "free" >/dev/null 2>&1; then : + +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi + +if test $ac_cv_header_stdc = yes; then + # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. + if test "$cross_compiling" = yes; then : + : +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +#if ((' ' & 0x0FF) == 0x020) +# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') +# define TOUPPER(c) (ISLOWER(c) ? 
'A' + ((c) - 'a') : (c)) +#else +# define ISLOWER(c) \ + (('a' <= (c) && (c) <= 'i') \ + || ('j' <= (c) && (c) <= 'r') \ + || ('s' <= (c) && (c) <= 'z')) +# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) +#endif + +#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) +int +main () +{ + int i; + for (i = 0; i < 256; i++) + if (XOR (islower (i), ISLOWER (i)) + || toupper (i) != TOUPPER (i)) + return 2; + return 0; +} +_ACEOF +if ac_fn_c_try_run "$LINENO"; then : + +else + ac_cv_header_stdc=no +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + +fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 +$as_echo "$ac_cv_header_stdc" >&6; } +if test $ac_cv_header_stdc = yes; then + +$as_echo "#define STDC_HEADERS 1" >>confdefs.h + +fi + +# On IRIX 5.3, sys/types and inttypes.h are conflicting. +for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ + inttypes.h stdint.h unistd.h +do : + as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default +" +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + + +for ac_header in fcntl.h limits.h stdlib.h sys/types.h unistd.h winsock2.h ws2tcpip.h +do : + as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + +for ac_header in arpa/inet.h netdb.h netinet/in.h netinet/tcp.h sys/socket.h sys/uio.h sys/un.h linux/can.h linux/tcp.h +do : + as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" 
"$as_ac_Header" "$ac_includes_default" +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + +for ac_header in net/if.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "net/if.h" "ac_cv_header_net_if_h" "$ac_includes_default" +if test "x$ac_cv_header_net_if_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_NET_IF_H 1 +_ACEOF + +fi + +done + + +for ac_func in readlink symlink if_nametoindex +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + +fi +done + + +ac_fn_c_check_member "$LINENO" "struct msghdr" "msg_control" "ac_cv_member_struct_msghdr_msg_control" "#if HAVE_SYS_TYPES_H +# include +#endif +#if HAVE_SYS_SOCKET_H +# include +#endif +#if HAVE_SYS_UIO_H +# include +#endif +" +if test "x$ac_cv_member_struct_msghdr_msg_control" = xyes; then : + +cat >>confdefs.h <<_ACEOF +#define HAVE_STRUCT_MSGHDR_MSG_CONTROL 1 +_ACEOF + + +fi +ac_fn_c_check_member "$LINENO" "struct msghdr" "msg_accrights" "ac_cv_member_struct_msghdr_msg_accrights" "#if HAVE_SYS_TYPES_H +# include +#endif +#if HAVE_SYS_SOCKET_H +# include +#endif +#if HAVE_SYS_UIO_H +# include +#endif +" +if test "x$ac_cv_member_struct_msghdr_msg_accrights" = xyes; then : + +cat >>confdefs.h <<_ACEOF +#define HAVE_STRUCT_MSGHDR_MSG_ACCRIGHTS 1 +_ACEOF + + +fi + + +ac_fn_c_check_member "$LINENO" "struct sockaddr" "sa_len" "ac_cv_member_struct_sockaddr_sa_len" "#if HAVE_SYS_TYPES_H +# include +#endif +#if HAVE_SYS_SOCKET_H +# include +#endif +" +if test "x$ac_cv_member_struct_sockaddr_sa_len" = xyes; then : + +cat >>confdefs.h <<_ACEOF +#define HAVE_STRUCT_SOCKADDR_SA_LEN 1 +_ACEOF + + +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for in_addr_t in netinet/in.h" >&5 +$as_echo_n "checking 
for in_addr_t in netinet/in.h... " >&6; } +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "in_addr_t" >/dev/null 2>&1; then : + +$as_echo "#define HAVE_IN_ADDR_T 1" >>confdefs.h + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi +rm -f conftest* + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for SO_PEERCRED and struct ucred in sys/socket.h" >&5 +$as_echo_n "checking for SO_PEERCRED and struct ucred in sys/socket.h... " >&6; } +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +#ifndef SO_PEERCRED +# error no SO_PEERCRED +#endif +struct ucred u; +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_ucred=yes +else + ac_cv_ucred=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x$ac_cv_ucred" = xno; then + old_CFLAGS="$CFLAGS" + CFLAGS="-D_GNU_SOURCE $CFLAGS" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +#ifndef SO_PEERCRED +# error no SO_PEERCRED +#endif +struct ucred u; +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_ucred=yes +else + ac_cv_ucred=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + if test "x$ac_cv_ucred" = xyes; then + EXTRA_CPPFLAGS=-D_GNU_SOURCE + fi +else + old_CFLAGS="$CFLAGS" +fi +if test "x$ac_cv_ucred" = xno; then + CFLAGS="$old_CFLAGS" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +else + +$as_echo "#define HAVE_STRUCT_UCRED 1" >>confdefs.h + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for getpeereid in unistd.h" >&5 +$as_echo_n "checking for getpeereid in unistd.h... 
" >&6; } +ac_fn_c_check_func "$LINENO" "getpeereid" "ac_cv_func_getpeereid" +if test "x$ac_cv_func_getpeereid" = xyes; then : + +$as_echo "#define HAVE_GETPEEREID 1" >>confdefs.h + +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for _head_libws2_32_a in -lws2_32" >&5 +$as_echo_n "checking for _head_libws2_32_a in -lws2_32... " >&6; } +if ${ac_cv_lib_ws2_32__head_libws2_32_a+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lws2_32 $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char _head_libws2_32_a (); +int +main () +{ +return _head_libws2_32_a (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_ws2_32__head_libws2_32_a=yes +else + ac_cv_lib_ws2_32__head_libws2_32_a=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ws2_32__head_libws2_32_a" >&5 +$as_echo "$ac_cv_lib_ws2_32__head_libws2_32_a" >&6; } +if test "x$ac_cv_lib_ws2_32__head_libws2_32_a" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBWS2_32 1 +_ACEOF + + LIBS="-lws2_32 $LIBS" + +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for getaddrinfo" >&5 +$as_echo_n "checking for getaddrinfo... " >&6; } +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +$ac_includes_default +int testme(){ getaddrinfo; } +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + +$as_echo "#define HAVE_GETADDRINFO 1" >>confdefs.h + ac_have_getaddrinfo=yes; { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +if test "x$ac_have_getaddrinfo" = x; then + old_CFLAGS="$CFLAGS" + if test "z$ac_cv_lib_ws2_32__head_libws2_32_a" = zyes; then + CFLAGS="-DWINVER=0x0501 $CFLAGS" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for getaddrinfo if WINVER is 0x0501" >&5 +$as_echo_n "checking for getaddrinfo if WINVER is 0x0501... " >&6; } + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$ac_includes_default + int testme(){ getaddrinfo; } +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + +$as_echo "#define HAVE_GETADDRINFO 1" >>confdefs.h + +$as_echo "#define NEED_WINVER_XP 1" >>confdefs.h + EXTRA_CPPFLAGS="-DWINVER=0x0501 $EXTRA_CPPFLAGS"; { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + CFLAGS="$old_CFLAGS"; { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + fi +fi + +for ac_func in gai_strerror +do : + ac_fn_c_check_func "$LINENO" "gai_strerror" "ac_cv_func_gai_strerror" +if test "x$ac_cv_func_gai_strerror" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_GAI_STRERROR 1 +_ACEOF + +fi +done + + +ac_fn_c_check_decl "$LINENO" "AI_ADDRCONFIG" "ac_cv_have_decl_AI_ADDRCONFIG" "$ac_includes_default" +if test "x$ac_cv_have_decl_AI_ADDRCONFIG" = xyes; then : + ac_have_decl=1 +else + ac_have_decl=0 +fi + +cat >>confdefs.h <<_ACEOF +#define HAVE_DECL_AI_ADDRCONFIG $ac_have_decl +_ACEOF +ac_fn_c_check_decl "$LINENO" "AI_ALL" 
"ac_cv_have_decl_AI_ALL" "$ac_includes_default" +if test "x$ac_cv_have_decl_AI_ALL" = xyes; then : + ac_have_decl=1 +else + ac_have_decl=0 +fi + +cat >>confdefs.h <<_ACEOF +#define HAVE_DECL_AI_ALL $ac_have_decl +_ACEOF +ac_fn_c_check_decl "$LINENO" "AI_NUMERICSERV" "ac_cv_have_decl_AI_NUMERICSERV" "$ac_includes_default" +if test "x$ac_cv_have_decl_AI_NUMERICSERV" = xyes; then : + ac_have_decl=1 +else + ac_have_decl=0 +fi + +cat >>confdefs.h <<_ACEOF +#define HAVE_DECL_AI_NUMERICSERV $ac_have_decl +_ACEOF +ac_fn_c_check_decl "$LINENO" "AI_V4MAPPED" "ac_cv_have_decl_AI_V4MAPPED" "$ac_includes_default" +if test "x$ac_cv_have_decl_AI_V4MAPPED" = xyes; then : + ac_have_decl=1 +else + ac_have_decl=0 +fi + +cat >>confdefs.h <<_ACEOF +#define HAVE_DECL_AI_V4MAPPED $ac_have_decl +_ACEOF + + +ac_fn_c_check_decl "$LINENO" "IPV6_V6ONLY" "ac_cv_have_decl_IPV6_V6ONLY" "$ac_includes_default" +if test "x$ac_cv_have_decl_IPV6_V6ONLY" = xyes; then : + ac_have_decl=1 +else + ac_have_decl=0 +fi + +cat >>confdefs.h <<_ACEOF +#define HAVE_DECL_IPV6_V6ONLY $ac_have_decl +_ACEOF + + +ac_fn_c_check_decl "$LINENO" "IPPROTO_IP" "ac_cv_have_decl_IPPROTO_IP" "$ac_includes_default" +if test "x$ac_cv_have_decl_IPPROTO_IP" = xyes; then : + ac_have_decl=1 +else + ac_have_decl=0 +fi + +cat >>confdefs.h <<_ACEOF +#define HAVE_DECL_IPPROTO_IP $ac_have_decl +_ACEOF +ac_fn_c_check_decl "$LINENO" "IPPROTO_TCP" "ac_cv_have_decl_IPPROTO_TCP" "$ac_includes_default" +if test "x$ac_cv_have_decl_IPPROTO_TCP" = xyes; then : + ac_have_decl=1 +else + ac_have_decl=0 +fi + +cat >>confdefs.h <<_ACEOF +#define HAVE_DECL_IPPROTO_TCP $ac_have_decl +_ACEOF +ac_fn_c_check_decl "$LINENO" "IPPROTO_IPV6" "ac_cv_have_decl_IPPROTO_IPV6" "$ac_includes_default" +if test "x$ac_cv_have_decl_IPPROTO_IPV6" = xyes; then : + ac_have_decl=1 +else + ac_have_decl=0 +fi + +cat >>confdefs.h <<_ACEOF +#define HAVE_DECL_IPPROTO_IPV6 $ac_have_decl +_ACEOF + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sendfile in 
sys/sendfile.h" >&5 +$as_echo_n "checking for sendfile in sys/sendfile.h... " >&6; } +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "sendfile" >/dev/null 2>&1; then : + +$as_echo "#define HAVE_LINUX_SENDFILE 1" >>confdefs.h + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi +rm -f conftest* + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sendfile in sys/socket.h" >&5 +$as_echo_n "checking for sendfile in sys/socket.h... " >&6; } +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "sendfile" >/dev/null 2>&1; then : + +$as_echo "#define HAVE_BSD_SENDFILE 1" >>confdefs.h + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi +rm -f conftest* + + +for ac_func in gethostent +do : + ac_fn_c_check_func "$LINENO" "gethostent" "ac_cv_func_gethostent" +if test "x$ac_cv_func_gethostent" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_GETHOSTENT 1 +_ACEOF + +fi +done + + +for ac_func in accept4 +do : + ac_fn_c_check_func "$LINENO" "accept4" "ac_cv_func_accept4" +if test "x$ac_cv_func_accept4" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_ACCEPT4 1 +_ACEOF + +fi +done + + +case "$host" in +*-mingw* | *-msys*) + EXTRA_SRCS="cbits/initWinSock.c, cbits/winSockErr.c, cbits/asyncAccept.c" + EXTRA_LIBS=ws2_32 + CALLCONV=stdcall ;; +*-solaris2*) + EXTRA_SRCS="cbits/ancilData.c" + EXTRA_LIBS="nsl, socket" + CALLCONV=ccall ;; +*) + EXTRA_SRCS="cbits/ancilData.c" + EXTRA_LIBS= + CALLCONV=ccall ;; +esac + + + + + +ac_config_files="$ac_config_files network.buildinfo" + + +cat >confcache <<\_ACEOF +# This file is a shell script that caches 
the results of configure +# tests run on this system so they can be shared between configure +# scripts and configure runs, see configure's option --config-cache. +# It is not useful on other systems. If it contains results you don't +# want to keep, you may remove or edit it. +# +# config.status only pays attention to the cache file if you give it +# the --recheck option to rerun configure. +# +# `ac_cv_env_foo' variables (set or unset) will be overridden when +# loading this file, other *unset* `ac_cv_foo' will be assigned the +# following values. + +_ACEOF + +# The following way of writing the cache mishandles newlines in values, +# but we know of no workaround that is simple, portable, and efficient. +# So, we kill variables containing newlines. +# Ultrix sh set writes to stderr and can't be redirected directly, +# and sets the high bit in the cache file unless we assign to the vars. +( + for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do + eval ac_val=\$$ac_var + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( + *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 +$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( + BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( + *) { eval $ac_var=; unset $ac_var;} ;; + esac ;; + esac + done + + (set) 2>&1 | + case $as_nl`(ac_space=' '; set) 2>&1` in #( + *${as_nl}ac_space=\ *) + # `set' does not quote correctly, so add quotes: double-quote + # substitution turns \\\\ into \\, and sed turns \\ into \. + sed -n \ + "s/'/'\\\\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" + ;; #( + *) + # `set' quotes correctly as required by POSIX, so do not add quotes. 
+ sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" + ;; + esac | + sort +) | + sed ' + /^ac_cv_env_/b end + t clear + :clear + s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ + t end + s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ + :end' >>confcache +if diff "$cache_file" confcache >/dev/null 2>&1; then :; else + if test -w "$cache_file"; then + if test "x$cache_file" != "x/dev/null"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 +$as_echo "$as_me: updating cache $cache_file" >&6;} + if test ! -f "$cache_file" || test -h "$cache_file"; then + cat confcache >"$cache_file" + else + case $cache_file in #( + */* | ?:*) + mv -f confcache "$cache_file"$$ && + mv -f "$cache_file"$$ "$cache_file" ;; #( + *) + mv -f confcache "$cache_file" ;; + esac + fi + fi + else + { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 +$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} + fi +fi +rm -f confcache + +test "x$prefix" = xNONE && prefix=$ac_default_prefix +# Let make expand exec_prefix. +test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' + +DEFS=-DHAVE_CONFIG_H + +ac_libobjs= +ac_ltlibobjs= +U= +for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue + # 1. Remove the extension, and $U if already installed. + ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' + ac_i=`$as_echo "$ac_i" | sed "$ac_script"` + # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR + # will be set to the directory where LIBOBJS objects are built. 
+ as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" + as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' +done +LIBOBJS=$ac_libobjs + +LTLIBOBJS=$ac_ltlibobjs + + + +: "${CONFIG_STATUS=./config.status}" +ac_write_fail=0 +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files $CONFIG_STATUS" +{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 +$as_echo "$as_me: creating $CONFIG_STATUS" >&6;} +as_write_fail=0 +cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 +#! $SHELL +# Generated by $as_me. +# Run this file to recreate the current configuration. +# Compiler output produced by configure, useful for debugging +# configure, is in config.log if it exists. + +debug=false +ac_cs_recheck=false +ac_cs_silent=false + +SHELL=\${CONFIG_SHELL-$SHELL} +export SHELL +_ASEOF +cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 +## -------------------- ## +## M4sh Initialization. ## +## -------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi + + +as_nl=' +' +export as_nl +# Printing a long string crashes Solaris 7 /usr/bin/printf. +as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo +# Prefer a ksh shell builtin over an external printf program on Solaris, +# but without wasting forks for bash or zsh. 
+if test -z "$BASH_VERSION$ZSH_VERSION" \ + && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='print -r --' + as_echo_n='print -rn --' +elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='printf %s\n' + as_echo_n='printf %s' +else + if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then + as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' + as_echo_n='/usr/ucb/echo -n' + else + as_echo_body='eval expr "X$1" : "X\\(.*\\)"' + as_echo_n_body='eval + arg=$1; + case $arg in #( + *"$as_nl"*) + expr "X$arg" : "X\\(.*\\)$as_nl"; + arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; + esac; + expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" + ' + export as_echo_n_body + as_echo_n='sh -c $as_echo_n_body as_echo' + fi + export as_echo_body + as_echo='sh -c $as_echo_body as_echo' +fi + +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +IFS=" "" $as_nl" + +# Find who we are. Look in the path if we contain no directory separator. +as_myself= +case $0 in #(( + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break + done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! 
-f "$as_myself"; then + $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + exit 1 +fi + +# Unset variables that we do not need and which cause bugs (e.g. in +# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" +# suppresses any "Segmentation fault" message there. '((' could +# trigger a bug in pdksh 5.2.14. +for as_var in BASH_ENV ENV MAIL MAILPATH +do eval test x\${$as_var+set} = xset \ + && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +done +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# CDPATH. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + + +# as_fn_error STATUS ERROR [LINENO LOG_FD] +# ---------------------------------------- +# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are +# provided, also output the error to LOG_FD, referencing LINENO. Then exit the +# script with STATUS, using 1 if that was 0. +as_fn_error () +{ + as_status=$1; test $as_status -eq 0 && as_status=1 + if test "$4"; then + as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + fi + $as_echo "$as_me: error: $2" >&2 + as_fn_exit $as_status +} # as_fn_error + + +# as_fn_set_status STATUS +# ----------------------- +# Set $? to STATUS, without forking. +as_fn_set_status () +{ + return $1 +} # as_fn_set_status + +# as_fn_exit STATUS +# ----------------- +# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. +as_fn_exit () +{ + set +e + as_fn_set_status $1 + exit $1 +} # as_fn_exit + +# as_fn_unset VAR +# --------------- +# Portably unset VAR. +as_fn_unset () +{ + { eval $1=; unset $1;} +} +as_unset=as_fn_unset +# as_fn_append VAR VALUE +# ---------------------- +# Append the text in VALUE to the end of the definition contained in VAR. 
Take +# advantage of any shell optimizations that allow amortized linear growth over +# repeated appends, instead of the typical quadratic growth present in naive +# implementations. +if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : + eval 'as_fn_append () + { + eval $1+=\$2 + }' +else + as_fn_append () + { + eval $1=\$$1\$2 + } +fi # as_fn_append + +# as_fn_arith ARG... +# ------------------ +# Perform arithmetic evaluation on the ARGs, and store the result in the +# global $as_val. Take advantage of shells that can avoid forks. The arguments +# must be portable across $(()) and expr. +if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : + eval 'as_fn_arith () + { + as_val=$(( $* )) + }' +else + as_fn_arith () + { + as_val=`expr "$@" || test $? -eq 1` + } +fi # as_fn_arith + + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in #((((( +-n*) + case `echo 'xy\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. 
+ xy) ECHO_C='\c';; + *) echo `echo ksh88 bug on AIX 6.1` > /dev/null + ECHO_T=' ';; + esac;; +*) + ECHO_N='-n';; +esac + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir 2>/dev/null +fi +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -pR'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -pR' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln + else + as_ln_s='cp -pR' + fi +else + as_ln_s='cp -pR' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + + +# as_fn_mkdir_p +# ------------- +# Create "$as_dir" as a directory, including parents if necessary. +as_fn_mkdir_p () +{ + + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || eval $as_mkdir_p || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" + + +} # as_fn_mkdir_p +if mkdir -p . 
2>/dev/null; then + as_mkdir_p='mkdir -p "$as_dir"' +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + + +# as_fn_executable_p FILE +# ----------------------- +# Test if FILE is an executable regular file. +as_fn_executable_p () +{ + test -f "$1" && test -x "$1" +} # as_fn_executable_p +as_test_x='test -x' +as_executable_p=as_fn_executable_p + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + +exec 6>&1 +## ----------------------------------- ## +## Main body of $CONFIG_STATUS script. ## +## ----------------------------------- ## +_ASEOF +test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# Save the log message, to keep $0 and so on meaningful, and to +# report actual input values of CONFIG_FILES etc. instead of their +# values after options handling. +ac_log=" +This file was extended by Haskell network package $as_me 2.3.0.14, which was +generated by GNU Autoconf 2.69. Invocation command line was + + CONFIG_FILES = $CONFIG_FILES + CONFIG_HEADERS = $CONFIG_HEADERS + CONFIG_LINKS = $CONFIG_LINKS + CONFIG_COMMANDS = $CONFIG_COMMANDS + $ $0 $@ + +on `(hostname || uname -n) 2>/dev/null | sed 1q` +" + +_ACEOF + +case $ac_config_files in *" +"*) set x $ac_config_files; shift; ac_config_files=$*;; +esac + +case $ac_config_headers in *" +"*) set x $ac_config_headers; shift; ac_config_headers=$*;; +esac + + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +# Files that config.status was made for. +config_files="$ac_config_files" +config_headers="$ac_config_headers" + +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +ac_cs_usage="\ +\`$as_me' instantiates files and other configuration actions +from templates according to the current configuration. 
Unless the files +and actions are specified as TAGs, all are instantiated by default. + +Usage: $0 [OPTION]... [TAG]... + + -h, --help print this help, then exit + -V, --version print version number and configuration settings, then exit + --config print configuration, then exit + -q, --quiet, --silent + do not print progress messages + -d, --debug don't remove temporary files + --recheck update $as_me by reconfiguring in the same conditions + --file=FILE[:TEMPLATE] + instantiate the configuration file FILE + --header=FILE[:TEMPLATE] + instantiate the configuration header FILE + +Configuration files: +$config_files + +Configuration headers: +$config_headers + +Report bugs to ." + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" +ac_cs_version="\\ +Haskell network package config.status 2.3.0.14 +configured by $0, generated by GNU Autoconf 2.69, + with options \\"\$ac_cs_config\\" + +Copyright (C) 2012 Free Software Foundation, Inc. +This config.status script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it." + +ac_pwd='$ac_pwd' +srcdir='$srcdir' +test -n "\$AWK" || AWK=awk +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# The default lists apply if the user does not specify any file. +ac_need_defaults=: +while test $# != 0 +do + case $1 in + --*=?*) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` + ac_shift=: + ;; + --*=) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg= + ac_shift=: + ;; + *) + ac_option=$1 + ac_optarg=$2 + ac_shift=shift + ;; + esac + + case $ac_option in + # Handling of the options. 
+ -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) + ac_cs_recheck=: ;; + --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) + $as_echo "$ac_cs_version"; exit ;; + --config | --confi | --conf | --con | --co | --c ) + $as_echo "$ac_cs_config"; exit ;; + --debug | --debu | --deb | --de | --d | -d ) + debug=: ;; + --file | --fil | --fi | --f ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + '') as_fn_error $? "missing file argument" ;; + esac + as_fn_append CONFIG_FILES " '$ac_optarg'" + ac_need_defaults=false;; + --header | --heade | --head | --hea ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + as_fn_append CONFIG_HEADERS " '$ac_optarg'" + ac_need_defaults=false;; + --he | --h) + # Conflict between --help and --header + as_fn_error $? "ambiguous option: \`$1' +Try \`$0 --help' for more information.";; + --help | --hel | -h ) + $as_echo "$ac_cs_usage"; exit ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil | --si | --s) + ac_cs_silent=: ;; + + # This is an error. + -*) as_fn_error $? "unrecognized option: \`$1' +Try \`$0 --help' for more information." ;; + + *) as_fn_append ac_config_targets " $1" + ac_need_defaults=false ;; + + esac + shift +done + +ac_configure_extra_args= + +if $ac_cs_silent; then + exec 6>/dev/null + ac_configure_extra_args="$ac_configure_extra_args --silent" +fi + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +if \$ac_cs_recheck; then + set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion + shift + \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 + CONFIG_SHELL='$SHELL' + export CONFIG_SHELL + exec "\$@" +fi + +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +exec 5>>config.log +{ + echo + sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX +## Running $as_me. 
## +_ASBOX + $as_echo "$ac_log" +} >&5 + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 + +# Handling of arguments. +for ac_config_target in $ac_config_targets +do + case $ac_config_target in + "include/HsNetworkConfig.h") CONFIG_HEADERS="$CONFIG_HEADERS include/HsNetworkConfig.h" ;; + "network.buildinfo") CONFIG_FILES="$CONFIG_FILES network.buildinfo" ;; + + *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; + esac +done + + +# If the user did not use the arguments to specify the items to instantiate, +# then the envvar interface is used. Set only those that are not. +# We use the long form for the default assignment because of an extremely +# bizarre bug on SunOS 4.1.3. +if $ac_need_defaults; then + test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files + test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers +fi + +# Have a temporary directory for convenience. Make it in the build tree +# simply because there is no reason against having it here, and in addition, +# creating and moving files from /tmp can sometimes cause problems. +# Hook for its removal unless debugging. +# Note that there is a small window in which the directory will not be cleaned: +# after its creation but before its name has been assigned to `$tmp'. +$debug || +{ + tmp= ac_tmp= + trap 'exit_status=$? + : "${ac_tmp:=$tmp}" + { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status +' 0 + trap 'as_fn_exit 1' 1 2 13 15 +} +# Create a (secure) tmp directory for tmp files. + +{ + tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && + test -d "$tmp" +} || +{ + tmp=./conf$$-$RANDOM + (umask 077 && mkdir "$tmp") +} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 +ac_tmp=$tmp + +# Set up the scripts for CONFIG_FILES section. +# No need to generate them if there are no CONFIG_FILES. +# This happens for instance with `./config.status config.h'. 
+if test -n "$CONFIG_FILES"; then + + +ac_cr=`echo X | tr X '\015'` +# On cygwin, bash can eat \r inside `` if the user requested igncr. +# But we know of no other shell where ac_cr would be empty at this +# point, so we can use a bashism as a fallback. +if test "x$ac_cr" = x; then + eval ac_cr=\$\'\\r\' +fi +ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` +if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then + ac_cs_awk_cr='\\r' +else + ac_cs_awk_cr=$ac_cr +fi + +echo 'BEGIN {' >"$ac_tmp/subs1.awk" && +_ACEOF + + +{ + echo "cat >conf$$subs.awk <<_ACEOF" && + echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && + echo "_ACEOF" +} >conf$$subs.sh || + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 +ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` +ac_delim='%!_!# ' +for ac_last_try in false false false false false :; do + . ./conf$$subs.sh || + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 + + ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` + if test $ac_delim_n = $ac_delim_num; then + break + elif $ac_last_try; then + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 + else + ac_delim="$ac_delim!$ac_delim _$ac_delim!! 
" + fi +done +rm -f conf$$subs.sh + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && +_ACEOF +sed -n ' +h +s/^/S["/; s/!.*/"]=/ +p +g +s/^[^!]*!// +:repl +t repl +s/'"$ac_delim"'$// +t delim +:nl +h +s/\(.\{148\}\)..*/\1/ +t more1 +s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ +p +n +b repl +:more1 +s/["\\]/\\&/g; s/^/"/; s/$/"\\/ +p +g +s/.\{148\}// +t nl +:delim +h +s/\(.\{148\}\)..*/\1/ +t more2 +s/["\\]/\\&/g; s/^/"/; s/$/"/ +p +b +:more2 +s/["\\]/\\&/g; s/^/"/; s/$/"\\/ +p +g +s/.\{148\}// +t delim +' >$CONFIG_STATUS || ac_write_fail=1 +rm -f conf$$subs.awk +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +_ACAWK +cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && + for (key in S) S_is_set[key] = 1 + FS = "" + +} +{ + line = $ 0 + nfields = split(line, field, "@") + substed = 0 + len = length(field[1]) + for (i = 2; i < nfields; i++) { + key = field[i] + keylen = length(key) + if (S_is_set[key]) { + value = S[key] + line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) + len += length(value) + length(field[++i]) + substed = 1 + } else + len += 1 + keylen + } + + print line +} + +_ACAWK +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then + sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" +else + cat +fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ + || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 +_ACEOF + +# VPATH may cause trouble with some makes, so we remove sole $(srcdir), +# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and +# trailing colons and then remove the whole line if VPATH becomes empty +# (actually we leave an empty line to preserve line numbers). 
+if test "x$srcdir" = x.; then + ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ +h +s/// +s/^/:/ +s/[ ]*$/:/ +s/:\$(srcdir):/:/g +s/:\${srcdir}:/:/g +s/:@srcdir@:/:/g +s/^:*// +s/:*$// +x +s/\(=[ ]*\).*/\1/ +G +s/\n// +s/^[^=]*=[ ]*$// +}' +fi + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +fi # test -n "$CONFIG_FILES" + +# Set up the scripts for CONFIG_HEADERS section. +# No need to generate them if there are no CONFIG_HEADERS. +# This happens for instance with `./config.status Makefile'. +if test -n "$CONFIG_HEADERS"; then +cat >"$ac_tmp/defines.awk" <<\_ACAWK || +BEGIN { +_ACEOF + +# Transform confdefs.h into an awk script `defines.awk', embedded as +# here-document in config.status, that substitutes the proper values into +# config.h.in to produce config.h. + +# Create a delimiter string that does not exist in confdefs.h, to ease +# handling of long lines. +ac_delim='%!_!# ' +for ac_last_try in false false :; do + ac_tt=`sed -n "/$ac_delim/p" confdefs.h` + if test -z "$ac_tt"; then + break + elif $ac_last_try; then + as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5 + else + ac_delim="$ac_delim!$ac_delim _$ac_delim!! " + fi +done + +# For the awk script, D is an array of macro values keyed by name, +# likewise P contains macro parameters if any. Preserve backslash +# newline sequences. 
+ +ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* +sed -n ' +s/.\{148\}/&'"$ac_delim"'/g +t rset +:rset +s/^[ ]*#[ ]*define[ ][ ]*/ / +t def +d +:def +s/\\$// +t bsnl +s/["\\]/\\&/g +s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ +D["\1"]=" \3"/p +s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p +d +:bsnl +s/["\\]/\\&/g +s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ +D["\1"]=" \3\\\\\\n"\\/p +t cont +s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p +t cont +d +:cont +n +s/.\{148\}/&'"$ac_delim"'/g +t clear +:clear +s/\\$// +t bsnlc +s/["\\]/\\&/g; s/^/"/; s/$/"/p +d +:bsnlc +s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p +b cont +' >$CONFIG_STATUS || ac_write_fail=1 + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 + for (key in D) D_is_set[key] = 1 + FS = "" +} +/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { + line = \$ 0 + split(line, arg, " ") + if (arg[1] == "#") { + defundef = arg[2] + mac1 = arg[3] + } else { + defundef = substr(arg[1], 2) + mac1 = arg[2] + } + split(mac1, mac2, "(") #) + macro = mac2[1] + prefix = substr(line, 1, index(line, defundef) - 1) + if (D_is_set[macro]) { + # Preserve the white space surrounding the "#". + print prefix "define", macro P[macro] D[macro] + next + } else { + # Replace #undef with comments. This is necessary, for example, + # in the case of _POSIX_SOURCE, which is predefined and required + # on some systems where configure will not decide to define it. + if (defundef == "undef") { + print "/*", prefix defundef, macro, "*/" + next + } + } +} +{ print } +_ACAWK +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 + as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 +fi # test -n "$CONFIG_HEADERS" + + +eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS " +shift +for ac_tag +do + case $ac_tag in + :[FHLC]) ac_mode=$ac_tag; continue;; + esac + case $ac_mode$ac_tag in + :[FHL]*:*);; + :L* | :C*:*) as_fn_error $? 
"invalid tag \`$ac_tag'" "$LINENO" 5;; + :[FH]-) ac_tag=-:-;; + :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; + esac + ac_save_IFS=$IFS + IFS=: + set x $ac_tag + IFS=$ac_save_IFS + shift + ac_file=$1 + shift + + case $ac_mode in + :L) ac_source=$1;; + :[FH]) + ac_file_inputs= + for ac_f + do + case $ac_f in + -) ac_f="$ac_tmp/stdin";; + *) # Look for the file first in the build tree, then in the source tree + # (if the path is not absolute). The absolute path cannot be DOS-style, + # because $ac_f cannot contain `:'. + test -f "$ac_f" || + case $ac_f in + [\\/$]*) false;; + *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; + esac || + as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; + esac + case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac + as_fn_append ac_file_inputs " '$ac_f'" + done + + # Let's still pretend it is `configure' which instantiates (i.e., don't + # use $as_me), people would be surprised to read: + # /* config.h. Generated by config.status. */ + configure_input='Generated from '` + $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' + `' by configure.' + if test x"$ac_file" != x-; then + configure_input="$ac_file. $configure_input" + { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 +$as_echo "$as_me: creating $ac_file" >&6;} + fi + # Neutralize special characters interpreted by sed in replacement strings. + case $configure_input in #( + *\&* | *\|* | *\\* ) + ac_sed_conf_input=`$as_echo "$configure_input" | + sed 's/[\\\\&|]/\\\\&/g'`;; #( + *) ac_sed_conf_input=$configure_input;; + esac + + case $ac_tag in + *:-:* | *:-) cat >"$ac_tmp/stdin" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; + esac + ;; + esac + + ac_dir=`$as_dirname -- "$ac_file" || +$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$ac_file" : 'X\(//\)[^/]' \| \ + X"$ac_file" : 'X\(//\)$' \| \ + X"$ac_file" : 'X\(/\)' \| . 
2>/dev/null || +$as_echo X"$ac_file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + as_dir="$ac_dir"; as_fn_mkdir_p + ac_builddir=. + +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` + # A ".." for each directory in $ac_dir_suffix. + ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix + +case $srcdir in + .) # We are building in place. + ac_srcdir=. + ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. + ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + + case $ac_mode in + :F) + # + # CONFIG_FILE + # + +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# If the template does not know about datarootdir, expand it. +# FIXME: This hack should be removed a few years after 2.60. 
+ac_datarootdir_hack=; ac_datarootdir_seen= +ac_sed_dataroot=' +/datarootdir/ { + p + q +} +/@datadir@/p +/@docdir@/p +/@infodir@/p +/@localedir@/p +/@mandir@/p' +case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in +*datarootdir*) ac_datarootdir_seen=yes;; +*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 +$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 + ac_datarootdir_hack=' + s&@datadir@&$datadir&g + s&@docdir@&$docdir&g + s&@infodir@&$infodir&g + s&@localedir@&$localedir&g + s&@mandir@&$mandir&g + s&\\\${datarootdir}&$datarootdir&g' ;; +esac +_ACEOF + +# Neutralize VPATH when `$srcdir' = `.'. +# Shell code in configure.ac might set extrasub. +# FIXME: do we really want to maintain this feature? +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +ac_sed_extra="$ac_vpsub +$extrasub +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +:t +/@[a-zA-Z_][a-zA-Z_0-9]*@/!b +s|@configure_input@|$ac_sed_conf_input|;t t +s&@top_builddir@&$ac_top_builddir_sub&;t t +s&@top_build_prefix@&$ac_top_build_prefix&;t t +s&@srcdir@&$ac_srcdir&;t t +s&@abs_srcdir@&$ac_abs_srcdir&;t t +s&@top_srcdir@&$ac_top_srcdir&;t t +s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t +s&@builddir@&$ac_builddir&;t t +s&@abs_builddir@&$ac_abs_builddir&;t t +s&@abs_top_builddir@&$ac_abs_top_builddir&;t t +$ac_datarootdir_hack +" +eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ + >$ac_tmp/out || as_fn_error $? 
"could not create $ac_file" "$LINENO" 5 + +test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && + { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && + { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ + "$ac_tmp/out"`; test -z "$ac_out"; } && + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined" >&5 +$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined" >&2;} + + rm -f "$ac_tmp/stdin" + case $ac_file in + -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; + *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; + esac \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + ;; + :H) + # + # CONFIG_HEADER + # + if test x"$ac_file" != x-; then + { + $as_echo "/* $configure_input */" \ + && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" + } >"$ac_tmp/config.h" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then + { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 +$as_echo "$as_me: $ac_file is unchanged" >&6;} + else + rm -f "$ac_file" + mv "$ac_tmp/config.h" "$ac_file" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + fi + else + $as_echo "/* $configure_input */" \ + && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ + || as_fn_error $? "could not create -" "$LINENO" 5 + fi + ;; + + + esac + +done # for ac_tag + + +as_fn_exit 0 +_ACEOF +ac_clean_files=$ac_clean_files_save + +test $ac_write_fail = 0 || + as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 + + +# configure is writing to config.log, and then calls config.status. +# config.status does its own redirection, appending to config.log. 
+# Unfortunately, on DOS this fails, as config.log is still kept open +# by configure, so config.status won't be able to write to it; its +# output is simply discarded. So we exec the FD to /dev/null, +# effectively closing config.log, so it can be properly (re)opened and +# appended to by config.status. When coming back to configure, we +# need to make the FD available again. +if test "$no_create" != yes; then + ac_cs_success=: + ac_config_status_args= + test "$silent" = yes && + ac_config_status_args="$ac_config_status_args --quiet" + exec 5>/dev/null + $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false + exec 5>>config.log + # Use ||, not &&, to avoid exiting from the if with $? = 1, which + # would make configure fail if this is the last instruction. + $ac_cs_success || as_fn_exit 1 +fi +if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 +$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} +fi + diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/configure.ac cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/configure.ac --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/configure.ac 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/configure.ac 2015-07-07 18:40:52.000000000 +0000 @@ -0,0 +1,193 @@ +AC_INIT([Haskell network package], [2.3.0.14], [libraries@haskell.org], [network]) + +ac_includes_default="$ac_includes_default +#ifdef HAVE_SYS_SOCKET_H +# include +#endif +#ifdef HAVE_NETINET_IN_H +# include +#endif +#ifdef HAVE_NETDB_H +# include +#endif +#ifdef HAVE_WINSOCK2_H +# include +#endif +#ifdef HAVE_WS2TCPIP_H +# include +// fix for MingW not defining IPV6_V6ONLY +# define IPV6_V6ONLY 27 +#endif" + +# Safety check: Ensure that we are in the correct source directory. 
+AC_CONFIG_SRCDIR([include/HsNet.h]) + +AC_CONFIG_HEADERS([include/HsNetworkConfig.h]) + +AC_CANONICAL_HOST + +AC_ARG_WITH([cc], + [C compiler], + [CC=$withval]) +AC_PROG_CC() + +AC_C_CONST + +dnl ** check for specific header (.h) files that we are interested in +AC_CHECK_HEADERS([fcntl.h limits.h stdlib.h sys/types.h unistd.h winsock2.h ws2tcpip.h]) +AC_CHECK_HEADERS([arpa/inet.h netdb.h netinet/in.h netinet/tcp.h sys/socket.h sys/uio.h sys/un.h linux/can.h linux/tcp.h]) +AC_CHECK_HEADERS([net/if.h]) + +AC_CHECK_FUNCS([readlink symlink if_nametoindex]) + +dnl ** check what fields struct msghdr contains +AC_CHECK_MEMBERS([struct msghdr.msg_control, struct msghdr.msg_accrights], [], [], [#if HAVE_SYS_TYPES_H +# include +#endif +#if HAVE_SYS_SOCKET_H +# include +#endif +#if HAVE_SYS_UIO_H +# include +#endif]) + +dnl ** check if struct sockaddr contains sa_len +AC_CHECK_MEMBERS([struct sockaddr.sa_len], [], [], [#if HAVE_SYS_TYPES_H +# include +#endif +#if HAVE_SYS_SOCKET_H +# include +#endif]) + +dnl -------------------------------------------------- +dnl * test for in_addr_t +dnl -------------------------------------------------- +AC_MSG_CHECKING(for in_addr_t in netinet/in.h) +AC_EGREP_HEADER(in_addr_t, netinet/in.h, + [ AC_DEFINE([HAVE_IN_ADDR_T], [1], [Define to 1 if in_addr_t is available.]) AC_MSG_RESULT(yes) ], + AC_MSG_RESULT(no)) + +dnl -------------------------------------------------- +dnl * test for SO_PEERCRED and struct ucred +dnl -------------------------------------------------- +AC_MSG_CHECKING(for SO_PEERCRED and struct ucred in sys/socket.h) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include +#include +#ifndef SO_PEERCRED +# error no SO_PEERCRED +#endif +struct ucred u;]])],ac_cv_ucred=yes,ac_cv_ucred=no) +if test "x$ac_cv_ucred" = xno; then + old_CFLAGS="$CFLAGS" + CFLAGS="-D_GNU_SOURCE $CFLAGS" + AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include +#include +#ifndef SO_PEERCRED +# error no SO_PEERCRED +#endif +struct ucred 
u;]])],ac_cv_ucred=yes,ac_cv_ucred=no) + if test "x$ac_cv_ucred" = xyes; then + EXTRA_CPPFLAGS=-D_GNU_SOURCE + fi +else + old_CFLAGS="$CFLAGS" +fi +if test "x$ac_cv_ucred" = xno; then + CFLAGS="$old_CFLAGS" + AC_MSG_RESULT(no) +else + AC_DEFINE([HAVE_STRUCT_UCRED], [1], [Define to 1 if you have both SO_PEERCRED and struct ucred.]) + AC_MSG_RESULT(yes) +fi + +dnl -------------------------------------------------- +dnl * test for GETPEEREID(3) +dnl -------------------------------------------------- +AC_MSG_CHECKING(for getpeereid in unistd.h) +AC_CHECK_FUNC( getpeereid, AC_DEFINE([HAVE_GETPEEREID], [1], [Define to 1 if you have getpeereid.] )) + +dnl -------------------------------------------------- +dnl * check for Windows networking libraries +dnl -------------------------------------------------- +AC_CHECK_LIB(ws2_32, _head_libws2_32_a) + +dnl -------------------------------------------------- +dnl * test for getaddrinfo as proxy for IPv6 support +dnl -------------------------------------------------- +AC_MSG_CHECKING(for getaddrinfo) +dnl Can't use AC_CHECK_FUNC here, because it doesn't do the right +dnl thing on Windows. +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[$ac_includes_default +int testme(){ getaddrinfo; }]])],[AC_DEFINE([HAVE_GETADDRINFO], [1], [Define to 1 if you have the `getaddrinfo' function.]) ac_have_getaddrinfo=yes; AC_MSG_RESULT(yes)],[AC_MSG_RESULT(no)]) + +dnl Under mingw, we may need to set WINVER to 0x0501 to expose getaddrinfo. 
+if test "x$ac_have_getaddrinfo" = x; then + old_CFLAGS="$CFLAGS" + if test "z$ac_cv_lib_ws2_32__head_libws2_32_a" = zyes; then + CFLAGS="-DWINVER=0x0501 $CFLAGS" + AC_MSG_CHECKING(for getaddrinfo if WINVER is 0x0501) + AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[$ac_includes_default + int testme(){ getaddrinfo; }]])],[AC_DEFINE([HAVE_GETADDRINFO], [1], [Define to 1 if you have the `getaddrinfo' function.]) AC_DEFINE([NEED_WINVER_XP], [1], [Define to 1 if the `getaddrinfo' function needs WINVER set.]) EXTRA_CPPFLAGS="-DWINVER=0x0501 $EXTRA_CPPFLAGS"; AC_MSG_RESULT(yes)],[CFLAGS="$old_CFLAGS"; AC_MSG_RESULT(no)]) + fi +fi + +dnl Missing under mingw, sigh. +AC_CHECK_FUNCS(gai_strerror) + +dnl ------------------------------------------------------- +dnl * test for AI_* flags that not all implementations have +dnl ------------------------------------------------------- +AC_CHECK_DECLS([AI_ADDRCONFIG, AI_ALL, AI_NUMERICSERV, AI_V4MAPPED]) + +dnl ------------------------------------------------------- +dnl * test for IPV6_V6ONLY flags that not all implementations have +dnl ------------------------------------------------------- +AC_CHECK_DECLS([IPV6_V6ONLY]) + +dnl ------------------------------------------------------- +dnl * test for IPPROTO_* macros/constants +dnl ------------------------------------------------------- +AC_CHECK_DECLS([IPPROTO_IP, IPPROTO_TCP, IPPROTO_IPV6]) + +dnl -------------------------------------------------- +dnl * test for Linux sendfile(2) +dnl -------------------------------------------------- +AC_MSG_CHECKING(for sendfile in sys/sendfile.h) +AC_EGREP_HEADER(sendfile, sys/sendfile.h, + [ AC_DEFINE([HAVE_LINUX_SENDFILE], [1], [Define to 1 if you have a Linux sendfile(2) implementation.]) AC_MSG_RESULT(yes) ], + AC_MSG_RESULT(no)) + +dnl -------------------------------------------------- +dnl * test for BSD sendfile(2) +dnl -------------------------------------------------- +AC_MSG_CHECKING(for sendfile in sys/socket.h) +AC_EGREP_HEADER(sendfile, 
sys/socket.h, + [ AC_DEFINE([HAVE_BSD_SENDFILE], [1], [Define to 1 if you have a BSDish sendfile(2) implementation.]) AC_MSG_RESULT(yes) ], + AC_MSG_RESULT(no)) + +AC_CHECK_FUNCS(gethostent) + +AC_CHECK_FUNCS(accept4) + +case "$host" in +*-mingw* | *-msys*) + EXTRA_SRCS="cbits/initWinSock.c, cbits/winSockErr.c, cbits/asyncAccept.c" + EXTRA_LIBS=ws2_32 + CALLCONV=stdcall ;; +*-solaris2*) + EXTRA_SRCS="cbits/ancilData.c" + EXTRA_LIBS="nsl, socket" + CALLCONV=ccall ;; +*) + EXTRA_SRCS="cbits/ancilData.c" + EXTRA_LIBS= + CALLCONV=ccall ;; +esac +AC_SUBST([CALLCONV]) +AC_SUBST([EXTRA_CPPFLAGS]) +AC_SUBST([EXTRA_LIBS]) +AC_SUBST([EXTRA_SRCS]) + +AC_CONFIG_FILES([network.buildinfo]) + +AC_OUTPUT diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/examples/EchoClient.hs cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/examples/EchoClient.hs --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/examples/EchoClient.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/examples/EchoClient.hs 2015-07-07 18:40:52.000000000 +0000 @@ -0,0 +1,18 @@ +-- Echo client program +module Main where + +import Network.Socket hiding (recv) +import Network.Socket.ByteString (recv, sendAll) +import qualified Data.ByteString.Char8 as C + +main :: IO () +main = withSocketsDo $ + do addrinfos <- getAddrInfo Nothing (Just "") (Just "3000") + let serveraddr = head addrinfos + sock <- socket (addrFamily serveraddr) Stream defaultProtocol + connect sock (addrAddress serveraddr) + sendAll sock $ C.pack "Hello, world!" 
+ msg <- recv sock 1024 + sClose sock + putStr "Received " + C.putStrLn msg diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/examples/EchoServer.hs cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/examples/EchoServer.hs --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/examples/EchoServer.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/examples/EchoServer.hs 2015-07-07 18:40:52.000000000 +0000 @@ -0,0 +1,27 @@ +-- Echo server program +module Main where + +import Control.Monad (unless) +import Network.Socket hiding (recv) +import qualified Data.ByteString as S +import Network.Socket.ByteString (recv, sendAll) + +main :: IO () +main = withSocketsDo $ + do addrinfos <- getAddrInfo + (Just (defaultHints {addrFlags = [AI_PASSIVE]})) + Nothing (Just "3000") + let serveraddr = head addrinfos + sock <- socket (addrFamily serveraddr) Stream defaultProtocol + bindSocket sock (addrAddress serveraddr) + listen sock 1 + (conn, _) <- accept sock + talk conn + sClose conn + sClose sock + + where + talk :: Socket -> IO () + talk conn = + do msg <- recv conn 1024 + unless (S.null msg) $ sendAll conn msg >> talk conn diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/include/HsNet.h cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/include/HsNet.h --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/include/HsNet.h 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/include/HsNet.h 2015-07-07 18:40:52.000000000 +0000 @@ -0,0 +1,188 @@ +/* ----------------------------------------------------------------------------- + * + * Definitions for package `net' which are visible in Haskell land. + * + * ---------------------------------------------------------------------------*/ + +#ifndef HSNET_H +#define HSNET_H + +#include "HsNetworkConfig.h" + +#ifdef NEED_WINVER +# define WINVER 0x0501 +#endif + +/* ultra-evil... 
*/ +#undef PACKAGE_BUGREPORT +#undef PACKAGE_NAME +#undef PACKAGE_STRING +#undef PACKAGE_TARNAME +#undef PACKAGE_VERSION + +#ifndef INLINE +# if defined(_MSC_VER) +# define INLINE extern __inline +# elif defined(__GNUC_GNU_INLINE__) +# define INLINE extern inline +# else +# define INLINE inline +# endif +#endif + +#ifdef HAVE_GETADDRINFO +# define IPV6_SOCKET_SUPPORT 1 +#else +# undef IPV6_SOCKET_SUPPORT +#endif + +#if defined(HAVE_WINSOCK2_H) && !defined(__CYGWIN__) +#include +# ifdef HAVE_WS2TCPIP_H +# include +// fix for MingW not defining IPV6_V6ONLY +# define IPV6_V6ONLY 27 +# endif + +extern int initWinSock (); +extern const char* getWSErrorDescr(int err); +extern void* newAcceptParams(int sock, + int sz, + void* sockaddr); +extern int acceptNewSock(void* d); +extern int acceptDoProc(void* param); + +#else + +#ifdef HAVE_LIMITS_H +# include +#endif +#ifdef HAVE_STDLIB_H +# include +#endif +#ifdef HAVE_UNISTD_H +#include +#endif +#ifdef HAVE_SYS_TYPES_H +# include +#endif +#ifdef HAVE_FCNTL_H +# include +#endif +#ifdef HAVE_SYS_UIO_H +# include +#endif +#ifdef HAVE_SYS_SOCKET_H +# include +#endif +#ifdef HAVE_LINUX_TCP_H +# include +#elif HAVE_NETINET_TCP_H +# include +#endif +#ifdef HAVE_NETINET_IN_H +# include +#endif +#ifdef HAVE_SYS_UN_H +# include +#endif +#ifdef HAVE_ARPA_INET_H +# include +#endif +#ifdef HAVE_NETDB_H +#include +#endif +#ifdef HAVE_LINUX_CAN_H +# include +# define CAN_SOCKET_SUPPORT 1 +#endif +#ifdef HAVE_NET_IF +# include +#endif + +#ifdef HAVE_BSD_SENDFILE +#include +#endif +#ifdef HAVE_LINUX_SENDFILE +#if !defined(__USE_FILE_OFFSET64) +#include +#endif +#endif + +extern int +sendFd(int sock, int outfd); + +extern int +recvFd(int sock); + +#endif /* HAVE_WINSOCK2_H && !__CYGWIN */ + +INLINE char * +my_inet_ntoa( +#if defined(HAVE_WINSOCK2_H) + u_long addr +#elif defined(HAVE_IN_ADDR_T) + in_addr_t addr +#elif defined(HAVE_INTTYPES_H) + u_int32_t addr +#else + unsigned long addr +#endif + ) +{ + struct in_addr a; + a.s_addr = addr; + 
return inet_ntoa(a); +} + +#ifdef HAVE_GETADDRINFO +INLINE int +hsnet_getnameinfo(const struct sockaddr* a,socklen_t b, char* c, +# if defined(HAVE_WINSOCK2_H) && !defined(__CYGWIN__) + DWORD d, char* e, DWORD f, int g) +# else + socklen_t d, char* e, socklen_t f, int g) +# endif +{ + return getnameinfo(a,b,c,d,e,f,g); +} + +INLINE int +hsnet_getaddrinfo(const char *hostname, const char *servname, + const struct addrinfo *hints, struct addrinfo **res) +{ + return getaddrinfo(hostname, servname, hints, res); +} + +INLINE void +hsnet_freeaddrinfo(struct addrinfo *ai) +{ + freeaddrinfo(ai); +} +#endif + +#if defined(HAVE_WINSOCK2_H) && !defined(cygwin32_HOST_OS) +# define WITH_WINSOCK 1 +#endif + +#if !defined(mingw32_HOST_OS) && !defined(_WIN32) +# define DOMAIN_SOCKET_SUPPORT 1 +#endif + +#if !defined(CALLCONV) +# if defined(WITH_WINSOCK) +# define CALLCONV stdcall +# else +# define CALLCONV ccall +# endif +#endif + +#if !defined(IOV_MAX) +# define IOV_MAX 1024 +#endif + +#if !defined(SOCK_NONBLOCK) // Missing define in Bionic libc (Android) +# define SOCK_NONBLOCK O_NONBLOCK +#endif + +#endif /* HSNET_H */ diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/include/HsNetworkConfig.h cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/include/HsNetworkConfig.h --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/include/HsNetworkConfig.h 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/include/HsNetworkConfig.h 2015-07-07 18:40:52.000000000 +0000 @@ -0,0 +1,175 @@ +/* include/HsNetworkConfig.h. Generated from HsNetworkConfig.h.in by configure. */ +/* include/HsNetworkConfig.h.in. Generated from configure.ac by autoheader. */ + +/* Define to 1 if you have the `accept4' function. */ +/* #undef HAVE_ACCEPT4 */ + +/* Define to 1 if you have the header file. */ +#define HAVE_ARPA_INET_H 1 + +/* Define to 1 if you have a BSDish sendfile(2) implementation. 
*/ +#define HAVE_BSD_SENDFILE 1 + +/* Define to 1 if you have the declaration of `AI_ADDRCONFIG', and to 0 if you + don't. */ +#define HAVE_DECL_AI_ADDRCONFIG 1 + +/* Define to 1 if you have the declaration of `AI_ALL', and to 0 if you don't. + */ +#define HAVE_DECL_AI_ALL 1 + +/* Define to 1 if you have the declaration of `AI_NUMERICSERV', and to 0 if + you don't. */ +#define HAVE_DECL_AI_NUMERICSERV 1 + +/* Define to 1 if you have the declaration of `AI_V4MAPPED', and to 0 if you + don't. */ +#define HAVE_DECL_AI_V4MAPPED 1 + +/* Define to 1 if you have the declaration of `IPPROTO_IP', and to 0 if you + don't. */ +#define HAVE_DECL_IPPROTO_IP 1 + +/* Define to 1 if you have the declaration of `IPPROTO_IPV6', and to 0 if you + don't. */ +#define HAVE_DECL_IPPROTO_IPV6 1 + +/* Define to 1 if you have the declaration of `IPPROTO_TCP', and to 0 if you + don't. */ +#define HAVE_DECL_IPPROTO_TCP 1 + +/* Define to 1 if you have the declaration of `IPV6_V6ONLY', and to 0 if you + don't. */ +#define HAVE_DECL_IPV6_V6ONLY 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_FCNTL_H 1 + +/* Define to 1 if you have the `gai_strerror' function. */ +#define HAVE_GAI_STRERROR 1 + +/* Define to 1 if you have the `getaddrinfo' function. */ +#define HAVE_GETADDRINFO 1 + +/* Define to 1 if you have the `gethostent' function. */ +#define HAVE_GETHOSTENT 1 + +/* Define to 1 if you have getpeereid. */ +#define HAVE_GETPEEREID 1 + +/* Define to 1 if you have the `if_nametoindex' function. */ +#define HAVE_IF_NAMETOINDEX 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_INTTYPES_H 1 + +/* Define to 1 if in_addr_t is available. */ +#define HAVE_IN_ADDR_T 1 + +/* Define to 1 if you have the `ws2_32' library (-lws2_32). */ +/* #undef HAVE_LIBWS2_32 */ + +/* Define to 1 if you have the header file. */ +#define HAVE_LIMITS_H 1 + +/* Define to 1 if you have the header file. 
*/ +/* #undef HAVE_LINUX_CAN_H */ + +/* Define to 1 if you have a Linux sendfile(2) implementation. */ +/* #undef HAVE_LINUX_SENDFILE */ + +/* Define to 1 if you have the header file. */ +#define HAVE_MEMORY_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_NETDB_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_NETINET_IN_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_NETINET_TCP_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_NET_IF_H 1 + +/* Define to 1 if you have the `readlink' function. */ +#define HAVE_READLINK 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if `msg_accrights' is a member of `struct msghdr'. */ +/* #undef HAVE_STRUCT_MSGHDR_MSG_ACCRIGHTS */ + +/* Define to 1 if `msg_control' is a member of `struct msghdr'. */ +#define HAVE_STRUCT_MSGHDR_MSG_CONTROL 1 + +/* Define to 1 if `sa_len' is a member of `struct sockaddr'. */ +#define HAVE_STRUCT_SOCKADDR_SA_LEN 1 + +/* Define to 1 if you have both SO_PEERCRED and struct ucred. */ +/* #undef HAVE_STRUCT_UCRED */ + +/* Define to 1 if you have the `symlink' function. */ +#define HAVE_SYMLINK 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SOCKET_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_UIO_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_UN_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UNISTD_H 1 + +/* Define to 1 if you have the header file. 
*/ +/* #undef HAVE_WINSOCK2_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_WS2TCPIP_H */ + +/* Define to 1 if the `getaddrinfo' function needs WINVER set. */ +/* #undef NEED_WINVER_XP */ + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "libraries@haskell.org" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "Haskell network package" + +/* Define to the full name and version of this package. */ +#define PACKAGE_STRING "Haskell network package 2.3.0.14" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "network" + +/* Define to the home page for this package. */ +#define PACKAGE_URL "" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "2.3.0.14" + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Define to empty if `const' does not conform to ANSI C. */ +/* #undef const */ diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/include/HsNetworkConfig.h.in cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/include/HsNetworkConfig.h.in --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/include/HsNetworkConfig.h.in 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/include/HsNetworkConfig.h.in 2015-07-07 18:40:52.000000000 +0000 @@ -0,0 +1,177 @@ +/* include/HsNetworkConfig.h.in. Generated from configure.ac by autoheader. */ + +/* Define to 1 if you have the `accept4' function. */ +#undef HAVE_ACCEPT4 + +/* Define to 1 if you have the header file. */ +#undef HAVE_ARPA_INET_H + +/* Define to 1 if you have a BSDish sendfile(2) implementation. */ +#undef HAVE_BSD_SENDFILE + +/* Define to 1 if you have the declaration of `AI_ADDRCONFIG', and to 0 if you + don't. */ +#undef HAVE_DECL_AI_ADDRCONFIG + +/* Define to 1 if you have the declaration of `AI_ALL', and to 0 if you don't. 
+ */ +#undef HAVE_DECL_AI_ALL + +/* Define to 1 if you have the declaration of `AI_NUMERICSERV', and to 0 if + you don't. */ +#undef HAVE_DECL_AI_NUMERICSERV + +/* Define to 1 if you have the declaration of `AI_V4MAPPED', and to 0 if you + don't. */ +#undef HAVE_DECL_AI_V4MAPPED + +/* Define to 1 if you have the declaration of `IPPROTO_IP', and to 0 if you + don't. */ +#undef HAVE_DECL_IPPROTO_IP + +/* Define to 1 if you have the declaration of `IPPROTO_IPV6', and to 0 if you + don't. */ +#undef HAVE_DECL_IPPROTO_IPV6 + +/* Define to 1 if you have the declaration of `IPPROTO_TCP', and to 0 if you + don't. */ +#undef HAVE_DECL_IPPROTO_TCP + +/* Define to 1 if you have the declaration of `IPV6_V6ONLY', and to 0 if you + don't. */ +#undef HAVE_DECL_IPV6_V6ONLY + +/* Define to 1 if you have the header file. */ +#undef HAVE_FCNTL_H + +/* Define to 1 if you have the `gai_strerror' function. */ +#undef HAVE_GAI_STRERROR + +/* Define to 1 if you have the `getaddrinfo' function. */ +#undef HAVE_GETADDRINFO + +/* Define to 1 if you have the `gethostent' function. */ +#undef HAVE_GETHOSTENT + +/* Define to 1 if you have getpeereid. */ +#undef HAVE_GETPEEREID + +/* Define to 1 if you have the `if_nametoindex' function. */ +#undef HAVE_IF_NAMETOINDEX + +/* Define to 1 if you have the header file. */ +#undef HAVE_INTTYPES_H + +/* Define to 1 if in_addr_t is available. */ +#undef HAVE_IN_ADDR_T + +/* Define to 1 if you have the `ws2_32' library (-lws2_32). */ +#undef HAVE_LIBWS2_32 + +/* Define to 1 if you have the header file. */ +#undef HAVE_LIMITS_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_LINUX_CAN_H + +/* Define to 1 if you have a Linux sendfile(2) implementation. */ +#undef HAVE_LINUX_SENDFILE + +/* Define to 1 if you have the header file. */ +#undef HAVE_LINUX_TCP_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_MEMORY_H + +/* Define to 1 if you have the header file. 
*/ +#undef HAVE_NETDB_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_NETINET_IN_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_NETINET_TCP_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_NET_IF_H + +/* Define to 1 if you have the `readlink' function. */ +#undef HAVE_READLINK + +/* Define to 1 if you have the header file. */ +#undef HAVE_STDINT_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STDLIB_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STRINGS_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STRING_H + +/* Define to 1 if `msg_accrights' is a member of `struct msghdr'. */ +#undef HAVE_STRUCT_MSGHDR_MSG_ACCRIGHTS + +/* Define to 1 if `msg_control' is a member of `struct msghdr'. */ +#undef HAVE_STRUCT_MSGHDR_MSG_CONTROL + +/* Define to 1 if `sa_len' is a member of `struct sockaddr'. */ +#undef HAVE_STRUCT_SOCKADDR_SA_LEN + +/* Define to 1 if you have both SO_PEERCRED and struct ucred. */ +#undef HAVE_STRUCT_UCRED + +/* Define to 1 if you have the `symlink' function. */ +#undef HAVE_SYMLINK + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_SOCKET_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_STAT_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_TYPES_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_UIO_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_UN_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_UNISTD_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_WINSOCK2_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_WS2TCPIP_H + +/* Define to 1 if the `getaddrinfo' function needs WINVER set. */ +#undef NEED_WINVER_XP + +/* Define to the address where bug reports for this package should be sent. */ +#undef PACKAGE_BUGREPORT + +/* Define to the full name of this package. 
*/ +#undef PACKAGE_NAME + +/* Define to the full name and version of this package. */ +#undef PACKAGE_STRING + +/* Define to the one symbol short name of this package. */ +#undef PACKAGE_TARNAME + +/* Define to the home page for this package. */ +#undef PACKAGE_URL + +/* Define to the version of this package. */ +#undef PACKAGE_VERSION + +/* Define to 1 if you have the ANSI C header files. */ +#undef STDC_HEADERS + +/* Define to empty if `const' does not conform to ANSI C. */ +#undef const diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/install-sh cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/install-sh --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/install-sh 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/install-sh 2015-07-07 18:40:52.000000000 +0000 @@ -0,0 +1,295 @@ +#!/bin/sh +# install - install a program, script, or datafile + +scriptversion=2003-09-24.23 + +# This originates from X11R5 (mit/util/scripts/install.sh), which was +# later released in X11R6 (xc/config/util/install.sh) with the +# following copyright and license. +# +# Copyright (C) 1994 X Consortium +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN +# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC- +# TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# +# Except as contained in this notice, the name of the X Consortium shall not +# be used in advertising or otherwise to promote the sale, use or other deal- +# ings in this Software without prior written authorization from the X Consor- +# tium. +# +# +# FSF changes to this file are in the public domain. +# +# Calling this script install-sh is preferred over install.sh, to prevent +# `make' implicit rules from creating a file called install from it +# when there is no Makefile. +# +# This script is compatible with the BSD install script, but was written +# from scratch. It can only install one file at a time, a restriction +# shared with many OS's install programs. + +# set DOITPROG to echo to test this script + +# Don't use :- since 4.3BSD and earlier shells don't like it. +doit="${DOITPROG-}" + +# put in absolute paths if you don't have them in your path; or use env. vars. + +mvprog="${MVPROG-mv}" +cpprog="${CPPROG-cp}" +chmodprog="${CHMODPROG-chmod}" +chownprog="${CHOWNPROG-chown}" +chgrpprog="${CHGRPPROG-chgrp}" +stripprog="${STRIPPROG-strip}" +rmprog="${RMPROG-rm}" +mkdirprog="${MKDIRPROG-mkdir}" + +transformbasename= +transform_arg= +instcmd="$mvprog" +chmodcmd="$chmodprog 0755" +chowncmd= +chgrpcmd= +stripcmd= +rmcmd="$rmprog -f" +mvcmd="$mvprog" +src= +dst= +dir_arg= + +usage="Usage: $0 [OPTION]... SRCFILE DSTFILE + or: $0 -d DIR1 DIR2... + +In the first form, install SRCFILE to DSTFILE, removing SRCFILE by default. +In the second, create the directory path DIR. + +Options: +-b=TRANSFORMBASENAME +-c copy source (using $cpprog) instead of moving (using $mvprog). +-d create directories instead of installing files. +-g GROUP $chgrp installed files to GROUP. +-m MODE $chmod installed files to MODE. 
+-o USER $chown installed files to USER. +-s strip installed files (using $stripprog). +-t=TRANSFORM +--help display this help and exit. +--version display version info and exit. + +Environment variables override the default commands: + CHGRPPROG CHMODPROG CHOWNPROG CPPROG MKDIRPROG MVPROG RMPROG STRIPPROG +" + +while test -n "$1"; do + case $1 in + -b=*) transformbasename=`echo $1 | sed 's/-b=//'` + shift + continue;; + + -c) instcmd=$cpprog + shift + continue;; + + -d) dir_arg=true + shift + continue;; + + -g) chgrpcmd="$chgrpprog $2" + shift + shift + continue;; + + --help) echo "$usage"; exit 0;; + + -m) chmodcmd="$chmodprog $2" + shift + shift + continue;; + + -o) chowncmd="$chownprog $2" + shift + shift + continue;; + + -s) stripcmd=$stripprog + shift + continue;; + + -t=*) transformarg=`echo $1 | sed 's/-t=//'` + shift + continue;; + + --version) echo "$0 $scriptversion"; exit 0;; + + *) if test -z "$src"; then + src=$1 + else + # this colon is to work around a 386BSD /bin/sh bug + : + dst=$1 + fi + shift + continue;; + esac +done + +if test -z "$src"; then + echo "$0: no input file specified." >&2 + exit 1 +fi + +# Protect names starting with `-'. +case $src in + -*) src=./$src ;; +esac + +if test -n "$dir_arg"; then + dst=$src + src= + + if test -d "$dst"; then + instcmd=: + chmodcmd= + else + instcmd=$mkdirprog + fi +else + # Waiting for this to be detected by the "$instcmd $src $dsttmp" command + # might cause directories to be created, which would be especially bad + # if $src (and thus $dsttmp) contains '*'. + if test ! -f "$src" && test ! -d "$src"; then + echo "$0: $src does not exist." >&2 + exit 1 + fi + + if test -z "$dst"; then + echo "$0: no destination specified." >&2 + exit 1 + fi + + # Protect names starting with `-'. + case $dst in + -*) dst=./$dst ;; + esac + + # If destination is a directory, append the input filename; won't work + # if double slashes aren't ignored. 
+ if test -d "$dst"; then + dst=$dst/`basename "$src"` + fi +fi + +# This sed command emulates the dirname command. +dstdir=`echo "$dst" | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'` + +# Make sure that the destination directory exists. + +# Skip lots of stat calls in the usual case. +if test ! -d "$dstdir"; then + defaultIFS=' + ' + IFS="${IFS-$defaultIFS}" + + oIFS=$IFS + # Some sh's can't handle IFS=/ for some reason. + IFS='%' + set - `echo "$dstdir" | sed -e 's@/@%@g' -e 's@^%@/@'` + IFS=$oIFS + + pathcomp= + + while test $# -ne 0 ; do + pathcomp=$pathcomp$1 + shift + test -d "$pathcomp" || $mkdirprog "$pathcomp" + pathcomp=$pathcomp/ + done +fi + +if test -n "$dir_arg"; then + $doit $instcmd "$dst" \ + && { test -z "$chowncmd" || $doit $chowncmd "$dst"; } \ + && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } \ + && { test -z "$stripcmd" || $doit $stripcmd "$dst"; } \ + && { test -z "$chmodcmd" || $doit $chmodcmd "$dst"; } + +else + # If we're going to rename the final executable, determine the name now. + if test -z "$transformarg"; then + dstfile=`basename "$dst"` + else + dstfile=`basename "$dst" $transformbasename \ + | sed $transformarg`$transformbasename + fi + + # don't allow the sed command to completely eliminate the filename. + test -z "$dstfile" && dstfile=`basename "$dst"` + + # Make a couple of temp file names in the proper directory. + dsttmp=$dstdir/_inst.$$_ + rmtmp=$dstdir/_rm.$$_ + + # Trap to clean up those temp files at exit. + trap 'status=$?; rm -f "$dsttmp" "$rmtmp" && exit $status' 0 + trap '(exit $?); exit' 1 2 13 15 + + # Move or copy the file name to the temp name + $doit $instcmd "$src" "$dsttmp" && + + # and set any options; do chmod last to preserve setuid bits. + # + # If any of these fail, we abort the whole thing. If we want to + # ignore errors from any of these, just make sure not to ignore + # errors from the above "$doit $instcmd $src $dsttmp" command. 
+ # + { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } \ + && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } \ + && { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } \ + && { test -z "$chmodcmd" || $doit $chmodcmd "$dsttmp"; } && + + # Now remove or move aside any old file at destination location. We + # try this two ways since rm can't unlink itself on some systems and + # the destination file might be busy for other reasons. In this case, + # the final cleanup might fail but the new file should still install + # successfully. + { + if test -f "$dstdir/$dstfile"; then + $doit $rmcmd -f "$dstdir/$dstfile" 2>/dev/null \ + || $doit $mvcmd -f "$dstdir/$dstfile" "$rmtmp" 2>/dev/null \ + || { + echo "$0: cannot unlink or rename $dstdir/$dstfile" >&2 + (exit 1); exit + } + else + : + fi + } && + + # Now rename the file to the real destination. + $doit $mvcmd "$dsttmp" "$dstdir/$dstfile" +fi && + +# The final little trick to "correctly" pass the exit status to the exit trap. +{ + (exit 0); exit +} + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-end: "$" +# End: diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/LICENSE cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/LICENSE --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/LICENSE 2015-07-07 18:40:52.000000000 +0000 @@ -0,0 +1,29 @@ +Copyright (c) 2002-2010, The University Court of the University of Glasgow. +Copyright (c) 2007-2010, Johan Tibell + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +- Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. 
+ +- Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +- Neither name of the University nor the names of its contributors may be +used to endorse or promote products derived from this software without +specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY COURT OF THE UNIVERSITY OF +GLASGOW AND THE CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +UNIVERSITY COURT OF THE UNIVERSITY OF GLASGOW OR THE CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. 
diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/Network/BSD.hsc cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/Network/BSD.hsc --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/Network/BSD.hsc 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/Network/BSD.hsc 2015-07-07 18:40:52.000000000 +0000 @@ -0,0 +1,579 @@ +{-# LANGUAGE CPP, ForeignFunctionInterface #-} +----------------------------------------------------------------------------- +-- | +-- Module : Network.BSD +-- Copyright : (c) The University of Glasgow 2001 +-- License : BSD-style (see the file libraries/network/LICENSE) +-- +-- Maintainer : libraries@haskell.org +-- Stability : experimental +-- Portability : non-portable +-- +-- The "Network.BSD" module defines Haskell bindings to network +-- programming functionality provided by BSD Unix derivatives. +-- +----------------------------------------------------------------------------- + +#include "HsNet.h" + +module Network.BSD + ( + -- * Host names + HostName + , getHostName + + , HostEntry(..) + , getHostByName + , getHostByAddr + , hostAddress + +#if defined(HAVE_GETHOSTENT) && !defined(cygwin32_HOST_OS) && !defined(mingw32_HOST_OS) && !defined(_WIN32) + , getHostEntries + + -- ** Low level functionality + , setHostEntry + , getHostEntry + , endHostEntry +#endif + + -- * Service names + , ServiceEntry(..) + , ServiceName + , getServiceByName + , getServiceByPort + , getServicePortNumber + +#if !defined(cygwin32_HOST_OS) && !defined(mingw32_HOST_OS) && !defined(_WIN32) + , getServiceEntries + + -- ** Low level functionality + , getServiceEntry + , setServiceEntry + , endServiceEntry +#endif + + -- * Protocol names + , ProtocolName + , ProtocolNumber + , ProtocolEntry(..) 
+ , getProtocolByName + , getProtocolByNumber + , getProtocolNumber + , defaultProtocol + +#if !defined(cygwin32_HOST_OS) && !defined(mingw32_HOST_OS) && !defined(_WIN32) + , getProtocolEntries + -- ** Low level functionality + , setProtocolEntry + , getProtocolEntry + , endProtocolEntry +#endif + + -- * Port numbers + , PortNumber + + -- * Network names + , NetworkName + , NetworkAddr + , NetworkEntry(..) + +#if !defined(cygwin32_HOST_OS) && !defined(mingw32_HOST_OS) && !defined(_WIN32) + , getNetworkByName + , getNetworkByAddr + , getNetworkEntries + -- ** Low level functionality + , setNetworkEntry + , getNetworkEntry + , endNetworkEntry +#endif + +#if defined(HAVE_IF_NAMETOINDEX) + -- * Interface names + , ifNameToIndex +#endif + + ) where + +import Network.Socket + +import Control.Concurrent (MVar, newMVar, withMVar) +import qualified Control.Exception as E +import Foreign.C.String (CString, peekCString, withCString) +#if defined(HAVE_WINSOCK2_H) && !defined(cygwin32_HOST_OS) +import Foreign.C.Types ( CShort ) +#endif +import Foreign.C.Types ( CInt(..), CUInt(..), CULong(..), CSize(..) ) +import Foreign.Ptr (Ptr, nullPtr) +import Foreign.Storable (Storable(..)) +import Foreign.Marshal.Array (allocaArray0, peekArray0) +import Foreign.Marshal.Utils (with, fromBool) +import Data.Typeable +import System.IO.Error (ioeSetErrorString, mkIOError) +import System.IO.Unsafe (unsafePerformIO) + +import GHC.IO.Exception + +import Control.Monad (liftM) + +import Network.Socket.Internal (throwSocketErrorIfMinus1_) + +-- --------------------------------------------------------------------------- +-- Basic Types + +type ProtocolName = String + +-- --------------------------------------------------------------------------- +-- Service Database Access + +-- Calling getServiceByName for a given service and protocol returns +-- the systems service entry. This should be used to find the port +-- numbers for standard protocols such as SMTP and FTP. 
The remaining +-- three functions should be used for browsing the service database +-- sequentially. + +-- Calling setServiceEntry with True indicates that the service +-- database should be left open between calls to getServiceEntry. To +-- close the database a call to endServiceEntry is required. This +-- database file is usually stored in the file /etc/services. + +data ServiceEntry = + ServiceEntry { + serviceName :: ServiceName, -- Official Name + serviceAliases :: [ServiceName], -- aliases + servicePort :: PortNumber, -- Port Number ( network byte order ) + serviceProtocol :: ProtocolName -- Protocol + } deriving (Show, Typeable) + +instance Storable ServiceEntry where + sizeOf _ = #const sizeof(struct servent) + alignment _ = alignment (undefined :: CInt) -- ??? + + peek p = do + s_name <- (#peek struct servent, s_name) p >>= peekCString + s_aliases <- (#peek struct servent, s_aliases) p + >>= peekArray0 nullPtr + >>= mapM peekCString + s_port <- (#peek struct servent, s_port) p + s_proto <- (#peek struct servent, s_proto) p >>= peekCString + return (ServiceEntry { + serviceName = s_name, + serviceAliases = s_aliases, +#if defined(HAVE_WINSOCK2_H) && !defined(cygwin32_HOST_OS) + servicePort = PortNum (fromIntegral (s_port :: CShort)), +#else + -- s_port is already in network byte order, but it + -- might be the wrong size. + servicePort = PortNum (fromIntegral (s_port :: CInt)), +#endif + serviceProtocol = s_proto + }) + + poke _p = error "Storable.poke(BSD.ServiceEntry) not implemented" + + +-- | Get service by name. 
+getServiceByName :: ServiceName -- Service Name + -> ProtocolName -- Protocol Name + -> IO ServiceEntry -- Service Entry +getServiceByName name proto = withLock $ do + withCString name $ \ cstr_name -> do + withCString proto $ \ cstr_proto -> do + throwNoSuchThingIfNull "getServiceByName" "no such service entry" + $ c_getservbyname cstr_name cstr_proto + >>= peek + +foreign import CALLCONV unsafe "getservbyname" + c_getservbyname :: CString -> CString -> IO (Ptr ServiceEntry) + +-- | Get the service given a 'PortNumber' and 'ProtocolName'. +getServiceByPort :: PortNumber -> ProtocolName -> IO ServiceEntry +getServiceByPort (PortNum port) proto = withLock $ do + withCString proto $ \ cstr_proto -> do + throwNoSuchThingIfNull "getServiceByPort" "no such service entry" + $ c_getservbyport (fromIntegral port) cstr_proto + >>= peek + +foreign import CALLCONV unsafe "getservbyport" + c_getservbyport :: CInt -> CString -> IO (Ptr ServiceEntry) + +-- | Get the 'PortNumber' corresponding to the 'ServiceName'. 
+getServicePortNumber :: ServiceName -> IO PortNumber +getServicePortNumber name = do + (ServiceEntry _ _ port _) <- getServiceByName name "tcp" + return port + +#if !defined(cygwin32_HOST_OS) && !defined(mingw32_HOST_OS) && !defined(_WIN32) +getServiceEntry :: IO ServiceEntry +getServiceEntry = withLock $ do + throwNoSuchThingIfNull "getServiceEntry" "no such service entry" + $ c_getservent + >>= peek + +foreign import ccall unsafe "getservent" c_getservent :: IO (Ptr ServiceEntry) + +setServiceEntry :: Bool -> IO () +setServiceEntry flg = withLock $ c_setservent (fromBool flg) + +foreign import ccall unsafe "setservent" c_setservent :: CInt -> IO () + +endServiceEntry :: IO () +endServiceEntry = withLock $ c_endservent + +foreign import ccall unsafe "endservent" c_endservent :: IO () + +getServiceEntries :: Bool -> IO [ServiceEntry] +getServiceEntries stayOpen = do + setServiceEntry stayOpen + getEntries (getServiceEntry) (endServiceEntry) +#endif + +-- --------------------------------------------------------------------------- +-- Protocol Entries + +-- The following relate directly to the corresponding UNIX C +-- calls for returning the protocol entries. The protocol entry is +-- represented by the Haskell type ProtocolEntry. + +-- As for setServiceEntry above, calling setProtocolEntry. +-- determines whether or not the protocol database file, usually +-- @/etc/protocols@, is to be kept open between calls of +-- getProtocolEntry. Similarly, + +data ProtocolEntry = + ProtocolEntry { + protoName :: ProtocolName, -- Official Name + protoAliases :: [ProtocolName], -- aliases + protoNumber :: ProtocolNumber -- Protocol Number + } deriving (Read, Show, Typeable) + +instance Storable ProtocolEntry where + sizeOf _ = #const sizeof(struct protoent) + alignment _ = alignment (undefined :: CInt) -- ??? 
+ + peek p = do + p_name <- (#peek struct protoent, p_name) p >>= peekCString + p_aliases <- (#peek struct protoent, p_aliases) p + >>= peekArray0 nullPtr + >>= mapM peekCString +#if defined(HAVE_WINSOCK2_H) && !defined(cygwin32_HOST_OS) + -- With WinSock, the protocol number is only a short; + -- hoist it in as such, but represent it on the Haskell side + -- as a CInt. + p_proto_short <- (#peek struct protoent, p_proto) p + let p_proto = fromIntegral (p_proto_short :: CShort) +#else + p_proto <- (#peek struct protoent, p_proto) p +#endif + return (ProtocolEntry { + protoName = p_name, + protoAliases = p_aliases, + protoNumber = p_proto + }) + + poke _p = error "Storable.poke(BSD.ProtocolEntry) not implemented" + +getProtocolByName :: ProtocolName -> IO ProtocolEntry +getProtocolByName name = withLock $ do + withCString name $ \ name_cstr -> do + throwNoSuchThingIfNull "getProtocolByName" ("no such protocol name: " ++ name) + $ c_getprotobyname name_cstr + >>= peek + +foreign import CALLCONV unsafe "getprotobyname" + c_getprotobyname :: CString -> IO (Ptr ProtocolEntry) + + +getProtocolByNumber :: ProtocolNumber -> IO ProtocolEntry +getProtocolByNumber num = withLock $ do + throwNoSuchThingIfNull "getProtocolByNumber" ("no such protocol number: " ++ show num) + $ c_getprotobynumber (fromIntegral num) + >>= peek + +foreign import CALLCONV unsafe "getprotobynumber" + c_getprotobynumber :: CInt -> IO (Ptr ProtocolEntry) + + +getProtocolNumber :: ProtocolName -> IO ProtocolNumber +getProtocolNumber proto = do + (ProtocolEntry _ _ num) <- getProtocolByName proto + return num + +#if !defined(cygwin32_HOST_OS) && !defined(mingw32_HOST_OS) && !defined(_WIN32) +getProtocolEntry :: IO ProtocolEntry -- Next Protocol Entry from DB +getProtocolEntry = withLock $ do + ent <- throwNoSuchThingIfNull "getProtocolEntry" "no such protocol entry" + $ c_getprotoent + peek ent + +foreign import ccall unsafe "getprotoent" c_getprotoent :: IO (Ptr ProtocolEntry) + +setProtocolEntry :: 
Bool -> IO () -- Keep DB Open ? +setProtocolEntry flg = withLock $ c_setprotoent (fromBool flg) + +foreign import ccall unsafe "setprotoent" c_setprotoent :: CInt -> IO () + +endProtocolEntry :: IO () +endProtocolEntry = withLock $ c_endprotoent + +foreign import ccall unsafe "endprotoent" c_endprotoent :: IO () + +getProtocolEntries :: Bool -> IO [ProtocolEntry] +getProtocolEntries stayOpen = withLock $ do + setProtocolEntry stayOpen + getEntries (getProtocolEntry) (endProtocolEntry) +#endif + +-- --------------------------------------------------------------------------- +-- Host lookups + +data HostEntry = + HostEntry { + hostName :: HostName, -- Official Name + hostAliases :: [HostName], -- aliases + hostFamily :: Family, -- Host Type (currently AF_INET) + hostAddresses :: [HostAddress] -- Set of Network Addresses (in network byte order) + } deriving (Read, Show, Typeable) + +instance Storable HostEntry where + sizeOf _ = #const sizeof(struct hostent) + alignment _ = alignment (undefined :: CInt) -- ??? 
+ + peek p = do + h_name <- (#peek struct hostent, h_name) p >>= peekCString + h_aliases <- (#peek struct hostent, h_aliases) p + >>= peekArray0 nullPtr + >>= mapM peekCString + h_addrtype <- (#peek struct hostent, h_addrtype) p + -- h_length <- (#peek struct hostent, h_length) p + h_addr_list <- (#peek struct hostent, h_addr_list) p + >>= peekArray0 nullPtr + >>= mapM peek + return (HostEntry { + hostName = h_name, + hostAliases = h_aliases, +#if defined(HAVE_WINSOCK2_H) && !defined(cygwin32_HOST_OS) + hostFamily = unpackFamily (fromIntegral (h_addrtype :: CShort)), +#else + hostFamily = unpackFamily h_addrtype, +#endif + hostAddresses = h_addr_list + }) + + poke _p = error "Storable.poke(BSD.ServiceEntry) not implemented" + + +-- convenience function: +hostAddress :: HostEntry -> HostAddress +hostAddress (HostEntry nm _ _ ls) = + case ls of + [] -> error ("BSD.hostAddress: empty network address list for " ++ nm) + (x:_) -> x + +-- getHostByName must use the same lock as the *hostent functions +-- may cause problems if called concurrently. + +-- | Resolve a 'HostName' to IPv4 address. +getHostByName :: HostName -> IO HostEntry +getHostByName name = withLock $ do + withCString name $ \ name_cstr -> do + ent <- throwNoSuchThingIfNull "getHostByName" "no such host entry" + $ c_gethostbyname name_cstr + peek ent + +foreign import CALLCONV safe "gethostbyname" + c_gethostbyname :: CString -> IO (Ptr HostEntry) + + +-- The locking of gethostbyaddr is similar to gethostbyname. +-- | Get a 'HostEntry' corresponding to the given address and family. +-- Note that only IPv4 is currently supported. 
+getHostByAddr :: Family -> HostAddress -> IO HostEntry +getHostByAddr family addr = do + with addr $ \ ptr_addr -> withLock $ do + throwNoSuchThingIfNull "getHostByAddr" "no such host entry" + $ c_gethostbyaddr ptr_addr (fromIntegral (sizeOf addr)) (packFamily family) + >>= peek + +foreign import CALLCONV safe "gethostbyaddr" + c_gethostbyaddr :: Ptr HostAddress -> CInt -> CInt -> IO (Ptr HostEntry) + +#if defined(HAVE_GETHOSTENT) && !defined(cygwin32_HOST_OS) && !defined(mingw32_HOST_OS) && !defined(_WIN32) +getHostEntry :: IO HostEntry +getHostEntry = withLock $ do + throwNoSuchThingIfNull "getHostEntry" "unable to retrieve host entry" + $ c_gethostent + >>= peek + +foreign import ccall unsafe "gethostent" c_gethostent :: IO (Ptr HostEntry) + +setHostEntry :: Bool -> IO () +setHostEntry flg = withLock $ c_sethostent (fromBool flg) + +foreign import ccall unsafe "sethostent" c_sethostent :: CInt -> IO () + +endHostEntry :: IO () +endHostEntry = withLock $ c_endhostent + +foreign import ccall unsafe "endhostent" c_endhostent :: IO () + +getHostEntries :: Bool -> IO [HostEntry] +getHostEntries stayOpen = do + setHostEntry stayOpen + getEntries (getHostEntry) (endHostEntry) +#endif + +-- --------------------------------------------------------------------------- +-- Accessing network information + +-- Same set of access functions as for accessing host,protocol and +-- service system info, this time for the types of networks supported. + +-- network addresses are represented in host byte order. +type NetworkAddr = CULong + +type NetworkName = String + +data NetworkEntry = + NetworkEntry { + networkName :: NetworkName, -- official name + networkAliases :: [NetworkName], -- aliases + networkFamily :: Family, -- type + networkAddress :: NetworkAddr + } deriving (Read, Show, Typeable) + +instance Storable NetworkEntry where + sizeOf _ = #const sizeof(struct hostent) + alignment _ = alignment (undefined :: CInt) -- ??? 
+ + peek p = do + n_name <- (#peek struct netent, n_name) p >>= peekCString + n_aliases <- (#peek struct netent, n_aliases) p + >>= peekArray0 nullPtr + >>= mapM peekCString + n_addrtype <- (#peek struct netent, n_addrtype) p + n_net <- (#peek struct netent, n_net) p + return (NetworkEntry { + networkName = n_name, + networkAliases = n_aliases, + networkFamily = unpackFamily (fromIntegral + (n_addrtype :: CInt)), + networkAddress = n_net + }) + + poke _p = error "Storable.poke(BSD.NetEntry) not implemented" + + +#if !defined(cygwin32_HOST_OS) && !defined(mingw32_HOST_OS) && !defined(_WIN32) +getNetworkByName :: NetworkName -> IO NetworkEntry +getNetworkByName name = withLock $ do + withCString name $ \ name_cstr -> do + throwNoSuchThingIfNull "getNetworkByName" "no such network entry" + $ c_getnetbyname name_cstr + >>= peek + +foreign import ccall unsafe "getnetbyname" + c_getnetbyname :: CString -> IO (Ptr NetworkEntry) + +getNetworkByAddr :: NetworkAddr -> Family -> IO NetworkEntry +getNetworkByAddr addr family = withLock $ do + throwNoSuchThingIfNull "getNetworkByAddr" "no such network entry" + $ c_getnetbyaddr addr (packFamily family) + >>= peek + +foreign import ccall unsafe "getnetbyaddr" + c_getnetbyaddr :: NetworkAddr -> CInt -> IO (Ptr NetworkEntry) + +getNetworkEntry :: IO NetworkEntry +getNetworkEntry = withLock $ do + throwNoSuchThingIfNull "getNetworkEntry" "no more network entries" + $ c_getnetent + >>= peek + +foreign import ccall unsafe "getnetent" c_getnetent :: IO (Ptr NetworkEntry) + +-- | Open the network name database. The parameter specifies +-- whether a connection is maintained open between various +-- networkEntry calls +setNetworkEntry :: Bool -> IO () +setNetworkEntry flg = withLock $ c_setnetent (fromBool flg) + +foreign import ccall unsafe "setnetent" c_setnetent :: CInt -> IO () + +-- | Close the connection to the network name database. 
+endNetworkEntry :: IO () +endNetworkEntry = withLock $ c_endnetent + +foreign import ccall unsafe "endnetent" c_endnetent :: IO () + +-- | Get the list of network entries. +getNetworkEntries :: Bool -> IO [NetworkEntry] +getNetworkEntries stayOpen = do + setNetworkEntry stayOpen + getEntries (getNetworkEntry) (endNetworkEntry) +#endif + +-- --------------------------------------------------------------------------- +-- Interface names + +#if defined(HAVE_IF_NAMETOINDEX) + +-- returns the index of the network interface corresponding to the name ifname. +ifNameToIndex :: String -> IO (Maybe Int) +ifNameToIndex ifname = do + index <- withCString ifname c_if_nametoindex + -- On failure zero is returned. We'll return Nothing. + return $ if index == 0 then Nothing else Just $ fromIntegral index + +foreign import CALLCONV safe "if_nametoindex" + c_if_nametoindex :: CString -> IO CUInt + +#endif + + +-- Mutex for name service lockdown + +{-# NOINLINE lock #-} +lock :: MVar () +lock = unsafePerformIO $ withSocketsDo $ newMVar () + +withLock :: IO a -> IO a +withLock act = withMVar lock (\_ -> act) + +-- --------------------------------------------------------------------------- +-- Miscellaneous Functions + +-- | Calling getHostName returns the standard host name for the current +-- processor, as set at boot time. 
+ +getHostName :: IO HostName +getHostName = do + let size = 256 + allocaArray0 size $ \ cstr -> do + throwSocketErrorIfMinus1_ "getHostName" $ c_gethostname cstr (fromIntegral size) + peekCString cstr + +foreign import CALLCONV unsafe "gethostname" + c_gethostname :: CString -> CSize -> IO CInt + +-- Helper function used by the exported functions that provides a +-- Haskellised view of the enumerator functions: + +getEntries :: IO a -- read + -> IO () -- at end + -> IO [a] +getEntries getOne atEnd = loop + where + loop = do + vv <- E.catch (liftM Just getOne) + (\ e -> let _types = e :: IOException in return Nothing) + case vv of + Nothing -> return [] + Just v -> loop >>= \ vs -> atEnd >> return (v:vs) + + +throwNoSuchThingIfNull :: String -> String -> IO (Ptr a) -> IO (Ptr a) +throwNoSuchThingIfNull loc desc act = do + ptr <- act + if (ptr == nullPtr) + then ioError (ioeSetErrorString (mkIOError NoSuchThing loc Nothing Nothing) desc) + else return ptr diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/Network/Socket/ByteString/Internal.hs cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/Network/Socket/ByteString/Internal.hs --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/Network/Socket/ByteString/Internal.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/Network/Socket/ByteString/Internal.hs 2015-07-07 18:40:52.000000000 +0000 @@ -0,0 +1,45 @@ +{-# LANGUAGE CPP, ForeignFunctionInterface #-} + +-- | +-- Module : Network.Socket.ByteString.Internal +-- Copyright : (c) Johan Tibell 2007-2010 +-- License : BSD-style +-- +-- Maintainer : johan.tibell@gmail.com +-- Stability : stable +-- Portability : portable +-- +module Network.Socket.ByteString.Internal + ( + mkInvalidRecvArgError +#if !defined(mingw32_HOST_OS) + , c_writev + , c_sendmsg +#endif + ) where + +import System.IO.Error (ioeSetErrorString, mkIOError) + +#if !defined(mingw32_HOST_OS) +import Foreign.C.Types (CInt(..)) +import System.Posix.Types 
(CSsize(..)) +import Foreign.Ptr (Ptr) + +import Network.Socket.ByteString.IOVec (IOVec) +import Network.Socket.ByteString.MsgHdr (MsgHdr) +#endif + +import GHC.IO.Exception (IOErrorType(..)) + +mkInvalidRecvArgError :: String -> IOError +mkInvalidRecvArgError loc = ioeSetErrorString (mkIOError + InvalidArgument + loc Nothing Nothing) "non-positive length" + +#if !defined(mingw32_HOST_OS) +foreign import ccall unsafe "writev" + c_writev :: CInt -> Ptr IOVec -> CInt -> IO CSsize + +foreign import ccall unsafe "sendmsg" + c_sendmsg :: CInt -> Ptr MsgHdr -> CInt -> IO CSsize +#endif diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/Network/Socket/ByteString/IOVec.hsc cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/Network/Socket/ByteString/IOVec.hsc --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/Network/Socket/ByteString/IOVec.hsc 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/Network/Socket/ByteString/IOVec.hsc 2015-07-07 18:40:52.000000000 +0000 @@ -0,0 +1,31 @@ +{-# OPTIONS_GHC -funbox-strict-fields #-} + +-- | Support module for the POSIX writev system call. +module Network.Socket.ByteString.IOVec + ( IOVec(..) 
+ ) where + +import Foreign.C.Types (CChar, CInt, CSize) +import Foreign.Ptr (Ptr) +import Foreign.Storable (Storable(..)) + +#include +#include + +data IOVec = IOVec + { iovBase :: !(Ptr CChar) + , iovLen :: !CSize + } + +instance Storable IOVec where + sizeOf _ = (#const sizeof(struct iovec)) + alignment _ = alignment (undefined :: CInt) + + peek p = do + base <- (#peek struct iovec, iov_base) p + len <- (#peek struct iovec, iov_len) p + return $ IOVec base len + + poke p iov = do + (#poke struct iovec, iov_base) p (iovBase iov) + (#poke struct iovec, iov_len) p (iovLen iov) diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/Network/Socket/ByteString/Lazy/Posix.hs cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/Network/Socket/ByteString/Lazy/Posix.hs --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/Network/Socket/ByteString/Lazy/Posix.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/Network/Socket/ByteString/Lazy/Posix.hs 2015-07-07 18:40:52.000000000 +0000 @@ -0,0 +1,56 @@ +{-# LANGUAGE BangPatterns #-} +module Network.Socket.ByteString.Lazy.Posix + ( + -- * Send data to a socket + send + , sendAll + ) where + +import Control.Monad (liftM) +import Control.Monad (unless) +import qualified Data.ByteString.Lazy as L +import Data.ByteString.Lazy.Internal (ByteString(..)) +import Data.ByteString.Unsafe (unsafeUseAsCStringLen) +import Data.Int (Int64) +import Foreign.Marshal.Array (allocaArray) +import Foreign.Ptr (plusPtr) +import Foreign.Storable (Storable(..)) + +import Network.Socket (Socket(..)) +import Network.Socket.ByteString.IOVec (IOVec(IOVec)) +import Network.Socket.ByteString.Internal (c_writev) +import Network.Socket.Internal + +-- ----------------------------------------------------------------------------- +-- Sending + +send :: Socket -- ^ Connected socket + -> ByteString -- ^ Data to send + -> IO Int64 -- ^ Number of bytes sent +send sock@(MkSocket fd _ _ _ _) s = do + let cs = take maxNumChunks 
(L.toChunks s) + len = length cs + liftM fromIntegral . allocaArray len $ \ptr -> + withPokes cs ptr $ \niovs -> + throwSocketErrorWaitWrite sock "writev" $ + c_writev (fromIntegral fd) ptr niovs + where + withPokes ss p f = loop ss p 0 0 + where loop (c:cs) q k !niovs + | k < maxNumBytes = + unsafeUseAsCStringLen c $ \(ptr,len) -> do + poke q $ IOVec ptr (fromIntegral len) + loop cs (q `plusPtr` sizeOf (undefined :: IOVec)) + (k + fromIntegral len) (niovs + 1) + | otherwise = f niovs + loop _ _ _ niovs = f niovs + maxNumBytes = 4194304 :: Int -- maximum number of bytes to transmit in one system call + maxNumChunks = 1024 :: Int -- maximum number of chunks to transmit in one system call + +sendAll :: Socket -- ^ Connected socket + -> ByteString -- ^ Data to send + -> IO () +sendAll sock bs = do + sent <- send sock bs + let bs' = L.drop sent bs + unless (L.null bs') $ sendAll sock bs' diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/Network/Socket/ByteString/Lazy/Windows.hs cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/Network/Socket/ByteString/Lazy/Windows.hs --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/Network/Socket/ByteString/Lazy/Windows.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/Network/Socket/ByteString/Lazy/Windows.hs 2015-07-07 18:40:52.000000000 +0000 @@ -0,0 +1,36 @@ +{-# LANGUAGE BangPatterns #-} +module Network.Socket.ByteString.Lazy.Windows + ( + -- * Send data to a socket + send + , sendAll + ) where + +import Control.Applicative ((<$>)) +import Control.Monad (unless) +import qualified Data.ByteString as S +import qualified Data.ByteString.Lazy as L +import Data.Int (Int64) + +import Network.Socket (Socket(..)) +import qualified Network.Socket.ByteString as Socket + +-- ----------------------------------------------------------------------------- +-- Sending + +send :: Socket -- ^ Connected socket + -> L.ByteString -- ^ Data to send + -> IO Int64 -- ^ Number of bytes sent +send sock s = 
do + fromIntegral <$> case L.toChunks s of + -- TODO: Consider doing nothing if the string is empty. + [] -> Socket.send sock S.empty + (x:_) -> Socket.send sock x + +sendAll :: Socket -- ^ Connected socket + -> L.ByteString -- ^ Data to send + -> IO () +sendAll sock bs = do + sent <- send sock bs + let bs' = L.drop sent bs + unless (L.null bs') $ sendAll sock bs' diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/Network/Socket/ByteString/Lazy.hs cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/Network/Socket/ByteString/Lazy.hs --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/Network/Socket/ByteString/Lazy.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/Network/Socket/ByteString/Lazy.hs 2015-07-07 18:40:52.000000000 +0000 @@ -0,0 +1,86 @@ +{-# LANGUAGE CPP #-} + +-- | +-- Module : Network.Socket.ByteString.Lazy +-- Copyright : (c) Bryan O'Sullivan 2009 +-- License : BSD-style +-- +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : POSIX, GHC +-- +-- This module provides access to the BSD /socket/ interface. This +-- module is generally more efficient than the 'String' based network +-- functions in 'Network.Socket'. For detailed documentation, consult +-- your favorite POSIX socket reference. All functions communicate +-- failures by converting the error number to 'System.IO.IOError'. 
+-- +-- This module is made to be imported with 'Network.Socket' like so: +-- +-- > import Network.Socket hiding (send, sendTo, recv, recvFrom) +-- > import Network.Socket.ByteString.Lazy +-- > import Prelude hiding (getContents) +-- +module Network.Socket.ByteString.Lazy + ( + -- * Send data to a socket + send + , sendAll + , + + -- * Receive data from a socket + getContents + , recv + ) where + +import Control.Monad (liftM) +import Data.ByteString.Lazy.Internal (ByteString(..), defaultChunkSize) +import Data.Int (Int64) +import Network.Socket (Socket(..), ShutdownCmd(..), shutdown) +import Prelude hiding (getContents) +import System.IO.Unsafe (unsafeInterleaveIO) + +import qualified Data.ByteString as S +import qualified Network.Socket.ByteString as N + +#if defined(mingw32_HOST_OS) +import Network.Socket.ByteString.Lazy.Windows (send, sendAll) +#else +import Network.Socket.ByteString.Lazy.Posix (send, sendAll) +#endif + +-- ----------------------------------------------------------------------------- +-- Receiving + +-- | Receive data from the socket. The socket must be in a connected +-- state. Data is received on demand, in chunks; each chunk will be +-- sized to reflect the amount of data received by individual 'recv' +-- calls. +-- +-- All remaining data from the socket is consumed. When there is no +-- more data to be received, the receiving side of the socket is shut +-- down. If there is an error and an exception is thrown, the socket +-- is not shut down. +getContents :: Socket -- ^ Connected socket + -> IO ByteString -- ^ Data received +getContents sock = loop where + loop = unsafeInterleaveIO $ do + s <- N.recv sock defaultChunkSize + if S.null s + then shutdown sock ShutdownReceive >> return Empty + else Chunk s `liftM` loop + +-- | Receive data from the socket. The socket must be in a connected +-- state. This function may return fewer bytes than specified. 
If +-- the received data is longer than the specified length, it may be +-- discarded depending on the type of socket. This function may block +-- until a message arrives. +-- +-- If there is no more data to be received, returns an empty 'ByteString'. +recv :: Socket -- ^ Connected socket + -> Int64 -- ^ Maximum number of bytes to receive + -> IO ByteString -- ^ Data received +recv sock nbytes = chunk `liftM` N.recv sock (fromIntegral nbytes) where + chunk k + | S.null k = Empty + | otherwise = Chunk k Empty diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/Network/Socket/ByteString/MsgHdr.hsc cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/Network/Socket/ByteString/MsgHdr.hsc --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/Network/Socket/ByteString/MsgHdr.hsc 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/Network/Socket/ByteString/MsgHdr.hsc 2015-07-07 18:40:52.000000000 +0000 @@ -0,0 +1,48 @@ +{-# LANGUAGE CPP #-} +{-# OPTIONS_GHC -funbox-strict-fields #-} + +-- | Support module for the POSIX 'sendmsg' system call. +module Network.Socket.ByteString.MsgHdr + ( MsgHdr(..) + ) where + +#include +#include + +import Foreign.C.Types (CInt, CSize, CUInt) +import Foreign.Ptr (Ptr) +import Foreign.Storable (Storable(..)) +import Network.Socket (SockAddr) +import Network.Socket.Internal (zeroMemory) + +import Network.Socket.ByteString.IOVec (IOVec) + +-- We don't use msg_control, msg_controllen, and msg_flags as these +-- don't exist on OpenSolaris. 
+data MsgHdr = MsgHdr + { msgName :: !(Ptr SockAddr) + , msgNameLen :: !CUInt + , msgIov :: !(Ptr IOVec) + , msgIovLen :: !CSize + } + +instance Storable MsgHdr where + sizeOf _ = (#const sizeof(struct msghdr)) + alignment _ = alignment (undefined :: CInt) + + peek p = do + name <- (#peek struct msghdr, msg_name) p + nameLen <- (#peek struct msghdr, msg_namelen) p + iov <- (#peek struct msghdr, msg_iov) p + iovLen <- (#peek struct msghdr, msg_iovlen) p + return $ MsgHdr name nameLen iov iovLen + + poke p mh = do + -- We need to zero the msg_control, msg_controllen, and msg_flags + -- fields, but they only exist on some platforms (e.g. not on + -- Solaris). Instead of using CPP, we zero the entire struct. + zeroMemory p (#const sizeof(struct msghdr)) + (#poke struct msghdr, msg_name) p (msgName mh) + (#poke struct msghdr, msg_namelen) p (msgNameLen mh) + (#poke struct msghdr, msg_iov) p (msgIov mh) + (#poke struct msghdr, msg_iovlen) p (msgIovLen mh) diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/Network/Socket/ByteString.hsc cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/Network/Socket/ByteString.hsc --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/Network/Socket/ByteString.hsc 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/Network/Socket/ByteString.hsc 2015-07-07 18:40:52.000000000 +0000 @@ -0,0 +1,351 @@ +{-# LANGUAGE CPP, ForeignFunctionInterface #-} + +#include "HsNet.h" + +-- | +-- Module : Network.Socket.ByteString +-- Copyright : (c) Johan Tibell 2007-2010 +-- License : BSD-style +-- +-- Maintainer : johan.tibell@gmail.com +-- Stability : stable +-- Portability : portable +-- +-- This module provides access to the BSD /socket/ interface. This +-- module is generally more efficient than the 'String' based network +-- functions in 'Network.Socket'. For detailed documentation, consult +-- your favorite POSIX socket reference. 
All functions communicate +-- failures by converting the error number to 'System.IO.IOError'. +-- +-- This module is made to be imported with 'Network.Socket' like so: +-- +-- > import Network.Socket hiding (send, sendTo, recv, recvFrom) +-- > import Network.Socket.ByteString +-- +module Network.Socket.ByteString + ( + -- * Send data to a socket + send + , sendAll + , sendTo + , sendAllTo + + -- ** Vectored I/O + -- $vectored + , sendMany + , sendManyTo + + -- * Receive data from a socket + , recv + , recvFrom + + -- * Example + -- $example + ) where + +import Control.Monad (liftM, when) +import Data.ByteString (ByteString) +import Data.ByteString.Internal (createAndTrim) +import Data.ByteString.Unsafe (unsafeUseAsCStringLen) +import Data.Word (Word8) +import Foreign.C.Types (CInt(..)) +import Foreign.Marshal.Alloc (allocaBytes) +import Foreign.Ptr (Ptr, castPtr) +import Network.Socket (SockAddr, Socket(..), sendBufTo, recvBufFrom) + +import qualified Data.ByteString as B + +import Network.Socket.ByteString.Internal +import Network.Socket.Internal +import Network.Socket.Types + +#if !defined(mingw32_HOST_OS) +import Control.Monad (zipWithM_) +import Foreign.C.Types (CChar) +import Foreign.C.Types (CSize(..)) +import Foreign.Marshal.Array (allocaArray) +import Foreign.Marshal.Utils (with) +import Foreign.Ptr (plusPtr) +import Foreign.Storable (Storable(..)) + +import Network.Socket.ByteString.IOVec (IOVec(..)) +import Network.Socket.ByteString.MsgHdr (MsgHdr(..)) + +#else +import GHC.IO.FD +#endif + +#if !defined(mingw32_HOST_OS) +foreign import CALLCONV unsafe "send" + c_send :: CInt -> Ptr a -> CSize -> CInt -> IO CInt +foreign import CALLCONV unsafe "recv" + c_recv :: CInt -> Ptr CChar -> CSize -> CInt -> IO CInt +#endif + +-- ---------------------------------------------------------------------------- +-- Sending + +-- | Send data to the socket. The socket must be connected to a +-- remote socket. Returns the number of bytes sent. 
Applications are +-- responsible for ensuring that all data has been sent. +send :: Socket -- ^ Connected socket + -> ByteString -- ^ Data to send + -> IO Int -- ^ Number of bytes sent +send sock@(MkSocket s _ _ _ _) xs = + unsafeUseAsCStringLen xs $ \(str, len) -> + liftM fromIntegral $ +#if defined(mingw32_HOST_OS) + writeRawBufferPtr "Network.Socket.ByteString.send" + (FD s 1) (castPtr str) 0 (fromIntegral len) +#else + throwSocketErrorWaitWrite sock "send" $ + c_send s str (fromIntegral len) 0 +#endif + +-- | Send data to the socket. The socket must be connected to a +-- remote socket. Unlike 'send', this function continues to send data +-- until either all data has been sent or an error occurs. On error, +-- an exception is raised, and there is no way to determine how much +-- data, if any, was successfully sent. +sendAll :: Socket -- ^ Connected socket + -> ByteString -- ^ Data to send + -> IO () +sendAll sock bs = do + sent <- send sock bs + when (sent < B.length bs) $ sendAll sock (B.drop sent bs) + +-- | Send data to the socket. The recipient can be specified +-- explicitly, so the socket need not be in a connected state. +-- Returns the number of bytes sent. Applications are responsible for +-- ensuring that all data has been sent. +sendTo :: Socket -- ^ Socket + -> ByteString -- ^ Data to send + -> SockAddr -- ^ Recipient address + -> IO Int -- ^ Number of bytes sent +sendTo sock xs addr = + unsafeUseAsCStringLen xs $ \(str, len) -> sendBufTo sock str len addr + +-- | Send data to the socket. The recipient can be specified +-- explicitly, so the socket need not be in a connected state. Unlike +-- 'sendTo', this function continues to send data until either all +-- data has been sent or an error occurs. On error, an exception is +-- raised, and there is no way to determine how much data, if any, was +-- successfully sent. 
+sendAllTo :: Socket -- ^ Socket + -> ByteString -- ^ Data to send + -> SockAddr -- ^ Recipient address + -> IO () +sendAllTo sock xs addr = do + sent <- sendTo sock xs addr + when (sent < B.length xs) $ sendAllTo sock (B.drop sent xs) addr + +-- ---------------------------------------------------------------------------- +-- ** Vectored I/O + +-- $vectored +-- +-- Vectored I\/O, also known as scatter\/gather I\/O, allows multiple +-- data segments to be sent using a single system call, without first +-- concatenating the segments. For example, given a list of +-- @ByteString@s, @xs@, +-- +-- > sendMany sock xs +-- +-- is equivalent to +-- +-- > sendAll sock (concat xs) +-- +-- but potentially more efficient. +-- +-- Vectored I\/O are often useful when implementing network protocols +-- that, for example, group data into segments consisting of one or +-- more fixed-length headers followed by a variable-length body. + +-- | Send data to the socket. The socket must be in a connected +-- state. The data is sent as if the parts have been concatenated. +-- This function continues to send data until either all data has been +-- sent or an error occurs. On error, an exception is raised, and +-- there is no way to determine how much data, if any, was +-- successfully sent. +sendMany :: Socket -- ^ Connected socket + -> [ByteString] -- ^ Data to send + -> IO () +#if !defined(mingw32_HOST_OS) +sendMany sock@(MkSocket fd _ _ _ _) cs = do + sent <- sendManyInner + when (sent < totalLength cs) $ sendMany sock (remainingChunks sent cs) + where + sendManyInner = + liftM fromIntegral . withIOVec cs $ \(iovsPtr, iovsLen) -> + throwSocketErrorWaitWrite sock "writev" $ + c_writev (fromIntegral fd) iovsPtr + (fromIntegral (min iovsLen (#const IOV_MAX))) +#else +sendMany sock = sendAll sock . B.concat +#endif + +-- | Send data to the socket. The recipient can be specified +-- explicitly, so the socket need not be in a connected state. 
The +-- data is sent as if the parts have been concatenated. This function +-- continues to send data until either all data has been sent or an +-- error occurs. On error, an exception is raised, and there is no +-- way to determine how much data, if any, was successfully sent. +sendManyTo :: Socket -- ^ Socket + -> [ByteString] -- ^ Data to send + -> SockAddr -- ^ Recipient address + -> IO () +#if !defined(mingw32_HOST_OS) +sendManyTo sock@(MkSocket fd _ _ _ _) cs addr = do + sent <- liftM fromIntegral sendManyToInner + when (sent < totalLength cs) $ sendManyTo sock (remainingChunks sent cs) addr + where + sendManyToInner = + withSockAddr addr $ \addrPtr addrSize -> + withIOVec cs $ \(iovsPtr, iovsLen) -> do + let msgHdr = MsgHdr + addrPtr (fromIntegral addrSize) + iovsPtr (fromIntegral iovsLen) + with msgHdr $ \msgHdrPtr -> + throwSocketErrorWaitWrite sock "sendmsg" $ + c_sendmsg (fromIntegral fd) msgHdrPtr 0 +#else +sendManyTo sock cs = sendAllTo sock (B.concat cs) +#endif + +-- ---------------------------------------------------------------------------- +-- Receiving + +-- | Receive data from the socket. The socket must be in a connected +-- state. This function may return fewer bytes than specified. If +-- the message is longer than the specified length, it may be +-- discarded depending on the type of socket. This function may block +-- until a message arrives. +-- +-- Considering hardware and network realities, the maximum number of bytes to +-- receive should be a small power of 2, e.g., 4096. +-- +-- For TCP sockets, a zero length return value means the peer has +-- closed its half side of the connection. 
+recv :: Socket -- ^ Connected socket + -> Int -- ^ Maximum number of bytes to receive + -> IO ByteString -- ^ Data received +recv sock nbytes + | nbytes < 0 = ioError (mkInvalidRecvArgError "Network.Socket.ByteString.recv") + | otherwise = createAndTrim nbytes $ recvInner sock nbytes + +recvInner :: Socket -> Int -> Ptr Word8 -> IO Int +recvInner sock nbytes ptr = + fmap fromIntegral $ +#if defined(mingw32_HOST_OS) + readRawBufferPtr "Network.Socket.ByteString.recv" (FD s 1) ptr 0 (fromIntegral nbytes) +#else + throwSocketErrorWaitRead sock "recv" $ + c_recv s (castPtr ptr) (fromIntegral nbytes) 0 +#endif + where + s = sockFd sock + +-- | Receive data from the socket. The socket need not be in a +-- connected state. Returns @(bytes, address)@ where @bytes@ is a +-- 'ByteString' representing the data received and @address@ is a +-- 'SockAddr' representing the address of the sending socket. +recvFrom :: Socket -- ^ Socket + -> Int -- ^ Maximum number of bytes to receive + -> IO (ByteString, SockAddr) -- ^ Data received and sender address +recvFrom sock nbytes = + allocaBytes nbytes $ \ptr -> do + (len, sockaddr) <- recvBufFrom sock ptr nbytes + str <- B.packCStringLen (ptr, len) + return (str, sockaddr) + +-- ---------------------------------------------------------------------------- +-- Not exported + +#if !defined(mingw32_HOST_OS) +-- | Suppose we try to transmit a list of chunks @cs@ via a gathering write +-- operation and find that @n@ bytes were sent. Then @remainingChunks n cs@ is +-- list of chunks remaining to be sent. +remainingChunks :: Int -> [ByteString] -> [ByteString] +remainingChunks _ [] = [] +remainingChunks i (x:xs) + | i < len = B.drop i x : xs + | otherwise = let i' = i - len in i' `seq` remainingChunks i' xs + where + len = B.length x + +-- | @totalLength cs@ is the sum of the lengths of the chunks in the list @cs@. +totalLength :: [ByteString] -> Int +totalLength = sum . 
map B.length + +-- | @withIOVec cs f@ executes the computation @f@, passing as argument a pair +-- consisting of a pointer to a temporarily allocated array of pointers to +-- 'IOVec' made from @cs@ and the number of pointers (@length cs@). +-- /Unix only/. +withIOVec :: [ByteString] -> ((Ptr IOVec, Int) -> IO a) -> IO a +withIOVec cs f = + allocaArray csLen $ \aPtr -> do + zipWithM_ pokeIov (ptrs aPtr) cs + f (aPtr, csLen) + where + csLen = length cs + ptrs = iterate (`plusPtr` sizeOf (undefined :: IOVec)) + pokeIov ptr s = + unsafeUseAsCStringLen s $ \(sPtr, sLen) -> + poke ptr $ IOVec sPtr (fromIntegral sLen) +#endif + +-- --------------------------------------------------------------------- +-- Example + +-- $example +-- +-- Here are two minimal example programs using the TCP/IP protocol: a +-- server that echoes all data that it receives back (servicing only +-- one client) and a client using it. +-- +-- > -- Echo server program +-- > module Main where +-- > +-- > import Control.Monad (unless) +-- > import Network.Socket hiding (recv) +-- > import qualified Data.ByteString as S +-- > import Network.Socket.ByteString (recv, sendAll) +-- > +-- > main :: IO () +-- > main = withSocketsDo $ +-- > do addrinfos <- getAddrInfo +-- > (Just (defaultHints {addrFlags = [AI_PASSIVE]})) +-- > Nothing (Just "3000") +-- > let serveraddr = head addrinfos +-- > sock <- socket (addrFamily serveraddr) Stream defaultProtocol +-- > bindSocket sock (addrAddress serveraddr) +-- > listen sock 1 +-- > (conn, _) <- accept sock +-- > talk conn +-- > sClose conn +-- > sClose sock +-- > +-- > where +-- > talk :: Socket -> IO () +-- > talk conn = +-- > do msg <- recv conn 1024 +-- > unless (S.null msg) $ sendAll conn msg >> talk conn +-- +-- > -- Echo client program +-- > module Main where +-- > +-- > import Network.Socket hiding (recv) +-- > import Network.Socket.ByteString (recv, sendAll) +-- > import qualified Data.ByteString.Char8 as C +-- > +-- > main :: IO () +-- > main = withSocketsDo 
$ +-- > do addrinfos <- getAddrInfo Nothing (Just "") (Just "3000") +-- > let serveraddr = head addrinfos +-- > sock <- socket (addrFamily serveraddr) Stream defaultProtocol +-- > connect sock (addrAddress serveraddr) +-- > sendAll sock $ C.pack "Hello, world!" +-- > msg <- recv sock 1024 +-- > sClose sock +-- > putStr "Received " +-- > C.putStrLn msg diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/Network/Socket/Internal.hsc cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/Network/Socket/Internal.hsc --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/Network/Socket/Internal.hsc 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/Network/Socket/Internal.hsc 2015-07-07 18:40:52.000000000 +0000 @@ -0,0 +1,271 @@ +{-# LANGUAGE CPP #-} +{-# LANGUAGE ForeignFunctionInterface #-} +{-# OPTIONS_GHC -fno-warn-orphans #-} +----------------------------------------------------------------------------- +-- | +-- Module : Network.Socket.Internal +-- Copyright : (c) The University of Glasgow 2001 +-- License : BSD-style (see the file libraries/network/LICENSE) +-- +-- Maintainer : libraries@haskell.org +-- Stability : provisional +-- Portability : portable +-- +-- A module containing semi-public 'Network.Socket' internals. +-- Modules which extend the 'Network.Socket' module will need to use +-- this module while ideally most users will be able to make do with +-- the public interface. +-- +----------------------------------------------------------------------------- + +#include "HsNet.h" + +module Network.Socket.Internal + ( + -- * Socket addresses + HostAddress +#if defined(IPV6_SOCKET_SUPPORT) + , HostAddress6 + , FlowInfo + , ScopeID +#endif + , PortNumber(..) + , SockAddr(..) + + , peekSockAddr + , pokeSockAddr + , sizeOfSockAddr + , sizeOfSockAddrByFamily + , withSockAddr + , withNewSockAddr + + -- * Protocol families + , Family(..) 
+ + -- * Socket error functions +#if defined(HAVE_WINSOCK2_H) && !defined(cygwin32_HOST_OS) + , c_getLastError +#endif + , throwSocketError + , throwSocketErrorCode + + -- * Guards for socket operations that may fail + , throwSocketErrorIfMinus1_ + , throwSocketErrorIfMinus1Retry + , throwSocketErrorIfMinus1Retry_ + , throwSocketErrorIfMinus1RetryMayBlock + + -- ** Guards that wait and retry if the operation would block + -- | These guards are based on 'throwSocketErrorIfMinus1RetryMayBlock'. + -- They wait for socket readiness if the action fails with @EWOULDBLOCK@ + -- or similar. + , throwSocketErrorWaitRead + , throwSocketErrorWaitWrite + + -- * Initialization + , withSocketsDo + + -- * Low-level helpers + , zeroMemory + ) where + +import Foreign.C.Error (throwErrno, throwErrnoIfMinus1Retry, + throwErrnoIfMinus1RetryMayBlock, throwErrnoIfMinus1_, + Errno(..), errnoToIOError) +import Foreign.C.String (peekCString) +import Foreign.C.Types (CInt(..)) +import Foreign.Ptr (Ptr) +import GHC.Conc (threadWaitRead, threadWaitWrite) + +#if defined(HAVE_WINSOCK2_H) && !defined(cygwin32_HOST_OS) +import Control.Exception ( evaluate ) +import System.IO.Unsafe ( unsafePerformIO ) +import Control.Monad ( when ) +# if __GLASGOW_HASKELL__ >= 707 +import GHC.IO.Exception ( IOErrorType(..) ) +# else +import GHC.IOBase ( IOErrorType(..) ) +# endif +import Foreign.C.Types ( CChar ) +import System.IO.Error ( ioeSetErrorString, mkIOError ) +#endif + +import Network.Socket.Types + +-- --------------------------------------------------------------------- +-- Guards for socket operations that may fail + +-- | Throw an 'IOError' corresponding to the current socket error. +throwSocketError :: String -- ^ textual description of the error location + -> IO a + +-- | Like 'throwSocketError', but the error code is supplied as an argument. +-- +-- On Windows, do not use errno. Use a system error code instead. 
+throwSocketErrorCode :: String -> CInt -> IO a + +-- | Throw an 'IOError' corresponding to the current socket error if +-- the IO action returns a result of @-1@. Discards the result of the +-- IO action after error handling. +throwSocketErrorIfMinus1_ + :: (Eq a, Num a) + => String -- ^ textual description of the location + -> IO a -- ^ the 'IO' operation to be executed + -> IO () + +{-# SPECIALIZE throwSocketErrorIfMinus1_ :: String -> IO CInt -> IO () #-} + +-- | Throw an 'IOError' corresponding to the current socket error if +-- the IO action returns a result of @-1@, but retries in case of an +-- interrupted operation. +throwSocketErrorIfMinus1Retry + :: (Eq a, Num a) + => String -- ^ textual description of the location + -> IO a -- ^ the 'IO' operation to be executed + -> IO a + +{-# SPECIALIZE throwSocketErrorIfMinus1Retry :: String -> IO CInt -> IO CInt #-} + +-- | Throw an 'IOError' corresponding to the current socket error if +-- the IO action returns a result of @-1@, but retries in case of an +-- interrupted operation. Discards the result of the IO action after +-- error handling. +throwSocketErrorIfMinus1Retry_ + :: (Eq a, Num a) + => String -- ^ textual description of the location + -> IO a -- ^ the 'IO' operation to be executed + -> IO () +throwSocketErrorIfMinus1Retry_ loc m = + throwSocketErrorIfMinus1Retry loc m >> return () +{-# SPECIALIZE throwSocketErrorIfMinus1Retry_ :: String -> IO CInt -> IO () #-} + +-- | Throw an 'IOError' corresponding to the current socket error if +-- the IO action returns a result of @-1@, but retries in case of an +-- interrupted operation. Checks for operations that would block and +-- executes an alternative action before retrying in that case. 
+throwSocketErrorIfMinus1RetryMayBlock + :: (Eq a, Num a) + => String -- ^ textual description of the location + -> IO b -- ^ action to execute before retrying if an + -- immediate retry would block + -> IO a -- ^ the 'IO' operation to be executed + -> IO a + +{-# SPECIALIZE throwSocketErrorIfMinus1RetryMayBlock + :: String -> IO b -> IO CInt -> IO CInt #-} + +#if (!defined(HAVE_WINSOCK2_H) || defined(cygwin32_HOST_OS)) + +throwSocketErrorIfMinus1RetryMayBlock name on_block act = + throwErrnoIfMinus1RetryMayBlock name act on_block + +throwSocketErrorIfMinus1Retry = throwErrnoIfMinus1Retry + +throwSocketErrorIfMinus1_ = throwErrnoIfMinus1_ + +throwSocketError = throwErrno + +throwSocketErrorCode loc errno = + ioError (errnoToIOError loc (Errno errno) Nothing Nothing) + +#else + +throwSocketErrorIfMinus1RetryMayBlock name _ act + = throwSocketErrorIfMinus1Retry name act + +throwSocketErrorIfMinus1_ name act = do + throwSocketErrorIfMinus1Retry name act + return () + +# if defined(HAVE_WINSOCK2_H) && !defined(cygwin32_HOST_OS) +throwSocketErrorIfMinus1Retry name act = do + r <- act + if (r == -1) + then do + rc <- c_getLastError + case rc of + #{const WSANOTINITIALISED} -> do + withSocketsDo (return ()) + r <- act + if (r == -1) + then throwSocketError name + else return r + _ -> throwSocketError name + else return r + +throwSocketErrorCode name rc = do + pstr <- c_getWSError rc + str <- peekCString pstr + ioError (ioeSetErrorString (mkIOError OtherError name Nothing Nothing) str) + +throwSocketError name = + c_getLastError >>= throwSocketErrorCode name + +foreign import CALLCONV unsafe "WSAGetLastError" + c_getLastError :: IO CInt + +foreign import ccall unsafe "getWSErrorDescr" + c_getWSError :: CInt -> IO (Ptr CChar) + + +# else +throwSocketErrorIfMinus1Retry = throwErrnoIfMinus1Retry +throwSocketError = throwErrno +throwSocketErrorCode loc errno = + ioError (errnoToIOError loc (Errno errno) Nothing Nothing) +# endif +#endif + +-- | Like 
'throwSocketErrorIfMinus1Retry', but if the action fails with +-- @EWOULDBLOCK@ or similar, wait for the socket to be read-ready, +-- and try again. +throwSocketErrorWaitRead :: (Eq a, Num a) => Socket -> String -> IO a -> IO a +throwSocketErrorWaitRead sock name io = + throwSocketErrorIfMinus1RetryMayBlock name + (threadWaitRead $ fromIntegral $ sockFd sock) + io + +-- | Like 'throwSocketErrorIfMinus1Retry', but if the action fails with +-- @EWOULDBLOCK@ or similar, wait for the socket to be write-ready, +-- and try again. +throwSocketErrorWaitWrite :: (Eq a, Num a) => Socket -> String -> IO a -> IO a +throwSocketErrorWaitWrite sock name io = + throwSocketErrorIfMinus1RetryMayBlock name + (threadWaitWrite $ fromIntegral $ sockFd sock) + io + +-- --------------------------------------------------------------------------- +-- WinSock support + +{-| With older versions of the @network@ library on Windows operating systems, +the networking subsystem must be initialised using 'withSocketsDo' before +any networking operations can be used. eg. + +> main = withSocketsDo $ do {...} + +It is fine to nest calls to 'withSocketsDo', and to perform networking operations +after 'withSocketsDo' has returned. + +In newer versions of the @network@ library it is only necessary to call +'withSocketsDo' if you are calling the 'MkSocket' constructor directly. +However, for compatibility with older versions on Windows, it is good practice +to always call 'withSocketsDo' (it's very cheap). 
+-} +{-# INLINE withSocketsDo #-} +withSocketsDo :: IO a -> IO a +#if !defined(WITH_WINSOCK) +withSocketsDo x = x +#else +withSocketsDo act = evaluate withSocketsInit >> act + + +{-# NOINLINE withSocketsInit #-} +withSocketsInit :: () +-- Use a CAF to make forcing it do initialisation once, but subsequent forces will be cheap +withSocketsInit = unsafePerformIO $ do + x <- initWinSock + when (x /= 0) $ ioError $ userError "Failed to initialise WinSock" + +foreign import ccall unsafe "initWinSock" initWinSock :: IO Int + +#endif diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/Network/Socket/Types.hsc cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/Network/Socket/Types.hsc --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/Network/Socket/Types.hsc 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/Network/Socket/Types.hsc 2015-07-07 18:40:52.000000000 +0000 @@ -0,0 +1,1037 @@ +{-# LANGUAGE DeriveDataTypeable #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE ForeignFunctionInterface #-} + +#include "HsNet.h" + +module Network.Socket.Types + ( + -- * Socket + Socket(..) + , sockFd + , sockFamily + , sockType + , sockProtocol + , sockStatus + , SocketStatus(..) + + -- * Socket types + , SocketType(..) + , isSupportedSocketType + , packSocketType + , packSocketType' + , packSocketTypeOrThrow + , unpackSocketType + , unpackSocketType' + + -- * Family + , Family(..) + , isSupportedFamily + , packFamily + , unpackFamily + + -- * Socket addresses + , SockAddr(..) + , isSupportedSockAddr + , HostAddress +#if defined(IPV6_SOCKET_SUPPORT) + , HostAddress6 + , FlowInfo + , ScopeID +#endif + , peekSockAddr + , pokeSockAddr + , sizeOfSockAddr + , sizeOfSockAddrByFamily + , withSockAddr + , withNewSockAddr + + -- * Unsorted + , ProtocolNumber + , PortNumber(..) 
+ + -- * Low-level helpers + , zeroMemory + ) where + +import Control.Concurrent.MVar +import Control.Monad +import Data.Bits +import Data.Maybe +import Data.Ratio +import Data.Typeable +import Data.Word +import Data.Int +import Foreign.C +import Foreign.Marshal.Alloc +import Foreign.Marshal.Array +import Foreign.Ptr +import Foreign.Storable + +-- | Represents a socket. The fields are, respectively: +-- +-- * File descriptor +-- * Socket family +-- * Socket type +-- * Protocol number +-- * Status flag +-- +-- If you are calling the 'MkSocket' constructor directly you should ensure +-- you have called 'Network.withSocketsDo'. +data Socket + = MkSocket + CInt -- File Descriptor + Family + SocketType + ProtocolNumber -- Protocol Number + (MVar SocketStatus) -- Status Flag + deriving Typeable + +sockFd :: Socket -> CInt +sockFd (MkSocket n _ _ _ _) = n + +sockFamily :: Socket -> Family +sockFamily (MkSocket _ f _ _ _) = f + +sockType :: Socket -> SocketType +sockType (MkSocket _ _ t _ _) = t + +sockProtocol :: Socket -> ProtocolNumber +sockProtocol (MkSocket _ _ _ p _) = p + +sockStatus :: Socket -> MVar SocketStatus +sockStatus (MkSocket _ _ _ _ s) = s + +instance Eq Socket where + (MkSocket _ _ _ _ m1) == (MkSocket _ _ _ _ m2) = m1 == m2 + +instance Show Socket where + showsPrec _n (MkSocket fd _ _ _ _) = + showString "" + +type ProtocolNumber = CInt + +-- | The status of the socket as /determined by this library/, not +-- necessarily reflecting the state of the connection itself. +-- +-- For example, the 'Closed' status is applied when the 'close' +-- function is called. 
+data SocketStatus + -- Returned Status Function called + = NotConnected -- ^ Newly created, unconnected socket + | Bound -- ^ Bound, via 'bind' + | Listening -- ^ Listening, via 'listen' + | Connected -- ^ Connected or accepted, via 'connect' or 'accept' + | ConvertedToHandle -- ^ Is now a 'Handle' (via 'socketToHandle'), don't touch + | Closed -- ^ Closed was closed by 'close' + deriving (Eq, Show, Typeable) + +----------------------------------------------------------------------------- +-- Socket types + +-- There are a few possible ways to do this. The first is convert the +-- structs used in the C library into an equivalent Haskell type. An +-- other possible implementation is to keep all the internals in the C +-- code and use an Int## and a status flag. The second method is used +-- here since a lot of the C structures are not required to be +-- manipulated. + +-- Originally the status was non-mutable so we had to return a new +-- socket each time we changed the status. This version now uses +-- mutable variables to avoid the need to do this. The result is a +-- cleaner interface and better security since the application +-- programmer now can't circumvent the status information to perform +-- invalid operations on sockets. + +-- | Socket Types. +-- +-- The existence of a constructor does not necessarily imply that that +-- socket type is supported on your system: see 'isSupportedSocketType'. +data SocketType + = NoSocketType -- ^ 0, used in getAddrInfo hints, for example + | Stream -- ^ SOCK_STREAM + | Datagram -- ^ SOCK_DGRAM + | Raw -- ^ SOCK_RAW + | RDM -- ^ SOCK_RDM + | SeqPacket -- ^ SOCK_SEQPACKET + deriving (Eq, Ord, Read, Show, Typeable) + +-- | Does the SOCK_ constant corresponding to the given SocketType exist on +-- this system? +isSupportedSocketType :: SocketType -> Bool +isSupportedSocketType = isJust . packSocketType' + +-- | Find the SOCK_ constant corresponding to the SocketType value. 
+packSocketType' :: SocketType -> Maybe CInt +packSocketType' stype = case Just stype of + -- the Just above is to disable GHC's overlapping pattern + -- detection: see comments for packSocketOption + Just NoSocketType -> Just 0 +#ifdef SOCK_STREAM + Just Stream -> Just #const SOCK_STREAM +#endif +#ifdef SOCK_DGRAM + Just Datagram -> Just #const SOCK_DGRAM +#endif +#ifdef SOCK_RAW + Just Raw -> Just #const SOCK_RAW +#endif +#ifdef SOCK_RDM + Just RDM -> Just #const SOCK_RDM +#endif +#ifdef SOCK_SEQPACKET + Just SeqPacket -> Just #const SOCK_SEQPACKET +#endif + _ -> Nothing + +packSocketType :: SocketType -> CInt +packSocketType stype = fromMaybe (error errMsg) (packSocketType' stype) + where + errMsg = concat ["Network.Socket.packSocketType: ", + "socket type ", show stype, " unsupported on this system"] + +-- | Try packSocketType' on the SocketType, if it fails throw an error with +-- message starting "Network.Socket." ++ the String parameter +packSocketTypeOrThrow :: String -> SocketType -> IO CInt +packSocketTypeOrThrow caller stype = maybe err return (packSocketType' stype) + where + err = ioError . userError . concat $ ["Network.Socket.", caller, ": ", + "socket type ", show stype, " unsupported on this system"] + + +unpackSocketType:: CInt -> Maybe SocketType +unpackSocketType t = case t of + 0 -> Just NoSocketType +#ifdef SOCK_STREAM + (#const SOCK_STREAM) -> Just Stream +#endif +#ifdef SOCK_DGRAM + (#const SOCK_DGRAM) -> Just Datagram +#endif +#ifdef SOCK_RAW + (#const SOCK_RAW) -> Just Raw +#endif +#ifdef SOCK_RDM + (#const SOCK_RDM) -> Just RDM +#endif +#ifdef SOCK_SEQPACKET + (#const SOCK_SEQPACKET) -> Just SeqPacket +#endif + _ -> Nothing + +-- | Try unpackSocketType on the CInt, if it fails throw an error with +-- message starting "Network.Socket." ++ the String parameter +unpackSocketType' :: String -> CInt -> IO SocketType +unpackSocketType' caller ty = maybe err return (unpackSocketType ty) + where + err = ioError . userError . 
concat $ ["Network.Socket.", caller, ": ", + "socket type ", show ty, " unsupported on this system"] + +------------------------------------------------------------------------ +-- Protocol Families. + +-- | Address families. +-- +-- A constructor being present here does not mean it is supported by the +-- operating system: see 'isSupportedFamily'. +data Family + = AF_UNSPEC -- unspecified + | AF_UNIX -- local to host (pipes, portals + | AF_INET -- internetwork: UDP, TCP, etc + | AF_INET6 -- Internet Protocol version 6 + | AF_IMPLINK -- arpanet imp addresses + | AF_PUP -- pup protocols: e.g. BSP + | AF_CHAOS -- mit CHAOS protocols + | AF_NS -- XEROX NS protocols + | AF_NBS -- nbs protocols + | AF_ECMA -- european computer manufacturers + | AF_DATAKIT -- datakit protocols + | AF_CCITT -- CCITT protocols, X.25 etc + | AF_SNA -- IBM SNA + | AF_DECnet -- DECnet + | AF_DLI -- Direct data link interface + | AF_LAT -- LAT + | AF_HYLINK -- NSC Hyperchannel + | AF_APPLETALK -- Apple Talk + | AF_ROUTE -- Internal Routing Protocol + | AF_NETBIOS -- NetBios-style addresses + | AF_NIT -- Network Interface Tap + | AF_802 -- IEEE 802.2, also ISO 8802 + | AF_ISO -- ISO protocols + | AF_OSI -- umbrella of all families used by OSI + | AF_NETMAN -- DNA Network Management + | AF_X25 -- CCITT X.25 + | AF_AX25 + | AF_OSINET -- AFI + | AF_GOSSIP -- US Government OSI + | AF_IPX -- Novell Internet Protocol + | Pseudo_AF_XTP -- eXpress Transfer Protocol (no AF) + | AF_CTF -- Common Trace Facility + | AF_WAN -- Wide Area Network protocols + | AF_SDL -- SGI Data Link for DLPI + | AF_NETWARE + | AF_NDD + | AF_INTF -- Debugging use only + | AF_COIP -- connection-oriented IP, aka ST II + | AF_CNT -- Computer Network Technology + | Pseudo_AF_RTIP -- Help Identify RTIP packets + | Pseudo_AF_PIP -- Help Identify PIP packets + | AF_SIP -- Simple Internet Protocol + | AF_ISDN -- Integrated Services Digital Network + | Pseudo_AF_KEY -- Internal key-management function + | AF_NATM -- native ATM access 
+ | AF_ARP -- (rev.) addr. res. prot. (RFC 826) + | Pseudo_AF_HDRCMPLT -- Used by BPF to not rewrite hdrs in iface output + | AF_ENCAP + | AF_LINK -- Link layer interface + | AF_RAW -- Link layer interface + | AF_RIF -- raw interface + | AF_NETROM -- Amateur radio NetROM + | AF_BRIDGE -- multiprotocol bridge + | AF_ATMPVC -- ATM PVCs + | AF_ROSE -- Amateur Radio X.25 PLP + | AF_NETBEUI -- 802.2LLC + | AF_SECURITY -- Security callback pseudo AF + | AF_PACKET -- Packet family + | AF_ASH -- Ash + | AF_ECONET -- Acorn Econet + | AF_ATMSVC -- ATM SVCs + | AF_IRDA -- IRDA sockets + | AF_PPPOX -- PPPoX sockets + | AF_WANPIPE -- Wanpipe API sockets + | AF_BLUETOOTH -- bluetooth sockets + | AF_CAN -- Controller Area Network + deriving (Eq, Ord, Read, Show) + +packFamily :: Family -> CInt +packFamily f = case packFamily' f of + Just fam -> fam + Nothing -> error $ + "Network.Socket.packFamily: unsupported address family: " ++ + show f + +-- | Does the AF_ constant corresponding to the given family exist on this +-- system? +isSupportedFamily :: Family -> Bool +isSupportedFamily = isJust . 
packFamily' + +packFamily' :: Family -> Maybe CInt +packFamily' f = case Just f of + -- the Just above is to disable GHC's overlapping pattern + -- detection: see comments for packSocketOption + Just AF_UNSPEC -> Just #const AF_UNSPEC +#ifdef AF_UNIX + Just AF_UNIX -> Just #const AF_UNIX +#endif +#ifdef AF_INET + Just AF_INET -> Just #const AF_INET +#endif +#ifdef AF_INET6 + Just AF_INET6 -> Just #const AF_INET6 +#endif +#ifdef AF_IMPLINK + Just AF_IMPLINK -> Just #const AF_IMPLINK +#endif +#ifdef AF_PUP + Just AF_PUP -> Just #const AF_PUP +#endif +#ifdef AF_CHAOS + Just AF_CHAOS -> Just #const AF_CHAOS +#endif +#ifdef AF_NS + Just AF_NS -> Just #const AF_NS +#endif +#ifdef AF_NBS + Just AF_NBS -> Just #const AF_NBS +#endif +#ifdef AF_ECMA + Just AF_ECMA -> Just #const AF_ECMA +#endif +#ifdef AF_DATAKIT + Just AF_DATAKIT -> Just #const AF_DATAKIT +#endif +#ifdef AF_CCITT + Just AF_CCITT -> Just #const AF_CCITT +#endif +#ifdef AF_SNA + Just AF_SNA -> Just #const AF_SNA +#endif +#ifdef AF_DECnet + Just AF_DECnet -> Just #const AF_DECnet +#endif +#ifdef AF_DLI + Just AF_DLI -> Just #const AF_DLI +#endif +#ifdef AF_LAT + Just AF_LAT -> Just #const AF_LAT +#endif +#ifdef AF_HYLINK + Just AF_HYLINK -> Just #const AF_HYLINK +#endif +#ifdef AF_APPLETALK + Just AF_APPLETALK -> Just #const AF_APPLETALK +#endif +#ifdef AF_ROUTE + Just AF_ROUTE -> Just #const AF_ROUTE +#endif +#ifdef AF_NETBIOS + Just AF_NETBIOS -> Just #const AF_NETBIOS +#endif +#ifdef AF_NIT + Just AF_NIT -> Just #const AF_NIT +#endif +#ifdef AF_802 + Just AF_802 -> Just #const AF_802 +#endif +#ifdef AF_ISO + Just AF_ISO -> Just #const AF_ISO +#endif +#ifdef AF_OSI + Just AF_OSI -> Just #const AF_OSI +#endif +#ifdef AF_NETMAN + Just AF_NETMAN -> Just #const AF_NETMAN +#endif +#ifdef AF_X25 + Just AF_X25 -> Just #const AF_X25 +#endif +#ifdef AF_AX25 + Just AF_AX25 -> Just #const AF_AX25 +#endif +#ifdef AF_OSINET + Just AF_OSINET -> Just #const AF_OSINET +#endif +#ifdef AF_GOSSIP + Just AF_GOSSIP -> Just 
#const AF_GOSSIP +#endif +#ifdef AF_IPX + Just AF_IPX -> Just #const AF_IPX +#endif +#ifdef Pseudo_AF_XTP + Just Pseudo_AF_XTP -> Just #const Pseudo_AF_XTP +#endif +#ifdef AF_CTF + Just AF_CTF -> Just #const AF_CTF +#endif +#ifdef AF_WAN + Just AF_WAN -> Just #const AF_WAN +#endif +#ifdef AF_SDL + Just AF_SDL -> Just #const AF_SDL +#endif +#ifdef AF_NETWARE + Just AF_NETWARE -> Just #const AF_NETWARE +#endif +#ifdef AF_NDD + Just AF_NDD -> Just #const AF_NDD +#endif +#ifdef AF_INTF + Just AF_INTF -> Just #const AF_INTF +#endif +#ifdef AF_COIP + Just AF_COIP -> Just #const AF_COIP +#endif +#ifdef AF_CNT + Just AF_CNT -> Just #const AF_CNT +#endif +#ifdef Pseudo_AF_RTIP + Just Pseudo_AF_RTIP -> Just #const Pseudo_AF_RTIP +#endif +#ifdef Pseudo_AF_PIP + Just Pseudo_AF_PIP -> Just #const Pseudo_AF_PIP +#endif +#ifdef AF_SIP + Just AF_SIP -> Just #const AF_SIP +#endif +#ifdef AF_ISDN + Just AF_ISDN -> Just #const AF_ISDN +#endif +#ifdef Pseudo_AF_KEY + Just Pseudo_AF_KEY -> Just #const Pseudo_AF_KEY +#endif +#ifdef AF_NATM + Just AF_NATM -> Just #const AF_NATM +#endif +#ifdef AF_ARP + Just AF_ARP -> Just #const AF_ARP +#endif +#ifdef Pseudo_AF_HDRCMPLT + Just Pseudo_AF_HDRCMPLT -> Just #const Pseudo_AF_HDRCMPLT +#endif +#ifdef AF_ENCAP + Just AF_ENCAP -> Just #const AF_ENCAP +#endif +#ifdef AF_LINK + Just AF_LINK -> Just #const AF_LINK +#endif +#ifdef AF_RAW + Just AF_RAW -> Just #const AF_RAW +#endif +#ifdef AF_RIF + Just AF_RIF -> Just #const AF_RIF +#endif +#ifdef AF_NETROM + Just AF_NETROM -> Just #const AF_NETROM +#endif +#ifdef AF_BRIDGE + Just AF_BRIDGE -> Just #const AF_BRIDGE +#endif +#ifdef AF_ATMPVC + Just AF_ATMPVC -> Just #const AF_ATMPVC +#endif +#ifdef AF_ROSE + Just AF_ROSE -> Just #const AF_ROSE +#endif +#ifdef AF_NETBEUI + Just AF_NETBEUI -> Just #const AF_NETBEUI +#endif +#ifdef AF_SECURITY + Just AF_SECURITY -> Just #const AF_SECURITY +#endif +#ifdef AF_PACKET + Just AF_PACKET -> Just #const AF_PACKET +#endif +#ifdef AF_ASH + Just AF_ASH -> Just 
#const AF_ASH +#endif +#ifdef AF_ECONET + Just AF_ECONET -> Just #const AF_ECONET +#endif +#ifdef AF_ATMSVC + Just AF_ATMSVC -> Just #const AF_ATMSVC +#endif +#ifdef AF_IRDA + Just AF_IRDA -> Just #const AF_IRDA +#endif +#ifdef AF_PPPOX + Just AF_PPPOX -> Just #const AF_PPPOX +#endif +#ifdef AF_WANPIPE + Just AF_WANPIPE -> Just #const AF_WANPIPE +#endif +#ifdef AF_BLUETOOTH + Just AF_BLUETOOTH -> Just #const AF_BLUETOOTH +#endif +#ifdef AF_CAN + Just AF_CAN -> Just #const AF_CAN +#endif + _ -> Nothing + +--------- ---------- + +unpackFamily :: CInt -> Family +unpackFamily f = case f of + (#const AF_UNSPEC) -> AF_UNSPEC +#ifdef AF_UNIX + (#const AF_UNIX) -> AF_UNIX +#endif +#ifdef AF_INET + (#const AF_INET) -> AF_INET +#endif +#ifdef AF_INET6 + (#const AF_INET6) -> AF_INET6 +#endif +#ifdef AF_IMPLINK + (#const AF_IMPLINK) -> AF_IMPLINK +#endif +#ifdef AF_PUP + (#const AF_PUP) -> AF_PUP +#endif +#ifdef AF_CHAOS + (#const AF_CHAOS) -> AF_CHAOS +#endif +#ifdef AF_NS + (#const AF_NS) -> AF_NS +#endif +#ifdef AF_NBS + (#const AF_NBS) -> AF_NBS +#endif +#ifdef AF_ECMA + (#const AF_ECMA) -> AF_ECMA +#endif +#ifdef AF_DATAKIT + (#const AF_DATAKIT) -> AF_DATAKIT +#endif +#ifdef AF_CCITT + (#const AF_CCITT) -> AF_CCITT +#endif +#ifdef AF_SNA + (#const AF_SNA) -> AF_SNA +#endif +#ifdef AF_DECnet + (#const AF_DECnet) -> AF_DECnet +#endif +#ifdef AF_DLI + (#const AF_DLI) -> AF_DLI +#endif +#ifdef AF_LAT + (#const AF_LAT) -> AF_LAT +#endif +#ifdef AF_HYLINK + (#const AF_HYLINK) -> AF_HYLINK +#endif +#ifdef AF_APPLETALK + (#const AF_APPLETALK) -> AF_APPLETALK +#endif +#ifdef AF_ROUTE + (#const AF_ROUTE) -> AF_ROUTE +#endif +#ifdef AF_NETBIOS + (#const AF_NETBIOS) -> AF_NETBIOS +#endif +#ifdef AF_NIT + (#const AF_NIT) -> AF_NIT +#endif +#ifdef AF_802 + (#const AF_802) -> AF_802 +#endif +#ifdef AF_ISO + (#const AF_ISO) -> AF_ISO +#endif +#ifdef AF_OSI +# if (!defined(AF_ISO)) || (defined(AF_ISO) && (AF_ISO != AF_OSI)) + (#const AF_OSI) -> AF_OSI +# endif +#endif +#ifdef AF_NETMAN + 
(#const AF_NETMAN) -> AF_NETMAN +#endif +#ifdef AF_X25 + (#const AF_X25) -> AF_X25 +#endif +#ifdef AF_AX25 + (#const AF_AX25) -> AF_AX25 +#endif +#ifdef AF_OSINET + (#const AF_OSINET) -> AF_OSINET +#endif +#ifdef AF_GOSSIP + (#const AF_GOSSIP) -> AF_GOSSIP +#endif +#if defined(AF_IPX) && (!defined(AF_NS) || AF_NS != AF_IPX) + (#const AF_IPX) -> AF_IPX +#endif +#ifdef Pseudo_AF_XTP + (#const Pseudo_AF_XTP) -> Pseudo_AF_XTP +#endif +#ifdef AF_CTF + (#const AF_CTF) -> AF_CTF +#endif +#ifdef AF_WAN + (#const AF_WAN) -> AF_WAN +#endif +#ifdef AF_SDL + (#const AF_SDL) -> AF_SDL +#endif +#ifdef AF_NETWARE + (#const AF_NETWARE) -> AF_NETWARE +#endif +#ifdef AF_NDD + (#const AF_NDD) -> AF_NDD +#endif +#ifdef AF_INTF + (#const AF_INTF) -> AF_INTF +#endif +#ifdef AF_COIP + (#const AF_COIP) -> AF_COIP +#endif +#ifdef AF_CNT + (#const AF_CNT) -> AF_CNT +#endif +#ifdef Pseudo_AF_RTIP + (#const Pseudo_AF_RTIP) -> Pseudo_AF_RTIP +#endif +#ifdef Pseudo_AF_PIP + (#const Pseudo_AF_PIP) -> Pseudo_AF_PIP +#endif +#ifdef AF_SIP + (#const AF_SIP) -> AF_SIP +#endif +#ifdef AF_ISDN + (#const AF_ISDN) -> AF_ISDN +#endif +#ifdef Pseudo_AF_KEY + (#const Pseudo_AF_KEY) -> Pseudo_AF_KEY +#endif +#ifdef AF_NATM + (#const AF_NATM) -> AF_NATM +#endif +#ifdef AF_ARP + (#const AF_ARP) -> AF_ARP +#endif +#ifdef Pseudo_AF_HDRCMPLT + (#const Pseudo_AF_HDRCMPLT) -> Pseudo_AF_HDRCMPLT +#endif +#ifdef AF_ENCAP + (#const AF_ENCAP) -> AF_ENCAP +#endif +#ifdef AF_LINK + (#const AF_LINK) -> AF_LINK +#endif +#ifdef AF_RAW + (#const AF_RAW) -> AF_RAW +#endif +#ifdef AF_RIF + (#const AF_RIF) -> AF_RIF +#endif +#ifdef AF_NETROM + (#const AF_NETROM) -> AF_NETROM +#endif +#ifdef AF_BRIDGE + (#const AF_BRIDGE) -> AF_BRIDGE +#endif +#ifdef AF_ATMPVC + (#const AF_ATMPVC) -> AF_ATMPVC +#endif +#ifdef AF_ROSE + (#const AF_ROSE) -> AF_ROSE +#endif +#ifdef AF_NETBEUI + (#const AF_NETBEUI) -> AF_NETBEUI +#endif +#ifdef AF_SECURITY + (#const AF_SECURITY) -> AF_SECURITY +#endif +#ifdef AF_PACKET + (#const AF_PACKET) -> 
AF_PACKET +#endif +#ifdef AF_ASH + (#const AF_ASH) -> AF_ASH +#endif +#ifdef AF_ECONET + (#const AF_ECONET) -> AF_ECONET +#endif +#ifdef AF_ATMSVC + (#const AF_ATMSVC) -> AF_ATMSVC +#endif +#ifdef AF_IRDA + (#const AF_IRDA) -> AF_IRDA +#endif +#ifdef AF_PPPOX + (#const AF_PPPOX) -> AF_PPPOX +#endif +#ifdef AF_WANPIPE + (#const AF_WANPIPE) -> AF_WANPIPE +#endif +#ifdef AF_BLUETOOTH + (#const AF_BLUETOOTH) -> AF_BLUETOOTH +#endif +#ifdef AF_CAN + (#const AF_CAN) -> AF_CAN +#endif + unknown -> error ("Network.Socket.unpackFamily: unknown address " ++ + "family " ++ show unknown) + +------------------------------------------------------------------------ +-- Port Numbers + +-- | Use the @Num@ instance (i.e. use a literal) to create a +-- @PortNumber@ value with the correct network-byte-ordering. You +-- should not use the PortNum constructor. It will be removed in the +-- next release. +newtype PortNumber = PortNum Word16 deriving (Eq, Ord, Typeable) +-- newtyped to prevent accidental use of sane-looking +-- port numbers that haven't actually been converted to +-- network-byte-order first. + +{-# DEPRECATED PortNum "Do not use the PortNum constructor. Use the Num instance. PortNum will be removed in the next release." #-} + +instance Show PortNumber where + showsPrec p pn = showsPrec p (portNumberToInt pn) + +intToPortNumber :: Int -> PortNumber +intToPortNumber v = PortNum (htons (fromIntegral v)) + +portNumberToInt :: PortNumber -> Int +portNumberToInt (PortNum po) = fromIntegral (ntohs po) + +foreign import CALLCONV unsafe "ntohs" ntohs :: Word16 -> Word16 +foreign import CALLCONV unsafe "htons" htons :: Word16 -> Word16 +--foreign import CALLCONV unsafe "ntohl" ntohl :: Word32 -> Word32 + +instance Enum PortNumber where + toEnum = intToPortNumber + fromEnum = portNumberToInt + +instance Num PortNumber where + fromInteger i = intToPortNumber (fromInteger i) + -- for completeness. 
+ (+) x y = intToPortNumber (portNumberToInt x + portNumberToInt y) + (-) x y = intToPortNumber (portNumberToInt x - portNumberToInt y) + negate x = intToPortNumber (-portNumberToInt x) + (*) x y = intToPortNumber (portNumberToInt x * portNumberToInt y) + abs n = intToPortNumber (abs (portNumberToInt n)) + signum n = intToPortNumber (signum (portNumberToInt n)) + +instance Real PortNumber where + toRational x = toInteger x % 1 + +instance Integral PortNumber where + quotRem a b = let (c,d) = quotRem (portNumberToInt a) (portNumberToInt b) in + (intToPortNumber c, intToPortNumber d) + toInteger a = toInteger (portNumberToInt a) + +instance Storable PortNumber where + sizeOf _ = sizeOf (undefined :: Word16) + alignment _ = alignment (undefined :: Word16) + poke p (PortNum po) = poke (castPtr p) po + peek p = PortNum `liftM` peek (castPtr p) + +------------------------------------------------------------------------ +-- Socket addresses + +-- The scheme used for addressing sockets is somewhat quirky. The +-- calls in the BSD socket API that need to know the socket address +-- all operate in terms of struct sockaddr, a `virtual' type of +-- socket address. + +-- The Internet family of sockets are addressed as struct sockaddr_in, +-- so when calling functions that operate on struct sockaddr, we have +-- to type cast the Internet socket address into a struct sockaddr. +-- Instances of the structure for different families might *not* be +-- the same size. Same casting is required of other families of +-- sockets such as Xerox NS. Similarly for Unix domain sockets. + +-- To represent these socket addresses in Haskell-land, we do what BSD +-- didn't do, and use a union/algebraic type for the different +-- families. Currently only Unix domain sockets and the Internet +-- families are supported. 
+ +#if defined(IPV6_SOCKET_SUPPORT) +type FlowInfo = Word32 +type ScopeID = Word32 +#endif + +-- | The existence of a constructor does not necessarily imply that +-- that socket address type is supported on your system: see +-- 'isSupportedSockAddr'. +data SockAddr -- C Names + = SockAddrInet + PortNumber -- sin_port (network byte order) + HostAddress -- sin_addr (ditto) + | SockAddrInet6 + PortNumber -- sin6_port (network byte order) + FlowInfo -- sin6_flowinfo (ditto) + HostAddress6 -- sin6_addr (ditto) + ScopeID -- sin6_scope_id (ditto) + | SockAddrUnix + String -- sun_path + | SockAddrCan + Int32 -- can_ifindex (can be get by Network.BSD.ifNameToIndex "can0") + -- TODO: Extend this to include transport protocol information + deriving (Eq, Ord, Typeable) + +-- | Is the socket address type supported on this system? +isSupportedSockAddr :: SockAddr -> Bool +isSupportedSockAddr addr = case addr of + SockAddrInet {} -> True +#if defined(IPV6_SOCKET_SUPPORT) + SockAddrInet6 {} -> True +#endif +#if defined(DOMAIN_SOCKET_SUPPORT) + SockAddrUnix{} -> True +#endif +#if defined(CAN_SOCKET_SUPPORT) + SockAddrCan{} -> True +#endif + _ -> False + +#if defined(WITH_WINSOCK) || defined(cygwin32_HOST_OS) +type CSaFamily = (#type unsigned short) +#elif defined(darwin_HOST_OS) +type CSaFamily = (#type u_char) +#else +type CSaFamily = (#type sa_family_t) +#endif + +-- | Computes the storage requirements (in bytes) of the given +-- 'SockAddr'. This function differs from 'Foreign.Storable.sizeOf' +-- in that the value of the argument /is/ used. 
+sizeOfSockAddr :: SockAddr -> Int +#if defined(DOMAIN_SOCKET_SUPPORT) +sizeOfSockAddr (SockAddrUnix path) = + case path of + '\0':_ -> (#const sizeof(sa_family_t)) + length path + _ -> #const sizeof(struct sockaddr_un) +#endif +sizeOfSockAddr (SockAddrInet _ _) = #const sizeof(struct sockaddr_in) +#if defined(IPV6_SOCKET_SUPPORT) +sizeOfSockAddr (SockAddrInet6 _ _ _ _) = #const sizeof(struct sockaddr_in6) +#endif +#if defined(CAN_SOCKET_SUPPORT) +sizeOfSockAddr (SockAddrCan _) = #const sizeof(struct sockaddr_can) +#endif + +-- | Computes the storage requirements (in bytes) required for a +-- 'SockAddr' with the given 'Family'. +sizeOfSockAddrByFamily :: Family -> Int +#if defined(DOMAIN_SOCKET_SUPPORT) +sizeOfSockAddrByFamily AF_UNIX = #const sizeof(struct sockaddr_un) +#endif +#if defined(IPV6_SOCKET_SUPPORT) +sizeOfSockAddrByFamily AF_INET6 = #const sizeof(struct sockaddr_in6) +#endif +sizeOfSockAddrByFamily AF_INET = #const sizeof(struct sockaddr_in) +#if defined(CAN_SOCKET_SUPPORT) +sizeOfSockAddrByFamily AF_CAN = #const sizeof(struct sockaddr_can) +#endif + +-- | Use a 'SockAddr' with a function requiring a pointer to a +-- 'SockAddr' and the length of that 'SockAddr'. +withSockAddr :: SockAddr -> (Ptr SockAddr -> Int -> IO a) -> IO a +withSockAddr addr f = do + let sz = sizeOfSockAddr addr + allocaBytes sz $ \p -> pokeSockAddr p addr >> f (castPtr p) sz + +-- | Create a new 'SockAddr' for use with a function requiring a +-- pointer to a 'SockAddr' and the length of that 'SockAddr'. +withNewSockAddr :: Family -> (Ptr SockAddr -> Int -> IO a) -> IO a +withNewSockAddr family f = do + let sz = sizeOfSockAddrByFamily family + allocaBytes sz $ \ptr -> f ptr sz + +-- We can't write an instance of 'Storable' for 'SockAddr' because +-- @sockaddr@ is a sum type of variable size but +-- 'Foreign.Storable.sizeOf' is required to be constant. + +-- Note that on Darwin, the sockaddr structure must be zeroed before +-- use. 
+ +-- | Write the given 'SockAddr' to the given memory location. +pokeSockAddr :: Ptr a -> SockAddr -> IO () +#if defined(DOMAIN_SOCKET_SUPPORT) +pokeSockAddr p (SockAddrUnix path) = do +#if defined(darwin_HOST_OS) + zeroMemory p (#const sizeof(struct sockaddr_un)) +#endif +#if defined(HAVE_STRUCT_SOCKADDR_SA_LEN) + (#poke struct sockaddr_un, sun_len) p ((#const sizeof(struct sockaddr_un)) :: Word8) +#endif + (#poke struct sockaddr_un, sun_family) p ((#const AF_UNIX) :: CSaFamily) + let pathC = map castCharToCChar path + poker = case path of ('\0':_) -> pokeArray; _ -> pokeArray0 0 + poker ((#ptr struct sockaddr_un, sun_path) p) pathC +#endif +pokeSockAddr p (SockAddrInet (PortNum port) addr) = do +#if defined(darwin_HOST_OS) + zeroMemory p (#const sizeof(struct sockaddr_in)) +#endif +#if defined(HAVE_STRUCT_SOCKADDR_SA_LEN) + (#poke struct sockaddr_in, sin_len) p ((#const sizeof(struct sockaddr_in)) :: Word8) +#endif + (#poke struct sockaddr_in, sin_family) p ((#const AF_INET) :: CSaFamily) + (#poke struct sockaddr_in, sin_port) p port + (#poke struct sockaddr_in, sin_addr) p addr +#if defined(IPV6_SOCKET_SUPPORT) +pokeSockAddr p (SockAddrInet6 (PortNum port) flow addr scope) = do +#if defined(darwin_HOST_OS) + zeroMemory p (#const sizeof(struct sockaddr_in6)) +#endif +#if defined(HAVE_STRUCT_SOCKADDR_SA_LEN) + (#poke struct sockaddr_in6, sin6_len) p ((#const sizeof(struct sockaddr_in6)) :: Word8) +#endif + (#poke struct sockaddr_in6, sin6_family) p ((#const AF_INET6) :: CSaFamily) + (#poke struct sockaddr_in6, sin6_port) p port + (#poke struct sockaddr_in6, sin6_flowinfo) p flow + (#poke struct sockaddr_in6, sin6_addr) p addr + (#poke struct sockaddr_in6, sin6_scope_id) p scope +#endif +#if defined(CAN_SOCKET_SUPPORT) +pokeSockAddr p (SockAddrCan ifIndex) = do +#if defined(darwin_HOST_OS) + zeroMemory p (#const sizeof(struct sockaddr_can)) +#endif + (#poke struct sockaddr_can, can_ifindex) p ifIndex +#endif + +-- | Read a 'SockAddr' from the given memory 
location. +peekSockAddr :: Ptr SockAddr -> IO SockAddr +peekSockAddr p = do + family <- (#peek struct sockaddr, sa_family) p + case family :: CSaFamily of +#if defined(DOMAIN_SOCKET_SUPPORT) + (#const AF_UNIX) -> do + str <- peekCString ((#ptr struct sockaddr_un, sun_path) p) + return (SockAddrUnix str) +#endif + (#const AF_INET) -> do + addr <- (#peek struct sockaddr_in, sin_addr) p + port <- (#peek struct sockaddr_in, sin_port) p + return (SockAddrInet (PortNum port) addr) +#if defined(IPV6_SOCKET_SUPPORT) + (#const AF_INET6) -> do + port <- (#peek struct sockaddr_in6, sin6_port) p + flow <- (#peek struct sockaddr_in6, sin6_flowinfo) p + addr <- (#peek struct sockaddr_in6, sin6_addr) p + scope <- (#peek struct sockaddr_in6, sin6_scope_id) p + return (SockAddrInet6 (PortNum port) flow addr scope) +#endif +#if defined(CAN_SOCKET_SUPPORT) + (#const AF_CAN) -> do + ifidx <- (#peek struct sockaddr_can, can_ifindex) p + return (SockAddrCan ifidx) +#endif + +------------------------------------------------------------------------ + +-- | Network byte order. +type HostAddress = Word32 + +#if defined(IPV6_SOCKET_SUPPORT) +-- | Host byte order. +type HostAddress6 = (Word32, Word32, Word32, Word32) + +-- The peek32 and poke32 functions work around the fact that the RFCs +-- don't require 32-bit-wide address fields to be present. We can +-- only portably rely on an 8-bit field, s6_addr. + +s6_addr_offset :: Int +s6_addr_offset = (#offset struct in6_addr, s6_addr) + +peek32 :: Ptr a -> Int -> IO Word32 +peek32 p i0 = do + let i' = i0 * 4 + peekByte n = peekByteOff p (s6_addr_offset + i' + n) :: IO Word8 + a `sl` i = fromIntegral a `shiftL` i + a0 <- peekByte 0 + a1 <- peekByte 1 + a2 <- peekByte 2 + a3 <- peekByte 3 + return ((a0 `sl` 24) .|. (a1 `sl` 16) .|. (a2 `sl` 8) .|. 
(a3 `sl` 0)) + +poke32 :: Ptr a -> Int -> Word32 -> IO () +poke32 p i0 a = do + let i' = i0 * 4 + pokeByte n = pokeByteOff p (s6_addr_offset + i' + n) + x `sr` i = fromIntegral (x `shiftR` i) :: Word8 + pokeByte 0 (a `sr` 24) + pokeByte 1 (a `sr` 16) + pokeByte 2 (a `sr` 8) + pokeByte 3 (a `sr` 0) + +instance Storable HostAddress6 where + sizeOf _ = (#const sizeof(struct in6_addr)) + alignment _ = alignment (undefined :: CInt) + + peek p = do + a <- peek32 p 0 + b <- peek32 p 1 + c <- peek32 p 2 + d <- peek32 p 3 + return (a, b, c, d) + + poke p (a, b, c, d) = do + poke32 p 0 a + poke32 p 1 b + poke32 p 2 c + poke32 p 3 d +#endif + +------------------------------------------------------------------------ +-- Helper functions + +foreign import ccall unsafe "string.h" memset :: Ptr a -> CInt -> CSize -> IO () + +-- | Zero a structure. +zeroMemory :: Ptr a -> CSize -> IO () +zeroMemory dest nbytes = memset dest 0 (fromIntegral nbytes) diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/Network/Socket.hsc cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/Network/Socket.hsc --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/Network/Socket.hsc 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/Network/Socket.hsc 2015-07-07 18:40:52.000000000 +0000 @@ -0,0 +1,1645 @@ +{-# LANGUAGE CPP, ScopedTypeVariables #-} +{-# OPTIONS_GHC -fno-warn-orphans #-} +----------------------------------------------------------------------------- +-- | +-- Module : Network.Socket +-- Copyright : (c) The University of Glasgow 2001 +-- License : BSD-style (see the file libraries/network/LICENSE) +-- +-- Maintainer : libraries@haskell.org +-- Stability : provisional +-- Portability : portable +-- +-- The "Network.Socket" module is for when you want full control over +-- sockets. 
Essentially the entire C socket API is exposed through +-- this module; in general the operations follow the behaviour of the C +-- functions of the same name (consult your favourite Unix networking book). +-- +-- A higher level interface to networking operations is provided +-- through the module "Network". +-- +----------------------------------------------------------------------------- + +#include "HsNet.h" + +-- In order to process this file, you need to have CALLCONV defined. + +module Network.Socket + ( + -- * Types + Socket(..) + , Family(..) + , isSupportedFamily + , SocketType(..) + , isSupportedSocketType + , SockAddr(..) + , isSupportedSockAddr + , SocketStatus(..) + , HostAddress +#if defined(IPV6_SOCKET_SUPPORT) + , HostAddress6 + , FlowInfo + , ScopeID +#endif + , ShutdownCmd(..) + , ProtocolNumber + , defaultProtocol + , PortNumber(..) + -- PortNumber is used non-abstractly in Network.BSD. ToDo: remove + -- this use and make the type abstract. + + -- * Address operations + + , HostName + , ServiceName + +#if defined(IPV6_SOCKET_SUPPORT) + , AddrInfo(..) + + , AddrInfoFlag(..) + , addrInfoFlagImplemented + + , defaultHints + + , getAddrInfo + + , NameInfoFlag(..) + + , getNameInfo +#endif + + -- * Socket operations + , socket +#if defined(DOMAIN_SOCKET_SUPPORT) + , socketPair +#endif + , connect + , bind + , listen + , accept + , getPeerName + , getSocketName + +#if defined(HAVE_STRUCT_UCRED) || defined(HAVE_GETPEEREID) + -- get the credentials of our domain socket peer. + , getPeerCred +#if defined(HAVE_GETPEEREID) + , getPeerEid +#endif +#endif + + , socketPort + + , socketToHandle + + -- ** Sending and receiving data + -- $sendrecv + , sendTo + , sendBufTo + + , recvFrom + , recvBufFrom + + , send + , recv + , recvLen + , sendBuf + , recvBuf + + , inet_addr + , inet_ntoa + + , shutdown + , close + + -- ** Predicates on sockets + , isConnected + , isBound + , isListening + , isReadable + , isWritable + + -- * Socket options + , SocketOption(..) 
+ , isSupportedSocketOption + , getSocketOption + , setSocketOption + + -- * File descriptor transmission +#ifdef DOMAIN_SOCKET_SUPPORT + , sendFd + , recvFd + +#endif + + -- * Special constants + , aNY_PORT + , iNADDR_ANY +#if defined(IPV6_SOCKET_SUPPORT) + , iN6ADDR_ANY +#endif + , sOMAXCONN + , sOL_SOCKET +#ifdef SCM_RIGHTS + , sCM_RIGHTS +#endif + , maxListenQueue + + -- * Initialisation + , withSocketsDo + + -- * Very low level operations + -- in case you ever want to get at the underlying file descriptor.. + , fdSocket + , mkSocket + + -- * Deprecated aliases + -- $deprecated-aliases + , bindSocket + , sClose + , sIsConnected + , sIsBound + , sIsListening + , sIsReadable + , sIsWritable + + -- * Internal + + -- | The following are exported ONLY for use in the BSD module and + -- should not be used anywhere else. + + , packFamily + , unpackFamily + , packSocketType + ) where + +import Data.Bits +import Data.List (delete, foldl') +import Data.Maybe (isJust) +import Data.Word (Word8, Word32) +import Foreign.Ptr (Ptr, castPtr, nullPtr) +import Foreign.Storable (Storable(..)) +import Foreign.C.Error +import Foreign.C.String (CString, withCString, withCStringLen, peekCString, peekCStringLen) +import Foreign.C.Types (CUInt, CChar) +import Foreign.C.Types (CInt(..), CSize(..)) +import Foreign.Marshal.Alloc ( alloca, allocaBytes ) +import Foreign.Marshal.Array ( peekArray ) +import Foreign.Marshal.Utils ( maybeWith, with ) + +import System.IO +import Control.Monad (liftM, when) + +import qualified Control.Exception as E +import Control.Concurrent.MVar +import Data.Typeable +import System.IO.Error + +import GHC.Conc (threadWaitRead, threadWaitWrite) +##if MIN_VERSION_base(4,3,1) +import GHC.Conc (closeFdWith) +##endif +# if defined(mingw32_HOST_OS) +import GHC.Conc (asyncDoProc) +import Foreign (FunPtr) +# endif +import qualified GHC.IO.Device +import GHC.IO.Handle.FD +import GHC.IO.Exception +import GHC.IO +import qualified System.Posix.Internals + +import GHC.IO.FD + 
+import Network.Socket.Internal +import Network.Socket.Types + +-- | Either a host name e.g., @\"haskell.org\"@ or a numeric host +-- address string consisting of a dotted decimal IPv4 address or an +-- IPv6 address e.g., @\"192.168.0.1\"@. +type HostName = String +type ServiceName = String + +-- ---------------------------------------------------------------------------- +-- On Windows, our sockets are not put in non-blocking mode (non-blocking +-- is not supported for regular file descriptors on Windows, and it would +-- be a pain to support it only for sockets). So there are two cases: +-- +-- - the threaded RTS uses safe calls for socket operations to get +-- non-blocking I/O, just like the rest of the I/O library +-- +-- - with the non-threaded RTS, only some operations on sockets will be +-- non-blocking. Reads and writes go through the normal async I/O +-- system. accept() uses asyncDoProc so is non-blocking. A handful +-- of others (recvFrom, sendFd, recvFd) will block all threads - if this +-- is a problem, -threaded is the workaround. +-- +##if defined(mingw32_HOST_OS) +##define SAFE_ON_WIN safe +##else +##define SAFE_ON_WIN unsafe +##endif + +----------------------------------------------------------------------------- +-- Socket types + +#if defined(mingw32_HOST_OS) +socket2FD (MkSocket fd _ _ _ _) = + -- HACK, 1 means True + FD{fdFD = fd,fdIsSocket_ = 1} +#endif + +mkSocket :: CInt + -> Family + -> SocketType + -> ProtocolNumber + -> SocketStatus + -> IO Socket +mkSocket fd fam sType pNum stat = do + mStat <- newMVar stat + withSocketsDo $ return () + return (MkSocket fd fam sType pNum mStat) + + +fdSocket :: Socket -> CInt +fdSocket (MkSocket fd _ _ _ _) = fd + +-- | This is the default protocol for a given service. 
+defaultProtocol :: ProtocolNumber +defaultProtocol = 0 + +----------------------------------------------------------------------------- +-- SockAddr + +instance Show SockAddr where +#if defined(DOMAIN_SOCKET_SUPPORT) + showsPrec _ (SockAddrUnix str) = showString str +#endif + showsPrec _ (SockAddrInet port ha) + = showString (unsafePerformIO (inet_ntoa ha)) + . showString ":" + . shows port +#if defined(IPV6_SOCKET_SUPPORT) + showsPrec _ addr@(SockAddrInet6 port _ _ _) + = showChar '[' + . showString (unsafePerformIO $ + fst `liftM` getNameInfo [NI_NUMERICHOST] True False addr >>= + maybe (fail "showsPrec: impossible internal error") return) + . showString "]:" + . shows port +#endif + +----------------------------------------------------------------------------- +-- Connection Functions + +-- In the following connection and binding primitives. The names of +-- the equivalent C functions have been preserved where possible. It +-- should be noted that some of these names used in the C library, +-- \tr{bind} in particular, have a different meaning to many Haskell +-- programmers and have thus been renamed by appending the prefix +-- Socket. + +-- | Create a new socket using the given address family, socket type +-- and protocol number. The address family is usually 'AF_INET', +-- 'AF_INET6', or 'AF_UNIX'. The socket type is usually 'Stream' or +-- 'Datagram'. The protocol number is usually 'defaultProtocol'. +-- If 'AF_INET6' is used, the 'IPv6Only' socket option is set to 0 +-- so that both IPv4 and IPv6 can be handled with one socket. 
+socket :: Family -- Family Name (usually AF_INET) + -> SocketType -- Socket Type (usually Stream) + -> ProtocolNumber -- Protocol Number (getProtocolByName to find value) + -> IO Socket -- Unconnected Socket +socket family stype protocol = do + c_stype <- packSocketTypeOrThrow "socket" stype + fd <- throwSocketErrorIfMinus1Retry "socket" $ + c_socket (packFamily family) c_stype protocol + setNonBlockIfNeeded fd + socket_status <- newMVar NotConnected + withSocketsDo $ return () + let sock = MkSocket fd family stype protocol socket_status +#if HAVE_DECL_IPV6_V6ONLY +# if defined(mingw32_HOST_OS) + -- the IPv6Only option is only supported on Windows Vista and later, + -- so trying to change it might throw an error + when (family == AF_INET6) $ + E.catch (setSocketOption sock IPv6Only 0) $ (\(_ :: E.IOException) -> return ()) +# else + when (family == AF_INET6) $ setSocketOption sock IPv6Only 0 +# endif +#endif + return sock + +-- | Build a pair of connected socket objects using the given address +-- family, socket type, and protocol number. Address family, socket +-- type, and protocol number are as for the 'socket' function above. +-- Availability: Unix. +#if defined(DOMAIN_SOCKET_SUPPORT) +socketPair :: Family -- Family Name (usually AF_INET or AF_INET6) + -> SocketType -- Socket Type (usually Stream) + -> ProtocolNumber -- Protocol Number + -> IO (Socket, Socket) -- unnamed and connected. 
+socketPair family stype protocol = do + allocaBytes (2 * sizeOf (1 :: CInt)) $ \ fdArr -> do + c_stype <- packSocketTypeOrThrow "socketPair" stype + _rc <- throwSocketErrorIfMinus1Retry "socketpair" $ + c_socketpair (packFamily family) c_stype protocol fdArr + [fd1,fd2] <- peekArray 2 fdArr + s1 <- mkNonBlockingSocket fd1 + s2 <- mkNonBlockingSocket fd2 + return (s1,s2) + where + mkNonBlockingSocket fd = do + setNonBlockIfNeeded fd + stat <- newMVar Connected + withSocketsDo $ return () + return (MkSocket fd family stype protocol stat) + +foreign import ccall unsafe "socketpair" + c_socketpair :: CInt -> CInt -> CInt -> Ptr CInt -> IO CInt +#endif + +-- | Set the socket to nonblocking, if applicable to this platform. +setNonBlockIfNeeded :: CInt -> IO () +setNonBlockIfNeeded fd = + System.Posix.Internals.setNonBlockingFD fd True + +----------------------------------------------------------------------------- +-- Binding a socket + +-- | Bind the socket to an address. The socket must not already be +-- bound. The 'Family' passed to @bind@ must be the +-- same as that passed to 'socket'. If the special port number +-- 'aNY_PORT' is passed then the system assigns the next available +-- use port. +bind :: Socket -- Unconnected Socket + -> SockAddr -- Address to Bind to + -> IO () +bind (MkSocket s _family _stype _protocol socketStatus) addr = do + modifyMVar_ socketStatus $ \ status -> do + if status /= NotConnected + then + ioError (userError ("bind: can't peform bind on socket in status " ++ + show status)) + else do + withSockAddr addr $ \p_addr sz -> do + _status <- throwSocketErrorIfMinus1Retry "bind" $ c_bind s p_addr (fromIntegral sz) + return Bound + +----------------------------------------------------------------------------- +-- Connecting a socket + +-- | Connect to a remote socket at address. 
+connect :: Socket -- Unconnected Socket + -> SockAddr -- Socket address stuff + -> IO () +connect sock@(MkSocket s _family _stype _protocol socketStatus) addr = withSocketsDo $ do + modifyMVar_ socketStatus $ \currentStatus -> do + if currentStatus /= NotConnected && currentStatus /= Bound + then + ioError (userError ("connect: can't peform connect on socket in status " ++ + show currentStatus)) + else do + withSockAddr addr $ \p_addr sz -> do + + let connectLoop = do + r <- c_connect s p_addr (fromIntegral sz) + if r == -1 + then do +#if !(defined(HAVE_WINSOCK2_H) && !defined(cygwin32_HOST_OS)) + err <- getErrno + case () of + _ | err == eINTR -> connectLoop + _ | err == eINPROGRESS -> connectBlocked +-- _ | err == eAGAIN -> connectBlocked + _otherwise -> throwSocketError "connect" +#else + throwSocketError "connect" +#endif + else return () + + connectBlocked = do + threadWaitWrite (fromIntegral s) + err <- getSocketOption sock SoError + if (err == 0) + then return () + else throwSocketErrorCode "connect" (fromIntegral err) + + connectLoop + return Connected + +----------------------------------------------------------------------------- +-- Listen + +-- | Listen for connections made to the socket. The second argument +-- specifies the maximum number of queued connections and should be at +-- least 1; the maximum value is system-dependent (usually 5). 
+listen :: Socket -- Connected & Bound Socket + -> Int -- Queue Length + -> IO () +listen (MkSocket s _family _stype _protocol socketStatus) backlog = do + modifyMVar_ socketStatus $ \ status -> do + if status /= Bound + then + ioError (userError ("listen: can't peform listen on socket in status " ++ + show status)) + else do + throwSocketErrorIfMinus1Retry_ "listen" (c_listen s (fromIntegral backlog)) + return Listening + +----------------------------------------------------------------------------- +-- Accept +-- +-- A call to `accept' only returns when data is available on the given +-- socket, unless the socket has been set to non-blocking. It will +-- return a new socket which should be used to read the incoming data and +-- should then be closed. Using the socket returned by `accept' allows +-- incoming requests to be queued on the original socket. + +-- | Accept a connection. The socket must be bound to an address and +-- listening for connections. The return value is a pair @(conn, +-- address)@ where @conn@ is a new socket object usable to send and +-- receive data on the connection, and @address@ is the address bound +-- to the socket on the other end of the connection. 
+accept :: Socket -- Queue Socket + -> IO (Socket, -- Readable Socket + SockAddr) -- Peer details + +accept sock@(MkSocket s family stype protocol status) = do + currentStatus <- readMVar status + okay <- isAcceptable sock + if not okay + then + ioError (userError ("accept: can't perform accept on socket (" ++ (show (family,stype,protocol)) ++") in status " ++ + show currentStatus)) + else do + let sz = sizeOfSockAddrByFamily family + allocaBytes sz $ \ sockaddr -> do +#if defined(mingw32_HOST_OS) + new_sock <- + if threaded + then with (fromIntegral sz) $ \ ptr_len -> + throwSocketErrorIfMinus1Retry "Network.Socket.accept" $ + c_accept_safe s sockaddr ptr_len + else do + paramData <- c_newAcceptParams s (fromIntegral sz) sockaddr + rc <- asyncDoProc c_acceptDoProc paramData + new_sock <- c_acceptNewSock paramData + c_free paramData + when (rc /= 0) $ + throwSocketErrorCode "Network.Socket.accept" (fromIntegral rc) + return new_sock +#else + with (fromIntegral sz) $ \ ptr_len -> do +# ifdef HAVE_ACCEPT4 + new_sock <- throwSocketErrorIfMinus1RetryMayBlock "accept" + (threadWaitRead (fromIntegral s)) + (c_accept4 s sockaddr ptr_len (#const SOCK_NONBLOCK)) +# else + new_sock <- throwSocketErrorWaitRead sock "accept" + (c_accept s sockaddr ptr_len) + setNonBlockIfNeeded new_sock +# endif /* HAVE_ACCEPT4 */ +#endif + addr <- peekSockAddr sockaddr + new_status <- newMVar Connected + return ((MkSocket new_sock family stype protocol new_status), addr) + +#if defined(mingw32_HOST_OS) +foreign import ccall unsafe "HsNet.h acceptNewSock" + c_acceptNewSock :: Ptr () -> IO CInt +foreign import ccall unsafe "HsNet.h newAcceptParams" + c_newAcceptParams :: CInt -> CInt -> Ptr a -> IO (Ptr ()) +foreign import ccall unsafe "HsNet.h &acceptDoProc" + c_acceptDoProc :: FunPtr (Ptr () -> IO Int) +foreign import ccall unsafe "free" + c_free:: Ptr a -> IO () +#endif + +----------------------------------------------------------------------------- +-- ** Sending and reciving data + +-- 
$sendrecv +-- +-- Do not use the @send@ and @recv@ functions defined in this module +-- in new code, as they incorrectly represent binary data as a Unicode +-- string. As a result, these functions are inefficient and may lead +-- to bugs in the program. Instead use the @send@ and @recv@ +-- functions defined in the 'Network.Socket.ByteString' module. + +----------------------------------------------------------------------------- +-- sendTo & recvFrom + +-- | Send data to the socket. The recipient can be specified +-- explicitly, so the socket need not be in a connected state. +-- Returns the number of bytes sent. Applications are responsible for +-- ensuring that all data has been sent. +-- +-- NOTE: blocking on Windows unless you compile with -threaded (see +-- GHC ticket #1129) +sendTo :: Socket -- (possibly) bound/connected Socket + -> String -- Data to send + -> SockAddr + -> IO Int -- Number of Bytes sent +sendTo sock xs addr = do + withCStringLen xs $ \(str, len) -> do + sendBufTo sock str len addr + +-- | Send data to the socket. The recipient can be specified +-- explicitly, so the socket need not be in a connected state. +-- Returns the number of bytes sent. Applications are responsible for +-- ensuring that all data has been sent. +sendBufTo :: Socket -- (possibly) bound/connected Socket + -> Ptr a -> Int -- Data to send + -> SockAddr + -> IO Int -- Number of Bytes sent +sendBufTo sock@(MkSocket s _family _stype _protocol _status) ptr nbytes addr = do + withSockAddr addr $ \p_addr sz -> do + liftM fromIntegral $ + throwSocketErrorWaitWrite sock "sendTo" $ + c_sendto s ptr (fromIntegral $ nbytes) 0{-flags-} + p_addr (fromIntegral sz) + +-- | Receive data from the socket. The socket need not be in a +-- connected state. Returns @(bytes, nbytes, address)@ where @bytes@ +-- is a @String@ of length @nbytes@ representing the data received and +-- @address@ is a 'SockAddr' representing the address of the sending +-- socket. 
+-- +-- NOTE: blocking on Windows unless you compile with -threaded (see +-- GHC ticket #1129) +recvFrom :: Socket -> Int -> IO (String, Int, SockAddr) +recvFrom sock nbytes = + allocaBytes nbytes $ \ptr -> do + (len, sockaddr) <- recvBufFrom sock ptr nbytes + str <- peekCStringLen (ptr, len) + return (str, len, sockaddr) + +-- | Receive data from the socket, writing it into buffer instead of +-- creating a new string. The socket need not be in a connected +-- state. Returns @(nbytes, address)@ where @nbytes@ is the number of +-- bytes received and @address@ is a 'SockAddr' representing the +-- address of the sending socket. +-- +-- NOTE: blocking on Windows unless you compile with -threaded (see +-- GHC ticket #1129) +recvBufFrom :: Socket -> Ptr a -> Int -> IO (Int, SockAddr) +recvBufFrom sock@(MkSocket s family _stype _protocol _status) ptr nbytes + | nbytes <= 0 = ioError (mkInvalidRecvArgError "Network.Socket.recvFrom") + | otherwise = + withNewSockAddr family $ \ptr_addr sz -> do + alloca $ \ptr_len -> do + poke ptr_len (fromIntegral sz) + len <- throwSocketErrorWaitRead sock "recvFrom" $ + c_recvfrom s ptr (fromIntegral nbytes) 0{-flags-} + ptr_addr ptr_len + let len' = fromIntegral len + if len' == 0 + then ioError (mkEOFError "Network.Socket.recvFrom") + else do + flg <- isConnected sock + -- For at least one implementation (WinSock 2), recvfrom() ignores + -- filling in the sockaddr for connected TCP sockets. Cope with + -- this by using getPeerName instead. + sockaddr <- + if flg then + getPeerName sock + else + peekSockAddr ptr_addr + return (len', sockaddr) + +----------------------------------------------------------------------------- +-- send & recv + +-- | Send data to the socket. The socket must be connected to a remote +-- socket. Returns the number of bytes sent. Applications are +-- responsible for ensuring that all data has been sent. 
+send :: Socket -- Bound/Connected Socket + -> String -- Data to send + -> IO Int -- Number of Bytes sent +send sock@(MkSocket s _family _stype _protocol _status) xs = do + withCStringLen xs $ \(str, len) -> do + liftM fromIntegral $ +#if defined(mingw32_HOST_OS) + writeRawBufferPtr + "Network.Socket.send" + (socket2FD sock) + (castPtr str) + 0 + (fromIntegral len) +#else + throwSocketErrorWaitWrite sock "send" $ + c_send s str (fromIntegral len) 0{-flags-} +#endif + +-- | Send data to the socket. The socket must be connected to a remote +-- socket. Returns the number of bytes sent. Applications are +-- responsible for ensuring that all data has been sent. +sendBuf :: Socket -- Bound/Connected Socket + -> Ptr Word8 -- Pointer to the data to send + -> Int -- Length of the buffer + -> IO Int -- Number of Bytes sent +sendBuf sock@(MkSocket s _family _stype _protocol _status) str len = do + liftM fromIntegral $ +#if defined(mingw32_HOST_OS) + writeRawBufferPtr + "Network.Socket.sendBuf" + (socket2FD sock) + (castPtr str) + 0 + (fromIntegral len) +#else + throwSocketErrorWaitWrite sock "sendBuf" $ + c_send s str (fromIntegral len) 0{-flags-} +#endif + + +-- | Receive data from the socket. The socket must be in a connected +-- state. This function may return fewer bytes than specified. If the +-- message is longer than the specified length, it may be discarded +-- depending on the type of socket. This function may block until a +-- message arrives. +-- +-- Considering hardware and network realities, the maximum number of +-- bytes to receive should be a small power of 2, e.g., 4096. +-- +-- For TCP sockets, a zero length return value means the peer has +-- closed its half side of the connection. 
+recv :: Socket -> Int -> IO String +recv sock l = recvLen sock l >>= \ (s,_) -> return s + +recvLen :: Socket -> Int -> IO (String, Int) +recvLen sock@(MkSocket s _family _stype _protocol _status) nbytes + | nbytes <= 0 = ioError (mkInvalidRecvArgError "Network.Socket.recv") + | otherwise = do + allocaBytes nbytes $ \ptr -> do + len <- +#if defined(mingw32_HOST_OS) + readRawBufferPtr "Network.Socket.recvLen" (socket2FD sock) ptr 0 + (fromIntegral nbytes) +#else + throwSocketErrorWaitRead sock "recv" $ + c_recv s ptr (fromIntegral nbytes) 0{-flags-} +#endif + let len' = fromIntegral len + if len' == 0 + then ioError (mkEOFError "Network.Socket.recv") + else do + s' <- peekCStringLen (castPtr ptr,len') + return (s', len') + +-- | Receive data from the socket. The socket must be in a connected +-- state. This function may return fewer bytes than specified. If the +-- message is longer than the specified length, it may be discarded +-- depending on the type of socket. This function may block until a +-- message arrives. +-- +-- Considering hardware and network realities, the maximum number of +-- bytes to receive should be a small power of 2, e.g., 4096. +-- +-- For TCP sockets, a zero length return value means the peer has +-- closed its half side of the connection. 
+recvBuf :: Socket -> Ptr Word8 -> Int -> IO Int +recvBuf sock p l = recvLenBuf sock p l + +recvLenBuf :: Socket -> Ptr Word8 -> Int -> IO Int +recvLenBuf sock@(MkSocket s _family _stype _protocol _status) ptr nbytes + | nbytes <= 0 = ioError (mkInvalidRecvArgError "Network.Socket.recvBuf") + | otherwise = do + len <- +#if defined(mingw32_HOST_OS) + readRawBufferPtr "Network.Socket.recvLenBuf" (socket2FD sock) ptr 0 + (fromIntegral nbytes) +#else + throwSocketErrorWaitRead sock "recvBuf" $ + c_recv s (castPtr ptr) (fromIntegral nbytes) 0{-flags-} +#endif + let len' = fromIntegral len + if len' == 0 + then ioError (mkEOFError "Network.Socket.recvBuf") + else return len' + + +-- --------------------------------------------------------------------------- +-- socketPort +-- +-- The port number the given socket is currently connected to can be +-- determined by calling $port$, is generally only useful when bind +-- was given $aNY\_PORT$. + +socketPort :: Socket -- Connected & Bound Socket + -> IO PortNumber -- Port Number of Socket +socketPort sock@(MkSocket _ AF_INET _ _ _) = do + (SockAddrInet port _) <- getSocketName sock + return port +#if defined(IPV6_SOCKET_SUPPORT) +socketPort sock@(MkSocket _ AF_INET6 _ _ _) = do + (SockAddrInet6 port _ _ _) <- getSocketName sock + return port +#endif +socketPort (MkSocket _ family _ _ _) = + ioError (userError ("socketPort: not supported for Family " ++ show family)) + + +-- --------------------------------------------------------------------------- +-- getPeerName + +-- Calling $getPeerName$ returns the address details of the machine, +-- other than the local one, which is connected to the socket. This is +-- used in programs such as FTP to determine where to send the +-- returning data. The corresponding call to get the details of the +-- local machine is $getSocketName$. 
+ +getPeerName :: Socket -> IO SockAddr +getPeerName (MkSocket s family _ _ _) = do + withNewSockAddr family $ \ptr sz -> do + with (fromIntegral sz) $ \int_star -> do + throwSocketErrorIfMinus1Retry_ "getPeerName" $ c_getpeername s ptr int_star + _sz <- peek int_star + peekSockAddr ptr + +getSocketName :: Socket -> IO SockAddr +getSocketName (MkSocket s family _ _ _) = do + withNewSockAddr family $ \ptr sz -> do + with (fromIntegral sz) $ \int_star -> do + throwSocketErrorIfMinus1Retry_ "getSocketName" $ c_getsockname s ptr int_star + peekSockAddr ptr + +----------------------------------------------------------------------------- +-- Socket Properties + +-- | Socket options for use with 'setSocketOption' and 'getSocketOption'. +-- +-- The existence of a constructor does not imply that the relevant option +-- is supported on your system: see 'isSupportedSocketOption' +data SocketOption + = Debug -- ^ SO_DEBUG + | ReuseAddr -- ^ SO_REUSEADDR + | Type -- ^ SO_TYPE + | SoError -- ^ SO_ERROR + | DontRoute -- ^ SO_DONTROUTE + | Broadcast -- ^ SO_BROADCAST + | SendBuffer -- ^ SO_SNDBUF + | RecvBuffer -- ^ SO_RCVBUF + | KeepAlive -- ^ SO_KEEPALIVE + | OOBInline -- ^ SO_OOBINLINE + | TimeToLive -- ^ IP_TTL + | MaxSegment -- ^ TCP_MAXSEG + | NoDelay -- ^ TCP_NODELAY + | Cork -- ^ TCP_CORK + | Linger -- ^ SO_LINGER + | ReusePort -- ^ SO_REUSEPORT + | RecvLowWater -- ^ SO_RCVLOWAT + | SendLowWater -- ^ SO_SNDLOWAT + | RecvTimeOut -- ^ SO_RCVTIMEO + | SendTimeOut -- ^ SO_SNDTIMEO + | UseLoopBack -- ^ SO_USELOOPBACK + | UserTimeout -- ^ TCP_USER_TIMEOUT + | IPv6Only -- ^ IPV6_V6ONLY + | CustomSockOpt (CInt, CInt) + deriving (Show, Typeable) + +-- | Does the 'SocketOption' exist on this system? +isSupportedSocketOption :: SocketOption -> Bool +isSupportedSocketOption = isJust . packSocketOption + +-- | For a socket option, return Just (level, value) where level is the +-- corresponding C option level constant (e.g. SOL_SOCKET) and value is +-- the option constant itself (e.g. 
SO_DEBUG) +-- If either constant does not exist, return Nothing. +packSocketOption :: SocketOption -> Maybe (CInt, CInt) +packSocketOption so = + -- The Just here is a hack to disable GHC's overlapping pattern detection: + -- the problem is if all constants are present, the fallback pattern is + -- redundant, but if they aren't then it isn't. Hence we introduce an + -- extra pattern (Nothing) that can't possibly happen, so that the + -- fallback is always (in principle) necessary. + -- I feel a little bad for including this, but such are the sacrifices we + -- make while working with CPP - excluding the fallback pattern correctly + -- would be a serious nuisance. + -- (NB: comments elsewhere in this file refer to this one) + case Just so of +#ifdef SOL_SOCKET +#ifdef SO_DEBUG + Just Debug -> Just ((#const SOL_SOCKET), (#const SO_DEBUG)) +#endif +#ifdef SO_REUSEADDR + Just ReuseAddr -> Just ((#const SOL_SOCKET), (#const SO_REUSEADDR)) +#endif +#ifdef SO_TYPE + Just Type -> Just ((#const SOL_SOCKET), (#const SO_TYPE)) +#endif +#ifdef SO_ERROR + Just SoError -> Just ((#const SOL_SOCKET), (#const SO_ERROR)) +#endif +#ifdef SO_DONTROUTE + Just DontRoute -> Just ((#const SOL_SOCKET), (#const SO_DONTROUTE)) +#endif +#ifdef SO_BROADCAST + Just Broadcast -> Just ((#const SOL_SOCKET), (#const SO_BROADCAST)) +#endif +#ifdef SO_SNDBUF + Just SendBuffer -> Just ((#const SOL_SOCKET), (#const SO_SNDBUF)) +#endif +#ifdef SO_RCVBUF + Just RecvBuffer -> Just ((#const SOL_SOCKET), (#const SO_RCVBUF)) +#endif +#ifdef SO_KEEPALIVE + Just KeepAlive -> Just ((#const SOL_SOCKET), (#const SO_KEEPALIVE)) +#endif +#ifdef SO_OOBINLINE + Just OOBInline -> Just ((#const SOL_SOCKET), (#const SO_OOBINLINE)) +#endif +#ifdef SO_LINGER + Just Linger -> Just ((#const SOL_SOCKET), (#const SO_LINGER)) +#endif +#ifdef SO_REUSEPORT + Just ReusePort -> Just ((#const SOL_SOCKET), (#const SO_REUSEPORT)) +#endif +#ifdef SO_RCVLOWAT + Just RecvLowWater -> Just ((#const SOL_SOCKET), (#const SO_RCVLOWAT)) 
+#endif +#ifdef SO_SNDLOWAT + Just SendLowWater -> Just ((#const SOL_SOCKET), (#const SO_SNDLOWAT)) +#endif +#ifdef SO_RCVTIMEO + Just RecvTimeOut -> Just ((#const SOL_SOCKET), (#const SO_RCVTIMEO)) +#endif +#ifdef SO_SNDTIMEO + Just SendTimeOut -> Just ((#const SOL_SOCKET), (#const SO_SNDTIMEO)) +#endif +#ifdef SO_USELOOPBACK + Just UseLoopBack -> Just ((#const SOL_SOCKET), (#const SO_USELOOPBACK)) +#endif +#endif // SOL_SOCKET +#if HAVE_DECL_IPPROTO_IP +#ifdef IP_TTL + Just TimeToLive -> Just ((#const IPPROTO_IP), (#const IP_TTL)) +#endif +#endif // HAVE_DECL_IPPROTO_IP +#if HAVE_DECL_IPPROTO_TCP +#ifdef TCP_MAXSEG + Just MaxSegment -> Just ((#const IPPROTO_TCP), (#const TCP_MAXSEG)) +#endif +#ifdef TCP_NODELAY + Just NoDelay -> Just ((#const IPPROTO_TCP), (#const TCP_NODELAY)) +#endif +#ifdef TCP_USER_TIMEOUT + Just UserTimeout -> Just ((#const IPPROTO_TCP), (#const TCP_USER_TIMEOUT)) +#endif +#ifdef TCP_CORK + Just Cork -> Just ((#const IPPROTO_TCP), (#const TCP_CORK)) +#endif +#endif // HAVE_DECL_IPPROTO_TCP +#if HAVE_DECL_IPPROTO_IPV6 +#if HAVE_DECL_IPV6_V6ONLY + Just IPv6Only -> Just ((#const IPPROTO_IPV6), (#const IPV6_V6ONLY)) +#endif +#endif // HAVE_DECL_IPPROTO_IPV6 + Just (CustomSockOpt opt) -> Just opt + _ -> Nothing + +-- | Return the option level and option value if they exist, +-- otherwise throw an error that begins "Network.Socket." ++ the String +-- parameter +packSocketOption' :: String -> SocketOption -> IO (CInt, CInt) +packSocketOption' caller so = maybe err return (packSocketOption so) + where + err = ioError . userError . concat $ ["Network.Socket.", caller, + ": socket option ", show so, " unsupported on this system"] + +-- | Set a socket option that expects an Int value. +-- There is currently no API to set e.g. 
the timeval socket options +setSocketOption :: Socket + -> SocketOption -- Option Name + -> Int -- Option Value + -> IO () +setSocketOption (MkSocket s _ _ _ _) so v = do + (level, opt) <- packSocketOption' "setSocketOption" so + with (fromIntegral v) $ \ptr_v -> do + throwSocketErrorIfMinus1_ "setSocketOption" $ + c_setsockopt s level opt ptr_v + (fromIntegral (sizeOf (undefined :: CInt))) + return () + + +-- | Get a socket option that gives an Int value. +-- There is currently no API to get e.g. the timeval socket options +getSocketOption :: Socket + -> SocketOption -- Option Name + -> IO Int -- Option Value +getSocketOption (MkSocket s _ _ _ _) so = do + (level, opt) <- packSocketOption' "getSocketOption" so + alloca $ \ptr_v -> + with (fromIntegral (sizeOf (undefined :: CInt))) $ \ptr_sz -> do + throwSocketErrorIfMinus1Retry_ "getSocketOption" $ + c_getsockopt s level opt ptr_v ptr_sz + fromIntegral `liftM` peek ptr_v + + +#if defined(HAVE_STRUCT_UCRED) || defined(HAVE_GETPEEREID) +-- | Returns the processID, userID and groupID of the socket's peer. +-- +-- Only available on platforms that support SO_PEERCRED or GETPEEREID(3) +-- on domain sockets. +-- GETPEEREID(3) returns userID and groupID. processID is always 0. 
+getPeerCred :: Socket -> IO (CUInt, CUInt, CUInt) +getPeerCred sock = do +#ifdef HAVE_STRUCT_UCRED + let fd = fdSocket sock + let sz = (fromIntegral (#const sizeof(struct ucred))) + with sz $ \ ptr_cr -> + alloca $ \ ptr_sz -> do + poke ptr_sz sz + throwSocketErrorIfMinus1Retry "getPeerCred" $ + c_getsockopt fd (#const SOL_SOCKET) (#const SO_PEERCRED) ptr_cr ptr_sz + pid <- (#peek struct ucred, pid) ptr_cr + uid <- (#peek struct ucred, uid) ptr_cr + gid <- (#peek struct ucred, gid) ptr_cr + return (pid, uid, gid) +#else + (uid,gid) <- getPeerEid sock + return (0,uid,gid) +#endif + +#ifdef HAVE_GETPEEREID +-- | The getpeereid() function returns the effective user and group IDs of the +-- peer connected to a UNIX-domain socket +getPeerEid :: Socket -> IO (CUInt, CUInt) +getPeerEid sock = do + let fd = fdSocket sock + alloca $ \ ptr_uid -> + alloca $ \ ptr_gid -> do + throwSocketErrorIfMinus1Retry_ "getPeerEid" $ + c_getpeereid fd ptr_uid ptr_gid + uid <- peek ptr_uid + gid <- peek ptr_gid + return (uid, gid) +#endif +#endif + +##if !(MIN_VERSION_base(4,3,1)) +closeFdWith closer fd = closer fd +##endif + +#if defined(DOMAIN_SOCKET_SUPPORT) +-- sending/receiving ancillary socket data; low-level mechanism +-- for transmitting file descriptors, mainly. +sendFd :: Socket -> CInt -> IO () +sendFd sock outfd = do + throwSocketErrorWaitWrite sock "sendFd" $ + c_sendFd (fdSocket sock) outfd + -- Note: If Winsock supported FD-passing, thi would have been + -- incorrect (since socket FDs need to be closed via closesocket().) 
+ closeFd outfd + +recvFd :: Socket -> IO CInt +recvFd sock = do + theFd <- throwSocketErrorWaitRead sock "recvFd" $ + c_recvFd (fdSocket sock) + return theFd + +foreign import ccall SAFE_ON_WIN "sendFd" c_sendFd :: CInt -> CInt -> IO CInt +foreign import ccall SAFE_ON_WIN "recvFd" c_recvFd :: CInt -> IO CInt + +#endif + +-- --------------------------------------------------------------------------- +-- Utility Functions + +aNY_PORT :: PortNumber +aNY_PORT = 0 + +-- | The IPv4 wild card address. + +iNADDR_ANY :: HostAddress +iNADDR_ANY = htonl (#const INADDR_ANY) + +foreign import CALLCONV unsafe "htonl" htonl :: Word32 -> Word32 + +#if defined(IPV6_SOCKET_SUPPORT) +-- | The IPv6 wild card address. + +iN6ADDR_ANY :: HostAddress6 +iN6ADDR_ANY = (0, 0, 0, 0) +#endif + +sOMAXCONN :: Int +sOMAXCONN = #const SOMAXCONN + +sOL_SOCKET :: Int +sOL_SOCKET = #const SOL_SOCKET + +#ifdef SCM_RIGHTS +sCM_RIGHTS :: Int +sCM_RIGHTS = #const SCM_RIGHTS +#endif + +-- | This is the value of SOMAXCONN, typically 128. +-- 128 is good enough for normal network servers but +-- is too small for high performance servers. +maxListenQueue :: Int +maxListenQueue = sOMAXCONN + +-- ----------------------------------------------------------------------------- + +data ShutdownCmd + = ShutdownReceive + | ShutdownSend + | ShutdownBoth + deriving Typeable + +sdownCmdToInt :: ShutdownCmd -> CInt +sdownCmdToInt ShutdownReceive = 0 +sdownCmdToInt ShutdownSend = 1 +sdownCmdToInt ShutdownBoth = 2 + +-- | Shut down one or both halves of the connection, depending on the +-- second argument to the function. If the second argument is +-- 'ShutdownReceive', further receives are disallowed. If it is +-- 'ShutdownSend', further sends are disallowed. If it is +-- 'ShutdownBoth', further sends and receives are disallowed. 
+shutdown :: Socket -> ShutdownCmd -> IO () +shutdown (MkSocket s _ _ _ _) stype = do + throwSocketErrorIfMinus1Retry_ "shutdown" (c_shutdown s (sdownCmdToInt stype)) + return () + +-- ----------------------------------------------------------------------------- + +-- | Close the socket. All future operations on the socket object +-- will fail. The remote end will receive no more data (after queued +-- data is flushed). +close :: Socket -> IO () +close (MkSocket s _ _ _ socketStatus) = do + modifyMVar_ socketStatus $ \ status -> + case status of + ConvertedToHandle -> + ioError (userError ("close: converted to a Handle, use hClose instead")) + Closed -> + return status + _ -> closeFdWith (closeFd . fromIntegral) (fromIntegral s) >> return Closed + +-- ----------------------------------------------------------------------------- + +-- | Determines whether 'close' has been used on the 'Socket'. This +-- does /not/ indicate any status about the socket beyond this. If the +-- socket has been closed remotely, this function can still return +-- 'True'. +isConnected :: Socket -> IO Bool +isConnected (MkSocket _ _ _ _ status) = do + value <- readMVar status + return (value == Connected) + +-- ----------------------------------------------------------------------------- +-- Socket Predicates + +isBound :: Socket -> IO Bool +isBound (MkSocket _ _ _ _ status) = do + value <- readMVar status + return (value == Bound) + +isListening :: Socket -> IO Bool +isListening (MkSocket _ _ _ _ status) = do + value <- readMVar status + return (value == Listening) + +isReadable :: Socket -> IO Bool +isReadable (MkSocket _ _ _ _ status) = do + value <- readMVar status + return (value == Listening || value == Connected) + +isWritable :: Socket -> IO Bool +isWritable = isReadable -- sort of. 
+ +isAcceptable :: Socket -> IO Bool +#if defined(DOMAIN_SOCKET_SUPPORT) +isAcceptable (MkSocket _ AF_UNIX x _ status) + | x == Stream || x == SeqPacket = do + value <- readMVar status + return (value == Connected || value == Bound || value == Listening) +isAcceptable (MkSocket _ AF_UNIX _ _ _) = return False +#endif +isAcceptable (MkSocket _ _ _ _ status) = do + value <- readMVar status + return (value == Connected || value == Listening) + +-- ----------------------------------------------------------------------------- +-- Internet address manipulation routines: + +inet_addr :: String -> IO HostAddress +inet_addr ipstr = withSocketsDo $ do + withCString ipstr $ \str -> do + had <- c_inet_addr str + if had == -1 + then ioError (userError ("inet_addr: Malformed address: " ++ ipstr)) + else return had -- network byte order + +inet_ntoa :: HostAddress -> IO String +inet_ntoa haddr = withSocketsDo $ do + pstr <- c_inet_ntoa haddr + peekCString pstr + +-- | Turns a Socket into an 'Handle'. By default, the new handle is +-- unbuffered. Use 'System.IO.hSetBuffering' to change the buffering. +-- +-- Note that since a 'Handle' is automatically closed by a finalizer +-- when it is no longer referenced, you should avoid doing any more +-- operations on the 'Socket' after calling 'socketToHandle'. To +-- close the 'Socket' after 'socketToHandle', call 'System.IO.hClose' +-- on the 'Handle'. + +socketToHandle :: Socket -> IOMode -> IO Handle +socketToHandle s@(MkSocket fd _ _ _ socketStatus) mode = do + modifyMVar socketStatus $ \ status -> + if status == ConvertedToHandle + then ioError (userError ("socketToHandle: already a Handle")) + else do + h <- fdToHandle' (fromIntegral fd) (Just GHC.IO.Device.Stream) True (show s) mode True{-bin-} + hSetBuffering h NoBuffering + return (ConvertedToHandle, h) + +-- | Pack a list of values into a bitmask. The possible mappings from +-- value to bit-to-set are given as the first argument. 
We assume +-- that each value can cause exactly one bit to be set; unpackBits will +-- break if this property is not true. + +packBits :: (Eq a, Num b, Bits b) => [(a, b)] -> [a] -> b + +packBits mapping xs = foldl' pack 0 mapping + where pack acc (k, v) | k `elem` xs = acc .|. v + | otherwise = acc + +-- | Unpack a bitmask into a list of values. + +unpackBits :: (Num b, Bits b) => [(a, b)] -> b -> [a] + +-- Be permissive and ignore unknown bit values. At least on OS X, +-- getaddrinfo returns an ai_flags field with bits set that have no +-- entry in . +unpackBits [] _ = [] +unpackBits ((k,v):xs) r + | r .&. v /= 0 = k : unpackBits xs (r .&. complement v) + | otherwise = unpackBits xs r + +----------------------------------------------------------------------------- +-- Address and service lookups + +#if defined(IPV6_SOCKET_SUPPORT) + +-- | Flags that control the querying behaviour of 'getAddrInfo'. +data AddrInfoFlag + = AI_ADDRCONFIG + | AI_ALL + | AI_CANONNAME + | AI_NUMERICHOST + | AI_NUMERICSERV + | AI_PASSIVE + | AI_V4MAPPED + deriving (Eq, Read, Show, Typeable) + +aiFlagMapping :: [(AddrInfoFlag, CInt)] + +aiFlagMapping = + [ +#if HAVE_DECL_AI_ADDRCONFIG + (AI_ADDRCONFIG, #const AI_ADDRCONFIG), +#else + (AI_ADDRCONFIG, 0), +#endif +#if HAVE_DECL_AI_ALL + (AI_ALL, #const AI_ALL), +#else + (AI_ALL, 0), +#endif + (AI_CANONNAME, #const AI_CANONNAME), + (AI_NUMERICHOST, #const AI_NUMERICHOST), +#if HAVE_DECL_AI_NUMERICSERV + (AI_NUMERICSERV, #const AI_NUMERICSERV), +#else + (AI_NUMERICSERV, 0), +#endif + (AI_PASSIVE, #const AI_PASSIVE), +#if HAVE_DECL_AI_V4MAPPED + (AI_V4MAPPED, #const AI_V4MAPPED) +#else + (AI_V4MAPPED, 0) +#endif + ] + +-- | Indicate whether the given 'AddrInfoFlag' will have any effect on +-- this system. 
+addrInfoFlagImplemented :: AddrInfoFlag -> Bool +addrInfoFlagImplemented f = packBits aiFlagMapping [f] /= 0 + +data AddrInfo = + AddrInfo { + addrFlags :: [AddrInfoFlag], + addrFamily :: Family, + addrSocketType :: SocketType, + addrProtocol :: ProtocolNumber, + addrAddress :: SockAddr, + addrCanonName :: Maybe String + } + deriving (Eq, Show, Typeable) + +instance Storable AddrInfo where + sizeOf _ = #const sizeof(struct addrinfo) + alignment _ = alignment (undefined :: CInt) + + peek p = do + ai_flags <- (#peek struct addrinfo, ai_flags) p + ai_family <- (#peek struct addrinfo, ai_family) p + ai_socktype <- (#peek struct addrinfo, ai_socktype) p + ai_protocol <- (#peek struct addrinfo, ai_protocol) p + ai_addr <- (#peek struct addrinfo, ai_addr) p >>= peekSockAddr + ai_canonname_ptr <- (#peek struct addrinfo, ai_canonname) p + + ai_canonname <- if ai_canonname_ptr == nullPtr + then return Nothing + else liftM Just $ peekCString ai_canonname_ptr + + socktype <- unpackSocketType' "AddrInfo.peek" ai_socktype + return (AddrInfo + { + addrFlags = unpackBits aiFlagMapping ai_flags, + addrFamily = unpackFamily ai_family, + addrSocketType = socktype, + addrProtocol = ai_protocol, + addrAddress = ai_addr, + addrCanonName = ai_canonname + }) + + poke p (AddrInfo flags family socketType protocol _ _) = do + c_stype <- packSocketTypeOrThrow "AddrInfo.poke" socketType + + (#poke struct addrinfo, ai_flags) p (packBits aiFlagMapping flags) + (#poke struct addrinfo, ai_family) p (packFamily family) + (#poke struct addrinfo, ai_socktype) p c_stype + (#poke struct addrinfo, ai_protocol) p protocol + + -- stuff below is probably not needed, but let's zero it for safety + + (#poke struct addrinfo, ai_addrlen) p (0::CSize) + (#poke struct addrinfo, ai_addr) p nullPtr + (#poke struct addrinfo, ai_canonname) p nullPtr + (#poke struct addrinfo, ai_next) p nullPtr + +data NameInfoFlag + = NI_DGRAM + | NI_NAMEREQD + | NI_NOFQDN + | NI_NUMERICHOST + | NI_NUMERICSERV + deriving (Eq, Read, 
Show, Typeable) + +niFlagMapping :: [(NameInfoFlag, CInt)] + +niFlagMapping = [(NI_DGRAM, #const NI_DGRAM), + (NI_NAMEREQD, #const NI_NAMEREQD), + (NI_NOFQDN, #const NI_NOFQDN), + (NI_NUMERICHOST, #const NI_NUMERICHOST), + (NI_NUMERICSERV, #const NI_NUMERICSERV)] + +-- | Default hints for address lookup with 'getAddrInfo'. The values +-- of the 'addrAddress' and 'addrCanonName' fields are 'undefined', +-- and are never inspected by 'getAddrInfo'. + +defaultHints :: AddrInfo + +defaultHints = AddrInfo { + addrFlags = [], + addrFamily = AF_UNSPEC, + addrSocketType = NoSocketType, + addrProtocol = defaultProtocol, + addrAddress = undefined, + addrCanonName = undefined + } + +-- | Resolve a host or service name to one or more addresses. +-- The 'AddrInfo' values that this function returns contain 'SockAddr' +-- values that you can pass directly to 'connect' or +-- 'bind'. +-- +-- This function is protocol independent. It can return both IPv4 and +-- IPv6 address information. +-- +-- The 'AddrInfo' argument specifies the preferred query behaviour, +-- socket options, or protocol. You can override these conveniently +-- using Haskell's record update syntax on 'defaultHints', for example +-- as follows: +-- +-- @ +-- myHints = defaultHints { addrFlags = [AI_ADDRCONFIG, AI_CANONNAME] } +-- @ +-- +-- Values for 'addrFlags' control query behaviour. The supported +-- flags are as follows: +-- +-- [@AI_PASSIVE@] If no 'HostName' value is provided, the network +-- address in each 'SockAddr' +-- will be left as a "wild card", i.e. as either 'iNADDR_ANY' +-- or 'iN6ADDR_ANY'. This is useful for server applications that +-- will accept connections from any client. +-- +-- [@AI_CANONNAME@] The 'addrCanonName' field of the first returned +-- 'AddrInfo' will contain the "canonical name" of the host. +-- +-- [@AI_NUMERICHOST@] The 'HostName' argument /must/ be a numeric +-- address in string form, and network name lookups will not be +-- attempted. 
+-- +-- /Note/: Although the following flags are required by RFC 3493, they +-- may not have an effect on all platforms, because the underlying +-- network stack may not support them. To see whether a flag from the +-- list below will have any effect, call 'addrInfoFlagImplemented'. +-- +-- [@AI_NUMERICSERV@] The 'ServiceName' argument /must/ be a port +-- number in string form, and service name lookups will not be +-- attempted. +-- +-- [@AI_ADDRCONFIG@] The list of returned 'AddrInfo' values will +-- only contain IPv4 addresses if the local system has at least +-- one IPv4 interface configured, and likewise for IPv6. +-- +-- [@AI_V4MAPPED@] If an IPv6 lookup is performed, and no IPv6 +-- addresses are found, IPv6-mapped IPv4 addresses will be +-- returned. +-- +-- [@AI_ALL@] If 'AI_ALL' is specified, return all matching IPv6 and +-- IPv4 addresses. Otherwise, this flag has no effect. +-- +-- You must provide a 'Just' value for at least one of the 'HostName' +-- or 'ServiceName' arguments. 'HostName' can be either a numeric +-- network address (dotted quad for IPv4, colon-separated hex for +-- IPv6) or a hostname. In the latter case, its addresses will be +-- looked up unless 'AI_NUMERICHOST' is specified as a hint. If you +-- do not provide a 'HostName' value /and/ do not set 'AI_PASSIVE' as +-- a hint, network addresses in the result will contain the address of +-- the loopback interface. +-- +-- If the query fails, this function throws an IO exception instead of +-- returning an empty list. Otherwise, it returns a non-empty list +-- of 'AddrInfo' values. +-- +-- There are several reasons why a query might result in several +-- values. For example, the queried-for host could be multihomed, or +-- the service might be available via several protocols. +-- +-- Note: the order of arguments is slightly different to that defined +-- for @getaddrinfo@ in RFC 2553. The 'AddrInfo' parameter comes first +-- to make partial application easier. 
+-- +-- Example: +-- @ +-- let hints = defaultHints { addrFlags = [AI_ADDRCONFIG, AI_CANONNAME] } +-- addrs <- getAddrInfo (Just hints) (Just "www.haskell.org") (Just "http") +-- let addr = head addrs +-- sock <- socket (addrFamily addr) (addrSocketType addr) (addrProtocol addr) +-- connect sock (addrAddress addr) +-- @ + +getAddrInfo :: Maybe AddrInfo -- ^ preferred socket type or protocol + -> Maybe HostName -- ^ host name to look up + -> Maybe ServiceName -- ^ service name to look up + -> IO [AddrInfo] -- ^ resolved addresses, with "best" first + +getAddrInfo hints node service = withSocketsDo $ + maybeWith withCString node $ \c_node -> + maybeWith withCString service $ \c_service -> + maybeWith with filteredHints $ \c_hints -> + alloca $ \ptr_ptr_addrs -> do + ret <- c_getaddrinfo c_node c_service c_hints ptr_ptr_addrs + case ret of + 0 -> do ptr_addrs <- peek ptr_ptr_addrs + ais <- followAddrInfo ptr_addrs + c_freeaddrinfo ptr_addrs + return ais + _ -> do err <- gai_strerror ret + ioError (ioeSetErrorString + (mkIOError NoSuchThing "getAddrInfo" Nothing + Nothing) err) + -- Leaving out the service and using AI_NUMERICSERV causes a + -- segfault on OS X 10.8.2. This code removes AI_NUMERICSERV + -- (which has no effect) in that case. 
+ where +#if defined(darwin_HOST_OS) + filteredHints = case service of + Nothing -> fmap (\ h -> h { addrFlags = delete AI_NUMERICSERV (addrFlags h) }) hints + _ -> hints +#else + filteredHints = hints +#endif + +followAddrInfo :: Ptr AddrInfo -> IO [AddrInfo] + +followAddrInfo ptr_ai | ptr_ai == nullPtr = return [] + | otherwise = do + a <- peek ptr_ai + as <- (#peek struct addrinfo, ai_next) ptr_ai >>= followAddrInfo + return (a:as) + +foreign import ccall safe "hsnet_getaddrinfo" + c_getaddrinfo :: CString -> CString -> Ptr AddrInfo -> Ptr (Ptr AddrInfo) + -> IO CInt + +foreign import ccall safe "hsnet_freeaddrinfo" + c_freeaddrinfo :: Ptr AddrInfo -> IO () + +gai_strerror :: CInt -> IO String + +#ifdef HAVE_GAI_STRERROR +gai_strerror n = c_gai_strerror n >>= peekCString + +foreign import ccall safe "gai_strerror" + c_gai_strerror :: CInt -> IO CString +#else +gai_strerror n = return ("error " ++ show n) +#endif + +withCStringIf :: Bool -> Int -> (CSize -> CString -> IO a) -> IO a +withCStringIf False _ f = f 0 nullPtr +withCStringIf True n f = allocaBytes n (f (fromIntegral n)) + +-- | Resolve an address to a host or service name. +-- This function is protocol independent. +-- +-- The list of 'NameInfoFlag' values controls query behaviour. The +-- supported flags are as follows: +-- +-- [@NI_NOFQDN@] If a host is local, return only the +-- hostname part of the FQDN. +-- +-- [@NI_NUMERICHOST@] The name of the host is not +-- looked up. Instead, a numeric representation of the host's +-- address is returned. For an IPv4 address, this will be a +-- dotted-quad string. For IPv6, it will be colon-separated +-- hexadecimal. +-- +-- [@NI_NUMERICSERV@] The name of the service is not +-- looked up. Instead, a numeric representation of the +-- service is returned. +-- +-- [@NI_NAMEREQD@] If the hostname cannot be looked up, an IO error +-- is thrown. +-- +-- [@NI_DGRAM@] Resolve a datagram-based service name. 
This is +-- required only for the few protocols that have different port +-- numbers for their datagram-based versions than for their +-- stream-based versions. +-- +-- Hostname and service name lookups can be expensive. You can +-- specify which lookups to perform via the two 'Bool' arguments. If +-- one of these is 'False', the corresponding value in the returned +-- tuple will be 'Nothing', and no lookup will be performed. +-- +-- If a host or service's name cannot be looked up, then the numeric +-- form of the address or service will be returned. +-- +-- If the query fails, this function throws an IO exception. +-- +-- Example: +-- @ +-- (hostName, _) <- getNameInfo [] True False myAddress +-- @ + +getNameInfo :: [NameInfoFlag] -- ^ flags to control lookup behaviour + -> Bool -- ^ whether to look up a hostname + -> Bool -- ^ whether to look up a service name + -> SockAddr -- ^ the address to look up + -> IO (Maybe HostName, Maybe ServiceName) + +getNameInfo flags doHost doService addr = withSocketsDo $ + withCStringIf doHost (#const NI_MAXHOST) $ \c_hostlen c_host -> + withCStringIf doService (#const NI_MAXSERV) $ \c_servlen c_serv -> do + withSockAddr addr $ \ptr_addr sz -> do + ret <- c_getnameinfo ptr_addr (fromIntegral sz) c_host c_hostlen + c_serv c_servlen (packBits niFlagMapping flags) + case ret of + 0 -> do + let peekIf doIf c_val = if doIf + then liftM Just $ peekCString c_val + else return Nothing + host <- peekIf doHost c_host + serv <- peekIf doService c_serv + return (host, serv) + _ -> do err <- gai_strerror ret + ioError (ioeSetErrorString + (mkIOError NoSuchThing "getNameInfo" Nothing + Nothing) err) + +foreign import ccall safe "hsnet_getnameinfo" + c_getnameinfo :: Ptr SockAddr -> CInt{-CSockLen???-} -> CString -> CSize -> CString + -> CSize -> CInt -> IO CInt +#endif + +mkInvalidRecvArgError :: String -> IOError +mkInvalidRecvArgError loc = ioeSetErrorString (mkIOError + InvalidArgument + loc Nothing Nothing) "non-positive length" + 
+mkEOFError :: String -> IOError +mkEOFError loc = ioeSetErrorString (mkIOError EOF loc Nothing Nothing) "end of file" + +-- --------------------------------------------------------------------------- +-- foreign imports from the C library + +foreign import ccall unsafe "my_inet_ntoa" + c_inet_ntoa :: HostAddress -> IO (Ptr CChar) + +foreign import CALLCONV unsafe "inet_addr" + c_inet_addr :: Ptr CChar -> IO HostAddress + +foreign import CALLCONV unsafe "shutdown" + c_shutdown :: CInt -> CInt -> IO CInt + +closeFd :: CInt -> IO () +closeFd fd = throwSocketErrorIfMinus1_ "Network.Socket.close" $ c_close fd + +#if !defined(WITH_WINSOCK) +foreign import ccall unsafe "close" + c_close :: CInt -> IO CInt +#else +foreign import stdcall unsafe "closesocket" + c_close :: CInt -> IO CInt +#endif + +foreign import CALLCONV unsafe "socket" + c_socket :: CInt -> CInt -> CInt -> IO CInt +foreign import CALLCONV unsafe "bind" + c_bind :: CInt -> Ptr SockAddr -> CInt{-CSockLen???-} -> IO CInt +foreign import CALLCONV SAFE_ON_WIN "connect" + c_connect :: CInt -> Ptr SockAddr -> CInt{-CSockLen???-} -> IO CInt +foreign import CALLCONV unsafe "accept" + c_accept :: CInt -> Ptr SockAddr -> Ptr CInt{-CSockLen???-} -> IO CInt +#ifdef HAVE_ACCEPT4 +foreign import CALLCONV unsafe "accept4" + c_accept4 :: CInt -> Ptr SockAddr -> Ptr CInt{-CSockLen???-} -> CInt -> IO CInt +#endif +foreign import CALLCONV unsafe "listen" + c_listen :: CInt -> CInt -> IO CInt + +#if defined(mingw32_HOST_OS) +foreign import CALLCONV safe "accept" + c_accept_safe :: CInt -> Ptr SockAddr -> Ptr CInt{-CSockLen???-} -> IO CInt + +foreign import ccall unsafe "rtsSupportsBoundThreads" threaded :: Bool +#endif + +foreign import CALLCONV unsafe "send" + c_send :: CInt -> Ptr a -> CSize -> CInt -> IO CInt +foreign import CALLCONV SAFE_ON_WIN "sendto" + c_sendto :: CInt -> Ptr a -> CSize -> CInt -> Ptr SockAddr -> CInt -> IO CInt +foreign import CALLCONV unsafe "recv" + c_recv :: CInt -> Ptr CChar -> CSize -> CInt -> IO 
CInt +foreign import CALLCONV SAFE_ON_WIN "recvfrom" + c_recvfrom :: CInt -> Ptr a -> CSize -> CInt -> Ptr SockAddr -> Ptr CInt -> IO CInt +foreign import CALLCONV unsafe "getpeername" + c_getpeername :: CInt -> Ptr SockAddr -> Ptr CInt -> IO CInt +foreign import CALLCONV unsafe "getsockname" + c_getsockname :: CInt -> Ptr SockAddr -> Ptr CInt -> IO CInt + +foreign import CALLCONV unsafe "getsockopt" + c_getsockopt :: CInt -> CInt -> CInt -> Ptr CInt -> Ptr CInt -> IO CInt +foreign import CALLCONV unsafe "setsockopt" + c_setsockopt :: CInt -> CInt -> CInt -> Ptr CInt -> CInt -> IO CInt + +#if defined(HAVE_GETPEEREID) +foreign import CALLCONV unsafe "getpeereid" + c_getpeereid :: CInt -> Ptr CUInt -> Ptr CUInt -> IO CInt +#endif +-- --------------------------------------------------------------------------- +-- * Deprecated aliases + +-- $deprecated-aliases +-- +-- These aliases are deprecated and should not be used in new code. +-- They will be removed in some future version of the package. + +-- | Deprecated alias for 'bind'. +bindSocket :: Socket -- Unconnected Socket + -> SockAddr -- Address to Bind to + -> IO () +bindSocket = bind + +-- | Deprecated alias for 'close'. +sClose :: Socket -> IO () +sClose = close + +-- | Deprecated alias for 'isConnected'. +sIsConnected :: Socket -> IO Bool +sIsConnected = isConnected + +-- | Deprecated alias for 'isBound'. +sIsBound :: Socket -> IO Bool +sIsBound = isBound + +-- | Deprecated alias for 'isListening'. +sIsListening :: Socket -> IO Bool +sIsListening = isListening + +-- | Deprecated alias for 'isReadable'. +sIsReadable :: Socket -> IO Bool +sIsReadable = isReadable + +-- | Deprecated alias for 'isWritable'. 
+sIsWritable :: Socket -> IO Bool +sIsWritable = isWritable diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/network.buildinfo.in cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/network.buildinfo.in --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/network.buildinfo.in 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/network.buildinfo.in 2015-07-07 18:40:52.000000000 +0000 @@ -0,0 +1,6 @@ +ghc-options: -DCALLCONV=@CALLCONV@ @EXTRA_CPPFLAGS@ +ghc-prof-options: -DCALLCONV=@CALLCONV@ @EXTRA_CPPFLAGS@ +ld-options: @LDFLAGS@ +cc-options: -DCALLCONV=@CALLCONV@ @EXTRA_CPPFLAGS@ +c-sources: @EXTRA_SRCS@ +extra-libraries: @EXTRA_LIBS@ diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/network.cabal cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/network.cabal --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/network.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/network.cabal 2016-06-02 07:15:40.000000000 +0000 @@ -0,0 +1,112 @@ +name: network +version: 2.6.2.1 +license: BSD3 +license-file: LICENSE +maintainer: Johan Tibell +synopsis: Low-level networking interface +description: + This package provides a low-level networking interface. + . + In network-2.6 the @Network.URI@ module was split off into its own + package, network-uri-2.6. If you're using the @Network.URI@ module + you can automatically get it from the right package by adding this + to your .cabal file: + . + > flag network-uri + > description: Get Network.URI from the network-uri package + > default: True + > + > library + > -- ... + > if flag(network-uri) + > build-depends: network-uri >= 2.6, network >= 2.6 + > else + > build-depends: network-uri < 2.6, network < 2.6 + . + That is, get the module from either network < 2.6 or from + network-uri >= 2.6. 
+category: Network +build-type: Configure +cabal-version: >=1.8 +extra-tmp-files: + config.log config.status autom4te.cache network.buildinfo + include/HsNetworkConfig.h +extra-source-files: + README.md CHANGELOG.md + examples/*.hs tests/*.hs config.guess config.sub install-sh + configure.ac configure network.buildinfo.in + include/HsNetworkConfig.h.in include/HsNet.h + -- C sources only used on some systems + cbits/ancilData.c cbits/asyncAccept.c cbits/initWinSock.c + cbits/winSockErr.c +homepage: https://github.com/haskell/network +bug-reports: https://github.com/haskell/network/issues + +library + exposed-modules: + Network + Network.BSD + Network.Socket + Network.Socket.ByteString + Network.Socket.ByteString.Lazy + Network.Socket.Internal + other-modules: + Network.Socket.ByteString.Internal + Network.Socket.Types + + if !os(windows) + other-modules: + Network.Socket.ByteString.IOVec + Network.Socket.ByteString.Lazy.Posix + Network.Socket.ByteString.MsgHdr + if os(windows) + other-modules: + Network.Socket.ByteString.Lazy.Windows + + build-depends: + base >= 3 && < 5, + bytestring < 0.11 + + if !os(windows) + build-depends: + unix >= 2 + + extensions: + CPP, DeriveDataTypeable, ForeignFunctionInterface, TypeSynonymInstances + include-dirs: include + includes: HsNet.h + install-includes: HsNet.h HsNetworkConfig.h + c-sources: cbits/HsNet.c + ghc-options: -Wall -fwarn-tabs + +test-suite simple + hs-source-dirs: tests + main-is: Simple.hs + type: exitcode-stdio-1.0 + + build-depends: + base < 5, + bytestring, + HUnit, + network, + test-framework, + test-framework-hunit + +test-suite regression + hs-source-dirs: tests + main-is: Regression.hs + type: exitcode-stdio-1.0 + + build-depends: + base < 5, + bytestring, + HUnit, + network, + test-framework, + test-framework-hunit + + ghc-options: -Wall + +source-repository head + type: git + location: git://github.com/haskell/network.git diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/Network.hs 
cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/Network.hs --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/Network.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/Network.hs 2015-07-07 18:40:52.000000000 +0000 @@ -0,0 +1,486 @@ +{-# LANGUAGE CPP #-} +----------------------------------------------------------------------------- +-- | +-- Module : Network +-- Copyright : (c) The University of Glasgow 2001 +-- License : BSD-style (see the file libraries/network/LICENSE) +-- +-- Maintainer : libraries@haskell.org +-- Stability : provisional +-- Portability : portable +-- +-- This module is kept for backwards-compatibility. New users are +-- encouraged to use "Network.Socket" instead. +-- +-- "Network" was intended as a \"higher-level\" interface to networking +-- facilities, and only supports TCP. +-- +----------------------------------------------------------------------------- + +#include "HsNetworkConfig.h" + +#ifdef HAVE_GETADDRINFO +-- Use IPv6-capable function definitions if the OS supports it. +#define IPV6_SOCKET_SUPPORT 1 +#endif + +module Network + ( + -- * Basic data types + Socket + , PortID(..) 
+ , HostName + , PortNumber + + -- * Initialisation + , withSocketsDo + + -- * Server-side connections + , listenOn + , accept + , sClose + + -- * Client-side connections + , connectTo + + -- * Simple sending and receiving + {-$sendrecv-} + , sendTo + , recvFrom + + -- * Miscellaneous + , socketPort + + -- * Networking Issues + -- ** Buffering + {-$buffering-} + + -- ** Improving I\/O Performance over sockets + {-$performance-} + + -- ** @SIGPIPE@ + {-$sigpipe-} + ) where + +import Control.Monad (liftM) +import Data.Maybe (fromJust) +import Network.BSD +import Network.Socket hiding (accept, socketPort, recvFrom, + sendTo, PortNumber, sClose) +import qualified Network.Socket as Socket (accept) +import System.IO +import Prelude +import qualified Control.Exception as Exception + +-- --------------------------------------------------------------------------- +-- High Level ``Setup'' functions + +-- If the @PortID@ specifies a unix family socket and the @Hostname@ +-- differs from that returned by @getHostname@ then an error is +-- raised. Alternatively an empty string may be given to @connectTo@ +-- signalling that the current hostname applies. + +data PortID = + Service String -- Service Name eg "ftp" + | PortNumber PortNumber -- User defined Port Number +#if !defined(mingw32_HOST_OS) && !defined(cygwin32_HOST_OS) && !defined(_WIN32) + | UnixSocket String -- Unix family socket in file system +#endif + deriving (Show, Eq) + +-- | Calling 'connectTo' creates a client side socket which is +-- connected to the given host and port. The Protocol and socket type is +-- derived from the given port identifier. If a port number is given +-- then the result is always an internet family 'Stream' socket. + +connectTo :: HostName -- Hostname + -> PortID -- Port Identifier + -> IO Handle -- Connected Socket + +#if defined(IPV6_SOCKET_SUPPORT) +-- IPv6 and IPv4. 
+ +connectTo hostname (Service serv) = connect' hostname serv + +connectTo hostname (PortNumber port) = connect' hostname (show port) +#else +-- IPv4 only. + +connectTo hostname (Service serv) = do + proto <- getProtocolNumber "tcp" + bracketOnError + (socket AF_INET Stream proto) + (sClose) -- only done if there's an error + (\sock -> do + port <- getServicePortNumber serv + he <- getHostByName hostname + connect sock (SockAddrInet port (hostAddress he)) + socketToHandle sock ReadWriteMode + ) + +connectTo hostname (PortNumber port) = do + proto <- getProtocolNumber "tcp" + bracketOnError + (socket AF_INET Stream proto) + (sClose) -- only done if there's an error + (\sock -> do + he <- getHostByName hostname + connect sock (SockAddrInet port (hostAddress he)) + socketToHandle sock ReadWriteMode + ) +#endif + +#if !defined(mingw32_HOST_OS) && !defined(cygwin32_HOST_OS) && !defined(_WIN32) +connectTo _ (UnixSocket path) = do + bracketOnError + (socket AF_UNIX Stream 0) + (sClose) + (\sock -> do + connect sock (SockAddrUnix path) + socketToHandle sock ReadWriteMode + ) +#endif + +#if defined(IPV6_SOCKET_SUPPORT) +connect' :: HostName -> ServiceName -> IO Handle + +connect' host serv = do + proto <- getProtocolNumber "tcp" + let hints = defaultHints { addrFlags = [AI_ADDRCONFIG] + , addrProtocol = proto + , addrSocketType = Stream } + addrs <- getAddrInfo (Just hints) (Just host) (Just serv) + firstSuccessful $ map tryToConnect addrs + where + tryToConnect addr = + bracketOnError + (socket (addrFamily addr) (addrSocketType addr) (addrProtocol addr)) + (sClose) -- only done if there's an error + (\sock -> do + connect sock (addrAddress addr) + socketToHandle sock ReadWriteMode + ) +#endif + +-- | Creates the server side socket which has been bound to the +-- specified port. +-- +-- 'maxListenQueue' (typically 128) is specified to the listen queue. +-- This is good enough for normal network servers but is too small +-- for high performance servers. 
+-- +-- To avoid the \"Address already in use\" problems, +-- the 'ReuseAddr' socket option is set on the listening socket. +-- +-- If available, the 'IPv6Only' socket option is set to 0 +-- so that both IPv4 and IPv6 can be accepted with this socket. +-- +-- If you don't like the behavior above, please use the lower level +-- 'Network.Socket.listen' instead. + +listenOn :: PortID -- ^ Port Identifier + -> IO Socket -- ^ Listening Socket + +#if defined(IPV6_SOCKET_SUPPORT) +-- IPv6 and IPv4. + +listenOn (Service serv) = listen' serv + +listenOn (PortNumber port) = listen' (show port) +#else +-- IPv4 only. + +listenOn (Service serv) = do + proto <- getProtocolNumber "tcp" + bracketOnError + (socket AF_INET Stream proto) + (sClose) + (\sock -> do + port <- getServicePortNumber serv + setSocketOption sock ReuseAddr 1 + bindSocket sock (SockAddrInet port iNADDR_ANY) + listen sock maxListenQueue + return sock + ) + +listenOn (PortNumber port) = do + proto <- getProtocolNumber "tcp" + bracketOnError + (socket AF_INET Stream proto) + (sClose) + (\sock -> do + setSocketOption sock ReuseAddr 1 + bindSocket sock (SockAddrInet port iNADDR_ANY) + listen sock maxListenQueue + return sock + ) +#endif + +#if !defined(mingw32_HOST_OS) && !defined(cygwin32_HOST_OS) && !defined(_WIN32) +listenOn (UnixSocket path) = + bracketOnError + (socket AF_UNIX Stream 0) + (sClose) + (\sock -> do + setSocketOption sock ReuseAddr 1 + bindSocket sock (SockAddrUnix path) + listen sock maxListenQueue + return sock + ) +#endif + +#if defined(IPV6_SOCKET_SUPPORT) +listen' :: ServiceName -> IO Socket + +listen' serv = do + proto <- getProtocolNumber "tcp" + -- We should probably specify addrFamily = AF_INET6 and the filter + -- code below should be removed. AI_ADDRCONFIG is probably not + -- necessary. But this code is well-tested. So, let's keep it. 
+ let hints = defaultHints { addrFlags = [AI_ADDRCONFIG, AI_PASSIVE] + , addrSocketType = Stream + , addrProtocol = proto } + addrs <- getAddrInfo (Just hints) Nothing (Just serv) + -- Choose an IPv6 socket if exists. This ensures the socket can + -- handle both IPv4 and IPv6 if v6only is false. + let addrs' = filter (\x -> addrFamily x == AF_INET6) addrs + addr = if null addrs' then head addrs else head addrs' + bracketOnError + (socket (addrFamily addr) (addrSocketType addr) (addrProtocol addr)) + (sClose) + (\sock -> do + setSocketOption sock ReuseAddr 1 + bindSocket sock (addrAddress addr) + listen sock maxListenQueue + return sock + ) +#endif + +-- ----------------------------------------------------------------------------- +-- accept + +-- | Accept a connection on a socket created by 'listenOn'. Normal +-- I\/O operations (see "System.IO") can be used on the 'Handle' +-- returned to communicate with the client. +-- Notice that although you can pass any Socket to Network.accept, +-- only sockets of either AF_UNIX, AF_INET, or AF_INET6 will work +-- (this shouldn't be a problem, though). When using AF_UNIX, HostName +-- will be set to the path of the socket and PortNumber to -1. +-- +accept :: Socket -- ^ Listening Socket + -> IO (Handle, + HostName, + PortNumber) -- ^ Triple of: read\/write 'Handle' for + -- communicating with the client, + -- the 'HostName' of the peer socket, and + -- the 'PortNumber' of the remote connection. +accept sock@(MkSocket _ AF_INET _ _ _) = do + ~(sock', (SockAddrInet port haddr)) <- Socket.accept sock + peer <- catchIO + (do + (HostEntry peer _ _ _) <- getHostByAddr AF_INET haddr + return peer + ) + (\_e -> inet_ntoa haddr) + -- if getHostByName fails, we fall back to the IP address + handle <- socketToHandle sock' ReadWriteMode + return (handle, peer, port) +#if defined(IPV6_SOCKET_SUPPORT) +accept sock@(MkSocket _ AF_INET6 _ _ _) = do + (sock', addr) <- Socket.accept sock + peer <- catchIO ((fromJust . 
fst) `liftM` getNameInfo [] True False addr) $ + \_ -> case addr of + SockAddrInet _ a -> inet_ntoa a + SockAddrInet6 _ _ a _ -> return (show a) +# if !defined(mingw32_HOST_OS) && !defined(cygwin32_HOST_OS) && !defined(_WIN32) + SockAddrUnix a -> return a +# endif + handle <- socketToHandle sock' ReadWriteMode + let port = case addr of + SockAddrInet p _ -> p + SockAddrInet6 p _ _ _ -> p + _ -> -1 + return (handle, peer, port) +#endif +#if !defined(mingw32_HOST_OS) && !defined(cygwin32_HOST_OS) && !defined(_WIN32) +accept sock@(MkSocket _ AF_UNIX _ _ _) = do + ~(sock', (SockAddrUnix path)) <- Socket.accept sock + handle <- socketToHandle sock' ReadWriteMode + return (handle, path, -1) +#endif +accept (MkSocket _ family _ _ _) = + error $ "Sorry, address family " ++ (show family) ++ " is not supported!" + + +-- | Close the socket. All future operations on the socket object will fail. +-- The remote end will receive no more data (after queued data is flushed). +sClose :: Socket -> IO () +sClose = close -- Explicit redefinition because Network.sClose is deperecated, + -- hence the re-export would also be marked as such. + +-- ----------------------------------------------------------------------------- +-- sendTo/recvFrom + +{-$sendrecv +Send and receive data from\/to the given host and port number. These +should normally only be used where the socket will not be required for +further calls. Also, note that due to the use of 'hGetContents' in 'recvFrom' +the socket will remain open (i.e. not available) even if the function already +returned. Their use is strongly discouraged except for small test-applications +or invocations from the command line. 
+-} + +sendTo :: HostName -- Hostname + -> PortID -- Port Number + -> String -- Message to send + -> IO () +sendTo h p msg = do + s <- connectTo h p + hPutStr s msg + hClose s + +recvFrom :: HostName -- Hostname + -> PortID -- Port Number + -> IO String -- Received Data + +#if defined(IPV6_SOCKET_SUPPORT) +recvFrom host port = do + proto <- getProtocolNumber "tcp" + let hints = defaultHints { addrFlags = [AI_ADDRCONFIG] + , addrProtocol = proto + , addrSocketType = Stream } + allowed <- map addrAddress `liftM` getAddrInfo (Just hints) (Just host) + Nothing + s <- listenOn port + let waiting = do + (s', addr) <- Socket.accept s + if not (addr `oneOf` allowed) + then sClose s' >> waiting + else socketToHandle s' ReadMode >>= hGetContents + waiting + where + a@(SockAddrInet _ ha) `oneOf` ((SockAddrInet _ hb):bs) + | ha == hb = True + | otherwise = a `oneOf` bs + a@(SockAddrInet6 _ _ ha _) `oneOf` ((SockAddrInet6 _ _ hb _):bs) + | ha == hb = True + | otherwise = a `oneOf` bs + _ `oneOf` _ = False +#else +recvFrom host port = do + ip <- getHostByName host + let ipHs = hostAddresses ip + s <- listenOn port + let + waiting = do + ~(s', SockAddrInet _ haddr) <- Socket.accept s + he <- getHostByAddr AF_INET haddr + if not (any (`elem` ipHs) (hostAddresses he)) + then do + sClose s' + waiting + else do + h <- socketToHandle s' ReadMode + msg <- hGetContents h + return msg + + message <- waiting + return message +#endif + +-- --------------------------------------------------------------------------- +-- Access function returning the port type/id of socket. + +-- | Returns the 'PortID' associated with a given socket. 
+socketPort :: Socket -> IO PortID +socketPort s = do + sockaddr <- getSocketName s + return (portID sockaddr) + where + portID sa = + case sa of + SockAddrInet port _ -> PortNumber port +#if defined(IPV6_SOCKET_SUPPORT) + SockAddrInet6 port _ _ _ -> PortNumber port +#endif +#if !defined(mingw32_HOST_OS) && !defined(cygwin32_HOST_OS) && !defined(_WIN32) + SockAddrUnix path -> UnixSocket path +#endif + +-- --------------------------------------------------------------------------- +-- Utils + +-- Like bracket, but only performs the final action if there was an +-- exception raised by the middle bit. +bracketOnError + :: IO a -- ^ computation to run first (\"acquire resource\") + -> (a -> IO b) -- ^ computation to run last (\"release resource\") + -> (a -> IO c) -- ^ computation to run in-between + -> IO c -- returns the value from the in-between computation +bracketOnError = Exception.bracketOnError + +----------------------------------------------------------------------------- +-- Extra documentation + +{-$buffering + +The 'Handle' returned by 'connectTo' and 'accept' is block-buffered by +default. For an interactive application you may want to set the +buffering mode on the 'Handle' to +'LineBuffering' or 'NoBuffering', like so: + +> h <- connectTo host port +> hSetBuffering h LineBuffering +-} + +{-$performance + +For really fast I\/O, it might be worth looking at the 'hGetBuf' and +'hPutBuf' family of functions in "System.IO". +-} + +{-$sigpipe + +On Unix, when writing to a socket and the reading end is +closed by the remote client, the program is normally sent a +@SIGPIPE@ signal by the operating system. The +default behaviour when a @SIGPIPE@ is received is +to terminate the program silently, which can be somewhat confusing +if you haven't encountered this before. The solution is to +specify that @SIGPIPE@ is to be ignored, using +the POSIX library: + +> import Posix +> main = do installHandler sigPIPE Ignore Nothing; ... 
+-} + +catchIO :: IO a -> (Exception.IOException -> IO a) -> IO a +#if MIN_VERSION_base(4,0,0) +catchIO = Exception.catch +#else +catchIO = Exception.catchJust Exception.ioErrors +#endif + +-- Version of try implemented in terms of the locally defined catchIO +tryIO :: IO a -> IO (Either Exception.IOException a) +tryIO m = catchIO (liftM Right m) (return . Left) + +-- Returns the first action from a list which does not throw an exception. +-- If all the actions throw exceptions (and the list of actions is not empty), +-- the last exception is thrown. +-- The operations are run outside of the catchIO cleanup handler because +-- catchIO masks asynchronous exceptions in the cleanup handler. +-- In the case of complete failure, the last exception is actually thrown. +firstSuccessful :: [IO a] -> IO a +firstSuccessful = go Nothing + where + -- Attempt the next operation, remember exception on failure + go _ (p:ps) = + do r <- tryIO p + case r of + Right x -> return x + Left e -> go (Just e) ps + + -- All operations failed, throw error if one exists + go Nothing [] = error "firstSuccessful: empty list" + go (Just e) [] = Exception.throwIO e diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/README.md cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/README.md --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/README.md 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/README.md 2015-07-07 18:40:52.000000000 +0000 @@ -0,0 +1,8 @@ +# [`network`](http://hackage.haskell.org/package/network) [![Build Status](https://travis-ci.org/haskell/network.svg?branch=master)](https://travis-ci.org/haskell/network) + +To build this package using Cabal directly from git, you must run +`autoreconf` before the usual Cabal build steps +(configure/build/install). `autoreconf` is included in the +[GNU Autoconf](http://www.gnu.org/software/autoconf/) tools. 
There is +no need to run the `configure` script: the `setup configure` step will +do this for you. diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/Setup.hs cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/Setup.hs --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/Setup.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/Setup.hs 2015-07-07 18:40:52.000000000 +0000 @@ -0,0 +1,6 @@ +module Main (main) where + +import Distribution.Simple + +main :: IO () +main = defaultMainWithHooks autoconfUserHooks diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/tests/Regression.hs cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/tests/Regression.hs --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/tests/Regression.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/tests/Regression.hs 2015-07-07 18:40:52.000000000 +0000 @@ -0,0 +1,31 @@ +-- | Tests for things that didn't work in the past. +module Main where + +import Network.Socket +import Test.Framework (Test, defaultMain) +import Test.Framework.Providers.HUnit (testCase) + +------------------------------------------------------------------------ +-- Tests + +-- Used to segfault on OS X 10.8.2 due to AI_NUMERICSERV being set +-- without a service being set. This is a OS X bug. 
+testGetAddrInfo :: IO () +testGetAddrInfo = do + let hints = defaultHints { addrFlags = [AI_NUMERICSERV] } + _ <- getAddrInfo (Just hints) (Just "localhost") Nothing + return () + +------------------------------------------------------------------------ +-- List of all tests + +tests :: [Test] +tests = + [ testCase "testGetAddrInfo" testGetAddrInfo + ] + +------------------------------------------------------------------------ +-- Test harness + +main :: IO () +main = withSocketsDo $ defaultMain tests diff -Nru cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/tests/Simple.hs cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/tests/Simple.hs --- cabal-install-1.22-1.22.6.0/src/network-2.6.2.1/tests/Simple.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-2.6.2.1/tests/Simple.hs 2015-07-07 18:40:52.000000000 +0000 @@ -0,0 +1,365 @@ +{-# LANGUAGE CPP, ScopedTypeVariables #-} + +module Main where + +import Control.Concurrent (ThreadId, forkIO, myThreadId) +import Control.Concurrent.MVar (newEmptyMVar, putMVar, takeMVar, readMVar) +import qualified Control.Exception as E +import Control.Monad (liftM, when) +import qualified Data.ByteString as S +import qualified Data.ByteString.Char8 as C +import Data.Maybe (fromJust) +import Network.Socket hiding (recv, recvFrom, send, sendTo) +import Network.Socket.ByteString + +--- To tests for AF_CAN on Linux, you need to bring up a virtual (or real can +--- interface.). Run as root: +--- # modprobe can +--- # modprobe can_raw +--- # modprobe vcan +--- # sudo ip link add dev vcan0 type vcan +--- # ip link show vcan0 +--- 3: can0: mtu 16 qdisc noqueue state UNKNOWN link/can +--- Define HAVE_LINUX_CAN to run CAN tests as well. 
+--- #define HAVE_LINUX_CAN 1 +-- #include "../include/HsNetworkConfig.h" +#if defined(HAVE_LINUX_CAN_H) +import Network.BSD (ifNameToIndex) +#endif +import Test.Framework (Test, defaultMain, testGroup) +import Test.Framework.Providers.HUnit (testCase) +import Test.HUnit (Assertion, (@=?)) + +------------------------------------------------------------------------ + +serverAddr :: String +serverAddr = "127.0.0.1" + +testMsg :: S.ByteString +testMsg = C.pack "This is a test message." + +------------------------------------------------------------------------ +-- Tests + +------------------------------------------------------------------------ +-- Sending and receiving + +testSend :: Assertion +testSend = tcpTest client server + where + server sock = recv sock 1024 >>= (@=?) testMsg + client sock = send sock testMsg + +testSendAll :: Assertion +testSendAll = tcpTest client server + where + server sock = recv sock 1024 >>= (@=?) testMsg + client sock = sendAll sock testMsg + +testSendTo :: Assertion +testSendTo = udpTest client server + where + server sock = recv sock 1024 >>= (@=?) testMsg + client sock serverPort = do + addr <- inet_addr serverAddr + sendTo sock testMsg (SockAddrInet serverPort addr) + +testSendAllTo :: Assertion +testSendAllTo = udpTest client server + where + server sock = recv sock 1024 >>= (@=?) testMsg + client sock serverPort = do + addr <- inet_addr serverAddr + sendAllTo sock testMsg (SockAddrInet serverPort addr) + +testSendMany :: Assertion +testSendMany = tcpTest client server + where + server sock = recv sock 1024 >>= (@=?) (S.append seg1 seg2) + client sock = sendMany sock [seg1, seg2] + + seg1 = C.pack "This is a " + seg2 = C.pack "test message." + +testSendManyTo :: Assertion +testSendManyTo = udpTest client server + where + server sock = recv sock 1024 >>= (@=?) 
(S.append seg1 seg2) + client sock serverPort = do + addr <- inet_addr serverAddr + sendManyTo sock [seg1, seg2] (SockAddrInet serverPort addr) + + seg1 = C.pack "This is a " + seg2 = C.pack "test message." + +testRecv :: Assertion +testRecv = tcpTest client server + where + server sock = recv sock 1024 >>= (@=?) testMsg + client sock = send sock testMsg + +testOverFlowRecv :: Assertion +testOverFlowRecv = tcpTest client server + where + server sock = do seg1 <- recv sock (S.length testMsg - 3) + seg2 <- recv sock 1024 + let msg = S.append seg1 seg2 + testMsg @=? msg + + client sock = send sock testMsg + +testRecvFrom :: Assertion +testRecvFrom = tcpTest client server + where + server sock = do (msg, _) <- recvFrom sock 1024 + testMsg @=? msg + + client sock = do + serverPort <- getPeerPort sock + addr <- inet_addr serverAddr + sendTo sock testMsg (SockAddrInet serverPort addr) + +testOverFlowRecvFrom :: Assertion +testOverFlowRecvFrom = tcpTest client server + where + server sock = do (seg1, _) <- recvFrom sock (S.length testMsg - 3) + (seg2, _) <- recvFrom sock 1024 + let msg = S.append seg1 seg2 + testMsg @=? msg + + client sock = send sock testMsg + +testUserTimeout :: Assertion +testUserTimeout = do + when (isSupportedSocketOption UserTimeout) $ do + sock <- socket AF_INET Stream defaultProtocol + setSocketOption sock UserTimeout 1000 + getSocketOption sock UserTimeout >>= (@=?) 1000 + setSocketOption sock UserTimeout 2000 + getSocketOption sock UserTimeout >>= (@=?) 
2000 + sClose sock + +{- +testGetPeerCred:: Assertion +testGetPeerCred = + test clientSetup clientAct serverSetup server + where + clientSetup = do + sock <- socket AF_UNIX Stream defaultProtocol + connect sock $ SockAddrUnix addr + return sock + + serverSetup = do + sock <- socket AF_UNIX Stream defaultProtocol + bindSocket sock $ SockAddrUnix addr + listen sock 1 + return sock + + server sock = do + (clientSock, _) <- accept sock + serverAct clientSock + sClose clientSock + + addr = "/tmp/testAddr1" + clientAct sock = withSocketsDo $ do + sendAll sock testMsg + (pid,uid,gid) <- getPeerCred sock + putStrLn $ unwords ["pid=",show pid,"uid=",show uid, "gid=", show gid] + serverAct sock = withSocketsDo $ do + msg <- recv sock 1024 + putStrLn $ C.unpack msg + + +testGetPeerEid :: Assertion +testGetPeerEid = + test clientSetup clientAct serverSetup server + where + clientSetup = do + sock <- socket AF_UNIX Stream defaultProtocol + connect sock $ SockAddrUnix addr + return sock + + serverSetup = do + sock <- socket AF_UNIX Stream defaultProtocol + bindSocket sock $ SockAddrUnix addr + listen sock 1 + return sock + + server sock = do + (clientSock, _) <- accept sock + serverAct clientSock + sClose clientSock + + addr = "/tmp/testAddr2" + clientAct sock = withSocketsDo $ do + sendAll sock testMsg + (uid,gid) <- getPeerEid sock + putStrLn $ unwords ["uid=",show uid, "gid=", show gid] + serverAct sock = withSocketsDo $ do + msg <- recv sock 1024 + putStrLn $ C.unpack msg +-} + +#if defined(HAVE_LINUX_CAN_H) +canTestMsg = S.pack [ 0,0,0,0 -- can ID = 0 + , 4,0,0,0 -- data length counter = 2 (bytes) + , 0x80,123,321,55 -- SYNC with some random extra bytes + , 0, 0, 0, 0 -- padding + ] + +testCanSend :: Assertion +testCanSend = canTest "vcan0" client server + where + server sock = recv sock 1024 >>= (@=?) 
canTestMsg + client sock = send sock canTestMsg + +canTest :: String -> (Socket -> IO a) -> (Socket -> IO b) -> IO () +canTest ifname clientAct serverAct = do + ifIndex <- liftM fromJust $ ifNameToIndex ifname + test (clientSetup ifIndex) clientAct (serverSetup ifIndex) serverAct + where + clientSetup ifIndex = do + sock <- socket AF_CAN Raw 1 -- protocol 1 = raw CAN + -- bind the socket to the interface + bind sock (SockAddrCan $ fromIntegral $ ifIndex) + return sock + + serverSetup = clientSetup +#endif + +------------------------------------------------------------------------ +-- Other + +------------------------------------------------------------------------ +-- List of all tests + +basicTests :: Test +basicTests = testGroup "Basic socket operations" + [ + -- Sending and receiving + testCase "testSend" testSend + , testCase "testSendAll" testSendAll + , testCase "testSendTo" testSendTo + , testCase "testSendAllTo" testSendAllTo + , testCase "testSendMany" testSendMany + , testCase "testSendManyTo" testSendManyTo + , testCase "testRecv" testRecv + , testCase "testOverFlowRecv" testOverFlowRecv + , testCase "testRecvFrom" testRecvFrom + , testCase "testOverFlowRecvFrom" testOverFlowRecvFrom + , testCase "testUserTimeout" testUserTimeout +-- , testCase "testGetPeerCred" testGetPeerCred +-- , testCase "testGetPeerEid" testGetPeerEid +#if defined(HAVE_LINUX_CAN_H) + , testCase "testCanSend" testCanSend +#endif + ] + +tests :: [Test] +tests = [basicTests] + +------------------------------------------------------------------------ +-- Test helpers + +-- | Returns the 'PortNumber' of the peer. Will throw an 'error' if +-- used on a non-IP socket. 
+getPeerPort :: Socket -> IO PortNumber +getPeerPort sock = do + sockAddr <- getPeerName sock + case sockAddr of + (SockAddrInet port _) -> return port + (SockAddrInet6 port _ _ _) -> return port + _ -> error "getPeerPort: only works with IP sockets" + +-- | Establish a connection between client and server and then run +-- 'clientAct' and 'serverAct', in different threads. Both actions +-- get passed a connected 'Socket', used for communicating between +-- client and server. 'tcpTest' makes sure that the 'Socket' is +-- closed after the actions have run. +tcpTest :: (Socket -> IO a) -> (Socket -> IO b) -> IO () +tcpTest clientAct serverAct = do + portVar <- newEmptyMVar + test (clientSetup portVar) clientAct (serverSetup portVar) server + where + clientSetup portVar = do + sock <- socket AF_INET Stream defaultProtocol + addr <- inet_addr serverAddr + serverPort <- readMVar portVar + connect sock $ SockAddrInet serverPort addr + return sock + + serverSetup portVar = do + sock <- socket AF_INET Stream defaultProtocol + setSocketOption sock ReuseAddr 1 + addr <- inet_addr serverAddr + bindSocket sock (SockAddrInet aNY_PORT addr) + listen sock 1 + serverPort <- socketPort sock + putMVar portVar serverPort + return sock + + server sock = do + (clientSock, _) <- accept sock + serverAct clientSock + sClose clientSock + +-- | Create an unconnected 'Socket' for sending UDP and receiving +-- datagrams and then run 'clientAct' and 'serverAct'. 
+udpTest :: (Socket -> PortNumber -> IO a) -> (Socket -> IO b) -> IO () +udpTest clientAct serverAct = do + portVar <- newEmptyMVar + test clientSetup (client portVar) (serverSetup portVar) serverAct + where + clientSetup = socket AF_INET Datagram defaultProtocol + + client portVar sock = do + serverPort <- readMVar portVar + clientAct sock serverPort + + serverSetup portVar = do + sock <- socket AF_INET Datagram defaultProtocol + setSocketOption sock ReuseAddr 1 + addr <- inet_addr serverAddr + bindSocket sock (SockAddrInet aNY_PORT addr) + serverPort <- socketPort sock + putMVar portVar serverPort + return sock + +-- | Run a client/server pair and synchronize them so that the server +-- is started before the client and the specified server action is +-- finished before the client closes the 'Socket'. +test :: IO Socket -> (Socket -> IO b) -> IO Socket -> (Socket -> IO c) -> IO () +test clientSetup clientAct serverSetup serverAct = do + tid <- myThreadId + barrier <- newEmptyMVar + forkIO $ server barrier + client tid barrier + where + server barrier = do + E.bracket serverSetup sClose $ \sock -> do + serverReady + serverAct sock + putMVar barrier () + where + -- | Signal to the client that it can proceed. + serverReady = putMVar barrier () + + client tid barrier = do + takeMVar barrier + -- Transfer exceptions to the main thread. + bracketWithReraise tid clientSetup sClose $ \res -> do + clientAct res + takeMVar barrier + +-- | Like 'bracket' but catches and reraises the exception in another +-- thread, specified by the first argument. 
+bracketWithReraise :: ThreadId -> IO a -> (a -> IO b) -> (a -> IO ()) -> IO () +bracketWithReraise tid before after thing = + E.bracket before after thing + `E.catch` \ (e :: E.SomeException) -> E.throwTo tid e + +------------------------------------------------------------------------ +-- Test harness + +main :: IO () +main = withSocketsDo $ defaultMain tests diff -Nru cabal-install-1.22-1.22.6.0/src/network-uri-2.6.1.0/LICENSE cabal-install-1.22-1.22.9.0/src/network-uri-2.6.1.0/LICENSE --- cabal-install-1.22-1.22.6.0/src/network-uri-2.6.1.0/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-uri-2.6.1.0/LICENSE 2016-03-19 20:56:32.000000000 +0000 @@ -0,0 +1,29 @@ +Copyright (c) 2002-2010, The University Court of the University of Glasgow. +Copyright (c) 2007-2010, Johan Tibell + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +- Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +- Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +- Neither name of the University nor the names of its contributors may be +used to endorse or promote products derived from this software without +specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY COURT OF THE UNIVERSITY OF +GLASGOW AND THE CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +UNIVERSITY COURT OF THE UNIVERSITY OF GLASGOW OR THE CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. diff -Nru cabal-install-1.22-1.22.6.0/src/network-uri-2.6.1.0/Network/URI.hs cabal-install-1.22-1.22.9.0/src/network-uri-2.6.1.0/Network/URI.hs --- cabal-install-1.22-1.22.6.0/src/network-uri-2.6.1.0/Network/URI.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-uri-2.6.1.0/Network/URI.hs 2016-03-19 20:56:32.000000000 +0000 @@ -0,0 +1,1361 @@ +{-# LANGUAGE CPP #-} +-------------------------------------------------------------------------------- +-- | +-- Module : Network.URI +-- Copyright : (c) 2004, Graham Klyne +-- License : BSD-style (see end of this file) +-- +-- Maintainer : Graham Klyne +-- Stability : provisional +-- Portability : portable +-- +-- This module defines functions for handling URIs. It presents substantially the +-- same interface as the older GHC Network.URI module, but is implemented using +-- Parsec rather than a Regex library that is not available with Hugs. The internal +-- representation of URI has been changed so that URI strings are more +-- completely preserved when round-tripping to a URI value and back. +-- +-- In addition, four methods are provided for parsing different +-- kinds of URI string (as noted in RFC3986): +-- 'parseURI', +-- 'parseURIReference', +-- 'parseRelativeReference' and +-- 'parseAbsoluteURI'. 
+-- +-- Further, four methods are provided for classifying different +-- kinds of URI string (as noted in RFC3986): +-- 'isURI', +-- 'isURIReference', +-- 'isRelativeReference' and +-- 'isAbsoluteURI'. +-- +-- The long-standing official reference for URI handling was RFC2396 [1], +-- as updated by RFC 2732 [2], but this was replaced by a new specification, +-- RFC3986 [3] in January 2005. This latter specification has been used +-- as the primary reference for constructing the URI parser implemented +-- here, and it is intended that there is a direct relationship between +-- the syntax definition in that document and this parser implementation. +-- +-- RFC 1808 [4] contains a number of test cases for relative URI handling. +-- Dan Connolly's Python module @uripath.py@ [5] also contains useful details +-- and test cases. +-- +-- Some of the code has been copied from the previous GHC implementation, +-- but the parser is replaced with one that performs more complete +-- syntax checking of the URI itself, according to RFC3986 [3]. +-- +-- References +-- +-- (1) +-- +-- (2) +-- +-- (3) +-- +-- (4) +-- +-- (5) +-- +-------------------------------------------------------------------------------- + +module Network.URI + ( + -- * The URI type + URI(..) + , URIAuth(..) + , nullURI + + -- * Parsing + , parseURI + , parseURIReference + , parseRelativeReference + , parseAbsoluteURI + + -- * Test for strings containing various kinds of URI + , isURI + , isURIReference + , isRelativeReference + , isAbsoluteURI + , isIPv6address + , isIPv4address + + -- * Predicates + , uriIsAbsolute + , uriIsRelative + + -- * Relative URIs + , relativeTo + , nonStrictRelativeTo + , relativeFrom + + -- * Operations on URI strings + -- | Support for putting strings into URI-friendly + -- escaped format and getting them back again. + -- This can't be done transparently in all cases, because certain + -- characters have different meanings in different kinds of URI. 
+ -- The URI spec [3], section 2.4, indicates that all URI components + -- should be escaped before they are assembled as a URI: + -- \"Once produced, a URI is always in its percent-encoded form\" + , uriToString + , isReserved, isUnreserved + , isAllowedInURI, isUnescapedInURI + , isUnescapedInURIComponent + , escapeURIChar + , escapeURIString + , unEscapeString + , pathSegments + + -- * URI Normalization functions + , normalizeCase + , normalizeEscape + , normalizePathSegments + + -- * Deprecated functions + , parseabsoluteURI + , escapeString + , reserved, unreserved + , scheme, authority, path, query, fragment + ) where + +import Text.ParserCombinators.Parsec + ( GenParser, ParseError + , parse, (), try + , option, many1, count, notFollowedBy + , char, satisfy, oneOf, string, eof + , unexpected + ) + +import Control.Applicative +import Control.Monad (MonadPlus(..)) +import Control.DeepSeq (NFData(rnf), deepseq) +import Data.Char (ord, chr, isHexDigit, toLower, toUpper, digitToInt) +import Data.Bits ((.|.),(.&.),shiftL,shiftR) +import Data.List (unfoldr) +import Numeric (showIntAtBase) + +#if !MIN_VERSION_base(4,8,0) +import Data.Traversable (sequenceA) +#endif + +import Data.Typeable (Typeable) +#if MIN_VERSION_base(4,0,0) +import Data.Data (Data) +#else +import Data.Generics (Data) +#endif + +#if MIN_VERSION_base(4,6,0) +import GHC.Generics (Generic) +#else +#endif + +------------------------------------------------------------ +-- The URI datatype +------------------------------------------------------------ + +-- |Represents a general universal resource identifier using +-- its component parts. 
+-- +-- For example, for the URI +-- +-- > foo://anonymous@www.haskell.org:42/ghc?query#frag +-- +-- the components are: +-- +data URI = URI + { uriScheme :: String -- ^ @foo:@ + , uriAuthority :: Maybe URIAuth -- ^ @\/\/anonymous\@www.haskell.org:42@ + , uriPath :: String -- ^ @\/ghc@ + , uriQuery :: String -- ^ @?query@ + , uriFragment :: String -- ^ @#frag@ +#if MIN_VERSION_base(4,6,0) + } deriving (Eq, Ord, Typeable, Data, Generic) +#else + } deriving (Eq, Ord, Typeable, Data) +#endif + +instance NFData URI where + rnf (URI s a p q f) + = s `deepseq` a `deepseq` p `deepseq` q `deepseq` f `deepseq` () + +-- |Type for authority value within a URI +data URIAuth = URIAuth + { uriUserInfo :: String -- ^ @anonymous\@@ + , uriRegName :: String -- ^ @www.haskell.org@ + , uriPort :: String -- ^ @:42@ + } deriving (Eq, Ord, Show, Typeable, Data) + +instance NFData URIAuth where + rnf (URIAuth ui rn p) = ui `deepseq` rn `deepseq` p `deepseq` () + +-- |Blank URI +nullURI :: URI +nullURI = URI + { uriScheme = "" + , uriAuthority = Nothing + , uriPath = "" + , uriQuery = "" + , uriFragment = "" + } + +-- URI as instance of Show. Note that for security reasons, the default +-- behaviour is to suppress any userinfo field (see RFC3986, section 7.5). +-- This can be overridden by using uriToString directly with first +-- argument @id@ (noting that this returns a ShowS value rather than a string). 
+-- +-- [[[Another design would be to embed the userinfo mapping function in +-- the URIAuth value, with the default value suppressing userinfo formatting, +-- but providing a function to return a new URI value with userinfo +-- data exposed by show.]]] +-- +instance Show URI where + showsPrec _ = uriToString defaultUserInfoMap + +defaultUserInfoMap :: String -> String +defaultUserInfoMap uinf = user++newpass + where + (user,pass) = break (==':') uinf + newpass = if null pass || (pass == "@") + || (pass == ":@") + then pass + else ":...@" + +------------------------------------------------------------ +-- Parse a URI +------------------------------------------------------------ + +-- |Turn a string containing a URI into a 'URI'. +-- Returns 'Nothing' if the string is not a valid URI; +-- (an absolute URI with optional fragment identifier). +-- +-- NOTE: this is different from the previous network.URI, +-- whose @parseURI@ function works like 'parseURIReference' +-- in this module. +-- +parseURI :: String -> Maybe URI +parseURI = parseURIAny uri + +-- |Parse a URI reference to a 'URI' value. +-- Returns 'Nothing' if the string is not a valid URI reference. +-- (an absolute or relative URI with optional fragment identifier). +-- +parseURIReference :: String -> Maybe URI +parseURIReference = parseURIAny uriReference + +-- |Parse a relative URI to a 'URI' value. +-- Returns 'Nothing' if the string is not a valid relative URI. +-- (a relative URI with optional fragment identifier). +-- +parseRelativeReference :: String -> Maybe URI +parseRelativeReference = parseURIAny relativeRef + +-- |Parse an absolute URI to a 'URI' value. +-- Returns 'Nothing' if the string is not a valid absolute URI. +-- (an absolute URI without a fragment identifier). +-- +parseAbsoluteURI :: String -> Maybe URI +parseAbsoluteURI = parseURIAny absoluteURI + +-- |Test if string contains a valid URI +-- (an absolute URI with optional fragment identifier). 
+-- +isURI :: String -> Bool +isURI = isValidParse uri + +-- |Test if string contains a valid URI reference +-- (an absolute or relative URI with optional fragment identifier). +-- +isURIReference :: String -> Bool +isURIReference = isValidParse uriReference + +-- |Test if string contains a valid relative URI +-- (a relative URI with optional fragment identifier). +-- +isRelativeReference :: String -> Bool +isRelativeReference = isValidParse relativeRef + +-- |Test if string contains a valid absolute URI +-- (an absolute URI without a fragment identifier). +-- +isAbsoluteURI :: String -> Bool +isAbsoluteURI = isValidParse absoluteURI + +-- |Test if string contains a valid IPv6 address +-- +isIPv6address :: String -> Bool +isIPv6address = isValidParse ipv6address + +-- |Test if string contains a valid IPv4 address +-- +isIPv4address :: String -> Bool +isIPv4address = isValidParse ipv4address + +-- Helper function for turning a string into a URI +-- +parseURIAny :: URIParser URI -> String -> Maybe URI +parseURIAny parser uristr = case parseAll parser "" uristr of + Left _ -> Nothing + Right u -> Just u + +-- Helper function to test a string match to a parser +-- +isValidParse :: URIParser a -> String -> Bool +isValidParse parser uristr = case parseAll parser "" uristr of + -- Left e -> error (show e) + Left _ -> False + Right _ -> True + +parseAll :: URIParser a -> String -> String -> Either ParseError a +parseAll parser filename uristr = parse newparser filename uristr + where + newparser = + do { res <- parser + ; eof + ; return res + } + +------------------------------------------------------------ +-- Predicates +------------------------------------------------------------ + +uriIsAbsolute :: URI -> Bool +uriIsAbsolute (URI {uriScheme = scheme'}) = scheme' /= "" + +uriIsRelative :: URI -> Bool +uriIsRelative = not . 
uriIsAbsolute + +------------------------------------------------------------ +-- URI parser body based on Parsec elements and combinators +------------------------------------------------------------ + +-- Parser parser type. +-- Currently +type URIParser a = GenParser Char () a + +-- RFC3986, section 2.1 +-- +-- Parse and return a 'pct-encoded' sequence +-- +escaped :: URIParser String +escaped = sequenceA [char '%', hexDigitChar, hexDigitChar] + +-- RFC3986, section 2.2 +-- +-- |Returns 'True' if the character is a \"reserved\" character in a +-- URI. To include a literal instance of one of these characters in a +-- component of a URI, it must be escaped. +-- +isReserved :: Char -> Bool +isReserved c = isGenDelims c || isSubDelims c + +isGenDelims :: Char -> Bool +isGenDelims c = c `elem` ":/?#[]@" + +isSubDelims :: Char -> Bool +isSubDelims c = c `elem` "!$&'()*+,;=" + +subDelims :: URIParser String +subDelims = (:[]) <$> oneOf "!$&'()*+,;=" + +-- RFC3986, section 2.3 +-- +-- |Returns 'True' if the character is an \"unreserved\" character in +-- a URI. These characters do not need to be escaped in a URI. The +-- only characters allowed in a URI are either \"reserved\", +-- \"unreserved\", or an escape sequence (@%@ followed by two hex digits). +-- +isUnreserved :: Char -> Bool +isUnreserved c = isAlphaNumChar c || (c `elem` "-_.~") + +unreservedChar :: URIParser String +unreservedChar = (:[]) <$> satisfy isUnreserved + +-- RFC3986, section 3 +-- +-- URI = scheme ":" hier-part [ "?" query ] [ "#" fragment ] +-- +-- hier-part = "//" authority path-abempty +-- / path-abs +-- / path-rootless +-- / path-empty + +uri :: URIParser URI +uri = + do { us <- try uscheme + -- ; ua <- option Nothing ( do { try (string "//") ; uauthority } ) + -- ; up <- upath + ; (ua,up) <- hierPart + ; uq <- option "" ( do { _ <- char '?' 
; uquery } ) + ; uf <- option "" ( do { _ <- char '#' ; ufragment } ) + ; return $ URI + { uriScheme = us + , uriAuthority = ua + , uriPath = up + , uriQuery = uq + , uriFragment = uf + } + } + +hierPart :: URIParser ((Maybe URIAuth),String) +hierPart = + do { _ <- try (string "//") + ; ua <- uauthority + ; up <- pathAbEmpty + ; return (ua,up) + } + <|> do { up <- pathAbs + ; return (Nothing,up) + } + <|> do { up <- pathRootLess + ; return (Nothing,up) + } + <|> do { return (Nothing,"") + } + +-- RFC3986, section 3.1 + +uscheme :: URIParser String +uscheme = + do { s <- oneThenMany alphaChar (satisfy isSchemeChar) + ; _ <- char ':' + ; return $ s++":" + } + +-- RFC3986, section 3.2 + +uauthority :: URIParser (Maybe URIAuth) +uauthority = + do { uu <- option "" (try userinfo) + ; uh <- host + ; up <- option "" port + ; return $ Just $ URIAuth + { uriUserInfo = uu + , uriRegName = uh + , uriPort = up + } + } + +-- RFC3986, section 3.2.1 + +userinfo :: URIParser String +userinfo = + do { uu <- many (uchar ";:&=+$,") + ; _ <- char '@' + ; return (concat uu ++"@") + } + +-- RFC3986, section 3.2.2 + +host :: URIParser String +host = ipLiteral <|> try ipv4address <|> regName + +ipLiteral :: URIParser String +ipLiteral = + do { _ <- char '[' + ; ua <- ( ipv6address <|> ipvFuture ) + ; _ <- char ']' + ; return $ "[" ++ ua ++ "]" + } + "IP address literal" + +ipvFuture :: URIParser String +ipvFuture = + do { _ <- char 'v' + ; h <- hexDigitChar + ; _ <- char '.' 
+ ; a <- many1 (satisfy isIpvFutureChar) + ; return $ 'v':h:'.':a + } + +isIpvFutureChar :: Char -> Bool +isIpvFutureChar c = isUnreserved c || isSubDelims c || (c==';') + +ipv6address :: URIParser String +ipv6address = + try ( do + { a2 <- count 6 h4c + ; a3 <- ls32 + ; return $ concat a2 ++ a3 + } ) + <|> try ( do + { _ <- string "::" + ; a2 <- count 5 h4c + ; a3 <- ls32 + ; return $ "::" ++ concat a2 ++ a3 + } ) + <|> try ( do + { a1 <- opt_n_h4c_h4 0 + ; _ <- string "::" + ; a2 <- count 4 h4c + ; a3 <- ls32 + ; return $ a1 ++ "::" ++ concat a2 ++ a3 + } ) + <|> try ( do + { a1 <- opt_n_h4c_h4 1 + ; _ <- string "::" + ; a2 <- count 3 h4c + ; a3 <- ls32 + ; return $ a1 ++ "::" ++ concat a2 ++ a3 + } ) + <|> try ( do + { a1 <- opt_n_h4c_h4 2 + ; _ <- string "::" + ; a2 <- count 2 h4c + ; a3 <- ls32 + ; return $ a1 ++ "::" ++ concat a2 ++ a3 + } ) + <|> try ( do + { a1 <- opt_n_h4c_h4 3 + ; _ <- string "::" + ; a2 <- h4c + ; a3 <- ls32 + ; return $ a1 ++ "::" ++ a2 ++ a3 + } ) + <|> try ( do + { a1 <- opt_n_h4c_h4 4 + ; _ <- string "::" + ; a3 <- ls32 + ; return $ a1 ++ "::" ++ a3 + } ) + <|> try ( do + { a1 <- opt_n_h4c_h4 5 + ; _ <- string "::" + ; a3 <- h4 + ; return $ a1 ++ "::" ++ a3 + } ) + <|> try ( do + { a1 <- opt_n_h4c_h4 6 + ; _ <- string "::" + ; return $ a1 ++ "::" + } ) + "IPv6 address" + +opt_n_h4c_h4 :: Int -> URIParser String +opt_n_h4c_h4 n = option "" $ + do { a1 <- countMinMax 0 n h4c + ; a2 <- h4 + ; return $ concat a1 ++ a2 + } + +ls32 :: URIParser String +ls32 = try ( do + { a1 <- h4c + ; a2 <- h4 + ; return (a1++a2) + } ) + <|> ipv4address + +h4c :: URIParser String +h4c = try $ + do { a1 <- h4 + ; _ <- char ':' + ; _ <- notFollowedBy (char ':') + ; return $ a1 ++ ":" + } + +h4 :: URIParser String +h4 = countMinMax 1 4 hexDigitChar + +ipv4address :: URIParser String +ipv4address = + do { a1 <- decOctet ; _ <- char '.' + ; a2 <- decOctet ; _ <- char '.' + ; a3 <- decOctet ; _ <- char '.' 
+ ; a4 <- decOctet + ; _ <- notFollowedBy nameChar + ; return $ a1++"."++a2++"."++a3++"."++a4 + } + "IPv4 Address" + +decOctet :: URIParser String +decOctet = + do { a1 <- countMinMax 1 3 digitChar + ; if (read a1 :: Integer) > 255 then + fail "Decimal octet value too large" + else + return a1 + } + +regName :: URIParser String +regName = + do { ss <- countMinMax 0 255 nameChar + ; return $ concat ss + } + "Registered name" + + +nameChar :: URIParser String +nameChar = (unreservedChar <|> escaped <|> subDelims) + "Name character" + +-- RFC3986, section 3.2.3 + +port :: URIParser String +port = + do { _ <- char ':' + ; p <- many digitChar + ; return (':':p) + } + +-- +-- RFC3986, section 3.3 +-- +-- path = path-abempty ; begins with "/" or is empty +-- / path-abs ; begins with "/" but not "//" +-- / path-noscheme ; begins with a non-colon segment +-- / path-rootless ; begins with a segment +-- / path-empty ; zero characters +-- +-- path-abempty = *( "/" segment ) +-- path-abs = "/" [ segment-nz *( "/" segment ) ] +-- path-noscheme = segment-nzc *( "/" segment ) +-- path-rootless = segment-nz *( "/" segment ) +-- path-empty = 0 +-- +-- segment = *pchar +-- segment-nz = 1*pchar +-- segment-nzc = 1*( unreserved / pct-encoded / sub-delims / "@" ) +-- +-- pchar = unreserved / pct-encoded / sub-delims / ":" / "@" + +{- +upath :: URIParser String +upath = pathAbEmpty + <|> pathAbs + <|> pathNoScheme + <|> pathRootLess + <|> pathEmpty +-} + +pathAbEmpty :: URIParser String +pathAbEmpty = + do { ss <- many slashSegment + ; return $ concat ss + } + +pathAbs :: URIParser String +pathAbs = + do { _ <- char '/' + ; ss <- option "" pathRootLess + ; return $ '/':ss + } + +pathNoScheme :: URIParser String +pathNoScheme = + do { s1 <- segmentNzc + ; ss <- many slashSegment + ; return $ concat (s1:ss) + } + +pathRootLess :: URIParser String +pathRootLess = + do { s1 <- segmentNz + ; ss <- many slashSegment + ; return $ concat (s1:ss) + } + +slashSegment :: URIParser String 
+slashSegment = + do { _ <- char '/' + ; s <- segment + ; return ('/':s) + } + +segment :: URIParser String +segment = + do { ps <- many pchar + ; return $ concat ps + } + +segmentNz :: URIParser String +segmentNz = + do { ps <- many1 pchar + ; return $ concat ps + } + +segmentNzc :: URIParser String +segmentNzc = + do { ps <- many1 (uchar "@") + ; return $ concat ps + } + +pchar :: URIParser String +pchar = uchar ":@" + +-- helper function for pchar and friends +uchar :: String -> URIParser String +uchar extras = + unreservedChar + <|> escaped + <|> subDelims + <|> do { c <- oneOf extras ; return [c] } + +-- RFC3986, section 3.4 + +uquery :: URIParser String +uquery = + do { ss <- many $ uchar (":@"++"/?") + ; return $ '?':concat ss + } + +-- RFC3986, section 3.5 + +ufragment :: URIParser String +ufragment = + do { ss <- many $ uchar (":@"++"/?") + ; return $ '#':concat ss + } + +-- Reference, Relative and Absolute URI forms +-- +-- RFC3986, section 4.1 + +uriReference :: URIParser URI +uriReference = uri <|> relativeRef + +-- RFC3986, section 4.2 +-- +-- relative-URI = relative-part [ "?" query ] [ "#" fragment ] +-- +-- relative-part = "//" authority path-abempty +-- / path-abs +-- / path-noscheme +-- / path-empty + +relativeRef :: URIParser URI +relativeRef = + do { notMatching uscheme + -- ; ua <- option Nothing ( do { try (string "//") ; uauthority } ) + -- ; up <- upath + ; (ua,up) <- relativePart + ; uq <- option "" ( do { _ <- char '?' 
; uquery } ) + ; uf <- option "" ( do { _ <- char '#' ; ufragment } ) + ; return $ URI + { uriScheme = "" + , uriAuthority = ua + , uriPath = up + , uriQuery = uq + , uriFragment = uf + } + } + +relativePart :: URIParser ((Maybe URIAuth),String) +relativePart = + do { _ <- try (string "//") + ; ua <- uauthority + ; up <- pathAbEmpty + ; return (ua,up) + } + <|> do { up <- pathAbs + ; return (Nothing,up) + } + <|> do { up <- pathNoScheme + ; return (Nothing,up) + } + <|> do { return (Nothing,"") + } + +-- RFC3986, section 4.3 + +absoluteURI :: URIParser URI +absoluteURI = + do { us <- uscheme + -- ; ua <- option Nothing ( do { try (string "//") ; uauthority } ) + -- ; up <- upath + ; (ua,up) <- hierPart + ; uq <- option "" ( do { _ <- char '?' ; uquery } ) + ; return $ URI + { uriScheme = us + , uriAuthority = ua + , uriPath = up + , uriQuery = uq + , uriFragment = "" + } + } + +-- Imports from RFC 2234 + + -- NOTE: can't use isAlphaNum etc. because these deal with ISO 8859 + -- (and possibly Unicode!) chars. + -- [[[Above was a comment originally in GHC Network/URI.hs: + -- when IRIs are introduced then most codepoints above 128(?) should + -- be treated as unreserved, and higher codepoints for letters should + -- certainly be allowed. + -- ]]] + +isAlphaChar :: Char -> Bool +isAlphaChar c = (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') + +isDigitChar :: Char -> Bool +isDigitChar c = (c >= '0' && c <= '9') + +isAlphaNumChar :: Char -> Bool +isAlphaNumChar c = isAlphaChar c || isDigitChar c + +isHexDigitChar :: Char -> Bool +isHexDigitChar c = isHexDigit c + +isSchemeChar :: Char -> Bool +isSchemeChar c = (isAlphaNumChar c) || (c `elem` "+-.") + +alphaChar :: URIParser Char +alphaChar = satisfy isAlphaChar -- or: Parsec.letter ? + +digitChar :: URIParser Char +digitChar = satisfy isDigitChar -- or: Parsec.digit ? + +hexDigitChar :: URIParser Char +hexDigitChar = satisfy isHexDigitChar -- or: Parsec.hexDigit ? 
+ +-- Additional parser combinators for common patterns + +oneThenMany :: GenParser t s a -> GenParser t s a -> GenParser t s [a] +oneThenMany p1 pr = + do { a1 <- p1 + ; ar <- many pr + ; return (a1:ar) + } + +countMinMax :: Int -> Int -> GenParser t s a -> GenParser t s [a] +countMinMax m n p | m > 0 = + do { a1 <- p + ; ar <- countMinMax (m-1) (n-1) p + ; return (a1:ar) + } +countMinMax _ n _ | n <= 0 = return [] +countMinMax _ n p = option [] $ + do { a1 <- p + ; ar <- countMinMax 0 (n-1) p + ; return (a1:ar) + } + +notMatching :: Show a => GenParser tok st a -> GenParser tok st () +notMatching p = do { a <- try p ; unexpected (show a) } <|> return () + +------------------------------------------------------------ +-- Reconstruct a URI string +------------------------------------------------------------ +-- +-- |Turn a 'URI' into a string. +-- +-- Uses a supplied function to map the userinfo part of the URI. +-- +-- The Show instance for URI uses a mapping that hides any password +-- that may be present in the URI. Use this function with argument @id@ +-- to preserve the password in the formatted output. +-- +uriToString :: (String->String) -> URI -> ShowS +uriToString userinfomap URI { uriScheme=myscheme + , uriAuthority=myauthority + , uriPath=mypath + , uriQuery=myquery + , uriFragment=myfragment + } = + (myscheme++) . (uriAuthToString userinfomap myauthority) + . (mypath++) . (myquery++) . (myfragment++) + +uriAuthToString :: (String->String) -> (Maybe URIAuth) -> ShowS +uriAuthToString _ Nothing = id -- shows "" +uriAuthToString userinfomap + (Just URIAuth { uriUserInfo = myuinfo + , uriRegName = myregname + , uriPort = myport + } ) = + ("//"++) . (if null myuinfo then id else ((userinfomap myuinfo)++)) + . (myregname++) + . (myport++) + +------------------------------------------------------------ +-- Character classes +------------------------------------------------------------ + +-- | Returns 'True' if the character is allowed in a URI. 
+-- +isAllowedInURI :: Char -> Bool +isAllowedInURI c = isReserved c || isUnreserved c || c == '%' -- escape char + +-- | Returns 'True' if the character is allowed unescaped in a URI. +-- +-- >>> escapeURIString isUnescapedInURI "http://haskell.org:80?some_param=true&other_param=їґ" +-- "http://haskell.org:80?some_param=true&other_param=%D1%97%D2%91" +isUnescapedInURI :: Char -> Bool +isUnescapedInURI c = isReserved c || isUnreserved c + +-- | Returns 'True' if the character is allowed unescaped in a URI component. +-- +-- >>> escapeURIString isUnescapedInURIComponent "http://haskell.org:80?some_param=true&other_param=їґ" +-- "http%3A%2F%2Fhaskell.org%3A80%3Fsome_param%3Dtrue%26other_param%3D%D1%97%D2%91" +isUnescapedInURIComponent :: Char -> Bool +isUnescapedInURIComponent c = not (isReserved c || not (isUnescapedInURI c)) + +------------------------------------------------------------ +-- Escape sequence handling +------------------------------------------------------------ + +-- |Escape character if supplied predicate is not satisfied, +-- otherwise return character as singleton string. +-- +escapeURIChar :: (Char->Bool) -> Char -> String +escapeURIChar p c + | p c = [c] + | otherwise = concatMap (\i -> '%' : myShowHex i "") (utf8EncodeChar c) + where + myShowHex :: Int -> ShowS + myShowHex n r = case showIntAtBase 16 (toChrHex) n r of + [] -> "00" + [x] -> ['0',x] + cs -> cs + toChrHex d + | d < 10 = chr (ord '0' + fromIntegral d) + | otherwise = chr (ord 'A' + fromIntegral (d - 10)) + +-- From http://hackage.haskell.org/package/utf8-string +-- by Eric Mertens, BSD3 +-- Returns [Int] for use with showIntAtBase +utf8EncodeChar :: Char -> [Int] +utf8EncodeChar = map fromIntegral . go . ord + where + go oc + | oc <= 0x7f = [oc] + + | oc <= 0x7ff = [ 0xc0 + (oc `shiftR` 6) + , 0x80 + oc .&. 0x3f + ] + + | oc <= 0xffff = [ 0xe0 + (oc `shiftR` 12) + , 0x80 + ((oc `shiftR` 6) .&. 0x3f) + , 0x80 + oc .&. 
0x3f + ] + | otherwise = [ 0xf0 + (oc `shiftR` 18) + , 0x80 + ((oc `shiftR` 12) .&. 0x3f) + , 0x80 + ((oc `shiftR` 6) .&. 0x3f) + , 0x80 + oc .&. 0x3f + ] + +-- |Can be used to make a string valid for use in a URI. +-- +escapeURIString + :: (Char->Bool) -- ^ a predicate which returns 'False' + -- if the character should be escaped + -> String -- ^ the string to process + -> String -- ^ the resulting URI string +escapeURIString p s = concatMap (escapeURIChar p) s + +-- |Turns all instances of escaped characters in the string back +-- into literal characters. +-- +unEscapeString :: String -> String +unEscapeString [] = "" +unEscapeString s@(c:cs) = case unEscapeByte s of + Just (byte, rest) -> unEscapeUtf8 byte rest + Nothing -> c : unEscapeString cs + +unEscapeByte :: String -> Maybe (Int, String) +unEscapeByte ('%':x1:x2:s) | isHexDigit x1 && isHexDigit x2 = + Just (digitToInt x1 * 16 + digitToInt x2, s) +unEscapeByte _ = Nothing + +-- Adapted from http://hackage.haskell.org/package/utf8-string +-- by Eric Mertens, BSD3 +unEscapeUtf8 :: Int -> String -> String +unEscapeUtf8 c rest + | c < 0x80 = chr c : unEscapeString rest + | c < 0xc0 = replacement_character : unEscapeString rest + | c < 0xe0 = multi1 + | c < 0xf0 = multi_byte 2 0xf 0x800 + | c < 0xf8 = multi_byte 3 0x7 0x10000 + | c < 0xfc = multi_byte 4 0x3 0x200000 + | c < 0xfe = multi_byte 5 0x1 0x4000000 + | otherwise = replacement_character : unEscapeString rest + where + replacement_character = '\xfffd' + multi1 = case unEscapeByte rest of + Just (c1, ds) | c1 .&. 0xc0 == 0x80 -> + let d = ((fromEnum c .&. 0x1f) `shiftL` 6) .|. fromEnum (c1 .&. 0x3f) + in if d >= 0x000080 then toEnum d : unEscapeString ds + else replacement_character : unEscapeString ds + _ -> replacement_character : unEscapeString rest + + multi_byte :: Int -> Int -> Int -> String + multi_byte i mask overlong = + aux i rest (unEscapeByte rest) (c .&. 
mask) + where + aux 0 rs _ acc + | overlong <= acc && acc <= 0x10ffff && + (acc < 0xd800 || 0xdfff < acc) && + (acc < 0xfffe || 0xffff < acc) = chr acc : unEscapeString rs + | otherwise = replacement_character : unEscapeString rs + + aux n _ (Just (r, rs)) acc + | r .&. 0xc0 == 0x80 = aux (n-1) rs (unEscapeByte rs) + $! shiftL acc 6 .|. (r .&. 0x3f) + + aux _ rs _ _ = replacement_character : unEscapeString rs + +------------------------------------------------------------ +-- Resolving a relative URI relative to a base URI +------------------------------------------------------------ + +-- |Returns a new 'URI' which represents the value of the +-- first 'URI' interpreted as relative to the second 'URI'. +-- For example: +-- +-- > "foo" `relativeTo` "http://bar.org/" = "http://bar.org/foo" +-- > "http:foo" `nonStrictRelativeTo` "http://bar.org/" = "http://bar.org/foo" +-- +-- Algorithm from RFC3986 [3], section 5.2.2 +-- + +nonStrictRelativeTo :: URI -> URI -> URI +nonStrictRelativeTo ref base = relativeTo ref' base + where + ref' = if uriScheme ref == uriScheme base + then ref { uriScheme="" } + else ref + +isDefined :: ( MonadPlus m, Eq (m a) ) => m a -> Bool +isDefined a = a /= mzero + +-- | Returns a new 'URI' which represents the value of the first 'URI' +-- interpreted as relative to the second 'URI'. 
+-- +-- Algorithm from RFC3986 [3], section 5.2 +relativeTo :: URI -> URI -> URI +relativeTo ref base + | isDefined ( uriScheme ref ) = + just_segments ref + | isDefined ( uriAuthority ref ) = + just_segments ref { uriScheme = uriScheme base } + | isDefined ( uriPath ref ) = + if (head (uriPath ref) == '/') then + just_segments ref + { uriScheme = uriScheme base + , uriAuthority = uriAuthority base + } + else + just_segments ref + { uriScheme = uriScheme base + , uriAuthority = uriAuthority base + , uriPath = mergePaths base ref + } + | isDefined ( uriQuery ref ) = + just_segments ref + { uriScheme = uriScheme base + , uriAuthority = uriAuthority base + , uriPath = uriPath base + } + | otherwise = + just_segments ref + { uriScheme = uriScheme base + , uriAuthority = uriAuthority base + , uriPath = uriPath base + , uriQuery = uriQuery base + } + where + just_segments u = + u { uriPath = removeDotSegments (uriPath u) } + mergePaths b r + | isDefined (uriAuthority b) && null pb = '/':pr + | otherwise = dropLast pb ++ pr + where + pb = uriPath b + pr = uriPath r + dropLast = fst . splitLast -- reverse . dropWhile (/='/') . reverse + +-- Remove dot segments, but protect leading '/' character +removeDotSegments :: String -> String +removeDotSegments ('/':ps) = '/':elimDots ps [] +removeDotSegments ps = elimDots ps [] + +-- Second arg accumulates segments processed so far in reverse order +elimDots :: String -> [String] -> String +-- elimDots ps rs | traceVal "\nps " ps $ traceVal "rs " rs $ False = error "" +elimDots [] [] = "" +elimDots [] rs = concat (reverse rs) +elimDots ( '.':'/':ps) rs = elimDots ps rs +elimDots ( '.':[] ) rs = elimDots [] rs +elimDots ( '.':'.':'/':ps) rs = elimDots ps (drop 1 rs) +elimDots ( '.':'.':[] ) rs = elimDots [] (drop 1 rs) +elimDots ps rs = elimDots ps1 (r:rs) + where + (r,ps1) = nextSegment ps + +-- Returns the next segment and the rest of the path from a path string. +-- Each segment ends with the next '/' or the end of string. 
+-- +nextSegment :: String -> (String,String) +nextSegment ps = + case break (=='/') ps of + (r,'/':ps1) -> (r++"/",ps1) + (r,_) -> (r,[]) + +segments :: String -> [String] +segments str = dropLeadingEmpty $ unfoldr nextSegmentMaybe str + where + nextSegmentMaybe "" = Nothing + nextSegmentMaybe ps = + case break (=='/') ps of + (seg, '/':ps1) -> Just (seg, ps1) + (seg, _) -> Just (seg, "") + dropLeadingEmpty ("":xs) = xs + dropLeadingEmpty xs = xs + +-- | Returns the segments of the path component. E.g., +-- pathSegments <$> parseURI "http://example.org/foo/bar/baz" +-- == ["foo", "bar", "baz"] +pathSegments :: URI -> [String] +pathSegments = segments . uriPath + +-- | Split last (name) segment from path, returning (path,name) +splitLast :: String -> (String,String) +splitLast p = (reverse revpath,reverse revname) + where + (revname,revpath) = break (=='/') $ reverse p + +------------------------------------------------------------ +-- Finding a URI relative to a base URI +------------------------------------------------------------ + +-- |Returns a new 'URI' which represents the relative location of +-- the first 'URI' with respect to the second 'URI'. Thus, the +-- values supplied are expected to be absolute URIs, and the result +-- returned may be a relative URI. +-- +-- Example: +-- +-- > "http://example.com/Root/sub1/name2#frag" +-- > `relativeFrom` "http://example.com/Root/sub2/name2#frag" +-- > == "../sub1/name2#frag" +-- +-- There is no single correct implementation of this function, +-- but any acceptable implementation must satisfy the following: +-- +-- > (uabs `relativeFrom` ubase) `relativeTo` ubase == uabs +-- +-- For any valid absolute URI. +-- (cf. 
+-- ) +-- +relativeFrom :: URI -> URI -> URI +relativeFrom uabs base + | diff uriScheme uabs base = uabs + | diff uriAuthority uabs base = uabs { uriScheme = "" } + | diff uriPath uabs base = uabs + { uriScheme = "" + , uriAuthority = Nothing + , uriPath = relPathFrom (removeBodyDotSegments $ uriPath uabs) + (removeBodyDotSegments $ uriPath base) + } + | diff uriQuery uabs base = uabs + { uriScheme = "" + , uriAuthority = Nothing + , uriPath = "" + } + | otherwise = uabs -- Always carry fragment from uabs + { uriScheme = "" + , uriAuthority = Nothing + , uriPath = "" + , uriQuery = "" + } + where + diff :: Eq b => (a -> b) -> a -> a -> Bool + diff sel u1 u2 = sel u1 /= sel u2 + -- Remove dot segments except the final segment + removeBodyDotSegments p = removeDotSegments p1 ++ p2 + where + (p1,p2) = splitLast p + +relPathFrom :: String -> String -> String +relPathFrom [] _ = "/" +relPathFrom pabs [] = pabs +relPathFrom pabs base = -- Construct a relative path segments + if sa1 == sb1 -- if the paths share a leading segment + then if (sa1 == "/") -- other than a leading '/' + then if (sa2 == sb2) + then relPathFrom1 ra2 rb2 + else pabs + else relPathFrom1 ra1 rb1 + else pabs + where + (sa1,ra1) = nextSegment pabs + (sb1,rb1) = nextSegment base + (sa2,ra2) = nextSegment ra1 + (sb2,rb2) = nextSegment rb1 + +-- relPathFrom1 strips off trailing names from the supplied paths, +-- and calls difPathFrom to find the relative path from base to +-- target +relPathFrom1 :: String -> String -> String +relPathFrom1 pabs base = relName + where + (sa,na) = splitLast pabs + (sb,nb) = splitLast base + rp = relSegsFrom sa sb + relName = if null rp then + if (na == nb) then "" + else if protect na then "./"++na + else na + else + rp++na + -- Precede name with some path if it is null or contains a ':' + protect s = null s || ':' `elem` s + +-- relSegsFrom discards any common leading segments from both paths, +-- then invokes difSegsFrom to calculate a relative path from the end +-- of 
the base path to the end of the target path. +-- The final name is handled separately, so this deals only with +-- "directory" segtments. +-- +relSegsFrom :: String -> String -> String +{- +relSegsFrom sabs base + | traceVal "\nrelSegsFrom\nsabs " sabs $ traceVal "base " base $ + False = error "" +-} +relSegsFrom [] [] = "" -- paths are identical +relSegsFrom sabs base = + if sa1 == sb1 + then relSegsFrom ra1 rb1 + else difSegsFrom sabs base + where + (sa1,ra1) = nextSegment sabs + (sb1,rb1) = nextSegment base + +-- difSegsFrom calculates a path difference from base to target, +-- not including the final name at the end of the path +-- (i.e. results always ends with '/') +-- +-- This function operates under the invariant that the supplied +-- value of sabs is the desired path relative to the beginning of +-- base. Thus, when base is empty, the desired path has been found. +-- +difSegsFrom :: String -> String -> String +{- +difSegsFrom sabs base + | traceVal "\ndifSegsFrom\nsabs " sabs $ traceVal "base " base $ + False = error "" +-} +difSegsFrom sabs "" = sabs +difSegsFrom sabs base = difSegsFrom ("../"++sabs) (snd $ nextSegment base) + +------------------------------------------------------------ +-- Other normalization functions +------------------------------------------------------------ + +-- |Case normalization; cf. RFC3986 section 6.2.2.1 +-- NOTE: authority case normalization is not performed +-- +normalizeCase :: String -> String +normalizeCase uristr = ncScheme uristr + where + ncScheme (':':cs) = ':':ncEscape cs + ncScheme (c:cs) | isSchemeChar c = toLower c:ncScheme cs + ncScheme _ = ncEscape uristr -- no scheme present + ncEscape ('%':h1:h2:cs) = '%':toUpper h1:toUpper h2:ncEscape cs + ncEscape (c:cs) = c:ncEscape cs + ncEscape [] = [] + +-- |Encoding normalization; cf. 
RFC3986 section 6.2.2.2 +-- +normalizeEscape :: String -> String +normalizeEscape ('%':h1:h2:cs) + | isHexDigit h1 && isHexDigit h2 && isUnreserved escval = + escval:normalizeEscape cs + where + escval = chr (digitToInt h1*16+digitToInt h2) +normalizeEscape (c:cs) = c:normalizeEscape cs +normalizeEscape [] = [] + +-- |Path segment normalization; cf. RFC3986 section 6.2.2.3 +-- +normalizePathSegments :: String -> String +normalizePathSegments uristr = normstr juri + where + juri = parseURI uristr + normstr Nothing = uristr + normstr (Just u) = show (normuri u) + normuri u = u { uriPath = removeDotSegments (uriPath u) } + +------------------------------------------------------------ +-- Deprecated functions +------------------------------------------------------------ + +{-# DEPRECATED parseabsoluteURI "use parseAbsoluteURI" #-} +parseabsoluteURI :: String -> Maybe URI +parseabsoluteURI = parseAbsoluteURI + +{-# DEPRECATED escapeString "use escapeURIString, and note the flipped arguments" #-} +escapeString :: String -> (Char->Bool) -> String +escapeString = flip escapeURIString + +{-# DEPRECATED reserved "use isReserved" #-} +reserved :: Char -> Bool +reserved = isReserved + +{-# DEPRECATED unreserved "use isUnreserved" #-} +unreserved :: Char -> Bool +unreserved = isUnreserved + +-- Additional component access functions for backward compatibility + +{-# DEPRECATED scheme "use uriScheme" #-} +scheme :: URI -> String +scheme = orNull init . uriScheme + +{-# DEPRECATED authority "use uriAuthority, and note changed functionality" #-} +authority :: URI -> String +authority = dropss . ($"") . uriAuthToString id . uriAuthority + where + -- Old-style authority component does not include leading '//' + dropss ('/':'/':s) = s + dropss s = s + +{-# DEPRECATED path "use uriPath" #-} +path :: URI -> String +path = uriPath + +{-# DEPRECATED query "use uriQuery, and note changed functionality" #-} +query :: URI -> String +query = orNull tail . 
uriQuery + +{-# DEPRECATED fragment "use uriFragment, and note changed functionality" #-} +fragment :: URI -> String +fragment = orNull tail . uriFragment + +orNull :: ([a]->[a]) -> [a] -> [a] +orNull _ [] = [] +orNull f as = f as + +-------------------------------------------------------------------------------- +-- +-- Copyright (c) 2004, G. KLYNE. All rights reserved. +-- Distributed as free software under the following license. +-- +-- Redistribution and use in source and binary forms, with or without +-- modification, are permitted provided that the following conditions +-- are met: +-- +-- - Redistributions of source code must retain the above copyright notice, +-- this list of conditions and the following disclaimer. +-- +-- - Redistributions in binary form must reproduce the above copyright +-- notice, this list of conditions and the following disclaimer in the +-- documentation and/or other materials provided with the distribution. +-- +-- - Neither name of the copyright holders nor the names of its +-- contributors may be used to endorse or promote products derived from +-- this software without specific prior written permission. +-- +-- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND THE CONTRIBUTORS +-- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +-- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +-- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +-- HOLDERS OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +-- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +-- BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +-- OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +-- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +-- TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +-- USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+-- +-------------------------------------------------------------------------------- diff -Nru cabal-install-1.22-1.22.6.0/src/network-uri-2.6.1.0/network-uri.cabal cabal-install-1.22-1.22.9.0/src/network-uri-2.6.1.0/network-uri.cabal --- cabal-install-1.22-1.22.6.0/src/network-uri-2.6.1.0/network-uri.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-uri-2.6.1.0/network-uri.cabal 2016-06-02 07:15:43.000000000 +0000 @@ -0,0 +1,65 @@ +name: network-uri +version: 2.6.1.0 +synopsis: URI manipulation +description: + This package provides an URI manipulation interface. + . + In network-2.6 the @Network.URI@ module was split off from the + network package into this package. If you're using the @Network.URI@ + module you can automatically get it from the right package by adding + this to your .cabal file: + . + > flag network-uri + > description: Get Network.URI from the network-uri package + > default: True + > + > library + > -- ... + > if flag(network-uri) + > build-depends: network-uri >= 2.6, network >= 2.6 + > else + > build-depends: network-uri < 2.6, network < 2.6 + . + That is, get the module from either network < 2.6 or from + network-uri >= 2.6. 
+homepage: https://github.com/haskell/network-uri +bug-reports: https://github.com/haskell/network-uri/issues +license: BSD3 +license-file: LICENSE +maintainer: ezra@ezrakilty.net +category: Network +build-type: Simple +cabal-version: >=1.10 + +library + exposed-modules: + Network.URI + build-depends: + base >= 3 && < 5, + deepseq >= 1.1 && < 1.5, + parsec >= 3.0 && < 3.2 + default-extensions: CPP, DeriveDataTypeable + if impl(ghc >= 7.6) + default-extensions: DeriveGeneric + ghc-options: -Wall -fwarn-tabs + default-language: Haskell98 + +test-suite uri + hs-source-dirs: tests + main-is: uri001.hs + type: exitcode-stdio-1.0 + + build-depends: + base < 5, + HUnit, + network-uri, + test-framework, + test-framework-hunit, + test-framework-quickcheck2 + + ghc-options: -Wall -fwarn-tabs + default-language: Haskell98 + +source-repository head + type: git + location: git://github.com/haskell/network-uri.git diff -Nru cabal-install-1.22-1.22.6.0/src/network-uri-2.6.1.0/Setup.hs cabal-install-1.22-1.22.9.0/src/network-uri-2.6.1.0/Setup.hs --- cabal-install-1.22-1.22.6.0/src/network-uri-2.6.1.0/Setup.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-uri-2.6.1.0/Setup.hs 2016-03-19 20:56:32.000000000 +0000 @@ -0,0 +1,2 @@ +import Distribution.Simple +main = defaultMain diff -Nru cabal-install-1.22-1.22.6.0/src/network-uri-2.6.1.0/tests/uri001.hs cabal-install-1.22-1.22.9.0/src/network-uri-2.6.1.0/tests/uri001.hs --- cabal-install-1.22-1.22.6.0/src/network-uri-2.6.1.0/tests/uri001.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/network-uri-2.6.1.0/tests/uri001.hs 2016-03-19 20:56:32.000000000 +0000 @@ -0,0 +1,1476 @@ +{-# OPTIONS_GHC -fno-warn-missing-signatures #-} +-------------------------------------------------------------------------------- +-- $Id: URITest.hs,v 1.8 2005/07/19 22:01:27 gklyne Exp $ +-- +-- Copyright (c) 2004, G. KLYNE. All rights reserved. +-- See end of this file for licence information. 
+-------------------------------------------------------------------------------- +-- | +-- Module : URITest +-- Copyright : (c) 2004, Graham Klyne +-- License : BSD-style (see end of this file) +-- +-- Maintainer : Graham Klyne +-- Stability : provisional +-- Portability : H98 +-- +-- This Module contains test cases for module URI. +-- +-- To run this test without using Cabal to build the package +-- (2013-01-05, instructions tested on MacOS): +-- 1. Install Haskell platform +-- 2. cabal install test-framework +-- 3. cabal install test-framework-hunit +-- 4. ghc -XDeriveDataTypeable -D"MIN_VERSION_base(x,y,z)=1" ../Network/URI.hs uri001.hs +-- 5. ./uri001 +-- +-- Previous build instructions: +-- Using GHC, I compile with this command line: +-- ghc --make -fglasgow-exts +-- -i..\;C:\Dev\Haskell\Lib\HUnit;C:\Dev\Haskell\Lib\Parsec +-- -o URITest.exe URITest -main-is URITest.main +-- The -i line may need changing for alternative installations. +-- +-------------------------------------------------------------------------------- + +module Main where + +import Network.URI + ( URI(..), URIAuth(..) 
+ , nullURI + , parseURI, parseURIReference, parseRelativeReference, parseAbsoluteURI + , parseAbsoluteURI + , isURI, isURIReference, isRelativeReference, isAbsoluteURI + , uriIsAbsolute, uriIsRelative + , relativeTo, nonStrictRelativeTo + , relativeFrom + , uriToString + , isUnescapedInURIComponent + , isUnescapedInURI, escapeURIString, unEscapeString + , normalizeCase, normalizeEscape, normalizePathSegments + , pathSegments + ) + +import Test.HUnit + +import Data.Maybe (fromJust) +import Data.List (intercalate) +import System.IO (openFile, IOMode(WriteMode), hClose) +import qualified Test.Framework as TF +import qualified Test.Framework.Providers.HUnit as TF +import qualified Test.Framework.Providers.QuickCheck2 as TF + +-- Test supplied string for valid URI reference syntax +-- isValidURIRef :: String -> Bool +-- Test supplied string for valid absolute URI reference syntax +-- isAbsoluteURIRef :: String -> Bool +-- Test supplied string for valid absolute URI syntax +-- isAbsoluteURI :: String -> Bool + +data URIType = AbsId -- URI form (absolute, no fragment) + | AbsRf -- Absolute URI reference + | RelRf -- Relative URI reference + | InvRf -- Invalid URI reference +isValidT :: URIType -> Bool +isValidT InvRf = False +isValidT _ = True + +isAbsRfT :: URIType -> Bool +isAbsRfT AbsId = True +isAbsRfT AbsRf = True +isAbsRfT _ = False + +isRelRfT :: URIType -> Bool +isRelRfT RelRf = True +isRelRfT _ = False + +isAbsIdT :: URIType -> Bool +isAbsIdT AbsId = True +isAbsIdT _ = False + +testEq :: (Eq a, Show a) => String -> a -> a -> Assertion +testEq lab a1 a2 = assertEqual lab a1 a2 + +testURIRef :: URIType -> String -> Assertion +testURIRef t u = sequence_ + [ testEq ("test_isURIReference:"++u) (isValidT t) (isURIReference u) + , testEq ("test_isRelativeReference:"++u) (isRelRfT t) (isRelativeReference u) + , testEq ("test_isAbsoluteURI:"++u) (isAbsIdT t) (isAbsoluteURI u) + ] + +testURIRefComponents :: String -> (Maybe URI) -> String -> Assertion 
+testURIRefComponents _lab uv us = + testEq ("testURIRefComponents:"++us) uv (parseURIReference us) + + +testURIRef001 = testURIRef AbsRf "http://example.org/aaa/bbb#ccc" +testURIRef002 = testURIRef AbsId "mailto:local@domain.org" +testURIRef003 = testURIRef AbsRf "mailto:local@domain.org#frag" +testURIRef004 = testURIRef AbsRf "HTTP://EXAMPLE.ORG/AAA/BBB#CCC" +testURIRef005 = testURIRef RelRf "//example.org/aaa/bbb#ccc" +testURIRef006 = testURIRef RelRf "/aaa/bbb#ccc" +testURIRef007 = testURIRef RelRf "bbb#ccc" +testURIRef008 = testURIRef RelRf "#ccc" +testURIRef009 = testURIRef RelRf "#" +testURIRef010 = testURIRef RelRf "/" +-- escapes +testURIRef011 = testURIRef AbsRf "http://example.org/aaa%2fbbb#ccc" +testURIRef012 = testURIRef AbsRf "http://example.org/aaa%2Fbbb#ccc" +testURIRef013 = testURIRef RelRf "%2F" +testURIRef014 = testURIRef RelRf "aaa%2Fbbb" +-- ports +testURIRef015 = testURIRef AbsRf "http://example.org:80/aaa/bbb#ccc" +testURIRef016 = testURIRef AbsRf "http://example.org:/aaa/bbb#ccc" +testURIRef017 = testURIRef AbsRf "http://example.org./aaa/bbb#ccc" +testURIRef018 = testURIRef AbsRf "http://example.123./aaa/bbb#ccc" +-- bare authority +testURIRef019 = testURIRef AbsId "http://example.org" +-- IPv6 literals (from RFC2732): +testURIRef021 = testURIRef AbsId "http://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:80/index.html" +testURIRef022 = testURIRef AbsId "http://[1080:0:0:0:8:800:200C:417A]/index.html" +testURIRef023 = testURIRef AbsId "http://[3ffe:2a00:100:7031::1]" +testURIRef024 = testURIRef AbsId "http://[1080::8:800:200C:417A]/foo" +testURIRef025 = testURIRef AbsId "http://[::192.9.5.5]/ipng" +testURIRef026 = testURIRef AbsId "http://[::FFFF:129.144.52.38]:80/index.html" +testURIRef027 = testURIRef AbsId "http://[2010:836B:4179::836B:4179]" +testURIRef028 = testURIRef RelRf "//[2010:836B:4179::836B:4179]" +testURIRef029 = testURIRef InvRf "[2010:836B:4179::836B:4179]" +-- RFC2396 test cases +testURIRef031 = testURIRef RelRf "./aaa" 
+testURIRef032 = testURIRef RelRf "../aaa" +testURIRef033 = testURIRef AbsId "g:h" +testURIRef034 = testURIRef RelRf "g" +testURIRef035 = testURIRef RelRf "./g" +testURIRef036 = testURIRef RelRf "g/" +testURIRef037 = testURIRef RelRf "/g" +testURIRef038 = testURIRef RelRf "//g" +testURIRef039 = testURIRef RelRf "?y" +testURIRef040 = testURIRef RelRf "g?y" +testURIRef041 = testURIRef RelRf "#s" +testURIRef042 = testURIRef RelRf "g#s" +testURIRef043 = testURIRef RelRf "g?y#s" +testURIRef044 = testURIRef RelRf ";x" +testURIRef045 = testURIRef RelRf "g;x" +testURIRef046 = testURIRef RelRf "g;x?y#s" +testURIRef047 = testURIRef RelRf "." +testURIRef048 = testURIRef RelRf "./" +testURIRef049 = testURIRef RelRf ".." +testURIRef050 = testURIRef RelRf "../" +testURIRef051 = testURIRef RelRf "../g" +testURIRef052 = testURIRef RelRf "../.." +testURIRef053 = testURIRef RelRf "../../" +testURIRef054 = testURIRef RelRf "../../g" +testURIRef055 = testURIRef RelRf "../../../g" +testURIRef056 = testURIRef RelRf "../../../../g" +testURIRef057 = testURIRef RelRf "/./g" +testURIRef058 = testURIRef RelRf "/../g" +testURIRef059 = testURIRef RelRf "g." +testURIRef060 = testURIRef RelRf ".g" +testURIRef061 = testURIRef RelRf "g.." +testURIRef062 = testURIRef RelRf "..g" +testURIRef063 = testURIRef RelRf "./../g" +testURIRef064 = testURIRef RelRf "./g/." 
+testURIRef065 = testURIRef RelRf "g/./h" +testURIRef066 = testURIRef RelRf "g/../h" +testURIRef067 = testURIRef RelRf "g;x=1/./y" +testURIRef068 = testURIRef RelRf "g;x=1/../y" +testURIRef069 = testURIRef RelRf "g?y/./x" +testURIRef070 = testURIRef RelRf "g?y/../x" +testURIRef071 = testURIRef RelRf "g#s/./x" +testURIRef072 = testURIRef RelRf "g#s/../x" +testURIRef073 = testURIRef RelRf "" +testURIRef074 = testURIRef RelRf "A'C" +testURIRef075 = testURIRef RelRf "A$C" +testURIRef076 = testURIRef RelRf "A@C" +testURIRef077 = testURIRef RelRf "A,C" +-- Invalid +testURIRef080 = testURIRef InvRf "http://foo.org:80Path/More" +testURIRef081 = testURIRef InvRf "::" +testURIRef082 = testURIRef InvRf " " +testURIRef083 = testURIRef InvRf "%" +testURIRef084 = testURIRef InvRf "A%Z" +testURIRef085 = testURIRef InvRf "%ZZ" +testURIRef086 = testURIRef InvRf "%AZ" +testURIRef087 = testURIRef InvRf "A C" +-- testURIRef088 = -- (case removed) +-- testURIRef089 = -- (case removed) +testURIRef090 = testURIRef InvRf "A\"C" +testURIRef091 = testURIRef InvRf "A`C" +testURIRef092 = testURIRef InvRf "AC" +testURIRef094 = testURIRef InvRf "A^C" +testURIRef095 = testURIRef InvRf "A\\C" +testURIRef096 = testURIRef InvRf "A{C" +testURIRef097 = testURIRef InvRf "A|C" +testURIRef098 = testURIRef InvRf "A}C" +-- From RFC2396: +-- rel_segment = 1*( unreserved | escaped | +-- ";" | "@" | "&" | "=" | "+" | "$" | "," ) +-- unreserved = alphanum | mark +-- mark = "-" | "_" | "." | "!" | "~" | "*" | "'" | +-- "(" | ")" +-- Note RFC 2732 allows '[', ']' ONLY for reserved purpose of IPv6 literals, +-- or does it? 
+testURIRef101 = testURIRef InvRf "A[C" +testURIRef102 = testURIRef InvRf "A]C" +testURIRef103 = testURIRef InvRf "A[**]C" +testURIRef104 = testURIRef InvRf "http://[xyz]/" +testURIRef105 = testURIRef InvRf "http://]/" +testURIRef106 = testURIRef InvRf "http://example.org/[2010:836B:4179::836B:4179]" +testURIRef107 = testURIRef InvRf "http://example.org/abc#[2010:836B:4179::836B:4179]" +testURIRef108 = testURIRef InvRf "http://example.org/xxx/[qwerty]#a[b]" +-- Random other things that crop up +testURIRef111 = testURIRef AbsRf "http://example/Andrȷ" +testURIRef112 = testURIRef AbsId "file:///C:/DEV/Haskell/lib/HXmlToolbox-3.01/examples/" +testURIRef113 = testURIRef AbsId "http://46229EFFE16A9BD60B9F1BE88B2DB047ADDED785/demo.mp3" +testURIRef114 = testURIRef InvRf "http://example.org/xxx/qwerty#a#b" +testURIRef115 = testURIRef InvRf "dcp.tcp.pft://192.168.0.1:1002:3002?fec=1&crc=0" +testURIRef116 = testURIRef AbsId "dcp.tcp.pft://192.168.0.1:1002?fec=1&crc=0" +testURIRef117 = testURIRef AbsId "foo://" +-- URIs prefixed with IPv4 addresses +testURIRef118 = testURIRef AbsId "http://192.168.0.1.example.com/" +testURIRef119 = testURIRef AbsId "http://192.168.0.1.example.com./" +-- URI prefixed with 3 octets of an IPv4 address and a subdomain part with a leading digit. +testURIRef120 = testURIRef AbsId "http://192.168.0.1test.example.com/" +-- URI with IPv(future) address +testURIRef121 = testURIRef AbsId "http://[v9.123.abc;456.def]/" +testURIRef122 = testEq "v.future authority" + (Just (URIAuth "" "[v9.123.abc;456.def]" ":42")) + ((maybe Nothing uriAuthority) . parseURI $ "http://[v9.123.abc;456.def]:42/") +-- URI with non-ASCII characters, fail with Network.HTTP escaping code (see below) +-- Currently not supported by Network.URI, but captured here for possible future reference +-- when IRI support may be added. 
+testURIRef123 = testURIRef AbsId "http://example.com/test123/䡥汬漬⁗潲汤/index.html" +testURIRef124 = testURIRef AbsId "http://example.com/test124/Москва/index.html" + +-- From report by Alexander Ivanov: +-- should return " 䡥汬漬⁗潲汤", but returns "Hello, World" instead +-- print $ urlDecode $ urlEncode " 䡥汬漬⁗潲汤" +-- should return "Москва" +-- print $ urlDecode $ urlEncode "Москва" + +testURIRefSuite = TF.testGroup "Test URIrefs" testURIRefList +testURIRefList = + [ TF.testCase "testURIRef001" testURIRef001 + , TF.testCase "testURIRef002" testURIRef002 + , TF.testCase "testURIRef003" testURIRef003 + , TF.testCase "testURIRef004" testURIRef004 + , TF.testCase "testURIRef005" testURIRef005 + , TF.testCase "testURIRef006" testURIRef006 + , TF.testCase "testURIRef007" testURIRef007 + , TF.testCase "testURIRef008" testURIRef008 + , TF.testCase "testURIRef009" testURIRef009 + , TF.testCase "testURIRef010" testURIRef010 + -- + , TF.testCase "testURIRef011" testURIRef011 + , TF.testCase "testURIRef012" testURIRef012 + , TF.testCase "testURIRef013" testURIRef013 + , TF.testCase "testURIRef014" testURIRef014 + , TF.testCase "testURIRef015" testURIRef015 + , TF.testCase "testURIRef016" testURIRef016 + , TF.testCase "testURIRef017" testURIRef017 + , TF.testCase "testURIRef018" testURIRef018 + -- + , TF.testCase "testURIRef019" testURIRef019 + -- + , TF.testCase "testURIRef021" testURIRef021 + , TF.testCase "testURIRef022" testURIRef022 + , TF.testCase "testURIRef023" testURIRef023 + , TF.testCase "testURIRef024" testURIRef024 + , TF.testCase "testURIRef025" testURIRef025 + , TF.testCase "testURIRef026" testURIRef026 + , TF.testCase "testURIRef027" testURIRef027 + , TF.testCase "testURIRef028" testURIRef028 + , TF.testCase "testURIRef029" testURIRef029 + -- + , TF.testCase "testURIRef031" testURIRef031 + , TF.testCase "testURIRef032" testURIRef032 + , TF.testCase "testURIRef033" testURIRef033 + , TF.testCase "testURIRef034" testURIRef034 + , TF.testCase "testURIRef035" testURIRef035 
+ , TF.testCase "testURIRef036" testURIRef036 + , TF.testCase "testURIRef037" testURIRef037 + , TF.testCase "testURIRef038" testURIRef038 + , TF.testCase "testURIRef039" testURIRef039 + , TF.testCase "testURIRef040" testURIRef040 + , TF.testCase "testURIRef041" testURIRef041 + , TF.testCase "testURIRef042" testURIRef042 + , TF.testCase "testURIRef043" testURIRef043 + , TF.testCase "testURIRef044" testURIRef044 + , TF.testCase "testURIRef045" testURIRef045 + , TF.testCase "testURIRef046" testURIRef046 + , TF.testCase "testURIRef047" testURIRef047 + , TF.testCase "testURIRef048" testURIRef048 + , TF.testCase "testURIRef049" testURIRef049 + , TF.testCase "testURIRef050" testURIRef050 + , TF.testCase "testURIRef051" testURIRef051 + , TF.testCase "testURIRef052" testURIRef052 + , TF.testCase "testURIRef053" testURIRef053 + , TF.testCase "testURIRef054" testURIRef054 + , TF.testCase "testURIRef055" testURIRef055 + , TF.testCase "testURIRef056" testURIRef056 + , TF.testCase "testURIRef057" testURIRef057 + , TF.testCase "testURIRef058" testURIRef058 + , TF.testCase "testURIRef059" testURIRef059 + , TF.testCase "testURIRef060" testURIRef060 + , TF.testCase "testURIRef061" testURIRef061 + , TF.testCase "testURIRef062" testURIRef062 + , TF.testCase "testURIRef063" testURIRef063 + , TF.testCase "testURIRef064" testURIRef064 + , TF.testCase "testURIRef065" testURIRef065 + , TF.testCase "testURIRef066" testURIRef066 + , TF.testCase "testURIRef067" testURIRef067 + , TF.testCase "testURIRef068" testURIRef068 + , TF.testCase "testURIRef069" testURIRef069 + , TF.testCase "testURIRef070" testURIRef070 + , TF.testCase "testURIRef071" testURIRef071 + , TF.testCase "testURIRef072" testURIRef072 + , TF.testCase "testURIRef073" testURIRef073 + , TF.testCase "testURIRef074" testURIRef074 + , TF.testCase "testURIRef075" testURIRef075 + , TF.testCase "testURIRef076" testURIRef076 + , TF.testCase "testURIRef077" testURIRef077 + -- + , TF.testCase "testURIRef080" testURIRef080 + , TF.testCase 
"testURIRef081" testURIRef081 + , TF.testCase "testURIRef082" testURIRef082 + , TF.testCase "testURIRef083" testURIRef083 + , TF.testCase "testURIRef084" testURIRef084 + , TF.testCase "testURIRef085" testURIRef085 + , TF.testCase "testURIRef086" testURIRef086 + , TF.testCase "testURIRef087" testURIRef087 + -- testURIRef088, + -- testURIRef089, + , TF.testCase "testURIRef090" testURIRef090 + , TF.testCase "testURIRef091" testURIRef091 + , TF.testCase "testURIRef092" testURIRef092 + , TF.testCase "testURIRef093" testURIRef093 + , TF.testCase "testURIRef094" testURIRef094 + , TF.testCase "testURIRef095" testURIRef095 + , TF.testCase "testURIRef096" testURIRef096 + , TF.testCase "testURIRef097" testURIRef097 + , TF.testCase "testURIRef098" testURIRef098 + -- testURIRef099, + -- + , TF.testCase "testURIRef101" testURIRef101 + , TF.testCase "testURIRef102" testURIRef102 + , TF.testCase "testURIRef103" testURIRef103 + , TF.testCase "testURIRef104" testURIRef104 + , TF.testCase "testURIRef105" testURIRef105 + , TF.testCase "testURIRef106" testURIRef106 + , TF.testCase "testURIRef107" testURIRef107 + , TF.testCase "testURIRef108" testURIRef108 + -- + , TF.testCase "testURIRef111" testURIRef111 + , TF.testCase "testURIRef112" testURIRef112 + , TF.testCase "testURIRef113" testURIRef113 + , TF.testCase "testURIRef114" testURIRef114 + , TF.testCase "testURIRef115" testURIRef115 + , TF.testCase "testURIRef116" testURIRef116 + , TF.testCase "testURIRef117" testURIRef117 + -- + , TF.testCase "testURIRef118" testURIRef118 + , TF.testCase "testURIRef119" testURIRef119 + , TF.testCase "testURIRef120" testURIRef120 + -- + , TF.testCase "testURIRef121" testURIRef121 + , TF.testCase "testURIRef122" testURIRef122 + -- IRI test cases not currently supported + -- , TF.testCase "testURIRef123" testURIRef123 + -- , TF.testCase "testURIRef124" testURIRef124 + ] + +-- test decomposition of URI into components +testComponent01 = testURIRefComponents "testComponent01" + ( Just $ URI + { 
uriScheme = "http:" + , uriAuthority = Just (URIAuth "user:pass@" "example.org" ":99") + , uriPath = "/aaa/bbb" + , uriQuery = "?qqq" + , uriFragment = "#fff" + } ) + "http://user:pass@example.org:99/aaa/bbb?qqq#fff" +testComponent02 = testURIRefComponents "testComponent02" + ( const Nothing + ( Just $ URI + { uriScheme = "http:" + , uriAuthority = Just (URIAuth "user:pass@" "example.org" ":99") + , uriPath = "aaa/bbb" + , uriQuery = "" + , uriFragment = "" + } ) + ) + "http://user:pass@example.org:99aaa/bbb" +testComponent03 = testURIRefComponents "testComponent03" + ( Just $ URI + { uriScheme = "http:" + , uriAuthority = Just (URIAuth "user:pass@" "example.org" ":99") + , uriPath = "" + , uriQuery = "?aaa/bbb" + , uriFragment = "" + } ) + "http://user:pass@example.org:99?aaa/bbb" +testComponent04 = testURIRefComponents "testComponent03" + ( Just $ URI + { uriScheme = "http:" + , uriAuthority = Just (URIAuth "user:pass@" "example.org" ":99") + , uriPath = "" + , uriQuery = "" + , uriFragment = "#aaa/bbb" + } ) + "http://user:pass@example.org:99#aaa/bbb" +-- These test cases contributed by Robert Buck (mathworks.com) +testComponent11 = testURIRefComponents "testComponent03" + ( Just $ URI + { uriScheme = "about:" + , uriAuthority = Nothing + , uriPath = "" + , uriQuery = "" + , uriFragment = "" + } ) + "about:" +testComponent12 = testURIRefComponents "testComponent03" + ( Just $ URI + { uriScheme = "file:" + , uriAuthority = Just (URIAuth "" "windowsauth" "") + , uriPath = "/d$" + , uriQuery = "" + , uriFragment = "" + } ) + "file://windowsauth/d$" + +testComponentSuite = TF.testGroup "Test URIrefs" $ + [ TF.testCase "testComponent01" testComponent01 + , TF.testCase "testComponent02" testComponent02 + , TF.testCase "testComponent03" testComponent03 + , TF.testCase "testComponent04" testComponent04 + , TF.testCase "testComponent11" testComponent11 + , TF.testCase "testComponent12" testComponent12 + ] + +-- Get reference relative to given base +-- relativeRef :: 
String -> String -> String +-- +-- Get absolute URI given base and relative reference +-- absoluteURI :: String -> String -> String +-- +-- Test cases taken from: http://www.w3.org/2000/10/swap/uripath.py +-- (Thanks, Dan Connolly) +-- +-- NOTE: absoluteURI base (relativeRef base u) is always equivalent to u. +-- cf. http://lists.w3.org/Archives/Public/uri/2003Jan/0008.html + +testRelSplit :: String -> String -> String -> String -> Assertion +testRelSplit label base uabs urel = + testEq label urel (mkrel puabs pubas) + where + mkrel (Just u1) (Just u2) = show (u1 `relativeFrom` u2) + mkrel Nothing _ = "Invalid URI: "++urel + mkrel _ Nothing = "Invalid URI: "++uabs + puabs = parseURIReference uabs + pubas = parseURIReference base + +testRelJoin :: String -> String -> String -> String -> Assertion +testRelJoin label base urel uabs = + testEq label uabs (mkabs purel pubas) + where + mkabs (Just u1) (Just u2) = show (u1 `relativeTo` u2) + mkabs Nothing _ = "Invalid URI: "++urel + mkabs _ Nothing = "Invalid URI: "++uabs + purel = parseURIReference urel + pubas = parseURIReference base + +testRelative :: String -> String -> String -> String -> Assertion +testRelative label base uabs urel = sequence_ + [ + (testRelSplit (label++"(rel)") base uabs urel), + (testRelJoin (label++"(abs)") base urel uabs) + ] + +testRelative01 = testRelative "testRelative01" + "foo:xyz" "bar:abc" "bar:abc" +testRelative02 = testRelative "testRelative02" + "http://example/x/y/z" "http://example/x/abc" "../abc" +testRelative03 = testRelative "testRelative03" + "http://example2/x/y/z" "http://example/x/abc" "//example/x/abc" + -- "http://example2/x/y/z" "http://example/x/abc" "http://example/x/abc" +testRelative04 = testRelative "testRelative04" + "http://ex/x/y/z" "http://ex/x/r" "../r" +testRelative05 = testRelative "testRelative05" + "http://ex/x/y/z" "http://ex/r" "/r" + -- "http://ex/x/y/z" "http://ex/r" "../../r" +testRelative06 = testRelative "testRelative06" + "http://ex/x/y/z" 
"http://ex/x/y/q/r" "q/r" +testRelative07 = testRelative "testRelative07" + "http://ex/x/y" "http://ex/x/q/r#s" "q/r#s" +testRelative08 = testRelative "testRelative08" + "http://ex/x/y" "http://ex/x/q/r#s/t" "q/r#s/t" +testRelative09 = testRelative "testRelative09" + "http://ex/x/y" "ftp://ex/x/q/r" "ftp://ex/x/q/r" +testRelative10 = testRelative "testRelative10" + -- "http://ex/x/y" "http://ex/x/y" "y" + "http://ex/x/y" "http://ex/x/y" "" +testRelative11 = testRelative "testRelative11" + -- "http://ex/x/y/" "http://ex/x/y/" "./" + "http://ex/x/y/" "http://ex/x/y/" "" +testRelative12 = testRelative "testRelative12" + -- "http://ex/x/y/pdq" "http://ex/x/y/pdq" "pdq" + "http://ex/x/y/pdq" "http://ex/x/y/pdq" "" +testRelative13 = testRelative "testRelative13" + "http://ex/x/y/" "http://ex/x/y/z/" "z/" +testRelative14 = testRelative "testRelative14" + -- "file:/swap/test/animal.rdf" "file:/swap/test/animal.rdf#Animal" "animal.rdf#Animal" + "file:/swap/test/animal.rdf" "file:/swap/test/animal.rdf#Animal" "#Animal" +testRelative15 = testRelative "testRelative15" + "file:/e/x/y/z" "file:/e/x/abc" "../abc" +testRelative16 = testRelative "testRelative16" + "file:/example2/x/y/z" "file:/example/x/abc" "/example/x/abc" +testRelative17 = testRelative "testRelative17" + "file:/ex/x/y/z" "file:/ex/x/r" "../r" +testRelative18 = testRelative "testRelative18" + "file:/ex/x/y/z" "file:/r" "/r" +testRelative19 = testRelative "testRelative19" + "file:/ex/x/y" "file:/ex/x/q/r" "q/r" +testRelative20 = testRelative "testRelative20" + "file:/ex/x/y" "file:/ex/x/q/r#s" "q/r#s" +testRelative21 = testRelative "testRelative21" + "file:/ex/x/y" "file:/ex/x/q/r#" "q/r#" +testRelative22 = testRelative "testRelative22" + "file:/ex/x/y" "file:/ex/x/q/r#s/t" "q/r#s/t" +testRelative23 = testRelative "testRelative23" + "file:/ex/x/y" "ftp://ex/x/q/r" "ftp://ex/x/q/r" +testRelative24 = testRelative "testRelative24" + -- "file:/ex/x/y" "file:/ex/x/y" "y" + "file:/ex/x/y" "file:/ex/x/y" "" 
+testRelative25 = testRelative "testRelative25" + -- "file:/ex/x/y/" "file:/ex/x/y/" "./" + "file:/ex/x/y/" "file:/ex/x/y/" "" +testRelative26 = testRelative "testRelative26" + -- "file:/ex/x/y/pdq" "file:/ex/x/y/pdq" "pdq" + "file:/ex/x/y/pdq" "file:/ex/x/y/pdq" "" +testRelative27 = testRelative "testRelative27" + "file:/ex/x/y/" "file:/ex/x/y/z/" "z/" +testRelative28 = testRelative "testRelative28" + "file:/devel/WWW/2000/10/swap/test/reluri-1.n3" + "file://meetings.example.com/cal#m1" "//meetings.example.com/cal#m1" + -- "file:/devel/WWW/2000/10/swap/test/reluri-1.n3" + -- "file://meetings.example.com/cal#m1" "file://meetings.example.com/cal#m1" +testRelative29 = testRelative "testRelative29" + "file:/home/connolly/w3ccvs/WWW/2000/10/swap/test/reluri-1.n3" + "file://meetings.example.com/cal#m1" "//meetings.example.com/cal#m1" + -- "file:/home/connolly/w3ccvs/WWW/2000/10/swap/test/reluri-1.n3" + -- "file://meetings.example.com/cal#m1" "file://meetings.example.com/cal#m1" +testRelative30 = testRelative "testRelative30" + "file:/some/dir/foo" "file:/some/dir/#blort" "./#blort" +testRelative31 = testRelative "testRelative31" + "file:/some/dir/foo" "file:/some/dir/#" "./#" +testRelative32 = testRelative "testRelative32" + "http://ex/x/y" "http://ex/x/q:r" "./q:r" + -- see RFC2396bis, section 5 ^^ +testRelative33 = testRelative "testRelative33" + "http://ex/x/y" "http://ex/x/p=q:r" "./p=q:r" + -- "http://ex/x/y" "http://ex/x/p=q:r" "p=q:r" +testRelative34 = testRelative "testRelative34" + "http://ex/x/y?pp/qq" "http://ex/x/y?pp/rr" "?pp/rr" +testRelative35 = testRelative "testRelative35" + "http://ex/x/y?pp/qq" "http://ex/x/y/z" "y/z" +testRelative36 = testRelative "testRelative36" + "mailto:local" + "mailto:local/qual@domain.org#frag" + "local/qual@domain.org#frag" +testRelative37 = testRelative "testRelative37" + "mailto:local/qual1@domain1.org" + "mailto:local/more/qual2@domain2.org#frag" + "more/qual2@domain2.org#frag" +testRelative38 = testRelative 
"testRelative38" + "http://ex/x/z?q" "http://ex/x/y?q" "y?q" +testRelative39 = testRelative "testRelative39" + "http://ex?p" "http://ex/x/y?q" "/x/y?q" +testRelative40 = testRelative "testRelative40" + "foo:a/b" "foo:a/c/d" "c/d" +testRelative41 = testRelative "testRelative41" + "foo:a/b" "foo:/c/d" "/c/d" +testRelative42 = testRelative "testRelative42" + "foo:a/b?c#d" "foo:a/b?c" "" +testRelative43 = testRelative "testRelative42" + "foo:a" "foo:b/c" "b/c" +testRelative44 = testRelative "testRelative44" + "foo:/a/y/z" "foo:/a/b/c" "../b/c" +testRelative45 = testRelJoin "testRelative45" + "foo:a" "./b/c" "foo:b/c" +testRelative46 = testRelJoin "testRelative46" + "foo:a" "/./b/c" "foo:/b/c" +testRelative47 = testRelJoin "testRelative47" + "foo://a//b/c" "../../d" "foo://a/d" +testRelative48 = testRelJoin "testRelative48" + "foo:a" "." "foo:" +testRelative49 = testRelJoin "testRelative49" + "foo:a" ".." "foo:" + +-- add escape tests +testRelative50 = testRelative "testRelative50" + "http://example/x/y%2Fz" "http://example/x/abc" "abc" +testRelative51 = testRelative "testRelative51" + "http://example/a/x/y/z" "http://example/a/x%2Fabc" "../../x%2Fabc" +testRelative52 = testRelative "testRelative52" + "http://example/a/x/y%2Fz" "http://example/a/x%2Fabc" "../x%2Fabc" +testRelative53 = testRelative "testRelative53" + "http://example/x%2Fy/z" "http://example/x%2Fy/abc" "abc" +testRelative54 = testRelative "testRelative54" + "http://ex/x/y" "http://ex/x/q%3Ar" "q%3Ar" +testRelative55 = testRelative "testRelative55" + "http://example/x/y%2Fz" "http://example/x%2Fabc" "/x%2Fabc" +-- Apparently, TimBL prefers the following way to 41, 42 above +-- cf. http://lists.w3.org/Archives/Public/uri/2003Feb/0028.html +-- He also notes that there may be different relative fuctions +-- that satisfy the basic equivalence axiom: +-- cf. 
http://lists.w3.org/Archives/Public/uri/2003Jan/0008.html +testRelative56 = testRelative "testRelative56" + "http://example/x/y/z" "http://example/x%2Fabc" "/x%2Fabc" +testRelative57 = testRelative "testRelative57" + "http://example/x/y%2Fz" "http://example/x%2Fabc" "/x%2Fabc" + +-- Other oddball tests + -- Check segment normalization code: +testRelative60 = testRelJoin "testRelative60" + "ftp://example/x/y" "http://example/a/b/../../c" "http://example/c" +testRelative61 = testRelJoin "testRelative61" + "ftp://example/x/y" "http://example/a/b/c/../../" "http://example/a/" +testRelative62 = testRelJoin "testRelative62" + "ftp://example/x/y" "http://example/a/b/c/./" "http://example/a/b/c/" +testRelative63 = testRelJoin "testRelative63" + "ftp://example/x/y" "http://example/a/b/c/.././" "http://example/a/b/" +testRelative64 = testRelJoin "testRelative64" + "ftp://example/x/y" "http://example/a/b/c/d/../../../../e" "http://example/e" +testRelative65 = testRelJoin "testRelative65" + "ftp://example/x/y" "http://example/a/b/c/d/../.././../../e" "http://example/e" + -- Check handling of queries and fragments with non-relative paths +testRelative70 = testRelative "testRelative70" + "mailto:local1@domain1?query1" "mailto:local2@domain2" + "local2@domain2" +testRelative71 = testRelative "testRelative71" + "mailto:local1@domain1" "mailto:local2@domain2?query2" + "local2@domain2?query2" +testRelative72 = testRelative "testRelative72" + "mailto:local1@domain1?query1" "mailto:local2@domain2?query2" + "local2@domain2?query2" +testRelative73 = testRelative "testRelative73" + "mailto:local@domain?query1" "mailto:local@domain?query2" + "?query2" +testRelative74 = testRelative "testRelative74" + "mailto:?query1" "mailto:local@domain?query2" + "local@domain?query2" +testRelative75 = testRelative "testRelative75" + "mailto:local@domain?query1" "mailto:local@domain?query2" + "?query2" +testRelative76 = testRelative "testRelative76" + "foo:bar" "http://example/a/b?c/../d" 
"http://example/a/b?c/../d" +testRelative77 = testRelative "testRelative77" + "foo:bar" "http://example/a/b#c/../d" "http://example/a/b#c/../d" +{- These (78-81) are some awkward test cases thrown up by a question on the URI list: + http://lists.w3.org/Archives/Public/uri/2005Jul/0013 + Mote that RFC 3986 discards path segents after the final '/' only when merging two + paths - otherwise the final segment in the base URI is mnaintained. This leads to + difficulty in constructinmg a reversible relativeTo/relativeFrom pair of functions. +-} +testRelative78 = testRelative "testRelative78" + "http://www.example.com/data/limit/.." "http://www.example.com/data/limit/test.xml" + "test.xml" +testRelative79 = testRelative "testRelative79" + "file:/some/dir/foo" "file:/some/dir/#blort" "./#blort" +testRelative80 = testRelative "testRelative80" + "file:/some/dir/foo" "file:/some/dir/#" "./#" +testRelative81 = testRelative "testRelative81" + "file:/some/dir/.." "file:/some/dir/#blort" "./#blort" + +-- testRelative base abs rel +-- testRelSplit base abs rel +-- testRelJoin base rel abs +testRelative91 = testRelSplit "testRelative91" + "http://example.org/base/uri" "http:this" + "this" +testRelative92 = testRelJoin "testRelative92" + "http://example.org/base/uri" "http:this" + "http:this" +testRelative93 = testRelJoin "testRelative93" + "http:base" "http:this" + "http:this" +testRelative94 = testRelJoin "testRelative94" + "f:/a" ".//g" + "f://g" +testRelative95 = testRelJoin "testRelative95" + "f://example.org/base/a" "b/c//d/e" + "f://example.org/base/b/c//d/e" +testRelative96 = testRelJoin "testRelative96" + "mid:m@example.ord/c@example.org" "m2@example.ord/c2@example.org" + "mid:m@example.ord/m2@example.ord/c2@example.org" +testRelative97 = testRelJoin "testRelative97" + "file:///C:/DEV/Haskell/lib/HXmlToolbox-3.01/examples/" "mini1.xml" + "file:///C:/DEV/Haskell/lib/HXmlToolbox-3.01/examples/mini1.xml" +testRelative98 = testRelative "testRelative98" + "foo:a/y/z" "foo:a/b/c" 
"../b/c" +testRelative99 = testRelJoin "testRelative99" + "f:/a/" "..//g" + "f://g" + + +testRelativeSuite = TF.testGroup "Test Relative URIs" testRelativeList +testRelativeList = + [ TF.testCase "testRelative01" testRelative01 + , TF.testCase "testRelative02" testRelative02 + , TF.testCase "testRelative03" testRelative03 + , TF.testCase "testRelative04" testRelative04 + , TF.testCase "testRelative05" testRelative05 + , TF.testCase "testRelative06" testRelative06 + , TF.testCase "testRelative07" testRelative07 + , TF.testCase "testRelative08" testRelative08 + , TF.testCase "testRelative09" testRelative09 + , TF.testCase "testRelative10" testRelative10 + , TF.testCase "testRelative11" testRelative11 + , TF.testCase "testRelative12" testRelative12 + , TF.testCase "testRelative13" testRelative13 + , TF.testCase "testRelative14" testRelative14 + , TF.testCase "testRelative15" testRelative15 + , TF.testCase "testRelative16" testRelative16 + , TF.testCase "testRelative17" testRelative17 + , TF.testCase "testRelative18" testRelative18 + , TF.testCase "testRelative19" testRelative19 + , TF.testCase "testRelative20" testRelative20 + , TF.testCase "testRelative21" testRelative21 + , TF.testCase "testRelative22" testRelative22 + , TF.testCase "testRelative23" testRelative23 + , TF.testCase "testRelative24" testRelative24 + , TF.testCase "testRelative25" testRelative25 + , TF.testCase "testRelative26" testRelative26 + , TF.testCase "testRelative27" testRelative27 + , TF.testCase "testRelative28" testRelative28 + , TF.testCase "testRelative29" testRelative29 + , TF.testCase "testRelative30" testRelative30 + , TF.testCase "testRelative31" testRelative31 + , TF.testCase "testRelative32" testRelative32 + , TF.testCase "testRelative33" testRelative33 + , TF.testCase "testRelative34" testRelative34 + , TF.testCase "testRelative35" testRelative35 + , TF.testCase "testRelative36" testRelative36 + , TF.testCase "testRelative37" testRelative37 + , TF.testCase "testRelative38" 
testRelative38 + , TF.testCase "testRelative39" testRelative39 + , TF.testCase "testRelative40" testRelative40 + , TF.testCase "testRelative41" testRelative41 + , TF.testCase "testRelative42" testRelative42 + , TF.testCase "testRelative43" testRelative43 + , TF.testCase "testRelative44" testRelative44 + , TF.testCase "testRelative45" testRelative45 + , TF.testCase "testRelative46" testRelative46 + , TF.testCase "testRelative47" testRelative47 + , TF.testCase "testRelative48" testRelative48 + , TF.testCase "testRelative49" testRelative49 + -- + , TF.testCase "testRelative50" testRelative50 + , TF.testCase "testRelative51" testRelative51 + , TF.testCase "testRelative52" testRelative52 + , TF.testCase "testRelative53" testRelative53 + , TF.testCase "testRelative54" testRelative54 + , TF.testCase "testRelative55" testRelative55 + , TF.testCase "testRelative56" testRelative56 + , TF.testCase "testRelative57" testRelative57 + -- + , TF.testCase "testRelative60" testRelative60 + , TF.testCase "testRelative61" testRelative61 + , TF.testCase "testRelative62" testRelative62 + , TF.testCase "testRelative63" testRelative63 + , TF.testCase "testRelative64" testRelative64 + , TF.testCase "testRelative65" testRelative65 + -- + , TF.testCase "testRelative70" testRelative70 + , TF.testCase "testRelative71" testRelative71 + , TF.testCase "testRelative72" testRelative72 + , TF.testCase "testRelative73" testRelative73 + , TF.testCase "testRelative74" testRelative74 + , TF.testCase "testRelative75" testRelative75 + , TF.testCase "testRelative76" testRelative76 + , TF.testCase "testRelative77" testRelative77 + -- Awkward cases: + , TF.testCase "testRelative78" testRelative78 + , TF.testCase "testRelative79" testRelative79 + , TF.testCase "testRelative80" testRelative80 + , TF.testCase "testRelative81" testRelative81 + -- + -- , TF.testCase "testRelative90" testRelative90 + , TF.testCase "testRelative91" testRelative91 + , TF.testCase "testRelative92" testRelative92 + , TF.testCase 
"testRelative93" testRelative93 + , TF.testCase "testRelative94" testRelative94 + , TF.testCase "testRelative95" testRelative95 + , TF.testCase "testRelative96" testRelative96 + , TF.testCase "testRelative97" testRelative97 + , TF.testCase "testRelative98" testRelative98 + , TF.testCase "testRelative99" testRelative99 + ] + +-- RFC2396 relative-to-absolute URI tests + +rfcbase = "http://a/b/c/d;p?q" +-- normal cases, RFC2396bis 5.4.1 +testRFC01 = testRelJoin "testRFC01" rfcbase "g:h" "g:h" +testRFC02 = testRelJoin "testRFC02" rfcbase "g" "http://a/b/c/g" +testRFC03 = testRelJoin "testRFC03" rfcbase "./g" "http://a/b/c/g" +testRFC04 = testRelJoin "testRFC04" rfcbase "g/" "http://a/b/c/g/" +testRFC05 = testRelJoin "testRFC05" rfcbase "/g" "http://a/g" +testRFC06 = testRelJoin "testRFC06" rfcbase "//g" "http://g" +testRFC07 = testRelJoin "testRFC07" rfcbase "?y" "http://a/b/c/d;p?y" +testRFC08 = testRelJoin "testRFC08" rfcbase "g?y" "http://a/b/c/g?y" +testRFC09 = testRelJoin "testRFC09" rfcbase "?q#s" "http://a/b/c/d;p?q#s" +testRFC23 = testRelJoin "testRFC10" rfcbase "#s" "http://a/b/c/d;p?q#s" +testRFC10 = testRelJoin "testRFC11" rfcbase "g#s" "http://a/b/c/g#s" +testRFC11 = testRelJoin "testRFC12" rfcbase "g?y#s" "http://a/b/c/g?y#s" +testRFC12 = testRelJoin "testRFC13" rfcbase ";x" "http://a/b/c/;x" +testRFC13 = testRelJoin "testRFC14" rfcbase "g;x" "http://a/b/c/g;x" +testRFC14 = testRelJoin "testRFC15" rfcbase "g;x?y#s" "http://a/b/c/g;x?y#s" +testRFC24 = testRelJoin "testRFC16" rfcbase "" "http://a/b/c/d;p?q" +testRFC15 = testRelJoin "testRFC17" rfcbase "." "http://a/b/c/" +testRFC16 = testRelJoin "testRFC18" rfcbase "./" "http://a/b/c/" +testRFC17 = testRelJoin "testRFC19" rfcbase ".." "http://a/b/" +testRFC18 = testRelJoin "testRFC20" rfcbase "../" "http://a/b/" +testRFC19 = testRelJoin "testRFC21" rfcbase "../g" "http://a/b/g" +testRFC20 = testRelJoin "testRFC22" rfcbase "../.." 
"http://a/" +testRFC21 = testRelJoin "testRFC23" rfcbase "../../" "http://a/" +testRFC22 = testRelJoin "testRFC24" rfcbase "../../g" "http://a/g" +-- abnormal cases, RFC2396bis 5.4.2 +testRFC31 = testRelJoin "testRFC31" rfcbase "?q" rfcbase +testRFC32 = testRelJoin "testRFC32" rfcbase "../../../g" "http://a/g" +testRFC33 = testRelJoin "testRFC33" rfcbase "../../../../g" "http://a/g" +testRFC34 = testRelJoin "testRFC34" rfcbase "/./g" "http://a/g" +testRFC35 = testRelJoin "testRFC35" rfcbase "/../g" "http://a/g" +testRFC36 = testRelJoin "testRFC36" rfcbase "g." "http://a/b/c/g." +testRFC37 = testRelJoin "testRFC37" rfcbase ".g" "http://a/b/c/.g" +testRFC38 = testRelJoin "testRFC38" rfcbase "g.." "http://a/b/c/g.." +testRFC39 = testRelJoin "testRFC39" rfcbase "..g" "http://a/b/c/..g" +testRFC40 = testRelJoin "testRFC40" rfcbase "./../g" "http://a/b/g" +testRFC41 = testRelJoin "testRFC41" rfcbase "./g/." "http://a/b/c/g/" +testRFC42 = testRelJoin "testRFC42" rfcbase "g/./h" "http://a/b/c/g/h" +testRFC43 = testRelJoin "testRFC43" rfcbase "g/../h" "http://a/b/c/h" +testRFC44 = testRelJoin "testRFC44" rfcbase "g;x=1/./y" "http://a/b/c/g;x=1/y" +testRFC45 = testRelJoin "testRFC45" rfcbase "g;x=1/../y" "http://a/b/c/y" +testRFC46 = testRelJoin "testRFC46" rfcbase "g?y/./x" "http://a/b/c/g?y/./x" +testRFC47 = testRelJoin "testRFC47" rfcbase "g?y/../x" "http://a/b/c/g?y/../x" +testRFC48 = testRelJoin "testRFC48" rfcbase "g#s/./x" "http://a/b/c/g#s/./x" +testRFC49 = testRelJoin "testRFC49" rfcbase "g#s/../x" "http://a/b/c/g#s/../x" +testRFC50 = testRelJoin "testRFC50" rfcbase "http:x" "http:x" + +-- Null path tests +-- See RFC2396bis, section 5.2, +-- "If the base URI's path component is the empty string, then a single +-- slash character is copied to the buffer" +testRFC60 = testRelative "testRFC60" "http://ex" "http://ex/x/y?q" "/x/y?q" +testRFC61 = testRelJoin "testRFC61" "http://ex" "x/y?q" "http://ex/x/y?q" +testRFC62 = testRelative "testRFC62" "http://ex?p" 
"http://ex/x/y?q" "/x/y?q" +testRFC63 = testRelJoin "testRFC63" "http://ex?p" "x/y?q" "http://ex/x/y?q" +testRFC64 = testRelative "testRFC64" "http://ex#f" "http://ex/x/y?q" "/x/y?q" +testRFC65 = testRelJoin "testRFC65" "http://ex#f" "x/y?q" "http://ex/x/y?q" +testRFC66 = testRelative "testRFC66" "http://ex?p" "http://ex/x/y#g" "/x/y#g" +testRFC67 = testRelJoin "testRFC67" "http://ex?p" "x/y#g" "http://ex/x/y#g" +testRFC68 = testRelative "testRFC68" "http://ex" "http://ex/" "/" +testRFC69 = testRelJoin "testRFC69" "http://ex" "./" "http://ex/" +testRFC70 = testRelative "testRFC70" "http://ex" "http://ex/a/b" "/a/b" +testRFC71 = testRelative "testRFC71" "http://ex/a/b" "http://ex" "./" + +testRFC2396Suite = TF.testGroup "Test RFC2396 examples" testRFC2396List +testRFC2396List = + [ TF.testCase "testRFC01" testRFC01 + , TF.testCase "testRFC02" testRFC02 + , TF.testCase "testRFC03" testRFC03 + , TF.testCase "testRFC04" testRFC04 + , TF.testCase "testRFC05" testRFC05 + , TF.testCase "testRFC06" testRFC06 + , TF.testCase "testRFC07" testRFC07 + , TF.testCase "testRFC08" testRFC08 + , TF.testCase "testRFC09" testRFC09 + , TF.testCase "testRFC10" testRFC10 + , TF.testCase "testRFC11" testRFC11 + , TF.testCase "testRFC12" testRFC12 + , TF.testCase "testRFC13" testRFC13 + , TF.testCase "testRFC14" testRFC14 + , TF.testCase "testRFC15" testRFC15 + , TF.testCase "testRFC16" testRFC16 + , TF.testCase "testRFC17" testRFC17 + , TF.testCase "testRFC18" testRFC18 + , TF.testCase "testRFC19" testRFC19 + , TF.testCase "testRFC20" testRFC20 + , TF.testCase "testRFC21" testRFC21 + , TF.testCase "testRFC22" testRFC22 + , TF.testCase "testRFC23" testRFC23 + , TF.testCase "testRFC24" testRFC24 + -- testRFC30, + , TF.testCase "testRFC31" testRFC31 + , TF.testCase "testRFC32" testRFC32 + , TF.testCase "testRFC33" testRFC33 + , TF.testCase "testRFC34" testRFC34 + , TF.testCase "testRFC35" testRFC35 + , TF.testCase "testRFC36" testRFC36 + , TF.testCase "testRFC37" testRFC37 + , TF.testCase 
"testRFC38" testRFC38 + , TF.testCase "testRFC39" testRFC39 + , TF.testCase "testRFC40" testRFC40 + , TF.testCase "testRFC41" testRFC41 + , TF.testCase "testRFC42" testRFC42 + , TF.testCase "testRFC43" testRFC43 + , TF.testCase "testRFC44" testRFC44 + , TF.testCase "testRFC45" testRFC45 + , TF.testCase "testRFC46" testRFC46 + , TF.testCase "testRFC47" testRFC47 + , TF.testCase "testRFC48" testRFC48 + , TF.testCase "testRFC49" testRFC49 + , TF.testCase "testRFC50" testRFC50 + -- + , TF.testCase "testRFC60" testRFC60 + , TF.testCase "testRFC61" testRFC61 + , TF.testCase "testRFC62" testRFC62 + , TF.testCase "testRFC63" testRFC63 + , TF.testCase "testRFC64" testRFC64 + , TF.testCase "testRFC65" testRFC65 + , TF.testCase "testRFC66" testRFC66 + , TF.testCase "testRFC67" testRFC67 + , TF.testCase "testRFC68" testRFC68 + , TF.testCase "testRFC69" testRFC69 + , TF.testCase "testRFC70" testRFC70 + ] + +-- And some other oddballs: +mailbase = "mailto:local/option@domain.org?notaquery#frag" +testMail01 = testRelJoin "testMail01" + mailbase "more@domain" + "mailto:local/more@domain" +testMail02 = testRelJoin "testMail02" + mailbase "#newfrag" + "mailto:local/option@domain.org?notaquery#newfrag" +testMail03 = testRelJoin "testMail03" + mailbase "l1/q1@domain" + "mailto:local/l1/q1@domain" + +testMail11 = testRelJoin "testMail11" + "mailto:local1@domain1?query1" "mailto:local2@domain2" + "mailto:local2@domain2" +testMail12 = testRelJoin "testMail12" + "mailto:local1@domain1" "mailto:local2@domain2?query2" + "mailto:local2@domain2?query2" +testMail13 = testRelJoin "testMail13" + "mailto:local1@domain1?query1" "mailto:local2@domain2?query2" + "mailto:local2@domain2?query2" +testMail14 = testRelJoin "testMail14" + "mailto:local@domain?query1" "mailto:local@domain?query2" + "mailto:local@domain?query2" +testMail15 = testRelJoin "testMail15" + "mailto:?query1" "mailto:local@domain?query2" + "mailto:local@domain?query2" +testMail16 = testRelJoin "testMail16" + 
"mailto:local@domain?query1" "?query2" + "mailto:local@domain?query2" +testInfo17 = testRelJoin "testInfo17" + "info:name/1234/../567" "name/9876/../543" + "info:name/name/543" +testInfo18 = testRelJoin "testInfo18" + "info:/name/1234/../567" "name/9876/../543" + "info:/name/name/543" + +testOddballSuite = TF.testGroup "Test oddball examples" testOddballList +testOddballList = + [ TF.testCase "testMail01" testMail01 + , TF.testCase "testMail02" testMail02 + , TF.testCase "testMail03" testMail03 + , TF.testCase "testMail11" testMail11 + , TF.testCase "testMail12" testMail12 + , TF.testCase "testMail13" testMail13 + , TF.testCase "testMail14" testMail14 + , TF.testCase "testMail15" testMail15 + , TF.testCase "testMail16" testMail16 + , TF.testCase "testInfo17" testInfo17 + ] + +-- Normalization tests + +-- Case normalization; cf. RFC2396bis section 6.2.2.1 +-- NOTE: authority case normalization is not performed +testNormalize01 = testEq "testNormalize01" + "http://EXAMPLE.com/Root/%2A?%2B#%2C" + (normalizeCase "HTTP://EXAMPLE.com/Root/%2a?%2b#%2c") + +-- Encoding normalization; cf. RFC2396bis section 6.2.2.2 +testNormalize11 = testEq "testNormalize11" + "HTTP://EXAMPLE.com/Root/~Me/" + (normalizeEscape "HTTP://EXAMPLE.com/Root/%7eMe/") +testNormalize12 = testEq "testNormalize12" + "foo:%40AZ%5b%60az%7b%2f09%3a-._~" + (normalizeEscape "foo:%40%41%5a%5b%60%61%7a%7b%2f%30%39%3a%2d%2e%5f%7e") +testNormalize13 = testEq "testNormalize13" + "foo:%3a%2f%3f%23%5b%5d%40" + (normalizeEscape "foo:%3a%2f%3f%23%5b%5d%40") + +-- Path segment normalization; cf. 
RFC2396bis section 6.2.2.4 +testNormalize21 = testEq "testNormalize21" + "http://example/c" + (normalizePathSegments "http://example/a/b/../../c") +testNormalize22 = testEq "testNormalize22" + "http://example/a/" + (normalizePathSegments "http://example/a/b/c/../../") +testNormalize23 = testEq "testNormalize23" + "http://example/a/b/c/" + (normalizePathSegments "http://example/a/b/c/./") +testNormalize24 = testEq "testNormalize24" + "http://example/a/b/" + (normalizePathSegments "http://example/a/b/c/.././") +testNormalize25 = testEq "testNormalize25" + "http://example/e" + (normalizePathSegments "http://example/a/b/c/d/../../../../e") +testNormalize26 = testEq "testNormalize26" + "http://example/e" + (normalizePathSegments "http://example/a/b/c/d/../.././../../e") +testNormalize27 = testEq "testNormalize27" + "http://example/e" + (normalizePathSegments "http://example/a/b/../.././../../e") +testNormalize28 = testEq "testNormalize28" + "foo:e" + (normalizePathSegments "foo:a/b/../.././../../e") + +testNormalizeSuite = TF.testGroup "testNormalizeSuite" + [ TF.testCase "testNormalize01" testNormalize01 + , TF.testCase "testNormalize11" testNormalize11 + , TF.testCase "testNormalize12" testNormalize12 + , TF.testCase "testNormalize13" testNormalize13 + , TF.testCase "testNormalize21" testNormalize21 + , TF.testCase "testNormalize22" testNormalize22 + , TF.testCase "testNormalize23" testNormalize23 + , TF.testCase "testNormalize24" testNormalize24 + , TF.testCase "testNormalize25" testNormalize25 + , TF.testCase "testNormalize26" testNormalize26 + , TF.testCase "testNormalize27" testNormalize27 + , TF.testCase "testNormalize28" testNormalize28 + ] + +-- URI formatting (show) tests + +ts02URI = URI { uriScheme = "http:" + , uriAuthority = Just (URIAuth "user:pass@" "example.org" ":99") + , uriPath = "/aaa/bbb" + , uriQuery = "?ccc" + , uriFragment = "#ddd/eee" + } + +ts04URI = URI { uriScheme = "http:" + , uriAuthority = Just (URIAuth "user:anonymous@" "example.org" 
":99") + , uriPath = "/aaa/bbb" + , uriQuery = "?ccc" + , uriFragment = "#ddd/eee" + } + +ts02str = "http://user:...@example.org:99/aaa/bbb?ccc#ddd/eee" +ts03str = "http://user:pass@example.org:99/aaa/bbb?ccc#ddd/eee" +ts04str = "http://user:...@example.org:99/aaa/bbb?ccc#ddd/eee" + +testShowURI01 = testEq "testShowURI01" "" (show nullURI) +testShowURI02 = testEq "testShowURI02" ts02str (show ts02URI) +testShowURI03 = testEq "testShowURI03" ts03str ((uriToString id ts02URI) "") +testShowURI04 = testEq "testShowURI04" ts04str (show ts04URI) + +testShowURI = TF.testGroup "testShowURI" + [ TF.testCase "testShowURI01" testShowURI01 + , TF.testCase "testShowURI02" testShowURI02 + , TF.testCase "testShowURI03" testShowURI03 + , TF.testCase "testShowURI04" testShowURI04 + ] + + +-- URI escaping tests + +te01str = "http://example.org/az/09-_/.~:/?#[]@!$&'()*+,;=" +te02str = "http://example.org/a/c%/d /e" +te02esc = "http://example.org/a%3C/b%3E/c%25/d%20/e" + +testEscapeURIString01 = testEq "testEscapeURIString01" + te01str (escapeURIString isUnescapedInURI te01str) + +testEscapeURIString02 = testEq "testEscapeURIString02" + te02esc (escapeURIString isUnescapedInURI te02str) + +testEscapeURIString03 = testEq "testEscapeURIString03" + te01str (unEscapeString te01str) + +testEscapeURIString04 = testEq "testEscapeURIString04" + te02str (unEscapeString te02esc) + +testEscapeURIString05 = testEq "testEscapeURIString05" + "http%3A%2F%2Fexample.org%2Faz%2F09-_%2F.~%3A%2F%3F%23%5B%5D%40%21%24%26%27%28%29%2A%2B%2C%3B%3D" + (escapeURIString isUnescapedInURIComponent te01str) + +testEscapeURIString06 = testEq "testEscapeURIString06" + "hello%C3%B8%C2%A9%E6%97%A5%E6%9C%AC" + (escapeURIString isUnescapedInURIComponent "helloø©日本") + +propEscapeUnEscapeLoop :: String -> Bool +propEscapeUnEscapeLoop s = s == (unEscapeString $! 
escaped) + where + escaped = escapeURIString (const False) s + {-# NOINLINE escaped #-} + +testEscapeURIString = TF.testGroup "testEscapeURIString" + [ TF.testCase "testEscapeURIString01" testEscapeURIString01 + , TF.testCase "testEscapeURIString02" testEscapeURIString02 + , TF.testCase "testEscapeURIString03" testEscapeURIString03 + , TF.testCase "testEscapeURIString04" testEscapeURIString04 + , TF.testCase "testEscapeURIString05" testEscapeURIString05 + , TF.testCase "testEscapeURIString06" testEscapeURIString06 + , TF.testProperty "propEscapeUnEscapeLoop" propEscapeUnEscapeLoop + ] + +-- URI string normalization tests + +tn01str = "eXAMPLE://a/b/%7bfoo%7d" +tn01nrm = "example://a/b/%7Bfoo%7D" + +tn02str = "example://a/b/%63/" +tn02nrm = "example://a/b/c/" + +tn03str = "example://a/./b/../b/c/foo" +tn03nrm = "example://a/b/c/foo" + +tn04str = "eXAMPLE://a/b/%7bfoo%7d" -- From RFC2396bis, 6.2.2 +tn04nrm = "example://a/b/%7Bfoo%7D" + +tn06str = "file:/x/..//y" +tn06nrm = "file://y" + +tn07str = "file:x/..//y/" +tn07nrm = "file:/y/" + +testNormalizeURIString01 = testEq "testNormalizeURIString01" + tn01nrm (normalizeCase tn01str) +testNormalizeURIString02 = testEq "testNormalizeURIString02" + tn02nrm (normalizeEscape tn02str) +testNormalizeURIString03 = testEq "testNormalizeURIString03" + tn03nrm (normalizePathSegments tn03str) +testNormalizeURIString04 = testEq "testNormalizeURIString04" + tn04nrm ((normalizeCase . normalizeEscape . normalizePathSegments) tn04str) +testNormalizeURIString05 = testEq "testNormalizeURIString05" + tn04nrm ((normalizePathSegments . normalizeEscape . 
normalizeCase) tn04str) +testNormalizeURIString06 = testEq "testNormalizeURIString06" + tn06nrm (normalizePathSegments tn06str) +testNormalizeURIString07 = testEq "testNormalizeURIString07" + tn07nrm (normalizePathSegments tn07str) + +testNormalizeURIString = TF.testGroup "testNormalizeURIString" + [ TF.testCase "testNormalizeURIString01" testNormalizeURIString01 + , TF.testCase "testNormalizeURIString02" testNormalizeURIString02 + , TF.testCase "testNormalizeURIString03" testNormalizeURIString03 + , TF.testCase "testNormalizeURIString04" testNormalizeURIString04 + , TF.testCase "testNormalizeURIString05" testNormalizeURIString05 + , TF.testCase "testNormalizeURIString06" testNormalizeURIString06 + , TF.testCase "testNormalizeURIString07" testNormalizeURIString07 + ] + +-- Test strict vs non-strict relativeTo logic + +trbase = fromJust $ parseURIReference "http://bar.org/" + +testRelativeTo01 = testEq "testRelativeTo01" + "http://bar.org/foo" + (show $ + (fromJust $ parseURIReference "foo") `relativeTo` trbase) + +testRelativeTo02 = testEq "testRelativeTo02" + "http:foo" + (show $ + (fromJust $ parseURIReference "http:foo") `relativeTo` trbase) + +testRelativeTo03 = testEq "testRelativeTo03" + "http://bar.org/foo" + (show $ + (fromJust $ parseURIReference "http:foo") `nonStrictRelativeTo` trbase) + +testRelativeTo = TF.testGroup "testRelativeTo" + [ TF.testCase "testRelativeTo01" testRelativeTo01 + , TF.testCase "testRelativeTo02" testRelativeTo02 + , TF.testCase "testRelativeTo03" testRelativeTo03 + ] + +-- Test alternative parsing functions +testAltFn01 = testEq "testAltFn01" "Just http://a.b/c#f" + (show . parseURI $ "http://a.b/c#f") +testAltFn02 = testEq "testAltFn02" "Just http://a.b/c#f" + (show . parseURIReference $ "http://a.b/c#f") +testAltFn03 = testEq "testAltFn03" "Just c/d#f" + (show . parseRelativeReference $ "c/d#f") +testAltFn04 = testEq "testAltFn04" "Nothing" + (show . 
parseRelativeReference $ "http://a.b/c#f") +testAltFn05 = testEq "testAltFn05" "Just http://a.b/c" + (show . parseAbsoluteURI $ "http://a.b/c") +testAltFn06 = testEq "testAltFn06" "Nothing" + (show . parseAbsoluteURI $ "http://a.b/c#f") +testAltFn07 = testEq "testAltFn07" "Nothing" + (show . parseAbsoluteURI $ "c/d") +testAltFn08 = testEq "testAltFn08" "Just http://a.b/c" + (show . parseAbsoluteURI $ "http://a.b/c") + +testAltFn11 = testEq "testAltFn11" True (isURI "http://a.b/c#f") +testAltFn12 = testEq "testAltFn12" True (isURIReference "http://a.b/c#f") +testAltFn13 = testEq "testAltFn13" True (isRelativeReference "c/d#f") +testAltFn14 = testEq "testAltFn14" False (isRelativeReference "http://a.b/c#f") +testAltFn15 = testEq "testAltFn15" True (isAbsoluteURI "http://a.b/c") +testAltFn16 = testEq "testAltFn16" False (isAbsoluteURI "http://a.b/c#f") +testAltFn17 = testEq "testAltFn17" False (isAbsoluteURI "c/d") + +testAltFn = TF.testGroup "testAltFn" + [ TF.testCase "testAltFn01" testAltFn01 + , TF.testCase "testAltFn02" testAltFn02 + , TF.testCase "testAltFn03" testAltFn03 + , TF.testCase "testAltFn04" testAltFn04 + , TF.testCase "testAltFn05" testAltFn05 + , TF.testCase "testAltFn06" testAltFn06 + , TF.testCase "testAltFn07" testAltFn07 + , TF.testCase "testAltFn08" testAltFn08 + , TF.testCase "testAltFn11" testAltFn11 + , TF.testCase "testAltFn12" testAltFn12 + , TF.testCase "testAltFn13" testAltFn13 + , TF.testCase "testAltFn14" testAltFn14 + , TF.testCase "testAltFn15" testAltFn15 + , TF.testCase "testAltFn16" testAltFn16 + , TF.testCase "testAltFn17" testAltFn17 + ] + +testUriIsAbsolute :: String -> Assertion +testUriIsAbsolute str = + assertBool str (uriIsAbsolute uri) + where + Just uri = parseURIReference str + +testUriIsRelative :: String -> Assertion +testUriIsRelative str = + assertBool str (uriIsRelative uri) + where + Just uri = parseURIReference str + +testIsAbsolute = TF.testGroup "testIsAbsolute" + [ TF.testCase "testIsAbsolute01" $ 
testUriIsAbsolute "http://google.com" + , TF.testCase "testIsAbsolute02" $ testUriIsAbsolute "ftp://p.x.ca/woo?hai=a" + , TF.testCase "testIsAbsolute03" $ testUriIsAbsolute "mailto:bob@example.com" + ] + +testIsRelative = TF.testGroup "testIsRelative" + [ TF.testCase "testIsRelative01" $ testUriIsRelative "//google.com" + , TF.testCase "testIsRelative02" $ testUriIsRelative "/hello" + , TF.testCase "testIsRelative03" $ testUriIsRelative "this/is/a/path" + , TF.testCase "testIsRelative04" $ testUriIsRelative "?what=that" + ] + +testPathSegmentsRoundTrip :: URI -> Assertion +testPathSegmentsRoundTrip u = + let segs = pathSegments u + + dropSuffix _suf [] = [] + dropSuffix suf [x] | suf == x = [] + | otherwise = [x] + dropSuffix suf (x:xs) = x : dropSuffix suf xs + + dropPrefix _pre [] = [] + dropPrefix pre (x:xs) | pre == x = xs + | otherwise = (x:xs) + strippedUriPath = dropSuffix '/' $ dropPrefix '/' $ uriPath u + in + (Data.List.intercalate "/" segs @?= strippedUriPath) + +assertJust _f Nothing = assertFailure "URI failed to parse" +assertJust f (Just x) = f x + +testPathSegments = TF.testGroup "testPathSegments" + [ TF.testCase "testPathSegments03" $ + assertJust testPathSegmentsRoundTrip $ parseURIReference "" + , TF.testCase "testPathSegments04" $ + assertJust testPathSegmentsRoundTrip $ parseURIReference "/" + , TF.testCase "testPathSegments05" $ + assertJust testPathSegmentsRoundTrip $ parseURIReference "//" + , TF.testCase "testPathSegments06" $ + assertJust testPathSegmentsRoundTrip $ parseURIReference "foo//bar/" + , TF.testCase "testPathSegments07" $ + assertJust testPathSegmentsRoundTrip $ parseURIReference "/foo//bar/" + , TF.testCase "testPathSegments03" $ + assertJust testPathSegmentsRoundTrip $ parseURI "http://example.org" + , TF.testCase "testPathSegments04" $ + assertJust testPathSegmentsRoundTrip $ parseURI "http://example.org/" + , TF.testCase "testPathSegments05" $ + assertJust testPathSegmentsRoundTrip $ parseURI "http://example.org//" + , 
TF.testCase "testPathSegments06" $ + assertJust testPathSegmentsRoundTrip $ parseURI "http://ex.ca/foo//bar/" + , TF.testCase "testPathSegments07" $ + assertJust testPathSegmentsRoundTrip $ parseURI "http://ex.ca/foo//bar/" + ] + +-- Full test suite +allTests = + [ testURIRefSuite + , testComponentSuite + , testRelativeSuite + , testRFC2396Suite + , testOddballSuite + , testNormalizeSuite + , testShowURI + , testEscapeURIString + , testNormalizeURIString + , testRelativeTo + , testAltFn + , testIsAbsolute + , testIsRelative + , testPathSegments + ] + +main = TF.defaultMain allTests + +runTestFile t = do + h <- openFile "a.tmp" WriteMode + _ <- runTestText (putTextToHandle h False) t + hClose h +tf = runTestFile +tt = runTestTT + +-- Miscellaneous values for hand-testing/debugging in Hugs: + +uref = testURIRefSuite +tr01 = testRelative01 +tr02 = testRelative02 +tr03 = testRelative03 +tr04 = testRelative04 +rel = testRelativeSuite +rfc = testRFC2396Suite +oddb = testOddballSuite + +(Just bu02) = parseURIReference "http://example/x/y/z" +(Just ou02) = parseURIReference "../abc" +(Just ru02) = parseURIReference "http://example/x/abc" +-- fileuri = testURIReference "file:///C:/DEV/Haskell/lib/HXmlToolbox-3.01/examples/" + +cu02 = ou02 `relativeTo` bu02 + +-------------------------------------------------------------------------------- +-- +-- Copyright (c) 2004, G. KLYNE. All rights reserved. +-- Distributed as free software under the following license. +-- +-- Redistribution and use in source and binary forms, with or without +-- modification, are permitted provided that the following conditions +-- are met: +-- +-- - Redistributions of source code must retain the above copyright notice, +-- this list of conditions and the following disclaimer. +-- +-- - Redistributions in binary form must reproduce the above copyright +-- notice, this list of conditions and the following disclaimer in the +-- documentation and/or other materials provided with the distribution. 
+-- +-- - Neither name of the copyright holders nor the names of its +-- contributors may be used to endorse or promote products derived from +-- this software without specific prior written permission. +-- +-- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND THE CONTRIBUTORS +-- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +-- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +-- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +-- HOLDERS OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +-- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +-- BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +-- OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +-- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +-- TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +-- USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +-- +-------------------------------------------------------------------------------- +-- $Source: /srv/cvs/cvs.haskell.org/fptools/libraries/network/tests/URITest.hs,v $ +-- $Author: gklyne $ +-- $Revision: 1.8 $ +-- $Log: URITest.hs,v $ +-- Revision 1.81 2012/08/01 aaronfriel +-- Added additional test case for the "xip.io" service style URLs and absolute URLs prefixed with ipv4 addresses. +-- +-- Revision 1.8 2005/07/19 22:01:27 gklyne +-- Added some additional test cases raised by discussion on URI@w3.org mailing list about 2005-07-19. The test p[roposed by this discussion exposed a subtle bug in relativeFrom not being an exact inverse of relativeTo. +-- +-- Revision 1.7 2005/06/06 16:31:44 gklyne +-- Added two new test cases. +-- +-- Revision 1.6 2005/05/31 17:18:36 gklyne +-- Added some additional test cases triggered by URI-list discussions. 
+-- +-- Revision 1.5 2005/04/07 11:09:37 gklyne +-- Added test cases for alternate parsing functions (including deprecated 'parseabsoluteURI') +-- +-- Revision 1.4 2005/04/05 12:47:32 gklyne +-- Added test case. +-- Changed module name, now requires GHC -main-is to compile. +-- All tests run OK with GHC 6.4 on MS-Windows. +-- +-- Revision 1.3 2004/11/05 17:29:09 gklyne +-- Changed password-obscuring logic to reflect late change in revised URI +-- specification (password "anonymous" is no longer a special case). +-- Updated URI test module to use function 'escapeURIString'. +-- (Should unEscapeString be similarly updated?) +-- +-- Revision 1.2 2004/10/27 13:06:55 gklyne +-- Updated URI module function names per: +-- http://www.haskell.org//pipermail/cvs-libraries/2004-October/002916.html +-- Added test cases to give better covereage of module functions. +-- +-- Revision 1.1 2004/10/14 16:11:30 gklyne +-- Add URI unit test to cvs.haskell.org repository +-- +-- Revision 1.17 2004/10/14 11:51:09 graham +-- Confirm that URITest runs with GHC. +-- Fix up some comments and other minor details. +-- +-- Revision 1.16 2004/10/14 11:45:30 graham +-- Use moduke name main for GHC 6.2 +-- +-- Revision 1.15 2004/08/11 11:07:39 graham +-- Add new test case. +-- +-- Revision 1.14 2004/06/30 11:35:27 graham +-- Update URI code to use hierarchical libraries for Parsec and Network. +-- +-- Revision 1.13 2004/06/22 16:19:16 graham +-- New URI test case added. +-- +-- Revision 1.12 2004/04/21 15:13:29 graham +-- Add test case +-- +-- Revision 1.11 2004/04/21 14:54:05 graham +-- Fix up some tests +-- +-- Revision 1.10 2004/04/20 14:54:13 graham +-- Fix up test cases related to port number in authority, +-- and add some more URI decomposition tests. 
+-- +-- Revision 1.9 2004/04/07 15:06:17 graham +-- Add extra test case +-- Revise syntax in line with changes to RFC2396bis +-- +-- Revision 1.8 2004/03/17 14:34:58 graham +-- Add Network.HTTP files to CVS +-- +-- Revision 1.7 2004/03/16 14:19:38 graham +-- Change licence to BSD style; add nullURI definition; new test cases. +-- +-- Revision 1.6 2004/02/20 12:12:00 graham +-- Add URI normalization functions +-- +-- Revision 1.5 2004/02/19 23:19:35 graham +-- Network.URI module passes all test cases +-- +-- Revision 1.4 2004/02/17 20:06:02 graham +-- Revised URI parser to reflect latest RFC2396bis (-04) +-- +-- Revision 1.3 2004/02/11 14:32:14 graham +-- Added work-in-progress notes. +-- +-- Revision 1.2 2004/02/02 14:00:39 graham +-- Fix optional host name in URI. Add test cases. +-- +-- Revision 1.1 2004/01/27 21:13:45 graham +-- New URI module and test suite added, +-- implementing the GHC Network.URI interface. +-- diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/CHANGES cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/CHANGES --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/CHANGES 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/CHANGES 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,48 @@ +3.1.11 + +- Include `README.md` in package. + +3.1.10 + +- Most types now have a `Typeable` instance. Some instances are dropped from + older versions of GHC (sorry about that!). +- The token-parser now rejects Unicode numeric escape sequences for characters + outside the Unicode range. +- The token-parser now loses less precision when parsing literal doubles. +- Documentation fixes and corrections. +- We no longer test parsec builds on GHC 7.4. + +3.1.9 + +- Many and various updates to documentation and package description (inlcuding + the homepage links). +- Add an 'Eq' instance for 'ParseError' +- Fixed a regression from 3.1.6: 'runP' is again exported from module + Text.Parsec. 
+ +3.1.8 + +- Fix a regression from 3.1.6 related to exports from the main module. + +3.1.7 + +- Fix a regression from 3.1.6 related to the reported position of error messages. + See bug #9 for details. +- Reset the current error position on success of 'lookAhead'. + +3.1.6 + +- Export 'Text' instances from Text.Parsec +- Make Text.Parsec exports more visible +- Re-arrange Text.Parsec exports +- Add functions 'crlf' and 'endOfLine' to Text.Parsec.Char for handling + input streams that do not have normalized line terminators. +- Fix off-by-one error in Token.charControl + +3.1.4 & 3.1.5 + +- Bump dependency on 'text' + +3.1.3 + +- Fix a regression introduced in 3.1.2 related to positions reported by error messages. diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/LICENSE cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/LICENSE --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/LICENSE 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,21 @@ +Copyright 1999-2000, Daan Leijen; 2007, Paolo Martini. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +This software is provided by the copyright holders "as is" and any express or +implied warranties, including, but not limited to, the implied warranties of +merchantability and fitness for a particular purpose are disclaimed. 
In no +event shall the copyright holders be liable for any direct, indirect, +incidental, special, exemplary, or consequential damages (including, but not +limited to, procurement of substitute goods or services; loss of use, data, +or profits; or business interruption) however caused and on any theory of +liability, whether in contract, strict liability, or tort (including +negligence or otherwise) arising in any way out of the use of this software, +even if advised of the possibility of such damage. diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/parsec.cabal cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/parsec.cabal --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/parsec.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/parsec.cabal 2016-06-02 07:15:42.000000000 +0000 @@ -0,0 +1,87 @@ +name: parsec +version: 3.1.11 +cabal-version: >= 1.8 +license: BSD3 +license-file: LICENSE +author: Daan Leijen , Paolo Martini +maintainer: Antoine Latter +homepage: https://github.com/aslatter/parsec +bug-reports: https://github.com/aslatter/parsec/issues +category: Parsing +synopsis: Monadic parser combinators +build-type: Simple +description: + Parsec is designed from scratch as an industrial-strength parser + library. It is simple, safe, well documented (on the package + homepage), has extensive libraries, good error messages, + and is fast. It is defined as a monad transformer that can be + stacked on arbitrary monads, and it is also parametric in the + input stream type. 
+extra-source-files: CHANGES, README.md +tested-with: GHC==7.10.*, GHC==7.8.*, GHC==7.6.* + +source-repository head + type: git + location: https://github.com/aslatter/parsec + +library + exposed-modules: + Text.Parsec, + Text.Parsec.String, + Text.Parsec.ByteString, + Text.Parsec.ByteString.Lazy, + Text.Parsec.Text, + Text.Parsec.Text.Lazy, + Text.Parsec.Pos, + Text.Parsec.Error, + Text.Parsec.Prim, + Text.Parsec.Char, + Text.Parsec.Combinator, + Text.Parsec.Token, + Text.Parsec.Expr, + Text.Parsec.Language, + Text.Parsec.Perm, + Text.ParserCombinators.Parsec, + Text.ParserCombinators.Parsec.Char, + Text.ParserCombinators.Parsec.Combinator, + Text.ParserCombinators.Parsec.Error, + Text.ParserCombinators.Parsec.Expr, + Text.ParserCombinators.Parsec.Language, + Text.ParserCombinators.Parsec.Perm, + Text.ParserCombinators.Parsec.Pos, + Text.ParserCombinators.Parsec.Prim, + Text.ParserCombinators.Parsec.Token + + build-depends: + base >= 4 && < 5, + mtl, + bytestring, + text >= 0.2 && < 1.3 + + extensions: + ExistentialQuantification, + PolymorphicComponents, + MultiParamTypeClasses, + FlexibleInstances, + FlexibleContexts, + DeriveDataTypeable, + CPP + +Test-Suite tests + type: exitcode-stdio-1.0 + hs-source-dirs: test + main-is: Main.hs + other-modules: + Bugs, + Bugs.Bug2, + Bugs.Bug6, + Bugs.Bug9, + Bugs.Bug35, + Util + build-depends: + base, + parsec, + HUnit >= 1.2 && < 1.4, + test-framework >= 0.6 && < 0.9, + test-framework-hunit >= 0.2 && < 0.4 + ghc-options: -Wall diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/README.md cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/README.md --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/README.md 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/README.md 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,95 @@ +Parsec [![Build Status](https://travis-ci.org/aslatter/parsec.svg?branch=master)](https://travis-ci.org/aslatter/parsec) +====== + +A monadic parser combinator library, 
written by Daan Leijen. Parsec is designed +from scratch as an industrial-strength parser library. It is simple, safe, well +documented, has extensive libraries, good error messages, and is fast. + +Some links: + +* [Parsec on Hackage](https://hackage.haskell.org/package/parsec), + contains the generated documentation. +* The 2001 paper written by Daan Leijen, some what outdated + ([PDF](https://web.archive.org/web/20140528151730/http://legacy.cs.uu.nl/daan/download/parsec/parsec.pdf), + [HTML](https://web.archive.org/web/20140528151730/http://legacy.cs.uu.nl/daan/download/parsec/parsec.html), + thanks to [archive.org](http://web.archive.org); + and [PDF](https://research.microsoft.com/en-us/um/people/daan/download/parsec/parsec.pdf), + thanks to Microsoft Research). +* [Using Parsec](http://book.realworldhaskell.org/read/using-parsec.html), + chapter 16 of [Real World Haskell](http://book.realworldhaskell.org/). +* [An introduction to the Parsec library](http://kunigami.wordpress.com/2014/01/21/an-introduction-to-the-parsec-library) + on Kunigami's blog. +* [An introduction to parsing text in Haskell with Parsec](http://unbui.lt/#!/post/haskell-parsec-basics) on Wilson's blog. +* Differences between Parsec and + [Attoparsec](http://hackage.haskell.org/package/attoparsec) + (Haskell's other prominent parser library) as explained in + [an answer on StackExchange](http://stackoverflow.com/a/19213247). +* Differences between Parsec and [Happy](http://www.haskell.org/happy) + (Haskell's parser generator) as explained in two + answers on separate StackExchange questions + ([1](http://stackoverflow.com/a/7270904), + [2](http://stackoverflow.com/a/14775331)). + +By analyzing [Parsec's reverse dependencies on Hackage](http://packdeps.haskellers.com/reverse/parsec) +we can find open source project that make use of Parsec. 
For example +[bibtex](http://hackage.haskell.org/package/bibtex), +[ConfigFile](http://hackage.haskell.org/package/ConfigFile), +[csv](http://hackage.haskell.org/package/csv) and +[hjson](http://hackage.haskell.org/package/hjson). + + +## Getting started + +This requires a working version of `cabal` and `ghci`, which are part of +any modern installation of Haskell, such as +[Haskell Platform](https://www.haskell.org/platform). + +First install Parsec. + + cabal install parsec + +Below we show how a very simple parser that tests matching parentheses +was made from GHCI (the interactive GHC environment), which we started +with the `ghci` command). + +``` +Prelude> :m +Text.Parsec +Prelude Text.Parsec> let parenSet = char '(' >> many parenSet >> char ')' +Loading package transformers-0.3.0.0 ... linking ... done. +Loading package array-0.5.0.0 ... linking ... done. +Loading package deepseq-1.3.0.2 ... linking ... done. +Loading package bytestring-0.10.4.0 ... linking ... done. +Loading package mtl-2.1.3.1 ... linking ... done. +Loading package text-1.1.1.3 ... linking ... done. +Loading package parsec-3.1.5 ... linking ... done. +Prelude Text.Parsec> let parens = (many parenSet >> eof) <|> eof +Prelude Text.Parsec> parse parens "" "()" +Right () +Prelude Text.Parsec> parse parens "" "()(())" +Right () +Prelude Text.Parsec> parse parens "" "(" +Left (line 1, column 2): +unexpected end of input +expecting "(" or ")" +``` + +The `Right ()` results indicate successes: the parentheses matched. +The `Left [...]` result indicates a parse failure, and is detailed +with an error message. + +For a more thorough introduction to Parsec we recommend the links at +the top of this README file. + + +## Contributing + +Issues (bugs, feature requests or otherwise feedback) may be reported in +[the Github issue tracker for this project](https://github.com/aslatter/parsec/issues). + +Pull-requests are also welcome. 
+ + +## License + +See the [LICENSE](https://github.com/aslatter/parsec/blob/master/LICENSE) +file in the repository. diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Setup.hs cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Setup.hs --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Setup.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Setup.hs 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,6 @@ +module Main (main) where + +import Distribution.Simple + +main :: IO () +main = defaultMain diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/test/Bugs/Bug2.hs cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/test/Bugs/Bug2.hs --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/test/Bugs/Bug2.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/test/Bugs/Bug2.hs 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,28 @@ + +module Bugs.Bug2 + ( main + ) where + +import Test.HUnit hiding ( Test ) +import Test.Framework +import Test.Framework.Providers.HUnit + +import Text.Parsec +import Text.Parsec.String +import qualified Text.Parsec.Token as P +import Text.Parsec.Language (haskellDef) + +main :: Test +main = + testCase "Control Char Parsing (#2)" $ + parseString "\"test\\^Bstring\"" @?= "test\^Bstring" + + where + parseString :: String -> String + parseString input = + case parse parser "Example" input of + Left{} -> error "Parse failure" + Right str -> str + + parser :: Parser String + parser = P.stringLiteral $ P.makeTokenParser haskellDef \ No newline at end of file diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/test/Bugs/Bug35.hs cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/test/Bugs/Bug35.hs --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/test/Bugs/Bug35.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/test/Bugs/Bug35.hs 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,40 @@ + +module Bugs.Bug35 (main) where + +import Text.Parsec +import 
Text.Parsec.Language +import Text.Parsec.String +import qualified Text.Parsec.Token as Token + +import Test.HUnit hiding (Test) +import Test.Framework +import Test.Framework.Providers.HUnit + +trickyFloats :: [String] +trickyFloats = + [ "1.5339794352098402e-118" + , "2.108934760892056e-59" + , "2.250634744599241e-19" + , "5.0e-324" + , "5.960464477539063e-8" + , "0.25996181067141905" + , "0.3572019862807257" + , "0.46817723004874223" + , "0.9640035681058178" + , "4.23808622486133" + , "4.540362294799751" + , "5.212384849884261" + , "13.958257048123212" + , "32.96176575630599" + , "38.47735512322269" + ] + +float :: Parser Double +float = Token.float (Token.makeTokenParser emptyDef) + +testBatch :: Assertion +testBatch = mapM_ testFloat trickyFloats + where testFloat x = parse float "" x @?= Right (read x :: Double) + +main :: Test +main = testCase "Quality of output of Text.Parsec.Token.float (#35)" testBatch diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/test/Bugs/Bug6.hs cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/test/Bugs/Bug6.hs --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/test/Bugs/Bug6.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/test/Bugs/Bug6.hs 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,25 @@ + +module Bugs.Bug6 + ( main + ) where + +import Test.HUnit hiding ( Test ) +import Test.Framework +import Test.Framework.Providers.HUnit + +import Text.Parsec +import Text.Parsec.String + +import Util + +main :: Test +main = + testCase "Look-ahead preserving error location (#6)" $ + parseErrors variable "return" @?= ["'return' is a reserved keyword"] + +variable :: Parser String +variable = do + x <- lookAhead (many1 letter) + if x == "return" + then fail "'return' is a reserved keyword" + else string x diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/test/Bugs/Bug9.hs cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/test/Bugs/Bug9.hs --- 
cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/test/Bugs/Bug9.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/test/Bugs/Bug9.hs 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,46 @@ + +module Bugs.Bug9 ( main ) where + +import Control.Applicative ((<*), (<$>), (<$)) +import Text.Parsec +import Text.Parsec.Language (haskellStyle) +import Text.Parsec.String (Parser) +import Text.Parsec.Expr +import qualified Text.Parsec.Token as P + +import Test.HUnit hiding ( Test ) +import Test.Framework +import Test.Framework.Providers.HUnit + +import Util + +data Expr = Const Integer | Op Expr Expr + deriving Show + +main :: Test +main = + testCase "Tracing of current position in error message (#9)" + $ result @?= ["unexpected '>'","expecting operator or end of input"] + + where + result :: [String] + result = parseErrors parseTopLevel "4 >> 5" + +-- Syntax analaysis + +parseTopLevel :: Parser Expr +parseTopLevel = parseExpr <* eof + +parseExpr :: Parser Expr +parseExpr = buildExpressionParser table (Const <$> integer) + where + table = [[ Infix (Op <$ reserved ">>>") AssocLeft ]] + + -- Lexical analysis + + lexer = P.makeTokenParser haskellStyle { P.reservedOpNames = [">>>"] } + + integer = P.integer lexer + reserved = P.reserved lexer + reservedOp = P.reservedOp lexer + diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/test/Bugs.hs cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/test/Bugs.hs --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/test/Bugs.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/test/Bugs.hs 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,18 @@ + +module Bugs + ( bugs + ) where + +import Test.Framework + +import qualified Bugs.Bug2 +import qualified Bugs.Bug6 +import qualified Bugs.Bug9 +import qualified Bugs.Bug35 + +bugs :: [Test] +bugs = [ Bugs.Bug2.main + , Bugs.Bug6.main + , Bugs.Bug9.main + , Bugs.Bug35.main + ] diff -Nru 
cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/test/Main.hs cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/test/Main.hs --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/test/Main.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/test/Main.hs 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,10 @@ + +import Test.Framework + +import Bugs ( bugs ) + +main :: IO () +main = do + defaultMain + [ testGroup "Bugs" bugs + ] \ No newline at end of file diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/test/Util.hs cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/test/Util.hs --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/test/Util.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/test/Util.hs 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,14 @@ + +module Util where + +import Text.Parsec +import Text.Parsec.String ( Parser ) + +-- | Returns the error messages associated +-- with a failed parse. +parseErrors :: Parser a -> String -> [String] +parseErrors p input = + case parse p "" input of + Left err -> + drop 1 $ lines $ show err + Right{} -> [] diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/Parsec/ByteString/Lazy.hs cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/Parsec/ByteString/Lazy.hs --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/Parsec/ByteString/Lazy.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/Parsec/ByteString/Lazy.hs 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,39 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Text.Parsec.ByteString.Lazy +-- Copyright : (c) Paolo Martini 2007 +-- License : BSD-style (see the LICENSE file) +-- +-- Maintainer : derek.a.elkins@gmail.com +-- Stability : provisional +-- Portability : portable +-- +-- Convinience definitions for working with lazy 'C.ByteString's. 
+-- +----------------------------------------------------------------------------- + +module Text.Parsec.ByteString.Lazy + ( Parser, GenParser, parseFromFile + ) where + +import Text.Parsec.Error +import Text.Parsec.Prim + +import qualified Data.ByteString.Lazy.Char8 as C + +type Parser = Parsec C.ByteString () +type GenParser t st = Parsec C.ByteString st + +-- | @parseFromFile p filePath@ runs a lazy bytestring parser @p@ on the +-- input read from @filePath@ using 'ByteString.Lazy.Char8.readFile'. Returns either a 'ParseError' +-- ('Left') or a value of type @a@ ('Right'). +-- +-- > main = do{ result <- parseFromFile numbers "digits.txt" +-- > ; case result of +-- > Left err -> print err +-- > Right xs -> print (sum xs) +-- > } +parseFromFile :: Parser a -> String -> IO (Either ParseError a) +parseFromFile p fname + = do input <- C.readFile fname + return (runP p () fname input) diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/Parsec/ByteString.hs cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/Parsec/ByteString.hs --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/Parsec/ByteString.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/Parsec/ByteString.hs 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,40 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Text.Parsec.ByteString +-- Copyright : (c) Paolo Martini 2007 +-- License : BSD-style (see the LICENSE file) +-- +-- Maintainer : derek.a.elkins@gmail.com +-- Stability : provisional +-- Portability : portable +-- +-- Convinience definitions for working with 'C.ByteString's. 
+-- +----------------------------------------------------------------------------- + +module Text.Parsec.ByteString + ( Parser, GenParser, parseFromFile + ) where + +import Text.Parsec.Error +import Text.Parsec.Prim + +import qualified Data.ByteString.Char8 as C + +type Parser = Parsec C.ByteString () +type GenParser t st = Parsec C.ByteString st + +-- | @parseFromFile p filePath@ runs a strict bytestring parser @p@ on the +-- input read from @filePath@ using 'ByteString.Char8.readFile'. Returns either a 'ParseError' +-- ('Left') or a value of type @a@ ('Right'). +-- +-- > main = do{ result <- parseFromFile numbers "digits.txt" +-- > ; case result of +-- > Left err -> print err +-- > Right xs -> print (sum xs) +-- > } + +parseFromFile :: Parser a -> String -> IO (Either ParseError a) +parseFromFile p fname + = do input <- C.readFile fname + return (runP p () fname input) diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/Parsec/Char.hs cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/Parsec/Char.hs --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/Parsec/Char.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/Parsec/Char.hs 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,151 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Text.Parsec.Char +-- Copyright : (c) Daan Leijen 1999-2001, (c) Paolo Martini 2007 +-- License : BSD-style (see the LICENSE file) +-- +-- Maintainer : derek.a.elkins@gmail.com +-- Stability : provisional +-- Portability : portable +-- +-- Commonly used character parsers. +-- +----------------------------------------------------------------------------- + +{-# LANGUAGE FlexibleContexts #-} + +module Text.Parsec.Char where + +import Data.Char +import Text.Parsec.Pos +import Text.Parsec.Prim +import Control.Applicative ((*>)) + +-- | @oneOf cs@ succeeds if the current character is in the supplied +-- list of characters @cs@. 
Returns the parsed character. See also +-- 'satisfy'. +-- +-- > vowel = oneOf "aeiou" + +oneOf :: (Stream s m Char) => [Char] -> ParsecT s u m Char +oneOf cs = satisfy (\c -> elem c cs) + +-- | As the dual of 'oneOf', @noneOf cs@ succeeds if the current +-- character /not/ in the supplied list of characters @cs@. Returns the +-- parsed character. +-- +-- > consonant = noneOf "aeiou" + +noneOf :: (Stream s m Char) => [Char] -> ParsecT s u m Char +noneOf cs = satisfy (\c -> not (elem c cs)) + +-- | Skips /zero/ or more white space characters. See also 'skipMany'. + +spaces :: (Stream s m Char) => ParsecT s u m () +spaces = skipMany space "white space" + +-- | Parses a white space character (any character which satisfies 'isSpace') +-- Returns the parsed character. + +space :: (Stream s m Char) => ParsecT s u m Char +space = satisfy isSpace "space" + +-- | Parses a newline character (\'\\n\'). Returns a newline character. + +newline :: (Stream s m Char) => ParsecT s u m Char +newline = char '\n' "lf new-line" + +-- | Parses a carriage return character (\'\\r\') followed by a newline character (\'\\n\'). +-- Returns a newline character. + +crlf :: (Stream s m Char) => ParsecT s u m Char +crlf = char '\r' *> char '\n' "crlf new-line" + +-- | Parses a CRLF (see 'crlf') or LF (see 'newline') end-of-line. +-- Returns a newline character (\'\\n\'). +-- +-- > endOfLine = newline <|> crlf +-- + +endOfLine :: (Stream s m Char) => ParsecT s u m Char +endOfLine = newline <|> crlf "new-line" + +-- | Parses a tab character (\'\\t\'). Returns a tab character. + +tab :: (Stream s m Char) => ParsecT s u m Char +tab = char '\t' "tab" + +-- | Parses an upper case letter (a character between \'A\' and \'Z\'). +-- Returns the parsed character. + +upper :: (Stream s m Char) => ParsecT s u m Char +upper = satisfy isUpper "uppercase letter" + +-- | Parses a lower case character (a character between \'a\' and \'z\'). +-- Returns the parsed character. 
+ +lower :: (Stream s m Char) => ParsecT s u m Char +lower = satisfy isLower "lowercase letter" + +-- | Parses a letter or digit (a character between \'0\' and \'9\'). +-- Returns the parsed character. + +alphaNum :: (Stream s m Char => ParsecT s u m Char) +alphaNum = satisfy isAlphaNum "letter or digit" + +-- | Parses a letter (an upper case or lower case character). Returns the +-- parsed character. + +letter :: (Stream s m Char) => ParsecT s u m Char +letter = satisfy isAlpha "letter" + +-- | Parses a digit. Returns the parsed character. + +digit :: (Stream s m Char) => ParsecT s u m Char +digit = satisfy isDigit "digit" + +-- | Parses a hexadecimal digit (a digit or a letter between \'a\' and +-- \'f\' or \'A\' and \'F\'). Returns the parsed character. + +hexDigit :: (Stream s m Char) => ParsecT s u m Char +hexDigit = satisfy isHexDigit "hexadecimal digit" + +-- | Parses an octal digit (a character between \'0\' and \'7\'). Returns +-- the parsed character. + +octDigit :: (Stream s m Char) => ParsecT s u m Char +octDigit = satisfy isOctDigit "octal digit" + +-- | @char c@ parses a single character @c@. Returns the parsed +-- character (i.e. @c@). +-- +-- > semiColon = char ';' + +char :: (Stream s m Char) => Char -> ParsecT s u m Char +char c = satisfy (==c) show [c] + +-- | This parser succeeds for any character. Returns the parsed character. + +anyChar :: (Stream s m Char) => ParsecT s u m Char +anyChar = satisfy (const True) + +-- | The parser @satisfy f@ succeeds for any character for which the +-- supplied function @f@ returns 'True'. Returns the character that is +-- actually parsed. + +-- > digit = satisfy isDigit +-- > oneOf cs = satisfy (\c -> c `elem` cs) + +satisfy :: (Stream s m Char) => (Char -> Bool) -> ParsecT s u m Char +satisfy f = tokenPrim (\c -> show [c]) + (\pos c _cs -> updatePosChar pos c) + (\c -> if f c then Just c else Nothing) + +-- | @string s@ parses a sequence of characters given by @s@. Returns +-- the parsed string (i.e. @s@). 
+-- +-- > divOrMod = string "div" +-- > <|> string "mod" + +string :: (Stream s m Char) => String -> ParsecT s u m String +string s = tokens show updatePosString s diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/Parsec/Combinator.hs cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/Parsec/Combinator.hs --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/Parsec/Combinator.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/Parsec/Combinator.hs 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,277 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Text.Parsec.Combinator +-- Copyright : (c) Daan Leijen 1999-2001, (c) Paolo Martini 2007 +-- License : BSD-style (see the LICENSE file) +-- +-- Maintainer : derek.a.elkins@gmail.com +-- Stability : provisional +-- Portability : portable +-- +-- Commonly used generic combinators +-- +----------------------------------------------------------------------------- + +module Text.Parsec.Combinator + ( choice + , count + , between + , option, optionMaybe, optional + , skipMany1 + , many1 + , sepBy, sepBy1 + , endBy, endBy1 + , sepEndBy, sepEndBy1 + , chainl, chainl1 + , chainr, chainr1 + , eof, notFollowedBy + -- tricky combinators + , manyTill, lookAhead, anyToken + ) where + +import Control.Monad +import Text.Parsec.Prim + +-- | @choice ps@ tries to apply the parsers in the list @ps@ in order, +-- until one of them succeeds. Returns the value of the succeeding +-- parser. + +choice :: (Stream s m t) => [ParsecT s u m a] -> ParsecT s u m a +choice ps = foldr (<|>) mzero ps + +-- | @option x p@ tries to apply parser @p@. If @p@ fails without +-- consuming input, it returns the value @x@, otherwise the value +-- returned by @p@. 
+-- +-- > priority = option 0 (do{ d <- digit +-- > ; return (digitToInt d) +-- > }) + +option :: (Stream s m t) => a -> ParsecT s u m a -> ParsecT s u m a +option x p = p <|> return x + +-- | @optionMaybe p@ tries to apply parser @p@. If @p@ fails without +-- consuming input, it return 'Nothing', otherwise it returns +-- 'Just' the value returned by @p@. + +optionMaybe :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m (Maybe a) +optionMaybe p = option Nothing (liftM Just p) + +-- | @optional p@ tries to apply parser @p@. It will parse @p@ or nothing. +-- It only fails if @p@ fails after consuming input. It discards the result +-- of @p@. + +optional :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m () +optional p = do{ p; return ()} <|> return () + +-- | @between open close p@ parses @open@, followed by @p@ and @close@. +-- Returns the value returned by @p@. +-- +-- > braces = between (symbol "{") (symbol "}") + +between :: (Stream s m t) => ParsecT s u m open -> ParsecT s u m close + -> ParsecT s u m a -> ParsecT s u m a +between open close p + = do{ open; x <- p; close; return x } + +-- | @skipMany1 p@ applies the parser @p@ /one/ or more times, skipping +-- its result. + +skipMany1 :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m () +skipMany1 p = do{ p; skipMany p } +{- +skipMany p = scan + where + scan = do{ p; scan } <|> return () +-} + +-- | @many1 p@ applies the parser @p@ /one/ or more times. Returns a +-- list of the returned values of @p@. +-- +-- > word = many1 letter + +many1 :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m [a] +many1 p = do{ x <- p; xs <- many p; return (x:xs) } +{- +many p = scan id + where + scan f = do{ x <- p + ; scan (\tail -> f (x:tail)) + } + <|> return (f []) +-} + + +-- | @sepBy p sep@ parses /zero/ or more occurrences of @p@, separated +-- by @sep@. Returns a list of values returned by @p@. 
+-- +-- > commaSep p = p `sepBy` (symbol ",") + +sepBy :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m sep -> ParsecT s u m [a] +sepBy p sep = sepBy1 p sep <|> return [] + +-- | @sepBy1 p sep@ parses /one/ or more occurrences of @p@, separated +-- by @sep@. Returns a list of values returned by @p@. + +sepBy1 :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m sep -> ParsecT s u m [a] +sepBy1 p sep = do{ x <- p + ; xs <- many (sep >> p) + ; return (x:xs) + } + + +-- | @sepEndBy1 p sep@ parses /one/ or more occurrences of @p@, +-- separated and optionally ended by @sep@. Returns a list of values +-- returned by @p@. + +sepEndBy1 :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m sep -> ParsecT s u m [a] +sepEndBy1 p sep = do{ x <- p + ; do{ sep + ; xs <- sepEndBy p sep + ; return (x:xs) + } + <|> return [x] + } + +-- | @sepEndBy p sep@ parses /zero/ or more occurrences of @p@, +-- separated and optionally ended by @sep@, ie. haskell style +-- statements. Returns a list of values returned by @p@. +-- +-- > haskellStatements = haskellStatement `sepEndBy` semi + +sepEndBy :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m sep -> ParsecT s u m [a] +sepEndBy p sep = sepEndBy1 p sep <|> return [] + + +-- | @endBy1 p sep@ parses /one/ or more occurrences of @p@, separated +-- and ended by @sep@. Returns a list of values returned by @p@. + +endBy1 :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m sep -> ParsecT s u m [a] +endBy1 p sep = many1 (do{ x <- p; sep; return x }) + +-- | @endBy p sep@ parses /zero/ or more occurrences of @p@, separated +-- and ended by @sep@. Returns a list of values returned by @p@. +-- +-- > cStatements = cStatement `endBy` semi + +endBy :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m sep -> ParsecT s u m [a] +endBy p sep = many (do{ x <- p; sep; return x }) + +-- | @count n p@ parses @n@ occurrences of @p@. If @n@ is smaller or +-- equal to zero, the parser equals to @return []@. 
Returns a list of +-- @n@ values returned by @p@. + +count :: (Stream s m t) => Int -> ParsecT s u m a -> ParsecT s u m [a] +count n p | n <= 0 = return [] + | otherwise = sequence (replicate n p) + +-- | @chainr p op x@ parses /zero/ or more occurrences of @p@, +-- separated by @op@ Returns a value obtained by a /right/ associative +-- application of all functions returned by @op@ to the values returned +-- by @p@. If there are no occurrences of @p@, the value @x@ is +-- returned. + +chainr :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m (a -> a -> a) -> a -> ParsecT s u m a +chainr p op x = chainr1 p op <|> return x + +-- | @chainl p op x@ parses /zero/ or more occurrences of @p@, +-- separated by @op@. Returns a value obtained by a /left/ associative +-- application of all functions returned by @op@ to the values returned +-- by @p@. If there are zero occurrences of @p@, the value @x@ is +-- returned. + +chainl :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m (a -> a -> a) -> a -> ParsecT s u m a +chainl p op x = chainl1 p op <|> return x + +-- | @chainl1 p op x@ parses /one/ or more occurrences of @p@, +-- separated by @op@ Returns a value obtained by a /left/ associative +-- application of all functions returned by @op@ to the values returned +-- by @p@. . This parser can for example be used to eliminate left +-- recursion which typically occurs in expression grammars. 
+-- +-- > expr = term `chainl1` addop +-- > term = factor `chainl1` mulop +-- > factor = parens expr <|> integer +-- > +-- > mulop = do{ symbol "*"; return (*) } +-- > <|> do{ symbol "/"; return (div) } +-- > +-- > addop = do{ symbol "+"; return (+) } +-- > <|> do{ symbol "-"; return (-) } + +chainl1 :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m (a -> a -> a) -> ParsecT s u m a +chainl1 p op = do{ x <- p; rest x } + where + rest x = do{ f <- op + ; y <- p + ; rest (f x y) + } + <|> return x + +-- | @chainr1 p op x@ parses /one/ or more occurrences of |p|, +-- separated by @op@ Returns a value obtained by a /right/ associative +-- application of all functions returned by @op@ to the values returned +-- by @p@. + +chainr1 :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m (a -> a -> a) -> ParsecT s u m a +chainr1 p op = scan + where + scan = do{ x <- p; rest x } + + rest x = do{ f <- op + ; y <- scan + ; return (f x y) + } + <|> return x + +----------------------------------------------------------- +-- Tricky combinators +----------------------------------------------------------- +-- | The parser @anyToken@ accepts any kind of token. It is for example +-- used to implement 'eof'. Returns the accepted token. + +anyToken :: (Stream s m t, Show t) => ParsecT s u m t +anyToken = tokenPrim show (\pos _tok _toks -> pos) Just + +-- | This parser only succeeds at the end of the input. This is not a +-- primitive parser but it is defined using 'notFollowedBy'. +-- +-- > eof = notFollowedBy anyToken "end of input" + +eof :: (Stream s m t, Show t) => ParsecT s u m () +eof = notFollowedBy anyToken "end of input" + +-- | @notFollowedBy p@ only succeeds when parser @p@ fails. This parser +-- does not consume any input. This parser can be used to implement the +-- \'longest match\' rule. 
For example, when recognizing keywords (for +-- example @let@), we want to make sure that a keyword is not followed +-- by a legal identifier character, in which case the keyword is +-- actually an identifier (for example @lets@). We can program this +-- behaviour as follows: +-- +-- > keywordLet = try (do{ string "let" +-- > ; notFollowedBy alphaNum +-- > }) + +notFollowedBy :: (Stream s m t, Show a) => ParsecT s u m a -> ParsecT s u m () +notFollowedBy p = try (do{ c <- try p; unexpected (show c) } + <|> return () + ) + +-- | @manyTill p end@ applies parser @p@ /zero/ or more times until +-- parser @end@ succeeds. Returns the list of values returned by @p@. +-- This parser can be used to scan comments: +-- +-- > simpleComment = do{ string "")) +-- > } +-- +-- Note the overlapping parsers @anyChar@ and @string \"-->\"@, and +-- therefore the use of the 'try' combinator. + +manyTill :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m end -> ParsecT s u m [a] +manyTill p end = scan + where + scan = do{ end; return [] } + <|> + do{ x <- p; xs <- scan; return (x:xs) } diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/Parsec/Error.hs cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/Parsec/Error.hs --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/Parsec/Error.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/Parsec/Error.hs 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,214 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Text.Parsec.Error +-- Copyright : (c) Daan Leijen 1999-2001, (c) Paolo Martini 2007 +-- License : BSD-style (see the LICENSE file) +-- +-- Maintainer : derek.a.elkins@gmail.com +-- Stability : provisional +-- Portability : portable +-- +-- Parse errors +-- +----------------------------------------------------------------------------- + +module Text.Parsec.Error + ( Message ( SysUnExpect, UnExpect, Expect, Message ) + , messageString + , 
ParseError, errorPos, errorMessages, errorIsUnknown + , showErrorMessages + , newErrorMessage, newErrorUnknown + , addErrorMessage, setErrorPos, setErrorMessage + , mergeError + ) where + +import Data.List ( nub, sort ) +import Data.Typeable ( Typeable ) + +import Text.Parsec.Pos + +-- | This abstract data type represents parse error messages. There are +-- four kinds of messages: +-- +-- > data Message = SysUnExpect String +-- > | UnExpect String +-- > | Expect String +-- > | Message String +-- +-- The fine distinction between different kinds of parse errors allows +-- the system to generate quite good error messages for the user. It +-- also allows error messages that are formatted in different +-- languages. Each kind of message is generated by different combinators: +-- +-- * A 'SysUnExpect' message is automatically generated by the +-- 'Text.Parsec.Combinator.satisfy' combinator. The argument is the +-- unexpected input. +-- +-- * A 'UnExpect' message is generated by the 'Text.Parsec.Prim.unexpected' +-- combinator. The argument describes the +-- unexpected item. +-- +-- * A 'Expect' message is generated by the 'Text.Parsec.Prim.' +-- combinator. The argument describes the expected item. +-- +-- * A 'Message' message is generated by the 'fail' +-- combinator. The argument is some general parser message. + +data Message = SysUnExpect !String -- @ library generated unexpect + | UnExpect !String -- @ unexpected something + | Expect !String -- @ expecting something + | Message !String -- @ raw message + deriving ( Typeable ) + +instance Enum Message where + fromEnum (SysUnExpect _) = 0 + fromEnum (UnExpect _) = 1 + fromEnum (Expect _) = 2 + fromEnum (Message _) = 3 + toEnum _ = error "toEnum is undefined for Message" + +-- < Return 'True' only when 'compare' would return 'EQ'. + +instance Eq Message where + + m1 == m2 = fromEnum m1 == fromEnum m2 + +-- < Compares two error messages without looking at their content. 
Only +-- the constructors are compared where: +-- +-- > 'SysUnExpect' < 'UnExpect' < 'Expect' < 'Message' + +instance Ord Message where + compare msg1 msg2 = compare (fromEnum msg1) (fromEnum msg2) + +-- | Extract the message string from an error message + +messageString :: Message -> String +messageString (SysUnExpect s) = s +messageString (UnExpect s) = s +messageString (Expect s) = s +messageString (Message s) = s + +-- | The abstract data type @ParseError@ represents parse errors. It +-- provides the source position ('SourcePos') of the error +-- and a list of error messages ('Message'). A @ParseError@ +-- can be returned by the function 'Text.Parsec.Prim.parse'. @ParseError@ is an +-- instance of the 'Show' and 'Eq' classes. + +data ParseError = ParseError !SourcePos [Message] + deriving ( Typeable ) + +-- | Extracts the source position from the parse error + +errorPos :: ParseError -> SourcePos +errorPos (ParseError pos _msgs) + = pos + +-- | Extracts the list of error messages from the parse error + +errorMessages :: ParseError -> [Message] +errorMessages (ParseError _pos msgs) + = sort msgs + +errorIsUnknown :: ParseError -> Bool +errorIsUnknown (ParseError _pos msgs) + = null msgs + +-- < Create parse errors + +newErrorUnknown :: SourcePos -> ParseError +newErrorUnknown pos + = ParseError pos [] + +newErrorMessage :: Message -> SourcePos -> ParseError +newErrorMessage msg pos + = ParseError pos [msg] + +addErrorMessage :: Message -> ParseError -> ParseError +addErrorMessage msg (ParseError pos msgs) + = ParseError pos (msg:msgs) + +setErrorPos :: SourcePos -> ParseError -> ParseError +setErrorPos pos (ParseError _ msgs) + = ParseError pos msgs + +setErrorMessage :: Message -> ParseError -> ParseError +setErrorMessage msg (ParseError pos msgs) + = ParseError pos (msg : filter (msg /=) msgs) + +mergeError :: ParseError -> ParseError -> ParseError +mergeError e1@(ParseError pos1 msgs1) e2@(ParseError pos2 msgs2) + -- prefer meaningful errors + | null msgs2 && 
not (null msgs1) = e1 + | null msgs1 && not (null msgs2) = e2 + | otherwise + = case pos1 `compare` pos2 of + -- select the longest match + EQ -> ParseError pos1 (msgs1 ++ msgs2) + GT -> e1 + LT -> e2 + +instance Show ParseError where + show err + = show (errorPos err) ++ ":" ++ + showErrorMessages "or" "unknown parse error" + "expecting" "unexpected" "end of input" + (errorMessages err) + +instance Eq ParseError where + l == r + = errorPos l == errorPos r && messageStrs l == messageStrs r + where + messageStrs = map messageString . errorMessages + +-- Language independent show function + +-- TODO +-- < The standard function for showing error messages. Formats a list of +-- error messages in English. This function is used in the |Show| +-- instance of |ParseError <#ParseError>|. The resulting string will be +-- formatted like: +-- +-- |unexpected /{The first UnExpect or a SysUnExpect message}/; +-- expecting /{comma separated list of Expect messages}/; +-- /{comma separated list of Message messages}/ + +showErrorMessages :: + String -> String -> String -> String -> String -> [Message] -> String +showErrorMessages msgOr msgUnknown msgExpecting msgUnExpected msgEndOfInput msgs + | null msgs = msgUnknown + | otherwise = concat $ map ("\n"++) $ clean $ + [showSysUnExpect,showUnExpect,showExpect,showMessages] + where + (sysUnExpect,msgs1) = span ((SysUnExpect "") ==) msgs + (unExpect,msgs2) = span ((UnExpect "") ==) msgs1 + (expect,messages) = span ((Expect "") ==) msgs2 + + showExpect = showMany msgExpecting expect + showUnExpect = showMany msgUnExpected unExpect + showSysUnExpect | not (null unExpect) || + null sysUnExpect = "" + | null firstMsg = msgUnExpected ++ " " ++ msgEndOfInput + | otherwise = msgUnExpected ++ " " ++ firstMsg + where + firstMsg = messageString (head sysUnExpect) + + showMessages = showMany "" messages + + -- helpers + showMany pre msgs = case clean (map messageString msgs) of + [] -> "" + ms | null pre -> commasOr ms + | otherwise -> pre ++ " " 
++ commasOr ms + + commasOr [] = "" + commasOr [m] = m + commasOr ms = commaSep (init ms) ++ " " ++ msgOr ++ " " ++ last ms + + commaSep = separate ", " . clean + + separate _ [] = "" + separate _ [m] = m + separate sep (m:ms) = m ++ sep ++ separate sep ms + + clean = nub . filter (not . null) diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/Parsec/Expr.hs cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/Parsec/Expr.hs --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/Parsec/Expr.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/Parsec/Expr.hs 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,172 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Text.Parsec.Expr +-- Copyright : (c) Daan Leijen 1999-2001, (c) Paolo Martini 2007 +-- License : BSD-style (see the LICENSE file) +-- +-- Maintainer : derek.a.elkins@gmail.com +-- Stability : provisional +-- Portability : non-portable +-- +-- A helper module to parse \"expressions\". +-- Builds a parser given a table of operators and associativities. +-- +----------------------------------------------------------------------------- + +module Text.Parsec.Expr + ( Assoc(..), Operator(..), OperatorTable + , buildExpressionParser + ) where + +import Data.Typeable ( Typeable ) + +import Text.Parsec.Prim +import Text.Parsec.Combinator + +----------------------------------------------------------- +-- Assoc and OperatorTable +----------------------------------------------------------- + +-- | This data type specifies the associativity of operators: left, right +-- or none. + +data Assoc = AssocNone + | AssocLeft + | AssocRight + deriving ( Typeable ) + +-- | This data type specifies operators that work on values of type @a@. +-- An operator is either binary infix or unary prefix or postfix. A +-- binary operator has also an associated associativity. 
+ +data Operator s u m a = Infix (ParsecT s u m (a -> a -> a)) Assoc + | Prefix (ParsecT s u m (a -> a)) + | Postfix (ParsecT s u m (a -> a)) +#if MIN_VERSION_base(4,7,0) + deriving ( Typeable ) +#endif + +-- | An @OperatorTable s u m a@ is a list of @Operator s u m a@ +-- lists. The list is ordered in descending +-- precedence. All operators in one list have the same precedence (but +-- may have a different associativity). + +type OperatorTable s u m a = [[Operator s u m a]] + +----------------------------------------------------------- +-- Convert an OperatorTable and basic term parser into +-- a full fledged expression parser +----------------------------------------------------------- + +-- | @buildExpressionParser table term@ builds an expression parser for +-- terms @term@ with operators from @table@, taking the associativity +-- and precedence specified in @table@ into account. Prefix and postfix +-- operators of the same precedence can only occur once (i.e. @--2@ is +-- not allowed if @-@ is prefix negate). Prefix and postfix operators +-- of the same precedence associate to the left (i.e. if @++@ is +-- postfix increment, than @-2++@ equals @-1@, not @-3@). +-- +-- The @buildExpressionParser@ takes care of all the complexity +-- involved in building expression parser. Here is an example of an +-- expression parser that handles prefix signs, postfix increment and +-- basic arithmetic. 
+-- +-- > expr = buildExpressionParser table term +-- > "expression" +-- > +-- > term = parens expr +-- > <|> natural +-- > "simple expression" +-- > +-- > table = [ [prefix "-" negate, prefix "+" id ] +-- > , [postfix "++" (+1)] +-- > , [binary "*" (*) AssocLeft, binary "/" (div) AssocLeft ] +-- > , [binary "+" (+) AssocLeft, binary "-" (-) AssocLeft ] +-- > ] +-- > +-- > binary name fun assoc = Infix (do{ reservedOp name; return fun }) assoc +-- > prefix name fun = Prefix (do{ reservedOp name; return fun }) +-- > postfix name fun = Postfix (do{ reservedOp name; return fun }) + +buildExpressionParser :: (Stream s m t) + => OperatorTable s u m a + -> ParsecT s u m a + -> ParsecT s u m a +buildExpressionParser operators simpleExpr + = foldl (makeParser) simpleExpr operators + where + makeParser term ops + = let (rassoc,lassoc,nassoc + ,prefix,postfix) = foldr splitOp ([],[],[],[],[]) ops + + rassocOp = choice rassoc + lassocOp = choice lassoc + nassocOp = choice nassoc + prefixOp = choice prefix "" + postfixOp = choice postfix "" + + ambigious assoc op= try $ + do{ op; fail ("ambiguous use of a " ++ assoc + ++ " associative operator") + } + + ambigiousRight = ambigious "right" rassocOp + ambigiousLeft = ambigious "left" lassocOp + ambigiousNon = ambigious "non" nassocOp + + termP = do{ pre <- prefixP + ; x <- term + ; post <- postfixP + ; return (post (pre x)) + } + + postfixP = postfixOp <|> return id + + prefixP = prefixOp <|> return id + + rassocP x = do{ f <- rassocOp + ; y <- do{ z <- termP; rassocP1 z } + ; return (f x y) + } + <|> ambigiousLeft + <|> ambigiousNon + -- <|> return x + + rassocP1 x = rassocP x <|> return x + + lassocP x = do{ f <- lassocOp + ; y <- termP + ; lassocP1 (f x y) + } + <|> ambigiousRight + <|> ambigiousNon + -- <|> return x + + lassocP1 x = lassocP x <|> return x + + nassocP x = do{ f <- nassocOp + ; y <- termP + ; ambigiousRight + <|> ambigiousLeft + <|> ambigiousNon + <|> return (f x y) + } + -- <|> return x + + in do{ x <- termP + 
; rassocP x <|> lassocP x <|> nassocP x <|> return x + "operator" + } + + + splitOp (Infix op assoc) (rassoc,lassoc,nassoc,prefix,postfix) + = case assoc of + AssocNone -> (rassoc,lassoc,op:nassoc,prefix,postfix) + AssocLeft -> (rassoc,op:lassoc,nassoc,prefix,postfix) + AssocRight -> (op:rassoc,lassoc,nassoc,prefix,postfix) + + splitOp (Prefix op) (rassoc,lassoc,nassoc,prefix,postfix) + = (rassoc,lassoc,nassoc,op:prefix,postfix) + + splitOp (Postfix op) (rassoc,lassoc,nassoc,prefix,postfix) + = (rassoc,lassoc,nassoc,prefix,op:postfix) diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/Parsec/Language.hs cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/Parsec/Language.hs --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/Parsec/Language.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/Parsec/Language.hs 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,149 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Text.Parsec.Language +-- Copyright : (c) Daan Leijen 1999-2001, (c) Paolo Martini 2007 +-- License : BSD-style (see the LICENSE file) +-- +-- Maintainer : derek.a.elkins@gmail.com +-- Stability : provisional +-- Portability : non-portable (uses non-portable module Text.Parsec.Token) +-- +-- A helper module that defines some language definitions that can be used +-- to instantiate a token parser (see "Text.Parsec.Token"). +-- +----------------------------------------------------------------------------- + +module Text.Parsec.Language + ( haskellDef, haskell + , mondrianDef, mondrian + , emptyDef + , haskellStyle + , javaStyle + , LanguageDef + , GenLanguageDef + ) where + +import Text.Parsec +import Text.Parsec.Token + +----------------------------------------------------------- +-- Styles: haskellStyle, javaStyle +----------------------------------------------------------- + +-- | This is a minimal token definition for Haskell style languages. 
It +-- defines the style of comments, valid identifiers and case +-- sensitivity. It does not define any reserved words or operators. + +haskellStyle :: LanguageDef st +haskellStyle = emptyDef + { commentStart = "{-" + , commentEnd = "-}" + , commentLine = "--" + , nestedComments = True + , identStart = letter + , identLetter = alphaNum <|> oneOf "_'" + , opStart = opLetter haskellStyle + , opLetter = oneOf ":!#$%&*+./<=>?@\\^|-~" + , reservedOpNames= [] + , reservedNames = [] + , caseSensitive = True + } + +-- | This is a minimal token definition for Java style languages. It +-- defines the style of comments, valid identifiers and case +-- sensitivity. It does not define any reserved words or operators. + +javaStyle :: LanguageDef st +javaStyle = emptyDef + { commentStart = "/*" + , commentEnd = "*/" + , commentLine = "//" + , nestedComments = True + , identStart = letter + , identLetter = alphaNum <|> oneOf "_'" + , reservedNames = [] + , reservedOpNames= [] + , caseSensitive = False + } + +----------------------------------------------------------- +-- minimal language definition +-------------------------------------------------------- + +-- | This is the most minimal token definition. It is recommended to use +-- this definition as the basis for other definitions. @emptyDef@ has +-- no reserved names or operators, is case sensitive and doesn't accept +-- comments, identifiers or operators. + +emptyDef :: LanguageDef st +emptyDef = LanguageDef + { commentStart = "" + , commentEnd = "" + , commentLine = "" + , nestedComments = True + , identStart = letter <|> char '_' + , identLetter = alphaNum <|> oneOf "_'" + , opStart = opLetter emptyDef + , opLetter = oneOf ":!#$%&*+./<=>?@\\^|-~" + , reservedOpNames= [] + , reservedNames = [] + , caseSensitive = True + } + + + +----------------------------------------------------------- +-- Haskell +----------------------------------------------------------- + +-- | A lexer for the haskell language. 
+ +haskell :: TokenParser st +haskell = makeTokenParser haskellDef + +-- | The language definition for the Haskell language. + +haskellDef :: LanguageDef st +haskellDef = haskell98Def + { identLetter = identLetter haskell98Def <|> char '#' + , reservedNames = reservedNames haskell98Def ++ + ["foreign","import","export","primitive" + ,"_ccall_","_casm_" + ,"forall" + ] + } + +-- | The language definition for the language Haskell98. + +haskell98Def :: LanguageDef st +haskell98Def = haskellStyle + { reservedOpNames= ["::","..","=","\\","|","<-","->","@","~","=>"] + , reservedNames = ["let","in","case","of","if","then","else", + "data","type", + "class","default","deriving","do","import", + "infix","infixl","infixr","instance","module", + "newtype","where", + "primitive" + -- "as","qualified","hiding" + ] + } + + +----------------------------------------------------------- +-- Mondrian +----------------------------------------------------------- + +-- | A lexer for the mondrian language. + +mondrian :: TokenParser st +mondrian = makeTokenParser mondrianDef + +-- | The language definition for the language Mondrian. 
+ +mondrianDef :: LanguageDef st +mondrianDef = javaStyle + { reservedNames = [ "case", "class", "default", "extends" + , "import", "in", "let", "new", "of", "package" + ] + , caseSensitive = True + } diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/Parsec/Perm.hs cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/Parsec/Perm.hs --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/Parsec/Perm.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/Parsec/Perm.hs 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,196 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Text.Parsec.Perm +-- Copyright : (c) Daan Leijen 1999-2001, (c) Paolo Martini 2007 +-- License : BSD-style (see the file libraries/parsec/LICENSE) +-- +-- Maintainer : derek.a.elkins@gmail.com +-- Stability : provisional +-- Portability : non-portable (uses existentially quantified data constructors) +-- +-- This module implements permutation parsers. The algorithm used +-- is fairly complex since we push the type system to its limits :-) +-- The algorithm is described in: +-- +-- /Parsing Permutation Phrases,/ +-- by Arthur Baars, Andres Loh and Doaitse Swierstra. +-- Published as a functional pearl at the Haskell Workshop 2001. 
+-- +----------------------------------------------------------------------------- + +{-# LANGUAGE ExistentialQuantification, StandaloneDeriving #-} + +module Text.Parsec.Perm + ( PermParser + , StreamPermParser -- abstract + + , permute + , (<||>), (<$$>) + , (<|?>), (<$?>) + ) where + +import Text.Parsec + +import Control.Monad.Identity +import Data.Typeable ( Typeable ) +#if !(MIN_VERSION_base(4,7,0)) +-- For GHC 7.6 +import Data.Typeable ( Typeable3 ) +#endif + +infixl 1 <||>, <|?> +infixl 2 <$$>, <$?> + + +{--------------------------------------------------------------- + test -- parse a permutation of + * an optional string of 'a's + * a required 'b' + * an optional 'c' +---------------------------------------------------------------} +{- +test input + = parse (do{ x <- ptest; eof; return x }) "" input + +ptest :: Parser (String,Char,Char) +ptest + = permute $ + (,,) <$?> ("",many1 (char 'a')) + <||> char 'b' + <|?> ('_',char 'c') +-} + +{--------------------------------------------------------------- + Building a permutation parser +---------------------------------------------------------------} + +-- | The expression @perm \<||> p@ adds parser @p@ to the permutation +-- parser @perm@. The parser @p@ is not allowed to accept empty input - +-- use the optional combinator ('<|?>') instead. Returns a +-- new permutation parser that includes @p@. + +(<||>) :: (Stream s Identity tok) => StreamPermParser s st (a -> b) -> Parsec s st a -> StreamPermParser s st b +(<||>) perm p = add perm p + +-- | The expression @f \<$$> p@ creates a fresh permutation parser +-- consisting of parser @p@. The the final result of the permutation +-- parser is the function @f@ applied to the return value of @p@. The +-- parser @p@ is not allowed to accept empty input - use the optional +-- combinator ('<$?>') instead. 
+-- +-- If the function @f@ takes more than one parameter, the type variable +-- @b@ is instantiated to a functional type which combines nicely with +-- the adds parser @p@ to the ('<||>') combinator. This +-- results in stylized code where a permutation parser starts with a +-- combining function @f@ followed by the parsers. The function @f@ +-- gets its parameters in the order in which the parsers are specified, +-- but actual input can be in any order. + +(<$$>) :: (Stream s Identity tok) => (a -> b) -> Parsec s st a -> StreamPermParser s st b +(<$$>) f p = newperm f <||> p + +-- | The expression @perm \<||> (x,p)@ adds parser @p@ to the +-- permutation parser @perm@. The parser @p@ is optional - if it can +-- not be applied, the default value @x@ will be used instead. Returns +-- a new permutation parser that includes the optional parser @p@. + +(<|?>) :: (Stream s Identity tok) => StreamPermParser s st (a -> b) -> (a, Parsec s st a) -> StreamPermParser s st b +(<|?>) perm (x,p) = addopt perm x p + +-- | The expression @f \<$?> (x,p)@ creates a fresh permutation parser +-- consisting of parser @p@. The the final result of the permutation +-- parser is the function @f@ applied to the return value of @p@. The +-- parser @p@ is optional - if it can not be applied, the default value +-- @x@ will be used instead. + +(<$?>) :: (Stream s Identity tok) => (a -> b) -> (a, Parsec s st a) -> StreamPermParser s st b +(<$?>) f (x,p) = newperm f <|?> (x,p) + +{--------------------------------------------------------------- + The permutation tree +---------------------------------------------------------------} + +-- | Provided for backwards compatibility. The tok type is ignored. + +type PermParser tok st a = StreamPermParser String st a + +-- | The type @StreamPermParser s st a@ denotes a permutation parser that, +-- when converted by the 'permute' function, parses +-- @s@ streams with user state @st@ and returns a value of +-- type @a@ on success. 
+-- +-- Normally, a permutation parser is first build with special operators +-- like ('<||>') and than transformed into a normal parser +-- using 'permute'. + +data StreamPermParser s st a = Perm (Maybe a) [StreamBranch s st a] +#if MIN_VERSION_base(4,7,0) + deriving ( Typeable ) +#else +deriving instance Typeable3 StreamPermParser +#endif + +-- type Branch st a = StreamBranch String st a + +data StreamBranch s st a = forall b. Branch (StreamPermParser s st (b -> a)) (Parsec s st b) +#if MIN_VERSION_base(4,7,0) + deriving ( Typeable ) +#else +deriving instance Typeable3 StreamBranch +#endif + +-- | The parser @permute perm@ parses a permutation of parser described +-- by @perm@. For example, suppose we want to parse a permutation of: +-- an optional string of @a@'s, the character @b@ and an optional @c@. +-- This can be described by: +-- +-- > test = permute (tuple <$?> ("",many1 (char 'a')) +-- > <||> char 'b' +-- > <|?> ('_',char 'c')) +-- > where +-- > tuple a b c = (a,b,c) + +-- transform a permutation tree into a normal parser +permute :: (Stream s Identity tok) => StreamPermParser s st a -> Parsec s st a +permute (Perm def xs) + = choice (map branch xs ++ empty) + where + empty + = case def of + Nothing -> [] + Just x -> [return x] + + branch (Branch perm p) + = do{ x <- p + ; f <- permute perm + ; return (f x) + } + +-- build permutation trees +newperm :: (Stream s Identity tok) => (a -> b) -> StreamPermParser s st (a -> b) +newperm f + = Perm (Just f) [] + +add :: (Stream s Identity tok) => StreamPermParser s st (a -> b) -> Parsec s st a -> StreamPermParser s st b +add perm@(Perm _mf fs) p + = Perm Nothing (first:map insert fs) + where + first = Branch perm p + insert (Branch perm' p') + = Branch (add (mapPerms flip perm') p) p' + +addopt :: (Stream s Identity tok) => StreamPermParser s st (a -> b) -> a -> Parsec s st a -> StreamPermParser s st b +addopt perm@(Perm mf fs) x p + = Perm (fmap ($ x) mf) (first:map insert fs) + where + first = Branch perm p + 
insert (Branch perm' p') + = Branch (addopt (mapPerms flip perm') x p) p' + + +mapPerms :: (Stream s Identity tok) => (a -> b) -> StreamPermParser s st a -> StreamPermParser s st b +mapPerms f (Perm x xs) + = Perm (fmap f x) (map mapBranch xs) + where + mapBranch (Branch perm p) + = Branch (mapPerms (f.) perm) p diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/Parsec/Pos.hs cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/Parsec/Pos.hs --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/Parsec/Pos.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/Parsec/Pos.hs 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,126 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Text.Parsec.Pos +-- Copyright : (c) Daan Leijen 1999-2001, (c) Paolo Martini 2007 +-- License : BSD-style (see the LICENSE file) +-- +-- Maintainer : derek.a.elkins@gmail.com +-- Stability : provisional +-- Portability : portable +-- +-- Textual source positions. +-- +----------------------------------------------------------------------------- + +module Text.Parsec.Pos + ( SourceName, Line, Column + , SourcePos + , sourceLine, sourceColumn, sourceName + , incSourceLine, incSourceColumn + , setSourceLine, setSourceColumn, setSourceName + , newPos, initialPos + , updatePosChar, updatePosString + ) where + +import Data.Data (Data) +import Data.Typeable (Typeable) + +-- < Source positions: a file name, a line and a column +-- upper left is (1,1) + +type SourceName = String +type Line = Int +type Column = Int + +-- | The abstract data type @SourcePos@ represents source positions. It +-- contains the name of the source (i.e. file name), a line number and +-- a column number. @SourcePos@ is an instance of the 'Show', 'Eq' and +-- 'Ord' class. 
+ +data SourcePos = SourcePos SourceName !Line !Column + deriving ( Eq, Ord, Data, Typeable) + +-- | Create a new 'SourcePos' with the given source name, +-- line number and column number. + +newPos :: SourceName -> Line -> Column -> SourcePos +newPos name line column + = SourcePos name line column + +-- | Create a new 'SourcePos' with the given source name, +-- and line number and column number set to 1, the upper left. + +initialPos :: SourceName -> SourcePos +initialPos name + = newPos name 1 1 + +-- | Extracts the name of the source from a source position. + +sourceName :: SourcePos -> SourceName +sourceName (SourcePos name _line _column) = name + +-- | Extracts the line number from a source position. + +sourceLine :: SourcePos -> Line +sourceLine (SourcePos _name line _column) = line + +-- | Extracts the column number from a source position. + +sourceColumn :: SourcePos -> Column +sourceColumn (SourcePos _name _line column) = column + +-- | Increments the line number of a source position. + +incSourceLine :: SourcePos -> Line -> SourcePos +incSourceLine (SourcePos name line column) n = SourcePos name (line+n) column + +-- | Increments the column number of a source position. + +incSourceColumn :: SourcePos -> Column -> SourcePos +incSourceColumn (SourcePos name line column) n = SourcePos name line (column+n) + +-- | Set the name of the source. + +setSourceName :: SourcePos -> SourceName -> SourcePos +setSourceName (SourcePos _name line column) n = SourcePos n line column + +-- | Set the line number of a source position. + +setSourceLine :: SourcePos -> Line -> SourcePos +setSourceLine (SourcePos name _line column) n = SourcePos name n column + +-- | Set the column number of a source position. + +setSourceColumn :: SourcePos -> Column -> SourcePos +setSourceColumn (SourcePos name line _column) n = SourcePos name line n + +-- | The expression @updatePosString pos s@ updates the source position +-- @pos@ by calling 'updatePosChar' on every character in @s@, ie. 
+-- @foldl updatePosChar pos string@. + +updatePosString :: SourcePos -> String -> SourcePos +updatePosString pos string + = foldl updatePosChar pos string + +-- | Update a source position given a character. If the character is a +-- newline (\'\\n\') or carriage return (\'\\r\') the line number is +-- incremented by 1. If the character is a tab (\'\t\') the column +-- number is incremented to the nearest 8'th column, ie. @column + 8 - +-- ((column-1) \`mod\` 8)@. In all other cases, the column is +-- incremented by 1. + +updatePosChar :: SourcePos -> Char -> SourcePos +updatePosChar (SourcePos name line column) c + = case c of + '\n' -> SourcePos name (line+1) 1 + '\t' -> SourcePos name line (column + 8 - ((column-1) `mod` 8)) + _ -> SourcePos name line (column + 1) + +instance Show SourcePos where + show (SourcePos name line column) + | null name = showLineColumn + | otherwise = "\"" ++ name ++ "\" " ++ showLineColumn + where + showLineColumn = "(line " ++ show line ++ + ", column " ++ show column ++ + ")" diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/Parsec/Prim.hs cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/Parsec/Prim.hs --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/Parsec/Prim.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/Parsec/Prim.hs 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,776 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Text.Parsec.Prim +-- Copyright : (c) Daan Leijen 1999-2001, (c) Paolo Martini 2007 +-- License : BSD-style (see the LICENSE file) +-- +-- Maintainer : derek.a.elkins@gmail.com +-- Stability : provisional +-- Portability : portable +-- +-- The primitive parser combinators. 
+-- +----------------------------------------------------------------------------- + +{-# LANGUAGE MultiParamTypeClasses, FunctionalDependencies, FlexibleContexts, + UndecidableInstances, StandaloneDeriving #-} +{-# OPTIONS_HADDOCK not-home #-} + +module Text.Parsec.Prim + ( unknownError + , sysUnExpectError + , unexpected + , ParsecT + , runParsecT + , mkPT + , Parsec + , Consumed(..) + , Reply(..) + , State(..) + , parsecMap + , parserReturn + , parserBind + , mergeErrorReply + , parserFail + , parserZero + , parserPlus + , () + , (<|>) + , label + , labels + , lookAhead + , Stream(..) + , tokens + , try + , token + , tokenPrim + , tokenPrimEx + , many + , skipMany + , manyAccum + , runPT + , runP + , runParserT + , runParser + , parse + , parseTest + , getPosition + , getInput + , setPosition + , setInput + , getParserState + , setParserState + , updateParserState + , getState + , putState + , modifyState + , setState + , updateState + ) where + + +import qualified Data.ByteString.Lazy.Char8 as CL +import qualified Data.ByteString.Char8 as C + +import Data.Typeable ( Typeable ) + +import qualified Data.Text as Text +import qualified Data.Text.Lazy as TextL + +import qualified Control.Applicative as Applicative ( Applicative(..), Alternative(..) ) +import Control.Monad() +import Control.Monad.Trans +import Control.Monad.Identity + +import Control.Monad.Reader.Class +import Control.Monad.State.Class +import Control.Monad.Cont.Class +import Control.Monad.Error.Class + +import Text.Parsec.Pos +import Text.Parsec.Error + +unknownError :: State s u -> ParseError +unknownError state = newErrorUnknown (statePos state) + +sysUnExpectError :: String -> SourcePos -> Reply s u a +sysUnExpectError msg pos = Error (newErrorMessage (SysUnExpect msg) pos) + +-- | The parser @unexpected msg@ always fails with an unexpected error +-- message @msg@ without consuming any input. 
+-- +-- The parsers 'fail', ('') and @unexpected@ are the three parsers +-- used to generate error messages. Of these, only ('') is commonly +-- used. For an example of the use of @unexpected@, see the definition +-- of 'Text.Parsec.Combinator.notFollowedBy'. + +unexpected :: (Stream s m t) => String -> ParsecT s u m a +unexpected msg + = ParsecT $ \s _ _ _ eerr -> + eerr $ newErrorMessage (UnExpect msg) (statePos s) + +-- | ParserT monad transformer and Parser type + +-- | @ParsecT s u m a@ is a parser with stream type @s@, user state type @u@, +-- underlying monad @m@ and return type @a@. Parsec is strict in the user state. +-- If this is undesirable, simply used a data type like @data Box a = Box a@ and +-- the state type @Box YourStateType@ to add a level of indirection. + +newtype ParsecT s u m a + = ParsecT {unParser :: forall b . + State s u + -> (a -> State s u -> ParseError -> m b) -- consumed ok + -> (ParseError -> m b) -- consumed err + -> (a -> State s u -> ParseError -> m b) -- empty ok + -> (ParseError -> m b) -- empty err + -> m b + } +#if MIN_VERSION_base(4,7,0) + deriving ( Typeable ) + -- GHC 7.6 doesn't like deriving instances of Typeabl1 for types with + -- non-* type-arguments. +#endif + +-- | Low-level unpacking of the ParsecT type. To run your parser, please look to +-- runPT, runP, runParserT, runParser and other such functions. +runParsecT :: Monad m => ParsecT s u m a -> State s u -> m (Consumed (m (Reply s u a))) +runParsecT p s = unParser p s cok cerr eok eerr + where cok a s' err = return . Consumed . return $ Ok a s' err + cerr err = return . Consumed . return $ Error err + eok a s' err = return . Empty . return $ Ok a s' err + eerr err = return . Empty . return $ Error err + +-- | Low-level creation of the ParsecT type. You really shouldn't have to do this. 
+mkPT :: Monad m => (State s u -> m (Consumed (m (Reply s u a)))) -> ParsecT s u m a +mkPT k = ParsecT $ \s cok cerr eok eerr -> do + cons <- k s + case cons of + Consumed mrep -> do + rep <- mrep + case rep of + Ok x s' err -> cok x s' err + Error err -> cerr err + Empty mrep -> do + rep <- mrep + case rep of + Ok x s' err -> eok x s' err + Error err -> eerr err + +type Parsec s u = ParsecT s u Identity + +data Consumed a = Consumed a + | Empty !a + deriving ( Typeable ) + +data Reply s u a = Ok a !(State s u) ParseError + | Error ParseError + deriving ( Typeable ) + +data State s u = State { + stateInput :: s, + statePos :: !SourcePos, + stateUser :: !u + } + deriving ( Typeable ) + +instance Functor Consumed where + fmap f (Consumed x) = Consumed (f x) + fmap f (Empty x) = Empty (f x) + +instance Functor (Reply s u) where + fmap f (Ok x s e) = Ok (f x) s e + fmap _ (Error e) = Error e -- XXX + +instance Functor (ParsecT s u m) where + fmap f p = parsecMap f p + +parsecMap :: (a -> b) -> ParsecT s u m a -> ParsecT s u m b +parsecMap f p + = ParsecT $ \s cok cerr eok eerr -> + unParser p s (cok . f) cerr (eok . f) eerr + +instance Applicative.Applicative (ParsecT s u m) where + pure = return + (<*>) = ap -- TODO: Can this be optimized? + +instance Applicative.Alternative (ParsecT s u m) where + empty = mzero + (<|>) = mplus + +instance Monad (ParsecT s u m) where + return x = parserReturn x + p >>= f = parserBind p f + fail msg = parserFail msg + +instance (MonadIO m) => MonadIO (ParsecT s u m) where + liftIO = lift . liftIO + +instance (MonadReader r m) => MonadReader r (ParsecT s u m) where + ask = lift ask + local f p = mkPT $ \s -> local f (runParsecT p s) + +-- I'm presuming the user might want a separate, non-backtracking +-- state aside from the Parsec user state. +instance (MonadState s m) => MonadState s (ParsecT s' u m) where + get = lift get + put = lift . 
put + +instance (MonadCont m) => MonadCont (ParsecT s u m) where + callCC f = mkPT $ \s -> + callCC $ \c -> + runParsecT (f (\a -> mkPT $ \s' -> c (pack s' a))) s + + where pack s a= Empty $ return (Ok a s (unknownError s)) + +instance (MonadError e m) => MonadError e (ParsecT s u m) where + throwError = lift . throwError + p `catchError` h = mkPT $ \s -> + runParsecT p s `catchError` \e -> + runParsecT (h e) s + +parserReturn :: a -> ParsecT s u m a +parserReturn x + = ParsecT $ \s _ _ eok _ -> + eok x s (unknownError s) + +parserBind :: ParsecT s u m a -> (a -> ParsecT s u m b) -> ParsecT s u m b +{-# INLINE parserBind #-} +parserBind m k + = ParsecT $ \s cok cerr eok eerr -> + let + -- consumed-okay case for m + mcok x s err = + let + -- if (k x) consumes, those go straigt up + pcok = cok + pcerr = cerr + + -- if (k x) doesn't consume input, but is okay, + -- we still return in the consumed continuation + peok x s err' = cok x s (mergeError err err') + + -- if (k x) doesn't consume input, but errors, + -- we return the error in the 'consumed-error' + -- continuation + peerr err' = cerr (mergeError err err') + in unParser (k x) s pcok pcerr peok peerr + + -- empty-ok case for m + meok x s err = + let + -- in these cases, (k x) can return as empty + pcok = cok + peok x s err' = eok x s (mergeError err err') + pcerr = cerr + peerr err' = eerr (mergeError err err') + in unParser (k x) s pcok pcerr peok peerr + -- consumed-error case for m + mcerr = cerr + + -- empty-error case for m + meerr = eerr + + in unParser m s mcok mcerr meok meerr + + +mergeErrorReply :: ParseError -> Reply s u a -> Reply s u a +mergeErrorReply err1 reply -- XXX where to put it? 
+ = case reply of + Ok x state err2 -> Ok x state (mergeError err1 err2) + Error err2 -> Error (mergeError err1 err2) + +parserFail :: String -> ParsecT s u m a +parserFail msg + = ParsecT $ \s _ _ _ eerr -> + eerr $ newErrorMessage (Message msg) (statePos s) + +instance MonadPlus (ParsecT s u m) where + mzero = parserZero + mplus p1 p2 = parserPlus p1 p2 + +-- | @parserZero@ always fails without consuming any input. @parserZero@ is defined +-- equal to the 'mzero' member of the 'MonadPlus' class and to the 'Control.Applicative.empty' member +-- of the 'Control.Applicative.Alternative' class. + +parserZero :: ParsecT s u m a +parserZero + = ParsecT $ \s _ _ _ eerr -> + eerr $ unknownError s + +parserPlus :: ParsecT s u m a -> ParsecT s u m a -> ParsecT s u m a +{-# INLINE parserPlus #-} +parserPlus m n + = ParsecT $ \s cok cerr eok eerr -> + let + meerr err = + let + neok y s' err' = eok y s' (mergeError err err') + neerr err' = eerr $ mergeError err err' + in unParser n s cok cerr neok neerr + in unParser m s cok cerr eok meerr + +instance MonadTrans (ParsecT s u) where + lift amb = ParsecT $ \s _ _ eok _ -> do + a <- amb + eok a s $ unknownError s + +infix 0 +infixr 1 <|> + +-- | The parser @p \ msg@ behaves as parser @p@, but whenever the +-- parser @p@ fails /without consuming any input/, it replaces expect +-- error messages with the expect error message @msg@. +-- +-- This is normally used at the end of a set alternatives where we want +-- to return an error message in terms of a higher level construct +-- rather than returning all possible characters. For example, if the +-- @expr@ parser from the 'try' example would fail, the error +-- message is: '...: expecting expression'. Without the @(\)@ +-- combinator, the message would be like '...: expecting \"let\" or +-- letter', which is less friendly. + +() :: (ParsecT s u m a) -> String -> (ParsecT s u m a) +p msg = label p msg + +-- | This combinator implements choice. 
The parser @p \<|> q@ first +-- applies @p@. If it succeeds, the value of @p@ is returned. If @p@ +-- fails /without consuming any input/, parser @q@ is tried. This +-- combinator is defined equal to the 'mplus' member of the 'MonadPlus' +-- class and the ('Control.Applicative.<|>') member of 'Control.Applicative.Alternative'. +-- +-- The parser is called /predictive/ since @q@ is only tried when +-- parser @p@ didn't consume any input (i.e.. the look ahead is 1). +-- This non-backtracking behaviour allows for both an efficient +-- implementation of the parser combinators and the generation of good +-- error messages. + +(<|>) :: (ParsecT s u m a) -> (ParsecT s u m a) -> (ParsecT s u m a) +p1 <|> p2 = mplus p1 p2 + +-- | A synonym for @\@, but as a function instead of an operator. +label :: ParsecT s u m a -> String -> ParsecT s u m a +label p msg + = labels p [msg] + +labels :: ParsecT s u m a -> [String] -> ParsecT s u m a +labels p msgs = + ParsecT $ \s cok cerr eok eerr -> + let eok' x s' error = eok x s' $ if errorIsUnknown error + then error + else setExpectErrors error msgs + eerr' err = eerr $ setExpectErrors err msgs + + in unParser p s cok cerr eok' eerr' + + where + setExpectErrors err [] = setErrorMessage (Expect "") err + setExpectErrors err [msg] = setErrorMessage (Expect msg) err + setExpectErrors err (msg:msgs) + = foldr (\msg' err' -> addErrorMessage (Expect msg') err') + (setErrorMessage (Expect msg) err) msgs + +-- TODO: There should be a stronger statement that can be made about this + +-- | An instance of @Stream@ has stream type @s@, underlying monad @m@ and token type @t@ determined by the stream +-- +-- Some rough guidelines for a \"correct\" instance of Stream: +-- +-- * unfoldM uncons gives the [t] corresponding to the stream +-- +-- * A @Stream@ instance is responsible for maintaining the \"position within the stream\" in the stream state @s@. This is trivial unless you are using the monad in a non-trivial way. 
+ +class (Monad m) => Stream s m t | s -> t where + uncons :: s -> m (Maybe (t,s)) + +instance (Monad m) => Stream [tok] m tok where + uncons [] = return $ Nothing + uncons (t:ts) = return $ Just (t,ts) + {-# INLINE uncons #-} + + +instance (Monad m) => Stream CL.ByteString m Char where + uncons = return . CL.uncons + +instance (Monad m) => Stream C.ByteString m Char where + uncons = return . C.uncons + +instance (Monad m) => Stream Text.Text m Char where + uncons = return . Text.uncons + {-# INLINE uncons #-} + +instance (Monad m) => Stream TextL.Text m Char where + uncons = return . TextL.uncons + {-# INLINE uncons #-} + + +tokens :: (Stream s m t, Eq t) + => ([t] -> String) -- Pretty print a list of tokens + -> (SourcePos -> [t] -> SourcePos) + -> [t] -- List of tokens to parse + -> ParsecT s u m [t] +{-# INLINE tokens #-} +tokens _ _ [] + = ParsecT $ \s _ _ eok _ -> + eok [] s $ unknownError s +tokens showTokens nextposs tts@(tok:toks) + = ParsecT $ \(State input pos u) cok cerr eok eerr -> + let + errEof = (setErrorMessage (Expect (showTokens tts)) + (newErrorMessage (SysUnExpect "") pos)) + + errExpect x = (setErrorMessage (Expect (showTokens tts)) + (newErrorMessage (SysUnExpect (showTokens [x])) pos)) + + walk [] rs = ok rs + walk (t:ts) rs = do + sr <- uncons rs + case sr of + Nothing -> cerr $ errEof + Just (x,xs) | t == x -> walk ts xs + | otherwise -> cerr $ errExpect x + + ok rs = let pos' = nextposs pos tts + s' = State rs pos' u + in cok tts s' (newErrorUnknown pos') + in do + sr <- uncons input + case sr of + Nothing -> eerr $ errEof + Just (x,xs) + | tok == x -> walk toks xs + | otherwise -> eerr $ errExpect x + +-- | The parser @try p@ behaves like parser @p@, except that it +-- pretends that it hasn't consumed any input when an error occurs. +-- +-- This combinator is used whenever arbitrary look ahead is needed. 
+-- Since it pretends that it hasn't consumed any input when @p@ fails, +-- the ('<|>') combinator will try its second alternative even when the +-- first parser failed while consuming input. +-- +-- The @try@ combinator can for example be used to distinguish +-- identifiers and reserved words. Both reserved words and identifiers +-- are a sequence of letters. Whenever we expect a certain reserved +-- word where we can also expect an identifier we have to use the @try@ +-- combinator. Suppose we write: +-- +-- > expr = letExpr <|> identifier "expression" +-- > +-- > letExpr = do{ string "let"; ... } +-- > identifier = many1 letter +-- +-- If the user writes \"lexical\", the parser fails with: @unexpected +-- \'x\', expecting \'t\' in \"let\"@. Indeed, since the ('<|>') combinator +-- only tries alternatives when the first alternative hasn't consumed +-- input, the @identifier@ parser is never tried (because the prefix +-- \"le\" of the @string \"let\"@ parser is already consumed). The +-- right behaviour can be obtained by adding the @try@ combinator: +-- +-- > expr = letExpr <|> identifier "expression" +-- > +-- > letExpr = do{ try (string "let"); ... } +-- > identifier = many1 letter + +try :: ParsecT s u m a -> ParsecT s u m a +try p = + ParsecT $ \s cok _ eok eerr -> + unParser p s cok eerr eok eerr + +-- | @lookAhead p@ parses @p@ without consuming any input. +-- +-- If @p@ fails and consumes some input, so does @lookAhead@. Combine with 'try' +-- if this is undesirable. + +lookAhead :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m a +lookAhead p = + ParsecT $ \s _ cerr eok eerr -> do + let eok' a _ _ = eok a s (newErrorUnknown (statePos s)) + unParser p s eok' cerr eok' eerr + +-- | The parser @token showTok posFromTok testTok@ accepts a token @t@ +-- with result @x@ when the function @testTok t@ returns @'Just' x@. The +-- source position of the @t@ should be returned by @posFromTok t@ and +-- the token can be shown using @showTok t@. 
+-- +-- This combinator is expressed in terms of 'tokenPrim'. +-- It is used to accept user defined token streams. For example, +-- suppose that we have a stream of basic tokens tupled with source +-- positions. We can than define a parser that accepts single tokens as: +-- +-- > mytoken x +-- > = token showTok posFromTok testTok +-- > where +-- > showTok (pos,t) = show t +-- > posFromTok (pos,t) = pos +-- > testTok (pos,t) = if x == t then Just t else Nothing + +token :: (Stream s Identity t) + => (t -> String) -- ^ Token pretty-printing function. + -> (t -> SourcePos) -- ^ Computes the position of a token. + -> (t -> Maybe a) -- ^ Matching function for the token to parse. + -> Parsec s u a +token showToken tokpos test = tokenPrim showToken nextpos test + where + nextpos _ tok ts = case runIdentity (uncons ts) of + Nothing -> tokpos tok + Just (tok',_) -> tokpos tok' + +-- | The parser @tokenPrim showTok nextPos testTok@ accepts a token @t@ +-- with result @x@ when the function @testTok t@ returns @'Just' x@. The +-- token can be shown using @showTok t@. The position of the /next/ +-- token should be returned when @nextPos@ is called with the current +-- source position @pos@, the current token @t@ and the rest of the +-- tokens @toks@, @nextPos pos t toks@. +-- +-- This is the most primitive combinator for accepting tokens. For +-- example, the 'Text.Parsec.Char.char' parser could be implemented as: +-- +-- > char c +-- > = tokenPrim showChar nextPos testChar +-- > where +-- > showChar x = "'" ++ x ++ "'" +-- > testChar x = if x == c then Just x else Nothing +-- > nextPos pos x xs = updatePosChar pos x + +tokenPrim :: (Stream s m t) + => (t -> String) -- ^ Token pretty-printing function. + -> (SourcePos -> t -> s -> SourcePos) -- ^ Next position calculating function. + -> (t -> Maybe a) -- ^ Matching function for the token to parse. 
+ -> ParsecT s u m a +{-# INLINE tokenPrim #-} +tokenPrim showToken nextpos test = tokenPrimEx showToken nextpos Nothing test + +tokenPrimEx :: (Stream s m t) + => (t -> String) + -> (SourcePos -> t -> s -> SourcePos) + -> Maybe (SourcePos -> t -> s -> u -> u) + -> (t -> Maybe a) + -> ParsecT s u m a +{-# INLINE tokenPrimEx #-} +tokenPrimEx showToken nextpos Nothing test + = ParsecT $ \(State input pos user) cok cerr eok eerr -> do + r <- uncons input + case r of + Nothing -> eerr $ unexpectError "" pos + Just (c,cs) + -> case test c of + Just x -> let newpos = nextpos pos c cs + newstate = State cs newpos user + in seq newpos $ seq newstate $ + cok x newstate (newErrorUnknown newpos) + Nothing -> eerr $ unexpectError (showToken c) pos +tokenPrimEx showToken nextpos (Just nextState) test + = ParsecT $ \(State input pos user) cok cerr eok eerr -> do + r <- uncons input + case r of + Nothing -> eerr $ unexpectError "" pos + Just (c,cs) + -> case test c of + Just x -> let newpos = nextpos pos c cs + newUser = nextState pos c cs user + newstate = State cs newpos newUser + in seq newpos $ seq newstate $ + cok x newstate $ newErrorUnknown newpos + Nothing -> eerr $ unexpectError (showToken c) pos + +unexpectError msg pos = newErrorMessage (SysUnExpect msg) pos + + +-- | @many p@ applies the parser @p@ /zero/ or more times. Returns a +-- list of the returned values of @p@. +-- +-- > identifier = do{ c <- letter +-- > ; cs <- many (alphaNum <|> char '_') +-- > ; return (c:cs) +-- > } + +many :: ParsecT s u m a -> ParsecT s u m [a] +many p + = do xs <- manyAccum (:) p + return (reverse xs) + +-- | @skipMany p@ applies the parser @p@ /zero/ or more times, skipping +-- its result. 
+-- +-- > spaces = skipMany space + +skipMany :: ParsecT s u m a -> ParsecT s u m () +skipMany p + = do manyAccum (\_ _ -> []) p + return () + +manyAccum :: (a -> [a] -> [a]) + -> ParsecT s u m a + -> ParsecT s u m [a] +manyAccum acc p = + ParsecT $ \s cok cerr eok eerr -> + let walk xs x s' err = + unParser p s' + (seq xs $ walk $ acc x xs) -- consumed-ok + cerr -- consumed-err + manyErr -- empty-ok + (\e -> cok (acc x xs) s' e) -- empty-err + in unParser p s (walk []) cerr manyErr (\e -> eok [] s e) + +manyErr = error "Text.ParserCombinators.Parsec.Prim.many: combinator 'many' is applied to a parser that accepts an empty string." + + +-- < Running a parser: monadic (runPT) and pure (runP) + +runPT :: (Stream s m t) + => ParsecT s u m a -> u -> SourceName -> s -> m (Either ParseError a) +runPT p u name s + = do res <- runParsecT p (State s (initialPos name) u) + r <- parserReply res + case r of + Ok x _ _ -> return (Right x) + Error err -> return (Left err) + where + parserReply res + = case res of + Consumed r -> r + Empty r -> r + +runP :: (Stream s Identity t) + => Parsec s u a -> u -> SourceName -> s -> Either ParseError a +runP p u name s = runIdentity $ runPT p u name s + +-- | The most general way to run a parser. @runParserT p state filePath +-- input@ runs parser @p@ on the input list of tokens @input@, +-- obtained from source @filePath@ with the initial user state @st@. +-- The @filePath@ is only used in error messages and may be the empty +-- string. Returns a computation in the underlying monad @m@ that return either a 'ParseError' ('Left') or a +-- value of type @a@ ('Right'). + +runParserT :: (Stream s m t) + => ParsecT s u m a -> u -> SourceName -> s -> m (Either ParseError a) +runParserT = runPT + +-- | The most general way to run a parser over the Identity monad. @runParser p state filePath +-- input@ runs parser @p@ on the input list of tokens @input@, +-- obtained from source @filePath@ with the initial user state @st@. 
+-- The @filePath@ is only used in error messages and may be the empty +-- string. Returns either a 'ParseError' ('Left') or a +-- value of type @a@ ('Right'). +-- +-- > parseFromFile p fname +-- > = do{ input <- readFile fname +-- > ; return (runParser p () fname input) +-- > } + +runParser :: (Stream s Identity t) + => Parsec s u a -> u -> SourceName -> s -> Either ParseError a +runParser = runP + +-- | @parse p filePath input@ runs a parser @p@ over Identity without user +-- state. The @filePath@ is only used in error messages and may be the +-- empty string. Returns either a 'ParseError' ('Left') +-- or a value of type @a@ ('Right'). +-- +-- > main = case (parse numbers "" "11, 2, 43") of +-- > Left err -> print err +-- > Right xs -> print (sum xs) +-- > +-- > numbers = commaSep integer + +parse :: (Stream s Identity t) + => Parsec s () a -> SourceName -> s -> Either ParseError a +parse p = runP p () + +-- | The expression @parseTest p input@ applies a parser @p@ against +-- input @input@ and prints the result to stdout. Used for testing +-- parsers. + +parseTest :: (Stream s Identity t, Show a) + => Parsec s () a -> s -> IO () +parseTest p input + = case parse p "" input of + Left err -> do putStr "parse error at " + print err + Right x -> print x + +-- < Parser state combinators + +-- | Returns the current source position. See also 'SourcePos'. + +getPosition :: (Monad m) => ParsecT s u m SourcePos +getPosition = do state <- getParserState + return (statePos state) + +-- | Returns the current input + +getInput :: (Monad m) => ParsecT s u m s +getInput = do state <- getParserState + return (stateInput state) + +-- | @setPosition pos@ sets the current source position to @pos@. + +setPosition :: (Monad m) => SourcePos -> ParsecT s u m () +setPosition pos + = do updateParserState (\(State input _ user) -> State input pos user) + return () + +-- | @setInput input@ continues parsing with @input@. 
The 'getInput' and +-- @setInput@ functions can for example be used to deal with #include +-- files. + +setInput :: (Monad m) => s -> ParsecT s u m () +setInput input + = do updateParserState (\(State _ pos user) -> State input pos user) + return () + +-- | Returns the full parser state as a 'State' record. + +getParserState :: (Monad m) => ParsecT s u m (State s u) +getParserState = updateParserState id + +-- | @setParserState st@ set the full parser state to @st@. + +setParserState :: (Monad m) => State s u -> ParsecT s u m (State s u) +setParserState st = updateParserState (const st) + +-- | @updateParserState f@ applies function @f@ to the parser state. + +updateParserState :: (State s u -> State s u) -> ParsecT s u m (State s u) +updateParserState f = + ParsecT $ \s _ _ eok _ -> + let s' = f s + in eok s' s' $ unknownError s' + +-- < User state combinators + +-- | Returns the current user state. + +getState :: (Monad m) => ParsecT s u m u +getState = stateUser `liftM` getParserState + +-- | @putState st@ set the user state to @st@. + +putState :: (Monad m) => u -> ParsecT s u m () +putState u = do updateParserState $ \s -> s { stateUser = u } + return () + +-- | @modifyState f@ applies function @f@ to the user state. Suppose +-- that we want to count identifiers in a source, we could use the user +-- state as: +-- +-- > expr = do{ x <- identifier +-- > ; modifyState (+1) +-- > ; return (Id x) +-- > } + +modifyState :: (Monad m) => (u -> u) -> ParsecT s u m () +modifyState f = do updateParserState $ \s -> s { stateUser = f (stateUser s) } + return () + +-- XXX Compat + +-- | An alias for putState for backwards compatibility. + +setState :: (Monad m) => u -> ParsecT s u m () +setState = putState + +-- | An alias for modifyState for backwards compatibility. 
+ +updateState :: (Monad m) => (u -> u) -> ParsecT s u m () +updateState = modifyState diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/Parsec/String.hs cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/Parsec/String.hs --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/Parsec/String.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/Parsec/String.hs 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,37 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Text.Parsec.String +-- Copyright : (c) Paolo Martini 2007 +-- License : BSD-style (see the file libraries/parsec/LICENSE) +-- +-- Maintainer : derek.a.elkins@gmail.com +-- Stability : provisional +-- Portability : portable +-- +-- Make Strings an instance of 'Stream' with 'Char' token type. +-- +----------------------------------------------------------------------------- + +module Text.Parsec.String + ( Parser, GenParser, parseFromFile + ) where + +import Text.Parsec.Error +import Text.Parsec.Prim + +type Parser = Parsec String () +type GenParser tok st = Parsec [tok] st + +-- | @parseFromFile p filePath@ runs a string parser @p@ on the +-- input read from @filePath@ using 'Prelude.readFile'. Returns either a 'ParseError' +-- ('Left') or a value of type @a@ ('Right'). 
+-- +-- > main = do{ result <- parseFromFile numbers "digits.txt" +-- > ; case result of +-- > Left err -> print err +-- > Right xs -> print (sum xs) +-- > } +parseFromFile :: Parser a -> String -> IO (Either ParseError a) +parseFromFile p fname + = do input <- readFile fname + return (runP p () fname input) diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/Parsec/Text/Lazy.hs cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/Parsec/Text/Lazy.hs --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/Parsec/Text/Lazy.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/Parsec/Text/Lazy.hs 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,24 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Text.Parsec.String +-- Copyright : (c) Antoine Latter 2011 +-- License : BSD-style (see the file libraries/parsec/LICENSE) +-- +-- Maintainer : aslatter@gmail.com +-- Stability : provisional +-- Portability : portable +-- +-- Convenience definitions for working with lazy 'Text.Text'. 
+-- +----------------------------------------------------------------------------- + +module Text.Parsec.Text.Lazy + ( Parser, GenParser + ) where + +import qualified Data.Text.Lazy as Text +import Text.Parsec.Error +import Text.Parsec.Prim + +type Parser = Parsec Text.Text () +type GenParser st = Parsec Text.Text st diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/Parsec/Text.hs cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/Parsec/Text.hs --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/Parsec/Text.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/Parsec/Text.hs 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,24 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Text.Parsec.String +-- Copyright : (c) Antoine Latter 2011 +-- License : BSD-style (see the file libraries/parsec/LICENSE) +-- +-- Maintainer : aslatter@gmail.com +-- Stability : provisional +-- Portability : portable +-- +-- Convinience definitions for working with 'Text.Text'. 
+-- +----------------------------------------------------------------------------- + +module Text.Parsec.Text + ( Parser, GenParser + ) where + +import qualified Data.Text as Text +import Text.Parsec.Error +import Text.Parsec.Prim + +type Parser = Parsec Text.Text () +type GenParser st = Parsec Text.Text st diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/Parsec/Token.hs cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/Parsec/Token.hs --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/Parsec/Token.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/Parsec/Token.hs 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,731 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Text.Parsec.Token +-- Copyright : (c) Daan Leijen 1999-2001, (c) Paolo Martini 2007 +-- License : BSD-style (see the LICENSE file) +-- +-- Maintainer : derek.a.elkins@gmail.com +-- Stability : provisional +-- Portability : non-portable (uses local universal quantification: PolymorphicComponents) +-- +-- A helper module to parse lexical elements (tokens). See 'makeTokenParser' +-- for a description of how to use it. +-- +----------------------------------------------------------------------------- + +{-# LANGUAGE PolymorphicComponents #-} +{-# OPTIONS_GHC -fno-warn-name-shadowing #-} + +module Text.Parsec.Token + ( LanguageDef + , GenLanguageDef (..) + , TokenParser + , GenTokenParser (..) 
+ , makeTokenParser + ) where + +import Data.Char ( isAlpha, toLower, toUpper, isSpace, digitToInt ) +import Data.Typeable ( Typeable ) +import Data.List ( nub, sort ) +import Control.Monad.Identity +import Text.Parsec.Prim +import Text.Parsec.Char +import Text.Parsec.Combinator + +----------------------------------------------------------- +-- Language Definition +----------------------------------------------------------- + +type LanguageDef st = GenLanguageDef String st Identity + +-- | The @GenLanguageDef@ type is a record that contains all parameterizable +-- features of the "Text.Parsec.Token" module. The module "Text.Parsec.Language" +-- contains some default definitions. + +data GenLanguageDef s u m + = LanguageDef { + + -- | Describes the start of a block comment. Use the empty string if the + -- language doesn't support block comments. For example \"\/*\". + + commentStart :: String, + + -- | Describes the end of a block comment. Use the empty string if the + -- language doesn't support block comments. For example \"*\/\". + + commentEnd :: String, + + -- | Describes the start of a line comment. Use the empty string if the + -- language doesn't support line comments. For example \"\/\/\". + + commentLine :: String, + + -- | Set to 'True' if the language supports nested block comments. + + nestedComments :: Bool, + + -- | This parser should accept any start characters of identifiers. For + -- example @letter \<|> char \'_\'@. + + identStart :: ParsecT s u m Char, + + -- | This parser should accept any legal tail characters of identifiers. + -- For example @alphaNum \<|> char \'_\'@. + + identLetter :: ParsecT s u m Char, + + -- | This parser should accept any start characters of operators. For + -- example @oneOf \":!#$%&*+.\/\<=>?\@\\\\^|-~\"@ + + opStart :: ParsecT s u m Char, + + -- | This parser should accept any legal tail characters of operators. 
+ -- Note that this parser should even be defined if the language doesn't + -- support user-defined operators, or otherwise the 'reservedOp' + -- parser won't work correctly. + + opLetter :: ParsecT s u m Char, + + -- | The list of reserved identifiers. + + reservedNames :: [String], + + -- | The list of reserved operators. + + reservedOpNames:: [String], + + -- | Set to 'True' if the language is case sensitive. + + caseSensitive :: Bool + + } +#if MIN_VERSION_base(4,7,0) + deriving ( Typeable ) +#endif + +----------------------------------------------------------- +-- A first class module: TokenParser +----------------------------------------------------------- + +type TokenParser st = GenTokenParser String st Identity + +-- | The type of the record that holds lexical parsers that work on +-- @s@ streams with state @u@ over a monad @m@. + +data GenTokenParser s u m + = TokenParser { + + -- | This lexeme parser parses a legal identifier. Returns the identifier + -- string. This parser will fail on identifiers that are reserved + -- words. Legal identifier (start) characters and reserved words are + -- defined in the 'LanguageDef' that is passed to + -- 'makeTokenParser'. An @identifier@ is treated as + -- a single token using 'try'. + + identifier :: ParsecT s u m String, + + -- | The lexeme parser @reserved name@ parses @symbol + -- name@, but it also checks that the @name@ is not a prefix of a + -- valid identifier. A @reserved@ word is treated as a single token + -- using 'try'. + + reserved :: String -> ParsecT s u m (), + + -- | This lexeme parser parses a legal operator. Returns the name of the + -- operator. This parser will fail on any operators that are reserved + -- operators. Legal operator (start) characters and reserved operators + -- are defined in the 'LanguageDef' that is passed to + -- 'makeTokenParser'. An @operator@ is treated as a + -- single token using 'try'. 
+ + operator :: ParsecT s u m String, + + -- |The lexeme parser @reservedOp name@ parses @symbol + -- name@, but it also checks that the @name@ is not a prefix of a + -- valid operator. A @reservedOp@ is treated as a single token using + -- 'try'. + + reservedOp :: String -> ParsecT s u m (), + + + -- | This lexeme parser parses a single literal character. Returns the + -- literal character value. This parsers deals correctly with escape + -- sequences. The literal character is parsed according to the grammar + -- rules defined in the Haskell report (which matches most programming + -- languages quite closely). + + charLiteral :: ParsecT s u m Char, + + -- | This lexeme parser parses a literal string. Returns the literal + -- string value. This parsers deals correctly with escape sequences and + -- gaps. The literal string is parsed according to the grammar rules + -- defined in the Haskell report (which matches most programming + -- languages quite closely). + + stringLiteral :: ParsecT s u m String, + + -- | This lexeme parser parses a natural number (a positive whole + -- number). Returns the value of the number. The number can be + -- specified in 'decimal', 'hexadecimal' or + -- 'octal'. The number is parsed according to the grammar + -- rules in the Haskell report. + + natural :: ParsecT s u m Integer, + + -- | This lexeme parser parses an integer (a whole number). This parser + -- is like 'natural' except that it can be prefixed with + -- sign (i.e. \'-\' or \'+\'). Returns the value of the number. The + -- number can be specified in 'decimal', 'hexadecimal' + -- or 'octal'. The number is parsed according + -- to the grammar rules in the Haskell report. + + integer :: ParsecT s u m Integer, + + -- | This lexeme parser parses a floating point value. Returns the value + -- of the number. The number is parsed according to the grammar rules + -- defined in the Haskell report. 
+ + float :: ParsecT s u m Double, + + -- | This lexeme parser parses either 'natural' or a 'float'. + -- Returns the value of the number. This parsers deals with + -- any overlap in the grammar rules for naturals and floats. The number + -- is parsed according to the grammar rules defined in the Haskell report. + + naturalOrFloat :: ParsecT s u m (Either Integer Double), + + -- | Parses a positive whole number in the decimal system. Returns the + -- value of the number. + + decimal :: ParsecT s u m Integer, + + -- | Parses a positive whole number in the hexadecimal system. The number + -- should be prefixed with \"0x\" or \"0X\". Returns the value of the + -- number. + + hexadecimal :: ParsecT s u m Integer, + + -- | Parses a positive whole number in the octal system. The number + -- should be prefixed with \"0o\" or \"0O\". Returns the value of the + -- number. + + octal :: ParsecT s u m Integer, + + -- | Lexeme parser @symbol s@ parses 'string' @s@ and skips + -- trailing white space. + + symbol :: String -> ParsecT s u m String, + + -- | @lexeme p@ first applies parser @p@ and than the 'whiteSpace' + -- parser, returning the value of @p@. Every lexical + -- token (lexeme) is defined using @lexeme@, this way every parse + -- starts at a point without white space. Parsers that use @lexeme@ are + -- called /lexeme/ parsers in this document. + -- + -- The only point where the 'whiteSpace' parser should be + -- called explicitly is the start of the main parser in order to skip + -- any leading white space. + -- + -- > mainParser = do{ whiteSpace + -- > ; ds <- many (lexeme digit) + -- > ; eof + -- > ; return (sum ds) + -- > } + + lexeme :: forall a. ParsecT s u m a -> ParsecT s u m a, + + -- | Parses any white space. White space consists of /zero/ or more + -- occurrences of a 'space', a line comment or a block (multi + -- line) comment. Block comments may be nested. 
How comments are + -- started and ended is defined in the 'LanguageDef' + -- that is passed to 'makeTokenParser'. + + whiteSpace :: ParsecT s u m (), + + -- | Lexeme parser @parens p@ parses @p@ enclosed in parenthesis, + -- returning the value of @p@. + + parens :: forall a. ParsecT s u m a -> ParsecT s u m a, + + -- | Lexeme parser @braces p@ parses @p@ enclosed in braces (\'{\' and + -- \'}\'), returning the value of @p@. + + braces :: forall a. ParsecT s u m a -> ParsecT s u m a, + + -- | Lexeme parser @angles p@ parses @p@ enclosed in angle brackets (\'\<\' + -- and \'>\'), returning the value of @p@. + + angles :: forall a. ParsecT s u m a -> ParsecT s u m a, + + -- | Lexeme parser @brackets p@ parses @p@ enclosed in brackets (\'[\' + -- and \']\'), returning the value of @p@. + + brackets :: forall a. ParsecT s u m a -> ParsecT s u m a, + + -- | DEPRECATED: Use 'brackets'. + + squares :: forall a. ParsecT s u m a -> ParsecT s u m a, + + -- | Lexeme parser |semi| parses the character \';\' and skips any + -- trailing white space. Returns the string \";\". + + semi :: ParsecT s u m String, + + -- | Lexeme parser @comma@ parses the character \',\' and skips any + -- trailing white space. Returns the string \",\". + + comma :: ParsecT s u m String, + + -- | Lexeme parser @colon@ parses the character \':\' and skips any + -- trailing white space. Returns the string \":\". + + colon :: ParsecT s u m String, + + -- | Lexeme parser @dot@ parses the character \'.\' and skips any + -- trailing white space. Returns the string \".\". + + dot :: ParsecT s u m String, + + -- | Lexeme parser @semiSep p@ parses /zero/ or more occurrences of @p@ + -- separated by 'semi'. Returns a list of values returned by + -- @p@. + + semiSep :: forall a . ParsecT s u m a -> ParsecT s u m [a], + + -- | Lexeme parser @semiSep1 p@ parses /one/ or more occurrences of @p@ + -- separated by 'semi'. Returns a list of values returned by @p@. + + semiSep1 :: forall a . 
ParsecT s u m a -> ParsecT s u m [a], + + -- | Lexeme parser @commaSep p@ parses /zero/ or more occurrences of + -- @p@ separated by 'comma'. Returns a list of values returned + -- by @p@. + + commaSep :: forall a . ParsecT s u m a -> ParsecT s u m [a], + + -- | Lexeme parser @commaSep1 p@ parses /one/ or more occurrences of + -- @p@ separated by 'comma'. Returns a list of values returned + -- by @p@. + + commaSep1 :: forall a . ParsecT s u m a -> ParsecT s u m [a] + } +#if MIN_VERSION_base(4,7,0) + deriving ( Typeable ) +#endif + +----------------------------------------------------------- +-- Given a LanguageDef, create a token parser. +----------------------------------------------------------- + +-- | The expression @makeTokenParser language@ creates a 'GenTokenParser' +-- record that contains lexical parsers that are +-- defined using the definitions in the @language@ record. +-- +-- The use of this function is quite stylized - one imports the +-- appropiate language definition and selects the lexical parsers that +-- are needed from the resulting 'GenTokenParser'. +-- +-- > module Main where +-- > +-- > import Text.Parsec +-- > import qualified Text.Parsec.Token as P +-- > import Text.Parsec.Language (haskellDef) +-- > +-- > -- The parser +-- > ... +-- > +-- > expr = parens expr +-- > <|> identifier +-- > <|> ... +-- > +-- > +-- > -- The lexer +-- > lexer = P.makeTokenParser haskellDef +-- > +-- > parens = P.parens lexer +-- > braces = P.braces lexer +-- > identifier = P.identifier lexer +-- > reserved = P.reserved lexer +-- > ... 
+ +makeTokenParser :: (Stream s m Char) + => GenLanguageDef s u m -> GenTokenParser s u m +makeTokenParser languageDef + = TokenParser{ identifier = identifier + , reserved = reserved + , operator = operator + , reservedOp = reservedOp + + , charLiteral = charLiteral + , stringLiteral = stringLiteral + , natural = natural + , integer = integer + , float = float + , naturalOrFloat = naturalOrFloat + , decimal = decimal + , hexadecimal = hexadecimal + , octal = octal + + , symbol = symbol + , lexeme = lexeme + , whiteSpace = whiteSpace + + , parens = parens + , braces = braces + , angles = angles + , brackets = brackets + , squares = brackets + , semi = semi + , comma = comma + , colon = colon + , dot = dot + , semiSep = semiSep + , semiSep1 = semiSep1 + , commaSep = commaSep + , commaSep1 = commaSep1 + } + where + + ----------------------------------------------------------- + -- Bracketing + ----------------------------------------------------------- + parens p = between (symbol "(") (symbol ")") p + braces p = between (symbol "{") (symbol "}") p + angles p = between (symbol "<") (symbol ">") p + brackets p = between (symbol "[") (symbol "]") p + + semi = symbol ";" + comma = symbol "," + dot = symbol "." 
+ colon = symbol ":" + + commaSep p = sepBy p comma + semiSep p = sepBy p semi + + commaSep1 p = sepBy1 p comma + semiSep1 p = sepBy1 p semi + + + ----------------------------------------------------------- + -- Chars & Strings + ----------------------------------------------------------- + charLiteral = lexeme (between (char '\'') + (char '\'' "end of character") + characterChar ) + "character" + + characterChar = charLetter <|> charEscape + "literal character" + + charEscape = do{ char '\\'; escapeCode } + charLetter = satisfy (\c -> (c /= '\'') && (c /= '\\') && (c > '\026')) + + + + stringLiteral = lexeme ( + do{ str <- between (char '"') + (char '"' "end of string") + (many stringChar) + ; return (foldr (maybe id (:)) "" str) + } + "literal string") + + stringChar = do{ c <- stringLetter; return (Just c) } + <|> stringEscape + "string character" + + stringLetter = satisfy (\c -> (c /= '"') && (c /= '\\') && (c > '\026')) + + stringEscape = do{ char '\\' + ; do{ escapeGap ; return Nothing } + <|> do{ escapeEmpty; return Nothing } + <|> do{ esc <- escapeCode; return (Just esc) } + } + + escapeEmpty = char '&' + escapeGap = do{ many1 space + ; char '\\' "end of string gap" + } + + + + -- escape codes + escapeCode = charEsc <|> charNum <|> charAscii <|> charControl + "escape code" + + charControl = do{ char '^' + ; code <- upper + ; return (toEnum (fromEnum code - fromEnum 'A' + 1)) + } + + charNum = do{ code <- decimal + <|> do{ char 'o'; number 8 octDigit } + <|> do{ char 'x'; number 16 hexDigit } + ; if code > 0x10FFFF + then fail "invalid escape sequence" + else return (toEnum (fromInteger code)) + } + + charEsc = choice (map parseEsc escMap) + where + parseEsc (c,code) = do{ char c; return code } + + charAscii = choice (map parseAscii asciiMap) + where + parseAscii (asc,code) = try (do{ string asc; return code }) + + + -- escape code tables + escMap = zip ("abfnrtv\\\"\'") ("\a\b\f\n\r\t\v\\\"\'") + asciiMap = zip (ascii3codes ++ ascii2codes) (ascii3 ++ 
ascii2) + + ascii2codes = ["BS","HT","LF","VT","FF","CR","SO","SI","EM", + "FS","GS","RS","US","SP"] + ascii3codes = ["NUL","SOH","STX","ETX","EOT","ENQ","ACK","BEL", + "DLE","DC1","DC2","DC3","DC4","NAK","SYN","ETB", + "CAN","SUB","ESC","DEL"] + + ascii2 = ['\BS','\HT','\LF','\VT','\FF','\CR','\SO','\SI', + '\EM','\FS','\GS','\RS','\US','\SP'] + ascii3 = ['\NUL','\SOH','\STX','\ETX','\EOT','\ENQ','\ACK', + '\BEL','\DLE','\DC1','\DC2','\DC3','\DC4','\NAK', + '\SYN','\ETB','\CAN','\SUB','\ESC','\DEL'] + + + ----------------------------------------------------------- + -- Numbers + ----------------------------------------------------------- + naturalOrFloat = lexeme (natFloat) "number" + + float = lexeme floating "float" + integer = lexeme int "integer" + natural = lexeme nat "natural" + + + -- floats + floating = do{ n <- decimal + ; fractExponent n + } + + + natFloat = do{ char '0' + ; zeroNumFloat + } + <|> decimalFloat + + zeroNumFloat = do{ n <- hexadecimal <|> octal + ; return (Left n) + } + <|> decimalFloat + <|> fractFloat 0 + <|> return (Left 0) + + decimalFloat = do{ n <- decimal + ; option (Left n) + (fractFloat n) + } + + fractFloat n = do{ f <- fractExponent n + ; return (Right f) + } + + fractExponent n = do{ fract <- fraction + ; expo <- option "" exponent' + ; readDouble (show n ++ fract ++ expo) + } + <|> + do{ expo <- exponent' + ; readDouble (show n ++ expo) + } + where + readDouble s = + case reads s of + [(x, "")] -> return x + _ -> parserZero + + fraction = do{ char '.' + ; digits <- many1 digit "fraction" + ; return ('.' 
: digits) + } + "fraction" + + exponent' = do{ oneOf "eE" + ; sign' <- fmap (:[]) (oneOf "+-") <|> return "" + ; e <- decimal "exponent" + ; return ('e' : sign' ++ show e) + } + "exponent" + + + -- integers and naturals + int = do{ f <- lexeme sign + ; n <- nat + ; return (f n) + } + + sign = (char '-' >> return negate) + <|> (char '+' >> return id) + <|> return id + + nat = zeroNumber <|> decimal + + zeroNumber = do{ char '0' + ; hexadecimal <|> octal <|> decimal <|> return 0 + } + "" + + decimal = number 10 digit + hexadecimal = do{ oneOf "xX"; number 16 hexDigit } + octal = do{ oneOf "oO"; number 8 octDigit } + + number base baseDigit + = do{ digits <- many1 baseDigit + ; let n = foldl (\x d -> base*x + toInteger (digitToInt d)) 0 digits + ; seq n (return n) + } + + ----------------------------------------------------------- + -- Operators & reserved ops + ----------------------------------------------------------- + reservedOp name = + lexeme $ try $ + do{ string name + ; notFollowedBy (opLetter languageDef) ("end of " ++ show name) + } + + operator = + lexeme $ try $ + do{ name <- oper + ; if (isReservedOp name) + then unexpected ("reserved operator " ++ show name) + else return name + } + + oper = + do{ c <- (opStart languageDef) + ; cs <- many (opLetter languageDef) + ; return (c:cs) + } + "operator" + + isReservedOp name = + isReserved (sort (reservedOpNames languageDef)) name + + + ----------------------------------------------------------- + -- Identifiers & Reserved words + ----------------------------------------------------------- + reserved name = + lexeme $ try $ + do{ caseString name + ; notFollowedBy (identLetter languageDef) ("end of " ++ show name) + } + + caseString name + | caseSensitive languageDef = string name + | otherwise = do{ walk name; return name } + where + walk [] = return () + walk (c:cs) = do{ caseChar c msg; walk cs } + + caseChar c | isAlpha c = char (toLower c) <|> char (toUpper c) + | otherwise = char c + + msg = show name + + 
+ identifier = + lexeme $ try $ + do{ name <- ident + ; if (isReservedName name) + then unexpected ("reserved word " ++ show name) + else return name + } + + + ident + = do{ c <- identStart languageDef + ; cs <- many (identLetter languageDef) + ; return (c:cs) + } + "identifier" + + isReservedName name + = isReserved theReservedNames caseName + where + caseName | caseSensitive languageDef = name + | otherwise = map toLower name + + + isReserved names name + = scan names + where + scan [] = False + scan (r:rs) = case (compare r name) of + LT -> scan rs + EQ -> True + GT -> False + + theReservedNames + | caseSensitive languageDef = sort reserved + | otherwise = sort . map (map toLower) $ reserved + where + reserved = reservedNames languageDef + + + + ----------------------------------------------------------- + -- White space & symbols + ----------------------------------------------------------- + symbol name + = lexeme (string name) + + lexeme p + = do{ x <- p; whiteSpace; return x } + + + --whiteSpace + whiteSpace + | noLine && noMulti = skipMany (simpleSpace "") + | noLine = skipMany (simpleSpace <|> multiLineComment "") + | noMulti = skipMany (simpleSpace <|> oneLineComment "") + | otherwise = skipMany (simpleSpace <|> oneLineComment <|> multiLineComment "") + where + noLine = null (commentLine languageDef) + noMulti = null (commentStart languageDef) + + + simpleSpace = + skipMany1 (satisfy isSpace) + + oneLineComment = + do{ try (string (commentLine languageDef)) + ; skipMany (satisfy (/= '\n')) + ; return () + } + + multiLineComment = + do { try (string (commentStart languageDef)) + ; inComment + } + + inComment + | nestedComments languageDef = inCommentMulti + | otherwise = inCommentSingle + + inCommentMulti + = do{ try (string (commentEnd languageDef)) ; return () } + <|> do{ multiLineComment ; inCommentMulti } + <|> do{ skipMany1 (noneOf startEnd) ; inCommentMulti } + <|> do{ oneOf startEnd ; inCommentMulti } + "end of comment" + where + startEnd = nub 
(commentEnd languageDef ++ commentStart languageDef) + + inCommentSingle + = do{ try (string (commentEnd languageDef)); return () } + <|> do{ skipMany1 (noneOf startEnd) ; inCommentSingle } + <|> do{ oneOf startEnd ; inCommentSingle } + "end of comment" + where + startEnd = nub (commentEnd languageDef ++ commentStart languageDef) diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/Parsec.hs cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/Parsec.hs --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/Parsec.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/Parsec.hs 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,120 @@ +{-| +Module : Text.Parsec +Copyright : (c) Daan Leijen 1999-2001, (c) Paolo Martini 2007 +License : BSD-style (see the LICENSE file) + +Maintainer : aslatter@gmail.com +Stability : provisional +Portability : portable + +This module includes everything you need to get started writing a +parser. + +By default this module is set up to parse character data. If you'd like +to parse the result of your own tokenizer you should start with the following +imports: + +@ + import Text.Parsec.Prim + import Text.Parsec.Combinator +@ + +Then you can implement your own version of 'satisfy' on top of the 'tokenPrim' +primitive. 
+ +-} + +module Text.Parsec + ( -- * Parsers + ParsecT + , Parsec + , token + , tokens + , runParserT + , runParser + , parse + , parseTest + , getPosition + , getInput + , getState + , putState + , modifyState + -- * Combinators + , (<|>) + , () + , label + , labels + , try + , unexpected + , choice + , many + , many1 + , skipMany + , skipMany1 + , count + , between + , option + , optionMaybe + , optional + , sepBy + , sepBy1 + , endBy + , endBy1 + , sepEndBy + , sepEndBy1 + , chainl + , chainl1 + , chainr + , chainr1 + , eof + , notFollowedBy + , manyTill + , lookAhead + , anyToken + -- * Character Parsing + , module Text.Parsec.Char + -- * Error messages + , ParseError + , errorPos + -- * Position + , SourcePos + , SourceName, Line, Column + , sourceName, sourceLine, sourceColumn + , incSourceLine, incSourceColumn + , setSourceLine, setSourceColumn, setSourceName + -- * Low-level operations + , manyAccum + , tokenPrim + , tokenPrimEx + , runPT + , unknownError + , sysUnExpectError + , mergeErrorReply + , getParserState + , setParserState + , updateParserState + , Stream (..) + , runParsecT + , mkPT + , runP + , Consumed (..) + , Reply (..) + , State (..) 
+ , setPosition + , setInput + -- * Other stuff + , setState + , updateState + , parsecMap + , parserReturn + , parserBind + , parserFail + , parserZero + , parserPlus + ) where + +import Text.Parsec.Pos +import Text.Parsec.Error +import Text.Parsec.Prim +import Text.Parsec.Char +import Text.Parsec.Combinator diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Char.hs cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Char.hs --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Char.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Char.hs 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,40 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Text.ParserCombinators.Parsec.Char +-- Copyright : (c) Paolo Martini 2007 +-- License : BSD-style (see the LICENSE file) +-- +-- Maintainer : derek.a.elkins@gmail.com +-- Stability : provisional +-- Portability : portable +-- +-- Parsec compatibility module +-- +----------------------------------------------------------------------------- + +module Text.ParserCombinators.Parsec.Char + ( CharParser, + spaces, + space, + newline, + tab, + upper, + lower, + alphaNum, + letter, + digit, + hexDigit, + octDigit, + char, + string, + anyChar, + oneOf, + noneOf, + satisfy + ) where + + +import Text.Parsec.Char +import Text.Parsec.String + +type CharParser st = GenParser Char st diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Combinator.hs cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Combinator.hs --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Combinator.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Combinator.hs 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,42 @@ 
+----------------------------------------------------------------------------- +-- | +-- Module : Text.ParserCombinators.Parsec.Combinator +-- Copyright : (c) Paolo Martini 2007 +-- License : BSD-style (see the LICENSE file) +-- +-- Maintainer : derek.a.elkins@gmail.com +-- Stability : provisional +-- Portability : portable +-- +-- Parsec compatibility module +-- +----------------------------------------------------------------------------- + +module Text.ParserCombinators.Parsec.Combinator + ( choice, + count, + between, + option, + optionMaybe, + optional, + skipMany1, + many1, + sepBy, + sepBy1, + endBy, + endBy1, + sepEndBy, + sepEndBy1, + chainl, + chainl1, + chainr, + chainr1, + eof, + notFollowedBy, + manyTill, + lookAhead, + anyToken + ) where + + +import Text.Parsec.Combinator diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Error.hs cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Error.hs --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Error.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Error.hs 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,40 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Text.ParserCombinators.Parsec.Error +-- Copyright : (c) Paolo Martini 2007 +-- License : BSD-style (see the LICENSE file) +-- +-- Maintainer : derek.a.elkins@gmail.com +-- Stability : provisional +-- Portability : portable +-- +-- Parsec compatibility module +-- +----------------------------------------------------------------------------- + +module Text.ParserCombinators.Parsec.Error + ( Message (SysUnExpect,UnExpect,Expect,Message), + messageString, + messageCompare, + messageEq, + ParseError, + errorPos, + errorMessages, + errorIsUnknown, + showErrorMessages, + newErrorMessage, + newErrorUnknown, + addErrorMessage, + setErrorPos, + 
setErrorMessage, + mergeError + ) where + +import Text.Parsec.Error + + +messageCompare :: Message -> Message -> Ordering +messageCompare = compare + +messageEq :: Message -> Message -> Bool +messageEq = (==) diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Expr.hs cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Expr.hs --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Expr.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Expr.hs 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,43 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Text.ParserCombinators.Parsec.Expr +-- Copyright : (c) Paolo Martini 2007 +-- License : BSD-style (see the LICENSE file) +-- +-- Maintainer : derek.a.elkins@gmail.com +-- Stability : provisional +-- Portability : portable +-- +-- Parsec compatibility module +-- +----------------------------------------------------------------------------- + +module Text.ParserCombinators.Parsec.Expr + ( Assoc (AssocNone,AssocLeft,AssocRight), + Operator(..), + OperatorTable, + buildExpressionParser + ) where + +import Text.Parsec.Expr(Assoc(..)) +import qualified Text.Parsec.Expr as N +import Text.ParserCombinators.Parsec(GenParser) + +import Data.Typeable ( Typeable ) +import Control.Monad.Identity + +data Operator tok st a = Infix (GenParser tok st (a -> a -> a)) Assoc + | Prefix (GenParser tok st (a -> a)) + | Postfix (GenParser tok st (a -> a)) + +type OperatorTable tok st a = [[Operator tok st a]] + +convert :: Operator tok st a -> N.Operator [tok] st Identity a +convert (Infix p a) = N.Infix p a +convert (Prefix p) = N.Prefix p +convert (Postfix p) = N.Postfix p + +buildExpressionParser :: OperatorTable tok st a + -> GenParser tok st a + -> GenParser tok st a +buildExpressionParser = N.buildExpressionParser . 
map (map convert) diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Language.hs cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Language.hs --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Language.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Language.hs 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,28 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Text.ParserCombinators.Parsec.Language +-- Copyright : (c) Paolo Martini 2007 +-- License : BSD-style (see the LICENSE file) +-- +-- Maintainer : derek.a.elkins@gmail.com +-- Stability : provisional +-- Portability : portable +-- +-- Parsec compatibility module +-- +----------------------------------------------------------------------------- + +module Text.ParserCombinators.Parsec.Language + ( haskellDef, + haskell, + mondrianDef, + mondrian, + emptyDef, + haskellStyle, + javaStyle, + LanguageDef, + GenLanguageDef(..), + ) where + +import Text.Parsec.Token +import Text.Parsec.Language diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Perm.hs cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Perm.hs --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Perm.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Perm.hs 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,24 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Text.ParserCombinators.Parsec.Perm +-- Copyright : (c) Paolo Martini 2007 +-- License : BSD-style (see the LICENSE file) +-- +-- Maintainer : derek.a.elkins@gmail.com +-- Stability : provisional +-- Portability : portable +-- +-- Parsec compatibility module +-- 
+----------------------------------------------------------------------------- + +module Text.ParserCombinators.Parsec.Perm + ( PermParser, + permute, + (<||>), + (<$$>), + (<|?>), + (<$?>) + ) where + +import Text.Parsec.Perm diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Pos.hs cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Pos.hs --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Pos.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Pos.hs 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,35 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Text.ParserCombinators.Parsec.Pos +-- Copyright : (c) Paolo Martini 2007 +-- License : BSD-style (see the LICENSE file) +-- +-- Maintainer : derek.a.elkins@gmail.com +-- Stability : provisional +-- Portability : portable +-- +-- Parsec compatibility module +-- +----------------------------------------------------------------------------- + +module Text.ParserCombinators.Parsec.Pos + ( SourceName, + Line, + Column, + SourcePos, + sourceLine, + sourceColumn, + sourceName, + incSourceLine, + incSourceColumn, + setSourceLine, + setSourceColumn, + setSourceName, + newPos, + initialPos, + updatePosChar, + updatePosString + ) where + + +import Text.Parsec.Pos diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Prim.hs cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Prim.hs --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Prim.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Prim.hs 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,65 @@ +----------------------------------------------------------------------------- +-- | +-- Module : 
Text.ParserCombinators.Parsec.Prim +-- Copyright : (c) Paolo Martini 2007 +-- License : BSD-style (see the LICENSE file) +-- +-- Maintainer : derek.a.elkins@gmail.com +-- Stability : provisional +-- Portability : portable +-- +-- Parsec compatibility module +-- +----------------------------------------------------------------------------- + +module Text.ParserCombinators.Parsec.Prim + ( (), + (<|>), + Parser, + GenParser, + runParser, + parse, + parseFromFile, + parseTest, + token, + tokens, + tokenPrim, + tokenPrimEx, + try, + label, + labels, + unexpected, + pzero, + many, + skipMany, + getState, + setState, + updateState, + getPosition, + setPosition, + getInput, + setInput, + State(..), + getParserState, + setParserState + ) where + +import Text.Parsec.Prim hiding (runParser, try) +import qualified Text.Parsec.Prim as N -- 'N' for 'New' +import Text.Parsec.String + +import Text.Parsec.Error +import Text.Parsec.Pos + +pzero :: GenParser tok st a +pzero = parserZero + +runParser :: GenParser tok st a + -> st + -> SourceName + -> [tok] + -> Either ParseError a +runParser = N.runParser + +try :: GenParser tok st a -> GenParser tok st a +try = N.try diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Token.hs cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Token.hs --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Token.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec/Token.hs 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,23 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Text.ParserCombinators.Parsec.Token +-- Copyright : (c) Paolo Martini 2007 +-- License : BSD-style (see the LICENSE file) +-- +-- Maintainer : derek.a.elkins@gmail.com +-- Stability : provisional +-- Portability : portable +-- +-- Parsec compatibility module +-- 
+----------------------------------------------------------------------------- + +module Text.ParserCombinators.Parsec.Token + ( LanguageDef, + GenLanguageDef(..), + TokenParser, + GenTokenParser(..), + makeTokenParser + ) where + +import Text.Parsec.Token diff -Nru cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec.hs cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec.hs --- cabal-install-1.22-1.22.6.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/parsec-3.1.11/Text/ParserCombinators/Parsec.hs 2016-05-13 03:05:07.000000000 +0000 @@ -0,0 +1,41 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Text.ParserCombinators.Parsec +-- Copyright : (c) Paolo Martini 2007 +-- License : BSD-style (see the LICENSE file) +-- +-- Maintainer : derek.a.elkins@gmail.com +-- Stability : provisional +-- Portability : portable +-- +-- Parsec compatibility module +-- +----------------------------------------------------------------------------- + +module Text.ParserCombinators.Parsec + ( -- complete modules + module Text.ParserCombinators.Parsec.Prim + , module Text.ParserCombinators.Parsec.Combinator + , module Text.ParserCombinators.Parsec.Char + + -- module Text.ParserCombinators.Parsec.Error + , ParseError + , errorPos + + -- module Text.ParserCombinators.Parsec.Pos + , SourcePos + , SourceName, Line, Column + , sourceName, sourceLine, sourceColumn + , incSourceLine, incSourceColumn + , setSourceLine, setSourceColumn, setSourceName + + ) where + +import Text.Parsec.String() + +import Text.ParserCombinators.Parsec.Prim +import Text.ParserCombinators.Parsec.Combinator +import Text.ParserCombinators.Parsec.Char + +import Text.ParserCombinators.Parsec.Error +import Text.ParserCombinators.Parsec.Pos diff -Nru cabal-install-1.22-1.22.6.0/src/random-1.1/CHANGELOG.md 
cabal-install-1.22-1.22.9.0/src/random-1.1/CHANGELOG.md --- cabal-install-1.22-1.22.6.0/src/random-1.1/CHANGELOG.md 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/random-1.1/CHANGELOG.md 2014-09-16 21:34:43.000000000 +0000 @@ -0,0 +1,26 @@ +# 1.1 + * breaking change to `randomIValInteger` to improve RNG quality and performance + see https://github.com/haskell/random/pull/4 and + ghc https://ghc.haskell.org/trac/ghc/ticket/8898 + * correct documentation about generated range of Int32 sized values of type Int + https://github.com/haskell/random/pull/7 + * fix memory leaks by using strict fields and strict atomicModifyIORef' + https://github.com/haskell/random/pull/8 + related to ghc trac tickets #7936 and #4218 + * support for base < 4.6 (which doesnt provide strict atomicModifyIORef') + and integrating Travis CI support. + https://github.com/haskell/random/pull/12 + * fix C type in test suite https://github.com/haskell/random/pull/9 + +# 1.0.1.1 +bump for overflow bug fixes + +# 1.0.1.2 +bump for ticket 8704, build fusion + +# 1.0.1.0 +bump for bug fixes, + +# 1.0.0.4 +bumped version for float/double range bugfix + diff -Nru cabal-install-1.22-1.22.6.0/src/random-1.1/.darcs-boring cabal-install-1.22-1.22.9.0/src/random-1.1/.darcs-boring --- cabal-install-1.22-1.22.6.0/src/random-1.1/.darcs-boring 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/random-1.1/.darcs-boring 2014-09-16 21:34:43.000000000 +0000 @@ -0,0 +1,5 @@ +^dist(/|$) +^setup(/|$) +^GNUmakefile$ +^Makefile.local$ +^.depend(.bak)?$ diff -Nru cabal-install-1.22-1.22.6.0/src/random-1.1/LICENSE cabal-install-1.22-1.22.9.0/src/random-1.1/LICENSE --- cabal-install-1.22-1.22.6.0/src/random-1.1/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/random-1.1/LICENSE 2014-09-16 21:34:43.000000000 +0000 @@ -0,0 +1,63 @@ +This library (libraries/base) is derived from code from two +sources: + + * Code from the GHC project which is largely 
(c) The University of + Glasgow, and distributable under a BSD-style license (see below), + + * Code from the Haskell 98 Report which is (c) Simon Peyton Jones + and freely redistributable (but see the full license for + restrictions). + +The full text of these licenses is reproduced below. Both of the +licenses are BSD-style or compatible. + +----------------------------------------------------------------------------- + +The Glasgow Haskell Compiler License + +Copyright 2004, The University Court of the University of Glasgow. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +- Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +- Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +- Neither name of the University nor the names of its contributors may be +used to endorse or promote products derived from this software without +specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY COURT OF THE UNIVERSITY OF +GLASGOW AND THE CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +UNIVERSITY COURT OF THE UNIVERSITY OF GLASGOW OR THE CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. + +----------------------------------------------------------------------------- + +Code derived from the document "Report on the Programming Language +Haskell 98", is distributed under the following license: + + Copyright (c) 2002 Simon Peyton Jones + + The authors intend this Report to belong to the entire Haskell + community, and so we grant permission to copy and distribute it for + any purpose, provided that it is reproduced in its entirety, + including this Notice. Modified versions of this Report may also be + copied and distributed for any purpose, provided that the modified + version is clearly presented as such, and that it does not claim to + be a definition of the Haskell 98 Language. 
+ +----------------------------------------------------------------------------- diff -Nru cabal-install-1.22-1.22.6.0/src/random-1.1/random.cabal cabal-install-1.22-1.22.9.0/src/random-1.1/random.cabal --- cabal-install-1.22-1.22.6.0/src/random-1.1/random.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/random-1.1/random.cabal 2016-06-02 07:15:40.000000000 +0000 @@ -0,0 +1,70 @@ +name: random +version: 1.1 + + + + +license: BSD3 +license-file: LICENSE +maintainer: core-libraries-committee@haskell.org +bug-reports: https://github.com/haskell/random/issues +synopsis: random number library +category: System +description: + This package provides a basic random number generation + library, including the ability to split random number + generators. + +extra-source-files: + .travis.yml + README.md + CHANGELOG.md + .gitignore + .darcs-boring + + + +build-type: Simple +-- cabal-version 1.8 needed because "the field 'build-depends: random' refers +-- to a library which is defined within the same package" +cabal-version: >= 1.8 + + + +Library + exposed-modules: + System.Random + extensions: CPP + GHC-Options: -O2 + build-depends: base >= 3 && < 5, time + +source-repository head + type: git + location: http://git.haskell.org/packages/random.git + +-- To run the Test-Suite: +-- $ cabal configure --enable-tests +-- $ cabal test --show-details=always --test-options="+RTS -M1M -RTS" + +Test-Suite T7936 + type: exitcode-stdio-1.0 + main-is: T7936.hs + hs-source-dirs: tests + build-depends: base >= 3 && < 5, random + ghc-options: -rtsopts -O2 + +Test-Suite TestRandomRs + type: exitcode-stdio-1.0 + main-is: TestRandomRs.hs + hs-source-dirs: tests + build-depends: base >= 3 && < 5, random + ghc-options: -rtsopts -O2 + -- TODO. Why does the following not work? 
+ --test-options: +RTS -M1M -RTS + +Test-Suite TestRandomIOs + type: exitcode-stdio-1.0 + main-is: TestRandomIOs.hs + hs-source-dirs: tests + build-depends: base >= 3 && < 5, random + ghc-options: -rtsopts -O2 diff -Nru cabal-install-1.22-1.22.6.0/src/random-1.1/README.md cabal-install-1.22-1.22.9.0/src/random-1.1/README.md --- cabal-install-1.22-1.22.6.0/src/random-1.1/README.md 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/random-1.1/README.md 2014-09-16 21:34:43.000000000 +0000 @@ -0,0 +1,18 @@ +The Haskell Standard Library -- Random Number Generation +======================================================== +[![Build Status](https://secure.travis-ci.org/haskell/random.svg?branch=master)](http://travis-ci.org/haskell/random) + +This library provides a basic interface for (splittable) random number generators. + +The API documentation can be found here: + + http://hackage.haskell.org/package/random/docs/System-Random.html + +A module supplying this interface is required for Haskell 98 (but not Haskell +2010). An older [version] +(http://www.haskell.org/ghc/docs/latest/html/libraries/haskell98/Random.html) +of this library is included with GHC in the haskell98 package. This newer +version, with compatible api, is included in the [Haskell Platform] +(http://www.haskell.org/platform/contents.html). + +Please report bugs in the Github [issue tracker] (https://github.com/haskell/random/issues) (no longer in the GHC trac). 
diff -Nru cabal-install-1.22-1.22.6.0/src/random-1.1/Setup.hs cabal-install-1.22-1.22.9.0/src/random-1.1/Setup.hs --- cabal-install-1.22-1.22.6.0/src/random-1.1/Setup.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/random-1.1/Setup.hs 2014-09-16 21:34:43.000000000 +0000 @@ -0,0 +1,6 @@ +module Main (main) where + +import Distribution.Simple + +main :: IO () +main = defaultMain diff -Nru cabal-install-1.22-1.22.6.0/src/random-1.1/System/Random.hs cabal-install-1.22-1.22.9.0/src/random-1.1/System/Random.hs --- cabal-install-1.22-1.22.6.0/src/random-1.1/System/Random.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/random-1.1/System/Random.hs 2014-09-16 21:34:43.000000000 +0000 @@ -0,0 +1,609 @@ +#if __GLASGOW_HASKELL__ >= 701 +{-# LANGUAGE Trustworthy #-} +#endif + +----------------------------------------------------------------------------- +-- | +-- Module : System.Random +-- Copyright : (c) The University of Glasgow 2001 +-- License : BSD-style (see the file LICENSE in the 'random' repository) +-- +-- Maintainer : libraries@haskell.org +-- Stability : stable +-- Portability : portable +-- +-- This library deals with the common task of pseudo-random number +-- generation. The library makes it possible to generate repeatable +-- results, by starting with a specified initial random number generator, +-- or to get different results on each run by using the system-initialised +-- generator or by supplying a seed from some other source. +-- +-- The library is split into two layers: +-- +-- * A core /random number generator/ provides a supply of bits. +-- The class 'RandomGen' provides a common interface to such generators. +-- The library provides one instance of 'RandomGen', the abstract +-- data type 'StdGen'. Programmers may, of course, supply their own +-- instances of 'RandomGen'. +-- +-- * The class 'Random' provides a way to extract values of a particular +-- type from a random number generator. 
For example, the 'Float' +-- instance of 'Random' allows one to generate random values of type +-- 'Float'. +-- +-- This implementation uses the Portable Combined Generator of L'Ecuyer +-- ["System.Random\#LEcuyer"] for 32-bit computers, transliterated by +-- Lennart Augustsson. It has a period of roughly 2.30584e18. +-- +----------------------------------------------------------------------------- + +#include "MachDeps.h" + +module System.Random + ( + + -- $intro + + -- * Random number generators + +#ifdef ENABLE_SPLITTABLEGEN + RandomGen(next, genRange) + , SplittableGen(split) +#else + RandomGen(next, genRange, split) +#endif + -- ** Standard random number generators + , StdGen + , mkStdGen + + -- ** The global random number generator + + -- $globalrng + + , getStdRandom + , getStdGen + , setStdGen + , newStdGen + + -- * Random values of various types + , Random ( random, randomR, + randoms, randomRs, + randomIO, randomRIO ) + + -- * References + -- $references + + ) where + +import Prelude + +import Data.Bits +import Data.Int +import Data.Word +import Foreign.C.Types + +#ifdef __NHC__ +import CPUTime ( getCPUTime ) +import Foreign.Ptr ( Ptr, nullPtr ) +import Foreign.C ( CTime, CUInt ) +#else +import System.CPUTime ( getCPUTime ) +import Data.Time ( getCurrentTime, UTCTime(..) ) +import Data.Ratio ( numerator, denominator ) +#endif +import Data.Char ( isSpace, chr, ord ) +import System.IO.Unsafe ( unsafePerformIO ) +import Data.IORef ( IORef, newIORef, readIORef, writeIORef ) +#if MIN_VERSION_base (4,6,0) +import Data.IORef ( atomicModifyIORef' ) +#else +import Data.IORef ( atomicModifyIORef ) +#endif +import Numeric ( readDec ) + +#ifdef __GLASGOW_HASKELL__ +import GHC.Exts ( build ) +#else +-- | A dummy variant of build without fusion. 
+{-# INLINE build #-} +build :: ((a -> [a] -> [a]) -> [a] -> [a]) -> [a] +build g = g (:) [] +#endif + +#if !MIN_VERSION_base (4,6,0) +atomicModifyIORef' :: IORef a -> (a -> (a,b)) -> IO b +atomicModifyIORef' ref f = do + b <- atomicModifyIORef ref + (\x -> let (a, b) = f x + in (a, a `seq` b)) + b `seq` return b +#endif + +-- The standard nhc98 implementation of Time.ClockTime does not match +-- the extended one expected in this module, so we lash-up a quick +-- replacement here. +#ifdef __NHC__ +foreign import ccall "time.h time" readtime :: Ptr CTime -> IO CTime +getTime :: IO (Integer, Integer) +getTime = do CTime t <- readtime nullPtr; return (toInteger t, 0) +#else +getTime :: IO (Integer, Integer) +getTime = do + utc <- getCurrentTime + let daytime = toRational $ utctDayTime utc + return $ quotRem (numerator daytime) (denominator daytime) +#endif + +-- | The class 'RandomGen' provides a common interface to random number +-- generators. +-- +#ifdef ENABLE_SPLITTABLEGEN +-- Minimal complete definition: 'next'. +#else +-- Minimal complete definition: 'next' and 'split'. +#endif + +class RandomGen g where + + -- |The 'next' operation returns an 'Int' that is uniformly distributed + -- in the range returned by 'genRange' (including both end points), + -- and a new generator. + next :: g -> (Int, g) + + -- |The 'genRange' operation yields the range of values returned by + -- the generator. + -- + -- It is required that: + -- + -- * If @(a,b) = 'genRange' g@, then @a < b@. + -- + -- * 'genRange' always returns a pair of defined 'Int's. + -- + -- The second condition ensures that 'genRange' cannot examine its + -- argument, and hence the value it returns can be determined only by the + -- instance of 'RandomGen'. That in turn allows an implementation to make + -- a single call to 'genRange' to establish a generator's range, without + -- being concerned that the generator returned by (say) 'next' might have + -- a different range to the generator passed to 'next'. 
+ -- + -- The default definition spans the full range of 'Int'. + genRange :: g -> (Int,Int) + + -- default method + genRange _ = (minBound, maxBound) + +#ifdef ENABLE_SPLITTABLEGEN +-- | The class 'SplittableGen' proivides a way to specify a random number +-- generator that can be split into two new generators. +class SplittableGen g where +#endif + -- |The 'split' operation allows one to obtain two distinct random number + -- generators. This is very useful in functional programs (for example, when + -- passing a random number generator down to recursive calls), but very + -- little work has been done on statistically robust implementations of + -- 'split' (["System.Random\#Burton", "System.Random\#Hellekalek"] + -- are the only examples we know of). + split :: g -> (g, g) + +{- | +The 'StdGen' instance of 'RandomGen' has a 'genRange' of at least 30 bits. + +The result of repeatedly using 'next' should be at least as statistically +robust as the /Minimal Standard Random Number Generator/ described by +["System.Random\#Park", "System.Random\#Carta"]. +Until more is known about implementations of 'split', all we require is +that 'split' deliver generators that are (a) not identical and +(b) independently robust in the sense just given. + +The 'Show' and 'Read' instances of 'StdGen' provide a primitive way to save the +state of a random number generator. +It is required that @'read' ('show' g) == g@. + +In addition, 'reads' may be used to map an arbitrary string (not necessarily one +produced by 'show') onto a value of type 'StdGen'. In general, the 'Read' +instance of 'StdGen' has the following properties: + +* It guarantees to succeed on any string. + +* It guarantees to consume only a finite portion of the string. + +* Different argument strings are likely to result in different results. 
+ +-} + +data StdGen + = StdGen !Int32 !Int32 + +instance RandomGen StdGen where + next = stdNext + genRange _ = stdRange + +#ifdef ENABLE_SPLITTABLEGEN +instance SplittableGen StdGen where +#endif + split = stdSplit + +instance Show StdGen where + showsPrec p (StdGen s1 s2) = + showsPrec p s1 . + showChar ' ' . + showsPrec p s2 + +instance Read StdGen where + readsPrec _p = \ r -> + case try_read r of + r'@[_] -> r' + _ -> [stdFromString r] -- because it shouldn't ever fail. + where + try_read r = do + (s1, r1) <- readDec (dropWhile isSpace r) + (s2, r2) <- readDec (dropWhile isSpace r1) + return (StdGen s1 s2, r2) + +{- + If we cannot unravel the StdGen from a string, create + one based on the string given. +-} +stdFromString :: String -> (StdGen, String) +stdFromString s = (mkStdGen num, rest) + where (cs, rest) = splitAt 6 s + num = foldl (\a x -> x + 3 * a) 1 (map ord cs) + + +{- | +The function 'mkStdGen' provides an alternative way of producing an initial +generator, by mapping an 'Int' into a generator. Again, distinct arguments +should be likely to produce distinct generators. +-} +mkStdGen :: Int -> StdGen -- why not Integer ? +mkStdGen s = mkStdGen32 $ fromIntegral s + +{- +From ["System.Random\#LEcuyer"]: "The integer variables s1 and s2 ... must be +initialized to values in the range [1, 2147483562] and [1, 2147483398] +respectively." +-} +mkStdGen32 :: Int32 -> StdGen +mkStdGen32 sMaybeNegative = StdGen (s1+1) (s2+1) + where + -- We want a non-negative number, but we can't just take the abs + -- of sMaybeNegative as -minBound == minBound. + s = sMaybeNegative .&. maxBound + (q, s1) = s `divMod` 2147483562 + s2 = q `mod` 2147483398 + +createStdGen :: Integer -> StdGen +createStdGen s = mkStdGen32 $ fromIntegral s + +{- | +With a source of random number supply in hand, the 'Random' class allows the +programmer to extract random values of a variety of types. + +Minimal complete definition: 'randomR' and 'random'. 
+ +-} + +class Random a where + -- | Takes a range /(lo,hi)/ and a random number generator + -- /g/, and returns a random value uniformly distributed in the closed + -- interval /[lo,hi]/, together with a new generator. It is unspecified + -- what happens if /lo>hi/. For continuous types there is no requirement + -- that the values /lo/ and /hi/ are ever produced, but they may be, + -- depending on the implementation and the interval. + randomR :: RandomGen g => (a,a) -> g -> (a,g) + + -- | The same as 'randomR', but using a default range determined by the type: + -- + -- * For bounded types (instances of 'Bounded', such as 'Char'), + -- the range is normally the whole type. + -- + -- * For fractional types, the range is normally the semi-closed interval + -- @[0,1)@. + -- + -- * For 'Integer', the range is (arbitrarily) the range of 'Int'. + random :: RandomGen g => g -> (a, g) + + -- | Plural variant of 'randomR', producing an infinite list of + -- random values instead of returning a new generator. + {-# INLINE randomRs #-} + randomRs :: RandomGen g => (a,a) -> g -> [a] + randomRs ival g = build (\cons _nil -> buildRandoms cons (randomR ival) g) + + -- | Plural variant of 'random', producing an infinite list of + -- random values instead of returning a new generator. + {-# INLINE randoms #-} + randoms :: RandomGen g => g -> [a] + randoms g = build (\cons _nil -> buildRandoms cons random g) + + -- | A variant of 'randomR' that uses the global random number generator + -- (see "System.Random#globalrng"). + randomRIO :: (a,a) -> IO a + randomRIO range = getStdRandom (randomR range) + + -- | A variant of 'random' that uses the global random number generator + -- (see "System.Random#globalrng"). + randomIO :: IO a + randomIO = getStdRandom random + +-- | Produce an infinite list-equivalent of random values. +{-# INLINE buildRandoms #-} +buildRandoms :: RandomGen g + => (a -> as -> as) -- ^ E.g. '(:)' but subject to fusion + -> (g -> (a,g)) -- ^ E.g. 
'random' + -> g -- ^ A 'RandomGen' instance + -> as +buildRandoms cons rand = go + where + -- The seq fixes part of #4218 and also makes fused Core simpler. + go g = x `seq` (x `cons` go g') where (x,g') = rand g + + +instance Random Integer where + randomR ival g = randomIvalInteger ival g + random g = randomR (toInteger (minBound::Int), toInteger (maxBound::Int)) g + +instance Random Int where randomR = randomIvalIntegral; random = randomBounded +instance Random Int8 where randomR = randomIvalIntegral; random = randomBounded +instance Random Int16 where randomR = randomIvalIntegral; random = randomBounded +instance Random Int32 where randomR = randomIvalIntegral; random = randomBounded +instance Random Int64 where randomR = randomIvalIntegral; random = randomBounded + +#ifndef __NHC__ +-- Word is a type synonym in nhc98. +instance Random Word where randomR = randomIvalIntegral; random = randomBounded +#endif +instance Random Word8 where randomR = randomIvalIntegral; random = randomBounded +instance Random Word16 where randomR = randomIvalIntegral; random = randomBounded +instance Random Word32 where randomR = randomIvalIntegral; random = randomBounded +instance Random Word64 where randomR = randomIvalIntegral; random = randomBounded + +instance Random CChar where randomR = randomIvalIntegral; random = randomBounded +instance Random CSChar where randomR = randomIvalIntegral; random = randomBounded +instance Random CUChar where randomR = randomIvalIntegral; random = randomBounded +instance Random CShort where randomR = randomIvalIntegral; random = randomBounded +instance Random CUShort where randomR = randomIvalIntegral; random = randomBounded +instance Random CInt where randomR = randomIvalIntegral; random = randomBounded +instance Random CUInt where randomR = randomIvalIntegral; random = randomBounded +instance Random CLong where randomR = randomIvalIntegral; random = randomBounded +instance Random CULong where randomR = randomIvalIntegral; random = randomBounded 
+instance Random CPtrdiff where randomR = randomIvalIntegral; random = randomBounded +instance Random CSize where randomR = randomIvalIntegral; random = randomBounded +instance Random CWchar where randomR = randomIvalIntegral; random = randomBounded +instance Random CSigAtomic where randomR = randomIvalIntegral; random = randomBounded +instance Random CLLong where randomR = randomIvalIntegral; random = randomBounded +instance Random CULLong where randomR = randomIvalIntegral; random = randomBounded +instance Random CIntPtr where randomR = randomIvalIntegral; random = randomBounded +instance Random CUIntPtr where randomR = randomIvalIntegral; random = randomBounded +instance Random CIntMax where randomR = randomIvalIntegral; random = randomBounded +instance Random CUIntMax where randomR = randomIvalIntegral; random = randomBounded + +instance Random Char where + randomR (a,b) g = + case (randomIvalInteger (toInteger (ord a), toInteger (ord b)) g) of + (x,g') -> (chr x, g') + random g = randomR (minBound,maxBound) g + +instance Random Bool where + randomR (a,b) g = + case (randomIvalInteger (bool2Int a, bool2Int b) g) of + (x, g') -> (int2Bool x, g') + where + bool2Int :: Bool -> Integer + bool2Int False = 0 + bool2Int True = 1 + + int2Bool :: Int -> Bool + int2Bool 0 = False + int2Bool _ = True + + random g = randomR (minBound,maxBound) g + +{-# INLINE randomRFloating #-} +randomRFloating :: (Fractional a, Num a, Ord a, Random a, RandomGen g) => (a, a) -> g -> (a, g) +randomRFloating (l,h) g + | l>h = randomRFloating (h,l) g + | otherwise = let (coef,g') = random g in + (2.0 * (0.5*l + coef * (0.5*h - 0.5*l)), g') -- avoid overflow + +instance Random Double where + randomR = randomRFloating + random rng = + case random rng of + (x,rng') -> + -- We use 53 bits of randomness corresponding to the 53 bit significand: + ((fromIntegral (mask53 .&. 
(x::Int64)) :: Double) + / fromIntegral twoto53, rng') + where + twoto53 = (2::Int64) ^ (53::Int64) + mask53 = twoto53 - 1 + +instance Random Float where + randomR = randomRFloating + random rng = + -- TODO: Faster to just use 'next' IF it generates enough bits of randomness. + case random rng of + (x,rng') -> + -- We use 24 bits of randomness corresponding to the 24 bit significand: + ((fromIntegral (mask24 .&. (x::Int32)) :: Float) + / fromIntegral twoto24, rng') + -- Note, encodeFloat is another option, but I'm not seeing slightly + -- worse performance with the following [2011.06.25]: +-- (encodeFloat rand (-24), rng') + where + mask24 = twoto24 - 1 + twoto24 = (2::Int32) ^ (24::Int32) + +-- CFloat/CDouble are basically the same as a Float/Double: +instance Random CFloat where + randomR = randomRFloating + random rng = case random rng of + (x,rng') -> (realToFrac (x::Float), rng') + +instance Random CDouble where + randomR = randomRFloating + -- A MYSTERY: + -- Presently, this is showing better performance than the Double instance: + -- (And yet, if the Double instance uses randomFrac then its performance is much worse!) + random = randomFrac + -- random rng = case random rng of + -- (x,rng') -> (realToFrac (x::Double), rng') + +mkStdRNG :: Integer -> IO StdGen +mkStdRNG o = do + ct <- getCPUTime + (sec, psec) <- getTime + return (createStdGen (sec * 12345 + psec + ct + o)) + +randomBounded :: (RandomGen g, Random a, Bounded a) => g -> (a, g) +randomBounded = randomR (minBound, maxBound) + +-- The two integer functions below take an [inclusive,inclusive] range. 
+randomIvalIntegral :: (RandomGen g, Integral a) => (a, a) -> g -> (a, g) +randomIvalIntegral (l,h) = randomIvalInteger (toInteger l, toInteger h) + +{-# SPECIALIZE randomIvalInteger :: (Num a) => + (Integer, Integer) -> StdGen -> (a, StdGen) #-} + +randomIvalInteger :: (RandomGen g, Num a) => (Integer, Integer) -> g -> (a, g) +randomIvalInteger (l,h) rng + | l > h = randomIvalInteger (h,l) rng + | otherwise = case (f 1 0 rng) of (v, rng') -> (fromInteger (l + v `mod` k), rng') + where + (genlo, genhi) = genRange rng + b = fromIntegral genhi - fromIntegral genlo + 1 + + -- Probabilities of the most likely and least likely result + -- will differ at most by a factor of (1 +- 1/q). Assuming the RandomGen + -- is uniform, of course + + -- On average, log q / log b more random values will be generated + -- than the minimum + q = 1000 + k = h - l + 1 + magtgt = k * q + + -- generate random values until we exceed the target magnitude + f mag v g | mag >= magtgt = (v, g) + | otherwise = v' `seq`f (mag*b) v' g' where + (x,g') = next g + v' = (v * b + (fromIntegral x - fromIntegral genlo)) + + +-- The continuous functions on the other hand take an [inclusive,exclusive) range. 
+randomFrac :: (RandomGen g, Fractional a) => g -> (a, g) +randomFrac = randomIvalDouble (0::Double,1) realToFrac + +randomIvalDouble :: (RandomGen g, Fractional a) => (Double, Double) -> (Double -> a) -> g -> (a, g) +randomIvalDouble (l,h) fromDouble rng + | l > h = randomIvalDouble (h,l) fromDouble rng + | otherwise = + case (randomIvalInteger (toInteger (minBound::Int32), toInteger (maxBound::Int32)) rng) of + (x, rng') -> + let + scaled_x = + fromDouble (0.5*l + 0.5*h) + -- previously (l+h)/2, overflowed + fromDouble ((0.5*h - 0.5*l) / (0.5 * realToFrac int32Count)) * -- avoid overflow + fromIntegral (x::Int32) + in + (scaled_x, rng') + +int32Count :: Integer +int32Count = toInteger (maxBound::Int32) - toInteger (minBound::Int32) + 1 -- GHC ticket #3982 + +stdRange :: (Int,Int) +stdRange = (1, 2147483562) + +stdNext :: StdGen -> (Int, StdGen) +-- Returns values in the range stdRange +stdNext (StdGen s1 s2) = (fromIntegral z', StdGen s1'' s2'') + where z' = if z < 1 then z + 2147483562 else z + z = s1'' - s2'' + + k = s1 `quot` 53668 + s1' = 40014 * (s1 - k * 53668) - k * 12211 + s1'' = if s1' < 0 then s1' + 2147483563 else s1' + + k' = s2 `quot` 52774 + s2' = 40692 * (s2 - k' * 52774) - k' * 3791 + s2'' = if s2' < 0 then s2' + 2147483399 else s2' + +stdSplit :: StdGen -> (StdGen, StdGen) +stdSplit std@(StdGen s1 s2) + = (left, right) + where + -- no statistical foundation for this! + left = StdGen new_s1 t2 + right = StdGen t1 new_s2 + + new_s1 | s1 == 2147483562 = 1 + | otherwise = s1 + 1 + + new_s2 | s2 == 1 = 2147483398 + | otherwise = s2 - 1 + + StdGen t1 t2 = snd (next std) + +-- The global random number generator + +{- $globalrng #globalrng# + +There is a single, implicit, global random number generator of type +'StdGen', held in some global variable maintained by the 'IO' monad. It is +initialised automatically in some system-dependent fashion, for example, by +using the time of day, or Linux's kernel random number generator. 
To get +deterministic behaviour, use 'setStdGen'. +-} + +-- |Sets the global random number generator. +setStdGen :: StdGen -> IO () +setStdGen sgen = writeIORef theStdGen sgen + +-- |Gets the global random number generator. +getStdGen :: IO StdGen +getStdGen = readIORef theStdGen + +theStdGen :: IORef StdGen +theStdGen = unsafePerformIO $ do + rng <- mkStdRNG 0 + newIORef rng + +-- |Applies 'split' to the current global random generator, +-- updates it with one of the results, and returns the other. +newStdGen :: IO StdGen +newStdGen = atomicModifyIORef' theStdGen split + +{- |Uses the supplied function to get a value from the current global +random generator, and updates the global generator with the new generator +returned by the function. For example, @rollDice@ gets a random integer +between 1 and 6: + +> rollDice :: IO Int +> rollDice = getStdRandom (randomR (1,6)) + +-} + +getStdRandom :: (StdGen -> (a,StdGen)) -> IO a +getStdRandom f = atomicModifyIORef' theStdGen (swap . f) + where swap (v,g) = (g,v) + +{- $references + +1. FW #Burton# Burton and RL Page, /Distributed random number generation/, +Journal of Functional Programming, 2(2):203-212, April 1992. + +2. SK #Park# Park, and KW Miller, /Random number generators - +good ones are hard to find/, Comm ACM 31(10), Oct 1988, pp1192-1201. + +3. DG #Carta# Carta, /Two fast implementations of the minimal standard +random number generator/, Comm ACM, 33(1), Jan 1990, pp87-88. + +4. P #Hellekalek# Hellekalek, /Don\'t trust parallel Monte Carlo/, +Department of Mathematics, University of Salzburg, +, 1998. + +5. Pierre #LEcuyer# L'Ecuyer, /Efficient and portable combined random +number generators/, Comm ACM, 31(6), Jun 1988, pp742-749. + +The Web site is a great source of information. 
+ +-} diff -Nru cabal-install-1.22-1.22.6.0/src/random-1.1/tests/T7936.hs cabal-install-1.22-1.22.9.0/src/random-1.1/tests/T7936.hs --- cabal-install-1.22-1.22.6.0/src/random-1.1/tests/T7936.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/random-1.1/tests/T7936.hs 2014-09-16 21:34:43.000000000 +0000 @@ -0,0 +1,14 @@ +-- Test for ticket #7936: +-- https://ghc.haskell.org/trac/ghc/ticket/7936 +-- +-- Used to fail with: +-- +-- $ cabal test T7936 --test-options="+RTS -M1M -RTS" +-- T7936: Heap exhausted; + +module Main where + +import System.Random (newStdGen) +import Control.Monad (replicateM_) + +main = replicateM_ 100000 newStdGen diff -Nru cabal-install-1.22-1.22.6.0/src/random-1.1/tests/TestRandomIOs.hs cabal-install-1.22-1.22.9.0/src/random-1.1/tests/TestRandomIOs.hs --- cabal-install-1.22-1.22.6.0/src/random-1.1/tests/TestRandomIOs.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/random-1.1/tests/TestRandomIOs.hs 2014-09-16 21:34:43.000000000 +0000 @@ -0,0 +1,20 @@ +-- Test for ticket #4218 (TestRandomIOs): +-- https://ghc.haskell.org/trac/ghc/ticket/4218 +-- +-- Used to fail with: +-- +-- $ cabal test TestRandomIOs --test-options="+RTS -M1M -RTS" +-- TestRandomIOs: Heap exhausted; + +module Main where + +import Control.Monad (replicateM) +import System.Random (randomIO) + +-- Build a list of 5000 random ints in memory (IO Monad is strict), and print +-- the last one. +-- Should use less than 1Mb of heap space, or we are generating a list of +-- unevaluated thunks. 
+main = do + rs <- replicateM 5000 randomIO :: IO [Int] + print $ last rs diff -Nru cabal-install-1.22-1.22.6.0/src/random-1.1/tests/TestRandomRs.hs cabal-install-1.22-1.22.9.0/src/random-1.1/tests/TestRandomRs.hs --- cabal-install-1.22-1.22.6.0/src/random-1.1/tests/TestRandomRs.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/random-1.1/tests/TestRandomRs.hs 2014-09-16 21:34:43.000000000 +0000 @@ -0,0 +1,22 @@ +-- Test for ticket #4218 (TestRandomRs): +-- https://ghc.haskell.org/trac/ghc/ticket/4218 +-- +-- Fixed together with ticket #8704 +-- https://ghc.haskell.org/trac/ghc/ticket/8704 +-- Commit 4695ffa366f659940369f05e419a4f2249c3a776 +-- +-- Used to fail with: +-- +-- $ cabal test TestRandomRs --test-options="+RTS -M1M -RTS" +-- TestRandomRs: Heap exhausted; + +module Main where + +import Control.Monad (liftM, replicateM) +import System.Random (randomRs, getStdGen) + +-- Return the five-thousandth random number: +-- Should run in constant space (< 1Mb heap). +main = do + n <- (last . take 5000 . 
randomRs (0, 1000000)) `liftM` getStdGen + print (n::Integer) diff -Nru cabal-install-1.22-1.22.6.0/src/random-1.1/.travis.yml cabal-install-1.22-1.22.9.0/src/random-1.1/.travis.yml --- cabal-install-1.22-1.22.6.0/src/random-1.1/.travis.yml 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/random-1.1/.travis.yml 2014-09-16 21:34:43.000000000 +0000 @@ -0,0 +1,5 @@ +language: haskell +ghc: + - 7.4 + - 7.6 + - 7.8 diff -Nru cabal-install-1.22-1.22.6.0/src/stm-2.4.4.1/changelog.md cabal-install-1.22-1.22.9.0/src/stm-2.4.4.1/changelog.md --- cabal-install-1.22-1.22.6.0/src/stm-2.4.4.1/changelog.md 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/stm-2.4.4.1/changelog.md 2015-12-22 12:53:15.000000000 +0000 @@ -0,0 +1,54 @@ +# Changelog for [`stm` package](http://hackage.haskell.org/package/stm) + +## 2.4.4.1 *Dec 2015* + + * Add support for `base-4.9.0.0` + + * Drop support for GHC 6.12 / `base-4.2` + +## 2.4.4 *Dec 2014* + + * Add support for `base-4.8.0.0` + + * Tighten Safe Haskell bounds + + * Add `mkWeakTMVar` to `Control.Concurrent.STM.TMVar` + + * Add `@since`-annotations + +## 2.4.3 *Mar 2014* + + * Update behaviour of `newBroadcastTChanIO` to match + `newBroadcastTChan` in causing an error on a read from the + broadcast channel + + * Add `mkWeakTVar` + + * Add `isFullTBQueue` + + * Fix `TChan` created via `newBroadcastTChanIO` to throw same + exception on a `readTChan` as when created via `newBroadcastTChan` + + * Update to Cabal 1.10 format + +## 2.4.2 *Nov 2012* + + * Add `Control.Concurrent.STM.TSem` (transactional semaphore) + + * Add Applicative/Alternative instances of STM for GHC <7.0 + + * Throw proper exception when `readTChan` called on a broadcast `TChan` + +## 2.4 *Jul 2012* + + * Add `Control.Concurrent.STM.TQueue` (a faster `TChan`) + + * Add `Control.Concurrent.STM.TBQueue` (a bounded channel based on `TQueue`) + + * Add `Eq` instance for `TChan` + + * Add `newBroadcastTChan` and `newBroadcastTChanIO` + 
+ * Some performance improvements for `TChan` + + * Add `cloneTChan` diff -Nru cabal-install-1.22-1.22.6.0/src/stm-2.4.4.1/Control/Concurrent/STM/TArray.hs cabal-install-1.22-1.22.9.0/src/stm-2.4.4.1/Control/Concurrent/STM/TArray.hs --- cabal-install-1.22-1.22.6.0/src/stm-2.4.4.1/Control/Concurrent/STM/TArray.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/stm-2.4.4.1/Control/Concurrent/STM/TArray.hs 2015-12-22 12:53:15.000000000 +0000 @@ -0,0 +1,68 @@ +{-# LANGUAGE CPP, DeriveDataTypeable, FlexibleInstances, MultiParamTypeClasses #-} + +#if __GLASGOW_HASKELL__ >= 701 +{-# LANGUAGE Trustworthy #-} +#endif + +----------------------------------------------------------------------------- +-- | +-- Module : Control.Concurrent.STM.TArray +-- Copyright : (c) The University of Glasgow 2005 +-- License : BSD-style (see the file libraries/base/LICENSE) +-- +-- Maintainer : libraries@haskell.org +-- Stability : experimental +-- Portability : non-portable (requires STM) +-- +-- TArrays: transactional arrays, for use in the STM monad +-- +----------------------------------------------------------------------------- + +module Control.Concurrent.STM.TArray ( + TArray +) where + +import Data.Array (Array, bounds) +import Data.Array.Base (listArray, arrEleBottom, unsafeAt, MArray(..), + IArray(numElements)) +import Data.Ix (rangeSize) +import Data.Typeable (Typeable) +import Control.Concurrent.STM.TVar (TVar, newTVar, readTVar, writeTVar) +#ifdef __GLASGOW_HASKELL__ +import GHC.Conc (STM) +#else +import Control.Sequential.STM (STM) +#endif + +-- |TArray is a transactional array, supporting the usual 'MArray' +-- interface for mutable arrays. +-- +-- It is currently implemented as @Array ix (TVar e)@, +-- but it may be replaced by a more efficient implementation in the future +-- (the interface will remain the same, however). 
+-- +newtype TArray i e = TArray (Array i (TVar e)) deriving (Eq, Typeable) + +instance MArray TArray e STM where + getBounds (TArray a) = return (bounds a) + newArray b e = do + a <- rep (rangeSize b) (newTVar e) + return $ TArray (listArray b a) + newArray_ b = do + a <- rep (rangeSize b) (newTVar arrEleBottom) + return $ TArray (listArray b a) + unsafeRead (TArray a) i = readTVar $ unsafeAt a i + unsafeWrite (TArray a) i e = writeTVar (unsafeAt a i) e + getNumElements (TArray a) = return (numElements a) + +-- | Like 'replicateM' but uses an accumulator to prevent stack overflows. +-- Unlike 'replicateM' the returned list is in reversed order. +-- This doesn't matter though since this function is only used to create +-- arrays with identical elements. +rep :: Monad m => Int -> m a -> m [a] +rep n m = go n [] + where + go 0 xs = return xs + go i xs = do + x <- m + go (i-1) (x:xs) diff -Nru cabal-install-1.22-1.22.6.0/src/stm-2.4.4.1/Control/Concurrent/STM/TBQueue.hs cabal-install-1.22-1.22.9.0/src/stm-2.4.4.1/Control/Concurrent/STM/TBQueue.hs --- cabal-install-1.22-1.22.6.0/src/stm-2.4.4.1/Control/Concurrent/STM/TBQueue.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/stm-2.4.4.1/Control/Concurrent/STM/TBQueue.hs 2015-12-22 12:53:15.000000000 +0000 @@ -0,0 +1,197 @@ +{-# OPTIONS_GHC -fno-warn-name-shadowing #-} +{-# LANGUAGE CPP, DeriveDataTypeable #-} + +#if __GLASGOW_HASKELL__ >= 701 +{-# LANGUAGE Trustworthy #-} +#endif + +----------------------------------------------------------------------------- +-- | +-- Module : Control.Concurrent.STM.TBQueue +-- Copyright : (c) The University of Glasgow 2012 +-- License : BSD-style (see the file libraries/base/LICENSE) +-- +-- Maintainer : libraries@haskell.org +-- Stability : experimental +-- Portability : non-portable (requires STM) +-- +-- 'TBQueue' is a bounded version of 'TQueue'. The queue has a maximum +-- capacity set when it is created. 
If the queue already contains the +-- maximum number of elements, then 'writeTBQueue' blocks until an +-- element is removed from the queue. +-- +-- The implementation is based on the traditional purely-functional +-- queue representation that uses two lists to obtain amortised /O(1)/ +-- enqueue and dequeue operations. +-- +-- @since 2.4 +----------------------------------------------------------------------------- + +module Control.Concurrent.STM.TBQueue ( + -- * TBQueue + TBQueue, + newTBQueue, + newTBQueueIO, + readTBQueue, + tryReadTBQueue, + peekTBQueue, + tryPeekTBQueue, + writeTBQueue, + unGetTBQueue, + isEmptyTBQueue, + isFullTBQueue, + ) where + +import Data.Typeable +import GHC.Conc + +#define _UPK_(x) {-# UNPACK #-} !(x) + +-- | 'TBQueue' is an abstract type representing a bounded FIFO channel. +-- +-- @since 2.4 +data TBQueue a + = TBQueue _UPK_(TVar Int) -- CR: read capacity + _UPK_(TVar [a]) -- R: elements waiting to be read + _UPK_(TVar Int) -- CW: write capacity + _UPK_(TVar [a]) -- W: elements written (head is most recent) + deriving Typeable + +instance Eq (TBQueue a) where + TBQueue a _ _ _ == TBQueue b _ _ _ = a == b + +-- Total channel capacity remaining is CR + CW. Reads only need to +-- access CR, writes usually need to access only CW but sometimes need +-- CR. So in the common case we avoid contention between CR and CW. +-- +-- - when removing an element from R: +-- CR := CR + 1 +-- +-- - when adding an element to W: +-- if CW is non-zero +-- then CW := CW - 1 +-- then if CR is non-zero +-- then CW := CR - 1; CR := 0 +-- else **FULL** + +-- |Build and returns a new instance of 'TBQueue' +newTBQueue :: Int -- ^ maximum number of elements the queue can hold + -> STM (TBQueue a) +newTBQueue size = do + read <- newTVar [] + write <- newTVar [] + rsize <- newTVar 0 + wsize <- newTVar size + return (TBQueue rsize read wsize write) + +-- |@IO@ version of 'newTBQueue'. 
This is useful for creating top-level +-- 'TBQueue's using 'System.IO.Unsafe.unsafePerformIO', because using +-- 'atomically' inside 'System.IO.Unsafe.unsafePerformIO' isn't +-- possible. +newTBQueueIO :: Int -> IO (TBQueue a) +newTBQueueIO size = do + read <- newTVarIO [] + write <- newTVarIO [] + rsize <- newTVarIO 0 + wsize <- newTVarIO size + return (TBQueue rsize read wsize write) + +-- |Write a value to a 'TBQueue'; blocks if the queue is full. +writeTBQueue :: TBQueue a -> a -> STM () +writeTBQueue (TBQueue rsize _read wsize write) a = do + w <- readTVar wsize + if (w /= 0) + then do writeTVar wsize (w - 1) + else do + r <- readTVar rsize + if (r /= 0) + then do writeTVar rsize 0 + writeTVar wsize (r - 1) + else retry + listend <- readTVar write + writeTVar write (a:listend) + +-- |Read the next value from the 'TBQueue'. +readTBQueue :: TBQueue a -> STM a +readTBQueue (TBQueue rsize read _wsize write) = do + xs <- readTVar read + r <- readTVar rsize + writeTVar rsize (r + 1) + case xs of + (x:xs') -> do + writeTVar read xs' + return x + [] -> do + ys <- readTVar write + case ys of + [] -> retry + _ -> do + let (z:zs) = reverse ys -- NB. lazy: we want the transaction to be + -- short, otherwise it will conflict + writeTVar write [] + writeTVar read zs + return z + +-- | A version of 'readTBQueue' which does not retry. Instead it +-- returns @Nothing@ if no value is available. +tryReadTBQueue :: TBQueue a -> STM (Maybe a) +tryReadTBQueue c = fmap Just (readTBQueue c) `orElse` return Nothing + +-- | Get the next value from the @TBQueue@ without removing it, +-- retrying if the channel is empty. +peekTBQueue :: TBQueue a -> STM a +peekTBQueue c = do + x <- readTBQueue c + unGetTBQueue c x + return x + +-- | A version of 'peekTBQueue' which does not retry. Instead it +-- returns @Nothing@ if no value is available. 
+tryPeekTBQueue :: TBQueue a -> STM (Maybe a) +tryPeekTBQueue c = do + m <- tryReadTBQueue c + case m of + Nothing -> return Nothing + Just x -> do + unGetTBQueue c x + return m + +-- |Put a data item back onto a channel, where it will be the next item read. +-- Blocks if the queue is full. +unGetTBQueue :: TBQueue a -> a -> STM () +unGetTBQueue (TBQueue rsize read wsize _write) a = do + r <- readTVar rsize + if (r > 0) + then do writeTVar rsize (r - 1) + else do + w <- readTVar wsize + if (w > 0) + then writeTVar wsize (w - 1) + else retry + xs <- readTVar read + writeTVar read (a:xs) + +-- |Returns 'True' if the supplied 'TBQueue' is empty. +isEmptyTBQueue :: TBQueue a -> STM Bool +isEmptyTBQueue (TBQueue _rsize read _wsize write) = do + xs <- readTVar read + case xs of + (_:_) -> return False + [] -> do ys <- readTVar write + case ys of + [] -> return True + _ -> return False + +-- |Returns 'True' if the supplied 'TBQueue' is full. +-- +-- @since 2.4.3 +isFullTBQueue :: TBQueue a -> STM Bool +isFullTBQueue (TBQueue rsize _read wsize _write) = do + w <- readTVar wsize + if (w > 0) + then return False + else do + r <- readTVar rsize + if (r > 0) + then return False + else return True diff -Nru cabal-install-1.22-1.22.6.0/src/stm-2.4.4.1/Control/Concurrent/STM/TChan.hs cabal-install-1.22-1.22.9.0/src/stm-2.4.4.1/Control/Concurrent/STM/TChan.hs --- cabal-install-1.22-1.22.6.0/src/stm-2.4.4.1/Control/Concurrent/STM/TChan.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/stm-2.4.4.1/Control/Concurrent/STM/TChan.hs 2015-12-22 12:53:15.000000000 +0000 @@ -0,0 +1,203 @@ +{-# OPTIONS_GHC -fno-warn-name-shadowing #-} +{-# LANGUAGE CPP, DeriveDataTypeable #-} + +#if __GLASGOW_HASKELL__ >= 701 +{-# LANGUAGE Trustworthy #-} +#endif + +----------------------------------------------------------------------------- +-- | +-- Module : Control.Concurrent.STM.TChan +-- Copyright : (c) The University of Glasgow 2004 +-- License : BSD-style (see the file 
libraries/base/LICENSE) +-- +-- Maintainer : libraries@haskell.org +-- Stability : experimental +-- Portability : non-portable (requires STM) +-- +-- TChan: Transactional channels +-- (GHC only) +-- +----------------------------------------------------------------------------- + +module Control.Concurrent.STM.TChan ( +#ifdef __GLASGOW_HASKELL__ + -- * TChans + TChan, + + -- ** Construction + newTChan, + newTChanIO, + newBroadcastTChan, + newBroadcastTChanIO, + dupTChan, + cloneTChan, + + -- ** Reading and writing + readTChan, + tryReadTChan, + peekTChan, + tryPeekTChan, + writeTChan, + unGetTChan, + isEmptyTChan +#endif + ) where + +#ifdef __GLASGOW_HASKELL__ +import GHC.Conc + +import Data.Typeable (Typeable) + +#define _UPK_(x) {-# UNPACK #-} !(x) + +-- | 'TChan' is an abstract type representing an unbounded FIFO channel. +data TChan a = TChan _UPK_(TVar (TVarList a)) + _UPK_(TVar (TVarList a)) + deriving (Eq, Typeable) + +type TVarList a = TVar (TList a) +data TList a = TNil | TCons a _UPK_(TVarList a) + +-- |Build and return a new instance of 'TChan' +newTChan :: STM (TChan a) +newTChan = do + hole <- newTVar TNil + read <- newTVar hole + write <- newTVar hole + return (TChan read write) + +-- |@IO@ version of 'newTChan'. This is useful for creating top-level +-- 'TChan's using 'System.IO.Unsafe.unsafePerformIO', because using +-- 'atomically' inside 'System.IO.Unsafe.unsafePerformIO' isn't +-- possible. +newTChanIO :: IO (TChan a) +newTChanIO = do + hole <- newTVarIO TNil + read <- newTVarIO hole + write <- newTVarIO hole + return (TChan read write) + +-- | Create a write-only 'TChan'. More precisely, 'readTChan' will 'retry' +-- even after items have been written to the channel. The only way to read +-- a broadcast channel is to duplicate it with 'dupTChan'. 
+-- +-- Consider a server that broadcasts messages to clients: +-- +-- >serve :: TChan Message -> Client -> IO loop +-- >serve broadcastChan client = do +-- > myChan <- dupTChan broadcastChan +-- > forever $ do +-- > message <- readTChan myChan +-- > send client message +-- +-- The problem with using 'newTChan' to create the broadcast channel is that if +-- it is only written to and never read, items will pile up in memory. By +-- using 'newBroadcastTChan' to create the broadcast channel, items can be +-- garbage collected after clients have seen them. +-- +-- @since 2.4 +newBroadcastTChan :: STM (TChan a) +newBroadcastTChan = do + write_hole <- newTVar TNil + read <- newTVar (error "reading from a TChan created by newBroadcastTChan; use dupTChan first") + write <- newTVar write_hole + return (TChan read write) + +-- | @IO@ version of 'newBroadcastTChan'. +-- +-- @since 2.4 +newBroadcastTChanIO :: IO (TChan a) +newBroadcastTChanIO = do + write_hole <- newTVarIO TNil + read <- newTVarIO (error "reading from a TChan created by newBroadcastTChanIO; use dupTChan first") + write <- newTVarIO write_hole + return (TChan read write) + +-- |Write a value to a 'TChan'. +writeTChan :: TChan a -> a -> STM () +writeTChan (TChan _read write) a = do + listend <- readTVar write -- listend == TVar pointing to TNil + new_listend <- newTVar TNil + writeTVar listend (TCons a new_listend) + writeTVar write new_listend + +-- |Read the next value from the 'TChan'. +readTChan :: TChan a -> STM a +readTChan (TChan read _write) = do + listhead <- readTVar read + head <- readTVar listhead + case head of + TNil -> retry + TCons a tail -> do + writeTVar read tail + return a + +-- | A version of 'readTChan' which does not retry. Instead it +-- returns @Nothing@ if no value is available. 
+tryReadTChan :: TChan a -> STM (Maybe a) +tryReadTChan (TChan read _write) = do + listhead <- readTVar read + head <- readTVar listhead + case head of + TNil -> return Nothing + TCons a tl -> do + writeTVar read tl + return (Just a) + +-- | Get the next value from the @TChan@ without removing it, +-- retrying if the channel is empty. +peekTChan :: TChan a -> STM a +peekTChan (TChan read _write) = do + listhead <- readTVar read + head <- readTVar listhead + case head of + TNil -> retry + TCons a _ -> return a + +-- | A version of 'peekTChan' which does not retry. Instead it +-- returns @Nothing@ if no value is available. +tryPeekTChan :: TChan a -> STM (Maybe a) +tryPeekTChan (TChan read _write) = do + listhead <- readTVar read + head <- readTVar listhead + case head of + TNil -> return Nothing + TCons a _ -> return (Just a) + +-- |Duplicate a 'TChan': the duplicate channel begins empty, but data written to +-- either channel from then on will be available from both. Hence this creates +-- a kind of broadcast channel, where data written by anyone is seen by +-- everyone else. +dupTChan :: TChan a -> STM (TChan a) +dupTChan (TChan _read write) = do + hole <- readTVar write + new_read <- newTVar hole + return (TChan new_read write) + +-- |Put a data item back onto a channel, where it will be the next item read. +unGetTChan :: TChan a -> a -> STM () +unGetTChan (TChan read _write) a = do + listhead <- readTVar read + newhead <- newTVar (TCons a listhead) + writeTVar read newhead + +-- |Returns 'True' if the supplied 'TChan' is empty. +isEmptyTChan :: TChan a -> STM Bool +isEmptyTChan (TChan read _write) = do + listhead <- readTVar read + head <- readTVar listhead + case head of + TNil -> return True + TCons _ _ -> return False + +-- |Clone a 'TChan': similar to dupTChan, but the cloned channel starts with the +-- same content available as the original channel. 
+-- +-- @since 2.4 +cloneTChan :: TChan a -> STM (TChan a) +cloneTChan (TChan read write) = do + readpos <- readTVar read + new_read <- newTVar readpos + return (TChan new_read write) +#endif diff -Nru cabal-install-1.22-1.22.6.0/src/stm-2.4.4.1/Control/Concurrent/STM/TMVar.hs cabal-install-1.22-1.22.9.0/src/stm-2.4.4.1/Control/Concurrent/STM/TMVar.hs --- cabal-install-1.22-1.22.6.0/src/stm-2.4.4.1/Control/Concurrent/STM/TMVar.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/stm-2.4.4.1/Control/Concurrent/STM/TMVar.hs 2015-12-22 12:53:15.000000000 +0000 @@ -0,0 +1,164 @@ +{-# LANGUAGE CPP, DeriveDataTypeable, MagicHash, UnboxedTuples #-} + +#if __GLASGOW_HASKELL__ >= 701 +{-# LANGUAGE Trustworthy #-} +#endif + +----------------------------------------------------------------------------- +-- | +-- Module : Control.Concurrent.STM.TMVar +-- Copyright : (c) The University of Glasgow 2004 +-- License : BSD-style (see the file libraries/base/LICENSE) +-- +-- Maintainer : libraries@haskell.org +-- Stability : experimental +-- Portability : non-portable (requires STM) +-- +-- TMVar: Transactional MVars, for use in the STM monad +-- (GHC only) +-- +----------------------------------------------------------------------------- + +module Control.Concurrent.STM.TMVar ( +#ifdef __GLASGOW_HASKELL__ + -- * TMVars + TMVar, + newTMVar, + newEmptyTMVar, + newTMVarIO, + newEmptyTMVarIO, + takeTMVar, + putTMVar, + readTMVar, + tryReadTMVar, + swapTMVar, + tryTakeTMVar, + tryPutTMVar, + isEmptyTMVar, + mkWeakTMVar +#endif + ) where + +#ifdef __GLASGOW_HASKELL__ +import GHC.Base +import GHC.Conc +import GHC.Weak + +import Data.Typeable (Typeable) + +newtype TMVar a = TMVar (TVar (Maybe a)) deriving (Eq, Typeable) +{- ^ +A 'TMVar' is a synchronising variable, used +for communication between concurrent threads. It can be thought of +as a box, which may be empty or full. +-} + +-- |Create a 'TMVar' which contains the supplied value. 
+newTMVar :: a -> STM (TMVar a) +newTMVar a = do + t <- newTVar (Just a) + return (TMVar t) + +-- |@IO@ version of 'newTMVar'. This is useful for creating top-level +-- 'TMVar's using 'System.IO.Unsafe.unsafePerformIO', because using +-- 'atomically' inside 'System.IO.Unsafe.unsafePerformIO' isn't +-- possible. +newTMVarIO :: a -> IO (TMVar a) +newTMVarIO a = do + t <- newTVarIO (Just a) + return (TMVar t) + +-- |Create a 'TMVar' which is initially empty. +newEmptyTMVar :: STM (TMVar a) +newEmptyTMVar = do + t <- newTVar Nothing + return (TMVar t) + +-- |@IO@ version of 'newEmptyTMVar'. This is useful for creating top-level +-- 'TMVar's using 'System.IO.Unsafe.unsafePerformIO', because using +-- 'atomically' inside 'System.IO.Unsafe.unsafePerformIO' isn't +-- possible. +newEmptyTMVarIO :: IO (TMVar a) +newEmptyTMVarIO = do + t <- newTVarIO Nothing + return (TMVar t) + +-- |Return the contents of the 'TMVar'. If the 'TMVar' is currently +-- empty, the transaction will 'retry'. After a 'takeTMVar', +-- the 'TMVar' is left empty. +takeTMVar :: TMVar a -> STM a +takeTMVar (TMVar t) = do + m <- readTVar t + case m of + Nothing -> retry + Just a -> do writeTVar t Nothing; return a + +-- | A version of 'takeTMVar' that does not 'retry'. The 'tryTakeTMVar' +-- function returns 'Nothing' if the 'TMVar' was empty, or @'Just' a@ if +-- the 'TMVar' was full with contents @a@. After 'tryTakeTMVar', the +-- 'TMVar' is left empty. +tryTakeTMVar :: TMVar a -> STM (Maybe a) +tryTakeTMVar (TMVar t) = do + m <- readTVar t + case m of + Nothing -> return Nothing + Just a -> do writeTVar t Nothing; return (Just a) + +-- |Put a value into a 'TMVar'. If the 'TMVar' is currently full, +-- 'putTMVar' will 'retry'. +putTMVar :: TMVar a -> a -> STM () +putTMVar (TMVar t) a = do + m <- readTVar t + case m of + Nothing -> do writeTVar t (Just a); return () + Just _ -> retry + +-- | A version of 'putTMVar' that does not 'retry'. 
The 'tryPutTMVar' +-- function attempts to put the value @a@ into the 'TMVar', returning +-- 'True' if it was successful, or 'False' otherwise. +tryPutTMVar :: TMVar a -> a -> STM Bool +tryPutTMVar (TMVar t) a = do + m <- readTVar t + case m of + Nothing -> do writeTVar t (Just a); return True + Just _ -> return False + +-- | This is a combination of 'takeTMVar' and 'putTMVar'; ie. it +-- takes the value from the 'TMVar', puts it back, and also returns +-- it. +readTMVar :: TMVar a -> STM a +readTMVar (TMVar t) = do + m <- readTVar t + case m of + Nothing -> retry + Just a -> return a + +-- | A version of 'readTMVar' which does not retry. Instead it +-- returns @Nothing@ if no value is available. +tryReadTMVar :: TMVar a -> STM (Maybe a) +tryReadTMVar (TMVar t) = readTVar t + +-- |Swap the contents of a 'TMVar' for a new value. +swapTMVar :: TMVar a -> a -> STM a +swapTMVar (TMVar t) new = do + m <- readTVar t + case m of + Nothing -> retry + Just old -> do writeTVar t (Just new); return old + +-- |Check whether a given 'TMVar' is empty. +isEmptyTMVar :: TMVar a -> STM Bool +isEmptyTMVar (TMVar t) = do + m <- readTVar t + case m of + Nothing -> return True + Just _ -> return False + +-- | Make a 'Weak' pointer to a 'TMVar', using the second argument as +-- a finalizer to run when the 'TMVar' is garbage-collected. 
+-- +-- @since 2.4.4 +mkWeakTMVar :: TMVar a -> IO () -> IO (Weak (TMVar a)) +mkWeakTMVar tmv@(TMVar (TVar t#)) (IO finalizer) = IO $ \s -> + case mkWeak# t# tmv finalizer s of (# s1, w #) -> (# s1, Weak w #) +#endif diff -Nru cabal-install-1.22-1.22.6.0/src/stm-2.4.4.1/Control/Concurrent/STM/TQueue.hs cabal-install-1.22-1.22.9.0/src/stm-2.4.4.1/Control/Concurrent/STM/TQueue.hs --- cabal-install-1.22-1.22.6.0/src/stm-2.4.4.1/Control/Concurrent/STM/TQueue.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/stm-2.4.4.1/Control/Concurrent/STM/TQueue.hs 2015-12-22 12:53:15.000000000 +0000 @@ -0,0 +1,140 @@ +{-# OPTIONS_GHC -fno-warn-name-shadowing #-} +{-# LANGUAGE CPP, DeriveDataTypeable #-} + +#if __GLASGOW_HASKELL__ >= 701 +{-# LANGUAGE Trustworthy #-} +#endif + +----------------------------------------------------------------------------- +-- | +-- Module : Control.Concurrent.STM.TQueue +-- Copyright : (c) The University of Glasgow 2012 +-- License : BSD-style (see the file libraries/base/LICENSE) +-- +-- Maintainer : libraries@haskell.org +-- Stability : experimental +-- Portability : non-portable (requires STM) +-- +-- A 'TQueue' is like a 'TChan', with two important differences: +-- +-- * it has faster throughput than both 'TChan' and 'Chan' (although +-- the costs are amortised, so the cost of individual operations +-- can vary a lot). +-- +-- * it does /not/ provide equivalents of the 'dupTChan' and +-- 'cloneTChan' operations. +-- +-- The implementation is based on the traditional purely-functional +-- queue representation that uses two lists to obtain amortised /O(1)/ +-- enqueue and dequeue operations. 
+-- +-- @since 2.4 +----------------------------------------------------------------------------- + +module Control.Concurrent.STM.TQueue ( + -- * TQueue + TQueue, + newTQueue, + newTQueueIO, + readTQueue, + tryReadTQueue, + peekTQueue, + tryPeekTQueue, + writeTQueue, + unGetTQueue, + isEmptyTQueue, + ) where + +import GHC.Conc + +import Data.Typeable (Typeable) + +-- | 'TQueue' is an abstract type representing an unbounded FIFO channel. +-- +-- @since 2.4 +data TQueue a = TQueue {-# UNPACK #-} !(TVar [a]) + {-# UNPACK #-} !(TVar [a]) + deriving Typeable + +instance Eq (TQueue a) where + TQueue a _ == TQueue b _ = a == b + +-- |Build and returns a new instance of 'TQueue' +newTQueue :: STM (TQueue a) +newTQueue = do + read <- newTVar [] + write <- newTVar [] + return (TQueue read write) + +-- |@IO@ version of 'newTQueue'. This is useful for creating top-level +-- 'TQueue's using 'System.IO.Unsafe.unsafePerformIO', because using +-- 'atomically' inside 'System.IO.Unsafe.unsafePerformIO' isn't +-- possible. +newTQueueIO :: IO (TQueue a) +newTQueueIO = do + read <- newTVarIO [] + write <- newTVarIO [] + return (TQueue read write) + +-- |Write a value to a 'TQueue'. +writeTQueue :: TQueue a -> a -> STM () +writeTQueue (TQueue _read write) a = do + listend <- readTVar write + writeTVar write (a:listend) + +-- |Read the next value from the 'TQueue'. +readTQueue :: TQueue a -> STM a +readTQueue (TQueue read write) = do + xs <- readTVar read + case xs of + (x:xs') -> do writeTVar read xs' + return x + [] -> do ys <- readTVar write + case ys of + [] -> retry + _ -> case reverse ys of + [] -> error "readTQueue" + (z:zs) -> do writeTVar write [] + writeTVar read zs + return z + +-- | A version of 'readTQueue' which does not retry. Instead it +-- returns @Nothing@ if no value is available. 
+tryReadTQueue :: TQueue a -> STM (Maybe a) +tryReadTQueue c = fmap Just (readTQueue c) `orElse` return Nothing + +-- | Get the next value from the @TQueue@ without removing it, +-- retrying if the channel is empty. +peekTQueue :: TQueue a -> STM a +peekTQueue c = do + x <- readTQueue c + unGetTQueue c x + return x + +-- | A version of 'peekTQueue' which does not retry. Instead it +-- returns @Nothing@ if no value is available. +tryPeekTQueue :: TQueue a -> STM (Maybe a) +tryPeekTQueue c = do + m <- tryReadTQueue c + case m of + Nothing -> return Nothing + Just x -> do + unGetTQueue c x + return m + +-- |Put a data item back onto a channel, where it will be the next item read. +unGetTQueue :: TQueue a -> a -> STM () +unGetTQueue (TQueue read _write) a = do + xs <- readTVar read + writeTVar read (a:xs) + +-- |Returns 'True' if the supplied 'TQueue' is empty. +isEmptyTQueue :: TQueue a -> STM Bool +isEmptyTQueue (TQueue read write) = do + xs <- readTVar read + case xs of + (_:_) -> return False + [] -> do ys <- readTVar write + case ys of + [] -> return True + _ -> return False diff -Nru cabal-install-1.22-1.22.6.0/src/stm-2.4.4.1/Control/Concurrent/STM/TSem.hs cabal-install-1.22-1.22.9.0/src/stm-2.4.4.1/Control/Concurrent/STM/TSem.hs --- cabal-install-1.22-1.22.6.0/src/stm-2.4.4.1/Control/Concurrent/STM/TSem.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/stm-2.4.4.1/Control/Concurrent/STM/TSem.hs 2015-12-22 12:53:15.000000000 +0000 @@ -0,0 +1,55 @@ +----------------------------------------------------------------------------- +-- | +-- Module : Control.Concurrent.STM.TSem +-- Copyright : (c) The University of Glasgow 2012 +-- License : BSD-style (see the file libraries/base/LICENSE) +-- +-- Maintainer : libraries@haskell.org +-- Stability : experimental +-- Portability : non-portable (requires STM) +-- +-- 'TSem': transactional semaphores. 
+-- +-- @since 2.4.2 +----------------------------------------------------------------------------- + +{-# LANGUAGE DeriveDataTypeable #-} +module Control.Concurrent.STM.TSem ( + TSem, newTSem, waitTSem, signalTSem + ) where + +import Control.Concurrent.STM +import Control.Monad +import Data.Typeable + +-- | 'TSem' is a transactional semaphore. It holds a certain number +-- of units, and units may be acquired or released by 'waitTSem' and +-- 'signalTSem' respectively. When the 'TSem' is empty, 'waitTSem' +-- blocks. +-- +-- Note that 'TSem' has no concept of fairness, and there is no +-- guarantee that threads blocked in `waitTSem` will be unblocked in +-- the same order; in fact they will all be unblocked at the same time +-- and will fight over the 'TSem'. Hence 'TSem' is not suitable if +-- you expect there to be a high number of threads contending for the +-- resource. However, like other STM abstractions, 'TSem' is +-- composable. +-- +-- @since 2.4.2 +newtype TSem = TSem (TVar Int) + deriving (Eq, Typeable) + +newTSem :: Int -> STM TSem +newTSem i = fmap TSem (newTVar i) + +waitTSem :: TSem -> STM () +waitTSem (TSem t) = do + i <- readTVar t + when (i <= 0) retry + writeTVar t $! (i-1) + +signalTSem :: TSem -> STM () +signalTSem (TSem t) = do + i <- readTVar t + writeTVar t $! 
i+1 + diff -Nru cabal-install-1.22-1.22.6.0/src/stm-2.4.4.1/Control/Concurrent/STM/TVar.hs cabal-install-1.22-1.22.9.0/src/stm-2.4.4.1/Control/Concurrent/STM/TVar.hs --- cabal-install-1.22-1.22.6.0/src/stm-2.4.4.1/Control/Concurrent/STM/TVar.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/stm-2.4.4.1/Control/Concurrent/STM/TVar.hs 2015-12-22 12:53:15.000000000 +0000 @@ -0,0 +1,80 @@ +{-# LANGUAGE CPP, MagicHash, UnboxedTuples #-} + +#if __GLASGOW_HASKELL__ >= 701 +{-# LANGUAGE Trustworthy #-} +#endif + +----------------------------------------------------------------------------- +-- | +-- Module : Control.Concurrent.STM.TVar +-- Copyright : (c) The University of Glasgow 2004 +-- License : BSD-style (see the file libraries/base/LICENSE) +-- +-- Maintainer : libraries@haskell.org +-- Stability : experimental +-- Portability : non-portable (requires STM) +-- +-- TVar: Transactional variables +-- +----------------------------------------------------------------------------- + +module Control.Concurrent.STM.TVar ( + -- * TVars + TVar, + newTVar, + newTVarIO, + readTVar, + readTVarIO, + writeTVar, + modifyTVar, + modifyTVar', + swapTVar, +#ifdef __GLASGOW_HASKELL__ + registerDelay, +#endif + mkWeakTVar + ) where + +#ifdef __GLASGOW_HASKELL__ +import GHC.Base +import GHC.Conc +import GHC.Weak +#else +import Control.Sequential.STM +#endif + +-- Like 'modifyIORef' but for 'TVar'. +-- | Mutate the contents of a 'TVar'. /N.B./, this version is +-- non-strict. +modifyTVar :: TVar a -> (a -> a) -> STM () +modifyTVar var f = do + x <- readTVar var + writeTVar var (f x) +{-# INLINE modifyTVar #-} + + +-- | Strict version of 'modifyTVar'. +modifyTVar' :: TVar a -> (a -> a) -> STM () +modifyTVar' var f = do + x <- readTVar var + writeTVar var $! f x +{-# INLINE modifyTVar' #-} + + +-- Like 'swapTMVar' but for 'TVar'. +-- | Swap the contents of a 'TVar' for a new value. 
+swapTVar :: TVar a -> a -> STM a +swapTVar var new = do + old <- readTVar var + writeTVar var new + return old +{-# INLINE swapTVar #-} + + +-- | Make a 'Weak' pointer to a 'TVar', using the second argument as +-- a finalizer to run when 'TVar' is garbage-collected +-- +-- @since 2.4.3 +mkWeakTVar :: TVar a -> IO () -> IO (Weak (TVar a)) +mkWeakTVar t@(TVar t#) (IO finalizer) = IO $ \s -> + case mkWeak# t# t finalizer s of (# s1, w #) -> (# s1, Weak w #) diff -Nru cabal-install-1.22-1.22.6.0/src/stm-2.4.4.1/Control/Concurrent/STM.hs cabal-install-1.22-1.22.9.0/src/stm-2.4.4.1/Control/Concurrent/STM.hs --- cabal-install-1.22-1.22.6.0/src/stm-2.4.4.1/Control/Concurrent/STM.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/stm-2.4.4.1/Control/Concurrent/STM.hs 2015-12-22 12:53:15.000000000 +0000 @@ -0,0 +1,49 @@ +{-# LANGUAGE CPP #-} + +#if __GLASGOW_HASKELL__ >= 709 +{-# LANGUAGE Safe #-} +#elif __GLASGOW_HASKELL__ >= 701 +{-# LANGUAGE Trustworthy #-} +#endif + +----------------------------------------------------------------------------- +-- | +-- Module : Control.Concurrent.STM +-- Copyright : (c) The University of Glasgow 2004 +-- License : BSD-style (see the file libraries/base/LICENSE) +-- +-- Maintainer : libraries@haskell.org +-- Stability : experimental +-- Portability : non-portable (requires STM) +-- +-- Software Transactional Memory: a modular composable concurrency +-- abstraction. See +-- +-- * /Composable memory transactions/, by Tim Harris, Simon Marlow, Simon +-- Peyton Jones, and Maurice Herlihy, in /ACM Conference on Principles +-- and Practice of Parallel Programming/ 2005. 
+-- +-- +----------------------------------------------------------------------------- + +module Control.Concurrent.STM ( + module Control.Monad.STM, + module Control.Concurrent.STM.TVar, +#ifdef __GLASGOW_HASKELL__ + module Control.Concurrent.STM.TMVar, + module Control.Concurrent.STM.TChan, + module Control.Concurrent.STM.TQueue, + module Control.Concurrent.STM.TBQueue, +#endif + module Control.Concurrent.STM.TArray + ) where + +import Control.Monad.STM +import Control.Concurrent.STM.TVar +#ifdef __GLASGOW_HASKELL__ +import Control.Concurrent.STM.TMVar +import Control.Concurrent.STM.TChan +#endif +import Control.Concurrent.STM.TArray +import Control.Concurrent.STM.TQueue +import Control.Concurrent.STM.TBQueue diff -Nru cabal-install-1.22-1.22.6.0/src/stm-2.4.4.1/Control/Monad/STM.hs cabal-install-1.22-1.22.9.0/src/stm-2.4.4.1/Control/Monad/STM.hs --- cabal-install-1.22-1.22.6.0/src/stm-2.4.4.1/Control/Monad/STM.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/stm-2.4.4.1/Control/Monad/STM.hs 2015-12-22 12:53:15.000000000 +0000 @@ -0,0 +1,126 @@ +{-# OPTIONS_GHC -fno-warn-orphans #-} +{-# LANGUAGE CPP, MagicHash, UnboxedTuples #-} + +#if __GLASGOW_HASKELL__ >= 701 +{-# LANGUAGE Trustworthy #-} +#endif + +----------------------------------------------------------------------------- +-- | +-- Module : Control.Monad.STM +-- Copyright : (c) The University of Glasgow 2004 +-- License : BSD-style (see the file libraries/base/LICENSE) +-- +-- Maintainer : libraries@haskell.org +-- Stability : experimental +-- Portability : non-portable (requires STM) +-- +-- Software Transactional Memory: a modular composable concurrency +-- abstraction. See +-- +-- * /Composable memory transactions/, by Tim Harris, Simon Marlow, Simon +-- Peyton Jones, and Maurice Herlihy, in /ACM Conference on Principles +-- and Practice of Parallel Programming/ 2005. 
+-- +-- +-- This module only defines the 'STM' monad; you probably want to +-- import "Control.Concurrent.STM" (which exports "Control.Monad.STM"). +----------------------------------------------------------------------------- + +module Control.Monad.STM ( + STM, + atomically, +#ifdef __GLASGOW_HASKELL__ + always, + alwaysSucceeds, + retry, + orElse, + check, +#endif + throwSTM, + catchSTM + ) where + +#ifdef __GLASGOW_HASKELL__ +#if ! (MIN_VERSION_base(4,3,0)) +import GHC.Conc hiding (catchSTM) +import Control.Monad ( MonadPlus(..) ) +import Control.Exception +#else +import GHC.Conc +#endif +import GHC.Exts +import Control.Monad.Fix +#else +import Control.Sequential.STM +#endif + +#ifdef __GLASGOW_HASKELL__ +#if ! (MIN_VERSION_base(4,3,0)) +import Control.Applicative +import Control.Monad (ap) +#endif +#endif + + +#ifdef __GLASGOW_HASKELL__ +#if ! (MIN_VERSION_base(4,3,0)) +instance MonadPlus STM where + mzero = retry + mplus = orElse + +instance Applicative STM where + pure = return + (<*>) = ap + +instance Alternative STM where + empty = retry + (<|>) = orElse +#endif + +check :: Bool -> STM () +check b = if b then return () else retry +#endif + +#if ! (MIN_VERSION_base(4,3,0)) +-- |Exception handling within STM actions. +catchSTM :: Exception e => STM a -> (e -> STM a) -> STM a +catchSTM (STM m) handler = STM $ catchSTM# m handler' + where + handler' e = case fromException e of + Just e' -> case handler e' of STM m' -> m' + Nothing -> raiseIO# e + +-- | A variant of 'throw' that can only be used within the 'STM' monad. +-- +-- Throwing an exception in @STM@ aborts the transaction and propagates the +-- exception. +-- +-- Although 'throwSTM' has a type that is an instance of the type of 'throw', the +-- two functions are subtly different: +-- +-- > throw e `seq` x ===> throw e +-- > throwSTM e `seq` x ===> x +-- +-- The first example will cause the exception @e@ to be raised, +-- whereas the second one won\'t. 
In fact, 'throwSTM' will only cause +-- an exception to be raised when it is used within the 'STM' monad. +-- The 'throwSTM' variant should be used in preference to 'throw' to +-- raise an exception within the 'STM' monad because it guarantees +-- ordering with respect to other 'STM' operations, whereas 'throw' +-- does not. +throwSTM :: Exception e => e -> STM a +throwSTM e = STM $ raiseIO# (toException e) +#endif + + +data STMret a = STMret (State# RealWorld) a + +liftSTM :: STM a -> State# RealWorld -> STMret a +liftSTM (STM m) = \s -> case m s of (# s', r #) -> STMret s' r + +instance MonadFix STM where + mfix k = STM $ \s -> + let ans = liftSTM (k r) s + STMret _ r = ans + in case ans of STMret s' x -> (# s', x #) diff -Nru cabal-install-1.22-1.22.6.0/src/stm-2.4.4.1/Control/Sequential/STM.hs cabal-install-1.22-1.22.9.0/src/stm-2.4.4.1/Control/Sequential/STM.hs --- cabal-install-1.22-1.22.6.0/src/stm-2.4.4.1/Control/Sequential/STM.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/stm-2.4.4.1/Control/Sequential/STM.hs 2015-12-22 12:53:15.000000000 +0000 @@ -0,0 +1,93 @@ +-- Transactional memory for sequential implementations. +-- Transactions do not run concurrently, but are atomic in the face +-- of exceptions. 
+ +{-# LANGUAGE CPP #-} + +#if __GLASGOW_HASKELL__ >= 709 +{-# LANGUAGE Safe #-} +#elif __GLASGOW_HASKELL__ >= 701 +{-# LANGUAGE Trustworthy #-} +#endif + +-- #hide +module Control.Sequential.STM ( + STM, atomically, throwSTM, catchSTM, + TVar, newTVar, newTVarIO, readTVar, readTVarIO, writeTVar + ) where + +#if __GLASGOW_HASKELL__ < 705 +import Prelude hiding (catch) +#endif +#if __GLASGOW_HASKELL__ < 709 +import Control.Applicative (Applicative(pure, (<*>))) +#endif +import Control.Exception +import Data.IORef + +-- The reference contains a rollback action to be executed on exceptions +newtype STM a = STM (IORef (IO ()) -> IO a) + +unSTM :: STM a -> IORef (IO ()) -> IO a +unSTM (STM f) = f + +instance Functor STM where + fmap f (STM m) = STM (fmap f . m) + +instance Applicative STM where + pure = STM . const . pure + STM mf <*> STM mx = STM $ \ r -> mf r <*> mx r + +instance Monad STM where + return = pure + STM m >>= k = STM $ \ r -> do + x <- m r + unSTM (k x) r + +atomically :: STM a -> IO a +atomically (STM m) = do + r <- newIORef (return ()) + m r `onException` do + rollback <- readIORef r + rollback + +throwSTM :: Exception e => e -> STM a +throwSTM = STM . const . 
throwIO + +catchSTM :: Exception e => STM a -> (e -> STM a) -> STM a +catchSTM (STM m) h = STM $ \ r -> do + old_rollback <- readIORef r + writeIORef r (return ()) + res <- try (m r) + rollback_m <- readIORef r + case res of + Left ex -> do + rollback_m + writeIORef r old_rollback + unSTM (h ex) r + Right a -> do + writeIORef r (rollback_m >> old_rollback) + return a + +newtype TVar a = TVar (IORef a) + deriving (Eq) + +newTVar :: a -> STM (TVar a) +newTVar a = STM (const (newTVarIO a)) + +newTVarIO :: a -> IO (TVar a) +newTVarIO a = do + ref <- newIORef a + return (TVar ref) + +readTVar :: TVar a -> STM a +readTVar (TVar ref) = STM (const (readIORef ref)) + +readTVarIO :: TVar a -> IO a +readTVarIO (TVar ref) = readIORef ref + +writeTVar :: TVar a -> a -> STM () +writeTVar (TVar ref) a = STM $ \ r -> do + oldval <- readIORef ref + modifyIORef r (writeIORef ref oldval >>) + writeIORef ref a diff -Nru cabal-install-1.22-1.22.6.0/src/stm-2.4.4.1/LICENSE cabal-install-1.22-1.22.9.0/src/stm-2.4.4.1/LICENSE --- cabal-install-1.22-1.22.6.0/src/stm-2.4.4.1/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/stm-2.4.4.1/LICENSE 2015-12-22 12:53:15.000000000 +0000 @@ -0,0 +1,31 @@ +The Glasgow Haskell Compiler License + +Copyright 2004, The University Court of the University of Glasgow. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +- Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +- Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. 
+ +- Neither name of the University nor the names of its contributors may be +used to endorse or promote products derived from this software without +specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY COURT OF THE UNIVERSITY OF +GLASGOW AND THE CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +UNIVERSITY COURT OF THE UNIVERSITY OF GLASGOW OR THE CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. diff -Nru cabal-install-1.22-1.22.6.0/src/stm-2.4.4.1/Setup.hs cabal-install-1.22-1.22.9.0/src/stm-2.4.4.1/Setup.hs --- cabal-install-1.22-1.22.6.0/src/stm-2.4.4.1/Setup.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/stm-2.4.4.1/Setup.hs 2015-12-22 12:53:15.000000000 +0000 @@ -0,0 +1,6 @@ +module Main (main) where + +import Distribution.Simple + +main :: IO () +main = defaultMain diff -Nru cabal-install-1.22-1.22.6.0/src/stm-2.4.4.1/stm.cabal cabal-install-1.22-1.22.9.0/src/stm-2.4.4.1/stm.cabal --- cabal-install-1.22-1.22.6.0/src/stm-2.4.4.1/stm.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/stm-2.4.4.1/stm.cabal 2016-06-02 07:15:41.000000000 +0000 @@ -0,0 +1,53 @@ +name: stm +version: 2.4.4.1 +-- don't forget to update changelog.md file! 
+license: BSD3 +license-file: LICENSE +maintainer: libraries@haskell.org +bug-reports: https://ghc.haskell.org/trac/ghc/newticket?component=libraries%20%28other%29&keywords=stm +synopsis: Software Transactional Memory +category: Concurrency +description: A modular composable concurrency abstraction. +build-type: Simple +cabal-version: >=1.10 +tested-with: GHC==7.10.*, GHC==7.8.*, GHC==7.6.*, GHC==7.4.*, GHC==7.2.*, GHC==7.0.* + +extra-source-files: + changelog.md + +source-repository head + type: git + location: http://git.haskell.org/packages/stm.git + +library + default-language: Haskell2010 + other-extensions: + CPP + DeriveDataTypeable + FlexibleInstances + MagicHash + MultiParamTypeClasses + UnboxedTuples + if impl(ghc >= 7.2) + other-extensions: Trustworthy + if impl(ghc >= 7.9) + other-extensions: Safe + + build-depends: + base >= 4.3 && < 4.10, + array >= 0.3 && < 0.6 + + exposed-modules: + Control.Concurrent.STM + Control.Concurrent.STM.TArray + Control.Concurrent.STM.TVar + Control.Concurrent.STM.TChan + Control.Concurrent.STM.TMVar + Control.Concurrent.STM.TQueue + Control.Concurrent.STM.TBQueue + Control.Concurrent.STM.TSem + Control.Monad.STM + other-modules: + Control.Sequential.STM + + ghc-options: -Wall diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/cbits/time_iconv.c cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/cbits/time_iconv.c --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/cbits/time_iconv.c 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/cbits/time_iconv.c 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,35 @@ +#include +#include +#include +#include + +int time_iconv(char *srcbuf, size_t srcbufsize) +{ + uint16_t *destbuf = NULL; + size_t destbufsize; + static uint16_t *origdestbuf; + static size_t origdestbufsize; + iconv_t ic = (iconv_t) -1; + int ret = 0; + + if (ic == (iconv_t) -1) { + ic = iconv_open("UTF-16LE", "UTF-8"); + if (ic == (iconv_t) -1) { + 
ret = -1; + goto done; + } + } + + destbufsize = srcbufsize * sizeof(uint16_t); + if (destbufsize > origdestbufsize) { + free(origdestbuf); + origdestbuf = destbuf = malloc(origdestbufsize = destbufsize); + } else { + destbuf = origdestbuf; + } + + iconv(ic, &srcbuf, &srcbufsize, (char**) &destbuf, &destbufsize); + + done: + return ret; +} diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Builder.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Builder.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Builder.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Builder.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,75 @@ +-- | Testing the internal builder monoid +-- +-- Tested in this benchmark: +-- +-- * Concatenating many small strings using a builder +-- +{-# LANGUAGE OverloadedStrings #-} +module Benchmarks.Builder + ( benchmark + ) where + +import Criterion (Benchmark, bgroup, bench, nf) +import Data.Binary.Builder as B +import Data.ByteString.Char8 () +import Data.Monoid (mconcat, mempty) +import qualified Blaze.ByteString.Builder as Blaze +import qualified Blaze.ByteString.Builder.Char.Utf8 as Blaze +import qualified Data.ByteString as SB +import qualified Data.ByteString.Lazy as LB +import qualified Data.Text as T +import qualified Data.Text.Lazy as LT +import qualified Data.Text.Lazy.Builder as LTB +import qualified Data.Text.Lazy.Builder.Int as Int +import Data.Int (Int64) + +benchmark :: IO Benchmark +benchmark = return $ bgroup "Builder" + [ bgroup "Comparison" + [ bench "LazyText" $ nf + (LT.length . LTB.toLazyText . mconcat . map LTB.fromText) texts + , bench "Binary" $ nf + (LB.length . B.toLazyByteString . mconcat . map B.fromByteString) + byteStrings + , bench "Blaze" $ nf + (LB.length . Blaze.toLazyByteString . mconcat . 
map Blaze.fromString) + strings + ] + , bgroup "Int" + [ bgroup "Decimal" + [ bgroup "Positive" . + flip map numbers $ \n -> + (bench (show (length (show n))) $ nf (LTB.toLazyText . Int.decimal) n) + , bgroup "Negative" . + flip map numbers $ \m -> + let n = negate m in + (bench (show (length (show n))) $ nf (LTB.toLazyText . Int.decimal) n) + , bench "Empty" $ nf LTB.toLazyText mempty + , bgroup "Show" . + flip map numbers $ \n -> + (bench (show (length (show n))) $ nf show n) + ] + ] + ] + where + numbers :: [Int64] + numbers = [ + 6, 14, 500, 9688, 10654, 620735, 5608880, 37010612, + 731223504, 5061580596, 24596952933, 711732309084, 2845910093839, + 54601756118340, 735159434806159, 3619097625502435, 95777227510267124, + 414944309510675693, 8986407456998704019 + ] + +texts :: [T.Text] +texts = take 200000 $ cycle ["foo", "λx", "由の"] +{-# NOINLINE texts #-} + +-- Note that the non-ascii characters will be chopped +byteStrings :: [SB.ByteString] +byteStrings = take 200000 $ cycle ["foo", "λx", "由の"] +{-# NOINLINE byteStrings #-} + +-- Note that the non-ascii characters will be chopped +strings :: [String] +strings = take 200000 $ cycle ["foo", "λx", "由の"] +{-# NOINLINE strings #-} diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/DecodeUtf8.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/DecodeUtf8.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/DecodeUtf8.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/DecodeUtf8.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,67 @@ +{-# LANGUAGE ForeignFunctionInterface #-} + +-- | Test decoding of UTF-8 +-- +-- Tested in this benchmark: +-- +-- * Decoding bytes using UTF-8 +-- +-- In some tests: +-- +-- * Taking the length of the result +-- +-- * Taking the init of the result +-- +-- The latter are used for testing stream fusion. 
+-- +module Benchmarks.DecodeUtf8 + ( benchmark + ) where + +import Foreign.C.Types +import Data.ByteString.Internal (ByteString(..)) +import Data.ByteString.Lazy.Internal (ByteString(..)) +import Foreign.Ptr (Ptr, plusPtr) +import Foreign.ForeignPtr (withForeignPtr) +import Data.Word (Word8) +import qualified Criterion as C +import Criterion (Benchmark, bgroup, nf, whnfIO) +import qualified Codec.Binary.UTF8.Generic as U8 +import qualified Data.ByteString as B +import qualified Data.ByteString.Lazy as BL +import qualified Data.Text as T +import qualified Data.Text.Encoding as T +import qualified Data.Text.Lazy as TL +import qualified Data.Text.Lazy.Encoding as TL + +benchmark :: String -> FilePath -> IO Benchmark +benchmark kind fp = do + bs <- B.readFile fp + lbs <- BL.readFile fp + let bench name = C.bench (name ++ "+" ++ kind) + decodeStream (Chunk b0 bs0) = case T.streamDecodeUtf8 b0 of + T.Some t0 _ f0 -> t0 : go f0 bs0 + where go f (Chunk b bs1) = case f b of + T.Some t1 _ f1 -> t1 : go f1 bs1 + go _ _ = [] + decodeStream _ = [] + return $ bgroup "DecodeUtf8" + [ bench "Strict" $ nf T.decodeUtf8 bs + , bench "Stream" $ nf decodeStream lbs + , bench "IConv" $ whnfIO $ iconv bs + , bench "StrictLength" $ nf (T.length . T.decodeUtf8) bs + , bench "StrictInitLength" $ nf (T.length . T.init . T.decodeUtf8) bs + , bench "Lazy" $ nf TL.decodeUtf8 lbs + , bench "LazyLength" $ nf (TL.length . TL.decodeUtf8) lbs + , bench "LazyInitLength" $ nf (TL.length . TL.init . TL.decodeUtf8) lbs + , bench "StrictStringUtf8" $ nf U8.toString bs + , bench "StrictStringUtf8Length" $ nf (length . U8.toString) bs + , bench "LazyStringUtf8" $ nf U8.toString lbs + , bench "LazyStringUtf8Length" $ nf (length . 
U8.toString) lbs + ] + +iconv :: B.ByteString -> IO CInt +iconv (PS fp off len) = withForeignPtr fp $ \ptr -> + time_iconv (ptr `plusPtr` off) (fromIntegral len) + +foreign import ccall unsafe time_iconv :: Ptr Word8 -> CSize -> IO CInt diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/EncodeUtf8.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/EncodeUtf8.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/EncodeUtf8.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/EncodeUtf8.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,33 @@ +-- | UTF-8 encode a text +-- +-- Tested in this benchmark: +-- +-- * Replicating a string a number of times +-- +-- * UTF-8 encoding it +-- +module Benchmarks.EncodeUtf8 + ( benchmark + ) where + +import Criterion (Benchmark, bgroup, bench, whnf) +import qualified Data.ByteString as B +import qualified Data.ByteString.Lazy as BL +import qualified Data.Text as T +import qualified Data.Text.Encoding as T +import qualified Data.Text.Lazy as TL +import qualified Data.Text.Lazy.Encoding as TL + +benchmark :: String -> IO Benchmark +benchmark string = do + return $ bgroup "EncodeUtf8" + [ bench "Text" $ whnf (B.length . T.encodeUtf8) text + , bench "LazyText" $ whnf (BL.length . 
TL.encodeUtf8) lazyText + ] + where + -- The string in different formats + text = T.replicate k $ T.pack string + lazyText = TL.replicate (fromIntegral k) $ TL.pack string + + -- Amount + k = 100000 diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Equality.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Equality.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Equality.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Equality.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,38 @@ +-- | Compare a string with a copy of itself that is identical except +-- for the last character. +-- +-- Tested in this benchmark: +-- +-- * Comparison of strings (Eq instance) +-- +module Benchmarks.Equality + ( benchmark + ) where + +import Criterion (Benchmark, bgroup, bench, whnf) +import qualified Data.ByteString.Char8 as B +import qualified Data.ByteString.Lazy.Char8 as BL +import qualified Data.Text as T +import qualified Data.Text.Encoding as T +import qualified Data.Text.Lazy as TL +import qualified Data.Text.Lazy.Encoding as TL + +benchmark :: FilePath -> IO Benchmark +benchmark fp = do + b <- B.readFile fp + bl1 <- BL.readFile fp + -- A lazy bytestring is a list of chunks. When we do not explicitly create two + -- different lazy bytestrings at a different address, the bytestring library + -- will compare the chunk addresses instead of the chunk contents. This is why + -- we read the lazy bytestring twice here. 
+ bl2 <- BL.readFile fp + l <- readFile fp + let t = T.decodeUtf8 b + tl = TL.decodeUtf8 bl1 + return $ bgroup "Equality" + [ bench "Text" $ whnf (== T.init t `T.snoc` '\xfffd') t + , bench "LazyText" $ whnf (== TL.init tl `TL.snoc` '\xfffd') tl + , bench "ByteString" $ whnf (== B.init b `B.snoc` '\xfffd') b + , bench "LazyByteString" $ whnf (== BL.init bl2 `BL.snoc` '\xfffd') bl1 + , bench "String" $ whnf (== init l ++ "\xfffd") l + ] diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/FileRead.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/FileRead.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/FileRead.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/FileRead.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,33 @@ +-- | Benchmarks simple file reading +-- +-- Tested in this benchmark: +-- +-- * Reading a file from the disk +-- +module Benchmarks.FileRead + ( benchmark + ) where + +import Control.Applicative ((<$>)) +import Criterion (Benchmark, bgroup, bench, whnfIO) +import qualified Data.ByteString as SB +import qualified Data.ByteString.Lazy as LB +import qualified Data.Text as T +import qualified Data.Text.Encoding as T +import qualified Data.Text.IO as T +import qualified Data.Text.Lazy as LT +import qualified Data.Text.Lazy.Encoding as LT +import qualified Data.Text.Lazy.IO as LT + +benchmark :: FilePath -> IO Benchmark +benchmark p = return $ bgroup "FileRead" + [ bench "String" $ whnfIO $ length <$> readFile p + , bench "ByteString" $ whnfIO $ SB.length <$> SB.readFile p + , bench "LazyByteString" $ whnfIO $ LB.length <$> LB.readFile p + , bench "Text" $ whnfIO $ T.length <$> T.readFile p + , bench "LazyText" $ whnfIO $ LT.length <$> LT.readFile p + , bench "TextByteString" $ whnfIO $ + (T.length . T.decodeUtf8) <$> SB.readFile p + , bench "LazyTextByteString" $ whnfIO $ + (LT.length . 
LT.decodeUtf8) <$> LB.readFile p + ] diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/FoldLines.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/FoldLines.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/FoldLines.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/FoldLines.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,58 @@ +-- | Read a file line-by-line using handles, and perform a fold over the lines. +-- The fold is used here to calculate the number of lines in the file. +-- +-- Tested in this benchmark: +-- +-- * Buffered, line-based IO +-- +{-# LANGUAGE BangPatterns #-} +module Benchmarks.FoldLines + ( benchmark + ) where + +import Criterion (Benchmark, bgroup, bench, whnfIO) +import System.IO +import qualified Data.ByteString as B +import qualified Data.Text as T +import qualified Data.Text.IO as T + +benchmark :: FilePath -> IO Benchmark +benchmark fp = return $ bgroup "ReadLines" + [ bench "Text" $ withHandle $ foldLinesT (\n _ -> n + 1) (0 :: Int) + , bench "ByteString" $ withHandle $ foldLinesB (\n _ -> n + 1) (0 :: Int) + ] + where + withHandle f = whnfIO $ do + h <- openFile fp ReadMode + hSetBuffering h (BlockBuffering (Just 16384)) + x <- f h + hClose h + return x + +-- | Text line fold +-- +foldLinesT :: (a -> T.Text -> a) -> a -> Handle -> IO a +foldLinesT f z0 h = go z0 + where + go !z = do + eof <- hIsEOF h + if eof + then return z + else do + l <- T.hGetLine h + let z' = f z l in go z' +{-# INLINE foldLinesT #-} + +-- | ByteString line fold +-- +foldLinesB :: (a -> B.ByteString -> a) -> a -> Handle -> IO a +foldLinesB f z0 h = go z0 + where + go !z = do + eof <- hIsEOF h + if eof + then return z + else do + l <- B.hGetLine h + let z' = f z l in go z' +{-# INLINE foldLinesB #-} diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Mul.hs 
cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Mul.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Mul.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Mul.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,138 @@ +module Benchmarks.Mul (benchmark) where + +import Control.Exception (evaluate) +import Criterion.Main +import Data.Int (Int32, Int64) +import Data.Text.Internal (mul32, mul64) +import qualified Data.Vector.Unboxed as U + +oldMul :: Int64 -> Int64 -> Int64 +oldMul m n + | n == 0 = 0 + | m <= maxBound `quot` n = m * n + | otherwise = error "overflow" + +benchmark :: IO Benchmark +benchmark = do + _ <- evaluate testVector32 + _ <- evaluate testVector64 + return $ bgroup "Mul" [ + bench "oldMul" $ whnf (U.map (uncurry oldMul)) testVector64 + , bench "mul64" $ whnf (U.map (uncurry mul64)) testVector64 + , bench "*64" $ whnf (U.map (uncurry (*))) testVector64 + , bench "mul32" $ whnf (U.map (uncurry mul32)) testVector32 + , bench "*32" $ whnf (U.map (uncurry (*))) testVector32 + ] + +testVector64 :: U.Vector (Int64,Int64) +testVector64 = U.fromList [ + (0,1248868987182846646),(169004623633872,24458),(482549039517835,7614), + (372,8157063115504364),(27,107095594861148252),(3,63249878517962420), + (4363,255694473572912),(86678474,1732634806),(1572453024,1800489338), + (9384523143,77053781),(49024709555,75095046),(7,43457620410239131), + (8,8201563008844571),(387719037,1520696708),(189869238220197,1423), + (46788016849611,23063),(503077742109974359,0),(104,1502010908706487), + (30478140346,207525518),(80961140129236192,14),(4283,368012829143675), + (1028719181728108146,6),(318904,5874863049591),(56724427166898,110794), + (234539368,31369110449),(2,251729663598178612),(103291548194451219,5), + (76013,5345328755566),(1769631,2980846129318),(40898,60598477385754), + (0,98931348893227155),(573555872156917492,3),(318821187115,4476566), + 
(11152874213584,243582),(40274276,16636653248),(127,4249988676030597), + (103543712111871836,5),(71,16954462148248238),(3963027173504,216570), + (13000,503523808916753),(17038308,20018685905),(0,510350226577891549), + (175898,3875698895405),(425299191292676,5651),(17223451323664536,50), + (61755131,14247665326),(0,1018195131697569303),(36433751497238985,20), + (3473607861601050,1837),(1392342328,1733971838),(225770297367,3249655), + (14,127545244155254102),(1751488975299136,2634),(3949208,504190668767), + (153329,831454434345),(1066212122928663658,2),(351224,2663633539556), + (344565,53388869217),(35825609350446863,54),(276011553660081475,10), + (1969754174790470349,3),(35,68088438338633),(506710,3247689556438), + (11099382291,327739909),(105787303549,32824363),(210366111,14759049409), + (688893241579,3102676),(8490,70047474429581),(152085,29923000251880), + (5046974599257095,400),(4183167795,263434071),(10089728,502781960687), + (44831977765,4725378),(91,8978094664238578),(30990165721,44053350), + (1772377,149651820860),(243420621763408572,4),(32,5790357453815138), + (27980806337993771,5),(47696295759774,20848),(1745874142313778,1098), + (46869334770121,1203),(886995283,1564424789),(40679396544,76002479), + (1,672849481568486995),(337656187205,3157069),(816980552858963,6003), + (2271434085804831543,1),(0,1934521023868747186),(6266220038281,15825), + (4160,107115946987394),(524,246808621791561),(0,1952519482439636339), + (128,2865935904539691),(1044,3211982069426297),(16000511542473,88922), + (1253596745404082,2226),(27041,56836278958002),(23201,49247489754471), + (175906590497,21252392),(185163584757182295,24),(34742225226802197,150), + (2363228,250824838408),(216327527109550,45),(24,81574076994520675), + (28559899906542,15356),(10890139774837133,511),(2293,707179303654492), + (2749366833,40703233),(0,4498229704622845986),(439,4962056468281937), + (662,1453820621089921),(16336770612459631,220),(24282989393,74239137), + 
(2724564648490195,3),(743672760,124992589),(4528103,704330948891), + (6050483122491561,250),(13322953,13594265152),(181794,22268101450214), + (25957941712,75384092),(43352,7322262295009),(32838,52609059549923), + (33003585202001564,2),(103019,68430142267402),(129918230800,8742978), + (0,2114347379589080688),(2548,905723041545274),(222745067962838382,0), + (1671683850790425181,1),(455,4836932776795684),(794227702827214,6620), + (212534135175874,1365),(96432431858,29784975),(466626763743380,3484), + (29793949,53041519613),(8359,309952753409844),(3908960585331901,26), + (45185288970365760,114),(10131829775,68110174),(58039242399640479,83), + (628092278238719399,6),(1,196469106875361889),(302336625,16347502444), + (148,3748088684181047),(1,1649096568849015456),(1019866864,2349753026), + (8211344830,569363306),(65647579546873,34753),(2340190,1692053129069), + (64263301,30758930355),(48681618072372209,110),(7074794736,47640197), + (249634721521,7991792),(1162917363807215,232),(7446433349,420634045), + (63398619383,60709817),(51359004508011,14200),(131788797028647,7072), + (52079887791430043,7),(7,136277667582599838),(28582879735696,50327), + (1404582800566278,833),(469164435,15017166943),(99567079957578263,49), + (1015285971,3625801566),(321504843,4104079293),(5196954,464515406632), + (114246832260876,7468),(8149664437,487119673),(12265299,378168974869), + (37711995764,30766513),(3971137243,710996152),(483120070302,603162), + (103009942,61645547145),(8476344625340,6987),(547948761229739,1446), + (42234,18624767306301),(13486714173011,58948),(4,198309153268019840), + (9913176974,325539248),(28246225540203,116822),(2882463945582154,18), + (959,25504987505398),(3,1504372236378217710),(13505229956793,374987), + (751661959,457611342),(27375926,36219151769),(482168869,5301952074), + (1,1577425863241520640),(714116235611821,1164),(904492524250310488,0), + (5983514941763398,68),(10759472423,23540686),(72539568471529,34919), + 
(4,176090672310337473),(938702842110356453,1),(673652445,3335287382), + (3111998893666122,917),(1568013,3168419765469)] + +testVector32 :: U.Vector (Int32,Int32) +testVector32 = U.fromList [ + (39242,410),(0,100077553),(2206,9538),(509400240,1),(38048,6368), + (1789,651480),(2399,157032),(701,170017),(5241456,14),(11212,70449), + (1,227804876),(749687254,1),(74559,2954),(1158,147957),(410604456,1), + (170851,1561),(92643422,1),(6192,180509),(7,24202210),(3440,241481), + (5753677,5),(294327,1622),(252,4454673),(127684121,11),(28315800,30), + (340370905,0),(1,667887987),(592782090,1),(49023,27641),(750,290387), + (72886,3847),(0,301047933),(3050276,473),(1,788366142),(59457,15813), + (637726933,1),(1135,344317),(853616,264),(696816,493),(7038,12046), + (125219574,4),(803694088,1),(107081726,1),(39294,21699),(16361,38191), + (132561123,12),(1760,23499),(847543,484),(175687349,1),(2963,252678), + (6248,224553),(27596,4606),(5422922,121),(1542,485890),(131,583035), + (59096,4925),(3637115,132),(0,947225435),(86854,6794),(2984745,339), + (760129569,1),(1,68260595),(380835652,2),(430575,2579),(54514,7211), + (15550606,3),(9,27367402),(3007053,207),(7060988,60),(28560,27130), + (1355,21087),(10880,53059),(14563646,4),(461886361,1),(2,169260724), + (241454126,2),(406797,1),(61631630,16),(44473,5943),(63869104,12), + (950300,1528),(2113,62333),(120817,9358),(100261456,1),(426764723,1), + (119,12723684),(3,53358711),(4448071,18),(1,230278091),(238,232102), + (8,57316440),(42437979,10),(6769,19555),(48590,22006),(11500585,79), + (2808,97638),(42,26952545),(11,32104194),(23954638,1),(785427272,0), + (513,81379),(31333960,37),(897772,1009),(4,25679692),(103027993,12), + (104972702,11),(546,443401),(7,65137092),(88574269,3),(872139069,0), + (2,97417121),(378802603,0),(141071401,4),(22613,10575),(2191743,118), + (470,116119),(7062,38166),(231056,1847),(43901963,9),(2400,70640), + (63553,1555),(34,11249573),(815174,1820),(997894011,0),(98881794,2), + 
(5448,43132),(27956,9),(904926,1357),(112608626,3),(124,613021), + (282086,1966),(99,10656881),(113799,1501),(433318,2085),(442,948171), + (165380,1043),(28,14372905),(14880,50462),(2386,219918),(229,1797565), + (1174961,298),(3925,41833),(3903515,299),(15690452,111),(360860521,3), + (7440846,81),(2541026,507),(0,492448477),(6869,82469),(245,8322939), + (3503496,253),(123495298,0),(150963,2299),(33,4408482),(1,200911107), + (305,252121),(13,123369189),(215846,8181),(2440,65387),(776764401,1), + (1241172,434),(8,15493155),(81953961,6),(17884993,5),(26,6893822), + (0,502035190),(1,582451018),(2,514870139),(227,3625619),(49,12720258), + (1456769,207),(94797661,10),(234407,893),(26843,5783),(15688,24547), + (4091,86268),(4339448,151),(21360,6294),(397046497,2),(1227,205936), + (9966,21959),(160046791,1),(0,159992224),(27,24974797),(19177,29334), + (4136148,42),(21179785,53),(61256583,31),(385,344176),(7,11934915), + (1,18992566),(3488065,5),(768021,224),(36288474,7),(8624,117561), + (8,20341439),(5903,261475),(561,1007618),(1738,392327),(633049,1708)] diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Programs/BigTable.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Programs/BigTable.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Programs/BigTable.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Programs/BigTable.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,42 @@ +-- | Create a large HTML table and dump it to a handle +-- +-- Tested in this benchmark: +-- +-- * Creating a large HTML document using a builder +-- +-- * Writing to a handle +-- +{-# LANGUAGE OverloadedStrings #-} +module Benchmarks.Programs.BigTable + ( benchmark + ) where + +import Criterion (Benchmark, bench, whnfIO) +import Data.Monoid (mappend, mconcat) +import Data.Text.Lazy.Builder (Builder, fromText, toLazyText) +import 
Data.Text.Lazy.IO (hPutStr) +import System.IO (Handle) +import qualified Data.Text as T + +benchmark :: Handle -> IO Benchmark +benchmark sink = return $ bench "BigTable" $ whnfIO $ do + hPutStr sink "Content-Type: text/html\n\n" + hPutStr sink . toLazyText . makeTable =<< rows + hPutStr sink "
" + where + -- We provide the number of rows in IO so the builder value isn't shared + -- between the benchmark samples. + rows :: IO Int + rows = return 20000 + {-# NOINLINE rows #-} + +makeTable :: Int -> Builder +makeTable n = mconcat $ replicate n $ mconcat $ map makeCol [1 .. 50] + +makeCol :: Int -> Builder +makeCol 1 = fromText "1" +makeCol 50 = fromText "50" +makeCol i = fromText "" `mappend` (fromInt i `mappend` fromText "") + +fromInt :: Int -> Builder +fromInt = fromText . T.pack . show diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Programs/Cut.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Programs/Cut.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Programs/Cut.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Programs/Cut.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,98 @@ +-- | Cut into a file, selecting certain columns (e.g. 
columns 10 to 40) +-- +-- Tested in this benchmark: +-- +-- * Reading the file +-- +-- * Splitting into lines +-- +-- * Taking a number of characters from the lines +-- +-- * Joining the lines +-- +-- * Writing back to a handle +-- +module Benchmarks.Programs.Cut + ( benchmark + ) where + +import Criterion (Benchmark, bgroup, bench, whnfIO) +import System.IO (Handle, hPutStr) +import qualified Data.ByteString as B +import qualified Data.ByteString.Char8 as BC +import qualified Data.ByteString.Lazy as BL +import qualified Data.ByteString.Lazy.Char8 as BLC +import qualified Data.Text as T +import qualified Data.Text.Encoding as T +import qualified Data.Text.IO as T +import qualified Data.Text.Lazy as TL +import qualified Data.Text.Lazy.Encoding as TL +import qualified Data.Text.Lazy.IO as TL + +benchmark :: FilePath -> Handle -> Int -> Int -> IO Benchmark +benchmark p sink from to = return $ bgroup "Cut" + [ bench' "String" string + , bench' "ByteString" byteString + , bench' "LazyByteString" lazyByteString + , bench' "Text" text + , bench' "LazyText" lazyText + , bench' "TextByteString" textByteString + , bench' "LazyTextByteString" lazyTextByteString + ] + where + bench' n s = bench n $ whnfIO (s p sink from to) + +string :: FilePath -> Handle -> Int -> Int -> IO () +string fp sink from to = do + s <- readFile fp + hPutStr sink $ cut s + where + cut = unlines . map (take (to - from) . drop from) . lines + +byteString :: FilePath -> Handle -> Int -> Int -> IO () +byteString fp sink from to = do + bs <- B.readFile fp + B.hPutStr sink $ cut bs + where + cut = BC.unlines . map (B.take (to - from) . B.drop from) . BC.lines + +lazyByteString :: FilePath -> Handle -> Int -> Int -> IO () +lazyByteString fp sink from to = do + bs <- BL.readFile fp + BL.hPutStr sink $ cut bs + where + cut = BLC.unlines . map (BL.take (to' - from') . BL.drop from') . 
BLC.lines + from' = fromIntegral from + to' = fromIntegral to + +text :: FilePath -> Handle -> Int -> Int -> IO () +text fp sink from to = do + t <- T.readFile fp + T.hPutStr sink $ cut t + where + cut = T.unlines . map (T.take (to - from) . T.drop from) . T.lines + +lazyText :: FilePath -> Handle -> Int -> Int -> IO () +lazyText fp sink from to = do + t <- TL.readFile fp + TL.hPutStr sink $ cut t + where + cut = TL.unlines . map (TL.take (to' - from') . TL.drop from') . TL.lines + from' = fromIntegral from + to' = fromIntegral to + +textByteString :: FilePath -> Handle -> Int -> Int -> IO () +textByteString fp sink from to = do + t <- T.decodeUtf8 `fmap` B.readFile fp + B.hPutStr sink $ T.encodeUtf8 $ cut t + where + cut = T.unlines . map (T.take (to - from) . T.drop from) . T.lines + +lazyTextByteString :: FilePath -> Handle -> Int -> Int -> IO () +lazyTextByteString fp sink from to = do + t <- TL.decodeUtf8 `fmap` BL.readFile fp + BL.hPutStr sink $ TL.encodeUtf8 $ cut t + where + cut = TL.unlines . map (TL.take (to' - from') . TL.drop from') . TL.lines + from' = fromIntegral from + to' = fromIntegral to diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Programs/Fold.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Programs/Fold.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Programs/Fold.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Programs/Fold.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,68 @@ +-- | Benchmark which formats paragraph, like the @sort@ unix utility. 
+-- +-- Tested in this benchmark: +-- +-- * Reading the file +-- +-- * Splitting into paragraphs +-- +-- * Reformatting the paragraphs to a certain line width +-- +-- * Concatenating the results using the text builder +-- +-- * Writing back to a handle +-- +{-# LANGUAGE OverloadedStrings #-} +module Benchmarks.Programs.Fold + ( benchmark + ) where + +import Data.List (foldl') +import Data.List (intersperse) +import Data.Monoid (mempty, mappend, mconcat) +import System.IO (Handle) +import Criterion (Benchmark, bench, whnfIO) +import qualified Data.Text as T +import qualified Data.Text.IO as T +import qualified Data.Text.Lazy.Builder as TLB +import qualified Data.Text.Lazy as TL +import qualified Data.Text.Lazy.IO as TL + +benchmark :: FilePath -> Handle -> IO Benchmark +benchmark i o = return $ + bench "Fold" $ whnfIO $ T.readFile i >>= TL.hPutStr o . fold 80 + +-- | We represent a paragraph by a word list +-- +type Paragraph = [T.Text] + +-- | Fold a text +-- +fold :: Int -> T.Text -> TL.Text +fold maxWidth = TLB.toLazyText . mconcat . + intersperse "\n\n" . map (foldParagraph maxWidth) . paragraphs + +-- | Fold a paragraph +-- +foldParagraph :: Int -> Paragraph -> TLB.Builder +foldParagraph _ [] = mempty +foldParagraph max' (w : ws) = fst $ foldl' go (TLB.fromText w, T.length w) ws + where + go (builder, width) word + | width + len + 1 <= max' = + (builder `mappend` " " `mappend` word', width + len + 1) + | otherwise = + (builder `mappend` "\n" `mappend` word', len) + where + word' = TLB.fromText word + len = T.length word + +-- | Divide a text into paragraphs +-- +paragraphs :: T.Text -> [Paragraph] +paragraphs = splitParagraphs . map T.words . 
T.lines + where + splitParagraphs ls = case break null ls of + ([], []) -> [] + (p, []) -> [concat p] + (p, lr) -> concat p : splitParagraphs (dropWhile null lr) diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Programs/Sort.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Programs/Sort.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Programs/Sort.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Programs/Sort.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,71 @@ +-- | This benchmark sorts the lines of a file, like the @sort@ unix utility. +-- +-- Tested in this benchmark: +-- +-- * Reading the file +-- +-- * Splitting into lines +-- +-- * Sorting the lines +-- +-- * Joining the lines +-- +-- * Writing back to a handle +-- +{-# LANGUAGE OverloadedStrings #-} +module Benchmarks.Programs.Sort + ( benchmark + ) where + +import Criterion (Benchmark, bgroup, bench, whnfIO) +import Data.Monoid (mconcat) +import System.IO (Handle, hPutStr) +import qualified Data.ByteString as B +import qualified Data.ByteString.Char8 as BC +import qualified Data.ByteString.Lazy as BL +import qualified Data.ByteString.Lazy.Char8 as BLC +import qualified Data.List as L +import qualified Data.Text as T +import qualified Data.Text.Encoding as T +import qualified Data.Text.IO as T +import qualified Data.Text.Lazy as TL +import qualified Data.Text.Lazy.Builder as TLB +import qualified Data.Text.Lazy.Encoding as TL +import qualified Data.Text.Lazy.IO as TL + +benchmark :: FilePath -> Handle -> IO Benchmark +benchmark i o = return $ bgroup "Sort" + [ bench "String" $ whnfIO $ readFile i >>= hPutStr o . string + , bench "ByteString" $ whnfIO $ B.readFile i >>= B.hPutStr o . byteString + , bench "LazyByteString" $ whnfIO $ + BL.readFile i >>= BL.hPutStr o . lazyByteString + , bench "Text" $ whnfIO $ T.readFile i >>= T.hPutStr o . 
text + , bench "LazyText" $ whnfIO $ TL.readFile i >>= TL.hPutStr o . lazyText + , bench "TextByteString" $ whnfIO $ B.readFile i >>= + B.hPutStr o . T.encodeUtf8 . text . T.decodeUtf8 + , bench "LazyTextByteString" $ whnfIO $ BL.readFile i >>= + BL.hPutStr o . TL.encodeUtf8 . lazyText . TL.decodeUtf8 + , bench "TextBuilder" $ whnfIO $ B.readFile i >>= + BL.hPutStr o . TL.encodeUtf8 . textBuilder . T.decodeUtf8 + ] + +string :: String -> String +string = unlines . L.sort . lines + +byteString :: B.ByteString -> B.ByteString +byteString = BC.unlines . L.sort . BC.lines + +lazyByteString :: BL.ByteString -> BL.ByteString +lazyByteString = BLC.unlines . L.sort . BLC.lines + +text :: T.Text -> T.Text +text = T.unlines . L.sort . T.lines + +lazyText :: TL.Text -> TL.Text +lazyText = TL.unlines . L.sort . TL.lines + +-- | Text variant using a builder monoid for the final concatenation +-- +textBuilder :: T.Text -> TL.Text +textBuilder = TLB.toLazyText . mconcat . L.intersperse (TLB.singleton '\n') . + map TLB.fromText . L.sort . T.lines diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Programs/StripTags.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Programs/StripTags.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Programs/StripTags.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Programs/StripTags.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,53 @@ +-- | Program to replace HTML tags by whitespace +-- +-- This program was originally contributed by Petr Prokhorenkov. 
+-- +-- Tested in this benchmark: +-- +-- * Reading the file +-- +-- * Replacing text between HTML tags (<>) with whitespace +-- +-- * Writing back to a handle +-- +{-# OPTIONS_GHC -fspec-constr-count=5 #-} +module Benchmarks.Programs.StripTags + ( benchmark + ) where + +import Criterion (Benchmark, bgroup, bench, whnfIO) +import Data.List (mapAccumL) +import System.IO (Handle, hPutStr) +import qualified Data.ByteString as B +import qualified Data.ByteString.Char8 as BC +import qualified Data.Text as T +import qualified Data.Text.Encoding as T +import qualified Data.Text.IO as T + +benchmark :: FilePath -> Handle -> IO Benchmark +benchmark i o = return $ bgroup "StripTags" + [ bench "String" $ whnfIO $ readFile i >>= hPutStr o . string + , bench "ByteString" $ whnfIO $ B.readFile i >>= B.hPutStr o . byteString + , bench "Text" $ whnfIO $ T.readFile i >>= T.hPutStr o . text + , bench "TextByteString" $ whnfIO $ + B.readFile i >>= B.hPutStr o . T.encodeUtf8 . text . T.decodeUtf8 + ] + +string :: String -> String +string = snd . mapAccumL step 0 + +text :: T.Text -> T.Text +text = snd . T.mapAccumL step 0 + +byteString :: B.ByteString -> B.ByteString +byteString = snd . BC.mapAccumL step 0 + +step :: Int -> Char -> (Int, Char) +step d c + | d > 0 || d' > 0 = (d', ' ') + | otherwise = (d', c) + where + d' = d + depth c + depth '>' = 1 + depth '<' = -1 + depth _ = 0 diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Programs/Throughput.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Programs/Throughput.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Programs/Throughput.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Programs/Throughput.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,41 @@ +-- | This benchmark simply reads and writes a file using the various string +-- libraries. 
The point of it is that we can make better estimations on how +-- much time the other benchmarks spend doing IO. +-- +-- Note that we expect ByteStrings to be a whole lot faster, since they do not +-- do any actual encoding/decoding here, while String and Text do have UTF-8 +-- encoding/decoding. +-- +-- Tested in this benchmark: +-- +-- * Reading the file +-- +-- * Replacing text between HTML tags (<>) with whitespace +-- +-- * Writing back to a handle +-- +module Benchmarks.Programs.Throughput + ( benchmark + ) where + +import Criterion (Benchmark, bgroup, bench, whnfIO) +import System.IO (Handle, hPutStr) +import qualified Data.ByteString as B +import qualified Data.ByteString.Lazy as BL +import qualified Data.Text.Encoding as T +import qualified Data.Text.IO as T +import qualified Data.Text.Lazy.Encoding as TL +import qualified Data.Text.Lazy.IO as TL + +benchmark :: FilePath -> Handle -> IO Benchmark +benchmark fp sink = return $ bgroup "Throughput" + [ bench "String" $ whnfIO $ readFile fp >>= hPutStr sink + , bench "ByteString" $ whnfIO $ B.readFile fp >>= B.hPutStr sink + , bench "LazyByteString" $ whnfIO $ BL.readFile fp >>= BL.hPutStr sink + , bench "Text" $ whnfIO $ T.readFile fp >>= T.hPutStr sink + , bench "LazyText" $ whnfIO $ TL.readFile fp >>= TL.hPutStr sink + , bench "TextByteString" $ whnfIO $ + B.readFile fp >>= B.hPutStr sink . T.encodeUtf8 . T.decodeUtf8 + , bench "LazyTextByteString" $ whnfIO $ + BL.readFile fp >>= BL.hPutStr sink . TL.encodeUtf8 . 
TL.decodeUtf8 + ] diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Pure.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Pure.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Pure.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Pure.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,486 @@ +-- | Benchmarks various pure functions from the Text library +-- +-- Tested in this benchmark: +-- +-- * Most pure functions defined the string types +-- +{-# LANGUAGE BangPatterns, CPP, GADTs, MagicHash #-} +{-# OPTIONS_GHC -fno-warn-orphans #-} +module Benchmarks.Pure + ( benchmark + ) where + +import Control.DeepSeq (NFData (..)) +import Control.Exception (evaluate) +import Criterion (Benchmark, bgroup, bench, nf) +import Data.Char (toLower, toUpper) +import Data.Monoid (mappend, mempty) +import GHC.Base (Char (..), Int (..), chr#, ord#, (+#)) +import qualified Data.ByteString.Char8 as BS +import qualified Data.ByteString.Lazy.Char8 as BL +import qualified Data.ByteString.UTF8 as UTF8 +import qualified Data.List as L +import qualified Data.Text as T +import qualified Data.Text.Encoding as T +import qualified Data.Text.Lazy as TL +import qualified Data.Text.Lazy.Builder as TB +import qualified Data.Text.Lazy.Encoding as TL + +benchmark :: String -> FilePath -> IO Benchmark +benchmark kind fp = do + -- Evaluate stuff before actually running the benchmark, we don't want to + -- count it here. 
+ + -- ByteString A + bsa <- BS.readFile fp + + -- Text A/B, LazyText A/B + ta <- evaluate $ T.decodeUtf8 bsa + tb <- evaluate $ T.toUpper ta + tla <- evaluate $ TL.fromChunks (T.chunksOf 16376 ta) + tlb <- evaluate $ TL.fromChunks (T.chunksOf 16376 tb) + + -- ByteString B, LazyByteString A/B + bsb <- evaluate $ T.encodeUtf8 tb + bla <- evaluate $ BL.fromChunks (chunksOf 16376 bsa) + blb <- evaluate $ BL.fromChunks (chunksOf 16376 bsb) + + -- String A/B + sa <- evaluate $ UTF8.toString bsa + sb <- evaluate $ T.unpack tb + + -- Lengths + bsa_len <- evaluate $ BS.length bsa + ta_len <- evaluate $ T.length ta + bla_len <- evaluate $ BL.length bla + tla_len <- evaluate $ TL.length tla + sa_len <- evaluate $ L.length sa + + -- Lines + bsl <- evaluate $ BS.lines bsa + bll <- evaluate $ BL.lines bla + tl <- evaluate $ T.lines ta + tll <- evaluate $ TL.lines tla + sl <- evaluate $ L.lines sa + + return $ bgroup "Pure" + [ bgroup "append" + [ benchT $ nf (T.append tb) ta + , benchTL $ nf (TL.append tlb) tla + , benchBS $ nf (BS.append bsb) bsa + , benchBSL $ nf (BL.append blb) bla + , benchS $ nf ((++) sb) sa + ] + , bgroup "concat" + [ benchT $ nf T.concat tl + , benchTL $ nf TL.concat tll + , benchBS $ nf BS.concat bsl + , benchBSL $ nf BL.concat bll + , benchS $ nf L.concat sl + ] + , bgroup "cons" + [ benchT $ nf (T.cons c) ta + , benchTL $ nf (TL.cons c) tla + , benchBS $ nf (BS.cons c) bsa + , benchBSL $ nf (BL.cons c) bla + , benchS $ nf (c:) sa + ] + , bgroup "concatMap" + [ benchT $ nf (T.concatMap (T.replicate 3 . T.singleton)) ta + , benchTL $ nf (TL.concatMap (TL.replicate 3 . TL.singleton)) tla + , benchBS $ nf (BS.concatMap (BS.replicate 3)) bsa + , benchBSL $ nf (BL.concatMap (BL.replicate 3)) bla + , benchS $ nf (L.concatMap (L.replicate 3 . 
(:[]))) sa + ] + , bgroup "decode" + [ benchT $ nf T.decodeUtf8 bsa + , benchTL $ nf TL.decodeUtf8 bla + , benchBS $ nf BS.unpack bsa + , benchBSL $ nf BL.unpack bla + , benchS $ nf UTF8.toString bsa + ] + , bgroup "decode'" + [ benchT $ nf T.decodeUtf8' bsa + , benchTL $ nf TL.decodeUtf8' bla + ] + , bgroup "drop" + [ benchT $ nf (T.drop (ta_len `div` 3)) ta + , benchTL $ nf (TL.drop (tla_len `div` 3)) tla + , benchBS $ nf (BS.drop (bsa_len `div` 3)) bsa + , benchBSL $ nf (BL.drop (bla_len `div` 3)) bla + , benchS $ nf (L.drop (sa_len `div` 3)) sa + ] + , bgroup "encode" + [ benchT $ nf T.encodeUtf8 ta + , benchTL $ nf TL.encodeUtf8 tla + , benchBS $ nf BS.pack sa + , benchBSL $ nf BL.pack sa + , benchS $ nf UTF8.fromString sa + ] + , bgroup "filter" + [ benchT $ nf (T.filter p0) ta + , benchTL $ nf (TL.filter p0) tla + , benchBS $ nf (BS.filter p0) bsa + , benchBSL $ nf (BL.filter p0) bla + , benchS $ nf (L.filter p0) sa + ] + , bgroup "filter.filter" + [ benchT $ nf (T.filter p1 . T.filter p0) ta + , benchTL $ nf (TL.filter p1 . TL.filter p0) tla + , benchBS $ nf (BS.filter p1 . BS.filter p0) bsa + , benchBSL $ nf (BL.filter p1 . BL.filter p0) bla + , benchS $ nf (L.filter p1 . L.filter p0) sa + ] + , bgroup "foldl'" + [ benchT $ nf (T.foldl' len 0) ta + , benchTL $ nf (TL.foldl' len 0) tla + , benchBS $ nf (BS.foldl' len 0) bsa + , benchBSL $ nf (BL.foldl' len 0) bla + , benchS $ nf (L.foldl' len 0) sa + ] + , bgroup "foldr" + [ benchT $ nf (L.length . T.foldr (:) []) ta + , benchTL $ nf (L.length . TL.foldr (:) []) tla + , benchBS $ nf (L.length . BS.foldr (:) []) bsa + , benchBSL $ nf (L.length . BL.foldr (:) []) bla + , benchS $ nf (L.length . 
L.foldr (:) []) sa + ] + , bgroup "head" + [ benchT $ nf T.head ta + , benchTL $ nf TL.head tla + , benchBS $ nf BS.head bsa + , benchBSL $ nf BL.head bla + , benchS $ nf L.head sa + ] + , bgroup "init" + [ benchT $ nf T.init ta + , benchTL $ nf TL.init tla + , benchBS $ nf BS.init bsa + , benchBSL $ nf BL.init bla + , benchS $ nf L.init sa + ] + , bgroup "intercalate" + [ benchT $ nf (T.intercalate tsw) tl + , benchTL $ nf (TL.intercalate tlw) tll + , benchBS $ nf (BS.intercalate bsw) bsl + , benchBSL $ nf (BL.intercalate blw) bll + , benchS $ nf (L.intercalate lw) sl + ] + , bgroup "intersperse" + [ benchT $ nf (T.intersperse c) ta + , benchTL $ nf (TL.intersperse c) tla + , benchBS $ nf (BS.intersperse c) bsa + , benchBSL $ nf (BL.intersperse c) bla + , benchS $ nf (L.intersperse c) sa + ] + , bgroup "isInfixOf" + [ benchT $ nf (T.isInfixOf tsw) ta + , benchTL $ nf (TL.isInfixOf tlw) tla + , benchBS $ nf (BS.isInfixOf bsw) bsa + -- no isInfixOf for lazy bytestrings + , benchS $ nf (L.isInfixOf lw) sa + ] + , bgroup "last" + [ benchT $ nf T.last ta + , benchTL $ nf TL.last tla + , benchBS $ nf BS.last bsa + , benchBSL $ nf BL.last bla + , benchS $ nf L.last sa + ] + , bgroup "map" + [ benchT $ nf (T.map f) ta + , benchTL $ nf (TL.map f) tla + , benchBS $ nf (BS.map f) bsa + , benchBSL $ nf (BL.map f) bla + , benchS $ nf (L.map f) sa + ] + , bgroup "mapAccumL" + [ benchT $ nf (T.mapAccumL g 0) ta + , benchTL $ nf (TL.mapAccumL g 0) tla + , benchBS $ nf (BS.mapAccumL g 0) bsa + , benchBSL $ nf (BL.mapAccumL g 0) bla + , benchS $ nf (L.mapAccumL g 0) sa + ] + , bgroup "mapAccumR" + [ benchT $ nf (T.mapAccumR g 0) ta + , benchTL $ nf (TL.mapAccumR g 0) tla + , benchBS $ nf (BS.mapAccumR g 0) bsa + , benchBSL $ nf (BL.mapAccumR g 0) bla + , benchS $ nf (L.mapAccumR g 0) sa + ] + , bgroup "map.map" + [ benchT $ nf (T.map f . T.map f) ta + , benchTL $ nf (TL.map f . TL.map f) tla + , benchBS $ nf (BS.map f . BS.map f) bsa + , benchBSL $ nf (BL.map f . 
BL.map f) bla + , benchS $ nf (L.map f . L.map f) sa + ] + , bgroup "replicate char" + [ benchT $ nf (T.replicate bsa_len) (T.singleton c) + , benchTL $ nf (TL.replicate (fromIntegral bsa_len)) (TL.singleton c) + , benchBS $ nf (BS.replicate bsa_len) c + , benchBSL $ nf (BL.replicate (fromIntegral bsa_len)) c + , benchS $ nf (L.replicate bsa_len) c + ] + , bgroup "replicate string" + [ benchT $ nf (T.replicate (bsa_len `div` T.length tsw)) tsw + , benchTL $ nf (TL.replicate (fromIntegral bsa_len `div` TL.length tlw)) tlw + , benchS $ nf (replicat (bsa_len `div` T.length tsw)) lw + ] + , bgroup "reverse" + [ benchT $ nf T.reverse ta + , benchTL $ nf TL.reverse tla + , benchBS $ nf BS.reverse bsa + , benchBSL $ nf BL.reverse bla + , benchS $ nf L.reverse sa + ] + , bgroup "take" + [ benchT $ nf (T.take (ta_len `div` 3)) ta + , benchTL $ nf (TL.take (tla_len `div` 3)) tla + , benchBS $ nf (BS.take (bsa_len `div` 3)) bsa + , benchBSL $ nf (BL.take (bla_len `div` 3)) bla + , benchS $ nf (L.take (sa_len `div` 3)) sa + ] + , bgroup "tail" + [ benchT $ nf T.tail ta + , benchTL $ nf TL.tail tla + , benchBS $ nf BS.tail bsa + , benchBSL $ nf BL.tail bla + , benchS $ nf L.tail sa + ] + , bgroup "toLower" + [ benchT $ nf T.toLower ta + , benchTL $ nf TL.toLower tla + , benchBS $ nf (BS.map toLower) bsa + , benchBSL $ nf (BL.map toLower) bla + , benchS $ nf (L.map toLower) sa + ] + , bgroup "toUpper" + [ benchT $ nf T.toUpper ta + , benchTL $ nf TL.toUpper tla + , benchBS $ nf (BS.map toUpper) bsa + , benchBSL $ nf (BL.map toUpper) bla + , benchS $ nf (L.map toUpper) sa + ] + , bgroup "uncons" + [ benchT $ nf T.uncons ta + , benchTL $ nf TL.uncons tla + , benchBS $ nf BS.uncons bsa + , benchBSL $ nf BL.uncons bla + , benchS $ nf L.uncons sa + ] + , bgroup "words" + [ benchT $ nf T.words ta + , benchTL $ nf TL.words tla + , benchBS $ nf BS.words bsa + , benchBSL $ nf BL.words bla + , benchS $ nf L.words sa + ] + , bgroup "zipWith" + [ benchT $ nf (T.zipWith min tb) ta + , 
benchTL $ nf (TL.zipWith min tlb) tla + , benchBS $ nf (BS.zipWith min bsb) bsa + , benchBSL $ nf (BL.zipWith min blb) bla + , benchS $ nf (L.zipWith min sb) sa + ] + , bgroup "length" + [ bgroup "cons" + [ benchT $ nf (T.length . T.cons c) ta + , benchTL $ nf (TL.length . TL.cons c) tla + , benchBS $ nf (BS.length . BS.cons c) bsa + , benchBSL $ nf (BL.length . BL.cons c) bla + , benchS $ nf (L.length . (:) c) sa + ] + , bgroup "decode" + [ benchT $ nf (T.length . T.decodeUtf8) bsa + , benchTL $ nf (TL.length . TL.decodeUtf8) bla + , benchBS $ nf (L.length . BS.unpack) bsa + , benchBSL $ nf (L.length . BL.unpack) bla + , bench "StringUTF8" $ nf (L.length . UTF8.toString) bsa + ] + , bgroup "drop" + [ benchT $ nf (T.length . T.drop (ta_len `div` 3)) ta + , benchTL $ nf (TL.length . TL.drop (tla_len `div` 3)) tla + , benchBS $ nf (BS.length . BS.drop (bsa_len `div` 3)) bsa + , benchBSL $ nf (BL.length . BL.drop (bla_len `div` 3)) bla + , benchS $ nf (L.length . L.drop (sa_len `div` 3)) sa + ] + , bgroup "filter" + [ benchT $ nf (T.length . T.filter p0) ta + , benchTL $ nf (TL.length . TL.filter p0) tla + , benchBS $ nf (BS.length . BS.filter p0) bsa + , benchBSL $ nf (BL.length . BL.filter p0) bla + , benchS $ nf (L.length . L.filter p0) sa + ] + , bgroup "filter.filter" + [ benchT $ nf (T.length . T.filter p1 . T.filter p0) ta + , benchTL $ nf (TL.length . TL.filter p1 . TL.filter p0) tla + , benchBS $ nf (BS.length . BS.filter p1 . BS.filter p0) bsa + , benchBSL $ nf (BL.length . BL.filter p1 . BL.filter p0) bla + , benchS $ nf (L.length . L.filter p1 . L.filter p0) sa + ] + , bgroup "init" + [ benchT $ nf (T.length . T.init) ta + , benchTL $ nf (TL.length . TL.init) tla + , benchBS $ nf (BS.length . BS.init) bsa + , benchBSL $ nf (BL.length . BL.init) bla + , benchS $ nf (L.length . L.init) sa + ] + , bgroup "intercalate" + [ benchT $ nf (T.length . T.intercalate tsw) tl + , benchTL $ nf (TL.length . TL.intercalate tlw) tll + , benchBS $ nf (BS.length . 
BS.intercalate bsw) bsl + , benchBSL $ nf (BL.length . BL.intercalate blw) bll + , benchS $ nf (L.length . L.intercalate lw) sl + ] + , bgroup "intersperse" + [ benchT $ nf (T.length . T.intersperse c) ta + , benchTL $ nf (TL.length . TL.intersperse c) tla + , benchBS $ nf (BS.length . BS.intersperse c) bsa + , benchBSL $ nf (BL.length . BL.intersperse c) bla + , benchS $ nf (L.length . L.intersperse c) sa + ] + , bgroup "map" + [ benchT $ nf (T.length . T.map f) ta + , benchTL $ nf (TL.length . TL.map f) tla + , benchBS $ nf (BS.length . BS.map f) bsa + , benchBSL $ nf (BL.length . BL.map f) bla + , benchS $ nf (L.length . L.map f) sa + ] + , bgroup "map.map" + [ benchT $ nf (T.length . T.map f . T.map f) ta + , benchTL $ nf (TL.length . TL.map f . TL.map f) tla + , benchBS $ nf (BS.length . BS.map f . BS.map f) bsa + , benchS $ nf (L.length . L.map f . L.map f) sa + ] + , bgroup "replicate char" + [ benchT $ nf (T.length . T.replicate bsa_len) (T.singleton c) + , benchTL $ nf (TL.length . TL.replicate (fromIntegral bsa_len)) (TL.singleton c) + , benchBS $ nf (BS.length . BS.replicate bsa_len) c + , benchBSL $ nf (BL.length . BL.replicate (fromIntegral bsa_len)) c + , benchS $ nf (L.length . L.replicate bsa_len) c + ] + , bgroup "replicate string" + [ benchT $ nf (T.length . T.replicate (bsa_len `div` T.length tsw)) tsw + , benchTL $ nf (TL.length . TL.replicate (fromIntegral bsa_len `div` TL.length tlw)) tlw + , benchS $ nf (L.length . replicat (bsa_len `div` T.length tsw)) lw + ] + , bgroup "take" + [ benchT $ nf (T.length . T.take (ta_len `div` 3)) ta + , benchTL $ nf (TL.length . TL.take (tla_len `div` 3)) tla + , benchBS $ nf (BS.length . BS.take (bsa_len `div` 3)) bsa + , benchBSL $ nf (BL.length . BL.take (bla_len `div` 3)) bla + , benchS $ nf (L.length . L.take (sa_len `div` 3)) sa + ] + , bgroup "tail" + [ benchT $ nf (T.length . T.tail) ta + , benchTL $ nf (TL.length . TL.tail) tla + , benchBS $ nf (BS.length . BS.tail) bsa + , benchBSL $ nf (BL.length . 
BL.tail) bla + , benchS $ nf (L.length . L.tail) sa + ] + , bgroup "toLower" + [ benchT $ nf (T.length . T.toLower) ta + , benchTL $ nf (TL.length . TL.toLower) tla + , benchBS $ nf (BS.length . BS.map toLower) bsa + , benchBSL $ nf (BL.length . BL.map toLower) bla + , benchS $ nf (L.length . L.map toLower) sa + ] + , bgroup "toUpper" + [ benchT $ nf (T.length . T.toUpper) ta + , benchTL $ nf (TL.length . TL.toUpper) tla + , benchBS $ nf (BS.length . BS.map toUpper) bsa + , benchBSL $ nf (BL.length . BL.map toUpper) bla + , benchS $ nf (L.length . L.map toUpper) sa + ] + , bgroup "words" + [ benchT $ nf (L.length . T.words) ta + , benchTL $ nf (L.length . TL.words) tla + , benchBS $ nf (L.length . BS.words) bsa + , benchBSL $ nf (L.length . BL.words) bla + , benchS $ nf (L.length . L.words) sa + ] + , bgroup "zipWith" + [ benchT $ nf (T.length . T.zipWith min tb) ta + , benchTL $ nf (TL.length . TL.zipWith min tlb) tla + , benchBS $ nf (L.length . BS.zipWith min bsb) bsa + , benchBSL $ nf (L.length . BL.zipWith min blb) bla + , benchS $ nf (L.length . L.zipWith min sb) sa + ] + ] + , bgroup "Builder" + [ bench "mappend char" $ nf (TL.length . TB.toLazyText . mappendNChar 'a') 10000 + , bench "mappend 8 char" $ nf (TL.length . TB.toLazyText . mappend8Char) 'a' + , bench "mappend text" $ nf (TL.length . TB.toLazyText . mappendNText short) 10000 + ] + ] + where + benchS = bench ("String+" ++ kind) + benchT = bench ("Text+" ++ kind) + benchTL = bench ("LazyText+" ++ kind) + benchBS = bench ("ByteString+" ++ kind) + benchBSL = bench ("LazyByteString+" ++ kind) + + c = 'й' + p0 = (== c) + p1 = (/= 'д') + lw = "право" + bsw = UTF8.fromString lw + blw = BL.fromChunks [bsw] + tsw = T.pack lw + tlw = TL.fromChunks [tsw] + f (C# c#) = C# (chr# (ord# c# +# 1#)) + g (I# i#) (C# c#) = (I# (i# +# 1#), C# (chr# (ord# c# +# i#))) + len l _ = l + (1::Int) + replicat n = concat . 
L.replicate n + short = T.pack "short" + +#if !MIN_VERSION_bytestring(0,10,0) +instance NFData BS.ByteString + +instance NFData BL.ByteString where + rnf BL.Empty = () + rnf (BL.Chunk _ ts) = rnf ts +#endif + +data B where + B :: NFData a => a -> B + +instance NFData B where + rnf (B b) = rnf b + +-- | Split a bytestring in chunks +-- +chunksOf :: Int -> BS.ByteString -> [BS.ByteString] +chunksOf k = go + where + go t = case BS.splitAt k t of + (a,b) | BS.null a -> [] + | otherwise -> a : go b + +-- | Append a character n times +-- +mappendNChar :: Char -> Int -> TB.Builder +mappendNChar c n = go 0 + where + go i + | i < n = TB.singleton c `mappend` go (i+1) + | otherwise = mempty + +-- | Gives more opportunity for inlining and elimination of unnecesary +-- bounds checks. +-- +mappend8Char :: Char -> TB.Builder +mappend8Char c = TB.singleton c `mappend` TB.singleton c `mappend` + TB.singleton c `mappend` TB.singleton c `mappend` + TB.singleton c `mappend` TB.singleton c `mappend` + TB.singleton c `mappend` TB.singleton c + +-- | Append a text N times +-- +mappendNText :: T.Text -> Int -> TB.Builder +mappendNText t n = go 0 + where + go i + | i < n = TB.fromText t `mappend` go (i+1) + | otherwise = mempty diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/ReadNumbers.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/ReadNumbers.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/ReadNumbers.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/ReadNumbers.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,93 @@ +-- | Read numbers from a file with a just a number on each line, find the +-- minimum of those numbers. 
The file contains different kinds of numbers: +-- +-- * Decimals +-- +-- * Hexadecimals +-- +-- * Floating point numbers +-- +-- * Floating point numbers in scientific notation +-- +-- The different benchmarks will only take into account the values they can +-- parse. +-- +-- Tested in this benchmark: +-- +-- * Lexing/parsing of different numerical types +-- +module Benchmarks.ReadNumbers + ( benchmark + ) where + +import Criterion (Benchmark, bgroup, bench, whnf) +import Data.List (foldl') +import Numeric (readDec, readFloat, readHex) +import qualified Data.ByteString.Char8 as B +import qualified Data.ByteString.Lazy.Char8 as BL +import qualified Data.ByteString.Lex.Fractional as B +import qualified Data.Text as T +import qualified Data.Text.IO as T +import qualified Data.Text.Lazy as TL +import qualified Data.Text.Lazy.IO as TL +import qualified Data.Text.Lazy.Read as TL +import qualified Data.Text.Read as T + +benchmark :: FilePath -> IO Benchmark +benchmark fp = do + -- Read all files into lines: string, text, lazy text, bytestring, lazy + -- bytestring + s <- lines `fmap` readFile fp + t <- T.lines `fmap` T.readFile fp + tl <- TL.lines `fmap` TL.readFile fp + b <- B.lines `fmap` B.readFile fp + bl <- BL.lines `fmap` BL.readFile fp + return $ bgroup "ReadNumbers" + [ bench "DecimalString" $ whnf (int . string readDec) s + , bench "HexadecimalString" $ whnf (int . string readHex) s + , bench "DoubleString" $ whnf (double . string readFloat) s + + , bench "DecimalText" $ whnf (int . text (T.signed T.decimal)) t + , bench "HexadecimalText" $ whnf (int . text (T.signed T.hexadecimal)) t + , bench "DoubleText" $ whnf (double . text T.double) t + , bench "RationalText" $ whnf (double . text T.rational) t + + , bench "DecimalLazyText" $ + whnf (int . text (TL.signed TL.decimal)) tl + , bench "HexadecimalLazyText" $ + whnf (int . text (TL.signed TL.hexadecimal)) tl + , bench "DoubleLazyText" $ + whnf (double . 
text TL.double) tl + , bench "RationalLazyText" $ + whnf (double . text TL.rational) tl + + , bench "DecimalByteString" $ whnf (int . byteString B.readInt) b + , bench "DoubleByteString" $ whnf (double . byteString B.readDecimal) b + + , bench "DecimalLazyByteString" $ + whnf (int . byteString BL.readInt) bl + ] + where + -- Used for fixing types + int :: Int -> Int + int = id + double :: Double -> Double + double = id + +string :: (Ord a, Num a) => (t -> [(a, t)]) -> [t] -> a +string reader = foldl' go 1000000 + where + go z t = case reader t of [(n, _)] -> min n z + _ -> z + +text :: (Ord a, Num a) => (t -> Either String (a,t)) -> [t] -> a +text reader = foldl' go 1000000 + where + go z t = case reader t of Left _ -> z + Right (n, _) -> min n z + +byteString :: (Ord a, Num a) => (t -> Maybe (a,t)) -> [t] -> a +byteString reader = foldl' go 1000000 + where + go z t = case reader t of Nothing -> z + Just (n, _) -> min n z diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Replace.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Replace.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Replace.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Replace.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,43 @@ +{-# LANGUAGE BangPatterns #-} +-- | Replace a string by another string +-- +-- Tested in this benchmark: +-- +-- * Search and replace of a pattern in a text +-- +module Benchmarks.Replace + ( benchmark + ) where + +import Criterion (Benchmark, bgroup, bench, nf) +import qualified Data.ByteString.Char8 as B +import qualified Data.ByteString.Lazy as BL +import qualified Data.ByteString.Lazy.Search as BL +import qualified Data.ByteString.Search as B +import qualified Data.Text as T +import qualified Data.Text.Encoding as T +import qualified Data.Text.Lazy as TL +import qualified Data.Text.Lazy.Encoding as 
TL +import qualified Data.Text.Lazy.IO as TL + +benchmark :: FilePath -> String -> String -> IO Benchmark +benchmark fp pat sub = do + tl <- TL.readFile fp + bl <- BL.readFile fp + let !t = TL.toStrict tl + !b = T.encodeUtf8 t + return $ bgroup "Replace" [ + bench "Text" $ nf (T.length . T.replace tpat tsub) t + , bench "ByteString" $ nf (BL.length . B.replace bpat bsub) b + , bench "LazyText" $ nf (TL.length . TL.replace tlpat tlsub) tl + , bench "LazyByteString" $ nf (BL.length . BL.replace blpat blsub) bl + ] + where + tpat = T.pack pat + tsub = T.pack sub + tlpat = TL.pack pat + tlsub = TL.pack sub + bpat = T.encodeUtf8 tpat + bsub = T.encodeUtf8 tsub + blpat = B.concat $ BL.toChunks $ TL.encodeUtf8 tlpat + blsub = B.concat $ BL.toChunks $ TL.encodeUtf8 tlsub diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Search.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Search.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Search.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Search.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,48 @@ +-- | Search for a pattern in a file, find the number of occurences +-- +-- Tested in this benchmark: +-- +-- * Searching all occurences of a pattern using library routines +-- +module Benchmarks.Search + ( benchmark + ) where + +import Criterion (Benchmark, bench, bgroup, whnf) +import qualified Data.ByteString as B +import qualified Data.ByteString.Lazy as BL +import qualified Data.ByteString.Lazy.Search as BL +import qualified Data.ByteString.Search as B +import qualified Data.Text as T +import qualified Data.Text.Encoding as T +import qualified Data.Text.IO as T +import qualified Data.Text.Lazy as TL +import qualified Data.Text.Lazy.IO as TL + +benchmark :: FilePath -> T.Text -> IO Benchmark +benchmark fp needleT = do + b <- B.readFile fp + bl <- BL.readFile fp + 
t <- T.readFile fp + tl <- TL.readFile fp + return $ bgroup "FileIndices" + [ bench "ByteString" $ whnf (byteString needleB) b + , bench "LazyByteString" $ whnf (lazyByteString needleB) bl + , bench "Text" $ whnf (text needleT) t + , bench "LazyText" $ whnf (lazyText needleTL) tl + ] + where + needleB = T.encodeUtf8 needleT + needleTL = TL.fromChunks [needleT] + +byteString :: B.ByteString -> B.ByteString -> Int +byteString needle = length . B.indices needle + +lazyByteString :: B.ByteString -> BL.ByteString -> Int +lazyByteString needle = length . BL.indices needle + +text :: T.Text -> T.Text -> Int +text = T.count + +lazyText :: TL.Text -> TL.Text -> Int +lazyText needle = fromIntegral . TL.count needle diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Stream.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Stream.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Stream.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/Stream.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,93 @@ +-- | This module contains a number of benchmarks for the different streaming +-- functions +-- +-- Tested in this benchmark: +-- +-- * Most streaming functions +-- +{-# LANGUAGE BangPatterns #-} +{-# OPTIONS_GHC -fno-warn-orphans #-} +module Benchmarks.Stream + ( benchmark + ) where + +import Control.DeepSeq (NFData (..)) +import Criterion (Benchmark, bgroup, bench, nf) +import Data.Text.Internal.Fusion.Types (Step (..), Stream (..)) +import qualified Data.Text.Encoding as T +import qualified Data.Text.Encoding.Error as E +import qualified Data.Text.Internal.Encoding.Fusion as T +import qualified Data.Text.Internal.Encoding.Fusion.Common as F +import qualified Data.Text.Internal.Fusion as T +import qualified Data.Text.IO as T +import qualified Data.Text.Lazy.Encoding as TL +import qualified 
Data.Text.Internal.Lazy.Encoding.Fusion as TL +import qualified Data.Text.Internal.Lazy.Fusion as TL +import qualified Data.Text.Lazy.IO as TL + +instance NFData a => NFData (Stream a) where + -- Currently, this implementation does not force evaluation of the size hint + rnf (Stream next s0 _) = go s0 + where + go !s = case next s of + Done -> () + Skip s' -> go s' + Yield x s' -> rnf x `seq` go s' + +benchmark :: FilePath -> IO Benchmark +benchmark fp = do + -- Different formats + t <- T.readFile fp + let !utf8 = T.encodeUtf8 t + !utf16le = T.encodeUtf16LE t + !utf16be = T.encodeUtf16BE t + !utf32le = T.encodeUtf32LE t + !utf32be = T.encodeUtf32BE t + + -- Once again for the lazy variants + tl <- TL.readFile fp + let !utf8L = TL.encodeUtf8 tl + !utf16leL = TL.encodeUtf16LE tl + !utf16beL = TL.encodeUtf16BE tl + !utf32leL = TL.encodeUtf32LE tl + !utf32beL = TL.encodeUtf32BE tl + + -- For the functions which operate on streams + let !s = T.stream t + + return $ bgroup "Stream" + + -- Fusion + [ bgroup "stream" $ + [ bench "Text" $ nf T.stream t + , bench "LazyText" $ nf TL.stream tl + ] + + -- Encoding.Fusion + , bgroup "streamUtf8" + [ bench "Text" $ nf (T.streamUtf8 E.lenientDecode) utf8 + , bench "LazyText" $ nf (TL.streamUtf8 E.lenientDecode) utf8L + ] + , bgroup "streamUtf16LE" + [ bench "Text" $ nf (T.streamUtf16LE E.lenientDecode) utf16le + , bench "LazyText" $ nf (TL.streamUtf16LE E.lenientDecode) utf16leL + ] + , bgroup "streamUtf16BE" + [ bench "Text" $ nf (T.streamUtf16BE E.lenientDecode) utf16be + , bench "LazyText" $ nf (TL.streamUtf16BE E.lenientDecode) utf16beL + ] + , bgroup "streamUtf32LE" + [ bench "Text" $ nf (T.streamUtf32LE E.lenientDecode) utf32le + , bench "LazyText" $ nf (TL.streamUtf32LE E.lenientDecode) utf32leL + ] + , bgroup "streamUtf32BE" + [ bench "Text" $ nf (T.streamUtf32BE E.lenientDecode) utf32be + , bench "LazyText" $ nf (TL.streamUtf32BE E.lenientDecode) utf32beL + ] + + -- Encoding.Fusion.Common + , bench "restreamUtf16LE" $ nf 
F.restreamUtf16LE s + , bench "restreamUtf16BE" $ nf F.restreamUtf16BE s + , bench "restreamUtf32LE" $ nf F.restreamUtf32LE s + , bench "restreamUtf32BE" $ nf F.restreamUtf32BE s + ] diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/WordFrequencies.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/WordFrequencies.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/WordFrequencies.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks/WordFrequencies.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,36 @@ +-- | A word frequency count using the different string types +-- +-- Tested in this benchmark: +-- +-- * Splitting into words +-- +-- * Converting to lowercase +-- +-- * Comparing: Eq/Ord instances +-- +module Benchmarks.WordFrequencies + ( benchmark + ) where + +import Criterion (Benchmark, bench, bgroup, whnf) +import Data.Char (toLower) +import Data.List (foldl') +import Data.Map (Map) +import qualified Data.ByteString.Char8 as B +import qualified Data.Map as M +import qualified Data.Text as T +import qualified Data.Text.IO as T + +benchmark :: FilePath -> IO Benchmark +benchmark fp = do + s <- readFile fp + b <- B.readFile fp + t <- T.readFile fp + return $ bgroup "WordFrequencies" + [ bench "String" $ whnf (frequencies . words . map toLower) s + , bench "ByteString" $ whnf (frequencies . B.words . B.map toLower) b + , bench "Text" $ whnf (frequencies . T.words . 
T.toLower) t + ] + +frequencies :: Ord a => [a] -> Map a Int +frequencies = foldl' (\m k -> M.insertWith (+) k 1 m) M.empty diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Benchmarks.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,79 @@ +-- | Main module to run the micro benchmarks +-- +{-# LANGUAGE OverloadedStrings #-} +module Main + ( main + ) where + +import Criterion.Main (Benchmark, defaultMain, bgroup) +import System.FilePath (()) +import System.IO (IOMode (WriteMode), openFile, hSetEncoding, utf8) + +import qualified Benchmarks.Builder as Builder +import qualified Benchmarks.DecodeUtf8 as DecodeUtf8 +import qualified Benchmarks.EncodeUtf8 as EncodeUtf8 +import qualified Benchmarks.Equality as Equality +import qualified Benchmarks.FileRead as FileRead +import qualified Benchmarks.FoldLines as FoldLines +import qualified Benchmarks.Mul as Mul +import qualified Benchmarks.Pure as Pure +import qualified Benchmarks.ReadNumbers as ReadNumbers +import qualified Benchmarks.Replace as Replace +import qualified Benchmarks.Search as Search +import qualified Benchmarks.Stream as Stream +import qualified Benchmarks.WordFrequencies as WordFrequencies + +import qualified Benchmarks.Programs.BigTable as Programs.BigTable +import qualified Benchmarks.Programs.Cut as Programs.Cut +import qualified Benchmarks.Programs.Fold as Programs.Fold +import qualified Benchmarks.Programs.Sort as Programs.Sort +import qualified Benchmarks.Programs.StripTags as Programs.StripTags +import qualified Benchmarks.Programs.Throughput as Programs.Throughput + +main :: IO () +main = benchmarks >>= defaultMain + +benchmarks :: IO [Benchmark] +benchmarks = do + sink <- openFile "/dev/null" 
WriteMode + hSetEncoding sink utf8 + + -- Traditional benchmarks + bs <- sequence + [ Builder.benchmark + , DecodeUtf8.benchmark "html" (tf "libya-chinese.html") + , DecodeUtf8.benchmark "xml" (tf "yiwiki.xml") + , DecodeUtf8.benchmark "ascii" (tf "ascii.txt") + , DecodeUtf8.benchmark "russian" (tf "russian.txt") + , DecodeUtf8.benchmark "japanese" (tf "japanese.txt") + , EncodeUtf8.benchmark "επανάληψη 竺法蘭共譯" + , Equality.benchmark (tf "japanese.txt") + , FileRead.benchmark (tf "russian.txt") + , FoldLines.benchmark (tf "russian.txt") + , Mul.benchmark + , Pure.benchmark "tiny" (tf "tiny.txt") + , Pure.benchmark "ascii" (tf "ascii-small.txt") + , Pure.benchmark "france" (tf "france.html") + , Pure.benchmark "russian" (tf "russian-small.txt") + , Pure.benchmark "japanese" (tf "japanese.txt") + , ReadNumbers.benchmark (tf "numbers.txt") + , Replace.benchmark (tf "russian.txt") "принимая" "своем" + , Search.benchmark (tf "russian.txt") "принимая" + , Stream.benchmark (tf "russian.txt") + , WordFrequencies.benchmark (tf "russian.txt") + ] + + -- Program-like benchmarks + ps <- bgroup "Programs" `fmap` sequence + [ Programs.BigTable.benchmark sink + , Programs.Cut.benchmark (tf "russian.txt") sink 20 40 + , Programs.Fold.benchmark (tf "russian.txt") sink + , Programs.Sort.benchmark (tf "russian.txt") sink + , Programs.StripTags.benchmark (tf "yiwiki.xml") sink + , Programs.Throughput.benchmark (tf "russian.txt") sink + ] + + return $ bs ++ [ps] + where + -- Location of a test file + tf = ("../tests/text-test-data" ) diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Multilang.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Multilang.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Multilang.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Multilang.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,32 @@ +{-# LANGUAGE BangPatterns, OverloadedStrings, 
RankNTypes #-} + +module Main ( + main + ) where + +import Control.Monad (forM_) +import qualified Data.ByteString as B +import qualified Data.Text as Text +import Data.Text.Encoding (decodeUtf8) +import Data.Text (Text) +import System.IO (hFlush, stdout) +import Timer (timer) + +type BM = Text -> () + +bm :: forall a. (Text -> a) -> BM +bm f t = f t `seq` () + +benchmarks :: [(String, Text.Text -> ())] +benchmarks = [ + ("find_first", bm $ Text.isInfixOf "en:Benin") + , ("find_index", bm $ Text.findIndex (=='c')) + ] + +main :: IO () +main = do + !contents <- decodeUtf8 `fmap` B.readFile "../tests/text-test-data/yiwiki.xml" + forM_ benchmarks $ \(name, bmark) -> do + putStr $ name ++ " " + hFlush stdout + putStrLn =<< (timer 100 contents bmark) diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Timer.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Timer.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/haskell/Timer.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/haskell/Timer.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,30 @@ +{-# LANGUAGE BangPatterns #-} + +module Timer (timer) where + +import Control.Exception (evaluate) +import Data.Time.Clock.POSIX (getPOSIXTime) +import GHC.Float (FFFormat(..), formatRealFloat) + +ickyRound :: Int -> Double -> String +ickyRound k = formatRealFloat FFFixed (Just k) + +timer :: Int -> a -> (a -> b) -> IO String +timer count a0 f = do + let loop !k !fastest + | k <= 0 = return fastest + | otherwise = do + start <- getPOSIXTime + let inner a i + | i <= 0 = return () + | otherwise = evaluate (f a) >> inner a (i-1) + inner a0 count + end <- getPOSIXTime + let elapsed = end - start + loop (k-1) (min fastest (elapsed / fromIntegral count)) + t <- loop (3::Int) 1e300 + let log10 x = log x / log 10 + ft = realToFrac t + prec = round (log10 (fromIntegral count) - log10 ft) + return $! 
ickyRound prec ft +{-# NOINLINE timer #-} diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/python/cut.py cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/python/cut.py --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/python/cut.py 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/python/cut.py 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,12 @@ +#!/usr/bin/env python + +import utils, sys, codecs + +def cut(filename, l, r): + content = open(filename, encoding='utf-8') + for line in content: + print(line[l:r]) + +for f in sys.argv[1:]: + t = utils.benchmark(lambda: cut(f, 20, 40)) + sys.stderr.write('{0}: {1}\n'.format(f, t)) diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/python/multilang.py cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/python/multilang.py --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/python/multilang.py 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/python/multilang.py 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,50 @@ +#!/usr/bin/env python + +import math +import sys +import time + +def find_first(): + cf = contents.find + return timer(lambda: cf("en:Benin")) + +def timer(f, count=100): + a = 1e300 + def g(): + return + for i in xrange(3): + start = time.time() + for j in xrange(count): + g() + a = min(a, (time.time() - start) / count) + + b = 1e300 + for i in xrange(3): + start = time.time() + for j in xrange(count): + f() + b = min(b, (time.time() - start) / count) + + return round(b - a, int(round(math.log(count, 10) - math.log(b - a, 10)))) + +contents = open('../../tests/text-test-data/yiwiki.xml', 'r').read() +contents = contents.decode('utf-8') + +benchmarks = ( + find_first, + ) + +to_run = sys.argv[1:] +bms = [] +if to_run: + for r in to_run: + for b in benchmarks: + if b.__name__.startswith(r): + bms.append(b) +else: + bms = benchmarks + +for b in bms: + 
sys.stdout.write(b.__name__ + ' ') + sys.stdout.flush() + print b() diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/python/sort.py cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/python/sort.py --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/python/sort.py 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/python/sort.py 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,13 @@ +#!/usr/bin/env python + +import utils, sys, codecs + +def sort(filename): + content = open(filename, encoding='utf-8').read() + lines = content.splitlines() + lines.sort() + print('\n'.join(lines)) + +for f in sys.argv[1:]: + t = utils.benchmark(lambda: sort(f)) + sys.stderr.write('{0}: {1}\n'.format(f, t)) diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/python/strip_tags.py cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/python/strip_tags.py --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/python/strip_tags.py 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/python/strip_tags.py 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,25 @@ +#!/usr/bin/env python + +import utils, sys + +def strip_tags(filename): + string = open(filename, encoding='utf-8').read() + + d = 0 + out = [] + + for c in string: + if c == '<': d += 1 + + if d > 0: + out += ' ' + else: + out += c + + if c == '>': d -= 1 + + print(''.join(out)) + +for f in sys.argv[1:]: + t = utils.benchmark(lambda: strip_tags(f)) + sys.stderr.write('{0}: {1}\n'.format(f, t)) diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/python/utils.py cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/python/utils.py --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/python/utils.py 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/python/utils.py 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,18 @@ +#!/usr/bin/env 
python + +import sys, time + +def benchmark_once(f): + start = time.time() + f() + end = time.time() + return end - start + +def benchmark(f): + runs = 100 + total = 0.0 + for i in range(runs): + result = benchmark_once(f) + sys.stderr.write('Run {0}: {1}\n'.format(i, result)) + total += result + return total / runs diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/ruby/cut.rb cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/ruby/cut.rb --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/ruby/cut.rb 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/ruby/cut.rb 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,16 @@ +#!/usr/bin/env ruby + +require './utils.rb' + +def cut(filename, l, r) + File.open(filename, 'r:utf-8') do |file| + file.each_line do |line| + puts line[l, r - l] + end + end +end + +ARGV.each do |f| + t = benchmark { cut(f, 20, 40) } + STDERR.puts "#{f}: #{t}" +end diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/ruby/fold.rb cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/ruby/fold.rb --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/ruby/fold.rb 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/ruby/fold.rb 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,50 @@ +#!/usr/bin/env ruby + +require './utils.rb' + +def fold(filename, max_width) + File.open(filename, 'r:utf-8') do |file| + # Words in this paragraph + paragraph = [] + + file.each_line do |line| + # If we encounter an empty line, we reformat and dump the current + # paragraph + if line.strip.empty? + puts fold_paragraph(paragraph, max_width) + puts + paragraph = [] + # Otherwise, we append the words found in the line to the paragraph + else + paragraph.concat line.split + end + end + + # Last paragraph + puts fold_paragraph(paragraph, max_width) unless paragraph.empty? 
+ end +end + +# Fold a single paragraph to the desired width +def fold_paragraph(paragraph, max_width) + # Gradually build our output + str, *rest = paragraph + width = str.length + + rest.each do |word| + if width + word.length + 1 <= max_width + str << ' ' << word + width += word.length + 1 + else + str << "\n" << word + width = word.length + end + end + + str +end + +ARGV.each do |f| + t = benchmark { fold(f, 80) } + STDERR.puts "#{f}: #{t}" +end diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/ruby/sort.rb cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/ruby/sort.rb --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/ruby/sort.rb 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/ruby/sort.rb 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,15 @@ +#!/usr/bin/env ruby + +require './utils.rb' + +def sort(filename) + File.open(filename, 'r:utf-8') do |file| + content = file.read + puts content.lines.sort.join + end +end + +ARGV.each do |f| + t = benchmark { sort(f) } + STDERR.puts "#{f}: #{t}" +end diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/ruby/strip_tags.rb cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/ruby/strip_tags.rb --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/ruby/strip_tags.rb 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/ruby/strip_tags.rb 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,22 @@ +#!/usr/bin/env ruby + +require './utils.rb' + +def strip_tags(filename) + File.open(filename, 'r:utf-8') do |file| + str = file.read + + d = 0 + + str.each_char do |c| + d += 1 if c == '<' + putc(if d > 0 then ' ' else c end) + d -= 1 if c == '>' + end + end +end + +ARGV.each do |f| + t = benchmark { strip_tags(f) } + STDERR.puts "#{f}: #{t}" +end diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/ruby/utils.rb cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/ruby/utils.rb 
--- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/ruby/utils.rb 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/ruby/utils.rb 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,14 @@ +require 'benchmark' + +def benchmark(&block) + runs = 100 + total = 0 + + runs.times do |i| + result = Benchmark.measure(&block).total + $stderr.puts "Run #{i}: #{result}" + total += result + end + + total / runs +end diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/Setup.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/Setup.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/Setup.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/Setup.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,2 @@ +import Distribution.Simple +main = defaultMain diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/text-benchmarks.cabal cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/text-benchmarks.cabal --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/benchmarks/text-benchmarks.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/benchmarks/text-benchmarks.cabal 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,56 @@ +name: text-benchmarks +version: 0.0.0.0 +synopsis: Benchmarks for the text package +description: Benchmarks for the text package +homepage: https://bitbucket.org/bos/text +license: BSD3 +license-file: ../LICENSE +author: Jasper Van der Jeugt , + Bryan O'Sullivan , + Tom Harper , + Duncan Coutts +maintainer: jaspervdj@gmail.com +category: Text +build-type: Simple + +cabal-version: >=1.2 + +flag llvm + description: use LLVM + default: False + manual: True + +executable text-benchmarks + hs-source-dirs: haskell .. 
+ c-sources: ../cbits/cbits.c + cbits/time_iconv.c + include-dirs: ../include + main-is: Benchmarks.hs + ghc-options: -Wall -O2 -rtsopts + if flag(llvm) + ghc-options: -fllvm + cpp-options: -DHAVE_DEEPSEQ -DINTEGER_GMP + build-depends: base == 4.*, + binary, + blaze-builder, + bytestring, + bytestring-lexing >= 0.5.0, + containers, + criterion >= 0.10.0.0, + deepseq, + directory, + filepath, + ghc-prim, + integer-gmp, + stringsearch, + utf8-string, + vector + +executable text-multilang + hs-source-dirs: haskell + main-is: Multilang.hs + ghc-options: -Wall -O2 + build-depends: base == 4.*, + bytestring, + text, + time diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/cbits/cbits.c cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/cbits/cbits.c --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/cbits/cbits.c 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/cbits/cbits.c 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,311 @@ +/* + * Copyright (c) 2011 Bryan O'Sullivan . + * + * Portions copyright (c) 2008-2010 Björn Höhrmann . + * + * See http://bjoern.hoehrmann.de/utf-8/decoder/dfa/ for details. + */ + +#include +#include +#include +#include "text_cbits.h" + +void _hs_text_memcpy(void *dest, size_t doff, const void *src, size_t soff, + size_t n) +{ + memcpy(dest + (doff<<1), src + (soff<<1), n<<1); +} + +int _hs_text_memcmp(const void *a, size_t aoff, const void *b, size_t boff, + size_t n) +{ + return memcmp(a + (aoff<<1), b + (boff<<1), n<<1); +} + +#define UTF8_ACCEPT 0 +#define UTF8_REJECT 12 + +static const uint8_t utf8d[] = { + /* + * The first part of the table maps bytes to character classes that + * to reduce the size of the transition table and create bitmasks. 
+ */ + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, + 8,8,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, + 10,3,3,3,3,3,3,3,3,3,3,3,3,4,3,3, 11,6,6,6,5,8,8,8,8,8,8,8,8,8,8,8, + + /* + * The second part is a transition table that maps a combination of + * a state of the automaton and a character class to a state. + */ + 0,12,24,36,60,96,84,12,12,12,48,72, 12,12,12,12,12,12,12,12,12,12,12,12, + 12, 0,12,12,12,12,12, 0,12, 0,12,12, 12,24,12,12,12,12,12,24,12,24,12,12, + 12,12,12,12,12,12,12,24,12,12,12,12, 12,24,12,12,12,12,12,12,12,24,12,12, + 12,12,12,12,12,12,12,36,12,36,12,12, 12,36,12,12,12,12,12,36,12,36,12,12, + 12,36,12,12,12,12,12,12,12,12,12,12, +}; + +static inline uint32_t +decode(uint32_t *state, uint32_t* codep, uint32_t byte) { + uint32_t type = utf8d[byte]; + + *codep = (*state != UTF8_ACCEPT) ? 
+ (byte & 0x3fu) | (*codep << 6) : + (0xff >> type) & (byte); + + return *state = utf8d[256 + *state + type]; +} + +/* + * The ISO 8859-1 (aka latin-1) code points correspond exactly to the first 256 unicode + * code-points, therefore we can trivially convert from a latin-1 encoded bytestring to + * an UTF16 array + */ +void +_hs_text_decode_latin1(uint16_t *dest, const uint8_t *src, + const uint8_t *srcend) +{ + const uint8_t *p = src; + +#if defined(__i386__) || defined(__x86_64__) + /* This optimization works on a little-endian systems by using + (aligned) 32-bit loads instead of 8-bit loads + */ + + /* consume unaligned prefix */ + while (p != srcend && (uintptr_t)p & 0x3) + *dest++ = *p++; + + /* iterate over 32-bit aligned loads */ + while (p < srcend - 3) { + const uint32_t w = *((const uint32_t *)p); + + *dest++ = w & 0xff; + *dest++ = (w >> 8) & 0xff; + *dest++ = (w >> 16) & 0xff; + *dest++ = (w >> 24) & 0xff; + + p += 4; + } +#endif + + /* handle unaligned suffix */ + while (p != srcend) + *dest++ = *p++; +} + +/* + * A best-effort decoder. Runs until it hits either end of input or + * the start of an invalid byte sequence. + * + * At exit, we update *destoff with the next offset to write to, *src + * with the next source location past the last one successfully + * decoded, and return the next source location to read from. + * + * Moreover, we expose the internal decoder state (state0 and + * codepoint0), allowing one to restart the decoder after it + * terminates (say, due to a partial codepoint). 
+ * + * In particular, there are a few possible outcomes, + * + * 1) We decoded the buffer entirely: + * In this case we return srcend + * state0 == UTF8_ACCEPT + * + * 2) We met an invalid encoding + * In this case we return the address of the first invalid byte + * state0 == UTF8_REJECT + * + * 3) We reached the end of the buffer while decoding a codepoint + * In this case we return a pointer to the first byte of the partial codepoint + * state0 != UTF8_ACCEPT, UTF8_REJECT + * + */ +#if defined(__GNUC__) || defined(__clang__) +static inline uint8_t const * +_hs_text_decode_utf8_int(uint16_t *const dest, size_t *destoff, + const uint8_t **src, const uint8_t *srcend, + uint32_t *codepoint0, uint32_t *state0) + __attribute((always_inline)); +#endif + +static inline uint8_t const * +_hs_text_decode_utf8_int(uint16_t *const dest, size_t *destoff, + const uint8_t **src, const uint8_t *srcend, + uint32_t *codepoint0, uint32_t *state0) +{ + uint16_t *d = dest + *destoff; + const uint8_t *s = *src, *last = *src; + uint32_t state = *state0; + uint32_t codepoint = *codepoint0; + + while (s < srcend) { +#if defined(__i386__) || defined(__x86_64__) + /* + * This code will only work on a little-endian system that + * supports unaligned loads. + * + * It gives a substantial speed win on data that is purely or + * partly ASCII (e.g. HTML), at only a slight cost on purely + * non-ASCII text. + */ + + if (state == UTF8_ACCEPT) { + while (s < srcend - 4) { + codepoint = *((uint32_t *) s); + if ((codepoint & 0x80808080) != 0) + break; + s += 4; + + /* + * Tried 32-bit stores here, but the extra bit-twiddling + * slowed the code down. 
+ */ + + *d++ = (uint16_t) (codepoint & 0xff); + *d++ = (uint16_t) ((codepoint >> 8) & 0xff); + *d++ = (uint16_t) ((codepoint >> 16) & 0xff); + *d++ = (uint16_t) ((codepoint >> 24) & 0xff); + } + last = s; + } +#endif + + if (decode(&state, &codepoint, *s++) != UTF8_ACCEPT) { + if (state != UTF8_REJECT) + continue; + break; + } + + if (codepoint <= 0xffff) + *d++ = (uint16_t) codepoint; + else { + *d++ = (uint16_t) (0xD7C0 + (codepoint >> 10)); + *d++ = (uint16_t) (0xDC00 + (codepoint & 0x3FF)); + } + last = s; + } + + *destoff = d - dest; + *codepoint0 = codepoint; + *state0 = state; + *src = last; + + return s; +} + +uint8_t const * +_hs_text_decode_utf8_state(uint16_t *const dest, size_t *destoff, + const uint8_t **src, + const uint8_t *srcend, + uint32_t *codepoint0, uint32_t *state0) +{ + uint8_t const *ret = _hs_text_decode_utf8_int(dest, destoff, src, srcend, + codepoint0, state0); + if (*state0 == UTF8_REJECT) + ret -=1; + return ret; +} + +/* + * Helper to decode buffer and discard final decoder state + */ +const uint8_t * +_hs_text_decode_utf8(uint16_t *const dest, size_t *destoff, + const uint8_t *src, const uint8_t *const srcend) +{ + uint32_t codepoint; + uint32_t state = UTF8_ACCEPT; + uint8_t const *ret = _hs_text_decode_utf8_int(dest, destoff, &src, srcend, + &codepoint, &state); + /* Back up if we have an incomplete or invalid encoding */ + if (state != UTF8_ACCEPT) + ret -= 1; + return ret; +} + +void +_hs_text_encode_utf8(uint8_t **destp, const uint16_t *src, size_t srcoff, + size_t srclen) +{ + const uint16_t *srcend; + uint8_t *dest = *destp; + + src += srcoff; + srcend = src + srclen; + + ascii: +#if defined(__x86_64__) + while (srcend - src >= 4) { + uint64_t w = *((uint64_t *) src); + + if (w & 0xFF80FF80FF80FF80ULL) { + if (!(w & 0x000000000000FF80ULL)) { + *dest++ = w & 0xFFFF; + src++; + if (!(w & 0x00000000FF800000ULL)) { + *dest++ = (w >> 16) & 0xFFFF; + src++; + if (!(w & 0x0000FF8000000000ULL)) { + *dest++ = (w >> 32) & 0xFFFF; + 
src++; + } + } + } + break; + } + *dest++ = w & 0xFFFF; + *dest++ = (w >> 16) & 0xFFFF; + *dest++ = (w >> 32) & 0xFFFF; + *dest++ = w >> 48; + src += 4; + } +#endif + +#if defined(__i386__) + while (srcend - src >= 2) { + uint32_t w = *((uint32_t *) src); + + if (w & 0xFF80FF80) + break; + *dest++ = w & 0xFFFF; + *dest++ = w >> 16; + src += 2; + } +#endif + + while (src < srcend) { + uint16_t w = *src++; + + if (w <= 0x7F) { + *dest++ = w; + /* An ASCII byte is likely to begin a run of ASCII bytes. + Falling back into the fast path really helps performance. */ + goto ascii; + } + else if (w <= 0x7FF) { + *dest++ = (w >> 6) | 0xC0; + *dest++ = (w & 0x3f) | 0x80; + } + else if (w < 0xD800 || w > 0xDBFF) { + *dest++ = (w >> 12) | 0xE0; + *dest++ = ((w >> 6) & 0x3F) | 0x80; + *dest++ = (w & 0x3F) | 0x80; + } else { + uint32_t c = ((((uint32_t) w) - 0xD800) << 10) + + (((uint32_t) *src++) - 0xDC00) + 0x10000; + *dest++ = (c >> 18) | 0xF0; + *dest++ = ((c >> 12) & 0x3F) | 0x80; + *dest++ = ((c >> 6) & 0x3F) | 0x80; + *dest++ = (c & 0x3F) | 0x80; + } + } + + *destp = dest; +} diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/changelog.md cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/changelog.md --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/changelog.md 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/changelog.md 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,164 @@ +1.2.2.1 + +* The switch to `integer-pure` in 1.2.2.0 was apparently mistaken. + The build flag has been renamed accordingly. Your army of diligent + maintainers apologizes for the churn. + +* Spec compliance: toCaseFold now follows the Unicode 8.0 spec + (updated from 7.0) + +* An STG lint error has been fixed + +1.2.2.0 + +* The `integer-simple` package, upon which this package optionally + depended, has been replaced with `integer-pure`. The build flag has + been renamed accordingly. 
+ +* Bug fix: For the `Binary` instance, If UTF-8 decoding fails during a + `get`, the error is propagated via `fail` instead of an uncatchable + crash. + +* New function: `takeWhileEnd` + +* New instances for the `Text` types: + * if `base` >= 4.7: `PrintfArg` + * if `base` >= 4.9: `Semigroup` + +1.2.1.3 + +* Bug fix: As it turns out, moving the literal rewrite rules to simplifier + phase 2 does not prevent competition with the `unpack` rule, which is + also active in this phase. Unfortunately this was hidden due to a silly + test environment mistake. Moving literal rules back to phase 1 finally + fixes GHC Trac #10528 correctly. + +1.2.1.2 + +* Bug fix: Run literal rewrite rules in simplifier phase 2. + The behavior of the simplifier changed in GHC 7.10.2, + causing these rules to fail to fire, leading to poor code generation + and long compilation times. See + [GHC Trac #10528](https://ghc.haskell.org/trac/ghc/ticket/10528). + +1.2.1.1 + +* Expose unpackCString#, which you should never use. + +1.2.1.0 + +* Added Binary instances for both Text types. (If you have previously + been using the text-binary package to get a Binary instance, it is + now obsolete.) 
+ +1.2.0.6 + +* Fixed a space leak in UTF-8 decoding + +1.2.0.5 + +* Feature parity: repeat, cycle, iterate are now implemented for lazy + Text, and the Data instance is more complete + +* Build speed: an inliner space explosion has been fixed with toCaseFold + +* Bug fix: encoding Int to a Builder would infinite-loop if the + integer-simple package was used + +* Deprecation: OnEncodeError and EncodeError are deprecated, as they + are never used + +* Internals: some types that are used internally in fusion-related + functions have moved around, been renamed, or been deleted (we don't + bump the major version if .Internal modules change) + +* Spec compliance: toCaseFold now follows the Unicode 7.0 spec + (updated from 6.3) + +1.2.0.4 + +* Fixed an incompatibility with base < 4.5 + +1.2.0.3 + +* Update formatRealFloat to correspond to the definition in versions + of base newer than 4.5 (https://github.com/bos/text/issues/105) + +1.2.0.2 + +* Bumped lower bound on deepseq to 1.4 for compatibility with the + upcoming GHC 7.10 + +1.2.0.1 + +* Fixed a buffer overflow in rendering of large Integers + (https://github.com/bos/text/issues/99) + +1.2.0.0 + +* Fixed an integer overflow in the replace function + (https://github.com/bos/text/issues/81) + +* Fixed a hang in lazy decodeUtf8With + (https://github.com/bos/text/issues/87) + +* Reduced codegen bloat caused by use of empty and single-character + literals + +* Added an instance of IsList for GHC 7.8 and above + +1.1.1.0 + +* The Data.Data instance now allows gunfold to work, via a virtual + pack constructor + +* dropEnd, takeEnd: new functions + +* Comparing the length of a Text against a number can now + short-circuit in more cases + +1.1.0.1 + +* streamDecodeUtf8: fixed gh-70, did not return all unconsumed bytes + in single-byte chunks + +1.1.0.0 + +* encodeUtf8: Performance is improved by up to 4x. 
+ +* encodeUtf8Builder, encodeUtf8BuilderEscaped: new functions, + available only if bytestring >= 0.10.4.0 is installed, that allow + very fast and flexible encoding of a Text value to a bytestring + Builder. + + As an example of the performance gain to be had, the + encodeUtf8BuilderEscaped function helps to double the speed of JSON + encoding in the latest version of aeson! (Note: if all you need is a + plain ByteString, encodeUtf8 is still the faster way to go.) + +* All of the internal module hierarchy is now publicly exposed. If a + module is in the .Internal hierarchy, or is documented as internal, + use at your own risk - there are no API stability guarantees for + internal modules! + +1.0.0.1 + +* decodeUtf8: Fixed a regression that caused us to incorrectly + identify truncated UTF-8 as valid (gh-61) + +1.0.0.0 + +* Added support for Unicode 6.3.0 to case conversion functions + +* New function toTitle converts words in a string to title case + +* New functions peekCStringLen and withCStringLen simplify + interoperability with C functionns + +* Added support for decoding UTF-8 in stream-friendly fashion + +* Fixed a bug in mapAccumL + +* Added trusted Haskell support + +* Removed support for GHC 6.10 (released in 2008) and older diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Array.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Array.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Array.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Array.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,246 @@ +{-# LANGUAGE BangPatterns, CPP, ForeignFunctionInterface, MagicHash, Rank2Types, + RecordWildCards, UnboxedTuples, UnliftedFFITypes #-} +{-# OPTIONS_GHC -fno-warn-unused-matches #-} +-- | +-- Module : Data.Text.Array +-- Copyright : (c) 2009, 2010, 2011 Bryan O'Sullivan +-- +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- 
Portability : portable +-- +-- Packed, unboxed, heap-resident arrays. Suitable for performance +-- critical use, both in terms of large data quantities and high +-- speed. +-- +-- This module is intended to be imported @qualified@, to avoid name +-- clashes with "Prelude" functions, e.g. +-- +-- > import qualified Data.Text.Array as A +-- +-- The names in this module resemble those in the 'Data.Array' family +-- of modules, but are shorter due to the assumption of qualifid +-- naming. +module Data.Text.Array + ( + -- * Types + Array(Array, aBA) + , MArray(MArray, maBA) + + -- * Functions + , copyM + , copyI + , empty + , equal +#if defined(ASSERTS) + , length +#endif + , run + , run2 + , toList + , unsafeFreeze + , unsafeIndex + , new + , unsafeWrite + ) where + +#if defined(ASSERTS) +-- This fugly hack is brought by GHC's apparent reluctance to deal +-- with MagicHash and UnboxedTuples when inferring types. Eek! +# define CHECK_BOUNDS(_func_,_len_,_k_) \ +if (_k_) < 0 || (_k_) >= (_len_) then error ("Data.Text.Array." ++ (_func_) ++ ": bounds error, offset " ++ show (_k_) ++ ", length " ++ show (_len_)) else +#else +# define CHECK_BOUNDS(_func_,_len_,_k_) +#endif + +#include "MachDeps.h" + +#if defined(ASSERTS) +import Control.Exception (assert) +#endif +#if __GLASGOW_HASKELL__ >= 702 +import Control.Monad.ST.Unsafe (unsafeIOToST) +#else +import Control.Monad.ST (unsafeIOToST) +#endif +import Data.Bits ((.&.), xor) +import Data.Text.Internal.Unsafe (inlinePerformIO) +import Data.Text.Internal.Unsafe.Shift (shiftL, shiftR) +#if __GLASGOW_HASKELL__ >= 703 +import Foreign.C.Types (CInt(CInt), CSize(CSize)) +#else +import Foreign.C.Types (CInt, CSize) +#endif +import GHC.Base (ByteArray#, MutableByteArray#, Int(..), + indexWord16Array#, newByteArray#, + unsafeFreezeByteArray#, writeWord16Array#) +import GHC.ST (ST(..), runST) +import GHC.Word (Word16(..)) +import Prelude hiding (length, read) + +-- | Immutable array type. 
+data Array = Array { + aBA :: ByteArray# +#if defined(ASSERTS) + , aLen :: {-# UNPACK #-} !Int -- length (in units of Word16, not bytes) +#endif + } + +-- | Mutable array type, for use in the ST monad. +data MArray s = MArray { + maBA :: MutableByteArray# s +#if defined(ASSERTS) + , maLen :: {-# UNPACK #-} !Int -- length (in units of Word16, not bytes) +#endif + } + +#if defined(ASSERTS) +-- | Operations supported by all arrays. +class IArray a where + -- | Return the length of an array. + length :: a -> Int + +instance IArray Array where + length = aLen + {-# INLINE length #-} + +instance IArray (MArray s) where + length = maLen + {-# INLINE length #-} +#endif + +-- | Create an uninitialized mutable array. +new :: forall s. Int -> ST s (MArray s) +new n + | n < 0 || n .&. highBit /= 0 = array_size_error + | otherwise = ST $ \s1# -> + case newByteArray# len# s1# of + (# s2#, marr# #) -> (# s2#, MArray marr# +#if defined(ASSERTS) + n +#endif + #) + where !(I# len#) = bytesInArray n + highBit = maxBound `xor` (maxBound `shiftR` 1) +{-# INLINE new #-} + +array_size_error :: a +array_size_error = error "Data.Text.Array.new: size overflow" + +-- | Freeze a mutable array. Do not mutate the 'MArray' afterwards! +unsafeFreeze :: MArray s -> ST s Array +unsafeFreeze MArray{..} = ST $ \s1# -> + case unsafeFreezeByteArray# maBA s1# of + (# s2#, ba# #) -> (# s2#, Array ba# +#if defined(ASSERTS) + maLen +#endif + #) +{-# INLINE unsafeFreeze #-} + +-- | Indicate how many bytes would be used for an array of the given +-- size. +bytesInArray :: Int -> Int +bytesInArray n = n `shiftL` 1 +{-# INLINE bytesInArray #-} + +-- | Unchecked read of an immutable array. May return garbage or +-- crash on an out-of-bounds access. +unsafeIndex :: Array -> Int -> Word16 +unsafeIndex Array{..} i@(I# i#) = + CHECK_BOUNDS("unsafeIndex",aLen,i) + case indexWord16Array# aBA i# of r# -> (W16# r#) +{-# INLINE unsafeIndex #-} + +-- | Unchecked write of a mutable array. 
May return garbage or crash +-- on an out-of-bounds access. +unsafeWrite :: MArray s -> Int -> Word16 -> ST s () +unsafeWrite MArray{..} i@(I# i#) (W16# e#) = ST $ \s1# -> + CHECK_BOUNDS("unsafeWrite",maLen,i) + case writeWord16Array# maBA i# e# s1# of + s2# -> (# s2#, () #) +{-# INLINE unsafeWrite #-} + +-- | Convert an immutable array to a list. +toList :: Array -> Int -> Int -> [Word16] +toList ary off len = loop 0 + where loop i | i < len = unsafeIndex ary (off+i) : loop (i+1) + | otherwise = [] + +-- | An empty immutable array. +empty :: Array +empty = runST (new 0 >>= unsafeFreeze) + +-- | Run an action in the ST monad and return an immutable array of +-- its result. +run :: (forall s. ST s (MArray s)) -> Array +run k = runST (k >>= unsafeFreeze) + +-- | Run an action in the ST monad and return an immutable array of +-- its result paired with whatever else the action returns. +run2 :: (forall s. ST s (MArray s, a)) -> (Array, a) +run2 k = runST (do + (marr,b) <- k + arr <- unsafeFreeze marr + return (arr,b)) +{-# INLINE run2 #-} + +-- | Copy some elements of a mutable array. +copyM :: MArray s -- ^ Destination + -> Int -- ^ Destination offset + -> MArray s -- ^ Source + -> Int -- ^ Source offset + -> Int -- ^ Count + -> ST s () +copyM dest didx src sidx count + | count <= 0 = return () + | otherwise = +#if defined(ASSERTS) + assert (sidx + count <= length src) . + assert (didx + count <= length dest) . +#endif + unsafeIOToST $ memcpyM (maBA dest) (fromIntegral didx) + (maBA src) (fromIntegral sidx) + (fromIntegral count) +{-# INLINE copyM #-} + +-- | Copy some elements of an immutable array. +copyI :: MArray s -- ^ Destination + -> Int -- ^ Destination offset + -> Array -- ^ Source + -> Int -- ^ Source offset + -> Int -- ^ First offset in destination /not/ to + -- copy (i.e. 
/not/ length) + -> ST s () +copyI dest i0 src j0 top + | i0 >= top = return () + | otherwise = unsafeIOToST $ + memcpyI (maBA dest) (fromIntegral i0) + (aBA src) (fromIntegral j0) + (fromIntegral (top-i0)) +{-# INLINE copyI #-} + +-- | Compare portions of two arrays for equality. No bounds checking +-- is performed. +equal :: Array -- ^ First + -> Int -- ^ Offset into first + -> Array -- ^ Second + -> Int -- ^ Offset into second + -> Int -- ^ Count + -> Bool +equal arrA offA arrB offB count = inlinePerformIO $ do + i <- memcmp (aBA arrA) (fromIntegral offA) + (aBA arrB) (fromIntegral offB) (fromIntegral count) + return $! i == 0 +{-# INLINE equal #-} + +foreign import ccall unsafe "_hs_text_memcpy" memcpyI + :: MutableByteArray# s -> CSize -> ByteArray# -> CSize -> CSize -> IO () + +foreign import ccall unsafe "_hs_text_memcmp" memcmp + :: ByteArray# -> CSize -> ByteArray# -> CSize -> CSize -> IO CInt + +foreign import ccall unsafe "_hs_text_memcpy" memcpyM + :: MutableByteArray# s -> CSize -> MutableByteArray# s -> CSize -> CSize + -> IO () diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Encoding/Error.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Encoding/Error.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Encoding/Error.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Encoding/Error.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,125 @@ +{-# LANGUAGE CPP, DeriveDataTypeable #-} +#if __GLASGOW_HASKELL__ >= 704 +{-# LANGUAGE Safe #-} +#elif __GLASGOW_HASKELL__ >= 702 +{-# LANGUAGE Trustworthy #-} +#endif +-- | +-- Module : Data.Text.Encoding.Error +-- Copyright : (c) Bryan O'Sullivan 2009 +-- +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : GHC +-- +-- Types and functions for dealing with encoding and decoding errors +-- in Unicode text. 
+-- +-- The standard functions for encoding and decoding text are strict, +-- which is to say that they throw exceptions on invalid input. This +-- is often unhelpful on real world input, so alternative functions +-- exist that accept custom handlers for dealing with invalid inputs. +-- These 'OnError' handlers are normal Haskell functions. You can use +-- one of the presupplied functions in this module, or you can write a +-- custom handler of your own. + +module Data.Text.Encoding.Error + ( + -- * Error handling types + UnicodeException(..) + , OnError + , OnDecodeError + , OnEncodeError + -- * Useful error handling functions + , lenientDecode + , strictDecode + , strictEncode + , ignore + , replace + ) where + +import Control.DeepSeq (NFData (..)) +import Control.Exception (Exception, throw) +import Data.Typeable (Typeable) +import Data.Word (Word8) +import Numeric (showHex) + +-- | Function type for handling a coding error. It is supplied with +-- two inputs: +-- +-- * A 'String' that describes the error. +-- +-- * The input value that caused the error. If the error arose +-- because the end of input was reached or could not be identified +-- precisely, this value will be 'Nothing'. +-- +-- If the handler returns a value wrapped with 'Just', that value will +-- be used in the output as the replacement for the invalid input. If +-- it returns 'Nothing', no value will be used in the output. +-- +-- Should the handler need to abort processing, it should use 'error' +-- or 'throw' an exception (preferably a 'UnicodeException'). It may +-- use the description provided to construct a more helpful error +-- report. +type OnError a b = String -> Maybe a -> Maybe b + +-- | A handler for a decoding error. +type OnDecodeError = OnError Word8 Char + +-- | A handler for an encoding error. +{-# DEPRECATED OnEncodeError "This exception is never used in practice, and will be removed." 
#-} +type OnEncodeError = OnError Char Word8 + +-- | An exception type for representing Unicode encoding errors. +data UnicodeException = + DecodeError String (Maybe Word8) + -- ^ Could not decode a byte sequence because it was invalid under + -- the given encoding, or ran out of input in mid-decode. + | EncodeError String (Maybe Char) + -- ^ Tried to encode a character that could not be represented + -- under the given encoding, or ran out of input in mid-encode. + deriving (Eq, Typeable) + +{-# DEPRECATED EncodeError "This constructor is never used, and will be removed." #-} + +showUnicodeException :: UnicodeException -> String +showUnicodeException (DecodeError desc (Just w)) + = "Cannot decode byte '\\x" ++ showHex w ("': " ++ desc) +showUnicodeException (DecodeError desc Nothing) + = "Cannot decode input: " ++ desc +showUnicodeException (EncodeError desc (Just c)) + = "Cannot encode character '\\x" ++ showHex (fromEnum c) ("': " ++ desc) +showUnicodeException (EncodeError desc Nothing) + = "Cannot encode input: " ++ desc + +instance Show UnicodeException where + show = showUnicodeException + +instance Exception UnicodeException + +instance NFData UnicodeException where + rnf (DecodeError desc w) = rnf desc `seq` rnf w `seq` () + rnf (EncodeError desc c) = rnf desc `seq` rnf c `seq` () + +-- | Throw a 'UnicodeException' if decoding fails. +strictDecode :: OnDecodeError +strictDecode desc c = throw (DecodeError desc c) + +-- | Replace an invalid input byte with the Unicode replacement +-- character U+FFFD. +lenientDecode :: OnDecodeError +lenientDecode _ _ = Just '\xfffd' + +-- | Throw a 'UnicodeException' if encoding fails. +{-# DEPRECATED strictEncode "This function always throws an exception, and will be removed." #-} +strictEncode :: OnEncodeError +strictEncode desc c = throw (EncodeError desc c) + +-- | Ignore an invalid input, substituting nothing in the output. 
+ignore :: OnError a b +ignore _ _ = Nothing + +-- | Replace an invalid input with a valid output. +replace :: b -> OnError a b +replace c _ _ = Just c diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Encoding.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Encoding.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Encoding.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Encoding.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,490 @@ +{-# LANGUAGE BangPatterns, CPP, ForeignFunctionInterface, GeneralizedNewtypeDeriving, MagicHash, + UnliftedFFITypes #-} +#if __GLASGOW_HASKELL__ >= 702 +{-# LANGUAGE Trustworthy #-} +#endif +-- | +-- Module : Data.Text.Encoding +-- Copyright : (c) 2009, 2010, 2011 Bryan O'Sullivan, +-- (c) 2009 Duncan Coutts, +-- (c) 2008, 2009 Tom Harper +-- +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : portable +-- +-- Functions for converting 'Text' values to and from 'ByteString', +-- using several standard encodings. +-- +-- To gain access to a much larger family of encodings, use the +-- @text-icu@ package: + +module Data.Text.Encoding + ( + -- * Decoding ByteStrings to Text + -- $strict + decodeASCII + , decodeLatin1 + , decodeUtf8 + , decodeUtf16LE + , decodeUtf16BE + , decodeUtf32LE + , decodeUtf32BE + + -- ** Catchable failure + , decodeUtf8' + + -- ** Controllable error handling + , decodeUtf8With + , decodeUtf16LEWith + , decodeUtf16BEWith + , decodeUtf32LEWith + , decodeUtf32BEWith + + -- ** Stream oriented decoding + -- $stream + , streamDecodeUtf8 + , streamDecodeUtf8With + , Decoding(..) 
+ + -- * Encoding Text to ByteStrings + , encodeUtf8 + , encodeUtf16LE + , encodeUtf16BE + , encodeUtf32LE + , encodeUtf32BE + +#if MIN_VERSION_bytestring(0,10,4) + -- * Encoding Text using ByteString Builders + -- | /Note/ that these functions are only available if built against + -- @bytestring >= 0.10.4.0@. + , encodeUtf8Builder + , encodeUtf8BuilderEscaped +#endif + ) where + +#if __GLASGOW_HASKELL__ >= 702 +import Control.Monad.ST.Unsafe (unsafeIOToST, unsafeSTToIO) +#else +import Control.Monad.ST (unsafeIOToST, unsafeSTToIO) +#endif + +#if MIN_VERSION_bytestring(0,10,4) +import Data.Bits ((.&.)) +import Data.Text.Internal.Unsafe.Char (ord) +import qualified Data.ByteString.Builder as B +import qualified Data.ByteString.Builder.Internal as B hiding (empty, append) +import qualified Data.ByteString.Builder.Prim as BP +import qualified Data.ByteString.Builder.Prim.Internal as BP +import qualified Data.Text.Internal.Encoding.Utf16 as U16 +#endif + +import Control.Exception (evaluate, try) +import Control.Monad.ST (runST) +import Data.ByteString as B +import Data.ByteString.Internal as B hiding (c2w) +import Data.Text.Encoding.Error (OnDecodeError, UnicodeException, strictDecode) +import Data.Text.Internal (Text(..), safe, text) +import Data.Text.Internal.Private (runText) +import Data.Text.Internal.Unsafe.Char (unsafeWrite) +import Data.Text.Internal.Unsafe.Shift (shiftR) +import Data.Text.Show () +import Data.Text.Unsafe (unsafeDupablePerformIO) +import Data.Word (Word8, Word32) +import Foreign.C.Types (CSize(..)) +import Foreign.ForeignPtr (withForeignPtr) +import Foreign.Marshal.Utils (with) +import Foreign.Ptr (Ptr, minusPtr, nullPtr, plusPtr) +import Foreign.Storable (Storable, peek, poke) +import GHC.Base (ByteArray#, MutableByteArray#) +import qualified Data.Text.Array as A +import qualified Data.Text.Internal.Encoding.Fusion as E +import qualified Data.Text.Internal.Fusion as F + +#include "text_cbits.h" + +-- $strict +-- +-- All of the single-parameter 
functions for decoding bytestrings +-- encoded in one of the Unicode Transformation Formats (UTF) operate +-- in a /strict/ mode: each will throw an exception if given invalid +-- input. +-- +-- Each function has a variant, whose name is suffixed with -'With', +-- that gives greater control over the handling of decoding errors. +-- For instance, 'decodeUtf8' will throw an exception, but +-- 'decodeUtf8With' allows the programmer to determine what to do on a +-- decoding error. + +-- | /Deprecated/. Decode a 'ByteString' containing 7-bit ASCII +-- encoded text. +decodeASCII :: ByteString -> Text +decodeASCII = decodeUtf8 +{-# DEPRECATED decodeASCII "Use decodeUtf8 instead" #-} + +-- | Decode a 'ByteString' containing Latin-1 (aka ISO-8859-1) encoded text. +-- +-- 'decodeLatin1' is semantically equivalent to +-- @Data.Text.pack . Data.ByteString.Char8.unpack@ +decodeLatin1 :: ByteString -> Text +decodeLatin1 (PS fp off len) = text a 0 len + where + a = A.run (A.new len >>= unsafeIOToST . go) + go dest = withForeignPtr fp $ \ptr -> do + c_decode_latin1 (A.maBA dest) (ptr `plusPtr` off) (ptr `plusPtr` (off+len)) + return dest + +-- | Decode a 'ByteString' containing UTF-8 encoded text. +decodeUtf8With :: OnDecodeError -> ByteString -> Text +decodeUtf8With onErr (PS fp off len) = runText $ \done -> do + let go dest = withForeignPtr fp $ \ptr -> + with (0::CSize) $ \destOffPtr -> do + let end = ptr `plusPtr` (off + len) + loop curPtr = do + curPtr' <- c_decode_utf8 (A.maBA dest) destOffPtr curPtr end + if curPtr' == end + then do + n <- peek destOffPtr + unsafeSTToIO (done dest (fromIntegral n)) + else do + x <- peek curPtr' + case onErr desc (Just x) of + Nothing -> loop $ curPtr' `plusPtr` 1 + Just c -> do + destOff <- peek destOffPtr + w <- unsafeSTToIO $ + unsafeWrite dest (fromIntegral destOff) (safe c) + poke destOffPtr (destOff + fromIntegral w) + loop $ curPtr' `plusPtr` 1 + loop (ptr `plusPtr` off) + (unsafeIOToST . 
go) =<< A.new len + where + desc = "Data.Text.Internal.Encoding.decodeUtf8: Invalid UTF-8 stream" +{- INLINE[0] decodeUtf8With #-} + +-- $stream +-- +-- The 'streamDecodeUtf8' and 'streamDecodeUtf8With' functions accept +-- a 'ByteString' that represents a possibly incomplete input (e.g. a +-- packet from a network stream) that may not end on a UTF-8 boundary. +-- +-- 1. The maximal prefix of 'Text' that could be decoded from the +-- given input. +-- +-- 2. The suffix of the 'ByteString' that could not be decoded due to +-- insufficient input. +-- +-- 3. A function that accepts another 'ByteString'. That string will +-- be assumed to directly follow the string that was passed as +-- input to the original function, and it will in turn be decoded. +-- +-- To help understand the use of these functions, consider the Unicode +-- string @\"hi ☃\"@. If encoded as UTF-8, this becomes @\"hi +-- \\xe2\\x98\\x83\"@; the final @\'☃\'@ is encoded as 3 bytes. +-- +-- Now suppose that we receive this encoded string as 3 packets that +-- are split up on untidy boundaries: @[\"hi \\xe2\", \"\\x98\", +-- \"\\x83\"]@. We cannot decode the entire Unicode string until we +-- have received all three packets, but we would like to make progress +-- as we receive each one. +-- +-- @ +-- ghci> let s0\@('Some' _ _ f0) = 'streamDecodeUtf8' \"hi \\xe2\" +-- ghci> s0 +-- 'Some' \"hi \" \"\\xe2\" _ +-- @ +-- +-- We use the continuation @f0@ to decode our second packet. +-- +-- @ +-- ghci> let s1\@('Some' _ _ f1) = f0 \"\\x98\" +-- ghci> s1 +-- 'Some' \"\" \"\\xe2\\x98\" +-- @ +-- +-- We could not give @f0@ enough input to decode anything, so it +-- returned an empty string. Once we feed our second continuation @f1@ +-- the last byte of input, it will make progress. +-- +-- @ +-- ghci> let s2\@('Some' _ _ f2) = f1 \"\\x83\" +-- ghci> s2 +-- 'Some' \"\\x2603\" \"\" _ +-- @ +-- +-- If given invalid input, an exception will be thrown by the function +-- or continuation where it is encountered. 
+ +-- | A stream oriented decoding result. +data Decoding = Some Text ByteString (ByteString -> Decoding) + +instance Show Decoding where + showsPrec d (Some t bs _) = showParen (d > prec) $ + showString "Some " . showsPrec prec' t . + showChar ' ' . showsPrec prec' bs . + showString " _" + where prec = 10; prec' = prec + 1 + +newtype CodePoint = CodePoint Word32 deriving (Eq, Show, Num, Storable) +newtype DecoderState = DecoderState Word32 deriving (Eq, Show, Num, Storable) + +-- | Decode, in a stream oriented way, a 'ByteString' containing UTF-8 +-- encoded text that is known to be valid. +-- +-- If the input contains any invalid UTF-8 data, an exception will be +-- thrown (either by this function or a continuation) that cannot be +-- caught in pure code. For more control over the handling of invalid +-- data, use 'streamDecodeUtf8With'. +streamDecodeUtf8 :: ByteString -> Decoding +streamDecodeUtf8 = streamDecodeUtf8With strictDecode + +-- | Decode, in a stream oriented way, a 'ByteString' containing UTF-8 +-- encoded text. +streamDecodeUtf8With :: OnDecodeError -> ByteString -> Decoding +streamDecodeUtf8With onErr = decodeChunk B.empty 0 0 + where + -- We create a slightly larger than necessary buffer to accommodate a + -- potential surrogate pair started in the last buffer + decodeChunk :: ByteString -> CodePoint -> DecoderState -> ByteString + -> Decoding + decodeChunk undecoded0 codepoint0 state0 bs@(PS fp off len) = + runST $ (unsafeIOToST . 
decodeChunkToBuffer) =<< A.new (len+1) + where + decodeChunkToBuffer :: A.MArray s -> IO Decoding + decodeChunkToBuffer dest = withForeignPtr fp $ \ptr -> + with (0::CSize) $ \destOffPtr -> + with codepoint0 $ \codepointPtr -> + with state0 $ \statePtr -> + with nullPtr $ \curPtrPtr -> + let end = ptr `plusPtr` (off + len) + loop curPtr = do + poke curPtrPtr curPtr + curPtr' <- c_decode_utf8_with_state (A.maBA dest) destOffPtr + curPtrPtr end codepointPtr statePtr + state <- peek statePtr + case state of + UTF8_REJECT -> do + -- We encountered an encoding error + x <- peek curPtr' + poke statePtr 0 + case onErr desc (Just x) of + Nothing -> loop $ curPtr' `plusPtr` 1 + Just c -> do + destOff <- peek destOffPtr + w <- unsafeSTToIO $ + unsafeWrite dest (fromIntegral destOff) (safe c) + poke destOffPtr (destOff + fromIntegral w) + loop $ curPtr' `plusPtr` 1 + + _ -> do + -- We encountered the end of the buffer while decoding + n <- peek destOffPtr + codepoint <- peek codepointPtr + chunkText <- unsafeSTToIO $ do + arr <- A.unsafeFreeze dest + return $! text arr 0 (fromIntegral n) + lastPtr <- peek curPtrPtr + let left = lastPtr `minusPtr` curPtr + !undecoded = case state of + UTF8_ACCEPT -> B.empty + _ -> B.append undecoded0 (B.drop left bs) + return $ Some chunkText undecoded + (decodeChunk undecoded codepoint state) + in loop (ptr `plusPtr` off) + desc = "Data.Text.Internal.Encoding.streamDecodeUtf8With: Invalid UTF-8 stream" + +-- | Decode a 'ByteString' containing UTF-8 encoded text that is known +-- to be valid. +-- +-- If the input contains any invalid UTF-8 data, an exception will be +-- thrown that cannot be caught in pure code. For more control over +-- the handling of invalid data, use 'decodeUtf8'' or +-- 'decodeUtf8With'. +decodeUtf8 :: ByteString -> Text +decodeUtf8 = decodeUtf8With strictDecode +{-# INLINE[0] decodeUtf8 #-} +{-# RULES "STREAM stream/decodeUtf8 fusion" [1] + forall bs. 
F.stream (decodeUtf8 bs) = E.streamUtf8 strictDecode bs #-} + +-- | Decode a 'ByteString' containing UTF-8 encoded text. +-- +-- If the input contains any invalid UTF-8 data, the relevant +-- exception will be returned, otherwise the decoded text. +decodeUtf8' :: ByteString -> Either UnicodeException Text +decodeUtf8' = unsafeDupablePerformIO . try . evaluate . decodeUtf8With strictDecode +{-# INLINE decodeUtf8' #-} + +#if MIN_VERSION_bytestring(0,10,4) + +-- | Encode text to a ByteString 'B.Builder' using UTF-8 encoding. +encodeUtf8Builder :: Text -> B.Builder +encodeUtf8Builder = encodeUtf8BuilderEscaped (BP.liftFixedToBounded BP.word8) + +-- | Encode text using UTF-8 encoding and escape the ASCII characters using +-- a 'BP.BoundedPrim'. +-- +-- Use this function is to implement efficient encoders for text-based formats +-- like JSON or HTML. +{-# INLINE encodeUtf8BuilderEscaped #-} +-- TODO: Extend documentation with references to source code in @blaze-html@ +-- or @aeson@ that uses this function. +encodeUtf8BuilderEscaped :: BP.BoundedPrim Word8 -> Text -> B.Builder +encodeUtf8BuilderEscaped be = + -- manual eta-expansion to ensure inlining works as expected + \txt -> B.builder (mkBuildstep txt) + where + bound = max 4 $ BP.sizeBound be + + mkBuildstep (Text arr off len) !k = + outerLoop off + where + iend = off + len + + outerLoop !i0 !br@(B.BufferRange op0 ope) + | i0 >= iend = k br + | outRemaining > 0 = goPartial (i0 + min outRemaining inpRemaining) + -- TODO: Use a loop with an integrated bound's check if outRemaining + -- is smaller than 8, as this will save on divisions. 
+ | otherwise = return $ B.bufferFull bound op0 (outerLoop i0) + where + outRemaining = (ope `minusPtr` op0) `div` bound + inpRemaining = iend - i0 + + goPartial !iendTmp = go i0 op0 + where + go !i !op + | i < iendTmp = case A.unsafeIndex arr i of + w | w <= 0x7F -> do + BP.runB be (fromIntegral w) op >>= go (i + 1) + | w <= 0x7FF -> do + poke8 0 $ (w `shiftR` 6) + 0xC0 + poke8 1 $ (w .&. 0x3f) + 0x80 + go (i + 1) (op `plusPtr` 2) + | 0xD800 <= w && w <= 0xDBFF -> do + let c = ord $ U16.chr2 w (A.unsafeIndex arr (i+1)) + poke8 0 $ (c `shiftR` 18) + 0xF0 + poke8 1 $ ((c `shiftR` 12) .&. 0x3F) + 0x80 + poke8 2 $ ((c `shiftR` 6) .&. 0x3F) + 0x80 + poke8 3 $ (c .&. 0x3F) + 0x80 + go (i + 2) (op `plusPtr` 4) + | otherwise -> do + poke8 0 $ (w `shiftR` 12) + 0xE0 + poke8 1 $ ((w `shiftR` 6) .&. 0x3F) + 0x80 + poke8 2 $ (w .&. 0x3F) + 0x80 + go (i + 1) (op `plusPtr` 3) + | otherwise = + outerLoop i (B.BufferRange op ope) + where + poke8 j v = poke (op `plusPtr` j) (fromIntegral v :: Word8) +#endif + +-- | Encode text using UTF-8 encoding. +encodeUtf8 :: Text -> ByteString +encodeUtf8 (Text arr off len) + | len == 0 = B.empty + | otherwise = unsafeDupablePerformIO $ do + fp <- mallocByteString (len*4) + withForeignPtr fp $ \ptr -> + with ptr $ \destPtr -> do + c_encode_utf8 destPtr (A.aBA arr) (fromIntegral off) (fromIntegral len) + newDest <- peek destPtr + let utf8len = newDest `minusPtr` ptr + if utf8len >= len `shiftR` 1 + then return (PS fp 0 utf8len) + else do + fp' <- mallocByteString utf8len + withForeignPtr fp' $ \ptr' -> do + memcpy ptr' ptr (fromIntegral utf8len) + return (PS fp' 0 utf8len) + +-- | Decode text from little endian UTF-16 encoding. +decodeUtf16LEWith :: OnDecodeError -> ByteString -> Text +decodeUtf16LEWith onErr bs = F.unstream (E.streamUtf16LE onErr bs) +{-# INLINE decodeUtf16LEWith #-} + +-- | Decode text from little endian UTF-16 encoding. +-- +-- If the input contains any invalid little endian UTF-16 data, an +-- exception will be thrown. 
For more control over the handling of +-- invalid data, use 'decodeUtf16LEWith'. +decodeUtf16LE :: ByteString -> Text +decodeUtf16LE = decodeUtf16LEWith strictDecode +{-# INLINE decodeUtf16LE #-} + +-- | Decode text from big endian UTF-16 encoding. +decodeUtf16BEWith :: OnDecodeError -> ByteString -> Text +decodeUtf16BEWith onErr bs = F.unstream (E.streamUtf16BE onErr bs) +{-# INLINE decodeUtf16BEWith #-} + +-- | Decode text from big endian UTF-16 encoding. +-- +-- If the input contains any invalid big endian UTF-16 data, an +-- exception will be thrown. For more control over the handling of +-- invalid data, use 'decodeUtf16BEWith'. +decodeUtf16BE :: ByteString -> Text +decodeUtf16BE = decodeUtf16BEWith strictDecode +{-# INLINE decodeUtf16BE #-} + +-- | Encode text using little endian UTF-16 encoding. +encodeUtf16LE :: Text -> ByteString +encodeUtf16LE txt = E.unstream (E.restreamUtf16LE (F.stream txt)) +{-# INLINE encodeUtf16LE #-} + +-- | Encode text using big endian UTF-16 encoding. +encodeUtf16BE :: Text -> ByteString +encodeUtf16BE txt = E.unstream (E.restreamUtf16BE (F.stream txt)) +{-# INLINE encodeUtf16BE #-} + +-- | Decode text from little endian UTF-32 encoding. +decodeUtf32LEWith :: OnDecodeError -> ByteString -> Text +decodeUtf32LEWith onErr bs = F.unstream (E.streamUtf32LE onErr bs) +{-# INLINE decodeUtf32LEWith #-} + +-- | Decode text from little endian UTF-32 encoding. +-- +-- If the input contains any invalid little endian UTF-32 data, an +-- exception will be thrown. For more control over the handling of +-- invalid data, use 'decodeUtf32LEWith'. +decodeUtf32LE :: ByteString -> Text +decodeUtf32LE = decodeUtf32LEWith strictDecode +{-# INLINE decodeUtf32LE #-} + +-- | Decode text from big endian UTF-32 encoding. +decodeUtf32BEWith :: OnDecodeError -> ByteString -> Text +decodeUtf32BEWith onErr bs = F.unstream (E.streamUtf32BE onErr bs) +{-# INLINE decodeUtf32BEWith #-} + +-- | Decode text from big endian UTF-32 encoding. 
+-- +-- If the input contains any invalid big endian UTF-32 data, an +-- exception will be thrown. For more control over the handling of +-- invalid data, use 'decodeUtf32BEWith'. +decodeUtf32BE :: ByteString -> Text +decodeUtf32BE = decodeUtf32BEWith strictDecode +{-# INLINE decodeUtf32BE #-} + +-- | Encode text using little endian UTF-32 encoding. +encodeUtf32LE :: Text -> ByteString +encodeUtf32LE txt = E.unstream (E.restreamUtf32LE (F.stream txt)) +{-# INLINE encodeUtf32LE #-} + +-- | Encode text using big endian UTF-32 encoding. +encodeUtf32BE :: Text -> ByteString +encodeUtf32BE txt = E.unstream (E.restreamUtf32BE (F.stream txt)) +{-# INLINE encodeUtf32BE #-} + +foreign import ccall unsafe "_hs_text_decode_utf8" c_decode_utf8 + :: MutableByteArray# s -> Ptr CSize + -> Ptr Word8 -> Ptr Word8 -> IO (Ptr Word8) + +foreign import ccall unsafe "_hs_text_decode_utf8_state" c_decode_utf8_with_state + :: MutableByteArray# s -> Ptr CSize + -> Ptr (Ptr Word8) -> Ptr Word8 + -> Ptr CodePoint -> Ptr DecoderState -> IO (Ptr Word8) + +foreign import ccall unsafe "_hs_text_decode_latin1" c_decode_latin1 + :: MutableByteArray# s -> Ptr Word8 -> Ptr Word8 -> IO () + +foreign import ccall unsafe "_hs_text_encode_utf8" c_encode_utf8 + :: Ptr (Ptr Word8) -> ByteArray# -> CSize -> CSize -> IO () diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Foreign.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Foreign.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Foreign.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Foreign.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,173 @@ +{-# LANGUAGE BangPatterns, CPP, GeneralizedNewtypeDeriving #-} +-- | +-- Module : Data.Text.Foreign +-- Copyright : (c) 2009, 2010 Bryan O'Sullivan +-- +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : GHC +-- +-- Support for using 'Text' data with native code via 
the Haskell +-- foreign function interface. + +module Data.Text.Foreign + ( + -- * Interoperability with native code + -- $interop + I16 + -- * Safe conversion functions + , fromPtr + , useAsPtr + , asForeignPtr + -- ** Encoding as UTF-8 + , peekCStringLen + , withCStringLen + -- * Unsafe conversion code + , lengthWord16 + , unsafeCopyToPtr + -- * Low-level manipulation + -- $lowlevel + , dropWord16 + , takeWord16 + ) where + +#if defined(ASSERTS) +import Control.Exception (assert) +#endif +#if __GLASGOW_HASKELL__ >= 702 +import Control.Monad.ST.Unsafe (unsafeIOToST) +#else +import Control.Monad.ST (unsafeIOToST) +#endif +import Data.ByteString.Unsafe (unsafePackCStringLen, unsafeUseAsCStringLen) +import Data.Text.Encoding (decodeUtf8, encodeUtf8) +import Data.Text.Internal (Text(..), empty) +import Data.Text.Unsafe (lengthWord16) +import Data.Word (Word16) +import Foreign.C.String (CStringLen) +import Foreign.ForeignPtr (ForeignPtr, mallocForeignPtrArray, withForeignPtr) +import Foreign.Marshal.Alloc (allocaBytes) +import Foreign.Ptr (Ptr, castPtr, plusPtr) +import Foreign.Storable (peek, poke) +import qualified Data.Text.Array as A + +-- $interop +-- +-- The 'Text' type is implemented using arrays that are not guaranteed +-- to have a fixed address in the Haskell heap. All communication with +-- native code must thus occur by copying data back and forth. +-- +-- The 'Text' type's internal representation is UTF-16, using the +-- platform's native endianness. This makes copied data suitable for +-- use with native libraries that use a similar representation, such +-- as ICU. To interoperate with native libraries that use different +-- internal representations, such as UTF-8 or UTF-32, consider using +-- the functions in the 'Data.Text.Encoding' module. + +-- | A type representing a number of UTF-16 code units. 
+newtype I16 = I16 Int + deriving (Bounded, Enum, Eq, Integral, Num, Ord, Read, Real, Show) + +-- | /O(n)/ Create a new 'Text' from a 'Ptr' 'Word16' by copying the +-- contents of the array. +fromPtr :: Ptr Word16 -- ^ source array + -> I16 -- ^ length of source array (in 'Word16' units) + -> IO Text +fromPtr _ (I16 0) = return empty +fromPtr ptr (I16 len) = +#if defined(ASSERTS) + assert (len > 0) $ +#endif + return $! Text arr 0 len + where + arr = A.run (A.new len >>= copy) + copy marr = loop ptr 0 + where + loop !p !i | i == len = return marr + | otherwise = do + A.unsafeWrite marr i =<< unsafeIOToST (peek p) + loop (p `plusPtr` 2) (i + 1) + +-- $lowlevel +-- +-- Foreign functions that use UTF-16 internally may return indices in +-- units of 'Word16' instead of characters. These functions may +-- safely be used with such indices, as they will adjust offsets if +-- necessary to preserve the validity of a Unicode string. + +-- | /O(1)/ Return the prefix of the 'Text' of @n@ 'Word16' units in +-- length. +-- +-- If @n@ would cause the 'Text' to end inside a surrogate pair, the +-- end of the prefix will be advanced by one additional 'Word16' unit +-- to maintain its validity. +takeWord16 :: I16 -> Text -> Text +takeWord16 (I16 n) t@(Text arr off len) + | n <= 0 = empty + | n >= len || m >= len = t + | otherwise = Text arr off m + where + m | w < 0xDB00 || w > 0xD8FF = n + | otherwise = n+1 + w = A.unsafeIndex arr (off+n-1) + +-- | /O(1)/ Return the suffix of the 'Text', with @n@ 'Word16' units +-- dropped from its beginning. +-- +-- If @n@ would cause the 'Text' to begin inside a surrogate pair, the +-- beginning of the suffix will be advanced by one additional 'Word16' +-- unit to maintain its validity. 
+dropWord16 :: I16 -> Text -> Text +dropWord16 (I16 n) t@(Text arr off len) + | n <= 0 = t + | n >= len || m >= len = empty + | otherwise = Text arr (off+m) (len-m) + where + m | w < 0xD800 || w > 0xDBFF = n + | otherwise = n+1 + w = A.unsafeIndex arr (off+n-1) + +-- | /O(n)/ Copy a 'Text' to an array. The array is assumed to be big +-- enough to hold the contents of the entire 'Text'. +unsafeCopyToPtr :: Text -> Ptr Word16 -> IO () +unsafeCopyToPtr (Text arr off len) ptr = loop ptr off + where + end = off + len + loop !p !i | i == end = return () + | otherwise = do + poke p (A.unsafeIndex arr i) + loop (p `plusPtr` 2) (i + 1) + +-- | /O(n)/ Perform an action on a temporary, mutable copy of a +-- 'Text'. The copy is freed as soon as the action returns. +useAsPtr :: Text -> (Ptr Word16 -> I16 -> IO a) -> IO a +useAsPtr t@(Text _arr _off len) action = + allocaBytes (len * 2) $ \buf -> do + unsafeCopyToPtr t buf + action (castPtr buf) (fromIntegral len) + +-- | /O(n)/ Make a mutable copy of a 'Text'. +asForeignPtr :: Text -> IO (ForeignPtr Word16, I16) +asForeignPtr t@(Text _arr _off len) = do + fp <- mallocForeignPtrArray len + withForeignPtr fp $ unsafeCopyToPtr t + return (fp, I16 len) + +-- | /O(n)/ Decode a C string with explicit length, which is assumed +-- to have been encoded as UTF-8. If decoding fails, a +-- 'UnicodeException' is thrown. +peekCStringLen :: CStringLen -> IO Text +peekCStringLen cs = do + bs <- unsafePackCStringLen cs + return $! decodeUtf8 bs + +-- | Marshal a 'Text' into a C string encoded as UTF-8 in temporary +-- storage, with explicit length information. The encoded string may +-- contain NUL bytes, and is not followed by a trailing NUL byte. +-- +-- The temporary storage is freed when the subcomputation terminates +-- (either normally or via an exception), so the pointer to the +-- temporary storage must /not/ be used after this function returns. 
+withCStringLen :: Text -> (CStringLen -> IO a) -> IO a +withCStringLen t act = unsafeUseAsCStringLen (encodeUtf8 t) act diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Builder/Functions.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Builder/Functions.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Builder/Functions.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Builder/Functions.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,40 @@ +{-# LANGUAGE MagicHash #-} + +-- | +-- Module : Data.Text.Internal.Builder.Functions +-- Copyright : (c) 2011 MailRank, Inc. +-- +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : GHC +-- +-- /Warning/: this is an internal module, and does not have a stable +-- API or name. Functions in this module may not check or enforce +-- preconditions expected by public modules. Use at your own risk! +-- +-- Useful functions and combinators. + +module Data.Text.Internal.Builder.Functions + ( + (<>) + , i2d + ) where + +import Data.Monoid (mappend) +import Data.Text.Lazy.Builder (Builder) +import GHC.Base (chr#,ord#,(+#),Int(I#),Char(C#)) +import Prelude () + +-- | Unsafe conversion for decimal digits. +{-# INLINE i2d #-} +i2d :: Int -> Char +i2d (I# i#) = C# (chr# (ord# '0'# +# i#)) + +-- | The normal 'mappend' function with right associativity instead of +-- left. 
+(<>) :: Builder -> Builder -> Builder +(<>) = mappend +{-# INLINE (<>) #-} + +infixr 4 <> diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Builder/Int/Digits.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Builder/Int/Digits.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Builder/Int/Digits.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Builder/Int/Digits.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,26 @@ +{-# LANGUAGE OverloadedStrings #-} + +-- Module: Data.Text.Internal.Builder.Int.Digits +-- Copyright: (c) 2013 Bryan O'Sullivan +-- License: BSD3 +-- Maintainer: Bryan O'Sullivan +-- Stability: experimental +-- Portability: portable +-- +-- /Warning/: this is an internal module, and does not have a stable +-- API or name. Functions in this module may not check or enforce +-- preconditions expected by public modules. Use at your own risk! +-- +-- This module exists because the C preprocessor does things that we +-- shall not speak of when confronted with Haskell multiline strings. 
+ +module Data.Text.Internal.Builder.Int.Digits (digits) where + +import Data.ByteString.Char8 (ByteString) + +digits :: ByteString +digits = "0001020304050607080910111213141516171819\ + \2021222324252627282930313233343536373839\ + \4041424344454647484950515253545556575859\ + \6061626364656667686970717273747576777879\ + \8081828384858687888990919293949596979899" diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Builder/RealFloat/Functions.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Builder/RealFloat/Functions.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Builder/RealFloat/Functions.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Builder/RealFloat/Functions.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,57 @@ +{-# LANGUAGE CPP #-} + +-- | +-- Module: Data.Text.Internal.Builder.RealFloat.Functions +-- Copyright: (c) The University of Glasgow 1994-2002 +-- License: see libraries/base/LICENSE +-- +-- /Warning/: this is an internal module, and does not have a stable +-- API or name. Functions in this module may not check or enforce +-- preconditions expected by public modules. Use at your own risk! 
+ +module Data.Text.Internal.Builder.RealFloat.Functions + ( + roundTo + ) where + +roundTo :: Int -> [Int] -> (Int,[Int]) + +#if MIN_VERSION_base(4,6,0) + +roundTo d is = + case f d True is of + x@(0,_) -> x + (1,xs) -> (1, 1:xs) + _ -> error "roundTo: bad Value" + where + b2 = base `quot` 2 + + f n _ [] = (0, replicate n 0) + f 0 e (x:xs) | x == b2 && e && all (== 0) xs = (0, []) -- Round to even when at exactly half the base + | otherwise = (if x >= b2 then 1 else 0, []) + f n _ (i:xs) + | i' == base = (1,0:ds) + | otherwise = (0,i':ds) + where + (c,ds) = f (n-1) (even i) xs + i' = c + i + base = 10 + +#else + +roundTo d is = + case f d is of + x@(0,_) -> x + (1,xs) -> (1, 1:xs) + _ -> error "roundTo: bad Value" + where + f n [] = (0, replicate n 0) + f 0 (x:_) = (if x >= 5 then 1 else 0, []) + f n (i:xs) + | i' == 10 = (1,0:ds) + | otherwise = (0,i':ds) + where + (c,ds) = f (n-1) xs + i' = c + i + +#endif diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Builder.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Builder.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Builder.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Builder.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,327 @@ +{-# LANGUAGE BangPatterns, CPP, Rank2Types #-} +{-# OPTIONS_HADDOCK not-home #-} + +----------------------------------------------------------------------------- +-- | +-- Module : Data.Text.Internal.Builder +-- Copyright : (c) 2013 Bryan O'Sullivan +-- (c) 2010 Johan Tibell +-- License : BSD3-style (see LICENSE) +-- +-- Maintainer : Johan Tibell +-- Stability : experimental +-- Portability : portable to Hugs and GHC +-- +-- /Warning/: this is an internal module, and does not have a stable +-- API or name. Functions in this module may not check or enforce +-- preconditions expected by public modules. Use at your own risk! 
+-- +-- Efficient construction of lazy @Text@ values. The principal +-- operations on a @Builder@ are @singleton@, @fromText@, and +-- @fromLazyText@, which construct new builders, and 'mappend', which +-- concatenates two builders. +-- +-- To get maximum performance when building lazy @Text@ values using a +-- builder, associate @mappend@ calls to the right. For example, +-- prefer +-- +-- > singleton 'a' `mappend` (singleton 'b' `mappend` singleton 'c') +-- +-- to +-- +-- > singleton 'a' `mappend` singleton 'b' `mappend` singleton 'c' +-- +-- as the latter associates @mappend@ to the left. +-- +----------------------------------------------------------------------------- + +module Data.Text.Internal.Builder + ( -- * Public API + -- ** The Builder type + Builder + , toLazyText + , toLazyTextWith + + -- ** Constructing Builders + , singleton + , fromText + , fromLazyText + , fromString + + -- ** Flushing the buffer state + , flush + + -- * Internal functions + , append' + , ensureFree + , writeN + ) where + +import Control.Monad.ST (ST, runST) +import Data.Monoid (Monoid(..)) +#if MIN_VERSION_base(4,9,0) +import Data.Semigroup (Semigroup(..)) +#endif +import Data.Text.Internal (Text(..)) +import Data.Text.Internal.Lazy (smallChunkSize) +import Data.Text.Unsafe (inlineInterleaveST) +import Data.Text.Internal.Unsafe.Char (unsafeWrite) +import Prelude hiding (map, putChar) + +import qualified Data.String as String +import qualified Data.Text as S +import qualified Data.Text.Array as A +import qualified Data.Text.Lazy as L + +------------------------------------------------------------------------ + +-- | A @Builder@ is an efficient way to build lazy @Text@ values. +-- There are several functions for constructing builders, but only one +-- to inspect them: to extract any data, you have to turn them into +-- lazy @Text@ values using @toLazyText@. +-- +-- Internally, a builder constructs a lazy @Text@ by filling arrays +-- piece by piece. 
As each buffer is filled, it is \'popped\' off, to +-- become a new chunk of the resulting lazy @Text@. All this is +-- hidden from the user of the @Builder@. +newtype Builder = Builder { + -- Invariant (from Data.Text.Lazy): + -- The lists include no null Texts. + runBuilder :: forall s. (Buffer s -> ST s [S.Text]) + -> Buffer s + -> ST s [S.Text] + } + +#if MIN_VERSION_base(4,9,0) +instance Semigroup Builder where + (<>) = append + {-# INLINE (<>) #-} +#endif + +instance Monoid Builder where + mempty = empty + {-# INLINE mempty #-} +#if MIN_VERSION_base(4,9,0) + mappend = (<>) -- future-proof definition +#else + mappend = append +#endif + {-# INLINE mappend #-} + mconcat = foldr mappend Data.Monoid.mempty + {-# INLINE mconcat #-} + +instance String.IsString Builder where + fromString = fromString + {-# INLINE fromString #-} + +instance Show Builder where + show = show . toLazyText + +instance Eq Builder where + a == b = toLazyText a == toLazyText b + +instance Ord Builder where + a <= b = toLazyText a <= toLazyText b + +------------------------------------------------------------------------ + +-- | /O(1)./ The empty @Builder@, satisfying +-- +-- * @'toLazyText' 'empty' = 'L.empty'@ +-- +empty :: Builder +empty = Builder (\ k buf -> k buf) +{-# INLINE empty #-} + +-- | /O(1)./ A @Builder@ taking a single character, satisfying +-- +-- * @'toLazyText' ('singleton' c) = 'L.singleton' c@ +-- +singleton :: Char -> Builder +singleton c = writeAtMost 2 $ \ marr o -> unsafeWrite marr o c +{-# INLINE singleton #-} + +------------------------------------------------------------------------ + +-- | /O(1)./ The concatenation of two builders, an associative +-- operation with identity 'empty', satisfying +-- +-- * @'toLazyText' ('append' x y) = 'L.append' ('toLazyText' x) ('toLazyText' y)@ +-- +append :: Builder -> Builder -> Builder +append (Builder f) (Builder g) = Builder (f . g) +{-# INLINE [0] append #-} + +-- TODO: Experiment to find the right threshold. 
+copyLimit :: Int +copyLimit = 128 + +-- This function attempts to merge small @Text@ values instead of +-- treating each value as its own chunk. We may not always want this. + +-- | /O(1)./ A @Builder@ taking a 'S.Text', satisfying +-- +-- * @'toLazyText' ('fromText' t) = 'L.fromChunks' [t]@ +-- +fromText :: S.Text -> Builder +fromText t@(Text arr off l) + | S.null t = empty + | l <= copyLimit = writeN l $ \marr o -> A.copyI marr o arr off (l+o) + | otherwise = flush `append` mapBuilder (t :) +{-# INLINE [1] fromText #-} + +{-# RULES +"fromText/pack" forall s . + fromText (S.pack s) = fromString s + #-} + +-- | /O(1)./ A Builder taking a @String@, satisfying +-- +-- * @'toLazyText' ('fromString' s) = 'L.fromChunks' [S.pack s]@ +-- +fromString :: String -> Builder +fromString str = Builder $ \k (Buffer p0 o0 u0 l0) -> + let loop !marr !o !u !l [] = k (Buffer marr o u l) + loop marr o u l s@(c:cs) + | l <= 1 = do + arr <- A.unsafeFreeze marr + let !t = Text arr o u + marr' <- A.new chunkSize + ts <- inlineInterleaveST (loop marr' 0 0 chunkSize s) + return $ t : ts + | otherwise = do + n <- unsafeWrite marr (o+u) c + loop marr o (u+n) (l-n) cs + in loop p0 o0 u0 l0 str + where + chunkSize = smallChunkSize +{-# INLINE fromString #-} + +-- | /O(1)./ A @Builder@ taking a lazy @Text@, satisfying +-- +-- * @'toLazyText' ('fromLazyText' t) = t@ +-- +fromLazyText :: L.Text -> Builder +fromLazyText ts = flush `append` mapBuilder (L.toChunks ts ++) +{-# INLINE fromLazyText #-} + +------------------------------------------------------------------------ + +-- Our internal buffer type +data Buffer s = Buffer {-# UNPACK #-} !(A.MArray s) + {-# UNPACK #-} !Int -- offset + {-# UNPACK #-} !Int -- used units + {-# UNPACK #-} !Int -- length left + +------------------------------------------------------------------------ + +-- | /O(n)./ Extract a lazy @Text@ from a @Builder@ with a default +-- buffer size. 
The construction work takes place if and when the +-- relevant part of the lazy @Text@ is demanded. +toLazyText :: Builder -> L.Text +toLazyText = toLazyTextWith smallChunkSize + +-- | /O(n)./ Extract a lazy @Text@ from a @Builder@, using the given +-- size for the initial buffer. The construction work takes place if +-- and when the relevant part of the lazy @Text@ is demanded. +-- +-- If the initial buffer is too small to hold all data, subsequent +-- buffers will be the default buffer size. +toLazyTextWith :: Int -> Builder -> L.Text +toLazyTextWith chunkSize m = L.fromChunks (runST $ + newBuffer chunkSize >>= runBuilder (m `append` flush) (const (return []))) + +-- | /O(1)./ Pop the strict @Text@ we have constructed so far, if any, +-- yielding a new chunk in the result lazy @Text@. +flush :: Builder +flush = Builder $ \ k buf@(Buffer p o u l) -> + if u == 0 + then k buf + else do arr <- A.unsafeFreeze p + let !b = Buffer p (o+u) 0 l + !t = Text arr o u + ts <- inlineInterleaveST (k b) + return $! t : ts + +------------------------------------------------------------------------ + +-- | Sequence an ST operation on the buffer +withBuffer :: (forall s. Buffer s -> ST s (Buffer s)) -> Builder +withBuffer f = Builder $ \k buf -> f buf >>= k +{-# INLINE withBuffer #-} + +-- | Get the size of the buffer +withSize :: (Int -> Builder) -> Builder +withSize f = Builder $ \ k buf@(Buffer _ _ _ l) -> + runBuilder (f l) k buf +{-# INLINE withSize #-} + +-- | Map the resulting list of texts. +mapBuilder :: ([S.Text] -> [S.Text]) -> Builder +mapBuilder f = Builder (fmap f .) + +------------------------------------------------------------------------ + +-- | Ensure that there are at least @n@ many elements available. +ensureFree :: Int -> Builder +ensureFree !n = withSize $ \ l -> + if n <= l + then empty + else flush `append'` withBuffer (const (newBuffer (max n smallChunkSize))) +{-# INLINE [0] ensureFree #-} + +writeAtMost :: Int -> (forall s. 
A.MArray s -> Int -> ST s Int) -> Builder +writeAtMost n f = ensureFree n `append'` withBuffer (writeBuffer f) +{-# INLINE [0] writeAtMost #-} + +-- | Ensure that @n@ many elements are available, and then use @f@ to +-- write some elements into the memory. +writeN :: Int -> (forall s. A.MArray s -> Int -> ST s ()) -> Builder +writeN n f = writeAtMost n (\ p o -> f p o >> return n) +{-# INLINE writeN #-} + +writeBuffer :: (A.MArray s -> Int -> ST s Int) -> Buffer s -> ST s (Buffer s) +writeBuffer f (Buffer p o u l) = do + n <- f p (o+u) + return $! Buffer p o (u+n) (l-n) +{-# INLINE writeBuffer #-} + +newBuffer :: Int -> ST s (Buffer s) +newBuffer size = do + arr <- A.new size + return $! Buffer arr 0 0 size +{-# INLINE newBuffer #-} + +------------------------------------------------------------------------ +-- Some nice rules for Builder + +-- This function makes GHC understand that 'writeN' and 'ensureFree' +-- are *not* recursive in the precense of the rewrite rules below. +-- This is not needed with GHC 7+. +append' :: Builder -> Builder -> Builder +append' (Builder f) (Builder g) = Builder (f . g) +{-# INLINE append' #-} + +{-# RULES + +"append/writeAtMost" forall a b (f::forall s. A.MArray s -> Int -> ST s Int) + (g::forall s. A.MArray s -> Int -> ST s Int) ws. + append (writeAtMost a f) (append (writeAtMost b g) ws) = + append (writeAtMost (a+b) (\marr o -> f marr o >>= \ n -> + g marr (o+n) >>= \ m -> + let s = n+m in s `seq` return s)) ws + +"writeAtMost/writeAtMost" forall a b (f::forall s. A.MArray s -> Int -> ST s Int) + (g::forall s. A.MArray s -> Int -> ST s Int). + append (writeAtMost a f) (writeAtMost b g) = + writeAtMost (a+b) (\marr o -> f marr o >>= \ n -> + g marr (o+n) >>= \ m -> + let s = n+m in s `seq` return s) + +"ensureFree/ensureFree" forall a b . 
+ append (ensureFree a) (ensureFree b) = ensureFree (max a b) + +"flush/flush" + append flush flush = flush + + #-} diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Encoding/Fusion/Common.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Encoding/Fusion/Common.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Encoding/Fusion/Common.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Encoding/Fusion/Common.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,123 @@ +{-# LANGUAGE BangPatterns #-} + +-- | +-- Module : Data.Text.Internal.Encoding.Fusion.Common +-- Copyright : (c) Tom Harper 2008-2009, +-- (c) Bryan O'Sullivan 2009, +-- (c) Duncan Coutts 2009, +-- (c) Jasper Van der Jeugt 2011 +-- +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : portable +-- +-- /Warning/: this is an internal module, and does not have a stable +-- API or name. Use at your own risk! +-- +-- Fusible 'Stream'-oriented functions for converting between 'Text' +-- and several common encodings. + +module Data.Text.Internal.Encoding.Fusion.Common + ( + -- * Restreaming + -- Restreaming is the act of converting from one 'Stream' + -- representation to another. 
+ restreamUtf16LE + , restreamUtf16BE + , restreamUtf32LE + , restreamUtf32BE + ) where + +import Data.Bits ((.&.)) +import Data.Text.Internal.Fusion (Step(..), Stream(..)) +import Data.Text.Internal.Fusion.Types (RS(..)) +import Data.Text.Internal.Unsafe.Char (ord) +import Data.Text.Internal.Unsafe.Shift (shiftR) +import Data.Word (Word8) + +restreamUtf16BE :: Stream Char -> Stream Word8 +restreamUtf16BE (Stream next0 s0 len) = Stream next (RS0 s0) (len * 2) + where + next (RS0 s) = case next0 s of + Done -> Done + Skip s' -> Skip (RS0 s') + Yield x s' + | n < 0x10000 -> Yield (fromIntegral $ n `shiftR` 8) $ + RS1 s' (fromIntegral n) + | otherwise -> Yield c1 $ RS3 s' c2 c3 c4 + where + n = ord x + n1 = n - 0x10000 + c1 = fromIntegral (n1 `shiftR` 18 + 0xD8) + c2 = fromIntegral (n1 `shiftR` 10) + n2 = n1 .&. 0x3FF + c3 = fromIntegral (n2 `shiftR` 8 + 0xDC) + c4 = fromIntegral n2 + next (RS1 s x2) = Yield x2 (RS0 s) + next (RS2 s x2 x3) = Yield x2 (RS1 s x3) + next (RS3 s x2 x3 x4) = Yield x2 (RS2 s x3 x4) + {-# INLINE next #-} +{-# INLINE restreamUtf16BE #-} + +restreamUtf16LE :: Stream Char -> Stream Word8 +restreamUtf16LE (Stream next0 s0 len) = Stream next (RS0 s0) (len * 2) + where + next (RS0 s) = case next0 s of + Done -> Done + Skip s' -> Skip (RS0 s') + Yield x s' + | n < 0x10000 -> Yield (fromIntegral n) $ + RS1 s' (fromIntegral $ shiftR n 8) + | otherwise -> Yield c1 $ RS3 s' c2 c3 c4 + where + n = ord x + n1 = n - 0x10000 + c2 = fromIntegral (shiftR n1 18 + 0xD8) + c1 = fromIntegral (shiftR n1 10) + n2 = n1 .&. 
0x3FF + c4 = fromIntegral (shiftR n2 8 + 0xDC) + c3 = fromIntegral n2 + next (RS1 s x2) = Yield x2 (RS0 s) + next (RS2 s x2 x3) = Yield x2 (RS1 s x3) + next (RS3 s x2 x3 x4) = Yield x2 (RS2 s x3 x4) + {-# INLINE next #-} +{-# INLINE restreamUtf16LE #-} + +restreamUtf32BE :: Stream Char -> Stream Word8 +restreamUtf32BE (Stream next0 s0 len) = Stream next (RS0 s0) (len * 2) + where + next (RS0 s) = case next0 s of + Done -> Done + Skip s' -> Skip (RS0 s') + Yield x s' -> Yield c1 (RS3 s' c2 c3 c4) + where + n = ord x + c1 = fromIntegral $ shiftR n 24 + c2 = fromIntegral $ shiftR n 16 + c3 = fromIntegral $ shiftR n 8 + c4 = fromIntegral n + next (RS1 s x2) = Yield x2 (RS0 s) + next (RS2 s x2 x3) = Yield x2 (RS1 s x3) + next (RS3 s x2 x3 x4) = Yield x2 (RS2 s x3 x4) + {-# INLINE next #-} +{-# INLINE restreamUtf32BE #-} + +restreamUtf32LE :: Stream Char -> Stream Word8 +restreamUtf32LE (Stream next0 s0 len) = Stream next (RS0 s0) (len * 2) + where + next (RS0 s) = case next0 s of + Done -> Done + Skip s' -> Skip (RS0 s') + Yield x s' -> Yield c1 (RS3 s' c2 c3 c4) + where + n = ord x + c4 = fromIntegral $ shiftR n 24 + c3 = fromIntegral $ shiftR n 16 + c2 = fromIntegral $ shiftR n 8 + c1 = fromIntegral n + next (RS1 s x2) = Yield x2 (RS0 s) + next (RS2 s x2 x3) = Yield x2 (RS1 s x3) + next (RS3 s x2 x3 x4) = Yield x2 (RS2 s x3 x4) + {-# INLINE next #-} +{-# INLINE restreamUtf32LE #-} diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Encoding/Fusion.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Encoding/Fusion.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Encoding/Fusion.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Encoding/Fusion.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,208 @@ +{-# LANGUAGE BangPatterns, CPP, Rank2Types #-} + +-- | +-- Module : Data.Text.Internal.Encoding.Fusion +-- Copyright : (c) Tom Harper 2008-2009, +-- (c) 
Bryan O'Sullivan 2009, +-- (c) Duncan Coutts 2009 +-- +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : portable +-- +-- /Warning/: this is an internal module, and does not have a stable +-- API or name. Functions in this module may not check or enforce +-- preconditions expected by public modules. Use at your own risk! +-- +-- Fusible 'Stream'-oriented functions for converting between 'Text' +-- and several common encodings. + +module Data.Text.Internal.Encoding.Fusion + ( + -- * Streaming + streamASCII + , streamUtf8 + , streamUtf16LE + , streamUtf16BE + , streamUtf32LE + , streamUtf32BE + + -- * Unstreaming + , unstream + + , module Data.Text.Internal.Encoding.Fusion.Common + ) where + +#if defined(ASSERTS) +import Control.Exception (assert) +#endif +import Data.ByteString.Internal (ByteString(..), mallocByteString, memcpy) +import Data.Text.Internal.Fusion (Step(..), Stream(..)) +import Data.Text.Internal.Fusion.Size +import Data.Text.Encoding.Error +import Data.Text.Internal.Encoding.Fusion.Common +import Data.Text.Internal.Unsafe.Char (unsafeChr, unsafeChr8, unsafeChr32) +import Data.Text.Internal.Unsafe.Shift (shiftL, shiftR) +import Data.Word (Word8, Word16, Word32) +import Foreign.ForeignPtr (withForeignPtr, ForeignPtr) +import Foreign.Storable (pokeByteOff) +import qualified Data.ByteString as B +import qualified Data.ByteString.Unsafe as B +import qualified Data.Text.Internal.Encoding.Utf8 as U8 +import qualified Data.Text.Internal.Encoding.Utf16 as U16 +import qualified Data.Text.Internal.Encoding.Utf32 as U32 +import Data.Text.Unsafe (unsafeDupablePerformIO) + +streamASCII :: ByteString -> Stream Char +streamASCII bs = Stream next 0 (maxSize l) + where + l = B.length bs + {-# INLINE next #-} + next i + | i >= l = Done + | otherwise = Yield (unsafeChr8 x1) (i+1) + where + x1 = B.unsafeIndex bs i +{-# DEPRECATED streamASCII "Do not use this function" #-} +{-# INLINE [0] streamASCII #-} + +-- | 
/O(n)/ Convert a 'ByteString' into a 'Stream Char', using UTF-8 +-- encoding. +streamUtf8 :: OnDecodeError -> ByteString -> Stream Char +streamUtf8 onErr bs = Stream next 0 (maxSize l) + where + l = B.length bs + next i + | i >= l = Done + | U8.validate1 x1 = Yield (unsafeChr8 x1) (i+1) + | i+1 < l && U8.validate2 x1 x2 = Yield (U8.chr2 x1 x2) (i+2) + | i+2 < l && U8.validate3 x1 x2 x3 = Yield (U8.chr3 x1 x2 x3) (i+3) + | i+3 < l && U8.validate4 x1 x2 x3 x4 = Yield (U8.chr4 x1 x2 x3 x4) (i+4) + | otherwise = decodeError "streamUtf8" "UTF-8" onErr (Just x1) (i+1) + where + x1 = idx i + x2 = idx (i + 1) + x3 = idx (i + 2) + x4 = idx (i + 3) + idx = B.unsafeIndex bs +{-# INLINE [0] streamUtf8 #-} + +-- | /O(n)/ Convert a 'ByteString' into a 'Stream Char', using little +-- endian UTF-16 encoding. +streamUtf16LE :: OnDecodeError -> ByteString -> Stream Char +streamUtf16LE onErr bs = Stream next 0 (maxSize (l `shiftR` 1)) + where + l = B.length bs + {-# INLINE next #-} + next i + | i >= l = Done + | i+1 < l && U16.validate1 x1 = Yield (unsafeChr x1) (i+2) + | i+3 < l && U16.validate2 x1 x2 = Yield (U16.chr2 x1 x2) (i+4) + | otherwise = decodeError "streamUtf16LE" "UTF-16LE" onErr Nothing (i+1) + where + x1 = idx i + (idx (i + 1) `shiftL` 8) + x2 = idx (i + 2) + (idx (i + 3) `shiftL` 8) + idx = fromIntegral . B.unsafeIndex bs :: Int -> Word16 +{-# INLINE [0] streamUtf16LE #-} + +-- | /O(n)/ Convert a 'ByteString' into a 'Stream Char', using big +-- endian UTF-16 encoding. 
+streamUtf16BE :: OnDecodeError -> ByteString -> Stream Char +streamUtf16BE onErr bs = Stream next 0 (maxSize (l `shiftR` 1)) + where + l = B.length bs + {-# INLINE next #-} + next i + | i >= l = Done + | i+1 < l && U16.validate1 x1 = Yield (unsafeChr x1) (i+2) + | i+3 < l && U16.validate2 x1 x2 = Yield (U16.chr2 x1 x2) (i+4) + | otherwise = decodeError "streamUtf16BE" "UTF-16BE" onErr Nothing (i+1) + where + x1 = (idx i `shiftL` 8) + idx (i + 1) + x2 = (idx (i + 2) `shiftL` 8) + idx (i + 3) + idx = fromIntegral . B.unsafeIndex bs :: Int -> Word16 +{-# INLINE [0] streamUtf16BE #-} + +-- | /O(n)/ Convert a 'ByteString' into a 'Stream Char', using big +-- endian UTF-32 encoding. +streamUtf32BE :: OnDecodeError -> ByteString -> Stream Char +streamUtf32BE onErr bs = Stream next 0 (maxSize (l `shiftR` 2)) + where + l = B.length bs + {-# INLINE next #-} + next i + | i >= l = Done + | i+3 < l && U32.validate x = Yield (unsafeChr32 x) (i+4) + | otherwise = decodeError "streamUtf32BE" "UTF-32BE" onErr Nothing (i+1) + where + x = shiftL x1 24 + shiftL x2 16 + shiftL x3 8 + x4 + x1 = idx i + x2 = idx (i+1) + x3 = idx (i+2) + x4 = idx (i+3) + idx = fromIntegral . B.unsafeIndex bs :: Int -> Word32 +{-# INLINE [0] streamUtf32BE #-} + +-- | /O(n)/ Convert a 'ByteString' into a 'Stream Char', using little +-- endian UTF-32 encoding. +streamUtf32LE :: OnDecodeError -> ByteString -> Stream Char +streamUtf32LE onErr bs = Stream next 0 (maxSize (l `shiftR` 2)) + where + l = B.length bs + {-# INLINE next #-} + next i + | i >= l = Done + | i+3 < l && U32.validate x = Yield (unsafeChr32 x) (i+4) + | otherwise = decodeError "streamUtf32LE" "UTF-32LE" onErr Nothing (i+1) + where + x = shiftL x4 24 + shiftL x3 16 + shiftL x2 8 + x1 + x1 = idx i + x2 = idx $ i+1 + x3 = idx $ i+2 + x4 = idx $ i+3 + idx = fromIntegral . B.unsafeIndex bs :: Int -> Word32 +{-# INLINE [0] streamUtf32LE #-} + +-- | /O(n)/ Convert a 'Stream' 'Word8' to a 'ByteString'. 
+unstream :: Stream Word8 -> ByteString +unstream (Stream next s0 len) = unsafeDupablePerformIO $ do + let mlen = upperBound 4 len + mallocByteString mlen >>= loop mlen 0 s0 + where + loop !n !off !s fp = case next s of + Done -> trimUp fp n off + Skip s' -> loop n off s' fp + Yield x s' + | off == n -> realloc fp n off s' x + | otherwise -> do + withForeignPtr fp $ \p -> pokeByteOff p off x + loop n (off+1) s' fp + {-# NOINLINE realloc #-} + realloc fp n off s x = do + let n' = n+n + fp' <- copy0 fp n n' + withForeignPtr fp' $ \p -> pokeByteOff p off x + loop n' (off+1) s fp' + {-# NOINLINE trimUp #-} + trimUp fp _ off = return $! PS fp 0 off + copy0 :: ForeignPtr Word8 -> Int -> Int -> IO (ForeignPtr Word8) + copy0 !src !srcLen !destLen = +#if defined(ASSERTS) + assert (srcLen <= destLen) $ +#endif + do + dest <- mallocByteString destLen + withForeignPtr src $ \src' -> + withForeignPtr dest $ \dest' -> + memcpy dest' src' (fromIntegral srcLen) + return dest + +decodeError :: forall s. String -> String -> OnDecodeError -> Maybe Word8 + -> s -> Step s Char +decodeError func kind onErr mb i = + case onErr desc mb of + Nothing -> Skip i + Just c -> Yield c i + where desc = "Data.Text.Internal.Encoding.Fusion." 
++ func ++ ": Invalid " ++ + kind ++ " stream" diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Encoding/Utf16.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Encoding/Utf16.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Encoding/Utf16.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Encoding/Utf16.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,45 @@ +{-# LANGUAGE MagicHash, BangPatterns #-} + +-- | +-- Module : Data.Text.Internal.Encoding.Utf16 +-- Copyright : (c) 2008, 2009 Tom Harper, +-- (c) 2009 Bryan O'Sullivan, +-- (c) 2009 Duncan Coutts +-- +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : GHC +-- +-- /Warning/: this is an internal module, and does not have a stable +-- API or name. Functions in this module may not check or enforce +-- preconditions expected by public modules. Use at your own risk! +-- +-- Basic UTF-16 validation and character manipulation. 
+module Data.Text.Internal.Encoding.Utf16 + ( + chr2 + , validate1 + , validate2 + ) where + +import GHC.Exts +import GHC.Word (Word16(..)) + +chr2 :: Word16 -> Word16 -> Char +chr2 (W16# a#) (W16# b#) = C# (chr# (upper# +# lower# +# 0x10000#)) + where + !x# = word2Int# a# + !y# = word2Int# b# + !upper# = uncheckedIShiftL# (x# -# 0xD800#) 10# + !lower# = y# -# 0xDC00# +{-# INLINE chr2 #-} + +validate1 :: Word16 -> Bool +validate1 x1 = x1 < 0xD800 || x1 > 0xDFFF +{-# INLINE validate1 #-} + +validate2 :: Word16 -> Word16 -> Bool +validate2 x1 x2 = x1 >= 0xD800 && x1 <= 0xDBFF && + x2 >= 0xDC00 && x2 <= 0xDFFF +{-# INLINE validate2 #-} diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Encoding/Utf32.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Encoding/Utf32.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Encoding/Utf32.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Encoding/Utf32.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,26 @@ +-- | +-- Module : Data.Text.Internal.Encoding.Utf32 +-- Copyright : (c) 2008, 2009 Tom Harper, +-- (c) 2009, 2010 Bryan O'Sullivan, +-- (c) 2009 Duncan Coutts +-- +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : portable +-- +-- /Warning/: this is an internal module, and does not have a stable +-- API or name. Functions in this module may not check or enforce +-- preconditions expected by public modules. Use at your own risk! +-- +-- Basic UTF-32 validation. 
+module Data.Text.Internal.Encoding.Utf32 + ( + validate + ) where + +import Data.Word (Word32) + +validate :: Word32 -> Bool +validate x1 = x1 < 0xD800 || (x1 > 0xDFFF && x1 <= 0x10FFFF) +{-# INLINE validate #-} diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Encoding/Utf8.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Encoding/Utf8.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Encoding/Utf8.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Encoding/Utf8.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,168 @@ +{-# LANGUAGE CPP, MagicHash, BangPatterns #-} + +-- | +-- Module : Data.Text.Internal.Encoding.Utf8 +-- Copyright : (c) 2008, 2009 Tom Harper, +-- (c) 2009, 2010 Bryan O'Sullivan, +-- (c) 2009 Duncan Coutts +-- +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : GHC +-- +-- /Warning/: this is an internal module, and does not have a stable +-- API or name. Functions in this module may not check or enforce +-- preconditions expected by public modules. Use at your own risk! +-- +-- Basic UTF-8 validation and character manipulation. 
+module Data.Text.Internal.Encoding.Utf8 + ( + -- Decomposition + ord2 + , ord3 + , ord4 + -- Construction + , chr2 + , chr3 + , chr4 + -- * Validation + , validate1 + , validate2 + , validate3 + , validate4 + ) where + +#if defined(TEST_SUITE) +# undef ASSERTS +#endif + +#if defined(ASSERTS) +import Control.Exception (assert) +#endif +import Data.Bits ((.&.)) +import Data.Text.Internal.Unsafe.Char (ord) +import Data.Text.Internal.Unsafe.Shift (shiftR) +import GHC.Exts +import GHC.Word (Word8(..)) + +default(Int) + +between :: Word8 -- ^ byte to check + -> Word8 -- ^ lower bound + -> Word8 -- ^ upper bound + -> Bool +between x y z = x >= y && x <= z +{-# INLINE between #-} + +ord2 :: Char -> (Word8,Word8) +ord2 c = +#if defined(ASSERTS) + assert (n >= 0x80 && n <= 0x07ff) +#endif + (x1,x2) + where + n = ord c + x1 = fromIntegral $ (n `shiftR` 6) + 0xC0 + x2 = fromIntegral $ (n .&. 0x3F) + 0x80 + +ord3 :: Char -> (Word8,Word8,Word8) +ord3 c = +#if defined(ASSERTS) + assert (n >= 0x0800 && n <= 0xffff) +#endif + (x1,x2,x3) + where + n = ord c + x1 = fromIntegral $ (n `shiftR` 12) + 0xE0 + x2 = fromIntegral $ ((n `shiftR` 6) .&. 0x3F) + 0x80 + x3 = fromIntegral $ (n .&. 0x3F) + 0x80 + +ord4 :: Char -> (Word8,Word8,Word8,Word8) +ord4 c = +#if defined(ASSERTS) + assert (n >= 0x10000) +#endif + (x1,x2,x3,x4) + where + n = ord c + x1 = fromIntegral $ (n `shiftR` 18) + 0xF0 + x2 = fromIntegral $ ((n `shiftR` 12) .&. 0x3F) + 0x80 + x3 = fromIntegral $ ((n `shiftR` 6) .&. 0x3F) + 0x80 + x4 = fromIntegral $ (n .&. 
0x3F) + 0x80 + +chr2 :: Word8 -> Word8 -> Char +chr2 (W8# x1#) (W8# x2#) = C# (chr# (z1# +# z2#)) + where + !y1# = word2Int# x1# + !y2# = word2Int# x2# + !z1# = uncheckedIShiftL# (y1# -# 0xC0#) 6# + !z2# = y2# -# 0x80# +{-# INLINE chr2 #-} + +chr3 :: Word8 -> Word8 -> Word8 -> Char +chr3 (W8# x1#) (W8# x2#) (W8# x3#) = C# (chr# (z1# +# z2# +# z3#)) + where + !y1# = word2Int# x1# + !y2# = word2Int# x2# + !y3# = word2Int# x3# + !z1# = uncheckedIShiftL# (y1# -# 0xE0#) 12# + !z2# = uncheckedIShiftL# (y2# -# 0x80#) 6# + !z3# = y3# -# 0x80# +{-# INLINE chr3 #-} + +chr4 :: Word8 -> Word8 -> Word8 -> Word8 -> Char +chr4 (W8# x1#) (W8# x2#) (W8# x3#) (W8# x4#) = + C# (chr# (z1# +# z2# +# z3# +# z4#)) + where + !y1# = word2Int# x1# + !y2# = word2Int# x2# + !y3# = word2Int# x3# + !y4# = word2Int# x4# + !z1# = uncheckedIShiftL# (y1# -# 0xF0#) 18# + !z2# = uncheckedIShiftL# (y2# -# 0x80#) 12# + !z3# = uncheckedIShiftL# (y3# -# 0x80#) 6# + !z4# = y4# -# 0x80# +{-# INLINE chr4 #-} + +validate1 :: Word8 -> Bool +validate1 x1 = x1 <= 0x7F +{-# INLINE validate1 #-} + +validate2 :: Word8 -> Word8 -> Bool +validate2 x1 x2 = between x1 0xC2 0xDF && between x2 0x80 0xBF +{-# INLINE validate2 #-} + +validate3 :: Word8 -> Word8 -> Word8 -> Bool +{-# INLINE validate3 #-} +validate3 x1 x2 x3 = validate3_1 || validate3_2 || validate3_3 || validate3_4 + where + validate3_1 = (x1 == 0xE0) && + between x2 0xA0 0xBF && + between x3 0x80 0xBF + validate3_2 = between x1 0xE1 0xEC && + between x2 0x80 0xBF && + between x3 0x80 0xBF + validate3_3 = x1 == 0xED && + between x2 0x80 0x9F && + between x3 0x80 0xBF + validate3_4 = between x1 0xEE 0xEF && + between x2 0x80 0xBF && + between x3 0x80 0xBF + +validate4 :: Word8 -> Word8 -> Word8 -> Word8 -> Bool +{-# INLINE validate4 #-} +validate4 x1 x2 x3 x4 = validate4_1 || validate4_2 || validate4_3 + where + validate4_1 = x1 == 0xF0 && + between x2 0x90 0xBF && + between x3 0x80 0xBF && + between x4 0x80 0xBF + validate4_2 = between x1 0xF1 0xF3 && + 
between x2 0x80 0xBF && + between x3 0x80 0xBF && + between x4 0x80 0xBF + validate4_3 = x1 == 0xF4 && + between x2 0x80 0x8F && + between x3 0x80 0xBF && + between x4 0x80 0xBF diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Functions.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Functions.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Functions.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Functions.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,31 @@ +{-# LANGUAGE CPP, DeriveDataTypeable #-} + +-- | +-- Module : Data.Text.Internal.Functions +-- Copyright : 2010 Bryan O'Sullivan +-- +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : GHC +-- +-- /Warning/: this is an internal module, and does not have a stable +-- API or name. Functions in this module may not check or enforce +-- preconditions expected by public modules. Use at your own risk! +-- +-- Useful functions. + +module Data.Text.Internal.Functions + ( + intersperse + ) where + +-- | A lazier version of Data.List.intersperse. The other version +-- causes space leaks! 
+intersperse :: a -> [a] -> [a] +intersperse _ [] = [] +intersperse sep (x:xs) = x : go xs + where + go [] = [] + go (y:ys) = sep : y: go ys +{-# INLINE intersperse #-} diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Fusion/CaseMapping.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Fusion/CaseMapping.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Fusion/CaseMapping.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Fusion/CaseMapping.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,842 @@ +{-# LANGUAGE Rank2Types #-} +-- AUTOMATICALLY GENERATED - DO NOT EDIT +-- Generated by scripts/CaseMapping.hs +-- CaseFolding-8.0.0.txt +-- Date: 2015-01-13, 18:16:36 GMT [MD] +-- SpecialCasing-8.0.0.txt +-- Date: 2014-12-16, 23:08:04 GMT [MD] + +module Data.Text.Internal.Fusion.CaseMapping where +import Data.Char +import Data.Text.Internal.Fusion.Types + +upperMapping :: forall s. 
Char -> s -> Step (CC s) Char +{-# NOINLINE upperMapping #-} +-- LATIN SMALL LETTER SHARP S +upperMapping '\x00df' s = Yield '\x0053' (CC s '\x0053' '\x0000') +-- LATIN SMALL LIGATURE FF +upperMapping '\xfb00' s = Yield '\x0046' (CC s '\x0046' '\x0000') +-- LATIN SMALL LIGATURE FI +upperMapping '\xfb01' s = Yield '\x0046' (CC s '\x0049' '\x0000') +-- LATIN SMALL LIGATURE FL +upperMapping '\xfb02' s = Yield '\x0046' (CC s '\x004c' '\x0000') +-- LATIN SMALL LIGATURE FFI +upperMapping '\xfb03' s = Yield '\x0046' (CC s '\x0046' '\x0049') +-- LATIN SMALL LIGATURE FFL +upperMapping '\xfb04' s = Yield '\x0046' (CC s '\x0046' '\x004c') +-- LATIN SMALL LIGATURE LONG S T +upperMapping '\xfb05' s = Yield '\x0053' (CC s '\x0054' '\x0000') +-- LATIN SMALL LIGATURE ST +upperMapping '\xfb06' s = Yield '\x0053' (CC s '\x0054' '\x0000') +-- ARMENIAN SMALL LIGATURE ECH YIWN +upperMapping '\x0587' s = Yield '\x0535' (CC s '\x0552' '\x0000') +-- ARMENIAN SMALL LIGATURE MEN NOW +upperMapping '\xfb13' s = Yield '\x0544' (CC s '\x0546' '\x0000') +-- ARMENIAN SMALL LIGATURE MEN ECH +upperMapping '\xfb14' s = Yield '\x0544' (CC s '\x0535' '\x0000') +-- ARMENIAN SMALL LIGATURE MEN INI +upperMapping '\xfb15' s = Yield '\x0544' (CC s '\x053b' '\x0000') +-- ARMENIAN SMALL LIGATURE VEW NOW +upperMapping '\xfb16' s = Yield '\x054e' (CC s '\x0546' '\x0000') +-- ARMENIAN SMALL LIGATURE MEN XEH +upperMapping '\xfb17' s = Yield '\x0544' (CC s '\x053d' '\x0000') +-- LATIN SMALL LETTER N PRECEDED BY APOSTROPHE +upperMapping '\x0149' s = Yield '\x02bc' (CC s '\x004e' '\x0000') +-- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS +upperMapping '\x0390' s = Yield '\x0399' (CC s '\x0308' '\x0301') +-- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS +upperMapping '\x03b0' s = Yield '\x03a5' (CC s '\x0308' '\x0301') +-- LATIN SMALL LETTER J WITH CARON +upperMapping '\x01f0' s = Yield '\x004a' (CC s '\x030c' '\x0000') +-- LATIN SMALL LETTER H WITH LINE BELOW +upperMapping '\x1e96' s = Yield '\x0048' (CC 
s '\x0331' '\x0000') +-- LATIN SMALL LETTER T WITH DIAERESIS +upperMapping '\x1e97' s = Yield '\x0054' (CC s '\x0308' '\x0000') +-- LATIN SMALL LETTER W WITH RING ABOVE +upperMapping '\x1e98' s = Yield '\x0057' (CC s '\x030a' '\x0000') +-- LATIN SMALL LETTER Y WITH RING ABOVE +upperMapping '\x1e99' s = Yield '\x0059' (CC s '\x030a' '\x0000') +-- LATIN SMALL LETTER A WITH RIGHT HALF RING +upperMapping '\x1e9a' s = Yield '\x0041' (CC s '\x02be' '\x0000') +-- GREEK SMALL LETTER UPSILON WITH PSILI +upperMapping '\x1f50' s = Yield '\x03a5' (CC s '\x0313' '\x0000') +-- GREEK SMALL LETTER UPSILON WITH PSILI AND VARIA +upperMapping '\x1f52' s = Yield '\x03a5' (CC s '\x0313' '\x0300') +-- GREEK SMALL LETTER UPSILON WITH PSILI AND OXIA +upperMapping '\x1f54' s = Yield '\x03a5' (CC s '\x0313' '\x0301') +-- GREEK SMALL LETTER UPSILON WITH PSILI AND PERISPOMENI +upperMapping '\x1f56' s = Yield '\x03a5' (CC s '\x0313' '\x0342') +-- GREEK SMALL LETTER ALPHA WITH PERISPOMENI +upperMapping '\x1fb6' s = Yield '\x0391' (CC s '\x0342' '\x0000') +-- GREEK SMALL LETTER ETA WITH PERISPOMENI +upperMapping '\x1fc6' s = Yield '\x0397' (CC s '\x0342' '\x0000') +-- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND VARIA +upperMapping '\x1fd2' s = Yield '\x0399' (CC s '\x0308' '\x0300') +-- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND OXIA +upperMapping '\x1fd3' s = Yield '\x0399' (CC s '\x0308' '\x0301') +-- GREEK SMALL LETTER IOTA WITH PERISPOMENI +upperMapping '\x1fd6' s = Yield '\x0399' (CC s '\x0342' '\x0000') +-- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND PERISPOMENI +upperMapping '\x1fd7' s = Yield '\x0399' (CC s '\x0308' '\x0342') +-- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND VARIA +upperMapping '\x1fe2' s = Yield '\x03a5' (CC s '\x0308' '\x0300') +-- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND OXIA +upperMapping '\x1fe3' s = Yield '\x03a5' (CC s '\x0308' '\x0301') +-- GREEK SMALL LETTER RHO WITH PSILI +upperMapping '\x1fe4' s = Yield '\x03a1' (CC s '\x0313' '\x0000') +-- GREEK SMALL 
LETTER UPSILON WITH PERISPOMENI +upperMapping '\x1fe6' s = Yield '\x03a5' (CC s '\x0342' '\x0000') +-- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND PERISPOMENI +upperMapping '\x1fe7' s = Yield '\x03a5' (CC s '\x0308' '\x0342') +-- GREEK SMALL LETTER OMEGA WITH PERISPOMENI +upperMapping '\x1ff6' s = Yield '\x03a9' (CC s '\x0342' '\x0000') +-- GREEK SMALL LETTER ALPHA WITH PSILI AND YPOGEGRAMMENI +upperMapping '\x1f80' s = Yield '\x1f08' (CC s '\x0399' '\x0000') +-- GREEK SMALL LETTER ALPHA WITH DASIA AND YPOGEGRAMMENI +upperMapping '\x1f81' s = Yield '\x1f09' (CC s '\x0399' '\x0000') +-- GREEK SMALL LETTER ALPHA WITH PSILI AND VARIA AND YPOGEGRAMMENI +upperMapping '\x1f82' s = Yield '\x1f0a' (CC s '\x0399' '\x0000') +-- GREEK SMALL LETTER ALPHA WITH DASIA AND VARIA AND YPOGEGRAMMENI +upperMapping '\x1f83' s = Yield '\x1f0b' (CC s '\x0399' '\x0000') +-- GREEK SMALL LETTER ALPHA WITH PSILI AND OXIA AND YPOGEGRAMMENI +upperMapping '\x1f84' s = Yield '\x1f0c' (CC s '\x0399' '\x0000') +-- GREEK SMALL LETTER ALPHA WITH DASIA AND OXIA AND YPOGEGRAMMENI +upperMapping '\x1f85' s = Yield '\x1f0d' (CC s '\x0399' '\x0000') +-- GREEK SMALL LETTER ALPHA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI +upperMapping '\x1f86' s = Yield '\x1f0e' (CC s '\x0399' '\x0000') +-- GREEK SMALL LETTER ALPHA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI +upperMapping '\x1f87' s = Yield '\x1f0f' (CC s '\x0399' '\x0000') +-- GREEK CAPITAL LETTER ALPHA WITH PSILI AND PROSGEGRAMMENI +upperMapping '\x1f88' s = Yield '\x1f08' (CC s '\x0399' '\x0000') +-- GREEK CAPITAL LETTER ALPHA WITH DASIA AND PROSGEGRAMMENI +upperMapping '\x1f89' s = Yield '\x1f09' (CC s '\x0399' '\x0000') +-- GREEK CAPITAL LETTER ALPHA WITH PSILI AND VARIA AND PROSGEGRAMMENI +upperMapping '\x1f8a' s = Yield '\x1f0a' (CC s '\x0399' '\x0000') +-- GREEK CAPITAL LETTER ALPHA WITH DASIA AND VARIA AND PROSGEGRAMMENI +upperMapping '\x1f8b' s = Yield '\x1f0b' (CC s '\x0399' '\x0000') +-- GREEK CAPITAL LETTER ALPHA WITH PSILI AND OXIA AND 
PROSGEGRAMMENI +upperMapping '\x1f8c' s = Yield '\x1f0c' (CC s '\x0399' '\x0000') +-- GREEK CAPITAL LETTER ALPHA WITH DASIA AND OXIA AND PROSGEGRAMMENI +upperMapping '\x1f8d' s = Yield '\x1f0d' (CC s '\x0399' '\x0000') +-- GREEK CAPITAL LETTER ALPHA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI +upperMapping '\x1f8e' s = Yield '\x1f0e' (CC s '\x0399' '\x0000') +-- GREEK CAPITAL LETTER ALPHA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI +upperMapping '\x1f8f' s = Yield '\x1f0f' (CC s '\x0399' '\x0000') +-- GREEK SMALL LETTER ETA WITH PSILI AND YPOGEGRAMMENI +upperMapping '\x1f90' s = Yield '\x1f28' (CC s '\x0399' '\x0000') +-- GREEK SMALL LETTER ETA WITH DASIA AND YPOGEGRAMMENI +upperMapping '\x1f91' s = Yield '\x1f29' (CC s '\x0399' '\x0000') +-- GREEK SMALL LETTER ETA WITH PSILI AND VARIA AND YPOGEGRAMMENI +upperMapping '\x1f92' s = Yield '\x1f2a' (CC s '\x0399' '\x0000') +-- GREEK SMALL LETTER ETA WITH DASIA AND VARIA AND YPOGEGRAMMENI +upperMapping '\x1f93' s = Yield '\x1f2b' (CC s '\x0399' '\x0000') +-- GREEK SMALL LETTER ETA WITH PSILI AND OXIA AND YPOGEGRAMMENI +upperMapping '\x1f94' s = Yield '\x1f2c' (CC s '\x0399' '\x0000') +-- GREEK SMALL LETTER ETA WITH DASIA AND OXIA AND YPOGEGRAMMENI +upperMapping '\x1f95' s = Yield '\x1f2d' (CC s '\x0399' '\x0000') +-- GREEK SMALL LETTER ETA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI +upperMapping '\x1f96' s = Yield '\x1f2e' (CC s '\x0399' '\x0000') +-- GREEK SMALL LETTER ETA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI +upperMapping '\x1f97' s = Yield '\x1f2f' (CC s '\x0399' '\x0000') +-- GREEK CAPITAL LETTER ETA WITH PSILI AND PROSGEGRAMMENI +upperMapping '\x1f98' s = Yield '\x1f28' (CC s '\x0399' '\x0000') +-- GREEK CAPITAL LETTER ETA WITH DASIA AND PROSGEGRAMMENI +upperMapping '\x1f99' s = Yield '\x1f29' (CC s '\x0399' '\x0000') +-- GREEK CAPITAL LETTER ETA WITH PSILI AND VARIA AND PROSGEGRAMMENI +upperMapping '\x1f9a' s = Yield '\x1f2a' (CC s '\x0399' '\x0000') +-- GREEK CAPITAL LETTER ETA WITH DASIA AND VARIA 
AND PROSGEGRAMMENI +upperMapping '\x1f9b' s = Yield '\x1f2b' (CC s '\x0399' '\x0000') +-- GREEK CAPITAL LETTER ETA WITH PSILI AND OXIA AND PROSGEGRAMMENI +upperMapping '\x1f9c' s = Yield '\x1f2c' (CC s '\x0399' '\x0000') +-- GREEK CAPITAL LETTER ETA WITH DASIA AND OXIA AND PROSGEGRAMMENI +upperMapping '\x1f9d' s = Yield '\x1f2d' (CC s '\x0399' '\x0000') +-- GREEK CAPITAL LETTER ETA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI +upperMapping '\x1f9e' s = Yield '\x1f2e' (CC s '\x0399' '\x0000') +-- GREEK CAPITAL LETTER ETA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI +upperMapping '\x1f9f' s = Yield '\x1f2f' (CC s '\x0399' '\x0000') +-- GREEK SMALL LETTER OMEGA WITH PSILI AND YPOGEGRAMMENI +upperMapping '\x1fa0' s = Yield '\x1f68' (CC s '\x0399' '\x0000') +-- GREEK SMALL LETTER OMEGA WITH DASIA AND YPOGEGRAMMENI +upperMapping '\x1fa1' s = Yield '\x1f69' (CC s '\x0399' '\x0000') +-- GREEK SMALL LETTER OMEGA WITH PSILI AND VARIA AND YPOGEGRAMMENI +upperMapping '\x1fa2' s = Yield '\x1f6a' (CC s '\x0399' '\x0000') +-- GREEK SMALL LETTER OMEGA WITH DASIA AND VARIA AND YPOGEGRAMMENI +upperMapping '\x1fa3' s = Yield '\x1f6b' (CC s '\x0399' '\x0000') +-- GREEK SMALL LETTER OMEGA WITH PSILI AND OXIA AND YPOGEGRAMMENI +upperMapping '\x1fa4' s = Yield '\x1f6c' (CC s '\x0399' '\x0000') +-- GREEK SMALL LETTER OMEGA WITH DASIA AND OXIA AND YPOGEGRAMMENI +upperMapping '\x1fa5' s = Yield '\x1f6d' (CC s '\x0399' '\x0000') +-- GREEK SMALL LETTER OMEGA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI +upperMapping '\x1fa6' s = Yield '\x1f6e' (CC s '\x0399' '\x0000') +-- GREEK SMALL LETTER OMEGA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI +upperMapping '\x1fa7' s = Yield '\x1f6f' (CC s '\x0399' '\x0000') +-- GREEK CAPITAL LETTER OMEGA WITH PSILI AND PROSGEGRAMMENI +upperMapping '\x1fa8' s = Yield '\x1f68' (CC s '\x0399' '\x0000') +-- GREEK CAPITAL LETTER OMEGA WITH DASIA AND PROSGEGRAMMENI +upperMapping '\x1fa9' s = Yield '\x1f69' (CC s '\x0399' '\x0000') +-- GREEK CAPITAL LETTER OMEGA 
WITH PSILI AND VARIA AND PROSGEGRAMMENI +upperMapping '\x1faa' s = Yield '\x1f6a' (CC s '\x0399' '\x0000') +-- GREEK CAPITAL LETTER OMEGA WITH DASIA AND VARIA AND PROSGEGRAMMENI +upperMapping '\x1fab' s = Yield '\x1f6b' (CC s '\x0399' '\x0000') +-- GREEK CAPITAL LETTER OMEGA WITH PSILI AND OXIA AND PROSGEGRAMMENI +upperMapping '\x1fac' s = Yield '\x1f6c' (CC s '\x0399' '\x0000') +-- GREEK CAPITAL LETTER OMEGA WITH DASIA AND OXIA AND PROSGEGRAMMENI +upperMapping '\x1fad' s = Yield '\x1f6d' (CC s '\x0399' '\x0000') +-- GREEK CAPITAL LETTER OMEGA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI +upperMapping '\x1fae' s = Yield '\x1f6e' (CC s '\x0399' '\x0000') +-- GREEK CAPITAL LETTER OMEGA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI +upperMapping '\x1faf' s = Yield '\x1f6f' (CC s '\x0399' '\x0000') +-- GREEK SMALL LETTER ALPHA WITH YPOGEGRAMMENI +upperMapping '\x1fb3' s = Yield '\x0391' (CC s '\x0399' '\x0000') +-- GREEK CAPITAL LETTER ALPHA WITH PROSGEGRAMMENI +upperMapping '\x1fbc' s = Yield '\x0391' (CC s '\x0399' '\x0000') +-- GREEK SMALL LETTER ETA WITH YPOGEGRAMMENI +upperMapping '\x1fc3' s = Yield '\x0397' (CC s '\x0399' '\x0000') +-- GREEK CAPITAL LETTER ETA WITH PROSGEGRAMMENI +upperMapping '\x1fcc' s = Yield '\x0397' (CC s '\x0399' '\x0000') +-- GREEK SMALL LETTER OMEGA WITH YPOGEGRAMMENI +upperMapping '\x1ff3' s = Yield '\x03a9' (CC s '\x0399' '\x0000') +-- GREEK CAPITAL LETTER OMEGA WITH PROSGEGRAMMENI +upperMapping '\x1ffc' s = Yield '\x03a9' (CC s '\x0399' '\x0000') +-- GREEK SMALL LETTER ALPHA WITH VARIA AND YPOGEGRAMMENI +upperMapping '\x1fb2' s = Yield '\x1fba' (CC s '\x0399' '\x0000') +-- GREEK SMALL LETTER ALPHA WITH OXIA AND YPOGEGRAMMENI +upperMapping '\x1fb4' s = Yield '\x0386' (CC s '\x0399' '\x0000') +-- GREEK SMALL LETTER ETA WITH VARIA AND YPOGEGRAMMENI +upperMapping '\x1fc2' s = Yield '\x1fca' (CC s '\x0399' '\x0000') +-- GREEK SMALL LETTER ETA WITH OXIA AND YPOGEGRAMMENI +upperMapping '\x1fc4' s = Yield '\x0389' (CC s '\x0399' '\x0000') +-- 
GREEK SMALL LETTER OMEGA WITH VARIA AND YPOGEGRAMMENI +upperMapping '\x1ff2' s = Yield '\x1ffa' (CC s '\x0399' '\x0000') +-- GREEK SMALL LETTER OMEGA WITH OXIA AND YPOGEGRAMMENI +upperMapping '\x1ff4' s = Yield '\x038f' (CC s '\x0399' '\x0000') +-- GREEK SMALL LETTER ALPHA WITH PERISPOMENI AND YPOGEGRAMMENI +upperMapping '\x1fb7' s = Yield '\x0391' (CC s '\x0342' '\x0399') +-- GREEK SMALL LETTER ETA WITH PERISPOMENI AND YPOGEGRAMMENI +upperMapping '\x1fc7' s = Yield '\x0397' (CC s '\x0342' '\x0399') +-- GREEK SMALL LETTER OMEGA WITH PERISPOMENI AND YPOGEGRAMMENI +upperMapping '\x1ff7' s = Yield '\x03a9' (CC s '\x0342' '\x0399') +upperMapping c s = Yield (toUpper c) (CC s '\0' '\0') +lowerMapping :: forall s. Char -> s -> Step (CC s) Char +{-# NOINLINE lowerMapping #-} +-- LATIN CAPITAL LETTER I WITH DOT ABOVE +lowerMapping '\x0130' s = Yield '\x0069' (CC s '\x0307' '\x0000') +lowerMapping c s = Yield (toLower c) (CC s '\0' '\0') +titleMapping :: forall s. Char -> s -> Step (CC s) Char +{-# NOINLINE titleMapping #-} +-- LATIN SMALL LETTER SHARP S +titleMapping '\x00df' s = Yield '\x0053' (CC s '\x0073' '\x0000') +-- LATIN SMALL LIGATURE FF +titleMapping '\xfb00' s = Yield '\x0046' (CC s '\x0066' '\x0000') +-- LATIN SMALL LIGATURE FI +titleMapping '\xfb01' s = Yield '\x0046' (CC s '\x0069' '\x0000') +-- LATIN SMALL LIGATURE FL +titleMapping '\xfb02' s = Yield '\x0046' (CC s '\x006c' '\x0000') +-- LATIN SMALL LIGATURE FFI +titleMapping '\xfb03' s = Yield '\x0046' (CC s '\x0066' '\x0069') +-- LATIN SMALL LIGATURE FFL +titleMapping '\xfb04' s = Yield '\x0046' (CC s '\x0066' '\x006c') +-- LATIN SMALL LIGATURE LONG S T +titleMapping '\xfb05' s = Yield '\x0053' (CC s '\x0074' '\x0000') +-- LATIN SMALL LIGATURE ST +titleMapping '\xfb06' s = Yield '\x0053' (CC s '\x0074' '\x0000') +-- ARMENIAN SMALL LIGATURE ECH YIWN +titleMapping '\x0587' s = Yield '\x0535' (CC s '\x0582' '\x0000') +-- ARMENIAN SMALL LIGATURE MEN NOW +titleMapping '\xfb13' s = Yield '\x0544' (CC s '\x0576' 
'\x0000') +-- ARMENIAN SMALL LIGATURE MEN ECH +titleMapping '\xfb14' s = Yield '\x0544' (CC s '\x0565' '\x0000') +-- ARMENIAN SMALL LIGATURE MEN INI +titleMapping '\xfb15' s = Yield '\x0544' (CC s '\x056b' '\x0000') +-- ARMENIAN SMALL LIGATURE VEW NOW +titleMapping '\xfb16' s = Yield '\x054e' (CC s '\x0576' '\x0000') +-- ARMENIAN SMALL LIGATURE MEN XEH +titleMapping '\xfb17' s = Yield '\x0544' (CC s '\x056d' '\x0000') +-- LATIN SMALL LETTER N PRECEDED BY APOSTROPHE +titleMapping '\x0149' s = Yield '\x02bc' (CC s '\x004e' '\x0000') +-- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS +titleMapping '\x0390' s = Yield '\x0399' (CC s '\x0308' '\x0301') +-- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS +titleMapping '\x03b0' s = Yield '\x03a5' (CC s '\x0308' '\x0301') +-- LATIN SMALL LETTER J WITH CARON +titleMapping '\x01f0' s = Yield '\x004a' (CC s '\x030c' '\x0000') +-- LATIN SMALL LETTER H WITH LINE BELOW +titleMapping '\x1e96' s = Yield '\x0048' (CC s '\x0331' '\x0000') +-- LATIN SMALL LETTER T WITH DIAERESIS +titleMapping '\x1e97' s = Yield '\x0054' (CC s '\x0308' '\x0000') +-- LATIN SMALL LETTER W WITH RING ABOVE +titleMapping '\x1e98' s = Yield '\x0057' (CC s '\x030a' '\x0000') +-- LATIN SMALL LETTER Y WITH RING ABOVE +titleMapping '\x1e99' s = Yield '\x0059' (CC s '\x030a' '\x0000') +-- LATIN SMALL LETTER A WITH RIGHT HALF RING +titleMapping '\x1e9a' s = Yield '\x0041' (CC s '\x02be' '\x0000') +-- GREEK SMALL LETTER UPSILON WITH PSILI +titleMapping '\x1f50' s = Yield '\x03a5' (CC s '\x0313' '\x0000') +-- GREEK SMALL LETTER UPSILON WITH PSILI AND VARIA +titleMapping '\x1f52' s = Yield '\x03a5' (CC s '\x0313' '\x0300') +-- GREEK SMALL LETTER UPSILON WITH PSILI AND OXIA +titleMapping '\x1f54' s = Yield '\x03a5' (CC s '\x0313' '\x0301') +-- GREEK SMALL LETTER UPSILON WITH PSILI AND PERISPOMENI +titleMapping '\x1f56' s = Yield '\x03a5' (CC s '\x0313' '\x0342') +-- GREEK SMALL LETTER ALPHA WITH PERISPOMENI +titleMapping '\x1fb6' s = Yield '\x0391' (CC s 
'\x0342' '\x0000') +-- GREEK SMALL LETTER ETA WITH PERISPOMENI +titleMapping '\x1fc6' s = Yield '\x0397' (CC s '\x0342' '\x0000') +-- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND VARIA +titleMapping '\x1fd2' s = Yield '\x0399' (CC s '\x0308' '\x0300') +-- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND OXIA +titleMapping '\x1fd3' s = Yield '\x0399' (CC s '\x0308' '\x0301') +-- GREEK SMALL LETTER IOTA WITH PERISPOMENI +titleMapping '\x1fd6' s = Yield '\x0399' (CC s '\x0342' '\x0000') +-- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND PERISPOMENI +titleMapping '\x1fd7' s = Yield '\x0399' (CC s '\x0308' '\x0342') +-- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND VARIA +titleMapping '\x1fe2' s = Yield '\x03a5' (CC s '\x0308' '\x0300') +-- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND OXIA +titleMapping '\x1fe3' s = Yield '\x03a5' (CC s '\x0308' '\x0301') +-- GREEK SMALL LETTER RHO WITH PSILI +titleMapping '\x1fe4' s = Yield '\x03a1' (CC s '\x0313' '\x0000') +-- GREEK SMALL LETTER UPSILON WITH PERISPOMENI +titleMapping '\x1fe6' s = Yield '\x03a5' (CC s '\x0342' '\x0000') +-- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND PERISPOMENI +titleMapping '\x1fe7' s = Yield '\x03a5' (CC s '\x0308' '\x0342') +-- GREEK SMALL LETTER OMEGA WITH PERISPOMENI +titleMapping '\x1ff6' s = Yield '\x03a9' (CC s '\x0342' '\x0000') +-- GREEK SMALL LETTER ALPHA WITH VARIA AND YPOGEGRAMMENI +titleMapping '\x1fb2' s = Yield '\x1fba' (CC s '\x0345' '\x0000') +-- GREEK SMALL LETTER ALPHA WITH OXIA AND YPOGEGRAMMENI +titleMapping '\x1fb4' s = Yield '\x0386' (CC s '\x0345' '\x0000') +-- GREEK SMALL LETTER ETA WITH VARIA AND YPOGEGRAMMENI +titleMapping '\x1fc2' s = Yield '\x1fca' (CC s '\x0345' '\x0000') +-- GREEK SMALL LETTER ETA WITH OXIA AND YPOGEGRAMMENI +titleMapping '\x1fc4' s = Yield '\x0389' (CC s '\x0345' '\x0000') +-- GREEK SMALL LETTER OMEGA WITH VARIA AND YPOGEGRAMMENI +titleMapping '\x1ff2' s = Yield '\x1ffa' (CC s '\x0345' '\x0000') +-- GREEK SMALL LETTER OMEGA WITH OXIA AND YPOGEGRAMMENI 
+titleMapping '\x1ff4' s = Yield '\x038f' (CC s '\x0345' '\x0000') +-- GREEK SMALL LETTER ALPHA WITH PERISPOMENI AND YPOGEGRAMMENI +titleMapping '\x1fb7' s = Yield '\x0391' (CC s '\x0342' '\x0345') +-- GREEK SMALL LETTER ETA WITH PERISPOMENI AND YPOGEGRAMMENI +titleMapping '\x1fc7' s = Yield '\x0397' (CC s '\x0342' '\x0345') +-- GREEK SMALL LETTER OMEGA WITH PERISPOMENI AND YPOGEGRAMMENI +titleMapping '\x1ff7' s = Yield '\x03a9' (CC s '\x0342' '\x0345') +titleMapping c s = Yield (toTitle c) (CC s '\0' '\0') +foldMapping :: forall s. Char -> s -> Step (CC s) Char +{-# NOINLINE foldMapping #-} +-- MICRO SIGN +foldMapping '\x00b5' s = Yield '\x03bc' (CC s '\x0000' '\x0000') +-- LATIN SMALL LETTER SHARP S +foldMapping '\x00df' s = Yield '\x0073' (CC s '\x0073' '\x0000') +-- LATIN CAPITAL LETTER I WITH DOT ABOVE +foldMapping '\x0130' s = Yield '\x0069' (CC s '\x0307' '\x0000') +-- LATIN SMALL LETTER N PRECEDED BY APOSTROPHE +foldMapping '\x0149' s = Yield '\x02bc' (CC s '\x006e' '\x0000') +-- LATIN SMALL LETTER LONG S +foldMapping '\x017f' s = Yield '\x0073' (CC s '\x0000' '\x0000') +-- LATIN SMALL LETTER J WITH CARON +foldMapping '\x01f0' s = Yield '\x006a' (CC s '\x030c' '\x0000') +-- COMBINING GREEK YPOGEGRAMMENI +foldMapping '\x0345' s = Yield '\x03b9' (CC s '\x0000' '\x0000') +-- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS +foldMapping '\x0390' s = Yield '\x03b9' (CC s '\x0308' '\x0301') +-- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS +foldMapping '\x03b0' s = Yield '\x03c5' (CC s '\x0308' '\x0301') +-- GREEK SMALL LETTER FINAL SIGMA +foldMapping '\x03c2' s = Yield '\x03c3' (CC s '\x0000' '\x0000') +-- GREEK BETA SYMBOL +foldMapping '\x03d0' s = Yield '\x03b2' (CC s '\x0000' '\x0000') +-- GREEK THETA SYMBOL +foldMapping '\x03d1' s = Yield '\x03b8' (CC s '\x0000' '\x0000') +-- GREEK PHI SYMBOL +foldMapping '\x03d5' s = Yield '\x03c6' (CC s '\x0000' '\x0000') +-- GREEK PI SYMBOL +foldMapping '\x03d6' s = Yield '\x03c0' (CC s '\x0000' '\x0000') +-- GREEK 
KAPPA SYMBOL +foldMapping '\x03f0' s = Yield '\x03ba' (CC s '\x0000' '\x0000') +-- GREEK RHO SYMBOL +foldMapping '\x03f1' s = Yield '\x03c1' (CC s '\x0000' '\x0000') +-- GREEK LUNATE EPSILON SYMBOL +foldMapping '\x03f5' s = Yield '\x03b5' (CC s '\x0000' '\x0000') +-- ARMENIAN SMALL LIGATURE ECH YIWN +foldMapping '\x0587' s = Yield '\x0565' (CC s '\x0582' '\x0000') +-- CHEROKEE SMALL LETTER YE +foldMapping '\x13f8' s = Yield '\x13f0' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER YI +foldMapping '\x13f9' s = Yield '\x13f1' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER YO +foldMapping '\x13fa' s = Yield '\x13f2' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER YU +foldMapping '\x13fb' s = Yield '\x13f3' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER YV +foldMapping '\x13fc' s = Yield '\x13f4' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER MV +foldMapping '\x13fd' s = Yield '\x13f5' (CC s '\x0000' '\x0000') +-- LATIN SMALL LETTER H WITH LINE BELOW +foldMapping '\x1e96' s = Yield '\x0068' (CC s '\x0331' '\x0000') +-- LATIN SMALL LETTER T WITH DIAERESIS +foldMapping '\x1e97' s = Yield '\x0074' (CC s '\x0308' '\x0000') +-- LATIN SMALL LETTER W WITH RING ABOVE +foldMapping '\x1e98' s = Yield '\x0077' (CC s '\x030a' '\x0000') +-- LATIN SMALL LETTER Y WITH RING ABOVE +foldMapping '\x1e99' s = Yield '\x0079' (CC s '\x030a' '\x0000') +-- LATIN SMALL LETTER A WITH RIGHT HALF RING +foldMapping '\x1e9a' s = Yield '\x0061' (CC s '\x02be' '\x0000') +-- LATIN SMALL LETTER LONG S WITH DOT ABOVE +foldMapping '\x1e9b' s = Yield '\x1e61' (CC s '\x0000' '\x0000') +-- LATIN CAPITAL LETTER SHARP S +foldMapping '\x1e9e' s = Yield '\x0073' (CC s '\x0073' '\x0000') +-- GREEK SMALL LETTER UPSILON WITH PSILI +foldMapping '\x1f50' s = Yield '\x03c5' (CC s '\x0313' '\x0000') +-- GREEK SMALL LETTER UPSILON WITH PSILI AND VARIA +foldMapping '\x1f52' s = Yield '\x03c5' (CC s '\x0313' '\x0300') +-- GREEK SMALL LETTER UPSILON WITH PSILI AND OXIA +foldMapping '\x1f54' s = Yield 
'\x03c5' (CC s '\x0313' '\x0301') +-- GREEK SMALL LETTER UPSILON WITH PSILI AND PERISPOMENI +foldMapping '\x1f56' s = Yield '\x03c5' (CC s '\x0313' '\x0342') +-- GREEK SMALL LETTER ALPHA WITH PSILI AND YPOGEGRAMMENI +foldMapping '\x1f80' s = Yield '\x1f00' (CC s '\x03b9' '\x0000') +-- GREEK SMALL LETTER ALPHA WITH DASIA AND YPOGEGRAMMENI +foldMapping '\x1f81' s = Yield '\x1f01' (CC s '\x03b9' '\x0000') +-- GREEK SMALL LETTER ALPHA WITH PSILI AND VARIA AND YPOGEGRAMMENI +foldMapping '\x1f82' s = Yield '\x1f02' (CC s '\x03b9' '\x0000') +-- GREEK SMALL LETTER ALPHA WITH DASIA AND VARIA AND YPOGEGRAMMENI +foldMapping '\x1f83' s = Yield '\x1f03' (CC s '\x03b9' '\x0000') +-- GREEK SMALL LETTER ALPHA WITH PSILI AND OXIA AND YPOGEGRAMMENI +foldMapping '\x1f84' s = Yield '\x1f04' (CC s '\x03b9' '\x0000') +-- GREEK SMALL LETTER ALPHA WITH DASIA AND OXIA AND YPOGEGRAMMENI +foldMapping '\x1f85' s = Yield '\x1f05' (CC s '\x03b9' '\x0000') +-- GREEK SMALL LETTER ALPHA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI +foldMapping '\x1f86' s = Yield '\x1f06' (CC s '\x03b9' '\x0000') +-- GREEK SMALL LETTER ALPHA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI +foldMapping '\x1f87' s = Yield '\x1f07' (CC s '\x03b9' '\x0000') +-- GREEK CAPITAL LETTER ALPHA WITH PSILI AND PROSGEGRAMMENI +foldMapping '\x1f88' s = Yield '\x1f00' (CC s '\x03b9' '\x0000') +-- GREEK CAPITAL LETTER ALPHA WITH DASIA AND PROSGEGRAMMENI +foldMapping '\x1f89' s = Yield '\x1f01' (CC s '\x03b9' '\x0000') +-- GREEK CAPITAL LETTER ALPHA WITH PSILI AND VARIA AND PROSGEGRAMMENI +foldMapping '\x1f8a' s = Yield '\x1f02' (CC s '\x03b9' '\x0000') +-- GREEK CAPITAL LETTER ALPHA WITH DASIA AND VARIA AND PROSGEGRAMMENI +foldMapping '\x1f8b' s = Yield '\x1f03' (CC s '\x03b9' '\x0000') +-- GREEK CAPITAL LETTER ALPHA WITH PSILI AND OXIA AND PROSGEGRAMMENI +foldMapping '\x1f8c' s = Yield '\x1f04' (CC s '\x03b9' '\x0000') +-- GREEK CAPITAL LETTER ALPHA WITH DASIA AND OXIA AND PROSGEGRAMMENI +foldMapping '\x1f8d' s = Yield '\x1f05' (CC 
s '\x03b9' '\x0000') +-- GREEK CAPITAL LETTER ALPHA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI +foldMapping '\x1f8e' s = Yield '\x1f06' (CC s '\x03b9' '\x0000') +-- GREEK CAPITAL LETTER ALPHA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI +foldMapping '\x1f8f' s = Yield '\x1f07' (CC s '\x03b9' '\x0000') +-- GREEK SMALL LETTER ETA WITH PSILI AND YPOGEGRAMMENI +foldMapping '\x1f90' s = Yield '\x1f20' (CC s '\x03b9' '\x0000') +-- GREEK SMALL LETTER ETA WITH DASIA AND YPOGEGRAMMENI +foldMapping '\x1f91' s = Yield '\x1f21' (CC s '\x03b9' '\x0000') +-- GREEK SMALL LETTER ETA WITH PSILI AND VARIA AND YPOGEGRAMMENI +foldMapping '\x1f92' s = Yield '\x1f22' (CC s '\x03b9' '\x0000') +-- GREEK SMALL LETTER ETA WITH DASIA AND VARIA AND YPOGEGRAMMENI +foldMapping '\x1f93' s = Yield '\x1f23' (CC s '\x03b9' '\x0000') +-- GREEK SMALL LETTER ETA WITH PSILI AND OXIA AND YPOGEGRAMMENI +foldMapping '\x1f94' s = Yield '\x1f24' (CC s '\x03b9' '\x0000') +-- GREEK SMALL LETTER ETA WITH DASIA AND OXIA AND YPOGEGRAMMENI +foldMapping '\x1f95' s = Yield '\x1f25' (CC s '\x03b9' '\x0000') +-- GREEK SMALL LETTER ETA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI +foldMapping '\x1f96' s = Yield '\x1f26' (CC s '\x03b9' '\x0000') +-- GREEK SMALL LETTER ETA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI +foldMapping '\x1f97' s = Yield '\x1f27' (CC s '\x03b9' '\x0000') +-- GREEK CAPITAL LETTER ETA WITH PSILI AND PROSGEGRAMMENI +foldMapping '\x1f98' s = Yield '\x1f20' (CC s '\x03b9' '\x0000') +-- GREEK CAPITAL LETTER ETA WITH DASIA AND PROSGEGRAMMENI +foldMapping '\x1f99' s = Yield '\x1f21' (CC s '\x03b9' '\x0000') +-- GREEK CAPITAL LETTER ETA WITH PSILI AND VARIA AND PROSGEGRAMMENI +foldMapping '\x1f9a' s = Yield '\x1f22' (CC s '\x03b9' '\x0000') +-- GREEK CAPITAL LETTER ETA WITH DASIA AND VARIA AND PROSGEGRAMMENI +foldMapping '\x1f9b' s = Yield '\x1f23' (CC s '\x03b9' '\x0000') +-- GREEK CAPITAL LETTER ETA WITH PSILI AND OXIA AND PROSGEGRAMMENI +foldMapping '\x1f9c' s = Yield '\x1f24' (CC s '\x03b9' 
'\x0000') +-- GREEK CAPITAL LETTER ETA WITH DASIA AND OXIA AND PROSGEGRAMMENI +foldMapping '\x1f9d' s = Yield '\x1f25' (CC s '\x03b9' '\x0000') +-- GREEK CAPITAL LETTER ETA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI +foldMapping '\x1f9e' s = Yield '\x1f26' (CC s '\x03b9' '\x0000') +-- GREEK CAPITAL LETTER ETA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI +foldMapping '\x1f9f' s = Yield '\x1f27' (CC s '\x03b9' '\x0000') +-- GREEK SMALL LETTER OMEGA WITH PSILI AND YPOGEGRAMMENI +foldMapping '\x1fa0' s = Yield '\x1f60' (CC s '\x03b9' '\x0000') +-- GREEK SMALL LETTER OMEGA WITH DASIA AND YPOGEGRAMMENI +foldMapping '\x1fa1' s = Yield '\x1f61' (CC s '\x03b9' '\x0000') +-- GREEK SMALL LETTER OMEGA WITH PSILI AND VARIA AND YPOGEGRAMMENI +foldMapping '\x1fa2' s = Yield '\x1f62' (CC s '\x03b9' '\x0000') +-- GREEK SMALL LETTER OMEGA WITH DASIA AND VARIA AND YPOGEGRAMMENI +foldMapping '\x1fa3' s = Yield '\x1f63' (CC s '\x03b9' '\x0000') +-- GREEK SMALL LETTER OMEGA WITH PSILI AND OXIA AND YPOGEGRAMMENI +foldMapping '\x1fa4' s = Yield '\x1f64' (CC s '\x03b9' '\x0000') +-- GREEK SMALL LETTER OMEGA WITH DASIA AND OXIA AND YPOGEGRAMMENI +foldMapping '\x1fa5' s = Yield '\x1f65' (CC s '\x03b9' '\x0000') +-- GREEK SMALL LETTER OMEGA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI +foldMapping '\x1fa6' s = Yield '\x1f66' (CC s '\x03b9' '\x0000') +-- GREEK SMALL LETTER OMEGA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI +foldMapping '\x1fa7' s = Yield '\x1f67' (CC s '\x03b9' '\x0000') +-- GREEK CAPITAL LETTER OMEGA WITH PSILI AND PROSGEGRAMMENI +foldMapping '\x1fa8' s = Yield '\x1f60' (CC s '\x03b9' '\x0000') +-- GREEK CAPITAL LETTER OMEGA WITH DASIA AND PROSGEGRAMMENI +foldMapping '\x1fa9' s = Yield '\x1f61' (CC s '\x03b9' '\x0000') +-- GREEK CAPITAL LETTER OMEGA WITH PSILI AND VARIA AND PROSGEGRAMMENI +foldMapping '\x1faa' s = Yield '\x1f62' (CC s '\x03b9' '\x0000') +-- GREEK CAPITAL LETTER OMEGA WITH DASIA AND VARIA AND PROSGEGRAMMENI +foldMapping '\x1fab' s = Yield '\x1f63' (CC s 
'\x03b9' '\x0000') +-- GREEK CAPITAL LETTER OMEGA WITH PSILI AND OXIA AND PROSGEGRAMMENI +foldMapping '\x1fac' s = Yield '\x1f64' (CC s '\x03b9' '\x0000') +-- GREEK CAPITAL LETTER OMEGA WITH DASIA AND OXIA AND PROSGEGRAMMENI +foldMapping '\x1fad' s = Yield '\x1f65' (CC s '\x03b9' '\x0000') +-- GREEK CAPITAL LETTER OMEGA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI +foldMapping '\x1fae' s = Yield '\x1f66' (CC s '\x03b9' '\x0000') +-- GREEK CAPITAL LETTER OMEGA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI +foldMapping '\x1faf' s = Yield '\x1f67' (CC s '\x03b9' '\x0000') +-- GREEK SMALL LETTER ALPHA WITH VARIA AND YPOGEGRAMMENI +foldMapping '\x1fb2' s = Yield '\x1f70' (CC s '\x03b9' '\x0000') +-- GREEK SMALL LETTER ALPHA WITH YPOGEGRAMMENI +foldMapping '\x1fb3' s = Yield '\x03b1' (CC s '\x03b9' '\x0000') +-- GREEK SMALL LETTER ALPHA WITH OXIA AND YPOGEGRAMMENI +foldMapping '\x1fb4' s = Yield '\x03ac' (CC s '\x03b9' '\x0000') +-- GREEK SMALL LETTER ALPHA WITH PERISPOMENI +foldMapping '\x1fb6' s = Yield '\x03b1' (CC s '\x0342' '\x0000') +-- GREEK SMALL LETTER ALPHA WITH PERISPOMENI AND YPOGEGRAMMENI +foldMapping '\x1fb7' s = Yield '\x03b1' (CC s '\x0342' '\x03b9') +-- GREEK CAPITAL LETTER ALPHA WITH PROSGEGRAMMENI +foldMapping '\x1fbc' s = Yield '\x03b1' (CC s '\x03b9' '\x0000') +-- GREEK PROSGEGRAMMENI +foldMapping '\x1fbe' s = Yield '\x03b9' (CC s '\x0000' '\x0000') +-- GREEK SMALL LETTER ETA WITH VARIA AND YPOGEGRAMMENI +foldMapping '\x1fc2' s = Yield '\x1f74' (CC s '\x03b9' '\x0000') +-- GREEK SMALL LETTER ETA WITH YPOGEGRAMMENI +foldMapping '\x1fc3' s = Yield '\x03b7' (CC s '\x03b9' '\x0000') +-- GREEK SMALL LETTER ETA WITH OXIA AND YPOGEGRAMMENI +foldMapping '\x1fc4' s = Yield '\x03ae' (CC s '\x03b9' '\x0000') +-- GREEK SMALL LETTER ETA WITH PERISPOMENI +foldMapping '\x1fc6' s = Yield '\x03b7' (CC s '\x0342' '\x0000') +-- GREEK SMALL LETTER ETA WITH PERISPOMENI AND YPOGEGRAMMENI +foldMapping '\x1fc7' s = Yield '\x03b7' (CC s '\x0342' '\x03b9') +-- GREEK CAPITAL 
LETTER ETA WITH PROSGEGRAMMENI +foldMapping '\x1fcc' s = Yield '\x03b7' (CC s '\x03b9' '\x0000') +-- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND VARIA +foldMapping '\x1fd2' s = Yield '\x03b9' (CC s '\x0308' '\x0300') +-- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND OXIA +foldMapping '\x1fd3' s = Yield '\x03b9' (CC s '\x0308' '\x0301') +-- GREEK SMALL LETTER IOTA WITH PERISPOMENI +foldMapping '\x1fd6' s = Yield '\x03b9' (CC s '\x0342' '\x0000') +-- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND PERISPOMENI +foldMapping '\x1fd7' s = Yield '\x03b9' (CC s '\x0308' '\x0342') +-- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND VARIA +foldMapping '\x1fe2' s = Yield '\x03c5' (CC s '\x0308' '\x0300') +-- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND OXIA +foldMapping '\x1fe3' s = Yield '\x03c5' (CC s '\x0308' '\x0301') +-- GREEK SMALL LETTER RHO WITH PSILI +foldMapping '\x1fe4' s = Yield '\x03c1' (CC s '\x0313' '\x0000') +-- GREEK SMALL LETTER UPSILON WITH PERISPOMENI +foldMapping '\x1fe6' s = Yield '\x03c5' (CC s '\x0342' '\x0000') +-- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND PERISPOMENI +foldMapping '\x1fe7' s = Yield '\x03c5' (CC s '\x0308' '\x0342') +-- GREEK SMALL LETTER OMEGA WITH VARIA AND YPOGEGRAMMENI +foldMapping '\x1ff2' s = Yield '\x1f7c' (CC s '\x03b9' '\x0000') +-- GREEK SMALL LETTER OMEGA WITH YPOGEGRAMMENI +foldMapping '\x1ff3' s = Yield '\x03c9' (CC s '\x03b9' '\x0000') +-- GREEK SMALL LETTER OMEGA WITH OXIA AND YPOGEGRAMMENI +foldMapping '\x1ff4' s = Yield '\x03ce' (CC s '\x03b9' '\x0000') +-- GREEK SMALL LETTER OMEGA WITH PERISPOMENI +foldMapping '\x1ff6' s = Yield '\x03c9' (CC s '\x0342' '\x0000') +-- GREEK SMALL LETTER OMEGA WITH PERISPOMENI AND YPOGEGRAMMENI +foldMapping '\x1ff7' s = Yield '\x03c9' (CC s '\x0342' '\x03b9') +-- GREEK CAPITAL LETTER OMEGA WITH PROSGEGRAMMENI +foldMapping '\x1ffc' s = Yield '\x03c9' (CC s '\x03b9' '\x0000') +-- LATIN CAPITAL LETTER J WITH CROSSED-TAIL +foldMapping '\xa7b2' s = Yield '\x029d' (CC s '\x0000' '\x0000') +-- LATIN 
CAPITAL LETTER CHI +foldMapping '\xa7b3' s = Yield '\xab53' (CC s '\x0000' '\x0000') +-- LATIN CAPITAL LETTER BETA +foldMapping '\xa7b4' s = Yield '\xa7b5' (CC s '\x0000' '\x0000') +-- LATIN CAPITAL LETTER OMEGA +foldMapping '\xa7b6' s = Yield '\xa7b7' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER A +foldMapping '\xab70' s = Yield '\x13a0' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER E +foldMapping '\xab71' s = Yield '\x13a1' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER I +foldMapping '\xab72' s = Yield '\x13a2' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER O +foldMapping '\xab73' s = Yield '\x13a3' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER U +foldMapping '\xab74' s = Yield '\x13a4' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER V +foldMapping '\xab75' s = Yield '\x13a5' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER GA +foldMapping '\xab76' s = Yield '\x13a6' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER KA +foldMapping '\xab77' s = Yield '\x13a7' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER GE +foldMapping '\xab78' s = Yield '\x13a8' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER GI +foldMapping '\xab79' s = Yield '\x13a9' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER GO +foldMapping '\xab7a' s = Yield '\x13aa' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER GU +foldMapping '\xab7b' s = Yield '\x13ab' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER GV +foldMapping '\xab7c' s = Yield '\x13ac' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER HA +foldMapping '\xab7d' s = Yield '\x13ad' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER HE +foldMapping '\xab7e' s = Yield '\x13ae' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER HI +foldMapping '\xab7f' s = Yield '\x13af' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER HO +foldMapping '\xab80' s = Yield '\x13b0' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER HU +foldMapping '\xab81' s = Yield '\x13b1' (CC s '\x0000' '\x0000') +-- CHEROKEE 
SMALL LETTER HV +foldMapping '\xab82' s = Yield '\x13b2' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER LA +foldMapping '\xab83' s = Yield '\x13b3' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER LE +foldMapping '\xab84' s = Yield '\x13b4' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER LI +foldMapping '\xab85' s = Yield '\x13b5' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER LO +foldMapping '\xab86' s = Yield '\x13b6' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER LU +foldMapping '\xab87' s = Yield '\x13b7' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER LV +foldMapping '\xab88' s = Yield '\x13b8' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER MA +foldMapping '\xab89' s = Yield '\x13b9' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER ME +foldMapping '\xab8a' s = Yield '\x13ba' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER MI +foldMapping '\xab8b' s = Yield '\x13bb' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER MO +foldMapping '\xab8c' s = Yield '\x13bc' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER MU +foldMapping '\xab8d' s = Yield '\x13bd' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER NA +foldMapping '\xab8e' s = Yield '\x13be' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER HNA +foldMapping '\xab8f' s = Yield '\x13bf' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER NAH +foldMapping '\xab90' s = Yield '\x13c0' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER NE +foldMapping '\xab91' s = Yield '\x13c1' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER NI +foldMapping '\xab92' s = Yield '\x13c2' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER NO +foldMapping '\xab93' s = Yield '\x13c3' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER NU +foldMapping '\xab94' s = Yield '\x13c4' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER NV +foldMapping '\xab95' s = Yield '\x13c5' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER QUA +foldMapping '\xab96' s = Yield '\x13c6' (CC s '\x0000' '\x0000') +-- CHEROKEE 
SMALL LETTER QUE +foldMapping '\xab97' s = Yield '\x13c7' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER QUI +foldMapping '\xab98' s = Yield '\x13c8' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER QUO +foldMapping '\xab99' s = Yield '\x13c9' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER QUU +foldMapping '\xab9a' s = Yield '\x13ca' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER QUV +foldMapping '\xab9b' s = Yield '\x13cb' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER SA +foldMapping '\xab9c' s = Yield '\x13cc' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER S +foldMapping '\xab9d' s = Yield '\x13cd' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER SE +foldMapping '\xab9e' s = Yield '\x13ce' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER SI +foldMapping '\xab9f' s = Yield '\x13cf' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER SO +foldMapping '\xaba0' s = Yield '\x13d0' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER SU +foldMapping '\xaba1' s = Yield '\x13d1' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER SV +foldMapping '\xaba2' s = Yield '\x13d2' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER DA +foldMapping '\xaba3' s = Yield '\x13d3' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER TA +foldMapping '\xaba4' s = Yield '\x13d4' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER DE +foldMapping '\xaba5' s = Yield '\x13d5' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER TE +foldMapping '\xaba6' s = Yield '\x13d6' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER DI +foldMapping '\xaba7' s = Yield '\x13d7' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER TI +foldMapping '\xaba8' s = Yield '\x13d8' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER DO +foldMapping '\xaba9' s = Yield '\x13d9' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER DU +foldMapping '\xabaa' s = Yield '\x13da' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER DV +foldMapping '\xabab' s = Yield '\x13db' (CC s '\x0000' '\x0000') +-- CHEROKEE 
SMALL LETTER DLA +foldMapping '\xabac' s = Yield '\x13dc' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER TLA +foldMapping '\xabad' s = Yield '\x13dd' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER TLE +foldMapping '\xabae' s = Yield '\x13de' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER TLI +foldMapping '\xabaf' s = Yield '\x13df' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER TLO +foldMapping '\xabb0' s = Yield '\x13e0' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER TLU +foldMapping '\xabb1' s = Yield '\x13e1' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER TLV +foldMapping '\xabb2' s = Yield '\x13e2' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER TSA +foldMapping '\xabb3' s = Yield '\x13e3' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER TSE +foldMapping '\xabb4' s = Yield '\x13e4' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER TSI +foldMapping '\xabb5' s = Yield '\x13e5' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER TSO +foldMapping '\xabb6' s = Yield '\x13e6' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER TSU +foldMapping '\xabb7' s = Yield '\x13e7' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER TSV +foldMapping '\xabb8' s = Yield '\x13e8' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER WA +foldMapping '\xabb9' s = Yield '\x13e9' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER WE +foldMapping '\xabba' s = Yield '\x13ea' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER WI +foldMapping '\xabbb' s = Yield '\x13eb' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER WO +foldMapping '\xabbc' s = Yield '\x13ec' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER WU +foldMapping '\xabbd' s = Yield '\x13ed' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER WV +foldMapping '\xabbe' s = Yield '\x13ee' (CC s '\x0000' '\x0000') +-- CHEROKEE SMALL LETTER YA +foldMapping '\xabbf' s = Yield '\x13ef' (CC s '\x0000' '\x0000') +-- LATIN SMALL LIGATURE FF +foldMapping '\xfb00' s = Yield '\x0066' (CC s '\x0066' '\x0000') +-- 
LATIN SMALL LIGATURE FI +foldMapping '\xfb01' s = Yield '\x0066' (CC s '\x0069' '\x0000') +-- LATIN SMALL LIGATURE FL +foldMapping '\xfb02' s = Yield '\x0066' (CC s '\x006c' '\x0000') +-- LATIN SMALL LIGATURE FFI +foldMapping '\xfb03' s = Yield '\x0066' (CC s '\x0066' '\x0069') +-- LATIN SMALL LIGATURE FFL +foldMapping '\xfb04' s = Yield '\x0066' (CC s '\x0066' '\x006c') +-- LATIN SMALL LIGATURE LONG S T +foldMapping '\xfb05' s = Yield '\x0073' (CC s '\x0074' '\x0000') +-- LATIN SMALL LIGATURE ST +foldMapping '\xfb06' s = Yield '\x0073' (CC s '\x0074' '\x0000') +-- ARMENIAN SMALL LIGATURE MEN NOW +foldMapping '\xfb13' s = Yield '\x0574' (CC s '\x0576' '\x0000') +-- ARMENIAN SMALL LIGATURE MEN ECH +foldMapping '\xfb14' s = Yield '\x0574' (CC s '\x0565' '\x0000') +-- ARMENIAN SMALL LIGATURE MEN INI +foldMapping '\xfb15' s = Yield '\x0574' (CC s '\x056b' '\x0000') +-- ARMENIAN SMALL LIGATURE VEW NOW +foldMapping '\xfb16' s = Yield '\x057e' (CC s '\x0576' '\x0000') +-- ARMENIAN SMALL LIGATURE MEN XEH +foldMapping '\xfb17' s = Yield '\x0574' (CC s '\x056d' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER A +foldMapping '\x10c80' s = Yield '\x10cc0' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER AA +foldMapping '\x10c81' s = Yield '\x10cc1' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER EB +foldMapping '\x10c82' s = Yield '\x10cc2' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER AMB +foldMapping '\x10c83' s = Yield '\x10cc3' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER EC +foldMapping '\x10c84' s = Yield '\x10cc4' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER ENC +foldMapping '\x10c85' s = Yield '\x10cc5' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER ECS +foldMapping '\x10c86' s = Yield '\x10cc6' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER ED +foldMapping '\x10c87' s = Yield '\x10cc7' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER AND +foldMapping '\x10c88' s = Yield '\x10cc8' (CC s 
'\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER E +foldMapping '\x10c89' s = Yield '\x10cc9' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER CLOSE E +foldMapping '\x10c8a' s = Yield '\x10cca' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER EE +foldMapping '\x10c8b' s = Yield '\x10ccb' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER EF +foldMapping '\x10c8c' s = Yield '\x10ccc' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER EG +foldMapping '\x10c8d' s = Yield '\x10ccd' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER EGY +foldMapping '\x10c8e' s = Yield '\x10cce' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER EH +foldMapping '\x10c8f' s = Yield '\x10ccf' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER I +foldMapping '\x10c90' s = Yield '\x10cd0' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER II +foldMapping '\x10c91' s = Yield '\x10cd1' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER EJ +foldMapping '\x10c92' s = Yield '\x10cd2' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER EK +foldMapping '\x10c93' s = Yield '\x10cd3' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER AK +foldMapping '\x10c94' s = Yield '\x10cd4' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER UNK +foldMapping '\x10c95' s = Yield '\x10cd5' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER EL +foldMapping '\x10c96' s = Yield '\x10cd6' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER ELY +foldMapping '\x10c97' s = Yield '\x10cd7' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER EM +foldMapping '\x10c98' s = Yield '\x10cd8' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER EN +foldMapping '\x10c99' s = Yield '\x10cd9' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER ENY +foldMapping '\x10c9a' s = Yield '\x10cda' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER O +foldMapping '\x10c9b' s = Yield '\x10cdb' (CC s '\x0000' 
'\x0000') +-- OLD HUNGARIAN CAPITAL LETTER OO +foldMapping '\x10c9c' s = Yield '\x10cdc' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER NIKOLSBURG OE +foldMapping '\x10c9d' s = Yield '\x10cdd' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER RUDIMENTA OE +foldMapping '\x10c9e' s = Yield '\x10cde' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER OEE +foldMapping '\x10c9f' s = Yield '\x10cdf' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER EP +foldMapping '\x10ca0' s = Yield '\x10ce0' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER EMP +foldMapping '\x10ca1' s = Yield '\x10ce1' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER ER +foldMapping '\x10ca2' s = Yield '\x10ce2' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER SHORT ER +foldMapping '\x10ca3' s = Yield '\x10ce3' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER ES +foldMapping '\x10ca4' s = Yield '\x10ce4' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER ESZ +foldMapping '\x10ca5' s = Yield '\x10ce5' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER ET +foldMapping '\x10ca6' s = Yield '\x10ce6' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER ENT +foldMapping '\x10ca7' s = Yield '\x10ce7' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER ETY +foldMapping '\x10ca8' s = Yield '\x10ce8' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER ECH +foldMapping '\x10ca9' s = Yield '\x10ce9' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER U +foldMapping '\x10caa' s = Yield '\x10cea' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER UU +foldMapping '\x10cab' s = Yield '\x10ceb' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER NIKOLSBURG UE +foldMapping '\x10cac' s = Yield '\x10cec' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER RUDIMENTA UE +foldMapping '\x10cad' s = Yield '\x10ced' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER EV +foldMapping '\x10cae' s = 
Yield '\x10cee' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER EZ +foldMapping '\x10caf' s = Yield '\x10cef' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER EZS +foldMapping '\x10cb0' s = Yield '\x10cf0' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER ENT-SHAPED SIGN +foldMapping '\x10cb1' s = Yield '\x10cf1' (CC s '\x0000' '\x0000') +-- OLD HUNGARIAN CAPITAL LETTER US +foldMapping '\x10cb2' s = Yield '\x10cf2' (CC s '\x0000' '\x0000') +foldMapping c s = Yield (toLower c) (CC s '\0' '\0') diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Fusion/Common.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Fusion/Common.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Fusion/Common.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Fusion/Common.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,938 @@ +{-# LANGUAGE BangPatterns, MagicHash, Rank2Types #-} +-- | +-- Module : Data.Text.Internal.Fusion.Common +-- Copyright : (c) Bryan O'Sullivan 2009, 2012 +-- +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : GHC +-- +-- /Warning/: this is an internal module, and does not have a stable +-- API or name. Functions in this module may not check or enforce +-- preconditions expected by public modules. Use at your own risk! +-- +-- Common stream fusion functionality for text. 
+ +module Data.Text.Internal.Fusion.Common + ( + -- * Creation and elimination + singleton + , streamList + , unstreamList + , streamCString# + + -- * Basic interface + , cons + , snoc + , append + , head + , uncons + , last + , tail + , init + , null + , lengthI + , compareLengthI + , isSingleton + + -- * Transformations + , map + , intercalate + , intersperse + + -- ** Case conversion + -- $case + , toCaseFold + , toLower + , toTitle + , toUpper + + -- ** Justification + , justifyLeftI + + -- * Folds + , foldl + , foldl' + , foldl1 + , foldl1' + , foldr + , foldr1 + + -- ** Special folds + , concat + , concatMap + , any + , all + , maximum + , minimum + + -- * Construction + -- ** Scans + , scanl + + -- ** Generation and unfolding + , replicateCharI + , replicateI + , unfoldr + , unfoldrNI + + -- * Substrings + -- ** Breaking strings + , take + , drop + , takeWhile + , dropWhile + + -- * Predicates + , isPrefixOf + + -- * Searching + , elem + , filter + + -- * Indexing + , findBy + , indexI + , findIndexI + , countCharI + + -- * Zipping and unzipping + , zipWith + ) where + +import Prelude (Bool(..), Char, Eq(..), Int, Integral, Maybe(..), + Ord(..), Ordering(..), String, (.), ($), (+), (-), (*), (++), + (&&), fromIntegral, otherwise) +import qualified Data.List as L +import qualified Prelude as P +import Data.Bits (shiftL) +import Data.Char (isLetter) +import Data.Int (Int64) +import Data.Text.Internal.Fusion.Types +import Data.Text.Internal.Fusion.CaseMapping (foldMapping, lowerMapping, titleMapping, + upperMapping) +import Data.Text.Internal.Fusion.Size +import GHC.Prim (Addr#, chr#, indexCharOffAddr#, ord#) +import GHC.Types (Char(..), Int(..)) + +singleton :: Char -> Stream Char +singleton c = Stream next False 1 + where next False = Yield c True + next True = Done +{-# INLINE [0] singleton #-} + +streamList :: [a] -> Stream a +{-# INLINE [0] streamList #-} +streamList s = Stream next s unknownSize + where next [] = Done + next (x:xs) = Yield x xs + 
+unstreamList :: Stream a -> [a] +unstreamList (Stream next s0 _len) = unfold s0 + where unfold !s = case next s of + Done -> [] + Skip s' -> unfold s' + Yield x s' -> x : unfold s' +{-# INLINE [0] unstreamList #-} + +{-# RULES "STREAM streamList/unstreamList fusion" forall s. streamList (unstreamList s) = s #-} + +-- | Stream the UTF-8-like packed encoding used by GHC to represent +-- constant strings in generated code. +-- +-- This encoding uses the byte sequence "\xc0\x80" to represent NUL, +-- and the string is NUL-terminated. +streamCString# :: Addr# -> Stream Char +streamCString# addr = Stream step 0 unknownSize + where + step !i + | b == 0 = Done + | b <= 0x7f = Yield (C# b#) (i+1) + | b <= 0xdf = let !c = chr $ ((b-0xc0) `shiftL` 6) + next 1 + in Yield c (i+2) + | b <= 0xef = let !c = chr $ ((b-0xe0) `shiftL` 12) + + (next 1 `shiftL` 6) + + next 2 + in Yield c (i+3) + | otherwise = let !c = chr $ ((b-0xf0) `shiftL` 18) + + (next 1 `shiftL` 12) + + (next 2 `shiftL` 6) + + next 3 + in Yield c (i+4) + where b = I# (ord# b#) + next n = I# (ord# (at# (i+n))) - 0x80 + !b# = at# i + at# (I# i#) = indexCharOffAddr# addr i# + chr (I# i#) = C# (chr# i#) +{-# INLINE [0] streamCString# #-} + +-- ---------------------------------------------------------------------------- +-- * Basic stream functions + +data C s = C0 !s + | C1 !s + +-- | /O(n)/ Adds a character to the front of a Stream Char. +cons :: Char -> Stream Char -> Stream Char +cons !w (Stream next0 s0 len) = Stream next (C1 s0) (len+1) + where + next (C1 s) = Yield w (C0 s) + next (C0 s) = case next0 s of + Done -> Done + Skip s' -> Skip (C0 s') + Yield x s' -> Yield x (C0 s') +{-# INLINE [0] cons #-} + +data Snoc a = N + | J !a + +-- | /O(n)/ Adds a character to the end of a stream. 
+snoc :: Stream Char -> Char -> Stream Char +snoc (Stream next0 xs0 len) w = Stream next (J xs0) (len+1) + where + next (J xs) = case next0 xs of + Done -> Yield w N + Skip xs' -> Skip (J xs') + Yield x xs' -> Yield x (J xs') + next N = Done +{-# INLINE [0] snoc #-} + +data E l r = L !l + | R !r + +-- | /O(n)/ Appends one Stream to the other. +append :: Stream Char -> Stream Char -> Stream Char +append (Stream next0 s01 len1) (Stream next1 s02 len2) = + Stream next (L s01) (len1 + len2) + where + next (L s1) = case next0 s1 of + Done -> Skip (R s02) + Skip s1' -> Skip (L s1') + Yield x s1' -> Yield x (L s1') + next (R s2) = case next1 s2 of + Done -> Done + Skip s2' -> Skip (R s2') + Yield x s2' -> Yield x (R s2') +{-# INLINE [0] append #-} + +-- | /O(1)/ Returns the first character of a Text, which must be non-empty. +-- Subject to array fusion. +head :: Stream Char -> Char +head (Stream next s0 _len) = loop_head s0 + where + loop_head !s = case next s of + Yield x _ -> x + Skip s' -> loop_head s' + Done -> head_empty +{-# INLINE [0] head #-} + +head_empty :: a +head_empty = streamError "head" "Empty stream" +{-# NOINLINE head_empty #-} + +-- | /O(1)/ Returns the first character and remainder of a 'Stream +-- Char', or 'Nothing' if empty. Subject to array fusion. +uncons :: Stream Char -> Maybe (Char, Stream Char) +uncons (Stream next s0 len) = loop_uncons s0 + where + loop_uncons !s = case next s of + Yield x s1 -> Just (x, Stream next s1 (len-1)) + Skip s' -> loop_uncons s' + Done -> Nothing +{-# INLINE [0] uncons #-} + +-- | /O(n)/ Returns the last character of a 'Stream Char', which must +-- be non-empty. 
+last :: Stream Char -> Char +last (Stream next s0 _len) = loop0_last s0 + where + loop0_last !s = case next s of + Done -> emptyError "last" + Skip s' -> loop0_last s' + Yield x s' -> loop_last x s' + loop_last !x !s = case next s of + Done -> x + Skip s' -> loop_last x s' + Yield x' s' -> loop_last x' s' +{-# INLINE[0] last #-} + +-- | /O(1)/ Returns all characters after the head of a Stream Char, which must +-- be non-empty. +tail :: Stream Char -> Stream Char +tail (Stream next0 s0 len) = Stream next (C0 s0) (len-1) + where + next (C0 s) = case next0 s of + Done -> emptyError "tail" + Skip s' -> Skip (C0 s') + Yield _ s' -> Skip (C1 s') + next (C1 s) = case next0 s of + Done -> Done + Skip s' -> Skip (C1 s') + Yield x s' -> Yield x (C1 s') +{-# INLINE [0] tail #-} + +data Init s = Init0 !s + | Init1 {-# UNPACK #-} !Char !s + +-- | /O(1)/ Returns all but the last character of a Stream Char, which +-- must be non-empty. +init :: Stream Char -> Stream Char +init (Stream next0 s0 len) = Stream next (Init0 s0) (len-1) + where + next (Init0 s) = case next0 s of + Done -> emptyError "init" + Skip s' -> Skip (Init0 s') + Yield x s' -> Skip (Init1 x s') + next (Init1 x s) = case next0 s of + Done -> Done + Skip s' -> Skip (Init1 x s') + Yield x' s' -> Yield x (Init1 x' s') +{-# INLINE [0] init #-} + +-- | /O(1)/ Tests whether a Stream Char is empty or not. +null :: Stream Char -> Bool +null (Stream next s0 _len) = loop_null s0 + where + loop_null !s = case next s of + Done -> True + Yield _ _ -> False + Skip s' -> loop_null s' +{-# INLINE[0] null #-} + +-- | /O(n)/ Returns the number of characters in a string. +lengthI :: Integral a => Stream Char -> a +lengthI (Stream next s0 _len) = loop_length 0 s0 + where + loop_length !z s = case next s of + Done -> z + Skip s' -> loop_length z s' + Yield _ s' -> loop_length (z + 1) s' +{-# INLINE[0] lengthI #-} + +-- | /O(n)/ Compares the count of characters in a string to a number. +-- Subject to fusion. 
+-- +-- This function gives the same answer as comparing against the result +-- of 'lengthI', but can short circuit if the count of characters is +-- greater than the number or if the stream can't possibly be as long +-- as the number supplied, and hence be more efficient. +compareLengthI :: Integral a => Stream Char -> a -> Ordering +compareLengthI (Stream next s0 len) n = + case compareSize len (fromIntegral n) of + Just o -> o + Nothing -> loop_cmp 0 s0 + where + loop_cmp !z s = case next s of + Done -> compare z n + Skip s' -> loop_cmp z s' + Yield _ s' | z > n -> GT + | otherwise -> loop_cmp (z + 1) s' +{-# INLINE[0] compareLengthI #-} + +-- | /O(n)/ Indicate whether a string contains exactly one element. +isSingleton :: Stream Char -> Bool +isSingleton (Stream next s0 _len) = loop 0 s0 + where + loop !z s = case next s of + Done -> z == (1::Int) + Skip s' -> loop z s' + Yield _ s' + | z >= 1 -> False + | otherwise -> loop (z+1) s' +{-# INLINE[0] isSingleton #-} + +-- ---------------------------------------------------------------------------- +-- * Stream transformations + +-- | /O(n)/ 'map' @f @xs is the Stream Char obtained by applying @f@ +-- to each element of @xs@. +map :: (Char -> Char) -> Stream Char -> Stream Char +map f (Stream next0 s0 len) = Stream next s0 len + where + next !s = case next0 s of + Done -> Done + Skip s' -> Skip s' + Yield x s' -> Yield (f x) s' +{-# INLINE [0] map #-} + +{-# + RULES "STREAM map/map fusion" forall f g s. + map f (map g s) = map (\x -> f (g x)) s + #-} + +data I s = I1 !s + | I2 !s {-# UNPACK #-} !Char + | I3 !s + +-- | /O(n)/ Take a character and place it between each of the +-- characters of a 'Stream Char'. 
+intersperse :: Char -> Stream Char -> Stream Char +intersperse c (Stream next0 s0 len) = Stream next (I1 s0) len + where + next (I1 s) = case next0 s of + Done -> Done + Skip s' -> Skip (I1 s') + Yield x s' -> Skip (I2 s' x) + next (I2 s x) = Yield x (I3 s) + next (I3 s) = case next0 s of + Done -> Done + Skip s' -> Skip (I3 s') + Yield x s' -> Yield c (I2 s' x) +{-# INLINE [0] intersperse #-} + +-- ---------------------------------------------------------------------------- +-- ** Case conversions (folds) + +-- $case +-- +-- With Unicode text, it is incorrect to use combinators like @map +-- toUpper@ to case convert each character of a string individually. +-- Instead, use the whole-string case conversion functions from this +-- module. For correctness in different writing systems, these +-- functions may map one input character to two or three output +-- characters. + +caseConvert :: (forall s. Char -> s -> Step (CC s) Char) + -> Stream Char -> Stream Char +caseConvert remap (Stream next0 s0 len) = Stream next (CC s0 '\0' '\0') len + where + next (CC s '\0' _) = + case next0 s of + Done -> Done + Skip s' -> Skip (CC s' '\0' '\0') + Yield c s' -> remap c s' + next (CC s a b) = Yield a (CC s b '\0') + +-- | /O(n)/ Convert a string to folded case. This function is mainly +-- useful for performing caseless (or case insensitive) string +-- comparisons. +-- +-- A string @x@ is a caseless match for a string @y@ if and only if: +-- +-- @toCaseFold x == toCaseFold y@ +-- +-- The result string may be longer than the input string, and may +-- differ from applying 'toLower' to the input string. For instance, +-- the Armenian small ligature men now (U+FB13) is case folded to the +-- bigram men now (U+0574 U+0576), while the micro sign (U+00B5) is +-- case folded to the Greek small letter letter mu (U+03BC) instead of +-- itself. 
+toCaseFold :: Stream Char -> Stream Char +toCaseFold = caseConvert foldMapping +{-# INLINE [0] toCaseFold #-} + +-- | /O(n)/ Convert a string to upper case, using simple case +-- conversion. The result string may be longer than the input string. +-- For instance, the German eszett (U+00DF) maps to the two-letter +-- sequence SS. +toUpper :: Stream Char -> Stream Char +toUpper = caseConvert upperMapping +{-# INLINE [0] toUpper #-} + +-- | /O(n)/ Convert a string to lower case, using simple case +-- conversion. The result string may be longer than the input string. +-- For instance, the Latin capital letter I with dot above (U+0130) +-- maps to the sequence Latin small letter i (U+0069) followed by +-- combining dot above (U+0307). +toLower :: Stream Char -> Stream Char +toLower = caseConvert lowerMapping +{-# INLINE [0] toLower #-} + +-- | /O(n)/ Convert a string to title case, using simple case +-- conversion. +-- +-- The first letter of the input is converted to title case, as is +-- every subsequent letter that immediately follows a non-letter. +-- Every letter that immediately follows another letter is converted +-- to lower case. +-- +-- The result string may be longer than the input string. For example, +-- the Latin small ligature fl (U+FB02) is converted to the +-- sequence Latin capital letter F (U+0046) followed by Latin small +-- letter l (U+006C). +-- +-- /Note/: this function does not take language or culture specific +-- rules into account. For instance, in English, different style +-- guides disagree on whether the book name \"The Hill of the Red +-- Fox\" is correctly title cased—but this function will +-- capitalize /every/ word. 
+toTitle :: Stream Char -> Stream Char +toTitle (Stream next0 s0 len) = Stream next (CC (False :*: s0) '\0' '\0') len + where + next (CC (letter :*: s) '\0' _) = + case next0 s of + Done -> Done + Skip s' -> Skip (CC (letter :*: s') '\0' '\0') + Yield c s' + | letter' -> if letter + then lowerMapping c (letter' :*: s') + else titleMapping c (letter' :*: s') + | otherwise -> Yield c (CC (letter' :*: s') '\0' '\0') + where letter' = isLetter c + next (CC s a b) = Yield a (CC s b '\0') +{-# INLINE [0] toTitle #-} + +data Justify i s = Just1 !i !s + | Just2 !i !s + +justifyLeftI :: Integral a => a -> Char -> Stream Char -> Stream Char +justifyLeftI k c (Stream next0 s0 len) = + Stream next (Just1 0 s0) (larger (fromIntegral k) len) + where + next (Just1 n s) = + case next0 s of + Done -> next (Just2 n s) + Skip s' -> Skip (Just1 n s') + Yield x s' -> Yield x (Just1 (n+1) s') + next (Just2 n s) + | n < k = Yield c (Just2 (n+1) s) + | otherwise = Done + {-# INLINE next #-} +{-# INLINE [0] justifyLeftI #-} + +-- ---------------------------------------------------------------------------- +-- * Reducing Streams (folds) + +-- | foldl, applied to a binary operator, a starting value (typically the +-- left-identity of the operator), and a Stream, reduces the Stream using the +-- binary operator, from left to right. +foldl :: (b -> Char -> b) -> b -> Stream Char -> b +foldl f z0 (Stream next s0 _len) = loop_foldl z0 s0 + where + loop_foldl z !s = case next s of + Done -> z + Skip s' -> loop_foldl z s' + Yield x s' -> loop_foldl (f z x) s' +{-# INLINE [0] foldl #-} + +-- | A strict version of foldl. +foldl' :: (b -> Char -> b) -> b -> Stream Char -> b +foldl' f z0 (Stream next s0 _len) = loop_foldl' z0 s0 + where + loop_foldl' !z !s = case next s of + Done -> z + Skip s' -> loop_foldl' z s' + Yield x s' -> loop_foldl' (f z x) s' +{-# INLINE [0] foldl' #-} + +-- | foldl1 is a variant of foldl that has no starting value argument, +-- and thus must be applied to non-empty Streams. 
+foldl1 :: (Char -> Char -> Char) -> Stream Char -> Char +foldl1 f (Stream next s0 _len) = loop0_foldl1 s0 + where + loop0_foldl1 !s = case next s of + Skip s' -> loop0_foldl1 s' + Yield x s' -> loop_foldl1 x s' + Done -> emptyError "foldl1" + loop_foldl1 z !s = case next s of + Done -> z + Skip s' -> loop_foldl1 z s' + Yield x s' -> loop_foldl1 (f z x) s' +{-# INLINE [0] foldl1 #-} + +-- | A strict version of foldl1. +foldl1' :: (Char -> Char -> Char) -> Stream Char -> Char +foldl1' f (Stream next s0 _len) = loop0_foldl1' s0 + where + loop0_foldl1' !s = case next s of + Skip s' -> loop0_foldl1' s' + Yield x s' -> loop_foldl1' x s' + Done -> emptyError "foldl1" + loop_foldl1' !z !s = case next s of + Done -> z + Skip s' -> loop_foldl1' z s' + Yield x s' -> loop_foldl1' (f z x) s' +{-# INLINE [0] foldl1' #-} + +-- | 'foldr', applied to a binary operator, a starting value (typically the +-- right-identity of the operator), and a stream, reduces the stream using the +-- binary operator, from right to left. +foldr :: (Char -> b -> b) -> b -> Stream Char -> b +foldr f z (Stream next s0 _len) = loop_foldr s0 + where + loop_foldr !s = case next s of + Done -> z + Skip s' -> loop_foldr s' + Yield x s' -> f x (loop_foldr s') +{-# INLINE [0] foldr #-} + +-- | foldr1 is a variant of 'foldr' that has no starting value argument, +-- and thus must be applied to non-empty streams. +-- Subject to array fusion. +foldr1 :: (Char -> Char -> Char) -> Stream Char -> Char +foldr1 f (Stream next s0 _len) = loop0_foldr1 s0 + where + loop0_foldr1 !s = case next s of + Done -> emptyError "foldr1" + Skip s' -> loop0_foldr1 s' + Yield x s' -> loop_foldr1 x s' + + loop_foldr1 x !s = case next s of + Done -> x + Skip s' -> loop_foldr1 x s' + Yield x' s' -> f x (loop_foldr1 x' s') +{-# INLINE [0] foldr1 #-} + +intercalate :: Stream Char -> [Stream Char] -> Stream Char +intercalate s = concat . 
(L.intersperse s) +{-# INLINE [0] intercalate #-} + +-- ---------------------------------------------------------------------------- +-- ** Special folds + +-- | /O(n)/ Concatenate a list of streams. Subject to array fusion. +concat :: [Stream Char] -> Stream Char +concat = L.foldr append empty +{-# INLINE [0] concat #-} + +-- | Map a function over a stream that results in a stream and concatenate the +-- results. +concatMap :: (Char -> Stream Char) -> Stream Char -> Stream Char +concatMap f = foldr (append . f) empty +{-# INLINE [0] concatMap #-} + +-- | /O(n)/ any @p @xs determines if any character in the stream +-- @xs@ satisfies the predicate @p@. +any :: (Char -> Bool) -> Stream Char -> Bool +any p (Stream next0 s0 _len) = loop_any s0 + where + loop_any !s = case next0 s of + Done -> False + Skip s' -> loop_any s' + Yield x s' | p x -> True + | otherwise -> loop_any s' +{-# INLINE [0] any #-} + +-- | /O(n)/ all @p @xs determines if all characters in the 'Text' +-- @xs@ satisfy the predicate @p@. +all :: (Char -> Bool) -> Stream Char -> Bool +all p (Stream next0 s0 _len) = loop_all s0 + where + loop_all !s = case next0 s of + Done -> True + Skip s' -> loop_all s' + Yield x s' | p x -> loop_all s' + | otherwise -> False +{-# INLINE [0] all #-} + +-- | /O(n)/ maximum returns the maximum value from a stream, which must be +-- non-empty. +maximum :: Stream Char -> Char +maximum (Stream next0 s0 _len) = loop0_maximum s0 + where + loop0_maximum !s = case next0 s of + Done -> emptyError "maximum" + Skip s' -> loop0_maximum s' + Yield x s' -> loop_maximum x s' + loop_maximum !z !s = case next0 s of + Done -> z + Skip s' -> loop_maximum z s' + Yield x s' + | x > z -> loop_maximum x s' + | otherwise -> loop_maximum z s' +{-# INLINE [0] maximum #-} + +-- | /O(n)/ minimum returns the minimum value from a 'Text', which must be +-- non-empty. 
+minimum :: Stream Char -> Char +minimum (Stream next0 s0 _len) = loop0_minimum s0 + where + loop0_minimum !s = case next0 s of + Done -> emptyError "minimum" + Skip s' -> loop0_minimum s' + Yield x s' -> loop_minimum x s' + loop_minimum !z !s = case next0 s of + Done -> z + Skip s' -> loop_minimum z s' + Yield x s' + | x < z -> loop_minimum x s' + | otherwise -> loop_minimum z s' +{-# INLINE [0] minimum #-} + +-- ----------------------------------------------------------------------------- +-- * Building streams + +scanl :: (Char -> Char -> Char) -> Char -> Stream Char -> Stream Char +scanl f z0 (Stream next0 s0 len) = Stream next (Scan1 z0 s0) (len+1) -- HINT maybe too low + where + {-# INLINE next #-} + next (Scan1 z s) = Yield z (Scan2 z s) + next (Scan2 z s) = case next0 s of + Yield x s' -> let !x' = f z x + in Yield x' (Scan2 x' s') + Skip s' -> Skip (Scan2 z s') + Done -> Done +{-# INLINE [0] scanl #-} + +-- ----------------------------------------------------------------------------- +-- ** Generating and unfolding streams + +replicateCharI :: Integral a => a -> Char -> Stream Char +replicateCharI n c + | n < 0 = empty + | otherwise = Stream next 0 (fromIntegral n) -- HINT maybe too low + where + next i | i >= n = Done + | otherwise = Yield c (i + 1) +{-# INLINE [0] replicateCharI #-} + +data RI s = RI !s {-# UNPACK #-} !Int64 + +replicateI :: Int64 -> Stream Char -> Stream Char +replicateI n (Stream next0 s0 len) = + Stream next (RI s0 0) (fromIntegral (max 0 n) * len) + where + next (RI s k) + | k >= n = Done + | otherwise = case next0 s of + Done -> Skip (RI s0 (k+1)) + Skip s' -> Skip (RI s' k) + Yield x s' -> Yield x (RI s' k) +{-# INLINE [0] replicateI #-} + +-- | /O(n)/, where @n@ is the length of the result. The unfoldr function +-- is analogous to the List 'unfoldr'. unfoldr builds a stream +-- from a seed value. 
The function takes the element and returns +-- Nothing if it is done producing the stream or returns Just +-- (a,b), in which case, a is the next Char in the string, and b is +-- the seed value for further production. +unfoldr :: (a -> Maybe (Char,a)) -> a -> Stream Char +unfoldr f s0 = Stream next s0 1 -- HINT maybe too low + where + {-# INLINE next #-} + next !s = case f s of + Nothing -> Done + Just (w, s') -> Yield w s' +{-# INLINE [0] unfoldr #-} + +-- | /O(n)/ Like 'unfoldr', 'unfoldrNI' builds a stream from a seed +-- value. However, the length of the result is limited by the +-- first argument to 'unfoldrNI'. This function is more efficient than +-- 'unfoldr' when the length of the result is known. +unfoldrNI :: Integral a => a -> (b -> Maybe (Char,b)) -> b -> Stream Char +unfoldrNI n f s0 | n < 0 = empty + | otherwise = Stream next (0 :*: s0) (fromIntegral (n*2)) -- HINT maybe too high + where + {-# INLINE next #-} + next (z :*: s) = case f s of + Nothing -> Done + Just (w, s') | z >= n -> Done + | otherwise -> Yield w ((z + 1) :*: s') +{-# INLINE unfoldrNI #-} + +------------------------------------------------------------------------------- +-- * Substreams + +-- | /O(n)/ take n, applied to a stream, returns the prefix of the +-- stream of length @n@, or the stream itself if @n@ is greater than the +-- length of the stream. +take :: Integral a => a -> Stream Char -> Stream Char +take n0 (Stream next0 s0 len) = + Stream next (n0 :*: s0) (smaller len (fromIntegral (max 0 n0))) + where + {-# INLINE next #-} + next (n :*: s) | n <= 0 = Done + | otherwise = case next0 s of + Done -> Done + Skip s' -> Skip (n :*: s') + Yield x s' -> Yield x ((n-1) :*: s') +{-# INLINE [0] take #-} + +data Drop a s = NS !s + | JS !a !s + +-- | /O(n)/ drop n, applied to a stream, returns the suffix of the +-- stream after the first @n@ characters, or the empty stream if @n@ +-- is greater than the length of the stream. 
+drop :: Integral a => a -> Stream Char -> Stream Char +drop n0 (Stream next0 s0 len) = + Stream next (JS n0 s0) (len - fromIntegral (max 0 n0)) + where + {-# INLINE next #-} + next (JS n s) + | n <= 0 = Skip (NS s) + | otherwise = case next0 s of + Done -> Done + Skip s' -> Skip (JS n s') + Yield _ s' -> Skip (JS (n-1) s') + next (NS s) = case next0 s of + Done -> Done + Skip s' -> Skip (NS s') + Yield x s' -> Yield x (NS s') +{-# INLINE [0] drop #-} + +-- | takeWhile, applied to a predicate @p@ and a stream, returns the +-- longest prefix (possibly empty) of elements that satisfy p. +takeWhile :: (Char -> Bool) -> Stream Char -> Stream Char +takeWhile p (Stream next0 s0 len) = Stream next s0 len -- HINT maybe too high + where + {-# INLINE next #-} + next !s = case next0 s of + Done -> Done + Skip s' -> Skip s' + Yield x s' | p x -> Yield x s' + | otherwise -> Done +{-# INLINE [0] takeWhile #-} + +-- | dropWhile @p @xs returns the suffix remaining after takeWhile @p @xs. +dropWhile :: (Char -> Bool) -> Stream Char -> Stream Char +dropWhile p (Stream next0 s0 len) = Stream next (L s0) len -- HINT maybe too high + where + {-# INLINE next #-} + next (L s) = case next0 s of + Done -> Done + Skip s' -> Skip (L s') + Yield x s' | p x -> Skip (L s') + | otherwise -> Yield x (R s') + next (R s) = case next0 s of + Done -> Done + Skip s' -> Skip (R s') + Yield x s' -> Yield x (R s') +{-# INLINE [0] dropWhile #-} + +-- | /O(n)/ The 'isPrefixOf' function takes two 'Stream's and returns +-- 'True' iff the first is a prefix of the second. 
+isPrefixOf :: (Eq a) => Stream a -> Stream a -> Bool +isPrefixOf (Stream next1 s1 _) (Stream next2 s2 _) = loop (next1 s1) (next2 s2) + where + loop Done _ = True + loop _ Done = False + loop (Skip s1') (Skip s2') = loop (next1 s1') (next2 s2') + loop (Skip s1') x2 = loop (next1 s1') x2 + loop x1 (Skip s2') = loop x1 (next2 s2') + loop (Yield x1 s1') (Yield x2 s2') = x1 == x2 && + loop (next1 s1') (next2 s2') +{-# INLINE [0] isPrefixOf #-} + +-- ---------------------------------------------------------------------------- +-- * Searching + +------------------------------------------------------------------------------- +-- ** Searching by equality + +-- | /O(n)/ elem is the stream membership predicate. +elem :: Char -> Stream Char -> Bool +elem w (Stream next s0 _len) = loop_elem s0 + where + loop_elem !s = case next s of + Done -> False + Skip s' -> loop_elem s' + Yield x s' | x == w -> True + | otherwise -> loop_elem s' +{-# INLINE [0] elem #-} + +------------------------------------------------------------------------------- +-- ** Searching with a predicate + +-- | /O(n)/ The 'findBy' function takes a predicate and a stream, +-- and returns the first element in matching the predicate, or 'Nothing' +-- if there is no such element. + +findBy :: (Char -> Bool) -> Stream Char -> Maybe Char +findBy p (Stream next s0 _len) = loop_find s0 + where + loop_find !s = case next s of + Done -> Nothing + Skip s' -> loop_find s' + Yield x s' | p x -> Just x + | otherwise -> loop_find s' +{-# INLINE [0] findBy #-} + +-- | /O(n)/ Stream index (subscript) operator, starting from 0. 
+indexI :: Integral a => Stream Char -> a -> Char +indexI (Stream next s0 _len) n0 + | n0 < 0 = streamError "index" "Negative index" + | otherwise = loop_index n0 s0 + where + loop_index !n !s = case next s of + Done -> streamError "index" "Index too large" + Skip s' -> loop_index n s' + Yield x s' | n == 0 -> x + | otherwise -> loop_index (n-1) s' +{-# INLINE [0] indexI #-} + +-- | /O(n)/ 'filter', applied to a predicate and a stream, +-- returns a stream containing those characters that satisfy the +-- predicate. +filter :: (Char -> Bool) -> Stream Char -> Stream Char +filter p (Stream next0 s0 len) = Stream next s0 len -- HINT maybe too high + where + next !s = case next0 s of + Done -> Done + Skip s' -> Skip s' + Yield x s' | p x -> Yield x s' + | otherwise -> Skip s' +{-# INLINE [0] filter #-} + +{-# RULES + "STREAM filter/filter fusion" forall p q s. + filter p (filter q s) = filter (\x -> q x && p x) s + #-} + +-- | The 'findIndexI' function takes a predicate and a stream and +-- returns the index of the first element in the stream satisfying the +-- predicate. +findIndexI :: Integral a => (Char -> Bool) -> Stream Char -> Maybe a +findIndexI p s = case findIndicesI p s of + (i:_) -> Just i + _ -> Nothing +{-# INLINE [0] findIndexI #-} + +-- | The 'findIndicesI' function takes a predicate and a stream and +-- returns all indices of the elements in the stream satisfying the +-- predicate. +findIndicesI :: Integral a => (Char -> Bool) -> Stream Char -> [a] +findIndicesI p (Stream next s0 _len) = loop_findIndex 0 s0 + where + loop_findIndex !i !s = case next s of + Done -> [] + Skip s' -> loop_findIndex i s' -- hmm. not caught by QC + Yield x s' | p x -> i : loop_findIndex (i+1) s' + | otherwise -> loop_findIndex (i+1) s' +{-# INLINE [0] findIndicesI #-} + +------------------------------------------------------------------------------- +-- * Zipping + +-- | Strict triple. 
+data Zip a b m = Z1 !a !b + | Z2 !a !b !m + +-- | zipWith generalises 'zip' by zipping with the function given as +-- the first argument, instead of a tupling function. +zipWith :: (a -> a -> b) -> Stream a -> Stream a -> Stream b +zipWith f (Stream next0 sa0 len1) (Stream next1 sb0 len2) = + Stream next (Z1 sa0 sb0) (smaller len1 len2) + where + next (Z1 sa sb) = case next0 sa of + Done -> Done + Skip sa' -> Skip (Z1 sa' sb) + Yield a sa' -> Skip (Z2 sa' sb a) + + next (Z2 sa' sb a) = case next1 sb of + Done -> Done + Skip sb' -> Skip (Z2 sa' sb' a) + Yield b sb' -> Yield (f a b) (Z1 sa' sb') +{-# INLINE [0] zipWith #-} + +-- | /O(n)/ The 'countCharI' function returns the number of times the +-- query element appears in the given stream. +countCharI :: Integral a => Char -> Stream Char -> a +countCharI a (Stream next s0 _len) = loop 0 s0 + where + loop !i !s = case next s of + Done -> i + Skip s' -> loop i s' + Yield x s' | a == x -> loop (i+1) s' + | otherwise -> loop i s' +{-# INLINE [0] countCharI #-} + +streamError :: String -> String -> a +streamError func msg = P.error $ "Data.Text.Internal.Fusion.Common." 
++ func ++ ": " ++ msg + +emptyError :: String -> a +emptyError func = internalError func "Empty input" + +internalError :: String -> a +internalError func = streamError func "Internal error" diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Fusion/Size.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Fusion/Size.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Fusion/Size.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Fusion/Size.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,157 @@ +{-# LANGUAGE CPP #-} +{-# OPTIONS_GHC -fno-warn-missing-methods #-} +-- | +-- Module : Data.Text.Internal.Fusion.Internal +-- Copyright : (c) Roman Leshchinskiy 2008, +-- (c) Bryan O'Sullivan 2009 +-- +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : portable +-- +-- /Warning/: this is an internal module, and does not have a stable +-- API or name. Functions in this module may not check or enforce +-- preconditions expected by public modules. Use at your own risk! +-- +-- Size hints. + +module Data.Text.Internal.Fusion.Size + ( + Size + , exactly + , exactSize + , maxSize + , betweenSize + , unknownSize + , smaller + , larger + , upperBound + , lowerBound + , compareSize + , isEmpty + ) where + +import Data.Text.Internal (mul) +#if defined(ASSERTS) +import Control.Exception (assert) +#endif + +data Size = Between {-# UNPACK #-} !Int {-# UNPACK #-} !Int -- ^ Lower and upper bounds on size. + | Unknown -- ^ Unknown size. 
+ deriving (Eq, Show) + +exactly :: Size -> Maybe Int +exactly (Between na nb) | na == nb = Just na +exactly _ = Nothing +{-# INLINE exactly #-} + +exactSize :: Int -> Size +exactSize n = +#if defined(ASSERTS) + assert (n >= 0) +#endif + Between n n +{-# INLINE exactSize #-} + +maxSize :: Int -> Size +maxSize n = +#if defined(ASSERTS) + assert (n >= 0) +#endif + Between 0 n +{-# INLINE maxSize #-} + +betweenSize :: Int -> Int -> Size +betweenSize m n = +#if defined(ASSERTS) + assert (m >= 0) + assert (n >= m) +#endif + Between m n +{-# INLINE betweenSize #-} + +unknownSize :: Size +unknownSize = Unknown +{-# INLINE unknownSize #-} + +instance Num Size where + (+) = addSize + (-) = subtractSize + (*) = mulSize + + fromInteger = f where f = exactSize . fromInteger + {-# INLINE f #-} + +add :: Int -> Int -> Int +add m n | mn >= 0 = mn + | otherwise = overflowError + where mn = m + n +{-# INLINE add #-} + +addSize :: Size -> Size -> Size +addSize (Between ma mb) (Between na nb) = Between (add ma na) (add mb nb) +addSize _ _ = Unknown +{-# INLINE addSize #-} + +subtractSize :: Size -> Size -> Size +subtractSize (Between ma mb) (Between na nb) = Between (max (ma-nb) 0) (max (mb-na) 0) +subtractSize a@(Between 0 _) Unknown = a +subtractSize (Between _ mb) Unknown = Between 0 mb +subtractSize _ _ = Unknown +{-# INLINE subtractSize #-} + +mulSize :: Size -> Size -> Size +mulSize (Between ma mb) (Between na nb) = Between (mul ma na) (mul mb nb) +mulSize _ _ = Unknown +{-# INLINE mulSize #-} + +-- | Minimum of two size hints. +smaller :: Size -> Size -> Size +smaller a@(Between ma mb) b@(Between na nb) + | mb <= na = a + | nb <= ma = b + | otherwise = Between (ma `min` na) (mb `min` nb) +smaller a@(Between 0 _) Unknown = a +smaller (Between _ mb) Unknown = Between 0 mb +smaller Unknown b@(Between 0 _) = b +smaller Unknown (Between _ nb) = Between 0 nb +smaller Unknown Unknown = Unknown +{-# INLINE smaller #-} + +-- | Maximum of two size hints. 
+larger :: Size -> Size -> Size +larger a@(Between ma mb) b@(Between na nb) + | ma >= nb = a + | na >= mb = b + | otherwise = Between (ma `max` na) (mb `max` nb) +larger _ _ = Unknown +{-# INLINE larger #-} + +-- | Compute the maximum size from a size hint, if possible. +upperBound :: Int -> Size -> Int +upperBound _ (Between _ n) = n +upperBound k _ = k +{-# INLINE upperBound #-} + +-- | Compute the maximum size from a size hint, if possible. +lowerBound :: Int -> Size -> Int +lowerBound _ (Between n _) = n +lowerBound k _ = k +{-# INLINE lowerBound #-} + +compareSize :: Size -> Int -> Maybe Ordering +compareSize (Between ma mb) n + | mb < n = Just LT + | ma > n = Just GT + | ma == n && mb == n = Just EQ +compareSize _ _ = Nothing + + +isEmpty :: Size -> Bool +isEmpty (Between _ n) = n <= 0 +isEmpty _ = False +{-# INLINE isEmpty #-} + +overflowError :: Int +overflowError = error "Data.Text.Internal.Fusion.Size: size overflow" diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Fusion/Types.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Fusion/Types.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Fusion/Types.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Fusion/Types.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,122 @@ +{-# LANGUAGE BangPatterns, ExistentialQuantification #-} +-- | +-- Module : Data.Text.Internal.Fusion.Types +-- Copyright : (c) Tom Harper 2008-2009, +-- (c) Bryan O'Sullivan 2009, +-- (c) Duncan Coutts 2009, +-- (c) Jasper Van der Jeugt 2011 +-- +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : GHC +-- +-- /Warning/: this is an internal module, and does not have a stable +-- API or name. Functions in this module may not check or enforce +-- preconditions expected by public modules. Use at your own risk! +-- +-- Core stream fusion functionality for text. 
+ +module Data.Text.Internal.Fusion.Types + ( + CC(..) + , PairS(..) + , Scan(..) + , RS(..) + , Step(..) + , Stream(..) + , empty + ) where + +import Data.Text.Internal.Fusion.Size +import Data.Word (Word8) + +-- | Specialised tuple for case conversion. +data CC s = CC !s {-# UNPACK #-} !Char {-# UNPACK #-} !Char + +-- | Restreaming state. +data RS s + = RS0 !s + | RS1 !s {-# UNPACK #-} !Word8 + | RS2 !s {-# UNPACK #-} !Word8 {-# UNPACK #-} !Word8 + | RS3 !s {-# UNPACK #-} !Word8 {-# UNPACK #-} !Word8 {-# UNPACK #-} !Word8 + +-- | Strict pair. +data PairS a b = !a :*: !b + -- deriving (Eq, Ord, Show) +infixl 2 :*: + +-- | An intermediate result in a scan. +data Scan s = Scan1 {-# UNPACK #-} !Char !s + | Scan2 {-# UNPACK #-} !Char !s + +-- | Intermediate result in a processing pipeline. +data Step s a = Done + | Skip !s + | Yield !a !s + +{- +instance (Show a) => Show (Step s a) + where show Done = "Done" + show (Skip _) = "Skip" + show (Yield x _) = "Yield " ++ show x +-} + +instance (Eq a) => Eq (Stream a) where + (==) = eq + +instance (Ord a) => Ord (Stream a) where + compare = cmp + +-- The length hint in a Stream has two roles. If its value is zero, +-- we trust it, and treat the stream as empty. Otherwise, we treat it +-- as a hint: it should usually be accurate, so we use it when +-- unstreaming to decide what size array to allocate. However, the +-- unstreaming functions must be able to cope with the hint being too +-- small or too large. +-- +-- The size hint tries to track the UTF-16 code points in a stream, +-- but often counts the number of characters instead. It can easily +-- undercount if, for instance, a transformed stream contains astral +-- plane characters (those above 0x10000). + +data Stream a = + forall s. Stream + (s -> Step s a) -- stepper function + !s -- current state + !Size -- size hint + +-- | /O(n)/ Determines if two streams are equal. 
+eq :: (Eq a) => Stream a -> Stream a -> Bool +eq (Stream next1 s1 _) (Stream next2 s2 _) = loop (next1 s1) (next2 s2) + where + loop Done Done = True + loop (Skip s1') (Skip s2') = loop (next1 s1') (next2 s2') + loop (Skip s1') x2 = loop (next1 s1') x2 + loop x1 (Skip s2') = loop x1 (next2 s2') + loop Done _ = False + loop _ Done = False + loop (Yield x1 s1') (Yield x2 s2') = x1 == x2 && + loop (next1 s1') (next2 s2') +{-# INLINE [0] eq #-} + +cmp :: (Ord a) => Stream a -> Stream a -> Ordering +cmp (Stream next1 s1 _) (Stream next2 s2 _) = loop (next1 s1) (next2 s2) + where + loop Done Done = EQ + loop (Skip s1') (Skip s2') = loop (next1 s1') (next2 s2') + loop (Skip s1') x2 = loop (next1 s1') x2 + loop x1 (Skip s2') = loop x1 (next2 s2') + loop Done _ = LT + loop _ Done = GT + loop (Yield x1 s1') (Yield x2 s2') = + case compare x1 x2 of + EQ -> loop (next1 s1') (next2 s2') + other -> other +{-# INLINE [0] cmp #-} + +-- | The empty stream. +empty :: Stream a +empty = Stream next () 0 + where next _ = Done +{-# INLINE [0] empty #-} diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Fusion.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Fusion.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Fusion.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Fusion.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,234 @@ +{-# LANGUAGE BangPatterns, MagicHash #-} + +-- | +-- Module : Data.Text.Internal.Fusion +-- Copyright : (c) Tom Harper 2008-2009, +-- (c) Bryan O'Sullivan 2009-2010, +-- (c) Duncan Coutts 2009 +-- +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : GHC +-- +-- /Warning/: this is an internal module, and does not have a stable +-- API or name. Functions in this module may not check or enforce +-- preconditions expected by public modules. Use at your own risk! 
+-- +-- Text manipulation functions represented as fusible operations over +-- streams. +module Data.Text.Internal.Fusion + ( + -- * Types + Stream(..) + , Step(..) + + -- * Creation and elimination + , stream + , unstream + , reverseStream + + , length + + -- * Transformations + , reverse + + -- * Construction + -- ** Scans + , reverseScanr + + -- ** Accumulating maps + , mapAccumL + + -- ** Generation and unfolding + , unfoldrN + + -- * Indexing + , index + , findIndex + , countChar + ) where + +import Prelude (Bool(..), Char, Maybe(..), Monad(..), Int, + Num(..), Ord(..), ($), (&&), + fromIntegral, otherwise) +import Data.Bits ((.&.)) +import Data.Text.Internal (Text(..)) +import Data.Text.Internal.Private (runText) +import Data.Text.Internal.Unsafe.Char (ord, unsafeChr, unsafeWrite) +import Data.Text.Internal.Unsafe.Shift (shiftL, shiftR) +import qualified Data.Text.Array as A +import qualified Data.Text.Internal.Fusion.Common as S +import Data.Text.Internal.Fusion.Types +import Data.Text.Internal.Fusion.Size +import qualified Data.Text.Internal as I +import qualified Data.Text.Internal.Encoding.Utf16 as U16 + +default(Int) + +-- | /O(n)/ Convert a 'Text' into a 'Stream Char'. +stream :: Text -> Stream Char +stream (Text arr off len) = Stream next off (betweenSize (len `shiftR` 1) len) + where + !end = off+len + next !i + | i >= end = Done + | n >= 0xD800 && n <= 0xDBFF = Yield (U16.chr2 n n2) (i + 2) + | otherwise = Yield (unsafeChr n) (i + 1) + where + n = A.unsafeIndex arr i + n2 = A.unsafeIndex arr (i + 1) +{-# INLINE [0] stream #-} + +-- | /O(n)/ Convert a 'Text' into a 'Stream Char', but iterate +-- backwards. 
+reverseStream :: Text -> Stream Char +reverseStream (Text arr off len) = Stream next (off+len-1) (betweenSize (len `shiftR` 1) len) + where + {-# INLINE next #-} + next !i + | i < off = Done + | n >= 0xDC00 && n <= 0xDFFF = Yield (U16.chr2 n2 n) (i - 2) + | otherwise = Yield (unsafeChr n) (i - 1) + where + n = A.unsafeIndex arr i + n2 = A.unsafeIndex arr (i - 1) +{-# INLINE [0] reverseStream #-} + +-- | /O(n)/ Convert a 'Stream Char' into a 'Text'. +unstream :: Stream Char -> Text +unstream (Stream next0 s0 len) = runText $ \done -> do + let mlen = upperBound 4 len + arr0 <- A.new mlen + let outer arr top = loop + where + loop !s !i = + case next0 s of + Done -> done arr i + Skip s' -> loop s' i + Yield x s' + | j >= top -> {-# SCC "unstream/resize" #-} do + let top' = (top + 1) `shiftL` 1 + arr' <- A.new top' + A.copyM arr' 0 arr 0 top + outer arr' top' s i + | otherwise -> do d <- unsafeWrite arr i x + loop s' (i+d) + where j | ord x < 0x10000 = i + | otherwise = i + 1 + outer arr0 mlen s0 0 +{-# INLINE [0] unstream #-} +{-# RULES "STREAM stream/unstream fusion" forall s. stream (unstream s) = s #-} + + +-- ---------------------------------------------------------------------------- +-- * Basic stream functions + +length :: Stream Char -> Int +length = S.lengthI +{-# INLINE[0] length #-} + +-- | /O(n)/ Reverse the characters of a string. 
+reverse :: Stream Char -> Text +reverse (Stream next s len0) + | isEmpty len0 = I.empty + | otherwise = I.text arr off' len' + where + len0' = upperBound 4 (larger len0 4) + (arr, (off', len')) = A.run2 (A.new len0' >>= loop s (len0'-1) len0') + loop !s0 !i !len marr = + case next s0 of + Done -> return (marr, (j, len-j)) + where j = i + 1 + Skip s1 -> loop s1 i len marr + Yield x s1 | i < least -> {-# SCC "reverse/resize" #-} do + let newLen = len `shiftL` 1 + marr' <- A.new newLen + A.copyM marr' (newLen-len) marr 0 len + write s1 (len+i) newLen marr' + | otherwise -> write s1 i len marr + where n = ord x + least | n < 0x10000 = 0 + | otherwise = 1 + m = n - 0x10000 + lo = fromIntegral $ (m `shiftR` 10) + 0xD800 + hi = fromIntegral $ (m .&. 0x3FF) + 0xDC00 + write t j l mar + | n < 0x10000 = do + A.unsafeWrite mar j (fromIntegral n) + loop t (j-1) l mar + | otherwise = do + A.unsafeWrite mar (j-1) lo + A.unsafeWrite mar j hi + loop t (j-2) l mar +{-# INLINE [0] reverse #-} + +-- | /O(n)/ Perform the equivalent of 'scanr' over a list, only with +-- the input and result reversed. +reverseScanr :: (Char -> Char -> Char) -> Char -> Stream Char -> Stream Char +reverseScanr f z0 (Stream next0 s0 len) = Stream next (Scan1 z0 s0) (len+1) -- HINT maybe too low + where + {-# INLINE next #-} + next (Scan1 z s) = Yield z (Scan2 z s) + next (Scan2 z s) = case next0 s of + Yield x s' -> let !x' = f x z + in Yield x' (Scan2 x' s') + Skip s' -> Skip (Scan2 z s') + Done -> Done +{-# INLINE reverseScanr #-} + +-- | /O(n)/ Like 'unfoldr', 'unfoldrN' builds a stream from a seed +-- value. However, the length of the result is limited by the +-- first argument to 'unfoldrN'. This function is more efficient than +-- 'unfoldr' when the length of the result is known. 
+unfoldrN :: Int -> (a -> Maybe (Char,a)) -> a -> Stream Char +unfoldrN n = S.unfoldrNI n +{-# INLINE [0] unfoldrN #-} + +------------------------------------------------------------------------------- +-- ** Indexing streams + +-- | /O(n)/ stream index (subscript) operator, starting from 0. +index :: Stream Char -> Int -> Char +index = S.indexI +{-# INLINE [0] index #-} + +-- | The 'findIndex' function takes a predicate and a stream and +-- returns the index of the first element in the stream +-- satisfying the predicate. +findIndex :: (Char -> Bool) -> Stream Char -> Maybe Int +findIndex = S.findIndexI +{-# INLINE [0] findIndex #-} + +-- | /O(n)/ The 'count' function returns the number of times the query +-- element appears in the given stream. +countChar :: Char -> Stream Char -> Int +countChar = S.countCharI +{-# INLINE [0] countChar #-} + +-- | /O(n)/ Like a combination of 'map' and 'foldl''. Applies a +-- function to each element of a 'Text', passing an accumulating +-- parameter from left to right, and returns a final 'Text'. 
+mapAccumL :: (a -> Char -> (a,Char)) -> a -> Stream Char -> (a, Text) +mapAccumL f z0 (Stream next0 s0 len) = (nz, I.text na 0 nl) + where + (na,(nz,nl)) = A.run2 (A.new mlen >>= \arr -> outer arr mlen z0 s0 0) + where mlen = upperBound 4 len + outer arr top = loop + where + loop !z !s !i = + case next0 s of + Done -> return (arr, (z,i)) + Skip s' -> loop z s' i + Yield x s' + | j >= top -> {-# SCC "mapAccumL/resize" #-} do + let top' = (top + 1) `shiftL` 1 + arr' <- A.new top' + A.copyM arr' 0 arr 0 top + outer arr' top' z s i + | otherwise -> do d <- unsafeWrite arr i c + loop z' s' (i+d) + where (z',c) = f z x + j | ord c < 0x10000 = i + | otherwise = i + 1 +{-# INLINE [0] mapAccumL #-} diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/IO.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/IO.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/IO.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/IO.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,166 @@ +{-# LANGUAGE BangPatterns, CPP, RecordWildCards #-} +-- | +-- Module : Data.Text.Internal.IO +-- Copyright : (c) 2009, 2010 Bryan O'Sullivan, +-- (c) 2009 Simon Marlow +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : GHC +-- +-- /Warning/: this is an internal module, and does not have a stable +-- API or name. Functions in this module may not check or enforce +-- preconditions expected by public modules. Use at your own risk! +-- +-- Low-level support for text I\/O. 
+ +module Data.Text.Internal.IO + ( + hGetLineWith + , readChunk + ) where + +import qualified Control.Exception as E +import Data.IORef (readIORef, writeIORef) +import Data.Text (Text) +import Data.Text.Internal.Fusion (unstream) +import Data.Text.Internal.Fusion.Types (Step(..), Stream(..)) +import Data.Text.Internal.Fusion.Size (exactSize, maxSize) +import Data.Text.Unsafe (inlinePerformIO) +import Foreign.Storable (peekElemOff) +import GHC.IO.Buffer (Buffer(..), CharBuffer, RawCharBuffer, bufferAdjustL, + bufferElems, charSize, isEmptyBuffer, readCharBuf, + withRawBuffer, writeCharBuf) +import GHC.IO.Handle.Internals (ioe_EOF, readTextDevice, wantReadableHandle_) +import GHC.IO.Handle.Types (Handle__(..), Newline(..)) +import System.IO (Handle) +import System.IO.Error (isEOFError) +import qualified Data.Text as T + +-- | Read a single line of input from a handle, constructing a list of +-- decoded chunks as we go. When we're done, transform them into the +-- destination type. +hGetLineWith :: ([Text] -> t) -> Handle -> IO t +hGetLineWith f h = wantReadableHandle_ "hGetLine" h go + where + go hh@Handle__{..} = readIORef haCharBuffer >>= fmap f . hGetLineLoop hh [] + +hGetLineLoop :: Handle__ -> [Text] -> CharBuffer -> IO [Text] +hGetLineLoop hh@Handle__{..} = go where + go ts buf@Buffer{ bufL=r0, bufR=w, bufRaw=raw0 } = do + let findEOL raw r | r == w = return (False, w) + | otherwise = do + (c,r') <- readCharBuf raw r + if c == '\n' + then return (True, r) + else findEOL raw r' + (eol, off) <- findEOL raw0 r0 + (t,r') <- if haInputNL == CRLF + then unpack_nl raw0 r0 off + else do t <- unpack raw0 r0 off + return (t,off) + if eol + then do writeIORef haCharBuffer (bufferAdjustL (off+1) buf) + return $ reverse (t:ts) + else do + let buf1 = bufferAdjustL r' buf + maybe_buf <- maybeFillReadBuffer hh buf1 + case maybe_buf of + -- Nothing indicates we caught an EOF, and we may have a + -- partial line to return. + Nothing -> do + -- we reached EOF. 
There might be a lone \r left + -- in the buffer, so check for that and + -- append it to the line if necessary. + let pre | isEmptyBuffer buf1 = T.empty + | otherwise = T.singleton '\r' + writeIORef haCharBuffer buf1{ bufL=0, bufR=0 } + let str = reverse . filter (not . T.null) $ pre:t:ts + if null str + then ioe_EOF + else return str + Just new_buf -> go (t:ts) new_buf + +-- This function is lifted almost verbatim from GHC.IO.Handle.Text. +maybeFillReadBuffer :: Handle__ -> CharBuffer -> IO (Maybe CharBuffer) +maybeFillReadBuffer handle_ buf + = E.catch (Just `fmap` getSomeCharacters handle_ buf) $ \e -> + if isEOFError e + then return Nothing + else ioError e + +unpack :: RawCharBuffer -> Int -> Int -> IO Text +unpack !buf !r !w + | charSize /= 4 = sizeError "unpack" + | r >= w = return T.empty + | otherwise = withRawBuffer buf go + where + go pbuf = return $! unstream (Stream next r (exactSize (w-r))) + where + next !i | i >= w = Done + | otherwise = Yield (ix i) (i+1) + ix i = inlinePerformIO $ peekElemOff pbuf i + +unpack_nl :: RawCharBuffer -> Int -> Int -> IO (Text, Int) +unpack_nl !buf !r !w + | charSize /= 4 = sizeError "unpack_nl" + | r >= w = return (T.empty, 0) + | otherwise = withRawBuffer buf $ go + where + go pbuf = do + let !t = unstream (Stream next r (maxSize (w-r))) + w' = w - 1 + return $ if ix w' == '\r' + then (t,w') + else (t,w) + where + next !i | i >= w = Done + | c == '\r' = let i' = i + 1 + in if i' < w + then if ix i' == '\n' + then Yield '\n' (i+2) + else Yield '\n' i' + else Done + | otherwise = Yield c (i+1) + where c = ix i + ix i = inlinePerformIO $ peekElemOff pbuf i + +-- This function is completely lifted from GHC.IO.Handle.Text. 
+getSomeCharacters :: Handle__ -> CharBuffer -> IO CharBuffer +getSomeCharacters handle_@Handle__{..} buf@Buffer{..} = + case bufferElems buf of + -- buffer empty: read some more + 0 -> {-# SCC "readTextDevice" #-} readTextDevice handle_ buf + + -- if the buffer has a single '\r' in it and we're doing newline + -- translation: read some more + 1 | haInputNL == CRLF -> do + (c,_) <- readCharBuf bufRaw bufL + if c == '\r' + then do -- shuffle the '\r' to the beginning. This is only safe + -- if we're about to call readTextDevice, otherwise it + -- would mess up flushCharBuffer. + -- See [note Buffer Flushing], GHC.IO.Handle.Types + _ <- writeCharBuf bufRaw 0 '\r' + let buf' = buf{ bufL=0, bufR=1 } + readTextDevice handle_ buf' + else do + return buf + + -- buffer has some chars in it already: just return it + _otherwise -> {-# SCC "otherwise" #-} return buf + +-- | Read a single chunk of strict text from a buffer. Used by both +-- the strict and lazy implementations of hGetContents. +readChunk :: Handle__ -> CharBuffer -> IO Text +readChunk hh@Handle__{..} buf = do + buf'@Buffer{..} <- getSomeCharacters hh buf + (t,r) <- if haInputNL == CRLF + then unpack_nl bufRaw bufL bufR + else do t <- unpack bufRaw bufL bufR + return (t,bufR) + writeIORef haCharBuffer (bufferAdjustL r buf') + return t + +sizeError :: String -> a +sizeError loc = error $ "Data.Text.IO." 
++ loc ++ ": bad internal buffer size" diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Lazy/Encoding/Fusion.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Lazy/Encoding/Fusion.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Lazy/Encoding/Fusion.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Lazy/Encoding/Fusion.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,324 @@ +{-# LANGUAGE BangPatterns, CPP, Rank2Types #-} + +-- | +-- Module : Data.Text.Lazy.Encoding.Fusion +-- Copyright : (c) 2009, 2010 Bryan O'Sullivan +-- +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : portable +-- +-- /Warning/: this is an internal module, and does not have a stable +-- API or name. Functions in this module may not check or enforce +-- preconditions expected by public modules. Use at your own risk! +-- +-- Fusible 'Stream'-oriented functions for converting between lazy +-- 'Text' and several common encodings. 
+ +module Data.Text.Internal.Lazy.Encoding.Fusion + ( + -- * Streaming + -- streamASCII + streamUtf8 + , streamUtf16LE + , streamUtf16BE + , streamUtf32LE + , streamUtf32BE + + -- * Unstreaming + , unstream + + , module Data.Text.Internal.Encoding.Fusion.Common + ) where + +import Data.ByteString.Lazy.Internal (ByteString(..), defaultChunkSize) +import qualified Data.ByteString as B +import qualified Data.ByteString.Unsafe as B +import Data.Text.Internal.Encoding.Fusion.Common +import Data.Text.Encoding.Error +import Data.Text.Internal.Fusion (Step(..), Stream(..)) +import Data.Text.Internal.Fusion.Size +import Data.Text.Internal.Unsafe.Char (unsafeChr, unsafeChr8, unsafeChr32) +import Data.Text.Internal.Unsafe.Shift (shiftL) +import Data.Word (Word8, Word16, Word32) +import qualified Data.Text.Internal.Encoding.Utf8 as U8 +import qualified Data.Text.Internal.Encoding.Utf16 as U16 +import qualified Data.Text.Internal.Encoding.Utf32 as U32 +import Data.Text.Unsafe (unsafeDupablePerformIO) +import Foreign.ForeignPtr (withForeignPtr, ForeignPtr) +import Foreign.Storable (pokeByteOff) +import Data.ByteString.Internal (mallocByteString, memcpy) +#if defined(ASSERTS) +import Control.Exception (assert) +#endif +import qualified Data.ByteString.Internal as B + +data S = S0 + | S1 {-# UNPACK #-} !Word8 + | S2 {-# UNPACK #-} !Word8 {-# UNPACK #-} !Word8 + | S3 {-# UNPACK #-} !Word8 {-# UNPACK #-} !Word8 {-# UNPACK #-} !Word8 + | S4 {-# UNPACK #-} !Word8 {-# UNPACK #-} !Word8 {-# UNPACK #-} !Word8 {-# UNPACK #-} !Word8 + +data T = T !ByteString !S {-# UNPACK #-} !Int + +-- | /O(n)/ Convert a lazy 'ByteString' into a 'Stream Char', using +-- UTF-8 encoding. 
+streamUtf8 :: OnDecodeError -> ByteString -> Stream Char +streamUtf8 onErr bs0 = Stream next (T bs0 S0 0) unknownSize + where + next (T bs@(Chunk ps _) S0 i) + | i < len && U8.validate1 a = + Yield (unsafeChr8 a) (T bs S0 (i+1)) + | i + 1 < len && U8.validate2 a b = + Yield (U8.chr2 a b) (T bs S0 (i+2)) + | i + 2 < len && U8.validate3 a b c = + Yield (U8.chr3 a b c) (T bs S0 (i+3)) + | i + 3 < len && U8.validate4 a b c d = + Yield (U8.chr4 a b c d) (T bs S0 (i+4)) + where len = B.length ps + a = B.unsafeIndex ps i + b = B.unsafeIndex ps (i+1) + c = B.unsafeIndex ps (i+2) + d = B.unsafeIndex ps (i+3) + next st@(T bs s i) = + case s of + S1 a | U8.validate1 a -> Yield (unsafeChr8 a) es + S2 a b | U8.validate2 a b -> Yield (U8.chr2 a b) es + S3 a b c | U8.validate3 a b c -> Yield (U8.chr3 a b c) es + S4 a b c d | U8.validate4 a b c d -> Yield (U8.chr4 a b c d) es + _ -> consume st + where es = T bs S0 i + consume (T bs@(Chunk ps rest) s i) + | i >= B.length ps = consume (T rest s 0) + | otherwise = + case s of + S0 -> next (T bs (S1 x) (i+1)) + S1 a -> next (T bs (S2 a x) (i+1)) + S2 a b -> next (T bs (S3 a b x) (i+1)) + S3 a b c -> next (T bs (S4 a b c x) (i+1)) + S4 a b c d -> decodeError "streamUtf8" "UTF-8" onErr (Just a) + (T bs (S3 b c d) (i+1)) + where x = B.unsafeIndex ps i + consume (T Empty S0 _) = Done + consume st = decodeError "streamUtf8" "UTF-8" onErr Nothing st +{-# INLINE [0] streamUtf8 #-} + +-- | /O(n)/ Convert a 'ByteString' into a 'Stream Char', using little +-- endian UTF-16 encoding. 
+streamUtf16LE :: OnDecodeError -> ByteString -> Stream Char +streamUtf16LE onErr bs0 = Stream next (T bs0 S0 0) unknownSize + where + next (T bs@(Chunk ps _) S0 i) + | i + 1 < len && U16.validate1 x1 = + Yield (unsafeChr x1) (T bs S0 (i+2)) + | i + 3 < len && U16.validate2 x1 x2 = + Yield (U16.chr2 x1 x2) (T bs S0 (i+4)) + where len = B.length ps + x1 = c (idx i) (idx (i + 1)) + x2 = c (idx (i + 2)) (idx (i + 3)) + c w1 w2 = w1 + (w2 `shiftL` 8) + idx = fromIntegral . B.unsafeIndex ps :: Int -> Word16 + next st@(T bs s i) = + case s of + S2 w1 w2 | U16.validate1 (c w1 w2) -> + Yield (unsafeChr (c w1 w2)) es + S4 w1 w2 w3 w4 | U16.validate2 (c w1 w2) (c w3 w4) -> + Yield (U16.chr2 (c w1 w2) (c w3 w4)) es + _ -> consume st + where es = T bs S0 i + c :: Word8 -> Word8 -> Word16 + c w1 w2 = fromIntegral w1 + (fromIntegral w2 `shiftL` 8) + consume (T bs@(Chunk ps rest) s i) + | i >= B.length ps = consume (T rest s 0) + | otherwise = + case s of + S0 -> next (T bs (S1 x) (i+1)) + S1 w1 -> next (T bs (S2 w1 x) (i+1)) + S2 w1 w2 -> next (T bs (S3 w1 w2 x) (i+1)) + S3 w1 w2 w3 -> next (T bs (S4 w1 w2 w3 x) (i+1)) + S4 w1 w2 w3 w4 -> decodeError "streamUtf16LE" "UTF-16LE" onErr (Just w1) + (T bs (S3 w2 w3 w4) (i+1)) + where x = B.unsafeIndex ps i + consume (T Empty S0 _) = Done + consume st = decodeError "streamUtf16LE" "UTF-16LE" onErr Nothing st +{-# INLINE [0] streamUtf16LE #-} + +-- | /O(n)/ Convert a 'ByteString' into a 'Stream Char', using big +-- endian UTF-16 encoding. +streamUtf16BE :: OnDecodeError -> ByteString -> Stream Char +streamUtf16BE onErr bs0 = Stream next (T bs0 S0 0) unknownSize + where + next (T bs@(Chunk ps _) S0 i) + | i + 1 < len && U16.validate1 x1 = + Yield (unsafeChr x1) (T bs S0 (i+2)) + | i + 3 < len && U16.validate2 x1 x2 = + Yield (U16.chr2 x1 x2) (T bs S0 (i+4)) + where len = B.length ps + x1 = c (idx i) (idx (i + 1)) + x2 = c (idx (i + 2)) (idx (i + 3)) + c w1 w2 = (w1 `shiftL` 8) + w2 + idx = fromIntegral . 
B.unsafeIndex ps :: Int -> Word16 + next st@(T bs s i) = + case s of + S2 w1 w2 | U16.validate1 (c w1 w2) -> + Yield (unsafeChr (c w1 w2)) es + S4 w1 w2 w3 w4 | U16.validate2 (c w1 w2) (c w3 w4) -> + Yield (U16.chr2 (c w1 w2) (c w3 w4)) es + _ -> consume st + where es = T bs S0 i + c :: Word8 -> Word8 -> Word16 + c w1 w2 = (fromIntegral w1 `shiftL` 8) + fromIntegral w2 + consume (T bs@(Chunk ps rest) s i) + | i >= B.length ps = consume (T rest s 0) + | otherwise = + case s of + S0 -> next (T bs (S1 x) (i+1)) + S1 w1 -> next (T bs (S2 w1 x) (i+1)) + S2 w1 w2 -> next (T bs (S3 w1 w2 x) (i+1)) + S3 w1 w2 w3 -> next (T bs (S4 w1 w2 w3 x) (i+1)) + S4 w1 w2 w3 w4 -> decodeError "streamUtf16BE" "UTF-16BE" onErr (Just w1) + (T bs (S3 w2 w3 w4) (i+1)) + where x = B.unsafeIndex ps i + consume (T Empty S0 _) = Done + consume st = decodeError "streamUtf16BE" "UTF-16BE" onErr Nothing st +{-# INLINE [0] streamUtf16BE #-} + +-- | /O(n)/ Convert a 'ByteString' into a 'Stream Char', using big +-- endian UTF-32 encoding. +streamUtf32BE :: OnDecodeError -> ByteString -> Stream Char +streamUtf32BE onErr bs0 = Stream next (T bs0 S0 0) unknownSize + where + next (T bs@(Chunk ps _) S0 i) + | i + 3 < len && U32.validate x = + Yield (unsafeChr32 x) (T bs S0 (i+4)) + where len = B.length ps + x = shiftL x1 24 + shiftL x2 16 + shiftL x3 8 + x4 + x1 = idx i + x2 = idx (i+1) + x3 = idx (i+2) + x4 = idx (i+3) + idx = fromIntegral . 
B.unsafeIndex ps :: Int -> Word32 + next st@(T bs s i) = + case s of + S4 w1 w2 w3 w4 | U32.validate (c w1 w2 w3 w4) -> + Yield (unsafeChr32 (c w1 w2 w3 w4)) es + _ -> consume st + where es = T bs S0 i + c :: Word8 -> Word8 -> Word8 -> Word8 -> Word32 + c w1 w2 w3 w4 = shifted + where + shifted = shiftL x1 24 + shiftL x2 16 + shiftL x3 8 + x4 + x1 = fromIntegral w1 + x2 = fromIntegral w2 + x3 = fromIntegral w3 + x4 = fromIntegral w4 + consume (T bs@(Chunk ps rest) s i) + | i >= B.length ps = consume (T rest s 0) + | otherwise = + case s of + S0 -> next (T bs (S1 x) (i+1)) + S1 w1 -> next (T bs (S2 w1 x) (i+1)) + S2 w1 w2 -> next (T bs (S3 w1 w2 x) (i+1)) + S3 w1 w2 w3 -> next (T bs (S4 w1 w2 w3 x) (i+1)) + S4 w1 w2 w3 w4 -> decodeError "streamUtf32BE" "UTF-32BE" onErr (Just w1) + (T bs (S3 w2 w3 w4) (i+1)) + where x = B.unsafeIndex ps i + consume (T Empty S0 _) = Done + consume st = decodeError "streamUtf32BE" "UTF-32BE" onErr Nothing st +{-# INLINE [0] streamUtf32BE #-} + +-- | /O(n)/ Convert a 'ByteString' into a 'Stream Char', using little +-- endian UTF-32 encoding. +streamUtf32LE :: OnDecodeError -> ByteString -> Stream Char +streamUtf32LE onErr bs0 = Stream next (T bs0 S0 0) unknownSize + where + next (T bs@(Chunk ps _) S0 i) + | i + 3 < len && U32.validate x = + Yield (unsafeChr32 x) (T bs S0 (i+4)) + where len = B.length ps + x = shiftL x4 24 + shiftL x3 16 + shiftL x2 8 + x1 + x1 = idx i + x2 = idx (i+1) + x3 = idx (i+2) + x4 = idx (i+3) + idx = fromIntegral . 
B.unsafeIndex ps :: Int -> Word32 + next st@(T bs s i) = + case s of + S4 w1 w2 w3 w4 | U32.validate (c w1 w2 w3 w4) -> + Yield (unsafeChr32 (c w1 w2 w3 w4)) es + _ -> consume st + where es = T bs S0 i + c :: Word8 -> Word8 -> Word8 -> Word8 -> Word32 + c w1 w2 w3 w4 = shifted + where + shifted = shiftL x4 24 + shiftL x3 16 + shiftL x2 8 + x1 + x1 = fromIntegral w1 + x2 = fromIntegral w2 + x3 = fromIntegral w3 + x4 = fromIntegral w4 + consume (T bs@(Chunk ps rest) s i) + | i >= B.length ps = consume (T rest s 0) + | otherwise = + case s of + S0 -> next (T bs (S1 x) (i+1)) + S1 w1 -> next (T bs (S2 w1 x) (i+1)) + S2 w1 w2 -> next (T bs (S3 w1 w2 x) (i+1)) + S3 w1 w2 w3 -> next (T bs (S4 w1 w2 w3 x) (i+1)) + S4 w1 w2 w3 w4 -> decodeError "streamUtf32LE" "UTF-32LE" onErr (Just w1) + (T bs (S3 w2 w3 w4) (i+1)) + where x = B.unsafeIndex ps i + consume (T Empty S0 _) = Done + consume st = decodeError "streamUtf32LE" "UTF-32LE" onErr Nothing st +{-# INLINE [0] streamUtf32LE #-} + +-- | /O(n)/ Convert a 'Stream' 'Word8' to a lazy 'ByteString'. +unstreamChunks :: Int -> Stream Word8 -> ByteString +unstreamChunks chunkSize (Stream next s0 len0) = chunk s0 (upperBound 4 len0) + where chunk s1 len1 = unsafeDupablePerformIO $ do + let len = max 4 (min len1 chunkSize) + mallocByteString len >>= loop len 0 s1 + where + loop !n !off !s fp = case next s of + Done | off == 0 -> return Empty + | otherwise -> return $! Chunk (trimUp fp off) Empty + Skip s' -> loop n off s' fp + Yield x s' + | off == chunkSize -> do + let !newLen = n - off + return $! 
Chunk (trimUp fp off) (chunk s newLen) + | off == n -> realloc fp n off s' x + | otherwise -> do + withForeignPtr fp $ \p -> pokeByteOff p off x + loop n (off+1) s' fp + {-# NOINLINE realloc #-} + realloc fp n off s x = do + let n' = min (n+n) chunkSize + fp' <- copy0 fp n n' + withForeignPtr fp' $ \p -> pokeByteOff p off x + loop n' (off+1) s fp' + trimUp fp off = B.PS fp 0 off + copy0 :: ForeignPtr Word8 -> Int -> Int -> IO (ForeignPtr Word8) + copy0 !src !srcLen !destLen = +#if defined(ASSERTS) + assert (srcLen <= destLen) $ +#endif + do + dest <- mallocByteString destLen + withForeignPtr src $ \src' -> + withForeignPtr dest $ \dest' -> + memcpy dest' src' (fromIntegral srcLen) + return dest + +-- | /O(n)/ Convert a 'Stream' 'Word8' to a lazy 'ByteString'. +unstream :: Stream Word8 -> ByteString +unstream = unstreamChunks defaultChunkSize + +decodeError :: forall s. String -> String -> OnDecodeError -> Maybe Word8 + -> s -> Step s Char +decodeError func kind onErr mb i = + case onErr desc mb of + Nothing -> Skip i + Just c -> Yield c i + where desc = "Data.Text.Lazy.Encoding.Fusion." ++ func ++ ": Invalid " ++ + kind ++ " stream" diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Lazy/Fusion.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Lazy/Fusion.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Lazy/Fusion.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Lazy/Fusion.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,120 @@ +{-# LANGUAGE BangPatterns #-} +-- | +-- Module : Data.Text.Lazy.Fusion +-- Copyright : (c) 2009, 2010 Bryan O'Sullivan +-- +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : GHC +-- +-- /Warning/: this is an internal module, and does not have a stable +-- API or name. Functions in this module may not check or enforce +-- preconditions expected by public modules. 
Use at your own risk! +-- +-- Core stream fusion functionality for text. + +module Data.Text.Internal.Lazy.Fusion + ( + stream + , unstream + , unstreamChunks + , length + , unfoldrN + , index + , countChar + ) where + +import Prelude hiding (length) +import qualified Data.Text.Internal.Fusion.Common as S +import Control.Monad.ST (runST) +import Data.Text.Internal.Fusion.Types +import Data.Text.Internal.Fusion.Size (isEmpty, unknownSize) +import Data.Text.Internal.Lazy +import qualified Data.Text.Internal as I +import qualified Data.Text.Array as A +import Data.Text.Internal.Unsafe.Char (unsafeWrite) +import Data.Text.Internal.Unsafe.Shift (shiftL) +import Data.Text.Unsafe (Iter(..), iter) +import Data.Int (Int64) + +default(Int64) + +-- | /O(n)/ Convert a 'Text' into a 'Stream Char'. +stream :: Text -> Stream Char +stream text = Stream next (text :*: 0) unknownSize + where + next (Empty :*: _) = Done + next (txt@(Chunk t@(I.Text _ _ len) ts) :*: i) + | i >= len = next (ts :*: 0) + | otherwise = Yield c (txt :*: i+d) + where Iter c d = iter t i +{-# INLINE [0] stream #-} + +-- | /O(n)/ Convert a 'Stream Char' into a 'Text', using the given +-- chunk size. 
+unstreamChunks :: Int -> Stream Char -> Text +unstreamChunks !chunkSize (Stream next s0 len0) + | isEmpty len0 = Empty + | otherwise = outer s0 + where + outer so = {-# SCC "unstreamChunks/outer" #-} + case next so of + Done -> Empty + Skip s' -> outer s' + Yield x s' -> runST $ do + a <- A.new unknownLength + unsafeWrite a 0 x >>= inner a unknownLength s' + where unknownLength = 4 + where + inner marr !len s !i + | i + 1 >= chunkSize = finish marr i s + | i + 1 >= len = {-# SCC "unstreamChunks/resize" #-} do + let newLen = min (len `shiftL` 1) chunkSize + marr' <- A.new newLen + A.copyM marr' 0 marr 0 len + inner marr' newLen s i + | otherwise = + {-# SCC "unstreamChunks/inner" #-} + case next s of + Done -> finish marr i s + Skip s' -> inner marr len s' i + Yield x s' -> do d <- unsafeWrite marr i x + inner marr len s' (i+d) + finish marr len s' = do + arr <- A.unsafeFreeze marr + return (I.Text arr 0 len `Chunk` outer s') +{-# INLINE [0] unstreamChunks #-} + +-- | /O(n)/ Convert a 'Stream Char' into a 'Text', using +-- 'defaultChunkSize'. +unstream :: Stream Char -> Text +unstream = unstreamChunks defaultChunkSize +{-# INLINE [0] unstream #-} + +-- | /O(n)/ Returns the number of characters in a text. +length :: Stream Char -> Int64 +length = S.lengthI +{-# INLINE[0] length #-} + +{-# RULES "LAZY STREAM stream/unstream fusion" forall s. + stream (unstream s) = s #-} + +-- | /O(n)/ Like 'unfoldr', 'unfoldrN' builds a stream from a seed +-- value. However, the length of the result is limited by the +-- first argument to 'unfoldrN'. This function is more efficient than +-- 'unfoldr' when the length of the result is known. +unfoldrN :: Int64 -> (a -> Maybe (Char,a)) -> a -> Stream Char +unfoldrN n = S.unfoldrNI n +{-# INLINE [0] unfoldrN #-} + +-- | /O(n)/ stream index (subscript) operator, starting from 0. 
+index :: Stream Char -> Int64 -> Char +index = S.indexI +{-# INLINE [0] index #-} + +-- | /O(n)/ The 'count' function returns the number of times the query +-- element appears in the given stream. +countChar :: Char -> Stream Char -> Int64 +countChar = S.countCharI +{-# INLINE [0] countChar #-} diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Lazy/Search.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Lazy/Search.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Lazy/Search.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Lazy/Search.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,134 @@ +{-# LANGUAGE BangPatterns, ScopedTypeVariables #-} + +-- | +-- Module : Data.Text.Lazy.Search +-- Copyright : (c) 2009, 2010 Bryan O'Sullivan +-- +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : GHC +-- +-- /Warning/: this is an internal module, and does not have a stable +-- API or name. Functions in this module may not check or enforce +-- preconditions expected by public modules. Use at your own risk! +-- +-- Fast substring search for lazy 'Text', based on work by Boyer, +-- Moore, Horspool, Sunday, and Lundh. Adapted from the strict +-- implementation. + +module Data.Text.Internal.Lazy.Search + ( + indices + ) where + +import qualified Data.Text.Array as A +import Data.Int (Int64) +import Data.Word (Word16, Word64) +import qualified Data.Text.Internal as T +import Data.Text.Internal.Fusion.Types (PairS(..)) +import Data.Text.Internal.Lazy (Text(..), foldlChunks) +import Data.Bits ((.|.), (.&.)) +import Data.Text.Internal.Unsafe.Shift (shiftL) + +-- | /O(n+m)/ Find the offsets of all non-overlapping indices of +-- @needle@ within @haystack@. +-- +-- This function is strict in @needle@, and lazy (as far as possible) +-- in the chunks of @haystack@. 
+-- +-- In (unlikely) bad cases, this algorithm's complexity degrades +-- towards /O(n*m)/. +indices :: Text -- ^ Substring to search for (@needle@) + -> Text -- ^ Text to search in (@haystack@) + -> [Int64] +indices needle@(Chunk n ns) _haystack@(Chunk k ks) + | nlen <= 0 = [] + | nlen == 1 = indicesOne (nindex 0) 0 k ks + | otherwise = advance k ks 0 0 + where + advance x@(T.Text _ _ l) xs = scan + where + scan !g !i + | i >= m = case xs of + Empty -> [] + Chunk y ys -> advance y ys g (i-m) + | lackingHay (i + nlen) x xs = [] + | c == z && candidateMatch 0 = g : scan (g+nlen) (i+nlen) + | otherwise = scan (g+delta) (i+delta) + where + m = fromIntegral l + c = hindex (i + nlast) + delta | nextInPattern = nlen + 1 + | c == z = skip + 1 + | otherwise = 1 + nextInPattern = mask .&. swizzle (hindex (i+nlen)) == 0 + candidateMatch !j + | j >= nlast = True + | hindex (i+j) /= nindex j = False + | otherwise = candidateMatch (j+1) + hindex = index x xs + nlen = wordLength needle + nlast = nlen - 1 + nindex = index n ns + z = foldlChunks fin 0 needle + where fin _ (T.Text farr foff flen) = A.unsafeIndex farr (foff+flen-1) + (mask :: Word64) :*: skip = buildTable n ns 0 0 0 (nlen-2) + swizzle w = 1 `shiftL` (fromIntegral w .&. 0x3f) + buildTable (T.Text xarr xoff xlen) xs = go + where + go !(g::Int64) !i !msk !skp + | i >= xlast = case xs of + Empty -> (msk .|. swizzle z) :*: skp + Chunk y ys -> buildTable y ys g 0 msk' skp' + | otherwise = go (g+1) (i+1) msk' skp' + where c = A.unsafeIndex xarr (xoff+i) + msk' = msk .|. swizzle c + skp' | c == z = nlen - g - 2 + | otherwise = skp + xlast = xlen - 1 + -- | Check whether an attempt to index into the haystack at the + -- given offset would fail. + lackingHay q = go 0 + where + go p (T.Text _ _ l) ps = p' < q && case ps of + Empty -> True + Chunk r rs -> go p' r rs + where p' = p + fromIntegral l +indices _ _ = [] + +-- | Fast index into a partly unpacked 'Text'. 
We take into account +-- the possibility that the caller might try to access one element +-- past the end. +index :: T.Text -> Text -> Int64 -> Word16 +index (T.Text arr off len) xs !i + | j < len = A.unsafeIndex arr (off+j) + | otherwise = case xs of + Empty + -- out of bounds, but legal + | j == len -> 0 + -- should never happen, due to lackingHay above + | otherwise -> emptyError "index" + Chunk c cs -> index c cs (i-fromIntegral len) + where j = fromIntegral i + +-- | A variant of 'indices' that scans linearly for a single 'Word16'. +indicesOne :: Word16 -> Int64 -> T.Text -> Text -> [Int64] +indicesOne c = chunk + where + chunk !i (T.Text oarr ooff olen) os = go 0 + where + go h | h >= olen = case os of + Empty -> [] + Chunk y ys -> chunk (i+fromIntegral olen) y ys + | on == c = i + fromIntegral h : go (h+1) + | otherwise = go (h+1) + where on = A.unsafeIndex oarr (ooff+h) + +-- | The number of 'Word16' values in a 'Text'. +wordLength :: Text -> Int64 +wordLength = foldlChunks sumLength 0 + where sumLength i (T.Text _ _ l) = i + fromIntegral l + +emptyError :: String -> a +emptyError fun = error ("Data.Text.Lazy.Search." ++ fun ++ ": empty input") diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Lazy.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Lazy.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Lazy.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Lazy.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,119 @@ +{-# LANGUAGE BangPatterns, DeriveDataTypeable #-} +{-# OPTIONS_HADDOCK not-home #-} + +-- | +-- Module : Data.Text.Internal.Lazy +-- Copyright : (c) 2009, 2010 Bryan O'Sullivan +-- +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : GHC +-- +-- /Warning/: this is an internal module, and does not have a stable +-- API or name. 
Functions in this module may not check or enforce +-- preconditions expected by public modules. Use at your own risk! +-- +-- A module containing private 'Text' internals. This exposes the +-- 'Text' representation and low level construction functions. +-- Modules which extend the 'Text' system may need to use this module. + +module Data.Text.Internal.Lazy + ( + Text(..) + , chunk + , empty + , foldrChunks + , foldlChunks + -- * Data type invariant and abstraction functions + + -- $invariant + , strictInvariant + , lazyInvariant + , showStructure + + -- * Chunk allocation sizes + , defaultChunkSize + , smallChunkSize + , chunkOverhead + ) where + +import Data.Text () +import Data.Text.Internal.Unsafe.Shift (shiftL) +import Data.Typeable (Typeable) +import Foreign.Storable (sizeOf) +import qualified Data.Text.Internal as T + +data Text = Empty + | Chunk {-# UNPACK #-} !T.Text Text + deriving (Typeable) + +-- $invariant +-- +-- The data type invariant for lazy 'Text': Every 'Text' is either 'Empty' or +-- consists of non-null 'T.Text's. All functions must preserve this, +-- and the QC properties must check this. + +-- | Check the invariant strictly. +strictInvariant :: Text -> Bool +strictInvariant Empty = True +strictInvariant x@(Chunk (T.Text _ _ len) cs) + | len > 0 = strictInvariant cs + | otherwise = error $ "Data.Text.Lazy: invariant violation: " + ++ showStructure x + +-- | Check the invariant lazily. +lazyInvariant :: Text -> Text +lazyInvariant Empty = Empty +lazyInvariant x@(Chunk c@(T.Text _ _ len) cs) + | len > 0 = Chunk c (lazyInvariant cs) + | otherwise = error $ "Data.Text.Lazy: invariant violation: " + ++ showStructure x + +-- | Display the internal structure of a lazy 'Text'. +showStructure :: Text -> String +showStructure Empty = "Empty" +showStructure (Chunk t Empty) = "Chunk " ++ show t ++ " Empty" +showStructure (Chunk t ts) = + "Chunk " ++ show t ++ " (" ++ showStructure ts ++ ")" + +-- | Smart constructor for 'Chunk'. 
Guarantees the data type invariant. +chunk :: T.Text -> Text -> Text +{-# INLINE chunk #-} +chunk t@(T.Text _ _ len) ts | len == 0 = ts + | otherwise = Chunk t ts + +-- | Smart constructor for 'Empty'. +empty :: Text +{-# INLINE [0] empty #-} +empty = Empty + +-- | Consume the chunks of a lazy 'Text' with a natural right fold. +foldrChunks :: (T.Text -> a -> a) -> a -> Text -> a +foldrChunks f z = go + where go Empty = z + go (Chunk c cs) = f c (go cs) +{-# INLINE foldrChunks #-} + +-- | Consume the chunks of a lazy 'Text' with a strict, tail-recursive, +-- accumulating left fold. +foldlChunks :: (a -> T.Text -> a) -> a -> Text -> a +foldlChunks f z = go z + where go !a Empty = a + go !a (Chunk c cs) = go (f a c) cs +{-# INLINE foldlChunks #-} + +-- | Currently set to 16 KiB, less the memory management overhead. +defaultChunkSize :: Int +defaultChunkSize = 16384 - chunkOverhead +{-# INLINE defaultChunkSize #-} + +-- | Currently set to 128 bytes, less the memory management overhead. +smallChunkSize :: Int +smallChunkSize = 128 - chunkOverhead +{-# INLINE smallChunkSize #-} + +-- | The memory management overhead. Currently this is tuned for GHC only. 
+chunkOverhead :: Int +chunkOverhead = sizeOf (undefined :: Int) `shiftL` 1 +{-# INLINE chunkOverhead #-} diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Private.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Private.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Private.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Private.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,37 @@ +{-# LANGUAGE BangPatterns, Rank2Types, UnboxedTuples #-} + +-- | +-- Module : Data.Text.Internal.Private +-- Copyright : (c) 2011 Bryan O'Sullivan +-- +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : GHC + +module Data.Text.Internal.Private + ( + runText + , span_ + ) where + +import Control.Monad.ST (ST, runST) +import Data.Text.Internal (Text(..), text) +import Data.Text.Unsafe (Iter(..), iter) +import qualified Data.Text.Array as A + +span_ :: (Char -> Bool) -> Text -> (# Text, Text #) +span_ p t@(Text arr off len) = (# hd,tl #) + where hd = text arr off k + tl = text arr (off+k) (len-k) + !k = loop 0 + loop !i | i < len && p c = loop (i+d) + | otherwise = i + where Iter c d = iter t i +{-# INLINE span_ #-} + +runText :: (forall s. (A.MArray s -> Int -> ST s Text) -> ST s Text) -> Text +runText act = runST (act $ \ !marr !len -> do + arr <- A.unsafeFreeze marr + return $! 
text arr 0 len) +{-# INLINE runText #-} diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Read.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Read.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Read.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Read.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,62 @@ +-- | +-- Module : Data.Text.Internal.Read +-- Copyright : (c) 2014 Bryan O'Sullivan +-- +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : GHC +-- +-- Common internal functions for reading textual data. +module Data.Text.Internal.Read + ( + IReader + , IParser(..) + , T(..) + , digitToInt + , hexDigitToInt + , perhaps + ) where + +import Control.Applicative as App (Applicative(..)) +import Control.Arrow (first) +import Control.Monad (ap) +import Data.Char (ord) + +type IReader t a = t -> Either String (a,t) + +newtype IParser t a = P { + runP :: IReader t a + } + +instance Functor (IParser t) where + fmap f m = P $ fmap (first f) . 
runP m + +instance Applicative (IParser t) where + pure a = P $ \t -> Right (a,t) + {-# INLINE pure #-} + (<*>) = ap + +instance Monad (IParser t) where + return = App.pure + m >>= k = P $ \t -> case runP m t of + Left err -> Left err + Right (a,t') -> runP (k a) t' + {-# INLINE (>>=) #-} + fail msg = P $ \_ -> Left msg + +data T = T !Integer !Int + +perhaps :: a -> IParser t a -> IParser t a +perhaps def m = P $ \t -> case runP m t of + Left _ -> Right (def,t) + r@(Right _) -> r + +hexDigitToInt :: Char -> Int +hexDigitToInt c + | c >= '0' && c <= '9' = ord c - ord '0' + | c >= 'a' && c <= 'f' = ord c - (ord 'a' - 10) + | otherwise = ord c - (ord 'A' - 10) + +digitToInt :: Char -> Int +digitToInt c = ord c - ord '0' diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Search.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Search.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Search.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Search.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,89 @@ +{-# LANGUAGE BangPatterns, ScopedTypeVariables #-} + +-- | +-- Module : Data.Text.Internal.Search +-- Copyright : (c) Bryan O'Sullivan 2009 +-- +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : GHC +-- +-- Fast substring search for 'Text', based on work by Boyer, Moore, +-- Horspool, Sunday, and Lundh. +-- +-- References: +-- +-- * R. S. Boyer, J. S. Moore: A Fast String Searching Algorithm. +-- Communications of the ACM, 20, 10, 762-772 (1977) +-- +-- * R. N. Horspool: Practical Fast Searching in Strings. Software - +-- Practice and Experience 10, 501-506 (1980) +-- +-- * D. M. Sunday: A Very Fast Substring Search Algorithm. +-- Communications of the ACM, 33, 8, 132-142 (1990) +-- +-- * F. Lundh: The Fast Search Algorithm. 
+-- (2006) + +module Data.Text.Internal.Search + ( + indices + ) where + +import qualified Data.Text.Array as A +import Data.Word (Word64) +import Data.Text.Internal (Text(..)) +import Data.Bits ((.|.), (.&.)) +import Data.Text.Internal.Unsafe.Shift (shiftL) + +data T = {-# UNPACK #-} !Word64 :* {-# UNPACK #-} !Int + +-- | /O(n+m)/ Find the offsets of all non-overlapping indices of +-- @needle@ within @haystack@. The offsets returned represent +-- uncorrected indices in the low-level \"needle\" array, to which its +-- offset must be added. +-- +-- In (unlikely) bad cases, this algorithm's complexity degrades +-- towards /O(n*m)/. +indices :: Text -- ^ Substring to search for (@needle@) + -> Text -- ^ Text to search in (@haystack@) + -> [Int] +indices _needle@(Text narr noff nlen) _haystack@(Text harr hoff hlen) + | nlen == 1 = scanOne (nindex 0) + | nlen <= 0 || ldiff < 0 = [] + | otherwise = scan 0 + where + ldiff = hlen - nlen + nlast = nlen - 1 + z = nindex nlast + nindex k = A.unsafeIndex narr (noff+k) + hindex k = A.unsafeIndex harr (hoff+k) + hindex' k | k == hlen = 0 + | otherwise = A.unsafeIndex harr (hoff+k) + buildTable !i !msk !skp + | i >= nlast = (msk .|. swizzle z) :* skp + | otherwise = buildTable (i+1) (msk .|. swizzle c) skp' + where c = nindex i + skp' | c == z = nlen - i - 2 + | otherwise = skp + swizzle k = 1 `shiftL` (fromIntegral k .&. 0x3f) + scan !i + | i > ldiff = [] + | c == z && candidateMatch 0 = i : scan (i + nlen) + | otherwise = scan (i + delta) + where c = hindex (i + nlast) + candidateMatch !j + | j >= nlast = True + | hindex (i+j) /= nindex j = False + | otherwise = candidateMatch (j+1) + delta | nextInPattern = nlen + 1 + | c == z = skip + 1 + | otherwise = 1 + where nextInPattern = mask .&. 
swizzle (hindex' (i+nlen)) == 0 + !(mask :* skip) = buildTable 0 0 (nlen-2) + scanOne c = loop 0 + where loop !i | i >= hlen = [] + | hindex i == c = i : loop (i+1) + | otherwise = loop (i+1) +{-# INLINE indices #-} diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Unsafe/Char.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Unsafe/Char.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Unsafe/Char.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Unsafe/Char.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,95 @@ +{-# LANGUAGE CPP, MagicHash #-} + +-- | +-- Module : Data.Text.Internal.Unsafe.Char +-- Copyright : (c) 2008, 2009 Tom Harper, +-- (c) 2009, 2010 Bryan O'Sullivan, +-- (c) 2009 Duncan Coutts +-- +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : GHC +-- +-- /Warning/: this is an internal module, and does not have a stable +-- API or name. Functions in this module may not check or enforce +-- preconditions expected by public modules. Use at your own risk! +-- +-- Fast character manipulation functions. 
+module Data.Text.Internal.Unsafe.Char + ( + ord + , unsafeChr + , unsafeChr8 + , unsafeChr32 + , unsafeWrite + -- , unsafeWriteRev + ) where + +#ifdef ASSERTS +import Control.Exception (assert) +#endif +import Control.Monad.ST (ST) +import Data.Bits ((.&.)) +import Data.Text.Internal.Unsafe.Shift (shiftR) +import GHC.Exts (Char(..), Int(..), chr#, ord#, word2Int#) +import GHC.Word (Word8(..), Word16(..), Word32(..)) +import qualified Data.Text.Array as A + +ord :: Char -> Int +ord (C# c#) = I# (ord# c#) +{-# INLINE ord #-} + +unsafeChr :: Word16 -> Char +unsafeChr (W16# w#) = C# (chr# (word2Int# w#)) +{-# INLINE unsafeChr #-} + +unsafeChr8 :: Word8 -> Char +unsafeChr8 (W8# w#) = C# (chr# (word2Int# w#)) +{-# INLINE unsafeChr8 #-} + +unsafeChr32 :: Word32 -> Char +unsafeChr32 (W32# w#) = C# (chr# (word2Int# w#)) +{-# INLINE unsafeChr32 #-} + +-- | Write a character into the array at the given offset. Returns +-- the number of 'Word16's written. +unsafeWrite :: A.MArray s -> Int -> Char -> ST s Int +unsafeWrite marr i c + | n < 0x10000 = do +#if defined(ASSERTS) + assert (i >= 0) . assert (i < A.length marr) $ return () +#endif + A.unsafeWrite marr i (fromIntegral n) + return 1 + | otherwise = do +#if defined(ASSERTS) + assert (i >= 0) . assert (i < A.length marr - 1) $ return () +#endif + A.unsafeWrite marr i lo + A.unsafeWrite marr (i+1) hi + return 2 + where n = ord c + m = n - 0x10000 + lo = fromIntegral $ (m `shiftR` 10) + 0xD800 + hi = fromIntegral $ (m .&. 0x3FF) + 0xDC00 +{-# INLINE unsafeWrite #-} + +{- +unsafeWriteRev :: A.MArray s Word16 -> Int -> Char -> ST s Int +unsafeWriteRev marr i c + | n < 0x10000 = do + assert (i >= 0) . assert (i < A.length marr) $ + A.unsafeWrite marr i (fromIntegral n) + return (i-1) + | otherwise = do + assert (i >= 1) . 
assert (i < A.length marr) $ + A.unsafeWrite marr (i-1) lo + A.unsafeWrite marr i hi + return (i-2) + where n = ord c + m = n - 0x10000 + lo = fromIntegral $ (m `shiftR` 10) + 0xD800 + hi = fromIntegral $ (m .&. 0x3FF) + 0xDC00 +{-# INLINE unsafeWriteRev #-} +-} diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Unsafe/Shift.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Unsafe/Shift.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Unsafe/Shift.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Unsafe/Shift.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,72 @@ +{-# LANGUAGE MagicHash #-} + +-- | +-- Module : Data.Text.Internal.Unsafe.Shift +-- Copyright : (c) Bryan O'Sullivan 2009 +-- +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : GHC +-- +-- /Warning/: this is an internal module, and does not have a stable +-- API or name. Functions in this module may not check or enforce +-- preconditions expected by public modules. Use at your own risk! +-- +-- Fast, unchecked bit shifting functions. + +module Data.Text.Internal.Unsafe.Shift + ( + UnsafeShift(..) + ) where + +-- import qualified Data.Bits as Bits +import GHC.Base +import GHC.Word + +-- | This is a workaround for poor optimisation in GHC 6.8.2. It +-- fails to notice constant-width shifts, and adds a test and branch +-- to every shift. This imposes about a 10% performance hit. +-- +-- These functions are undefined when the amount being shifted by is +-- greater than the size in bits of a machine Int#. 
+class UnsafeShift a where + shiftL :: a -> Int -> a + shiftR :: a -> Int -> a + +instance UnsafeShift Word16 where + {-# INLINE shiftL #-} + shiftL (W16# x#) (I# i#) = W16# (narrow16Word# (x# `uncheckedShiftL#` i#)) + + {-# INLINE shiftR #-} + shiftR (W16# x#) (I# i#) = W16# (x# `uncheckedShiftRL#` i#) + +instance UnsafeShift Word32 where + {-# INLINE shiftL #-} + shiftL (W32# x#) (I# i#) = W32# (narrow32Word# (x# `uncheckedShiftL#` i#)) + + {-# INLINE shiftR #-} + shiftR (W32# x#) (I# i#) = W32# (x# `uncheckedShiftRL#` i#) + +instance UnsafeShift Word64 where + {-# INLINE shiftL #-} + shiftL (W64# x#) (I# i#) = W64# (x# `uncheckedShiftL64#` i#) + + {-# INLINE shiftR #-} + shiftR (W64# x#) (I# i#) = W64# (x# `uncheckedShiftRL64#` i#) + +instance UnsafeShift Int where + {-# INLINE shiftL #-} + shiftL (I# x#) (I# i#) = I# (x# `iShiftL#` i#) + + {-# INLINE shiftR #-} + shiftR (I# x#) (I# i#) = I# (x# `iShiftRA#` i#) + +{- +instance UnsafeShift Integer where + {-# INLINE shiftL #-} + shiftL = Bits.shiftL + + {-# INLINE shiftR #-} + shiftR = Bits.shiftR +-} diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Unsafe.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Unsafe.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal/Unsafe.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal/Unsafe.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,56 @@ +{-# LANGUAGE CPP, MagicHash, UnboxedTuples #-} +{-# OPTIONS_HADDOCK not-home #-} + +-- | +-- Module : Data.Text.Internal.Unsafe +-- Copyright : (c) 2009, 2010, 2011 Bryan O'Sullivan +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : portable +-- +-- /Warning/: this is an internal module, and does not have a stable +-- API or name. Functions in this module may not check or enforce +-- preconditions expected by public modules. Use at your own risk! 
+-- +-- A module containing /unsafe/ operations, for /very very careful/ use +-- in /heavily tested/ code. +module Data.Text.Internal.Unsafe + ( + inlineInterleaveST + , inlinePerformIO + ) where + +import GHC.ST (ST(..)) +#if defined(__GLASGOW_HASKELL__) +import GHC.IO (IO(IO)) +import GHC.Base (realWorld#) +#endif + + +-- | Just like unsafePerformIO, but we inline it. Big performance gains as +-- it exposes lots of things to further inlining. /Very unsafe/. In +-- particular, you should do no memory allocation inside an +-- 'inlinePerformIO' block. On Hugs this is just @unsafePerformIO@. +-- +{-# INLINE inlinePerformIO #-} +inlinePerformIO :: IO a -> a +#if defined(__GLASGOW_HASKELL__) +inlinePerformIO (IO m) = case m realWorld# of (# _, r #) -> r +#else +inlinePerformIO = unsafePerformIO +#endif + +-- | Allow an 'ST' computation to be deferred lazily. When passed an +-- action of type 'ST' @s@ @a@, the action will only be performed when +-- the value of @a@ is demanded. +-- +-- This function is identical to the normal unsafeInterleaveST, but is +-- inlined and hence faster. +-- +-- /Note/: This operation is highly unsafe, as it can introduce +-- externally visible non-determinism into an 'ST' action. 
+inlineInterleaveST :: ST s a -> ST s a +inlineInterleaveST (ST m) = ST $ \ s -> + let r = case m s of (# _, res #) -> res in (# s, r #) +{-# INLINE inlineInterleaveST #-} diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Internal.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Internal.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,188 @@ +{-# LANGUAGE CPP, DeriveDataTypeable, UnboxedTuples #-} +{-# OPTIONS_HADDOCK not-home #-} + +-- | +-- Module : Data.Text.Internal +-- Copyright : (c) 2008, 2009 Tom Harper, +-- (c) 2009, 2010 Bryan O'Sullivan, +-- (c) 2009 Duncan Coutts +-- +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : GHC +-- +-- A module containing private 'Text' internals. This exposes the +-- 'Text' representation and low level construction functions. +-- Modules which extend the 'Text' system may need to use this module. +-- +-- You should not use this module unless you are determined to monkey +-- with the internals, as the functions here do just about nothing to +-- preserve data invariants. You have been warned! + +#if defined(__GLASGOW_HASKELL__) && !defined(__HADDOCK__) +#include "MachDeps.h" +#endif + +module Data.Text.Internal + ( + -- * Types + -- $internals + Text(..) 
+ -- * Construction + , text + , textP + -- * Safety + , safe + -- * Code that must be here for accessibility + , empty + , empty_ + -- * Utilities + , firstf + -- * Checked multiplication + , mul + , mul32 + , mul64 + -- * Debugging + , showText + ) where + +#if defined(ASSERTS) +import Control.Exception (assert) +#endif +import Data.Bits +import Data.Int (Int32, Int64) +import Data.Text.Internal.Unsafe.Char (ord) +import Data.Typeable (Typeable) +import qualified Data.Text.Array as A + +-- | A space efficient, packed, unboxed Unicode text type. +data Text = Text + {-# UNPACK #-} !A.Array -- payload (Word16 elements) + {-# UNPACK #-} !Int -- offset (units of Word16, not Char) + {-# UNPACK #-} !Int -- length (units of Word16, not Char) + deriving (Typeable) + +-- | Smart constructor. +text_ :: A.Array -> Int -> Int -> Text +text_ arr off len = +#if defined(ASSERTS) + let c = A.unsafeIndex arr off + alen = A.length arr + in assert (len >= 0) . + assert (off >= 0) . + assert (alen == 0 || len == 0 || off < alen) . + assert (len == 0 || c < 0xDC00 || c > 0xDFFF) $ +#endif + Text arr off len +{-# INLINE text_ #-} + +-- | /O(1)/ The empty 'Text'. +empty :: Text +empty = Text A.empty 0 0 +{-# INLINE [1] empty #-} + +-- | A non-inlined version of 'empty'. +empty_ :: Text +empty_ = Text A.empty 0 0 +{-# NOINLINE empty_ #-} + +-- | Construct a 'Text' without invisibly pinning its byte array in +-- memory if its length has dwindled to zero. +text :: A.Array -> Int -> Int -> Text +text arr off len | len == 0 = empty + | otherwise = text_ arr off len +{-# INLINE text #-} + +textP :: A.Array -> Int -> Int -> Text +{-# DEPRECATED textP "Use text instead" #-} +textP = text + +-- | A useful 'show'-like function for debugging purposes. +showText :: Text -> String +showText (Text arr off len) = + "Text " ++ show (A.toList arr off len) ++ ' ' : + show off ++ ' ' : show len + +-- | Map a 'Char' to a 'Text'-safe value. 
+-- +-- UTF-16 surrogate code points are not included in the set of Unicode +-- scalar values, but are unfortunately admitted as valid 'Char' +-- values by Haskell. They cannot be represented in a 'Text'. This +-- function remaps those code points to the Unicode replacement +-- character (U+FFFD, \'�\'), and leaves other code points +-- unchanged. +safe :: Char -> Char +safe c + | ord c .&. 0x1ff800 /= 0xd800 = c + | otherwise = '\xfffd' +{-# INLINE [0] safe #-} + +-- | Apply a function to the first element of an optional pair. +firstf :: (a -> c) -> Maybe (a,b) -> Maybe (c,b) +firstf f (Just (a, b)) = Just (f a, b) +firstf _ Nothing = Nothing + +-- | Checked multiplication. Calls 'error' if the result would +-- overflow. +mul :: Int -> Int -> Int +#if WORD_SIZE_IN_BITS == 64 +mul a b = fromIntegral $ fromIntegral a `mul64` fromIntegral b +#else +mul a b = fromIntegral $ fromIntegral a `mul32` fromIntegral b +#endif +{-# INLINE mul #-} +infixl 7 `mul` + +-- | Checked multiplication. Calls 'error' if the result would +-- overflow. +mul64 :: Int64 -> Int64 -> Int64 +mul64 a b + | a >= 0 && b >= 0 = mul64_ a b + | a >= 0 = -mul64_ a (-b) + | b >= 0 = -mul64_ (-a) b + | otherwise = mul64_ (-a) (-b) +{-# INLINE mul64 #-} +infixl 7 `mul64` + +mul64_ :: Int64 -> Int64 -> Int64 +mul64_ a b + | ahi > 0 && bhi > 0 = error "overflow" + | top > 0x7fffffff = error "overflow" + | total < 0 = error "overflow" + | otherwise = total + where (# ahi, alo #) = (# a `shiftR` 32, a .&. 0xffffffff #) + (# bhi, blo #) = (# b `shiftR` 32, b .&. 0xffffffff #) + top = ahi * blo + alo * bhi + total = (top `shiftL` 32) + alo * blo +{-# INLINE mul64_ #-} + +-- | Checked multiplication. Calls 'error' if the result would +-- overflow. 
+mul32 :: Int32 -> Int32 -> Int32 +mul32 a b = case fromIntegral a * fromIntegral b of + ab | ab < min32 || ab > max32 -> error "overflow" + | otherwise -> fromIntegral ab + where min32 = -0x80000000 :: Int64 + max32 = 0x7fffffff +{-# INLINE mul32 #-} +infixl 7 `mul32` + +-- $internals +-- +-- Internally, the 'Text' type is represented as an array of 'Word16' +-- UTF-16 code units. The offset and length fields in the constructor +-- are in these units, /not/ units of 'Char'. +-- +-- Invariants that all functions must maintain: +-- +-- * Since the 'Text' type uses UTF-16 internally, it cannot represent +-- characters in the reserved surrogate code point range U+D800 to +-- U+DFFF. To maintain this invariant, the 'safe' function maps +-- 'Char' values in this range to the replacement character (U+FFFD, +-- \'�\'). +-- +-- * A leading (or \"high\") surrogate code unit (0xD800–0xDBFF) must +-- always be followed by a trailing (or \"low\") surrogate code unit +-- (0xDC00-0xDFFF). A trailing surrogate code unit must always be +-- preceded by a leading surrogate code unit. diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/IO.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/IO.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/IO.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/IO.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,338 @@ +{-# LANGUAGE BangPatterns, CPP, RecordWildCards, ScopedTypeVariables #-} +#if __GLASGOW_HASKELL__ >= 702 +{-# LANGUAGE Trustworthy #-} +#endif +-- | +-- Module : Data.Text.IO +-- Copyright : (c) 2009, 2010 Bryan O'Sullivan, +-- (c) 2009 Simon Marlow +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : GHC +-- +-- Efficient locale-sensitive support for text I\/O. +-- +-- Skip past the synopsis for some important notes on performance and +-- portability across different versions of GHC. 
+ +module Data.Text.IO + ( + -- * Performance + -- $performance + + -- * Locale support + -- $locale + -- * File-at-a-time operations + readFile + , writeFile + , appendFile + -- * Operations on handles + , hGetContents + , hGetChunk + , hGetLine + , hPutStr + , hPutStrLn + -- * Special cases for standard input and output + , interact + , getContents + , getLine + , putStr + , putStrLn + ) where + +import Data.Text (Text) +import Prelude hiding (appendFile, getContents, getLine, interact, + putStr, putStrLn, readFile, writeFile) +import System.IO (Handle, IOMode(..), hPutChar, openFile, stdin, stdout, + withFile) +import qualified Control.Exception as E +import Control.Monad (liftM2, when) +import Data.IORef (readIORef, writeIORef) +import qualified Data.Text as T +import Data.Text.Internal.Fusion (stream) +import Data.Text.Internal.Fusion.Types (Step(..), Stream(..)) +import Data.Text.Internal.IO (hGetLineWith, readChunk) +import GHC.IO.Buffer (Buffer(..), BufferState(..), CharBufElem, CharBuffer, + RawCharBuffer, emptyBuffer, isEmptyBuffer, newCharBuffer, + writeCharBuf) +import GHC.IO.Exception (IOException(ioe_type), IOErrorType(InappropriateType)) +import GHC.IO.Handle.Internals (augmentIOError, hClose_help, wantReadableHandle, + wantWritableHandle) +import GHC.IO.Handle.Text (commitBuffer') +import GHC.IO.Handle.Types (BufferList(..), BufferMode(..), Handle__(..), + HandleType(..), Newline(..)) +import System.IO (hGetBuffering, hFileSize, hSetBuffering, hTell) +import System.IO.Error (isEOFError) + +-- $performance +-- #performance# +-- +-- The functions in this module obey the runtime system's locale, +-- character set encoding, and line ending conversion settings. +-- +-- If you know in advance that you will be working with data that has +-- a specific encoding (e.g. 
UTF-8), and your application is highly +-- performance sensitive, you may find that it is faster to perform +-- I\/O with bytestrings and to encode and decode yourself than to use +-- the functions in this module. +-- +-- Whether this will hold depends on the version of GHC you are using, +-- the platform you are working on, the data you are working with, and +-- the encodings you are using, so be sure to test for yourself. + +-- | The 'readFile' function reads a file and returns the contents of +-- the file as a string. The entire file is read strictly, as with +-- 'getContents'. +readFile :: FilePath -> IO Text +readFile name = openFile name ReadMode >>= hGetContents + +-- | Write a string to a file. The file is truncated to zero length +-- before writing begins. +writeFile :: FilePath -> Text -> IO () +writeFile p = withFile p WriteMode . flip hPutStr + +-- | Write a string the end of a file. +appendFile :: FilePath -> Text -> IO () +appendFile p = withFile p AppendMode . flip hPutStr + +catchError :: String -> Handle -> Handle__ -> IOError -> IO Text +catchError caller h Handle__{..} err + | isEOFError err = do + buf <- readIORef haCharBuffer + return $ if isEmptyBuffer buf + then T.empty + else T.singleton '\r' + | otherwise = E.throwIO (augmentIOError err caller h) + +-- | /Experimental./ Read a single chunk of strict text from a +-- 'Handle'. The size of the chunk depends on the amount of input +-- currently buffered. +-- +-- This function blocks only if there is no data available, and EOF +-- has not yet been reached. Once EOF is reached, this function +-- returns an empty string instead of throwing an exception. +hGetChunk :: Handle -> IO Text +hGetChunk h = wantReadableHandle "hGetChunk" h readSingleChunk + where + readSingleChunk hh@Handle__{..} = do + buf <- readIORef haCharBuffer + t <- readChunk hh buf `E.catch` catchError "hGetChunk" h hh + return (hh, t) + +-- | Read the remaining contents of a 'Handle' as a string. 
The +-- 'Handle' is closed once the contents have been read, or if an +-- exception is thrown. +-- +-- Internally, this function reads a chunk at a time from the +-- lower-level buffering abstraction, and concatenates the chunks into +-- a single string once the entire file has been read. +-- +-- As a result, it requires approximately twice as much memory as its +-- result to construct its result. For files more than a half of +-- available RAM in size, this may result in memory exhaustion. +hGetContents :: Handle -> IO Text +hGetContents h = do + chooseGoodBuffering h + wantReadableHandle "hGetContents" h readAll + where + readAll hh@Handle__{..} = do + let readChunks = do + buf <- readIORef haCharBuffer + t <- readChunk hh buf `E.catch` catchError "hGetContents" h hh + if T.null t + then return [t] + else (t:) `fmap` readChunks + ts <- readChunks + (hh', _) <- hClose_help hh + return (hh'{haType=ClosedHandle}, T.concat ts) + +-- | Use a more efficient buffer size if we're reading in +-- block-buffered mode with the default buffer size. When we can +-- determine the size of the handle we're reading, set the buffer size +-- to that, so that we can read the entire file in one chunk. +-- Otherwise, use a buffer size of at least 16KB. +chooseGoodBuffering :: Handle -> IO () +chooseGoodBuffering h = do + bufMode <- hGetBuffering h + case bufMode of + BlockBuffering Nothing -> do + d <- E.catch (liftM2 (-) (hFileSize h) (hTell h)) $ \(e::IOException) -> + if ioe_type e == InappropriateType + then return 16384 -- faster than the 2KB default + else E.throwIO e + when (d > 0) . hSetBuffering h . BlockBuffering . Just . fromIntegral $ d + _ -> return () + +-- | Read a single line from a handle. +hGetLine :: Handle -> IO Text +hGetLine = hGetLineWith T.concat + +-- | Write a string to a handle. +hPutStr :: Handle -> Text -> IO () +-- This function is lifted almost verbatim from GHC.IO.Handle.Text. 
+hPutStr h t = do + (buffer_mode, nl) <- + wantWritableHandle "hPutStr" h $ \h_ -> do + bmode <- getSpareBuffer h_ + return (bmode, haOutputNL h_) + let str = stream t + case buffer_mode of + (NoBuffering, _) -> hPutChars h str + (LineBuffering, buf) -> writeLines h nl buf str + (BlockBuffering _, buf) + | nl == CRLF -> writeBlocksCRLF h buf str + | otherwise -> writeBlocksRaw h buf str + +hPutChars :: Handle -> Stream Char -> IO () +hPutChars h (Stream next0 s0 _len) = loop s0 + where + loop !s = case next0 s of + Done -> return () + Skip s' -> loop s' + Yield x s' -> hPutChar h x >> loop s' + +-- The following functions are largely lifted from GHC.IO.Handle.Text, +-- but adapted to a coinductive stream of data instead of an inductive +-- list. +-- +-- We have several variations of more or less the same code for +-- performance reasons. Splitting the original buffered write +-- function into line- and block-oriented versions gave us a 2.1x +-- performance improvement. Lifting out the raw/cooked newline +-- handling gave a few more percent on top. 
+ +writeLines :: Handle -> Newline -> Buffer CharBufElem -> Stream Char -> IO () +writeLines h nl buf0 (Stream next0 s0 _len) = outer s0 buf0 + where + outer s1 Buffer{bufRaw=raw, bufSize=len} = inner s1 (0::Int) + where + inner !s !n = + case next0 s of + Done -> commit n False{-no flush-} True{-release-} >> return () + Skip s' -> inner s' n + Yield x s' + | n + 1 >= len -> commit n True{-needs flush-} False >>= outer s + | x == '\n' -> do + n' <- if nl == CRLF + then do n1 <- writeCharBuf raw n '\r' + writeCharBuf raw n1 '\n' + else writeCharBuf raw n x + commit n' True{-needs flush-} False >>= outer s' + | otherwise -> writeCharBuf raw n x >>= inner s' + commit = commitBuffer h raw len + +writeBlocksCRLF :: Handle -> Buffer CharBufElem -> Stream Char -> IO () +writeBlocksCRLF h buf0 (Stream next0 s0 _len) = outer s0 buf0 + where + outer s1 Buffer{bufRaw=raw, bufSize=len} = inner s1 (0::Int) + where + inner !s !n = + case next0 s of + Done -> commit n False{-no flush-} True{-release-} >> return () + Skip s' -> inner s' n + Yield x s' + | n + 1 >= len -> commit n True{-needs flush-} False >>= outer s + | x == '\n' -> do n1 <- writeCharBuf raw n '\r' + writeCharBuf raw n1 '\n' >>= inner s' + | otherwise -> writeCharBuf raw n x >>= inner s' + commit = commitBuffer h raw len + +writeBlocksRaw :: Handle -> Buffer CharBufElem -> Stream Char -> IO () +writeBlocksRaw h buf0 (Stream next0 s0 _len) = outer s0 buf0 + where + outer s1 Buffer{bufRaw=raw, bufSize=len} = inner s1 (0::Int) + where + inner !s !n = + case next0 s of + Done -> commit n False{-no flush-} True{-release-} >> return () + Skip s' -> inner s' n + Yield x s' + | n + 1 >= len -> commit n True{-needs flush-} False >>= outer s + | otherwise -> writeCharBuf raw n x >>= inner s' + commit = commitBuffer h raw len + +-- This function is completely lifted from GHC.IO.Handle.Text. 
+getSpareBuffer :: Handle__ -> IO (BufferMode, CharBuffer) +getSpareBuffer Handle__{haCharBuffer=ref, + haBuffers=spare_ref, + haBufferMode=mode} + = do + case mode of + NoBuffering -> return (mode, error "no buffer!") + _ -> do + bufs <- readIORef spare_ref + buf <- readIORef ref + case bufs of + BufferListCons b rest -> do + writeIORef spare_ref rest + return ( mode, emptyBuffer b (bufSize buf) WriteBuffer) + BufferListNil -> do + new_buf <- newCharBuffer (bufSize buf) WriteBuffer + return (mode, new_buf) + + +-- This function is completely lifted from GHC.IO.Handle.Text. +commitBuffer :: Handle -> RawCharBuffer -> Int -> Int -> Bool -> Bool + -> IO CharBuffer +commitBuffer hdl !raw !sz !count flush release = + wantWritableHandle "commitAndReleaseBuffer" hdl $ + commitBuffer' raw sz count flush release +{-# INLINE commitBuffer #-} + +-- | Write a string to a handle, followed by a newline. +hPutStrLn :: Handle -> Text -> IO () +hPutStrLn h t = hPutStr h t >> hPutChar h '\n' + +-- | The 'interact' function takes a function of type @Text -> Text@ +-- as its argument. The entire input from the standard input device is +-- passed to this function as its argument, and the resulting string +-- is output on the standard output device. +interact :: (Text -> Text) -> IO () +interact f = putStr . f =<< getContents + +-- | Read all user input on 'stdin' as a single string. +getContents :: IO Text +getContents = hGetContents stdin + +-- | Read a single line of user input from 'stdin'. +getLine :: IO Text +getLine = hGetLine stdin + +-- | Write a string to 'stdout'. +putStr :: Text -> IO () +putStr = hPutStr stdout + +-- | Write a string to 'stdout', followed by a newline. +putStrLn :: Text -> IO () +putStrLn = hPutStrLn stdout + +-- $locale +-- +-- /Note/: The behaviour of functions in this module depends on the +-- version of GHC you are using. +-- +-- Beginning with GHC 6.12, text I\/O is performed using the system or +-- handle's current locale and line ending conventions. 
+-- +-- Under GHC 6.10 and earlier, the system I\/O libraries do not +-- support locale-sensitive I\/O or line ending conversion. On these +-- versions of GHC, functions in this library all use UTF-8. What +-- does this mean in practice? +-- +-- * All data that is read will be decoded as UTF-8. +-- +-- * Before data is written, it is first encoded as UTF-8. +-- +-- * On both reading and writing, the platform's native newline +-- conversion is performed. +-- +-- If you must use a non-UTF-8 locale on an older version of GHC, you +-- will have to perform the transcoding yourself, e.g. as follows: +-- +-- > import qualified Data.ByteString as B +-- > import Data.Text (Text) +-- > import Data.Text.Encoding (encodeUtf16) +-- > +-- > putStr_Utf16LE :: Text -> IO () +-- > putStr_Utf16LE t = B.putStr (encodeUtf16LE t) diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Lazy/Builder/Int.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Lazy/Builder/Int.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Lazy/Builder/Int.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Lazy/Builder/Int.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,262 @@ +{-# LANGUAGE BangPatterns, CPP, MagicHash, RankNTypes, ScopedTypeVariables, + UnboxedTuples #-} +#if __GLASGOW_HASKELL__ >= 702 +{-# LANGUAGE Trustworthy #-} +#endif + +-- Module: Data.Text.Lazy.Builder.Int +-- Copyright: (c) 2013 Bryan O'Sullivan +-- (c) 2011 MailRank, Inc. +-- License: BSD3 +-- Maintainer: Bryan O'Sullivan +-- Stability: experimental +-- Portability: portable +-- +-- Efficiently write an integral value to a 'Builder'. 
+ +module Data.Text.Lazy.Builder.Int + ( + decimal + , hexadecimal + ) where + +import Data.Int (Int8, Int16, Int32, Int64) +import Data.Monoid (mempty) +import qualified Data.ByteString.Unsafe as B +import Data.Text.Internal.Builder.Functions ((<>), i2d) +import Data.Text.Internal.Builder +import Data.Text.Internal.Builder.Int.Digits (digits) +import Data.Text.Array +import Data.Word (Word, Word8, Word16, Word32, Word64) +import GHC.Base (quotInt, remInt) +import GHC.Num (quotRemInteger) +import GHC.Types (Int(..)) +import Control.Monad.ST + +#ifdef __GLASGOW_HASKELL__ +# if defined(INTEGER_GMP) +import GHC.Integer.GMP.Internals (Integer(S#)) +# elif defined(INTEGER_SIMPLE) +import GHC.Integer +# else +# error "You need to use either GMP or integer-simple." +# endif +#endif + +#if defined(INTEGER_GMP) || defined(INTEGER_SIMPLE) +# define PAIR(a,b) (# a,b #) +#else +# define PAIR(a,b) (a,b) +#endif + +decimal :: Integral a => a -> Builder +{-# RULES "decimal/Int8" decimal = boundedDecimal :: Int8 -> Builder #-} +{-# RULES "decimal/Int" decimal = boundedDecimal :: Int -> Builder #-} +{-# RULES "decimal/Int16" decimal = boundedDecimal :: Int16 -> Builder #-} +{-# RULES "decimal/Int32" decimal = boundedDecimal :: Int32 -> Builder #-} +{-# RULES "decimal/Int64" decimal = boundedDecimal :: Int64 -> Builder #-} +{-# RULES "decimal/Word" decimal = positive :: Data.Word.Word -> Builder #-} +{-# RULES "decimal/Word8" decimal = positive :: Word8 -> Builder #-} +{-# RULES "decimal/Word16" decimal = positive :: Word16 -> Builder #-} +{-# RULES "decimal/Word32" decimal = positive :: Word32 -> Builder #-} +{-# RULES "decimal/Word64" decimal = positive :: Word64 -> Builder #-} +{-# RULES "decimal/Integer" decimal = integer 10 :: Integer -> Builder #-} +decimal i = decimal' (<= -128) i +{-# NOINLINE decimal #-} + +boundedDecimal :: (Integral a, Bounded a) => a -> Builder +{-# SPECIALIZE boundedDecimal :: Int -> Builder #-} +{-# SPECIALIZE boundedDecimal :: Int8 -> Builder #-} +{-# 
SPECIALIZE boundedDecimal :: Int16 -> Builder #-} +{-# SPECIALIZE boundedDecimal :: Int32 -> Builder #-} +{-# SPECIALIZE boundedDecimal :: Int64 -> Builder #-} +boundedDecimal i = decimal' (== minBound) i + +decimal' :: (Integral a) => (a -> Bool) -> a -> Builder +{-# INLINE decimal' #-} +decimal' p i + | i < 0 = if p i + then let (q, r) = i `quotRem` 10 + qq = -q + !n = countDigits qq + in writeN (n + 2) $ \marr off -> do + unsafeWrite marr off minus + posDecimal marr (off+1) n qq + unsafeWrite marr (off+n+1) (i2w (-r)) + else let j = -i + !n = countDigits j + in writeN (n + 1) $ \marr off -> + unsafeWrite marr off minus >> posDecimal marr (off+1) n j + | otherwise = positive i + +positive :: (Integral a) => a -> Builder +{-# SPECIALIZE positive :: Int -> Builder #-} +{-# SPECIALIZE positive :: Int8 -> Builder #-} +{-# SPECIALIZE positive :: Int16 -> Builder #-} +{-# SPECIALIZE positive :: Int32 -> Builder #-} +{-# SPECIALIZE positive :: Int64 -> Builder #-} +{-# SPECIALIZE positive :: Word -> Builder #-} +{-# SPECIALIZE positive :: Word8 -> Builder #-} +{-# SPECIALIZE positive :: Word16 -> Builder #-} +{-# SPECIALIZE positive :: Word32 -> Builder #-} +{-# SPECIALIZE positive :: Word64 -> Builder #-} +positive i + | i < 10 = writeN 1 $ \marr off -> unsafeWrite marr off (i2w i) + | otherwise = let !n = countDigits i + in writeN n $ \marr off -> posDecimal marr off n i + +posDecimal :: (Integral a) => + forall s. MArray s -> Int -> Int -> a -> ST s () +{-# INLINE posDecimal #-} +posDecimal marr off0 ds v0 = go (off0 + ds - 1) v0 + where go off v + | v >= 100 = do + let (q, r) = v `quotRem` 100 + write2 off r + go (off - 2) q + | v < 10 = unsafeWrite marr off (i2w v) + | otherwise = write2 off v + write2 off i0 = do + let i = fromIntegral i0; j = i + i + unsafeWrite marr off $ get (j + 1) + unsafeWrite marr (off - 1) $ get j + get = fromIntegral . 
B.unsafeIndex digits + +minus, zero :: Word16 +{-# INLINE minus #-} +{-# INLINE zero #-} +minus = 45 +zero = 48 + +i2w :: (Integral a) => a -> Word16 +{-# INLINE i2w #-} +i2w v = zero + fromIntegral v + +countDigits :: (Integral a) => a -> Int +{-# INLINE countDigits #-} +countDigits v0 + | fromIntegral v64 == v0 = go 1 v64 + | otherwise = goBig 1 (fromIntegral v0) + where v64 = fromIntegral v0 + goBig !k (v :: Integer) + | v > big = goBig (k + 19) (v `quot` big) + | otherwise = go k (fromIntegral v) + big = 10000000000000000000 + go !k (v :: Word64) + | v < 10 = k + | v < 100 = k + 1 + | v < 1000 = k + 2 + | v < 1000000000000 = + k + if v < 100000000 + then if v < 1000000 + then if v < 10000 + then 3 + else 4 + fin v 100000 + else 6 + fin v 10000000 + else if v < 10000000000 + then 8 + fin v 1000000000 + else 10 + fin v 100000000000 + | otherwise = go (k + 12) (v `quot` 1000000000000) + fin v n = if v >= n then 1 else 0 + +hexadecimal :: Integral a => a -> Builder +{-# SPECIALIZE hexadecimal :: Int -> Builder #-} +{-# SPECIALIZE hexadecimal :: Int8 -> Builder #-} +{-# SPECIALIZE hexadecimal :: Int16 -> Builder #-} +{-# SPECIALIZE hexadecimal :: Int32 -> Builder #-} +{-# SPECIALIZE hexadecimal :: Int64 -> Builder #-} +{-# SPECIALIZE hexadecimal :: Word -> Builder #-} +{-# SPECIALIZE hexadecimal :: Word8 -> Builder #-} +{-# SPECIALIZE hexadecimal :: Word16 -> Builder #-} +{-# SPECIALIZE hexadecimal :: Word32 -> Builder #-} +{-# SPECIALIZE hexadecimal :: Word64 -> Builder #-} +{-# RULES "hexadecimal/Integer" + hexadecimal = hexInteger :: Integer -> Builder #-} +hexadecimal i + | i < 0 = error hexErrMsg + | otherwise = go i + where + go n | n < 16 = hexDigit n + | otherwise = go (n `quot` 16) <> hexDigit (n `rem` 16) +{-# NOINLINE[0] hexadecimal #-} + +hexInteger :: Integer -> Builder +hexInteger i + | i < 0 = error hexErrMsg + | otherwise = integer 16 i + +hexErrMsg :: String +hexErrMsg = "Data.Text.Lazy.Builder.Int.hexadecimal: applied to negative number" + 
+hexDigit :: Integral a => a -> Builder +hexDigit n + | n <= 9 = singleton $! i2d (fromIntegral n) + | otherwise = singleton $! toEnum (fromIntegral n + 87) +{-# INLINE hexDigit #-} + +data T = T !Integer !Int + +integer :: Int -> Integer -> Builder +#ifdef INTEGER_GMP +integer 10 (S# i#) = decimal (I# i#) +integer 16 (S# i#) = hexadecimal (I# i#) +#endif +integer base i + | i < 0 = singleton '-' <> go (-i) + | otherwise = go i + where + go n | n < maxInt = int (fromInteger n) + | otherwise = putH (splitf (maxInt * maxInt) n) + + splitf p n + | p > n = [n] + | otherwise = splith p (splitf (p*p) n) + + splith p (n:ns) = case n `quotRemInteger` p of + PAIR(q,r) | q > 0 -> q : r : splitb p ns + | otherwise -> r : splitb p ns + splith _ _ = error "splith: the impossible happened." + + splitb p (n:ns) = case n `quotRemInteger` p of + PAIR(q,r) -> q : r : splitb p ns + splitb _ _ = [] + + T maxInt10 maxDigits10 = + until ((>mi) . (*10) . fstT) (\(T n d) -> T (n*10) (d+1)) (T 10 1) + where mi = fromIntegral (maxBound :: Int) + T maxInt16 maxDigits16 = + until ((>mi) . (*16) . 
fstT) (\(T n d) -> T (n*16) (d+1)) (T 16 1) + where mi = fromIntegral (maxBound :: Int) + + fstT (T a _) = a + + maxInt | base == 10 = maxInt10 + | otherwise = maxInt16 + maxDigits | base == 10 = maxDigits10 + | otherwise = maxDigits16 + + putH (n:ns) = case n `quotRemInteger` maxInt of + PAIR(x,y) + | q > 0 -> int q <> pblock r <> putB ns + | otherwise -> int r <> putB ns + where q = fromInteger x + r = fromInteger y + putH _ = error "putH: the impossible happened" + + putB (n:ns) = case n `quotRemInteger` maxInt of + PAIR(x,y) -> pblock q <> pblock r <> putB ns + where q = fromInteger x + r = fromInteger y + putB _ = Data.Monoid.mempty + + int :: Int -> Builder + int x | base == 10 = decimal x + | otherwise = hexadecimal x + + pblock = loop maxDigits + where + loop !d !n + | d == 1 = hexDigit n + | otherwise = loop (d-1) q <> hexDigit r + where q = n `quotInt` base + r = n `remInt` base diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Lazy/Builder/RealFloat.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Lazy/Builder/RealFloat.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Lazy/Builder/RealFloat.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Lazy/Builder/RealFloat.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,242 @@ +{-# LANGUAGE CPP, OverloadedStrings #-} +#if __GLASGOW_HASKELL__ >= 702 +{-# LANGUAGE Trustworthy #-} +#endif + +-- | +-- Module: Data.Text.Lazy.Builder.RealFloat +-- Copyright: (c) The University of Glasgow 1994-2002 +-- License: see libraries/base/LICENSE +-- +-- Write a floating point value to a 'Builder'. + +module Data.Text.Lazy.Builder.RealFloat + ( + FPFormat(..) 
+ , realFloat + , formatRealFloat + ) where + +import Data.Array.Base (unsafeAt) +import Data.Array.IArray +import Data.Text.Internal.Builder.Functions ((<>), i2d) +import Data.Text.Lazy.Builder.Int (decimal) +import Data.Text.Internal.Builder.RealFloat.Functions (roundTo) +import Data.Text.Lazy.Builder +import qualified Data.Text as T + +-- | Control the rendering of floating point numbers. +data FPFormat = Exponent + -- ^ Scientific notation (e.g. @2.3e123@). + | Fixed + -- ^ Standard decimal notation. + | Generic + -- ^ Use decimal notation for values between @0.1@ and + -- @9,999,999@, and scientific notation otherwise. + deriving (Enum, Read, Show) + +-- | Show a signed 'RealFloat' value to full precision, +-- using standard decimal notation for arguments whose absolute value lies +-- between @0.1@ and @9,999,999@, and scientific notation otherwise. +realFloat :: (RealFloat a) => a -> Builder +{-# SPECIALIZE realFloat :: Float -> Builder #-} +{-# SPECIALIZE realFloat :: Double -> Builder #-} +realFloat x = formatRealFloat Generic Nothing x + +formatRealFloat :: (RealFloat a) => + FPFormat + -> Maybe Int -- ^ Number of decimal places to render. + -> a + -> Builder +{-# SPECIALIZE formatRealFloat :: FPFormat -> Maybe Int -> Float -> Builder #-} +{-# SPECIALIZE formatRealFloat :: FPFormat -> Maybe Int -> Double -> Builder #-} +formatRealFloat fmt decs x + | isNaN x = "NaN" + | isInfinite x = if x < 0 then "-Infinity" else "Infinity" + | x < 0 || isNegativeZero x = singleton '-' <> doFmt fmt (floatToDigits (-x)) + | otherwise = doFmt fmt (floatToDigits x) + where + doFmt format (is, e) = + let ds = map i2d is in + case format of + Generic -> + doFmt (if e < 0 || e > 7 then Exponent else Fixed) + (is,e) + Exponent -> + case decs of + Nothing -> + let show_e' = decimal (e-1) in + case ds of + "0" -> "0.0e0" + [d] -> singleton d <> ".0e" <> show_e' + (d:ds') -> singleton d <> singleton '.' 
<> fromString ds' <> singleton 'e' <> show_e' + [] -> error "formatRealFloat/doFmt/Exponent: []" + Just dec -> + let dec' = max dec 1 in + case is of + [0] -> "0." <> fromText (T.replicate dec' "0") <> "e0" + _ -> + let + (ei,is') = roundTo (dec'+1) is + (d:ds') = map i2d (if ei > 0 then init is' else is') + in + singleton d <> singleton '.' <> fromString ds' <> singleton 'e' <> decimal (e-1+ei) + Fixed -> + let + mk0 ls = case ls of { "" -> "0" ; _ -> fromString ls} + in + case decs of + Nothing + | e <= 0 -> "0." <> fromText (T.replicate (-e) "0") <> fromString ds + | otherwise -> + let + f 0 s rs = mk0 (reverse s) <> singleton '.' <> mk0 rs + f n s "" = f (n-1) ('0':s) "" + f n s (r:rs) = f (n-1) (r:s) rs + in + f e "" ds + Just dec -> + let dec' = max dec 0 in + if e >= 0 then + let + (ei,is') = roundTo (dec' + e) is + (ls,rs) = splitAt (e+ei) (map i2d is') + in + mk0 ls <> (if null rs then "" else singleton '.' <> fromString rs) + else + let + (ei,is') = roundTo dec' (replicate (-e) 0 ++ is) + d:ds' = map i2d (if ei > 0 then is' else 0:is') + in + singleton d <> (if null ds' then "" else singleton '.' <> fromString ds') + + +-- Based on "Printing Floating-Point Numbers Quickly and Accurately" +-- by R.G. Burger and R.K. Dybvig in PLDI 96. +-- This version uses a much slower logarithm estimator. It should be improved. + +-- | 'floatToDigits' takes a base and a non-negative 'RealFloat' number, +-- and returns a list of digits and an exponent. 
+-- In particular, if @x>=0@, and +-- +-- > floatToDigits base x = ([d1,d2,...,dn], e) +-- +-- then +-- +-- (1) @n >= 1@ +-- +-- (2) @x = 0.d1d2...dn * (base**e)@ +-- +-- (3) @0 <= di <= base-1@ + +floatToDigits :: (RealFloat a) => a -> ([Int], Int) +{-# SPECIALIZE floatToDigits :: Float -> ([Int], Int) #-} +{-# SPECIALIZE floatToDigits :: Double -> ([Int], Int) #-} +floatToDigits 0 = ([0], 0) +floatToDigits x = + let + (f0, e0) = decodeFloat x + (minExp0, _) = floatRange x + p = floatDigits x + b = floatRadix x + minExp = minExp0 - p -- the real minimum exponent + -- Haskell requires that f be adjusted so denormalized numbers + -- will have an impossibly low exponent. Adjust for this. + (f, e) = + let n = minExp - e0 in + if n > 0 then (f0 `quot` (expt b n), e0+n) else (f0, e0) + (r, s, mUp, mDn) = + if e >= 0 then + let be = expt b e in + if f == expt b (p-1) then + (f*be*b*2, 2*b, be*b, be) -- according to Burger and Dybvig + else + (f*be*2, 2, be, be) + else + if e > minExp && f == expt b (p-1) then + (f*b*2, expt b (-e+1)*2, b, 1) + else + (f*2, expt b (-e)*2, 1, 1) + k :: Int + k = + let + k0 :: Int + k0 = + if b == 2 then + -- logBase 10 2 is very slightly larger than 8651/28738 + -- (about 5.3558e-10), so if log x >= 0, the approximation + -- k1 is too small, hence we add one and need one fixup step less. + -- If log x < 0, the approximation errs rather on the high side. + -- That is usually more than compensated for by ignoring the + -- fractional part of logBase 2 x, but when x is a power of 1/2 + -- or slightly larger and the exponent is a multiple of the + -- denominator of the rational approximation to logBase 10 2, + -- k1 is larger than logBase 10 x. If k1 > 1 + logBase 10 x, + -- we get a leading zero-digit we don't want. + -- With the approximation 3/10, this happened for + -- 0.5^1030, 0.5^1040, ..., 0.5^1070 and values close above. 
+ -- The approximation 8651/28738 guarantees k1 < 1 + logBase 10 x + -- for IEEE-ish floating point types with exponent fields + -- <= 17 bits and mantissae of several thousand bits, earlier + -- convergents to logBase 10 2 would fail for long double. + -- Using quot instead of div is a little faster and requires + -- fewer fixup steps for negative lx. + let lx = p - 1 + e0 + k1 = (lx * 8651) `quot` 28738 + in if lx >= 0 then k1 + 1 else k1 + else + -- f :: Integer, log :: Float -> Float, + -- ceiling :: Float -> Int + ceiling ((log (fromInteger (f+1) :: Float) + + fromIntegral e * log (fromInteger b)) / + log 10) +--WAS: fromInt e * log (fromInteger b)) + + fixup n = + if n >= 0 then + if r + mUp <= expt 10 n * s then n else fixup (n+1) + else + if expt 10 (-n) * (r + mUp) <= s then n else fixup (n+1) + in + fixup k0 + + gen ds rn sN mUpN mDnN = + let + (dn, rn') = (rn * 10) `quotRem` sN + mUpN' = mUpN * 10 + mDnN' = mDnN * 10 + in + case (rn' < mDnN', rn' + mUpN' > sN) of + (True, False) -> dn : ds + (False, True) -> dn+1 : ds + (True, True) -> if rn' * 2 < sN then dn : ds else dn+1 : ds + (False, False) -> gen (dn:ds) rn' sN mUpN' mDnN' + + rds = + if k >= 0 then + gen [] r (s * expt 10 k) mUp mDn + else + let bk = expt 10 (-k) in + gen [] (r * bk) s (mUp * bk) (mDn * bk) + in + (map fromIntegral (reverse rds), k) + +-- Exponentiation with a cache for the most common numbers. +minExpt, maxExpt :: Int +minExpt = 0 +maxExpt = 1100 + +expt :: Integer -> Int -> Integer +expt base n + | base == 2 && n >= minExpt && n <= maxExpt = expts `unsafeAt` n + | base == 10 && n <= maxExpt10 = expts10 `unsafeAt` n + | otherwise = base^n + +expts :: Array Int Integer +expts = array (minExpt,maxExpt) [(n,2^n) | n <- [minExpt .. maxExpt]] + +maxExpt10 :: Int +maxExpt10 = 324 + +expts10 :: Array Int Integer +expts10 = array (minExpt,maxExpt10) [(n,10^n) | n <- [minExpt .. 
maxExpt10]] diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Lazy/Builder.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Lazy/Builder.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Lazy/Builder.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Lazy/Builder.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,58 @@ +{-# LANGUAGE BangPatterns, CPP, Rank2Types #-} +#if __GLASGOW_HASKELL__ >= 702 +{-# LANGUAGE Trustworthy #-} +#endif + +----------------------------------------------------------------------------- +-- | +-- Module : Data.Text.Lazy.Builder +-- Copyright : (c) 2013 Bryan O'Sullivan +-- (c) 2010 Johan Tibell +-- License : BSD3-style (see LICENSE) +-- +-- Maintainer : Johan Tibell +-- Stability : experimental +-- Portability : portable to Hugs and GHC +-- +-- Efficient construction of lazy @Text@ values. The principal +-- operations on a @Builder@ are @singleton@, @fromText@, and +-- @fromLazyText@, which construct new builders, and 'mappend', which +-- concatenates two builders. +-- +-- To get maximum performance when building lazy @Text@ values using a +-- builder, associate @mappend@ calls to the right. For example, +-- prefer +-- +-- > singleton 'a' `mappend` (singleton 'b' `mappend` singleton 'c') +-- +-- to +-- +-- > singleton 'a' `mappend` singleton 'b' `mappend` singleton 'c' +-- +-- as the latter associates @mappend@ to the left. Or, equivalently, +-- prefer +-- +-- > singleton 'a' <> singleton 'b' <> singleton 'c' +-- +-- since the '<>' from recent versions of 'Data.Monoid' associates +-- to the right. 
+ +----------------------------------------------------------------------------- + +module Data.Text.Lazy.Builder + ( -- * The Builder type + Builder + , toLazyText + , toLazyTextWith + + -- * Constructing Builders + , singleton + , fromText + , fromLazyText + , fromString + + -- * Flushing the buffer state + , flush + ) where + +import Data.Text.Internal.Builder diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Lazy/Encoding.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Lazy/Encoding.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Lazy/Encoding.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Lazy/Encoding.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,250 @@ +{-# LANGUAGE BangPatterns,CPP #-} +#if __GLASGOW_HASKELL__ >= 702 +{-# LANGUAGE Trustworthy #-} +#endif +-- | +-- Module : Data.Text.Lazy.Encoding +-- Copyright : (c) 2009, 2010 Bryan O'Sullivan +-- +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : portable +-- +-- Functions for converting lazy 'Text' values to and from lazy +-- 'ByteString', using several standard encodings. 
+-- +-- To gain access to a much larger variety of encodings, use the +-- @text-icu@ package: + +module Data.Text.Lazy.Encoding + ( + -- * Decoding ByteStrings to Text + -- $strict + decodeASCII + , decodeLatin1 + , decodeUtf8 + , decodeUtf16LE + , decodeUtf16BE + , decodeUtf32LE + , decodeUtf32BE + + -- ** Catchable failure + , decodeUtf8' + + -- ** Controllable error handling + , decodeUtf8With + , decodeUtf16LEWith + , decodeUtf16BEWith + , decodeUtf32LEWith + , decodeUtf32BEWith + + -- * Encoding Text to ByteStrings + , encodeUtf8 + , encodeUtf16LE + , encodeUtf16BE + , encodeUtf32LE + , encodeUtf32BE + +#if MIN_VERSION_bytestring(0,10,4) + -- * Encoding Text using ByteString Builders + , encodeUtf8Builder + , encodeUtf8BuilderEscaped +#endif + ) where + +import Control.Exception (evaluate, try) +import Data.Text.Encoding.Error (OnDecodeError, UnicodeException, strictDecode) +import Data.Text.Internal.Lazy (Text(..), chunk, empty, foldrChunks) +import qualified Data.ByteString as S +import qualified Data.ByteString.Lazy as B +import qualified Data.ByteString.Lazy.Internal as B +import qualified Data.ByteString.Unsafe as B +#if MIN_VERSION_bytestring(0,10,4) +import Data.Word (Word8) +import Data.Monoid (Monoid(..)) +import qualified Data.ByteString.Builder as B +import qualified Data.ByteString.Builder.Extra as B (safeStrategy, toLazyByteStringWith) +import qualified Data.ByteString.Builder.Prim as BP +#endif +import qualified Data.Text as T +import qualified Data.Text.Encoding as TE +import qualified Data.Text.Internal.Lazy.Encoding.Fusion as E +import qualified Data.Text.Internal.Lazy.Fusion as F +import Data.Text.Unsafe (unsafeDupablePerformIO) + +-- $strict +-- +-- All of the single-parameter functions for decoding bytestrings +-- encoded in one of the Unicode Transformation Formats (UTF) operate +-- in a /strict/ mode: each will throw an exception if given invalid +-- input. 
+-- +-- Each function has a variant, whose name is suffixed with -'With', +-- that gives greater control over the handling of decoding errors. +-- For instance, 'decodeUtf8' will throw an exception, but +-- 'decodeUtf8With' allows the programmer to determine what to do on a +-- decoding error. + +-- | /Deprecated/. Decode a 'ByteString' containing 7-bit ASCII +-- encoded text. +decodeASCII :: B.ByteString -> Text +decodeASCII = decodeUtf8 +{-# DEPRECATED decodeASCII "Use decodeUtf8 instead" #-} + +-- | Decode a 'ByteString' containing Latin-1 (aka ISO-8859-1) encoded text. +decodeLatin1 :: B.ByteString -> Text +decodeLatin1 = foldr (chunk . TE.decodeLatin1) empty . B.toChunks + +-- | Decode a 'ByteString' containing UTF-8 encoded text. +decodeUtf8With :: OnDecodeError -> B.ByteString -> Text +decodeUtf8With onErr (B.Chunk b0 bs0) = + case TE.streamDecodeUtf8With onErr b0 of + TE.Some t l f -> chunk t (go f l bs0) + where + go f0 _ (B.Chunk b bs) = + case f0 b of + TE.Some t l f -> chunk t (go f l bs) + go _ l _ + | S.null l = empty + | otherwise = case onErr desc (Just (B.unsafeHead l)) of + Nothing -> empty + Just c -> Chunk (T.singleton c) Empty + desc = "Data.Text.Lazy.Encoding.decodeUtf8With: Invalid UTF-8 stream" +decodeUtf8With _ _ = empty + +-- | Decode a 'ByteString' containing UTF-8 encoded text that is known +-- to be valid. +-- +-- If the input contains any invalid UTF-8 data, an exception will be +-- thrown that cannot be caught in pure code. For more control over +-- the handling of invalid data, use 'decodeUtf8'' or +-- 'decodeUtf8With'. +decodeUtf8 :: B.ByteString -> Text +decodeUtf8 = decodeUtf8With strictDecode +{-# INLINE[0] decodeUtf8 #-} + +-- This rule seems to cause performance loss. +{- RULES "LAZY STREAM stream/decodeUtf8' fusion" [1] + forall bs. F.stream (decodeUtf8' bs) = E.streamUtf8 strictDecode bs #-} + +-- | Decode a 'ByteString' containing UTF-8 encoded text.. 
+-- +-- If the input contains any invalid UTF-8 data, the relevant +-- exception will be returned, otherwise the decoded text. +-- +-- /Note/: this function is /not/ lazy, as it must decode its entire +-- input before it can return a result. If you need lazy (streaming) +-- decoding, use 'decodeUtf8With' in lenient mode. +decodeUtf8' :: B.ByteString -> Either UnicodeException Text +decodeUtf8' bs = unsafeDupablePerformIO $ do + let t = decodeUtf8 bs + try (evaluate (rnf t `seq` t)) + where + rnf Empty = () + rnf (Chunk _ ts) = rnf ts +{-# INLINE decodeUtf8' #-} + +encodeUtf8 :: Text -> B.ByteString +#if MIN_VERSION_bytestring(0,10,4) +encodeUtf8 Empty = B.empty +encodeUtf8 lt@(Chunk t _) = + B.toLazyByteStringWith strategy B.empty $ encodeUtf8Builder lt + where + -- To improve our small string performance, we use a strategy that + -- allocates a buffer that is guaranteed to be large enough for the + -- encoding of the first chunk, but not larger than the default + -- B.smallChunkSize. We clamp the firstChunkSize to ensure that we don't + -- generate too large buffers which hamper streaming. + firstChunkSize = min B.smallChunkSize (4 * (T.length t + 1)) + strategy = B.safeStrategy firstChunkSize B.defaultChunkSize + +encodeUtf8Builder :: Text -> B.Builder +encodeUtf8Builder = + foldrChunks (\c b -> TE.encodeUtf8Builder c `mappend` b) Data.Monoid.mempty + +{-# INLINE encodeUtf8BuilderEscaped #-} +encodeUtf8BuilderEscaped :: BP.BoundedPrim Word8 -> Text -> B.Builder +encodeUtf8BuilderEscaped prim = + foldrChunks (\c b -> TE.encodeUtf8BuilderEscaped prim c `mappend` b) mempty + +#else +encodeUtf8 (Chunk c cs) = B.Chunk (TE.encodeUtf8 c) (encodeUtf8 cs) +encodeUtf8 Empty = B.Empty +#endif + +-- | Decode text from little endian UTF-16 encoding. +decodeUtf16LEWith :: OnDecodeError -> B.ByteString -> Text +decodeUtf16LEWith onErr bs = F.unstream (E.streamUtf16LE onErr bs) +{-# INLINE decodeUtf16LEWith #-} + +-- | Decode text from little endian UTF-16 encoding. 
+-- +-- If the input contains any invalid little endian UTF-16 data, an +-- exception will be thrown. For more control over the handling of +-- invalid data, use 'decodeUtf16LEWith'. +decodeUtf16LE :: B.ByteString -> Text +decodeUtf16LE = decodeUtf16LEWith strictDecode +{-# INLINE decodeUtf16LE #-} + +-- | Decode text from big endian UTF-16 encoding. +decodeUtf16BEWith :: OnDecodeError -> B.ByteString -> Text +decodeUtf16BEWith onErr bs = F.unstream (E.streamUtf16BE onErr bs) +{-# INLINE decodeUtf16BEWith #-} + +-- | Decode text from big endian UTF-16 encoding. +-- +-- If the input contains any invalid big endian UTF-16 data, an +-- exception will be thrown. For more control over the handling of +-- invalid data, use 'decodeUtf16BEWith'. +decodeUtf16BE :: B.ByteString -> Text +decodeUtf16BE = decodeUtf16BEWith strictDecode +{-# INLINE decodeUtf16BE #-} + +-- | Encode text using little endian UTF-16 encoding. +encodeUtf16LE :: Text -> B.ByteString +encodeUtf16LE txt = B.fromChunks (foldrChunks ((:) . TE.encodeUtf16LE) [] txt) +{-# INLINE encodeUtf16LE #-} + +-- | Encode text using big endian UTF-16 encoding. +encodeUtf16BE :: Text -> B.ByteString +encodeUtf16BE txt = B.fromChunks (foldrChunks ((:) . TE.encodeUtf16BE) [] txt) +{-# INLINE encodeUtf16BE #-} + +-- | Decode text from little endian UTF-32 encoding. +decodeUtf32LEWith :: OnDecodeError -> B.ByteString -> Text +decodeUtf32LEWith onErr bs = F.unstream (E.streamUtf32LE onErr bs) +{-# INLINE decodeUtf32LEWith #-} + +-- | Decode text from little endian UTF-32 encoding. +-- +-- If the input contains any invalid little endian UTF-32 data, an +-- exception will be thrown. For more control over the handling of +-- invalid data, use 'decodeUtf32LEWith'. +decodeUtf32LE :: B.ByteString -> Text +decodeUtf32LE = decodeUtf32LEWith strictDecode +{-# INLINE decodeUtf32LE #-} + +-- | Decode text from big endian UTF-32 encoding. 
+decodeUtf32BEWith :: OnDecodeError -> B.ByteString -> Text +decodeUtf32BEWith onErr bs = F.unstream (E.streamUtf32BE onErr bs) +{-# INLINE decodeUtf32BEWith #-} + +-- | Decode text from big endian UTF-32 encoding. +-- +-- If the input contains any invalid big endian UTF-32 data, an +-- exception will be thrown. For more control over the handling of +-- invalid data, use 'decodeUtf32BEWith'. +decodeUtf32BE :: B.ByteString -> Text +decodeUtf32BE = decodeUtf32BEWith strictDecode +{-# INLINE decodeUtf32BE #-} + +-- | Encode text using little endian UTF-32 encoding. +encodeUtf32LE :: Text -> B.ByteString +encodeUtf32LE txt = B.fromChunks (foldrChunks ((:) . TE.encodeUtf32LE) [] txt) +{-# INLINE encodeUtf32LE #-} + +-- | Encode text using big endian UTF-32 encoding. +encodeUtf32BE :: Text -> B.ByteString +encodeUtf32BE txt = B.fromChunks (foldrChunks ((:) . TE.encodeUtf32BE) [] txt) +{-# INLINE encodeUtf32BE #-} diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Lazy/Internal.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Lazy/Internal.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Lazy/Internal.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Lazy/Internal.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,20 @@ +{-# LANGUAGE BangPatterns, DeriveDataTypeable #-} +-- | +-- Module : Data.Text.Lazy.Internal +-- Copyright : (c) 2013 Bryan O'Sullivan +-- +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : GHC +-- +-- This module has been renamed to 'Data.Text.Internal.Lazy'. This +-- name for the module will be removed in the next major release. 
+ +module Data.Text.Lazy.Internal + {-# DEPRECATED "Use Data.Text.Internal.Lazy instead" #-} + ( + module Data.Text.Internal.Lazy + ) where + +import Data.Text.Internal.Lazy diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Lazy/IO.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Lazy/IO.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Lazy/IO.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Lazy/IO.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,196 @@ +{-# LANGUAGE BangPatterns, CPP, RecordWildCards #-} +#if __GLASGOW_HASKELL__ >= 702 +{-# LANGUAGE Trustworthy #-} +#endif +-- | +-- Module : Data.Text.Lazy.IO +-- Copyright : (c) 2009, 2010 Bryan O'Sullivan, +-- (c) 2009 Simon Marlow +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : GHC +-- +-- Efficient locale-sensitive support for lazy text I\/O. +-- +-- Skip past the synopsis for some important notes on performance and +-- portability across different versions of GHC. 
+ +module Data.Text.Lazy.IO + ( + -- * Performance + -- $performance + + -- * Locale support + -- $locale + -- * File-at-a-time operations + readFile + , writeFile + , appendFile + -- * Operations on handles + , hGetContents + , hGetLine + , hPutStr + , hPutStrLn + -- * Special cases for standard input and output + , interact + , getContents + , getLine + , putStr + , putStrLn + ) where + +import Data.Text.Lazy (Text) +import Prelude hiding (appendFile, getContents, getLine, interact, + putStr, putStrLn, readFile, writeFile) +import System.IO (Handle, IOMode(..), hPutChar, openFile, stdin, stdout, + withFile) +import qualified Data.Text.IO as T +import qualified Data.Text.Lazy as L +import qualified Control.Exception as E +import Control.Monad (when) +import Data.IORef (readIORef) +import Data.Text.Internal.IO (hGetLineWith, readChunk) +import Data.Text.Internal.Lazy (chunk, empty) +import GHC.IO.Buffer (isEmptyBuffer) +import GHC.IO.Exception (IOException(..), IOErrorType(..), ioException) +import GHC.IO.Handle.Internals (augmentIOError, hClose_help, + wantReadableHandle, withHandle) +import GHC.IO.Handle.Types (Handle__(..), HandleType(..)) +import System.IO (BufferMode(..), hGetBuffering, hSetBuffering) +import System.IO.Error (isEOFError) +import System.IO.Unsafe (unsafeInterleaveIO) + +-- $performance +-- +-- The functions in this module obey the runtime system's locale, +-- character set encoding, and line ending conversion settings. +-- +-- If you know in advance that you will be working with data that has +-- a specific encoding (e.g. UTF-8), and your application is highly +-- performance sensitive, you may find that it is faster to perform +-- I\/O with bytestrings and to encode and decode yourself than to use +-- the functions in this module. +-- +-- Whether this will hold depends on the version of GHC you are using, +-- the platform you are working on, the data you are working with, and +-- the encodings you are using, so be sure to test for yourself. 
+ +-- | Read a file and return its contents as a string. The file is +-- read lazily, as with 'getContents'. +readFile :: FilePath -> IO Text +readFile name = openFile name ReadMode >>= hGetContents + +-- | Write a string to a file. The file is truncated to zero length +-- before writing begins. +writeFile :: FilePath -> Text -> IO () +writeFile p = withFile p WriteMode . flip hPutStr + +-- | Write a string the end of a file. +appendFile :: FilePath -> Text -> IO () +appendFile p = withFile p AppendMode . flip hPutStr + +-- | Lazily read the remaining contents of a 'Handle'. The 'Handle' +-- will be closed after the read completes, or on error. +hGetContents :: Handle -> IO Text +hGetContents h = do + chooseGoodBuffering h + wantReadableHandle "hGetContents" h $ \hh -> do + ts <- lazyRead h + return (hh{haType=SemiClosedHandle}, ts) + +-- | Use a more efficient buffer size if we're reading in +-- block-buffered mode with the default buffer size. +chooseGoodBuffering :: Handle -> IO () +chooseGoodBuffering h = do + bufMode <- hGetBuffering h + when (bufMode == BlockBuffering Nothing) $ + hSetBuffering h (BlockBuffering (Just 16384)) + +lazyRead :: Handle -> IO Text +lazyRead h = unsafeInterleaveIO $ + withHandle "hGetContents" h $ \hh -> do + case haType hh of + ClosedHandle -> return (hh, L.empty) + SemiClosedHandle -> lazyReadBuffered h hh + _ -> ioException + (IOError (Just h) IllegalOperation "hGetContents" + "illegal handle type" Nothing Nothing) + +lazyReadBuffered :: Handle -> Handle__ -> IO (Handle__, Text) +lazyReadBuffered h hh@Handle__{..} = do + buf <- readIORef haCharBuffer + (do t <- readChunk hh buf + ts <- lazyRead h + return (hh, chunk t ts)) `E.catch` \e -> do + (hh', _) <- hClose_help hh + if isEOFError e + then return $ if isEmptyBuffer buf + then (hh', empty) + else (hh', L.singleton '\r') + else E.throwIO (augmentIOError e "hGetContents" h) + +-- | Read a single line from a handle. 
+hGetLine :: Handle -> IO Text +hGetLine = hGetLineWith L.fromChunks + +-- | Write a string to a handle. +hPutStr :: Handle -> Text -> IO () +hPutStr h = mapM_ (T.hPutStr h) . L.toChunks + +-- | Write a string to a handle, followed by a newline. +hPutStrLn :: Handle -> Text -> IO () +hPutStrLn h t = hPutStr h t >> hPutChar h '\n' + +-- | The 'interact' function takes a function of type @Text -> Text@ +-- as its argument. The entire input from the standard input device is +-- passed (lazily) to this function as its argument, and the resulting +-- string is output on the standard output device. +interact :: (Text -> Text) -> IO () +interact f = putStr . f =<< getContents + +-- | Lazily read all user input on 'stdin' as a single string. +getContents :: IO Text +getContents = hGetContents stdin + +-- | Read a single line of user input from 'stdin'. +getLine :: IO Text +getLine = hGetLine stdin + +-- | Write a string to 'stdout'. +putStr :: Text -> IO () +putStr = hPutStr stdout + +-- | Write a string to 'stdout', followed by a newline. +putStrLn :: Text -> IO () +putStrLn = hPutStrLn stdout + +-- $locale +-- +-- /Note/: The behaviour of functions in this module depends on the +-- version of GHC you are using. +-- +-- Beginning with GHC 6.12, text I\/O is performed using the system or +-- handle's current locale and line ending conventions. +-- +-- Under GHC 6.10 and earlier, the system I\/O libraries /do not +-- support/ locale-sensitive I\/O or line ending conversion. On these +-- versions of GHC, functions in this library all use UTF-8. What +-- does this mean in practice? +-- +-- * All data that is read will be decoded as UTF-8. +-- +-- * Before data is written, it is first encoded as UTF-8. +-- +-- * On both reading and writing, the platform's native newline +-- conversion is performed. +-- +-- If you must use a non-UTF-8 locale on an older version of GHC, you +-- will have to perform the transcoding yourself, e.g. 
as follows: +-- +-- > import qualified Data.ByteString.Lazy as B +-- > import Data.Text.Lazy (Text) +-- > import Data.Text.Lazy.Encoding (encodeUtf16) +-- > +-- > putStr_Utf16LE :: Text -> IO () +-- > putStr_Utf16LE t = B.putStr (encodeUtf16LE t) diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Lazy/Read.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Lazy/Read.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Lazy/Read.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Lazy/Read.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,193 @@ +{-# LANGUAGE OverloadedStrings, CPP #-} +#if __GLASGOW_HASKELL__ >= 704 +{-# LANGUAGE Safe #-} +#elif __GLASGOW_HASKELL__ >= 702 +{-# LANGUAGE Trustworthy #-} +#endif + +-- | +-- Module : Data.Text.Lazy.Read +-- Copyright : (c) 2010, 2011 Bryan O'Sullivan +-- +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : GHC +-- +-- Functions used frequently when reading textual data. +module Data.Text.Lazy.Read + ( + Reader + , decimal + , hexadecimal + , signed + , rational + , double + ) where + +import Control.Monad (liftM) +import Data.Char (isDigit, isHexDigit) +import Data.Int (Int8, Int16, Int32, Int64) +import Data.Ratio ((%)) +import Data.Text.Internal.Read +import Data.Text.Lazy as T +import Data.Word (Word, Word8, Word16, Word32, Word64) + +-- | Read some text. If the read succeeds, return its value and the +-- remaining text, otherwise an error message. +type Reader a = IReader Text a +type Parser = IParser Text + +-- | Read a decimal integer. The input must begin with at least one +-- decimal digit, and is consumed until a non-digit or end of string +-- is reached. +-- +-- This function does not handle leading sign characters. If you need +-- to handle signed input, use @'signed' 'decimal'@. 
+-- +-- /Note/: For fixed-width integer types, this function does not +-- attempt to detect overflow, so a sufficiently long input may give +-- incorrect results. If you are worried about overflow, use +-- 'Integer' for your result type. +decimal :: Integral a => Reader a +{-# SPECIALIZE decimal :: Reader Int #-} +{-# SPECIALIZE decimal :: Reader Int8 #-} +{-# SPECIALIZE decimal :: Reader Int16 #-} +{-# SPECIALIZE decimal :: Reader Int32 #-} +{-# SPECIALIZE decimal :: Reader Int64 #-} +{-# SPECIALIZE decimal :: Reader Integer #-} +{-# SPECIALIZE decimal :: Reader Data.Word.Word #-} +{-# SPECIALIZE decimal :: Reader Word8 #-} +{-# SPECIALIZE decimal :: Reader Word16 #-} +{-# SPECIALIZE decimal :: Reader Word32 #-} +{-# SPECIALIZE decimal :: Reader Word64 #-} +decimal txt + | T.null h = Left "input does not start with a digit" + | otherwise = Right (T.foldl' go 0 h, t) + where (h,t) = T.span isDigit txt + go n d = (n * 10 + fromIntegral (digitToInt d)) + +-- | Read a hexadecimal integer, consisting of an optional leading +-- @\"0x\"@ followed by at least one decimal digit. Input is consumed +-- until a non-hex-digit or end of string is reached. This function +-- is case insensitive. +-- +-- This function does not handle leading sign characters. If you need +-- to handle signed input, use @'signed' 'hexadecimal'@. +-- +-- /Note/: For fixed-width integer types, this function does not +-- attempt to detect overflow, so a sufficiently long input may give +-- incorrect results. If you are worried about overflow, use +-- 'Integer' for your result type. 
+hexadecimal :: Integral a => Reader a +{-# SPECIALIZE hexadecimal :: Reader Int #-} +{-# SPECIALIZE hexadecimal :: Reader Integer #-} +hexadecimal txt + | h == "0x" || h == "0X" = hex t + | otherwise = hex txt + where (h,t) = T.splitAt 2 txt + +hex :: Integral a => Reader a +{-# SPECIALIZE hexadecimal :: Reader Int #-} +{-# SPECIALIZE hexadecimal :: Reader Int8 #-} +{-# SPECIALIZE hexadecimal :: Reader Int16 #-} +{-# SPECIALIZE hexadecimal :: Reader Int32 #-} +{-# SPECIALIZE hexadecimal :: Reader Int64 #-} +{-# SPECIALIZE hexadecimal :: Reader Integer #-} +{-# SPECIALIZE hexadecimal :: Reader Word #-} +{-# SPECIALIZE hexadecimal :: Reader Word8 #-} +{-# SPECIALIZE hexadecimal :: Reader Word16 #-} +{-# SPECIALIZE hexadecimal :: Reader Word32 #-} +{-# SPECIALIZE hexadecimal :: Reader Word64 #-} +hex txt + | T.null h = Left "input does not start with a hexadecimal digit" + | otherwise = Right (T.foldl' go 0 h, t) + where (h,t) = T.span isHexDigit txt + go n d = (n * 16 + fromIntegral (hexDigitToInt d)) + +-- | Read an optional leading sign character (@\'-\'@ or @\'+\'@) and +-- apply it to the result of applying the given reader. +signed :: Num a => Reader a -> Reader a +{-# INLINE signed #-} +signed f = runP (signa (P f)) + +-- | Read a rational number. +-- +-- This function accepts an optional leading sign character, followed +-- by at least one decimal digit. The syntax similar to that accepted +-- by the 'read' function, with the exception that a trailing @\'.\'@ +-- or @\'e\'@ /not/ followed by a number is not consumed. 
+-- +-- Examples: +-- +-- >rational "3" == Right (3.0, "") +-- >rational "3.1" == Right (3.1, "") +-- >rational "3e4" == Right (30000.0, "") +-- >rational "3.1e4" == Right (31000.0, "") +-- >rational ".3" == Left "input does not start with a digit" +-- >rational "e3" == Left "input does not start with a digit" +-- +-- Examples of differences from 'read': +-- +-- >rational "3.foo" == Right (3.0, ".foo") +-- >rational "3e" == Right (3.0, "e") +rational :: Fractional a => Reader a +{-# SPECIALIZE rational :: Reader Double #-} +rational = floaty $ \real frac fracDenom -> fromRational $ + real % 1 + frac % fracDenom + +-- | Read a rational number. +-- +-- The syntax accepted by this function is the same as for 'rational'. +-- +-- /Note/: This function is almost ten times faster than 'rational', +-- but is slightly less accurate. +-- +-- The 'Double' type supports about 16 decimal places of accuracy. +-- For 94.2% of numbers, this function and 'rational' give identical +-- results, but for the remaining 5.8%, this function loses precision +-- around the 15th decimal place. For 0.001% of numbers, this +-- function will lose precision at the 13th or 14th decimal place. 
+double :: Reader Double +double = floaty $ \real frac fracDenom -> + fromIntegral real + + fromIntegral frac / fromIntegral fracDenom + +signa :: Num a => Parser a -> Parser a +{-# SPECIALIZE signa :: Parser Int -> Parser Int #-} +{-# SPECIALIZE signa :: Parser Int8 -> Parser Int8 #-} +{-# SPECIALIZE signa :: Parser Int16 -> Parser Int16 #-} +{-# SPECIALIZE signa :: Parser Int32 -> Parser Int32 #-} +{-# SPECIALIZE signa :: Parser Int64 -> Parser Int64 #-} +{-# SPECIALIZE signa :: Parser Integer -> Parser Integer #-} +signa p = do + sign <- perhaps '+' $ char (\c -> c == '-' || c == '+') + if sign == '+' then p else negate `liftM` p + +char :: (Char -> Bool) -> Parser Char +char p = P $ \t -> case T.uncons t of + Just (c,t') | p c -> Right (c,t') + _ -> Left "character does not match" + +floaty :: Fractional a => (Integer -> Integer -> Integer -> a) -> Reader a +{-# INLINE floaty #-} +floaty f = runP $ do + sign <- perhaps '+' $ char (\c -> c == '-' || c == '+') + real <- P decimal + T fraction fracDigits <- perhaps (T 0 0) $ do + _ <- char (=='.') + digits <- P $ \t -> Right (fromIntegral . T.length $ T.takeWhile isDigit t, t) + n <- P decimal + return $ T n digits + let e c = c == 'e' || c == 'E' + power <- perhaps 0 (char e >> signa (P decimal) :: Parser Int) + let n = if fracDigits == 0 + then if power == 0 + then fromIntegral real + else fromIntegral real * (10 ^^ power) + else if power == 0 + then f real fraction (10 ^ fracDigits) + else f real fraction (10 ^ fracDigits) * (10 ^^ power) + return $! 
if sign == '+' + then n + else -n diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Lazy.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Lazy.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Lazy.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Lazy.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,1695 @@ +{-# OPTIONS_GHC -fno-warn-orphans #-} +{-# LANGUAGE BangPatterns, MagicHash, CPP #-} +#if __GLASGOW_HASKELL__ >= 702 +{-# LANGUAGE Trustworthy #-} +#endif +#if __GLASGOW_HASKELL__ >= 708 +{-# LANGUAGE TypeFamilies #-} +#endif + +-- | +-- Module : Data.Text.Lazy +-- Copyright : (c) 2009, 2010, 2012 Bryan O'Sullivan +-- +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : GHC +-- +-- A time and space-efficient implementation of Unicode text using +-- lists of packed arrays. +-- +-- /Note/: Read below the synopsis for important notes on the use of +-- this module. +-- +-- The representation used by this module is suitable for high +-- performance use and for streaming large quantities of data. It +-- provides a means to manipulate a large body of text without +-- requiring that the entire content be resident in memory. +-- +-- Some operations, such as 'concat', 'append', 'reverse' and 'cons', +-- have better time complexity than their "Data.Text" equivalents, due +-- to the underlying representation being a list of chunks. For other +-- operations, lazy 'Text's are usually within a few percent of strict +-- ones, but often with better heap usage if used in a streaming +-- fashion. For data larger than available memory, or if you have +-- tight memory constraints, this module will be the only option. +-- +-- This module is intended to be imported @qualified@, to avoid name +-- clashes with "Prelude" functions. eg. 
+-- +-- > import qualified Data.Text.Lazy as L + +module Data.Text.Lazy + ( + -- * Fusion + -- $fusion + + -- * Acceptable data + -- $replacement + + -- * Types + Text + + -- * Creation and elimination + , pack + , unpack + , singleton + , empty + , fromChunks + , toChunks + , toStrict + , fromStrict + , foldrChunks + , foldlChunks + + -- * Basic interface + , cons + , snoc + , append + , uncons + , head + , last + , tail + , init + , null + , length + , compareLength + + -- * Transformations + , map + , intercalate + , intersperse + , transpose + , reverse + , replace + + -- ** Case conversion + -- $case + , toCaseFold + , toLower + , toUpper + , toTitle + + -- ** Justification + , justifyLeft + , justifyRight + , center + + -- * Folds + , foldl + , foldl' + , foldl1 + , foldl1' + , foldr + , foldr1 + + -- ** Special folds + , concat + , concatMap + , any + , all + , maximum + , minimum + + -- * Construction + + -- ** Scans + , scanl + , scanl1 + , scanr + , scanr1 + + -- ** Accumulating maps + , mapAccumL + , mapAccumR + + -- ** Generation and unfolding + , repeat + , replicate + , cycle + , iterate + , unfoldr + , unfoldrN + + -- * Substrings + + -- ** Breaking strings + , take + , takeEnd + , drop + , dropEnd + , takeWhile + , takeWhileEnd + , dropWhile + , dropWhileEnd + , dropAround + , strip + , stripStart + , stripEnd + , splitAt + , span + , breakOn + , breakOnEnd + , break + , group + , groupBy + , inits + , tails + + -- ** Breaking into many substrings + -- $split + , splitOn + , split + , chunksOf + -- , breakSubstring + + -- ** Breaking into lines and words + , lines + , words + , unlines + , unwords + + -- * Predicates + , isPrefixOf + , isSuffixOf + , isInfixOf + + -- ** View patterns + , stripPrefix + , stripSuffix + , commonPrefixes + + -- * Searching + , filter + , find + , breakOnAll + , partition + + -- , findSubstring + + -- * Indexing + , index + , count + + -- * Zipping and unzipping + , zip + , zipWith + + -- -* Ordered text + -- , sort + ) 
where + +import Prelude (Char, Bool(..), Maybe(..), String, + Eq(..), Ord(..), Ordering(..), Read(..), Show(..), + (&&), (||), (+), (-), (.), ($), (++), + error, flip, fmap, fromIntegral, not, otherwise, quot) +import qualified Prelude as P +#if defined(HAVE_DEEPSEQ) +import Control.DeepSeq (NFData(..)) +#endif +import Data.Int (Int64) +import qualified Data.List as L +import Data.Char (isSpace) +import Data.Data (Data(gfoldl, toConstr, gunfold, dataTypeOf), constrIndex, + Constr, mkConstr, DataType, mkDataType, Fixity(Prefix)) +import Data.Binary (Binary(get, put)) +import Data.Monoid (Monoid(..)) +#if MIN_VERSION_base(4,9,0) +import Data.Semigroup (Semigroup(..)) +#endif +import Data.String (IsString(..)) +import qualified Data.Text as T +import qualified Data.Text.Internal as T +import qualified Data.Text.Internal.Fusion.Common as S +import qualified Data.Text.Unsafe as T +import qualified Data.Text.Internal.Lazy.Fusion as S +import Data.Text.Internal.Fusion.Types (PairS(..)) +import Data.Text.Internal.Lazy.Fusion (stream, unstream) +import Data.Text.Internal.Lazy (Text(..), chunk, empty, foldlChunks, + foldrChunks, smallChunkSize) +import Data.Text.Internal (firstf, safe, text) +import Data.Text.Lazy.Encoding (decodeUtf8', encodeUtf8) +import qualified Data.Text.Internal.Functions as F +import Data.Text.Internal.Lazy.Search (indices) +#if __GLASGOW_HASKELL__ >= 702 +import qualified GHC.CString as GHC +#else +import qualified GHC.Base as GHC +#endif +#if __GLASGOW_HASKELL__ >= 708 +import qualified GHC.Exts as Exts +#endif +import GHC.Prim (Addr#) +#if MIN_VERSION_base(4,7,0) +import Text.Printf (PrintfArg, formatArg, formatString) +#endif + +-- $fusion +-- +-- Most of the functions in this module are subject to /fusion/, +-- meaning that a pipeline of such functions will usually allocate at +-- most one 'Text' value. 
+-- +-- As an example, consider the following pipeline: +-- +-- > import Data.Text.Lazy as T +-- > import Data.Text.Lazy.Encoding as E +-- > import Data.ByteString.Lazy (ByteString) +-- > +-- > countChars :: ByteString -> Int +-- > countChars = T.length . T.toUpper . E.decodeUtf8 +-- +-- From the type signatures involved, this looks like it should +-- allocate one 'ByteString' value, and two 'Text' values. However, +-- when a module is compiled with optimisation enabled under GHC, the +-- two intermediate 'Text' values will be optimised away, and the +-- function will be compiled down to a single loop over the source +-- 'ByteString'. +-- +-- Functions that can be fused by the compiler are documented with the +-- phrase \"Subject to fusion\". + +-- $replacement +-- +-- A 'Text' value is a sequence of Unicode scalar values, as defined +-- in §3.9, definition D76 of the Unicode 5.2 standard: +-- . As +-- such, a 'Text' cannot contain values in the range U+D800 to U+DFFF +-- inclusive. Haskell implementations admit all Unicode code points +-- (§3.4, definition D10) as 'Char' values, including code points +-- from this invalid range. This means that there are some 'Char' +-- values that are not valid Unicode scalar values, and the functions +-- in this module must handle those cases. +-- +-- Within this module, many functions construct a 'Text' from one or +-- more 'Char' values. Those functions will substitute 'Char' values +-- that are not valid Unicode scalar values with the replacement +-- character \"�\" (U+FFFD). Functions that perform this +-- inspection and replacement are documented with the phrase +-- \"Performs replacement on invalid scalar values\". +-- +-- (One reason for this policy of replacement is that internally, a +-- 'Text' value is represented as packed UTF-16 data. Values in the +-- range U+D800 through U+DFFF are used by UTF-16 to denote surrogate +-- code points, and so cannot be represented. 
The functions replace +-- invalid scalar values, instead of dropping them, as a security +-- measure. For details, see Unicode Technical Report 36, §3.5: +-- ) + +equal :: Text -> Text -> Bool +equal Empty Empty = True +equal Empty _ = False +equal _ Empty = False +equal (Chunk a as) (Chunk b bs) = + case compare lenA lenB of + LT -> a == (T.takeWord16 lenA b) && + as `equal` Chunk (T.dropWord16 lenA b) bs + EQ -> a == b && as `equal` bs + GT -> T.takeWord16 lenB a == b && + Chunk (T.dropWord16 lenB a) as `equal` bs + where lenA = T.lengthWord16 a + lenB = T.lengthWord16 b + +instance Eq Text where + (==) = equal + {-# INLINE (==) #-} + +instance Ord Text where + compare = compareText + +compareText :: Text -> Text -> Ordering +compareText Empty Empty = EQ +compareText Empty _ = LT +compareText _ Empty = GT +compareText (Chunk a0 as) (Chunk b0 bs) = outer a0 b0 + where + outer ta@(T.Text arrA offA lenA) tb@(T.Text arrB offB lenB) = go 0 0 + where + go !i !j + | i >= lenA = compareText as (chunk (T.Text arrB (offB+j) (lenB-j)) bs) + | j >= lenB = compareText (chunk (T.Text arrA (offA+i) (lenA-i)) as) bs + | a < b = LT + | a > b = GT + | otherwise = go (i+di) (j+dj) + where T.Iter a di = T.iter ta i + T.Iter b dj = T.iter tb j + +instance Show Text where + showsPrec p ps r = showsPrec p (unpack ps) r + +instance Read Text where + readsPrec p str = [(pack x,y) | (x,y) <- readsPrec p str] + +#if MIN_VERSION_base(4,9,0) +-- Semigroup orphan instances for older GHCs are provided by +-- 'semigroups` package + +instance Semigroup Text where + (<>) = append +#endif + +instance Monoid Text where + mempty = empty +#if MIN_VERSION_base(4,9,0) + mappend = (<>) -- future-proof definition +#else + mappend = append +#endif + mconcat = concat + +instance IsString Text where + fromString = pack + +#if __GLASGOW_HASKELL__ >= 708 +instance Exts.IsList Text where + type Item Text = Char + fromList = pack + toList = unpack +#endif + +#if defined(HAVE_DEEPSEQ) +instance NFData Text where 
+ rnf Empty = () + rnf (Chunk _ ts) = rnf ts +#endif + +instance Binary Text where + put t = put (encodeUtf8 t) + get = do + bs <- get + case decodeUtf8' bs of + P.Left exn -> P.fail (P.show exn) + P.Right a -> P.return a + +-- | This instance preserves data abstraction at the cost of inefficiency. +-- We omit reflection services for the sake of data abstraction. +-- +-- This instance was created by copying the updated behavior of +-- @"Data.Text".@'Data.Text.Text' +instance Data Text where + gfoldl f z txt = z pack `f` (unpack txt) + toConstr _ = packConstr + gunfold k z c = case constrIndex c of + 1 -> k (z pack) + _ -> error "Data.Text.Lazy.Text.gunfold" + dataTypeOf _ = textDataType + +#if MIN_VERSION_base(4,7,0) +-- | Only defined for @base-4.7.0.0@ and later +instance PrintfArg Text where + formatArg txt = formatString $ unpack txt +#endif + +packConstr :: Constr +packConstr = mkConstr textDataType "pack" [] Prefix + +textDataType :: DataType +textDataType = mkDataType "Data.Text.Lazy.Text" [packConstr] + +-- | /O(n)/ Convert a 'String' into a 'Text'. +-- +-- Subject to fusion. Performs replacement on invalid scalar values. +pack :: String -> Text +pack = unstream . S.streamList . L.map safe +{-# INLINE [1] pack #-} + +-- | /O(n)/ Convert a 'Text' into a 'String'. +-- Subject to fusion. +unpack :: Text -> String +unpack t = S.unstreamList (stream t) +{-# INLINE [1] unpack #-} + +-- | /O(n)/ Convert a literal string into a Text. +unpackCString# :: Addr# -> Text +unpackCString# addr# = unstream (S.streamCString# addr#) +{-# NOINLINE unpackCString# #-} + +{-# RULES "TEXT literal" forall a. + unstream (S.streamList (L.map safe (GHC.unpackCString# a))) + = unpackCString# a #-} + +{-# RULES "TEXT literal UTF8" forall a. + unstream (S.streamList (L.map safe (GHC.unpackCStringUtf8# a))) + = unpackCString# a #-} + +{-# RULES "LAZY TEXT empty literal" + unstream (S.streamList (L.map safe [])) + = Empty #-} + +{-# RULES "LAZY TEXT empty literal" forall a. 
+ unstream (S.streamList (L.map safe [a])) + = Chunk (T.singleton a) Empty #-} + +-- | /O(1)/ Convert a character into a Text. Subject to fusion. +-- Performs replacement on invalid scalar values. +singleton :: Char -> Text +singleton c = Chunk (T.singleton c) Empty +{-# INLINE [1] singleton #-} + +{-# RULES +"LAZY TEXT singleton -> fused" [~1] forall c. + singleton c = unstream (S.singleton c) +"LAZY TEXT singleton -> unfused" [1] forall c. + unstream (S.singleton c) = singleton c + #-} + +-- | /O(c)/ Convert a list of strict 'T.Text's into a lazy 'Text'. +fromChunks :: [T.Text] -> Text +fromChunks cs = L.foldr chunk Empty cs + +-- | /O(n)/ Convert a lazy 'Text' into a list of strict 'T.Text's. +toChunks :: Text -> [T.Text] +toChunks cs = foldrChunks (:) [] cs + +-- | /O(n)/ Convert a lazy 'Text' into a strict 'T.Text'. +toStrict :: Text -> T.Text +toStrict t = T.concat (toChunks t) +{-# INLINE [1] toStrict #-} + +-- | /O(c)/ Convert a strict 'T.Text' into a lazy 'Text'. +fromStrict :: T.Text -> Text +fromStrict t = chunk t Empty +{-# INLINE [1] fromStrict #-} + +-- ----------------------------------------------------------------------------- +-- * Basic functions + +-- | /O(n)/ Adds a character to the front of a 'Text'. This function +-- is more costly than its 'List' counterpart because it requires +-- copying a new array. Subject to fusion. +cons :: Char -> Text -> Text +cons c t = Chunk (T.singleton c) t +{-# INLINE [1] cons #-} + +infixr 5 `cons` + +{-# RULES +"LAZY TEXT cons -> fused" [~1] forall c t. + cons c t = unstream (S.cons c (stream t)) +"LAZY TEXT cons -> unfused" [1] forall c t. + unstream (S.cons c (stream t)) = cons c t + #-} + +-- | /O(n)/ Adds a character to the end of a 'Text'. This copies the +-- entire array in the process, unless fused. Subject to fusion. +snoc :: Text -> Char -> Text +snoc t c = foldrChunks Chunk (singleton c) t +{-# INLINE [1] snoc #-} + +{-# RULES +"LAZY TEXT snoc -> fused" [~1] forall t c. 
+ snoc t c = unstream (S.snoc (stream t) c) +"LAZY TEXT snoc -> unfused" [1] forall t c. + unstream (S.snoc (stream t) c) = snoc t c + #-} + +-- | /O(n\/c)/ Appends one 'Text' to another. Subject to fusion. +append :: Text -> Text -> Text +append xs ys = foldrChunks Chunk ys xs +{-# INLINE [1] append #-} + +{-# RULES +"LAZY TEXT append -> fused" [~1] forall t1 t2. + append t1 t2 = unstream (S.append (stream t1) (stream t2)) +"LAZY TEXT append -> unfused" [1] forall t1 t2. + unstream (S.append (stream t1) (stream t2)) = append t1 t2 + #-} + +-- | /O(1)/ Returns the first character and rest of a 'Text', or +-- 'Nothing' if empty. Subject to fusion. +uncons :: Text -> Maybe (Char, Text) +uncons Empty = Nothing +uncons (Chunk t ts) = Just (T.unsafeHead t, ts') + where ts' | T.compareLength t 1 == EQ = ts + | otherwise = Chunk (T.unsafeTail t) ts +{-# INLINE uncons #-} + +-- | /O(1)/ Returns the first character of a 'Text', which must be +-- non-empty. Subject to fusion. +head :: Text -> Char +head t = S.head (stream t) +{-# INLINE head #-} + +-- | /O(1)/ Returns all characters after the head of a 'Text', which +-- must be non-empty. Subject to fusion. +tail :: Text -> Text +tail (Chunk t ts) = chunk (T.tail t) ts +tail Empty = emptyError "tail" +{-# INLINE [1] tail #-} + +{-# RULES +"LAZY TEXT tail -> fused" [~1] forall t. + tail t = unstream (S.tail (stream t)) +"LAZY TEXT tail -> unfused" [1] forall t. + unstream (S.tail (stream t)) = tail t + #-} + +-- | /O(1)/ Returns all but the last character of a 'Text', which must +-- be non-empty. Subject to fusion. +init :: Text -> Text +init (Chunk t0 ts0) = go t0 ts0 + where go t (Chunk t' ts) = Chunk t (go t' ts) + go t Empty = chunk (T.init t) Empty +init Empty = emptyError "init" +{-# INLINE [1] init #-} + +{-# RULES +"LAZY TEXT init -> fused" [~1] forall t. + init t = unstream (S.init (stream t)) +"LAZY TEXT init -> unfused" [1] forall t. 
+ unstream (S.init (stream t)) = init t + #-} + +-- | /O(1)/ Tests whether a 'Text' is empty or not. Subject to +-- fusion. +null :: Text -> Bool +null Empty = True +null _ = False +{-# INLINE [1] null #-} + +{-# RULES +"LAZY TEXT null -> fused" [~1] forall t. + null t = S.null (stream t) +"LAZY TEXT null -> unfused" [1] forall t. + S.null (stream t) = null t + #-} + +-- | /O(1)/ Tests whether a 'Text' contains exactly one character. +-- Subject to fusion. +isSingleton :: Text -> Bool +isSingleton = S.isSingleton . stream +{-# INLINE isSingleton #-} + +-- | /O(1)/ Returns the last character of a 'Text', which must be +-- non-empty. Subject to fusion. +last :: Text -> Char +last Empty = emptyError "last" +last (Chunk t ts) = go t ts + where go _ (Chunk t' ts') = go t' ts' + go t' Empty = T.last t' +{-# INLINE [1] last #-} + +{-# RULES +"LAZY TEXT last -> fused" [~1] forall t. + last t = S.last (stream t) +"LAZY TEXT last -> unfused" [1] forall t. + S.last (stream t) = last t + #-} + +-- | /O(n)/ Returns the number of characters in a 'Text'. +-- Subject to fusion. +length :: Text -> Int64 +length = foldlChunks go 0 + where go l t = l + fromIntegral (T.length t) +{-# INLINE [1] length #-} + +{-# RULES +"LAZY TEXT length -> fused" [~1] forall t. + length t = S.length (stream t) +"LAZY TEXT length -> unfused" [1] forall t. + S.length (stream t) = length t + #-} + +-- | /O(n)/ Compare the count of characters in a 'Text' to a number. +-- Subject to fusion. +-- +-- This function gives the same answer as comparing against the result +-- of 'length', but can short circuit if the count of characters is +-- greater than the number, and hence be more efficient. +compareLength :: Text -> Int64 -> Ordering +compareLength t n = S.compareLengthI (stream t) n +{-# INLINE [1] compareLength #-} + +-- We don't apply those otherwise appealing length-to-compareLength +-- rewrite rules here, because they can change the strictness +-- properties of code. 
+ +-- | /O(n)/ 'map' @f@ @t@ is the 'Text' obtained by applying @f@ to +-- each element of @t@. Subject to fusion. Performs replacement on +-- invalid scalar values. +map :: (Char -> Char) -> Text -> Text +map f t = unstream (S.map (safe . f) (stream t)) +{-# INLINE [1] map #-} + +-- | /O(n)/ The 'intercalate' function takes a 'Text' and a list of +-- 'Text's and concatenates the list after interspersing the first +-- argument between each element of the list. +intercalate :: Text -> [Text] -> Text +intercalate t = concat . (F.intersperse t) +{-# INLINE intercalate #-} + +-- | /O(n)/ The 'intersperse' function takes a character and places it +-- between the characters of a 'Text'. Subject to fusion. Performs +-- replacement on invalid scalar values. +intersperse :: Char -> Text -> Text +intersperse c t = unstream (S.intersperse (safe c) (stream t)) +{-# INLINE intersperse #-} + +-- | /O(n)/ Left-justify a string to the given length, using the +-- specified fill character on the right. Subject to fusion. Performs +-- replacement on invalid scalar values. +-- +-- Examples: +-- +-- > justifyLeft 7 'x' "foo" == "fooxxxx" +-- > justifyLeft 3 'x' "foobar" == "foobar" +justifyLeft :: Int64 -> Char -> Text -> Text +justifyLeft k c t + | len >= k = t + | otherwise = t `append` replicateChar (k-len) c + where len = length t +{-# INLINE [1] justifyLeft #-} + +{-# RULES +"LAZY TEXT justifyLeft -> fused" [~1] forall k c t. + justifyLeft k c t = unstream (S.justifyLeftI k c (stream t)) +"LAZY TEXT justifyLeft -> unfused" [1] forall k c t. + unstream (S.justifyLeftI k c (stream t)) = justifyLeft k c t + #-} + +-- | /O(n)/ Right-justify a string to the given length, using the +-- specified fill character on the left. Performs replacement on +-- invalid scalar values. 
+-- +-- Examples: +-- +-- > justifyRight 7 'x' "bar" == "xxxxbar" +-- > justifyRight 3 'x' "foobar" == "foobar" +justifyRight :: Int64 -> Char -> Text -> Text +justifyRight k c t + | len >= k = t + | otherwise = replicateChar (k-len) c `append` t + where len = length t +{-# INLINE justifyRight #-} + +-- | /O(n)/ Center a string to the given length, using the specified +-- fill character on either side. Performs replacement on invalid +-- scalar values. +-- +-- Examples: +-- +-- > center 8 'x' "HS" = "xxxHSxxx" +center :: Int64 -> Char -> Text -> Text +center k c t + | len >= k = t + | otherwise = replicateChar l c `append` t `append` replicateChar r c + where len = length t + d = k - len + r = d `quot` 2 + l = d - r +{-# INLINE center #-} + +-- | /O(n)/ The 'transpose' function transposes the rows and columns +-- of its 'Text' argument. Note that this function uses 'pack', +-- 'unpack', and the list version of transpose, and is thus not very +-- efficient. +transpose :: [Text] -> [Text] +transpose ts = L.map (\ss -> Chunk (T.pack ss) Empty) + (L.transpose (L.map unpack ts)) +-- TODO: make this fast + +-- | /O(n)/ 'reverse' @t@ returns the elements of @t@ in reverse order. +reverse :: Text -> Text +reverse = rev Empty + where rev a Empty = a + rev a (Chunk t ts) = rev (Chunk (T.reverse t) a) ts + +-- | /O(m+n)/ Replace every non-overlapping occurrence of @needle@ in +-- @haystack@ with @replacement@. +-- +-- This function behaves as though it was defined as follows: +-- +-- @ +-- replace needle replacement haystack = +-- 'intercalate' replacement ('splitOn' needle haystack) +-- @ +-- +-- As this suggests, each occurrence is replaced exactly once. 
So if +-- @needle@ occurs in @replacement@, that occurrence will /not/ itself +-- be replaced recursively: +-- +-- > replace "oo" "foo" "oo" == "foo" +-- +-- In cases where several instances of @needle@ overlap, only the +-- first one will be replaced: +-- +-- > replace "ofo" "bar" "ofofo" == "barfo" +-- +-- In (unlikely) bad cases, this function's time complexity degrades +-- towards /O(n*m)/. +replace :: Text + -- ^ @needle@ to search for. If this string is empty, an + -- error will occur. + -> Text + -- ^ @replacement@ to replace @needle@ with. + -> Text + -- ^ @haystack@ in which to search. + -> Text +replace s d = intercalate d . splitOn s +{-# INLINE replace #-} + +-- ---------------------------------------------------------------------------- +-- ** Case conversions (folds) + +-- $case +-- +-- With Unicode text, it is incorrect to use combinators like @map +-- toUpper@ to case convert each character of a string individually. +-- Instead, use the whole-string case conversion functions from this +-- module. For correctness in different writing systems, these +-- functions may map one input character to two or three output +-- characters. + +-- | /O(n)/ Convert a string to folded case. Subject to fusion. +-- +-- This function is mainly useful for performing caseless (or case +-- insensitive) string comparisons. +-- +-- A string @x@ is a caseless match for a string @y@ if and only if: +-- +-- @toCaseFold x == toCaseFold y@ +-- +-- The result string may be longer than the input string, and may +-- differ from applying 'toLower' to the input string. For instance, +-- the Armenian small ligature men now (U+FB13) is case folded to the +-- bigram men now (U+0574 U+0576), while the micro sign (U+00B5) is +-- case folded to the Greek small letter letter mu (U+03BC) instead of +-- itself. 
+toCaseFold :: Text -> Text +toCaseFold t = unstream (S.toCaseFold (stream t)) +{-# INLINE [0] toCaseFold #-} + +-- | /O(n)/ Convert a string to lower case, using simple case +-- conversion. Subject to fusion. +-- +-- The result string may be longer than the input string. For +-- instance, the Latin capital letter I with dot above (U+0130) maps +-- to the sequence Latin small letter i (U+0069) followed by combining +-- dot above (U+0307). +toLower :: Text -> Text +toLower t = unstream (S.toLower (stream t)) +{-# INLINE toLower #-} + +-- | /O(n)/ Convert a string to upper case, using simple case +-- conversion. Subject to fusion. +-- +-- The result string may be longer than the input string. For +-- instance, the German eszett (U+00DF) maps to the two-letter +-- sequence SS. +toUpper :: Text -> Text +toUpper t = unstream (S.toUpper (stream t)) +{-# INLINE toUpper #-} + + +-- | /O(n)/ Convert a string to title case, using simple case +-- conversion. Subject to fusion. +-- +-- The first letter of the input is converted to title case, as is +-- every subsequent letter that immediately follows a non-letter. +-- Every letter that immediately follows another letter is converted +-- to lower case. +-- +-- The result string may be longer than the input string. For example, +-- the Latin small ligature fl (U+FB02) is converted to the +-- sequence Latin capital letter F (U+0046) followed by Latin small +-- letter l (U+006C). +-- +-- /Note/: this function does not take language or culture specific +-- rules into account. For instance, in English, different style +-- guides disagree on whether the book name \"The Hill of the Red +-- Fox\" is correctly title cased—but this function will +-- capitalize /every/ word. 
+toTitle :: Text -> Text +toTitle t = unstream (S.toTitle (stream t)) +{-# INLINE toTitle #-} + +-- | /O(n)/ 'foldl', applied to a binary operator, a starting value +-- (typically the left-identity of the operator), and a 'Text', +-- reduces the 'Text' using the binary operator, from left to right. +-- Subject to fusion. +foldl :: (a -> Char -> a) -> a -> Text -> a +foldl f z t = S.foldl f z (stream t) +{-# INLINE foldl #-} + +-- | /O(n)/ A strict version of 'foldl'. +-- Subject to fusion. +foldl' :: (a -> Char -> a) -> a -> Text -> a +foldl' f z t = S.foldl' f z (stream t) +{-# INLINE foldl' #-} + +-- | /O(n)/ A variant of 'foldl' that has no starting value argument, +-- and thus must be applied to a non-empty 'Text'. Subject to fusion. +foldl1 :: (Char -> Char -> Char) -> Text -> Char +foldl1 f t = S.foldl1 f (stream t) +{-# INLINE foldl1 #-} + +-- | /O(n)/ A strict version of 'foldl1'. Subject to fusion. +foldl1' :: (Char -> Char -> Char) -> Text -> Char +foldl1' f t = S.foldl1' f (stream t) +{-# INLINE foldl1' #-} + +-- | /O(n)/ 'foldr', applied to a binary operator, a starting value +-- (typically the right-identity of the operator), and a 'Text', +-- reduces the 'Text' using the binary operator, from right to left. +-- Subject to fusion. +foldr :: (Char -> a -> a) -> a -> Text -> a +foldr f z t = S.foldr f z (stream t) +{-# INLINE foldr #-} + +-- | /O(n)/ A variant of 'foldr' that has no starting value argument, +-- and thus must be applied to a non-empty 'Text'. Subject to +-- fusion. +foldr1 :: (Char -> Char -> Char) -> Text -> Char +foldr1 f t = S.foldr1 f (stream t) +{-# INLINE foldr1 #-} + +-- | /O(n)/ Concatenate a list of 'Text's. +concat :: [Text] -> Text +concat = to + where + go Empty css = to css + go (Chunk c cs) css = Chunk c (go cs css) + to [] = Empty + to (cs:css) = go cs css +{-# INLINE concat #-} + +-- | /O(n)/ Map a function over a 'Text' that results in a 'Text', and +-- concatenate the results. 
+concatMap :: (Char -> Text) -> Text -> Text +concatMap f = concat . foldr ((:) . f) [] +{-# INLINE concatMap #-} + +-- | /O(n)/ 'any' @p@ @t@ determines whether any character in the +-- 'Text' @t@ satisfies the predicate @p@. Subject to fusion. +any :: (Char -> Bool) -> Text -> Bool +any p t = S.any p (stream t) +{-# INLINE any #-} + +-- | /O(n)/ 'all' @p@ @t@ determines whether all characters in the +-- 'Text' @t@ satisfy the predicate @p@. Subject to fusion. +all :: (Char -> Bool) -> Text -> Bool +all p t = S.all p (stream t) +{-# INLINE all #-} + +-- | /O(n)/ 'maximum' returns the maximum value from a 'Text', which +-- must be non-empty. Subject to fusion. +maximum :: Text -> Char +maximum t = S.maximum (stream t) +{-# INLINE maximum #-} + +-- | /O(n)/ 'minimum' returns the minimum value from a 'Text', which +-- must be non-empty. Subject to fusion. +minimum :: Text -> Char +minimum t = S.minimum (stream t) +{-# INLINE minimum #-} + +-- | /O(n)/ 'scanl' is similar to 'foldl', but returns a list of +-- successive reduced values from the left. Subject to fusion. +-- Performs replacement on invalid scalar values. +-- +-- > scanl f z [x1, x2, ...] == [z, z `f` x1, (z `f` x1) `f` x2, ...] +-- +-- Note that +-- +-- > last (scanl f z xs) == foldl f z xs. +scanl :: (Char -> Char -> Char) -> Char -> Text -> Text +scanl f z t = unstream (S.scanl g z (stream t)) + where g a b = safe (f a b) +{-# INLINE scanl #-} + +-- | /O(n)/ 'scanl1' is a variant of 'scanl' that has no starting +-- value argument. Subject to fusion. Performs replacement on +-- invalid scalar values. +-- +-- > scanl1 f [x1, x2, ...] == [x1, x1 `f` x2, ...] +scanl1 :: (Char -> Char -> Char) -> Text -> Text +scanl1 f t0 = case uncons t0 of + Nothing -> empty + Just (t,ts) -> scanl f t ts +{-# INLINE scanl1 #-} + +-- | /O(n)/ 'scanr' is the right-to-left dual of 'scanl'. Performs +-- replacement on invalid scalar values. +-- +-- > scanr f v == reverse . scanl (flip f) v . 
reverse +scanr :: (Char -> Char -> Char) -> Char -> Text -> Text +scanr f v = reverse . scanl g v . reverse + where g a b = safe (f b a) + +-- | /O(n)/ 'scanr1' is a variant of 'scanr' that has no starting +-- value argument. Performs replacement on invalid scalar values. +scanr1 :: (Char -> Char -> Char) -> Text -> Text +scanr1 f t | null t = empty + | otherwise = scanr f (last t) (init t) + +-- | /O(n)/ Like a combination of 'map' and 'foldl''. Applies a +-- function to each element of a 'Text', passing an accumulating +-- parameter from left to right, and returns a final 'Text'. Performs +-- replacement on invalid scalar values. +mapAccumL :: (a -> Char -> (a,Char)) -> a -> Text -> (a, Text) +mapAccumL f = go + where + go z (Chunk c cs) = (z'', Chunk c' cs') + where (z', c') = T.mapAccumL f z c + (z'', cs') = go z' cs + go z Empty = (z, Empty) +{-# INLINE mapAccumL #-} + +-- | The 'mapAccumR' function behaves like a combination of 'map' and +-- a strict 'foldr'; it applies a function to each element of a +-- 'Text', passing an accumulating parameter from right to left, and +-- returning a final value of this accumulator together with the new +-- 'Text'. Performs replacement on invalid scalar values. +mapAccumR :: (a -> Char -> (a,Char)) -> a -> Text -> (a, Text) +mapAccumR f = go + where + go z (Chunk c cs) = (z'', Chunk c' cs') + where (z'', c') = T.mapAccumR f z' c + (z', cs') = go z cs + go z Empty = (z, Empty) +{-# INLINE mapAccumR #-} + +-- | @'repeat' x@ is an infinite 'Text', with @x@ the value of every +-- element. +repeat :: Char -> Text +repeat c = let t = Chunk (T.replicate smallChunkSize (T.singleton c)) t + in t + +-- | /O(n*m)/ 'replicate' @n@ @t@ is a 'Text' consisting of the input +-- @t@ repeated @n@ times. 
+replicate :: Int64 -> Text -> Text +replicate n t + | null t || n <= 0 = empty + | isSingleton t = replicateChar n (head t) + | otherwise = concat (rep 0) + where rep !i | i >= n = [] + | otherwise = t : rep (i+1) +{-# INLINE [1] replicate #-} + +-- | 'cycle' ties a finite, non-empty 'Text' into a circular one, or +-- equivalently, the infinite repetition of the original 'Text'. +cycle :: Text -> Text +cycle Empty = emptyError "cycle" +cycle t = let t' = foldrChunks Chunk t' t + in t' + +-- | @'iterate' f x@ returns an infinite 'Text' of repeated applications +-- of @f@ to @x@: +-- +-- > iterate f x == [x, f x, f (f x), ...] +iterate :: (Char -> Char) -> Char -> Text +iterate f c = let t c' = Chunk (T.singleton c') (t (f c')) + in t c + +-- | /O(n)/ 'replicateChar' @n@ @c@ is a 'Text' of length @n@ with @c@ the +-- value of every element. Subject to fusion. +replicateChar :: Int64 -> Char -> Text +replicateChar n c = unstream (S.replicateCharI n (safe c)) +{-# INLINE replicateChar #-} + +{-# RULES +"LAZY TEXT replicate/singleton -> replicateChar" [~1] forall n c. + replicate n (singleton c) = replicateChar n c + #-} + +-- | /O(n)/, where @n@ is the length of the result. The 'unfoldr' +-- function is analogous to the List 'L.unfoldr'. 'unfoldr' builds a +-- 'Text' from a seed value. The function takes the element and +-- returns 'Nothing' if it is done producing the 'Text', otherwise +-- 'Just' @(a,b)@. In this case, @a@ is the next 'Char' in the +-- string, and @b@ is the seed value for further production. Performs +-- replacement on invalid scalar values. +unfoldr :: (a -> Maybe (Char,a)) -> a -> Text +unfoldr f s = unstream (S.unfoldr (firstf safe . f) s) +{-# INLINE unfoldr #-} + +-- | /O(n)/ Like 'unfoldr', 'unfoldrN' builds a 'Text' from a seed +-- value. However, the length of the result should be limited by the +-- first argument to 'unfoldrN'. 
This function is more efficient than +-- 'unfoldr' when the maximum length of the result is known and +-- correct, otherwise its performance is similar to 'unfoldr'. +-- Performs replacement on invalid scalar values. +unfoldrN :: Int64 -> (a -> Maybe (Char,a)) -> a -> Text +unfoldrN n f s = unstream (S.unfoldrN n (firstf safe . f) s) +{-# INLINE unfoldrN #-} + +-- | /O(n)/ 'take' @n@, applied to a 'Text', returns the prefix of the +-- 'Text' of length @n@, or the 'Text' itself if @n@ is greater than +-- the length of the Text. Subject to fusion. +take :: Int64 -> Text -> Text +take i _ | i <= 0 = Empty +take i t0 = take' i t0 + where take' 0 _ = Empty + take' _ Empty = Empty + take' n (Chunk t ts) + | n < len = Chunk (T.take (fromIntegral n) t) Empty + | otherwise = Chunk t (take' (n - len) ts) + where len = fromIntegral (T.length t) +{-# INLINE [1] take #-} + +{-# RULES +"LAZY TEXT take -> fused" [~1] forall n t. + take n t = unstream (S.take n (stream t)) +"LAZY TEXT take -> unfused" [1] forall n t. + unstream (S.take n (stream t)) = take n t + #-} + +-- | /O(n)/ 'takeEnd' @n@ @t@ returns the suffix remaining after +-- taking @n@ characters from the end of @t@. +-- +-- Examples: +-- +-- > takeEnd 3 "foobar" == "bar" +takeEnd :: Int64 -> Text -> Text +takeEnd n t0 + | n <= 0 = empty + | otherwise = takeChunk n empty . L.reverse . toChunks $ t0 + where takeChunk _ acc [] = acc + takeChunk i acc (t:ts) + | i <= l = chunk (T.takeEnd (fromIntegral i) t) acc + | otherwise = takeChunk (i-l) (Chunk t acc) ts + where l = fromIntegral (T.length t) + +-- | /O(n)/ 'drop' @n@, applied to a 'Text', returns the suffix of the +-- 'Text' after the first @n@ characters, or the empty 'Text' if @n@ +-- is greater than the length of the 'Text'. Subject to fusion. 
+drop :: Int64 -> Text -> Text +drop i t0 + | i <= 0 = t0 + | otherwise = drop' i t0 + where drop' 0 ts = ts + drop' _ Empty = Empty + drop' n (Chunk t ts) + | n < len = Chunk (T.drop (fromIntegral n) t) ts + | otherwise = drop' (n - len) ts + where len = fromIntegral (T.length t) +{-# INLINE [1] drop #-} + +{-# RULES +"LAZY TEXT drop -> fused" [~1] forall n t. + drop n t = unstream (S.drop n (stream t)) +"LAZY TEXT drop -> unfused" [1] forall n t. + unstream (S.drop n (stream t)) = drop n t + #-} + +-- | /O(n)/ 'dropEnd' @n@ @t@ returns the prefix remaining after +-- dropping @n@ characters from the end of @t@. +-- +-- Examples: +-- +-- > dropEnd 3 "foobar" == "foo" +dropEnd :: Int64 -> Text -> Text +dropEnd n t0 + | n <= 0 = t0 + | otherwise = dropChunk n . L.reverse . toChunks $ t0 + where dropChunk _ [] = empty + dropChunk m (t:ts) + | m >= l = dropChunk (m-l) ts + | otherwise = fromChunks . L.reverse $ + T.dropEnd (fromIntegral m) t : ts + where l = fromIntegral (T.length t) + +-- | /O(n)/ 'dropWords' @n@ returns the suffix with @n@ 'Word16' +-- values dropped, or the empty 'Text' if @n@ is greater than the +-- number of 'Word16' values present. +dropWords :: Int64 -> Text -> Text +dropWords i t0 + | i <= 0 = t0 + | otherwise = drop' i t0 + where drop' 0 ts = ts + drop' _ Empty = Empty + drop' n (Chunk (T.Text arr off len) ts) + | n < len' = chunk (text arr (off+n') (len-n')) ts + | otherwise = drop' (n - len') ts + where len' = fromIntegral len + n' = fromIntegral n + +-- | /O(n)/ 'takeWhile', applied to a predicate @p@ and a 'Text', +-- returns the longest prefix (possibly empty) of elements that +-- satisfy @p@. Subject to fusion. +takeWhile :: (Char -> Bool) -> Text -> Text +takeWhile p t0 = takeWhile' t0 + where takeWhile' Empty = Empty + takeWhile' (Chunk t ts) = + case T.findIndex (not . 
p) t of + Just n | n > 0 -> Chunk (T.take n t) Empty + | otherwise -> Empty + Nothing -> Chunk t (takeWhile' ts) +{-# INLINE [1] takeWhile #-} + +{-# RULES +"LAZY TEXT takeWhile -> fused" [~1] forall p t. + takeWhile p t = unstream (S.takeWhile p (stream t)) +"LAZY TEXT takeWhile -> unfused" [1] forall p t. + unstream (S.takeWhile p (stream t)) = takeWhile p t + #-} +-- | /O(n)/ 'takeWhileEnd', applied to a predicate @p@ and a 'Text', +-- returns the longest suffix (possibly empty) of elements that +-- satisfy @p@. +-- Examples: +-- +-- > takeWhileEnd (=='o') "foo" == "oo" +takeWhileEnd :: (Char -> Bool) -> Text -> Text +takeWhileEnd p = takeChunk empty . L.reverse . toChunks + where takeChunk acc [] = acc + takeChunk acc (t:ts) = if T.length t' < T.length t + then (Chunk t' acc) + else takeChunk (Chunk t' acc) ts + where t' = T.takeWhileEnd p t +{-# INLINE takeWhileEnd #-} + +-- | /O(n)/ 'dropWhile' @p@ @t@ returns the suffix remaining after +-- 'takeWhile' @p@ @t@. Subject to fusion. +dropWhile :: (Char -> Bool) -> Text -> Text +dropWhile p t0 = dropWhile' t0 + where dropWhile' Empty = Empty + dropWhile' (Chunk t ts) = + case T.findIndex (not . p) t of + Just n -> Chunk (T.drop n t) ts + Nothing -> dropWhile' ts +{-# INLINE [1] dropWhile #-} + +{-# RULES +"LAZY TEXT dropWhile -> fused" [~1] forall p t. + dropWhile p t = unstream (S.dropWhile p (stream t)) +"LAZY TEXT dropWhile -> unfused" [1] forall p t. + unstream (S.dropWhile p (stream t)) = dropWhile p t + #-} + +-- | /O(n)/ 'dropWhileEnd' @p@ @t@ returns the prefix remaining after +-- dropping characters that satisfy the predicate @p@ from the end of +-- @t@. +-- +-- Examples: +-- +-- > dropWhileEnd (=='.') "foo..." 
== "foo" +dropWhileEnd :: (Char -> Bool) -> Text -> Text +dropWhileEnd p = go + where go Empty = Empty + go (Chunk t Empty) = if T.null t' + then Empty + else Chunk t' Empty + where t' = T.dropWhileEnd p t + go (Chunk t ts) = case go ts of + Empty -> go (Chunk t Empty) + ts' -> Chunk t ts' +{-# INLINE dropWhileEnd #-} + +-- | /O(n)/ 'dropAround' @p@ @t@ returns the substring remaining after +-- dropping characters that satisfy the predicate @p@ from both the +-- beginning and end of @t@. Subject to fusion. +dropAround :: (Char -> Bool) -> Text -> Text +dropAround p = dropWhile p . dropWhileEnd p +{-# INLINE [1] dropAround #-} + +-- | /O(n)/ Remove leading white space from a string. Equivalent to: +-- +-- > dropWhile isSpace +stripStart :: Text -> Text +stripStart = dropWhile isSpace +{-# INLINE [1] stripStart #-} + +-- | /O(n)/ Remove trailing white space from a string. Equivalent to: +-- +-- > dropWhileEnd isSpace +stripEnd :: Text -> Text +stripEnd = dropWhileEnd isSpace +{-# INLINE [1] stripEnd #-} + +-- | /O(n)/ Remove leading and trailing white space from a string. +-- Equivalent to: +-- +-- > dropAround isSpace +strip :: Text -> Text +strip = dropAround isSpace +{-# INLINE [1] strip #-} + +-- | /O(n)/ 'splitAt' @n t@ returns a pair whose first element is a +-- prefix of @t@ of length @n@, and whose second is the remainder of +-- the string. It is equivalent to @('take' n t, 'drop' n t)@. +splitAt :: Int64 -> Text -> (Text, Text) +splitAt = loop + where loop _ Empty = (empty, empty) + loop n t | n <= 0 = (empty, t) + loop n (Chunk t ts) + | n < len = let (t',t'') = T.splitAt (fromIntegral n) t + in (Chunk t' Empty, Chunk t'' ts) + | otherwise = let (ts',ts'') = loop (n - len) ts + in (Chunk t ts', ts'') + where len = fromIntegral (T.length t) + +-- | /O(n)/ 'splitAtWord' @n t@ returns a strict pair whose first +-- element is a prefix of @t@ whose chunks contain @n@ 'Word16' +-- values, and whose second is the remainder of the string. 
+splitAtWord :: Int64 -> Text -> PairS Text Text +splitAtWord _ Empty = empty :*: empty +splitAtWord x (Chunk c@(T.Text arr off len) cs) + | y >= len = let h :*: t = splitAtWord (x-fromIntegral len) cs + in Chunk c h :*: t + | otherwise = chunk (text arr off y) empty :*: + chunk (text arr (off+y) (len-y)) cs + where y = fromIntegral x + +-- | /O(n+m)/ Find the first instance of @needle@ (which must be +-- non-'null') in @haystack@. The first element of the returned tuple +-- is the prefix of @haystack@ before @needle@ is matched. The second +-- is the remainder of @haystack@, starting with the match. +-- +-- Examples: +-- +-- > breakOn "::" "a::b::c" ==> ("a", "::b::c") +-- > breakOn "/" "foobar" ==> ("foobar", "") +-- +-- Laws: +-- +-- > append prefix match == haystack +-- > where (prefix, match) = breakOn needle haystack +-- +-- If you need to break a string by a substring repeatedly (e.g. you +-- want to break on every instance of a substring), use 'breakOnAll' +-- instead, as it has lower startup overhead. +-- +-- This function is strict in its first argument, and lazy in its +-- second. +-- +-- In (unlikely) bad cases, this function's time complexity degrades +-- towards /O(n*m)/. +breakOn :: Text -> Text -> (Text, Text) +breakOn pat src + | null pat = emptyError "breakOn" + | otherwise = case indices pat src of + [] -> (src, empty) + (x:_) -> let h :*: t = splitAtWord x src + in (h, t) + +-- | /O(n+m)/ Similar to 'breakOn', but searches from the end of the string. +-- +-- The first element of the returned tuple is the prefix of @haystack@ +-- up to and including the last match of @needle@. The second is the +-- remainder of @haystack@, following the match. +-- +-- > breakOnEnd "::" "a::b::c" ==> ("a::b::", "c") +breakOnEnd :: Text -> Text -> (Text, Text) +breakOnEnd pat src = let (a,b) = breakOn (reverse pat) (reverse src) + in (reverse b, reverse a) +{-# INLINE breakOnEnd #-} + +-- | /O(n+m)/ Find all non-overlapping instances of @needle@ in +-- @haystack@. 
Each element of the returned list consists of a pair: +-- +-- * The entire string prior to the /k/th match (i.e. the prefix) +-- +-- * The /k/th match, followed by the remainder of the string +-- +-- Examples: +-- +-- > breakOnAll "::" "" +-- > ==> [] +-- > breakOnAll "/" "a/b/c/" +-- > ==> [("a", "/b/c/"), ("a/b", "/c/"), ("a/b/c", "/")] +-- +-- This function is strict in its first argument, and lazy in its +-- second. +-- +-- In (unlikely) bad cases, this function's time complexity degrades +-- towards /O(n*m)/. +-- +-- The @needle@ parameter may not be empty. +breakOnAll :: Text -- ^ @needle@ to search for + -> Text -- ^ @haystack@ in which to search + -> [(Text, Text)] +breakOnAll pat src + | null pat = emptyError "breakOnAll" + | otherwise = go 0 empty src (indices pat src) + where + go !n p s (x:xs) = let h :*: t = splitAtWord (x-n) s + h' = append p h + in (h',t) : go x h' t xs + go _ _ _ _ = [] + +-- | /O(n)/ 'break' is like 'span', but the prefix returned is over +-- elements that fail the predicate @p@. +break :: (Char -> Bool) -> Text -> (Text, Text) +break p t0 = break' t0 + where break' Empty = (empty, empty) + break' c@(Chunk t ts) = + case T.findIndex p t of + Nothing -> let (ts', ts'') = break' ts + in (Chunk t ts', ts'') + Just n | n == 0 -> (Empty, c) + | otherwise -> let (a,b) = T.splitAt n t + in (Chunk a Empty, Chunk b ts) + +-- | /O(n)/ 'span', applied to a predicate @p@ and text @t@, returns +-- a pair whose first element is the longest prefix (possibly empty) +-- of @t@ of elements that satisfy @p@, and whose second is the +-- remainder of the list. +span :: (Char -> Bool) -> Text -> (Text, Text) +span p = break (not . p) +{-# INLINE span #-} + +-- | The 'group' function takes a 'Text' and returns a list of 'Text's +-- such that the concatenation of the result is equal to the argument. +-- Moreover, each sublist in the result contains only equal elements. 
+-- For example, +-- +-- > group "Mississippi" = ["M","i","ss","i","ss","i","pp","i"] +-- +-- It is a special case of 'groupBy', which allows the programmer to +-- supply their own equality test. +group :: Text -> [Text] +group = groupBy (==) +{-# INLINE group #-} + +-- | The 'groupBy' function is the non-overloaded version of 'group'. +groupBy :: (Char -> Char -> Bool) -> Text -> [Text] +groupBy _ Empty = [] +groupBy eq (Chunk t ts) = cons x ys : groupBy eq zs + where (ys,zs) = span (eq x) xs + x = T.unsafeHead t + xs = chunk (T.unsafeTail t) ts + +-- | /O(n)/ Return all initial segments of the given 'Text', +-- shortest first. +inits :: Text -> [Text] +inits = (Empty :) . inits' + where inits' Empty = [] + inits' (Chunk t ts) = L.map (\t' -> Chunk t' Empty) (L.tail (T.inits t)) + ++ L.map (Chunk t) (inits' ts) + +-- | /O(n)/ Return all final segments of the given 'Text', longest +-- first. +tails :: Text -> [Text] +tails Empty = Empty : [] +tails ts@(Chunk t ts') + | T.length t == 1 = ts : tails ts' + | otherwise = ts : tails (Chunk (T.unsafeTail t) ts') + +-- $split +-- +-- Splitting functions in this library do not perform character-wise +-- copies to create substrings; they just construct new 'Text's that +-- are slices of the original. + +-- | /O(m+n)/ Break a 'Text' into pieces separated by the first 'Text' +-- argument (which cannot be an empty string), consuming the +-- delimiter. An empty delimiter is invalid, and will cause an error +-- to be raised. +-- +-- Examples: +-- +-- > splitOn "\r\n" "a\r\nb\r\nd\r\ne" == ["a","b","d","e"] +-- > splitOn "aaa" "aaaXaaaXaaaXaaa" == ["","X","X","X",""] +-- > splitOn "x" "x" == ["",""] +-- +-- and +-- +-- > intercalate s . splitOn s == id +-- > splitOn (singleton c) == split (==c) +-- +-- (Note: the string @s@ to split on above cannot be empty.) +-- +-- This function is strict in its first argument, and lazy in its +-- second. 
+-- +-- In (unlikely) bad cases, this function's time complexity degrades +-- towards /O(n*m)/. +splitOn :: Text + -- ^ String to split on. If this string is empty, an error + -- will occur. + -> Text + -- ^ Input text. + -> [Text] +splitOn pat src + | null pat = emptyError "splitOn" + | isSingleton pat = split (== head pat) src + | otherwise = go 0 (indices pat src) src + where + go _ [] cs = [cs] + go !i (x:xs) cs = let h :*: t = splitAtWord (x-i) cs + in h : go (x+l) xs (dropWords l t) + l = foldlChunks (\a (T.Text _ _ b) -> a + fromIntegral b) 0 pat +{-# INLINE [1] splitOn #-} + +{-# RULES +"LAZY TEXT splitOn/singleton -> split/==" [~1] forall c t. + splitOn (singleton c) t = split (==c) t + #-} + +-- | /O(n)/ Splits a 'Text' into components delimited by separators, +-- where the predicate returns True for a separator element. The +-- resulting components do not contain the separators. Two adjacent +-- separators result in an empty component in the output. eg. +-- +-- > split (=='a') "aabbaca" == ["","","bb","c",""] +-- > split (=='a') [] == [""] +split :: (Char -> Bool) -> Text -> [Text] +split _ Empty = [Empty] +split p (Chunk t0 ts0) = comb [] (T.split p t0) ts0 + where comb acc (s:[]) Empty = revChunks (s:acc) : [] + comb acc (s:[]) (Chunk t ts) = comb (s:acc) (T.split p t) ts + comb acc (s:ss) ts = revChunks (s:acc) : comb [] ss ts + comb _ [] _ = impossibleError "split" +{-# INLINE split #-} + +-- | /O(n)/ Splits a 'Text' into components of length @k@. The last +-- element may be shorter than the other chunks, depending on the +-- length of the input. Examples: +-- +-- > chunksOf 3 "foobarbaz" == ["foo","bar","baz"] +-- > chunksOf 4 "haskell.org" == ["hask","ell.","org"] +chunksOf :: Int64 -> Text -> [Text] +chunksOf k = go + where + go t = case splitAt k t of + (a,b) | null a -> [] + | otherwise -> a : go b +{-# INLINE chunksOf #-} + +-- | /O(n)/ Breaks a 'Text' up into a list of 'Text's at +-- newline 'Char's. 
The resulting strings do not contain newlines. +lines :: Text -> [Text] +lines Empty = [] +lines t = let (l,t') = break ((==) '\n') t + in l : if null t' then [] + else lines (tail t') + +-- | /O(n)/ Breaks a 'Text' up into a list of words, delimited by 'Char's +-- representing white space. +words :: Text -> [Text] +words = L.filter (not . null) . split isSpace +{-# INLINE words #-} + +-- | /O(n)/ Joins lines, after appending a terminating newline to +-- each. +unlines :: [Text] -> Text +unlines = concat . L.map (`snoc` '\n') +{-# INLINE unlines #-} + +-- | /O(n)/ Joins words using single space characters. +unwords :: [Text] -> Text +unwords = intercalate (singleton ' ') +{-# INLINE unwords #-} + +-- | /O(n)/ The 'isPrefixOf' function takes two 'Text's and returns +-- 'True' iff the first is a prefix of the second. Subject to fusion. +isPrefixOf :: Text -> Text -> Bool +isPrefixOf Empty _ = True +isPrefixOf _ Empty = False +isPrefixOf (Chunk x xs) (Chunk y ys) + | lx == ly = x == y && isPrefixOf xs ys + | lx < ly = x == yh && isPrefixOf xs (Chunk yt ys) + | otherwise = xh == y && isPrefixOf (Chunk xt xs) ys + where (xh,xt) = T.splitAt ly x + (yh,yt) = T.splitAt lx y + lx = T.length x + ly = T.length y +{-# INLINE [1] isPrefixOf #-} + +{-# RULES +"LAZY TEXT isPrefixOf -> fused" [~1] forall s t. + isPrefixOf s t = S.isPrefixOf (stream s) (stream t) +"LAZY TEXT isPrefixOf -> unfused" [1] forall s t. + S.isPrefixOf (stream s) (stream t) = isPrefixOf s t + #-} + +-- | /O(n)/ The 'isSuffixOf' function takes two 'Text's and returns +-- 'True' iff the first is a suffix of the second. +isSuffixOf :: Text -> Text -> Bool +isSuffixOf x y = reverse x `isPrefixOf` reverse y +{-# INLINE isSuffixOf #-} +-- TODO: a better implementation + +-- | /O(n+m)/ The 'isInfixOf' function takes two 'Text's and returns +-- 'True' iff the first is contained, wholly and intact, anywhere +-- within the second. +-- +-- This function is strict in its first argument, and lazy in its +-- second. 
+-- +-- In (unlikely) bad cases, this function's time complexity degrades +-- towards /O(n*m)/. +isInfixOf :: Text -> Text -> Bool +isInfixOf needle haystack + | null needle = True + | isSingleton needle = S.elem (head needle) . S.stream $ haystack + | otherwise = not . L.null . indices needle $ haystack +{-# INLINE [1] isInfixOf #-} + +{-# RULES +"LAZY TEXT isInfixOf/singleton -> S.elem/S.stream" [~1] forall n h. + isInfixOf (singleton n) h = S.elem n (S.stream h) + #-} + +------------------------------------------------------------------------------- +-- * View patterns + +-- | /O(n)/ Return the suffix of the second string if its prefix +-- matches the entire first string. +-- +-- Examples: +-- +-- > stripPrefix "foo" "foobar" == Just "bar" +-- > stripPrefix "" "baz" == Just "baz" +-- > stripPrefix "foo" "quux" == Nothing +-- +-- This is particularly useful with the @ViewPatterns@ extension to +-- GHC, as follows: +-- +-- > {-# LANGUAGE ViewPatterns #-} +-- > import Data.Text.Lazy as T +-- > +-- > fnordLength :: Text -> Int +-- > fnordLength (stripPrefix "fnord" -> Just suf) = T.length suf +-- > fnordLength _ = -1 +stripPrefix :: Text -> Text -> Maybe Text +stripPrefix p t + | null p = Just t + | otherwise = case commonPrefixes p t of + Just (_,c,r) | null c -> Just r + _ -> Nothing + +-- | /O(n)/ Find the longest non-empty common prefix of two strings +-- and return it, along with the suffixes of each string at which they +-- no longer match. +-- +-- If the strings do not have a common prefix or either one is empty, +-- this function returns 'Nothing'. 
+-- +-- Examples: +-- +-- > commonPrefixes "foobar" "fooquux" == Just ("foo","bar","quux") +-- > commonPrefixes "veeble" "fetzer" == Nothing +-- > commonPrefixes "" "baz" == Nothing +commonPrefixes :: Text -> Text -> Maybe (Text,Text,Text) +commonPrefixes Empty _ = Nothing +commonPrefixes _ Empty = Nothing +commonPrefixes a0 b0 = Just (go a0 b0 []) + where + go t0@(Chunk x xs) t1@(Chunk y ys) ps + = case T.commonPrefixes x y of + Just (p,a,b) + | T.null a -> go xs (chunk b ys) (p:ps) + | T.null b -> go (chunk a xs) ys (p:ps) + | otherwise -> (fromChunks (L.reverse (p:ps)),chunk a xs, chunk b ys) + Nothing -> (fromChunks (L.reverse ps),t0,t1) + go t0 t1 ps = (fromChunks (L.reverse ps),t0,t1) + +-- | /O(n)/ Return the prefix of the second string if its suffix +-- matches the entire first string. +-- +-- Examples: +-- +-- > stripSuffix "bar" "foobar" == Just "foo" +-- > stripSuffix "" "baz" == Just "baz" +-- > stripSuffix "foo" "quux" == Nothing +-- +-- This is particularly useful with the @ViewPatterns@ extension to +-- GHC, as follows: +-- +-- > {-# LANGUAGE ViewPatterns #-} +-- > import Data.Text.Lazy as T +-- > +-- > quuxLength :: Text -> Int +-- > quuxLength (stripSuffix "quux" -> Just pre) = T.length pre +-- > quuxLength _ = -1 +stripSuffix :: Text -> Text -> Maybe Text +stripSuffix p t = reverse `fmap` stripPrefix (reverse p) (reverse t) + +-- | /O(n)/ 'filter', applied to a predicate and a 'Text', +-- returns a 'Text' containing those characters that satisfy the +-- predicate. +filter :: (Char -> Bool) -> Text -> Text +filter p t = unstream (S.filter p (stream t)) +{-# INLINE filter #-} + +-- | /O(n)/ The 'find' function takes a predicate and a 'Text', and +-- returns the first element in matching the predicate, or 'Nothing' +-- if there is no such element. 
+find :: (Char -> Bool) -> Text -> Maybe Char +find p t = S.findBy p (stream t) +{-# INLINE find #-} + +-- | /O(n)/ The 'partition' function takes a predicate and a 'Text', +-- and returns the pair of 'Text's with elements which do and do not +-- satisfy the predicate, respectively; i.e. +-- +-- > partition p t == (filter p t, filter (not . p) t) +partition :: (Char -> Bool) -> Text -> (Text, Text) +partition p t = (filter p t, filter (not . p) t) +{-# INLINE partition #-} + +-- | /O(n)/ 'Text' index (subscript) operator, starting from 0. +index :: Text -> Int64 -> Char +index t n = S.index (stream t) n +{-# INLINE index #-} + +-- | /O(n+m)/ The 'count' function returns the number of times the +-- query string appears in the given 'Text'. An empty query string is +-- invalid, and will cause an error to be raised. +-- +-- In (unlikely) bad cases, this function's time complexity degrades +-- towards /O(n*m)/. +count :: Text -> Text -> Int64 +count pat src + | null pat = emptyError "count" + | otherwise = go 0 (indices pat src) + where go !n [] = n + go !n (_:xs) = go (n+1) xs +{-# INLINE [1] count #-} + +{-# RULES +"LAZY TEXT count/singleton -> countChar" [~1] forall c t. + count (singleton c) t = countChar c t + #-} + +-- | /O(n)/ The 'countChar' function returns the number of times the +-- query element appears in the given 'Text'. Subject to fusion. +countChar :: Char -> Text -> Int64 +countChar c t = S.countChar c (stream t) + +-- | /O(n)/ 'zip' takes two 'Text's and returns a list of +-- corresponding pairs of bytes. If one input 'Text' is short, +-- excess elements of the longer 'Text' are discarded. This is +-- equivalent to a pair of 'unpack' operations. +zip :: Text -> Text -> [(Char,Char)] +zip a b = S.unstreamList $ S.zipWith (,) (stream a) (stream b) +{-# INLINE [0] zip #-} + +-- | /O(n)/ 'zipWith' generalises 'zip' by zipping with the function +-- given as the first argument, instead of a tupling function. 
+-- Performs replacement on invalid scalar values. +zipWith :: (Char -> Char -> Char) -> Text -> Text -> Text +zipWith f t1 t2 = unstream (S.zipWith g (stream t1) (stream t2)) + where g a b = safe (f a b) +{-# INLINE [0] zipWith #-} + +revChunks :: [T.Text] -> Text +revChunks = L.foldl' (flip chunk) Empty + +emptyError :: String -> a +emptyError fun = P.error ("Data.Text.Lazy." ++ fun ++ ": empty input") + +impossibleError :: String -> a +impossibleError fun = P.error ("Data.Text.Lazy." ++ fun ++ ": impossible case") diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Read.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Read.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Read.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Read.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,201 @@ +{-# LANGUAGE OverloadedStrings, UnboxedTuples, CPP #-} +#if __GLASGOW_HASKELL__ >= 702 +{-# LANGUAGE Trustworthy #-} +#endif + +-- | +-- Module : Data.Text.Read +-- Copyright : (c) 2010, 2011 Bryan O'Sullivan +-- +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : GHC +-- +-- Functions used frequently when reading textual data. +module Data.Text.Read + ( + Reader + , decimal + , hexadecimal + , signed + , rational + , double + ) where + +import Control.Monad (liftM) +import Data.Char (isDigit, isHexDigit) +import Data.Int (Int8, Int16, Int32, Int64) +import Data.Ratio ((%)) +import Data.Text as T +import Data.Text.Internal.Private (span_) +import Data.Text.Internal.Read +import Data.Word (Word, Word8, Word16, Word32, Word64) + +-- | Read some text. If the read succeeds, return its value and the +-- remaining text, otherwise an error message. +type Reader a = IReader Text a +type Parser a = IParser Text a + +-- | Read a decimal integer. 
The input must begin with at least one +-- decimal digit, and is consumed until a non-digit or end of string +-- is reached. +-- +-- This function does not handle leading sign characters. If you need +-- to handle signed input, use @'signed' 'decimal'@. +-- +-- /Note/: For fixed-width integer types, this function does not +-- attempt to detect overflow, so a sufficiently long input may give +-- incorrect results. If you are worried about overflow, use +-- 'Integer' for your result type. +decimal :: Integral a => Reader a +{-# SPECIALIZE decimal :: Reader Int #-} +{-# SPECIALIZE decimal :: Reader Int8 #-} +{-# SPECIALIZE decimal :: Reader Int16 #-} +{-# SPECIALIZE decimal :: Reader Int32 #-} +{-# SPECIALIZE decimal :: Reader Int64 #-} +{-# SPECIALIZE decimal :: Reader Integer #-} +{-# SPECIALIZE decimal :: Reader Data.Word.Word #-} +{-# SPECIALIZE decimal :: Reader Word8 #-} +{-# SPECIALIZE decimal :: Reader Word16 #-} +{-# SPECIALIZE decimal :: Reader Word32 #-} +{-# SPECIALIZE decimal :: Reader Word64 #-} +decimal txt + | T.null h = Left "input does not start with a digit" + | otherwise = Right (T.foldl' go 0 h, t) + where (# h,t #) = span_ isDigit txt + go n d = (n * 10 + fromIntegral (digitToInt d)) + +-- | Read a hexadecimal integer, consisting of an optional leading +-- @\"0x\"@ followed by at least one decimal digit. Input is consumed +-- until a non-hex-digit or end of string is reached. This function +-- is case insensitive. +-- +-- This function does not handle leading sign characters. If you need +-- to handle signed input, use @'signed' 'hexadecimal'@. +-- +-- /Note/: For fixed-width integer types, this function does not +-- attempt to detect overflow, so a sufficiently long input may give +-- incorrect results. If you are worried about overflow, use +-- 'Integer' for your result type. 
+hexadecimal :: Integral a => Reader a +{-# SPECIALIZE hexadecimal :: Reader Int #-} +{-# SPECIALIZE hexadecimal :: Reader Int8 #-} +{-# SPECIALIZE hexadecimal :: Reader Int16 #-} +{-# SPECIALIZE hexadecimal :: Reader Int32 #-} +{-# SPECIALIZE hexadecimal :: Reader Int64 #-} +{-# SPECIALIZE hexadecimal :: Reader Integer #-} +{-# SPECIALIZE hexadecimal :: Reader Word #-} +{-# SPECIALIZE hexadecimal :: Reader Word8 #-} +{-# SPECIALIZE hexadecimal :: Reader Word16 #-} +{-# SPECIALIZE hexadecimal :: Reader Word32 #-} +{-# SPECIALIZE hexadecimal :: Reader Word64 #-} +hexadecimal txt + | h == "0x" || h == "0X" = hex t + | otherwise = hex txt + where (h,t) = T.splitAt 2 txt + +hex :: Integral a => Reader a +{-# SPECIALIZE hex :: Reader Int #-} +{-# SPECIALIZE hex :: Reader Int8 #-} +{-# SPECIALIZE hex :: Reader Int16 #-} +{-# SPECIALIZE hex :: Reader Int32 #-} +{-# SPECIALIZE hex :: Reader Int64 #-} +{-# SPECIALIZE hex :: Reader Integer #-} +{-# SPECIALIZE hex :: Reader Word #-} +{-# SPECIALIZE hex :: Reader Word8 #-} +{-# SPECIALIZE hex :: Reader Word16 #-} +{-# SPECIALIZE hex :: Reader Word32 #-} +{-# SPECIALIZE hex :: Reader Word64 #-} +hex txt + | T.null h = Left "input does not start with a hexadecimal digit" + | otherwise = Right (T.foldl' go 0 h, t) + where (# h,t #) = span_ isHexDigit txt + go n d = (n * 16 + fromIntegral (hexDigitToInt d)) + +-- | Read an optional leading sign character (@\'-\'@ or @\'+\'@) and +-- apply it to the result of applying the given reader. +signed :: Num a => Reader a -> Reader a +{-# INLINE signed #-} +signed f = runP (signa (P f)) + +-- | Read a rational number. +-- +-- This function accepts an optional leading sign character, followed +-- by at least one decimal digit. The syntax similar to that accepted +-- by the 'read' function, with the exception that a trailing @\'.\'@ +-- or @\'e\'@ /not/ followed by a number is not consumed. 
+-- +-- Examples (with behaviour identical to 'read'): +-- +-- >rational "3" == Right (3.0, "") +-- >rational "3.1" == Right (3.1, "") +-- >rational "3e4" == Right (30000.0, "") +-- >rational "3.1e4" == Right (31000.0, "") +-- >rational ".3" == Left "input does not start with a digit" +-- >rational "e3" == Left "input does not start with a digit" +-- +-- Examples of differences from 'read': +-- +-- >rational "3.foo" == Right (3.0, ".foo") +-- >rational "3e" == Right (3.0, "e") +rational :: Fractional a => Reader a +{-# SPECIALIZE rational :: Reader Double #-} +rational = floaty $ \real frac fracDenom -> fromRational $ + real % 1 + frac % fracDenom + +-- | Read a rational number. +-- +-- The syntax accepted by this function is the same as for 'rational'. +-- +-- /Note/: This function is almost ten times faster than 'rational', +-- but is slightly less accurate. +-- +-- The 'Double' type supports about 16 decimal places of accuracy. +-- For 94.2% of numbers, this function and 'rational' give identical +-- results, but for the remaining 5.8%, this function loses precision +-- around the 15th decimal place. For 0.001% of numbers, this +-- function will lose precision at the 13th or 14th decimal place. 
+double :: Reader Double +double = floaty $ \real frac fracDenom -> + fromIntegral real + + fromIntegral frac / fromIntegral fracDenom + +signa :: Num a => Parser a -> Parser a +{-# SPECIALIZE signa :: Parser Int -> Parser Int #-} +{-# SPECIALIZE signa :: Parser Int8 -> Parser Int8 #-} +{-# SPECIALIZE signa :: Parser Int16 -> Parser Int16 #-} +{-# SPECIALIZE signa :: Parser Int32 -> Parser Int32 #-} +{-# SPECIALIZE signa :: Parser Int64 -> Parser Int64 #-} +{-# SPECIALIZE signa :: Parser Integer -> Parser Integer #-} +signa p = do + sign <- perhaps '+' $ char (\c -> c == '-' || c == '+') + if sign == '+' then p else negate `liftM` p + +char :: (Char -> Bool) -> Parser Char +char p = P $ \t -> case T.uncons t of + Just (c,t') | p c -> Right (c,t') + _ -> Left "character does not match" + +floaty :: Fractional a => (Integer -> Integer -> Integer -> a) -> Reader a +{-# INLINE floaty #-} +floaty f = runP $ do + sign <- perhaps '+' $ char (\c -> c == '-' || c == '+') + real <- P decimal + T fraction fracDigits <- perhaps (T 0 0) $ do + _ <- char (=='.') + digits <- P $ \t -> Right (T.length $ T.takeWhile isDigit t, t) + n <- P decimal + return $ T n digits + let e c = c == 'e' || c == 'E' + power <- perhaps 0 (char e >> signa (P decimal) :: Parser Int) + let n = if fracDigits == 0 + then if power == 0 + then fromIntegral real + else fromIntegral real * (10 ^^ power) + else if power == 0 + then f real fraction (10 ^ fracDigits) + else f real fraction (10 ^ fracDigits) * (10 ^^ power) + return $! 
if sign == '+' + then n + else -n diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Show.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Show.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Show.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Show.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,89 @@ +{-# LANGUAGE CPP, MagicHash #-} +{-# OPTIONS_GHC -fno-warn-orphans #-} +#if __GLASGOW_HASKELL__ >= 702 +{-# LANGUAGE Trustworthy #-} +#endif + +-- | +-- Module : Data.Text.Show +-- Copyright : (c) 2009-2015 Bryan O'Sullivan +-- +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : GHC + +module Data.Text.Show + ( + singleton + , unpack + , unpackCString# + ) where + +import Control.Monad.ST (ST) +import Data.Text.Internal (Text(..), empty_, safe) +import Data.Text.Internal.Fusion (stream, unstream) +import Data.Text.Internal.Unsafe.Char (unsafeWrite) +import GHC.Prim (Addr#) +import qualified Data.Text.Array as A +import qualified Data.Text.Internal.Fusion.Common as S + +#if __GLASGOW_HASKELL__ >= 702 +import qualified GHC.CString as GHC +#else +import qualified GHC.Base as GHC +#endif + +instance Show Text where + showsPrec p ps r = showsPrec p (unpack ps) r + +-- | /O(n)/ Convert a 'Text' into a 'String'. Subject to fusion. +unpack :: Text -> String +unpack = S.unstreamList . stream +{-# INLINE [1] unpack #-} + +-- | /O(n)/ Convert a literal string into a 'Text'. Subject to +-- fusion. +-- +-- This is exposed solely for people writing GHC rewrite rules. +unpackCString# :: Addr# -> Text +unpackCString# addr# = unstream (S.streamCString# addr#) +{-# NOINLINE unpackCString# #-} + +{-# RULES "TEXT literal" [1] forall a. + unstream (S.map safe (S.streamList (GHC.unpackCString# a))) + = unpackCString# a #-} + +{-# RULES "TEXT literal UTF8" [1] forall a. 
+ unstream (S.map safe (S.streamList (GHC.unpackCStringUtf8# a))) + = unpackCString# a #-} + +{-# RULES "TEXT empty literal" [1] + unstream (S.map safe (S.streamList [])) + = empty_ #-} + +{-# RULES "TEXT singleton literal" [1] forall a. + unstream (S.map safe (S.streamList [a])) + = singleton_ a #-} + +-- | /O(1)/ Convert a character into a Text. Subject to fusion. +-- Performs replacement on invalid scalar values. +singleton :: Char -> Text +singleton = unstream . S.singleton . safe +{-# INLINE [1] singleton #-} + +{-# RULES "TEXT singleton" forall a. + unstream (S.singleton (safe a)) + = singleton_ a #-} + +-- This is intended to reduce inlining bloat. +singleton_ :: Char -> Text +singleton_ c = Text (A.run x) 0 len + where x :: ST s (A.MArray s) + x = do arr <- A.new len + _ <- unsafeWrite arr 0 d + return arr + len | d < '\x10000' = 1 + | otherwise = 2 + d = safe c +{-# NOINLINE singleton_ #-} diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Unsafe.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Unsafe.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text/Unsafe.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text/Unsafe.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,123 @@ +{-# LANGUAGE CPP, MagicHash, UnboxedTuples #-} +-- | +-- Module : Data.Text.Unsafe +-- Copyright : (c) 2009, 2010, 2011 Bryan O'Sullivan +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : portable +-- +-- A module containing unsafe 'Text' operations, for very very careful +-- use in heavily tested code. +module Data.Text.Unsafe + ( + inlineInterleaveST + , inlinePerformIO + , unsafeDupablePerformIO + , Iter(..) 
+ , iter + , iter_ + , reverseIter + , reverseIter_ + , unsafeHead + , unsafeTail + , lengthWord16 + , takeWord16 + , dropWord16 + ) where + +#if defined(ASSERTS) +import Control.Exception (assert) +#endif +import Data.Text.Internal.Encoding.Utf16 (chr2) +import Data.Text.Internal (Text(..)) +import Data.Text.Internal.Unsafe (inlineInterleaveST, inlinePerformIO) +import Data.Text.Internal.Unsafe.Char (unsafeChr) +import qualified Data.Text.Array as A +import GHC.IO (unsafeDupablePerformIO) + +-- | /O(1)/ A variant of 'head' for non-empty 'Text'. 'unsafeHead' +-- omits the check for the empty case, so there is an obligation on +-- the programmer to provide a proof that the 'Text' is non-empty. +unsafeHead :: Text -> Char +unsafeHead (Text arr off _len) + | m < 0xD800 || m > 0xDBFF = unsafeChr m + | otherwise = chr2 m n + where m = A.unsafeIndex arr off + n = A.unsafeIndex arr (off+1) +{-# INLINE unsafeHead #-} + +-- | /O(1)/ A variant of 'tail' for non-empty 'Text'. 'unsafeTail' +-- omits the check for the empty case, so there is an obligation on +-- the programmer to provide a proof that the 'Text' is non-empty. +unsafeTail :: Text -> Text +unsafeTail t@(Text arr off len) = +#if defined(ASSERTS) + assert (d <= len) $ +#endif + Text arr (off+d) (len-d) + where d = iter_ t 0 +{-# INLINE unsafeTail #-} + +data Iter = Iter {-# UNPACK #-} !Char {-# UNPACK #-} !Int + +-- | /O(1)/ Iterate (unsafely) one step forwards through a UTF-16 +-- array, returning the current character and the delta to add to give +-- the next offset to iterate at. +iter :: Text -> Int -> Iter +iter (Text arr off _len) i + | m < 0xD800 || m > 0xDBFF = Iter (unsafeChr m) 1 + | otherwise = Iter (chr2 m n) 2 + where m = A.unsafeIndex arr j + n = A.unsafeIndex arr k + j = off + i + k = j + 1 +{-# INLINE iter #-} + +-- | /O(1)/ Iterate one step through a UTF-16 array, returning the +-- delta to add to give the next offset to iterate at. 
+iter_ :: Text -> Int -> Int +iter_ (Text arr off _len) i | m < 0xD800 || m > 0xDBFF = 1 + | otherwise = 2 + where m = A.unsafeIndex arr (off+i) +{-# INLINE iter_ #-} + +-- | /O(1)/ Iterate one step backwards through a UTF-16 array, +-- returning the current character and the delta to add (i.e. a +-- negative number) to give the next offset to iterate at. +reverseIter :: Text -> Int -> (Char,Int) +reverseIter (Text arr off _len) i + | m < 0xDC00 || m > 0xDFFF = (unsafeChr m, -1) + | otherwise = (chr2 n m, -2) + where m = A.unsafeIndex arr j + n = A.unsafeIndex arr k + j = off + i + k = j - 1 +{-# INLINE reverseIter #-} + +-- | /O(1)/ Iterate one step backwards through a UTF-16 array, +-- returning the delta to add (i.e. a negative number) to give the +-- next offset to iterate at. +reverseIter_ :: Text -> Int -> Int +reverseIter_ (Text arr off _len) i + | m < 0xDC00 || m > 0xDFFF = -1 + | otherwise = -2 + where m = A.unsafeIndex arr (off+i) +{-# INLINE reverseIter_ #-} + +-- | /O(1)/ Return the length of a 'Text' in units of 'Word16'. This +-- is useful for sizing a target array appropriately before using +-- 'unsafeCopyToPtr'. +lengthWord16 :: Text -> Int +lengthWord16 (Text _arr _off len) = len +{-# INLINE lengthWord16 #-} + +-- | /O(1)/ Unchecked take of 'k' 'Word16's from the front of a 'Text'. +takeWord16 :: Int -> Text -> Text +takeWord16 k (Text arr off _len) = Text arr off k +{-# INLINE takeWord16 #-} + +-- | /O(1)/ Unchecked drop of 'k' 'Word16's from the front of a 'Text'. 
+dropWord16 :: Int -> Text -> Text +dropWord16 k (Text arr off len) = Text arr (off+k) (len-k) +{-# INLINE dropWord16 #-} diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Data/Text.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Data/Text.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,1752 @@ +{-# LANGUAGE BangPatterns, CPP, MagicHash, Rank2Types, UnboxedTuples #-} +{-# OPTIONS_GHC -fno-warn-orphans #-} +#if __GLASGOW_HASKELL__ >= 702 +{-# LANGUAGE Trustworthy #-} +#endif +#if __GLASGOW_HASKELL__ >= 708 +{-# LANGUAGE TypeFamilies #-} +#endif + +-- | +-- Module : Data.Text +-- Copyright : (c) 2009, 2010, 2011, 2012 Bryan O'Sullivan, +-- (c) 2009 Duncan Coutts, +-- (c) 2008, 2009 Tom Harper +-- +-- License : BSD-style +-- Maintainer : bos@serpentine.com +-- Stability : experimental +-- Portability : GHC +-- +-- A time and space-efficient implementation of Unicode text. +-- Suitable for performance critical use, both in terms of large data +-- quantities and high speed. +-- +-- /Note/: Read below the synopsis for important notes on the use of +-- this module. +-- +-- This module is intended to be imported @qualified@, to avoid name +-- clashes with "Prelude" functions, e.g. +-- +-- > import qualified Data.Text as T +-- +-- To use an extended and very rich family of functions for working +-- with Unicode text (including normalization, regular expressions, +-- non-standard encodings, text breaking, and locales), see +-- . 
+ +module Data.Text + ( + -- * Strict vs lazy types + -- $strict + + -- * Acceptable data + -- $replacement + + -- * Fusion + -- $fusion + + -- * Types + Text + + -- * Creation and elimination + , pack + , unpack + , singleton + , empty + + -- * Basic interface + , cons + , snoc + , append + , uncons + , head + , last + , tail + , init + , null + , length + , compareLength + + -- * Transformations + , map + , intercalate + , intersperse + , transpose + , reverse + , replace + + -- ** Case conversion + -- $case + , toCaseFold + , toLower + , toUpper + , toTitle + + -- ** Justification + , justifyLeft + , justifyRight + , center + + -- * Folds + , foldl + , foldl' + , foldl1 + , foldl1' + , foldr + , foldr1 + + -- ** Special folds + , concat + , concatMap + , any + , all + , maximum + , minimum + + -- * Construction + + -- ** Scans + , scanl + , scanl1 + , scanr + , scanr1 + + -- ** Accumulating maps + , mapAccumL + , mapAccumR + + -- ** Generation and unfolding + , replicate + , unfoldr + , unfoldrN + + -- * Substrings + + -- ** Breaking strings + , take + , takeEnd + , drop + , dropEnd + , takeWhile + , takeWhileEnd + , dropWhile + , dropWhileEnd + , dropAround + , strip + , stripStart + , stripEnd + , splitAt + , breakOn + , breakOnEnd + , break + , span + , group + , groupBy + , inits + , tails + + -- ** Breaking into many substrings + -- $split + , splitOn + , split + , chunksOf + + -- ** Breaking into lines and words + , lines + --, lines' + , words + , unlines + , unwords + + -- * Predicates + , isPrefixOf + , isSuffixOf + , isInfixOf + + -- ** View patterns + , stripPrefix + , stripSuffix + , commonPrefixes + + -- * Searching + , filter + , breakOnAll + , find + , partition + + -- , findSubstring + + -- * Indexing + -- $index + , index + , findIndex + , count + + -- * Zipping + , zip + , zipWith + + -- -* Ordered text + -- , sort + + -- * Low level operations + , copy + , unpackCString# + ) where + +import Prelude (Char, Bool(..), Int, Maybe(..), String, + 
Eq(..), Ord(..), Ordering(..), (++), + Read(..), + (&&), (||), (+), (-), (.), ($), ($!), (>>), + not, return, otherwise, quot) +#if defined(HAVE_DEEPSEQ) +import Control.DeepSeq (NFData(rnf)) +#endif +#if defined(ASSERTS) +import Control.Exception (assert) +#endif +import Data.Char (isSpace) +import Data.Data (Data(gfoldl, toConstr, gunfold, dataTypeOf), constrIndex, + Constr, mkConstr, DataType, mkDataType, Fixity(Prefix)) +import Control.Monad (foldM) +import Control.Monad.ST (ST) +import qualified Data.Text.Array as A +import qualified Data.List as L +import Data.Binary (Binary(get, put)) +import Data.Monoid (Monoid(..)) +#if MIN_VERSION_base(4,9,0) +import Data.Semigroup (Semigroup(..)) +#endif +import Data.String (IsString(..)) +import qualified Data.Text.Internal.Fusion as S +import qualified Data.Text.Internal.Fusion.Common as S +import Data.Text.Encoding (decodeUtf8', encodeUtf8) +import Data.Text.Internal.Fusion (stream, reverseStream, unstream) +import Data.Text.Internal.Private (span_) +import Data.Text.Internal (Text(..), empty, firstf, mul, safe, text) +import Data.Text.Show (singleton, unpack, unpackCString#) +import qualified Prelude as P +import Data.Text.Unsafe (Iter(..), iter, iter_, lengthWord16, reverseIter, + reverseIter_, unsafeHead, unsafeTail) +import Data.Text.Internal.Unsafe.Char (unsafeChr) +import qualified Data.Text.Internal.Functions as F +import qualified Data.Text.Internal.Encoding.Utf16 as U16 +import Data.Text.Internal.Search (indices) +#if defined(__HADDOCK__) +import Data.ByteString (ByteString) +import qualified Data.Text.Lazy as L +import Data.Int (Int64) +#endif +#if __GLASGOW_HASKELL__ >= 708 +import qualified GHC.Exts as Exts +#endif +#if MIN_VERSION_base(4,7,0) +import Text.Printf (PrintfArg, formatArg, formatString) +#endif + +-- $strict +-- +-- This package provides both strict and lazy 'Text' types. 
The +-- strict type is provided by the "Data.Text" module, while the lazy +-- type is provided by the "Data.Text.Lazy" module. Internally, the +-- lazy @Text@ type consists of a list of strict chunks. +-- +-- The strict 'Text' type requires that an entire string fit into +-- memory at once. The lazy 'Data.Text.Lazy.Text' type is capable of +-- streaming strings that are larger than memory using a small memory +-- footprint. In many cases, the overhead of chunked streaming makes +-- the lazy 'Data.Text.Lazy.Text' type slower than its strict +-- counterpart, but this is not always the case. Sometimes, the time +-- complexity of a function in one module may be different from the +-- other, due to their differing internal structures. +-- +-- Each module provides an almost identical API, with the main +-- difference being that the strict module uses 'Int' values for +-- lengths and counts, while the lazy module uses 'Data.Int.Int64' +-- lengths. + +-- $replacement +-- +-- A 'Text' value is a sequence of Unicode scalar values, as defined +-- in +-- . +-- As such, a 'Text' cannot contain values in the range U+D800 to +-- U+DFFF inclusive. Haskell implementations admit all Unicode code +-- points +-- () +-- as 'Char' values, including code points from this invalid range. +-- This means that there are some 'Char' values that are not valid +-- Unicode scalar values, and the functions in this module must handle +-- those cases. +-- +-- Within this module, many functions construct a 'Text' from one or +-- more 'Char' values. Those functions will substitute 'Char' values +-- that are not valid Unicode scalar values with the replacement +-- character \"�\" (U+FFFD). Functions that perform this +-- inspection and replacement are documented with the phrase +-- \"Performs replacement on invalid scalar values\". +-- +-- (One reason for this policy of replacement is that internally, a +-- 'Text' value is represented as packed UTF-16 data. 
Values in the +-- range U+D800 through U+DFFF are used by UTF-16 to denote surrogate +-- code points, and so cannot be represented. The functions replace +-- invalid scalar values, instead of dropping them, as a security +-- measure. For details, see +-- .) + +-- $fusion +-- +-- Most of the functions in this module are subject to /fusion/, +-- meaning that a pipeline of such functions will usually allocate at +-- most one 'Text' value. +-- +-- As an example, consider the following pipeline: +-- +-- > import Data.Text as T +-- > import Data.Text.Encoding as E +-- > import Data.ByteString (ByteString) +-- > +-- > countChars :: ByteString -> Int +-- > countChars = T.length . T.toUpper . E.decodeUtf8 +-- +-- From the type signatures involved, this looks like it should +-- allocate one 'Data.ByteString.ByteString' value, and two 'Text' +-- values. However, when a module is compiled with optimisation +-- enabled under GHC, the two intermediate 'Text' values will be +-- optimised away, and the function will be compiled down to a single +-- loop over the source 'Data.ByteString.ByteString'. +-- +-- Functions that can be fused by the compiler are documented with the +-- phrase \"Subject to fusion\". 
+ +instance Eq Text where + Text arrA offA lenA == Text arrB offB lenB + | lenA == lenB = A.equal arrA offA arrB offB lenA + | otherwise = False + {-# INLINE (==) #-} + +instance Ord Text where + compare = compareText + +instance Read Text where + readsPrec p str = [(pack x,y) | (x,y) <- readsPrec p str] + +#if MIN_VERSION_base(4,9,0) +-- Semigroup orphan instances for older GHCs are provided by +-- 'semigroups` package + +instance Semigroup Text where + (<>) = append +#endif + +instance Monoid Text where + mempty = empty +#if MIN_VERSION_base(4,9,0) + mappend = (<>) -- future-proof definition +#else + mappend = append +#endif + mconcat = concat + +instance IsString Text where + fromString = pack + +#if __GLASGOW_HASKELL__ >= 708 +instance Exts.IsList Text where + type Item Text = Char + fromList = pack + toList = unpack +#endif + +#if defined(HAVE_DEEPSEQ) +instance NFData Text where rnf !_ = () +#endif + +instance Binary Text where + put t = put (encodeUtf8 t) + get = do + bs <- get + case decodeUtf8' bs of + P.Left exn -> P.fail (P.show exn) + P.Right a -> P.return a + +-- | This instance preserves data abstraction at the cost of inefficiency. +-- We omit reflection services for the sake of data abstraction. +-- +-- This instance was created by copying the updated behavior of +-- @"Data.Set".@'Data.Set.Set' and @"Data.Map".@'Data.Map.Map'. If you +-- feel a mistake has been made, please feel free to submit +-- improvements. 
+-- +-- The original discussion is archived here: +-- +-- +-- The followup discussion that changed the behavior of 'Data.Set.Set' +-- and 'Data.Map.Map' is archived here: +-- + +instance Data Text where + gfoldl f z txt = z pack `f` (unpack txt) + toConstr _ = packConstr + gunfold k z c = case constrIndex c of + 1 -> k (z pack) + _ -> P.error "gunfold" + dataTypeOf _ = textDataType + +#if MIN_VERSION_base(4,7,0) +-- | Only defined for @base-4.7.0.0@ and later +instance PrintfArg Text where + formatArg txt = formatString $ unpack txt +#endif + +packConstr :: Constr +packConstr = mkConstr textDataType "pack" [] Prefix + +textDataType :: DataType +textDataType = mkDataType "Data.Text.Text" [packConstr] + +-- | /O(n)/ Compare two 'Text' values lexicographically. +compareText :: Text -> Text -> Ordering +compareText ta@(Text _arrA _offA lenA) tb@(Text _arrB _offB lenB) + | lenA == 0 && lenB == 0 = EQ + | otherwise = go 0 0 + where + go !i !j + | i >= lenA || j >= lenB = compare lenA lenB + | a < b = LT + | a > b = GT + | otherwise = go (i+di) (j+dj) + where Iter a di = iter ta i + Iter b dj = iter tb j + +-- ----------------------------------------------------------------------------- +-- * Conversion to/from 'Text' + +-- | /O(n)/ Convert a 'String' into a 'Text'. Subject to +-- fusion. Performs replacement on invalid scalar values. +pack :: String -> Text +pack = unstream . S.map safe . S.streamList +{-# INLINE [1] pack #-} + +-- ----------------------------------------------------------------------------- +-- * Basic functions + +-- | /O(n)/ Adds a character to the front of a 'Text'. This function +-- is more costly than its 'List' counterpart because it requires +-- copying a new array. Subject to fusion. Performs replacement on +-- invalid scalar values. +cons :: Char -> Text -> Text +cons c t = unstream (S.cons (safe c) (stream t)) +{-# INLINE cons #-} + +infixr 5 `cons` + +-- | /O(n)/ Adds a character to the end of a 'Text'. 
This copies the +-- entire array in the process, unless fused. Subject to fusion. +-- Performs replacement on invalid scalar values. +snoc :: Text -> Char -> Text +snoc t c = unstream (S.snoc (stream t) (safe c)) +{-# INLINE snoc #-} + +-- | /O(n)/ Appends one 'Text' to the other by copying both of them +-- into a new 'Text'. Subject to fusion. +append :: Text -> Text -> Text +append a@(Text arr1 off1 len1) b@(Text arr2 off2 len2) + | len1 == 0 = b + | len2 == 0 = a + | len > 0 = Text (A.run x) 0 len + | otherwise = overflowError "append" + where + len = len1+len2 + x :: ST s (A.MArray s) + x = do + arr <- A.new len + A.copyI arr 0 arr1 off1 len1 + A.copyI arr len1 arr2 off2 len + return arr +{-# NOINLINE append #-} + +{-# RULES +"TEXT append -> fused" [~1] forall t1 t2. + append t1 t2 = unstream (S.append (stream t1) (stream t2)) +"TEXT append -> unfused" [1] forall t1 t2. + unstream (S.append (stream t1) (stream t2)) = append t1 t2 + #-} + +-- | /O(1)/ Returns the first character of a 'Text', which must be +-- non-empty. Subject to fusion. +head :: Text -> Char +head t = S.head (stream t) +{-# INLINE head #-} + +-- | /O(1)/ Returns the first character and rest of a 'Text', or +-- 'Nothing' if empty. Subject to fusion. +uncons :: Text -> Maybe (Char, Text) +uncons t@(Text arr off len) + | len <= 0 = Nothing + | otherwise = Just $ let !(Iter c d) = iter t 0 + in (c, text arr (off+d) (len-d)) +{-# INLINE [1] uncons #-} + +-- | Lifted from Control.Arrow and specialized. +second :: (b -> c) -> (a,b) -> (a,c) +second f (a, b) = (a, f b) + +-- | /O(1)/ Returns the last character of a 'Text', which must be +-- non-empty. Subject to fusion. +last :: Text -> Char +last (Text arr off len) + | len <= 0 = emptyError "last" + | n < 0xDC00 || n > 0xDFFF = unsafeChr n + | otherwise = U16.chr2 n0 n + where n = A.unsafeIndex arr (off+len-1) + n0 = A.unsafeIndex arr (off+len-2) +{-# INLINE [1] last #-} + +{-# RULES +"TEXT last -> fused" [~1] forall t. 
+ last t = S.last (stream t) +"TEXT last -> unfused" [1] forall t. + S.last (stream t) = last t + #-} + +-- | /O(1)/ Returns all characters after the head of a 'Text', which +-- must be non-empty. Subject to fusion. +tail :: Text -> Text +tail t@(Text arr off len) + | len <= 0 = emptyError "tail" + | otherwise = text arr (off+d) (len-d) + where d = iter_ t 0 +{-# INLINE [1] tail #-} + +{-# RULES +"TEXT tail -> fused" [~1] forall t. + tail t = unstream (S.tail (stream t)) +"TEXT tail -> unfused" [1] forall t. + unstream (S.tail (stream t)) = tail t + #-} + +-- | /O(1)/ Returns all but the last character of a 'Text', which must +-- be non-empty. Subject to fusion. +init :: Text -> Text +init (Text arr off len) | len <= 0 = emptyError "init" + | n >= 0xDC00 && n <= 0xDFFF = text arr off (len-2) + | otherwise = text arr off (len-1) + where + n = A.unsafeIndex arr (off+len-1) +{-# INLINE [1] init #-} + +{-# RULES +"TEXT init -> fused" [~1] forall t. + init t = unstream (S.init (stream t)) +"TEXT init -> unfused" [1] forall t. + unstream (S.init (stream t)) = init t + #-} + +-- | /O(1)/ Tests whether a 'Text' is empty or not. Subject to +-- fusion. +null :: Text -> Bool +null (Text _arr _off len) = +#if defined(ASSERTS) + assert (len >= 0) $ +#endif + len <= 0 +{-# INLINE [1] null #-} + +{-# RULES +"TEXT null -> fused" [~1] forall t. + null t = S.null (stream t) +"TEXT null -> unfused" [1] forall t. + S.null (stream t) = null t + #-} + +-- | /O(1)/ Tests whether a 'Text' contains exactly one character. +-- Subject to fusion. +isSingleton :: Text -> Bool +isSingleton = S.isSingleton . stream +{-# INLINE isSingleton #-} + +-- | /O(n)/ Returns the number of characters in a 'Text'. +-- Subject to fusion. +length :: Text -> Int +length t = S.length (stream t) +{-# INLINE length #-} + +-- | /O(n)/ Compare the count of characters in a 'Text' to a number. +-- Subject to fusion. 
+-- +-- This function gives the same answer as comparing against the result +-- of 'length', but can short circuit if the count of characters is +-- greater than the number, and hence be more efficient. +compareLength :: Text -> Int -> Ordering +compareLength t n = S.compareLengthI (stream t) n +{-# INLINE [1] compareLength #-} + +{-# RULES +"TEXT compareN/length -> compareLength" [~1] forall t n. + compare (length t) n = compareLength t n + #-} + +{-# RULES +"TEXT ==N/length -> compareLength/==EQ" [~1] forall t n. + (==) (length t) n = compareLength t n == EQ + #-} + +{-# RULES +"TEXT /=N/length -> compareLength//=EQ" [~1] forall t n. + (/=) (length t) n = compareLength t n /= EQ + #-} + +{-# RULES +"TEXT compareLength/==LT" [~1] forall t n. + (<) (length t) n = compareLength t n == LT + #-} + +{-# RULES +"TEXT <=N/length -> compareLength//=GT" [~1] forall t n. + (<=) (length t) n = compareLength t n /= GT + #-} + +{-# RULES +"TEXT >N/length -> compareLength/==GT" [~1] forall t n. + (>) (length t) n = compareLength t n == GT + #-} + +{-# RULES +"TEXT >=N/length -> compareLength//=LT" [~1] forall t n. + (>=) (length t) n = compareLength t n /= LT + #-} + +-- ----------------------------------------------------------------------------- +-- * Transformations +-- | /O(n)/ 'map' @f@ @t@ is the 'Text' obtained by applying @f@ to +-- each element of @t@. Subject to fusion. Performs replacement on +-- invalid scalar values. +map :: (Char -> Char) -> Text -> Text +map f t = unstream (S.map (safe . f) (stream t)) +{-# INLINE [1] map #-} + +-- | /O(n)/ The 'intercalate' function takes a 'Text' and a list of +-- 'Text's and concatenates the list after interspersing the first +-- argument between each element of the list. +intercalate :: Text -> [Text] -> Text +intercalate t = concat . (F.intersperse t) +{-# INLINE intercalate #-} + +-- | /O(n)/ The 'intersperse' function takes a character and places it +-- between the characters of a 'Text'. Subject to fusion. 
Performs +-- replacement on invalid scalar values. +intersperse :: Char -> Text -> Text +intersperse c t = unstream (S.intersperse (safe c) (stream t)) +{-# INLINE intersperse #-} + +-- | /O(n)/ Reverse the characters of a string. Subject to fusion. +reverse :: Text -> Text +reverse t = S.reverse (stream t) +{-# INLINE reverse #-} + +-- | /O(m+n)/ Replace every non-overlapping occurrence of @needle@ in +-- @haystack@ with @replacement@. +-- +-- This function behaves as though it was defined as follows: +-- +-- @ +-- replace needle replacement haystack = +-- 'intercalate' replacement ('splitOn' needle haystack) +-- @ +-- +-- As this suggests, each occurrence is replaced exactly once. So if +-- @needle@ occurs in @replacement@, that occurrence will /not/ itself +-- be replaced recursively: +-- +-- > replace "oo" "foo" "oo" == "foo" +-- +-- In cases where several instances of @needle@ overlap, only the +-- first one will be replaced: +-- +-- > replace "ofo" "bar" "ofofo" == "barfo" +-- +-- In (unlikely) bad cases, this function's time complexity degrades +-- towards /O(n*m)/. +replace :: Text + -- ^ @needle@ to search for. If this string is empty, an + -- error will occur. + -> Text + -- ^ @replacement@ to replace @needle@ with. + -> Text + -- ^ @haystack@ in which to search. 
+ -> Text +replace needle@(Text _ _ neeLen) + (Text repArr repOff repLen) + haystack@(Text hayArr hayOff hayLen) + | neeLen == 0 = emptyError "replace" + | L.null ixs = haystack + | len > 0 = Text (A.run x) 0 len + | otherwise = empty + where + ixs = indices needle haystack + len = hayLen - (neeLen - repLen) `mul` L.length ixs + x :: ST s (A.MArray s) + x = do + marr <- A.new len + let loop (i:is) o d = do + let d0 = d + i - o + d1 = d0 + repLen + A.copyI marr d hayArr (hayOff+o) d0 + A.copyI marr d0 repArr repOff d1 + loop is (i + neeLen) d1 + loop [] o d = A.copyI marr d hayArr (hayOff+o) len + loop ixs 0 0 + return marr + +-- ---------------------------------------------------------------------------- +-- ** Case conversions (folds) + +-- $case +-- +-- When case converting 'Text' values, do not use combinators like +-- @map toUpper@ to case convert each character of a string +-- individually, as this gives incorrect results according to the +-- rules of some writing systems. The whole-string case conversion +-- functions from this module, such as @toUpper@, obey the correct +-- case conversion rules. As a result, these functions may map one +-- input character to two or three output characters. For examples, +-- see the documentation of each function. +-- +-- /Note/: In some languages, case conversion is a locale- and +-- context-dependent operation. The case conversion functions in this +-- module are /not/ locale sensitive. Programs that require locale +-- sensitivity should use appropriate versions of the +-- . + +-- | /O(n)/ Convert a string to folded case. Subject to fusion. +-- +-- This function is mainly useful for performing caseless (also known +-- as case insensitive) string comparisons. +-- +-- A string @x@ is a caseless match for a string @y@ if and only if: +-- +-- @toCaseFold x == toCaseFold y@ +-- +-- The result string may be longer than the input string, and may +-- differ from applying 'toLower' to the input string. 
For instance, +-- the Armenian small ligature \"ﬓ\" (men now, U+FB13) is case +-- folded to the sequence \"մ\" (men, U+0574) followed by +-- \"ն\" (now, U+0576), while the Greek \"µ\" (micro sign, +-- U+00B5) is case folded to \"μ\" (small letter mu, U+03BC) +-- instead of itself. +toCaseFold :: Text -> Text +toCaseFold t = unstream (S.toCaseFold (stream t)) +{-# INLINE toCaseFold #-} + +-- | /O(n)/ Convert a string to lower case, using simple case +-- conversion. Subject to fusion. +-- +-- The result string may be longer than the input string. For +-- instance, \"İ\" (Latin capital letter I with dot above, +-- U+0130) maps to the sequence \"i\" (Latin small letter i, U+0069) +-- followed by \" ̇\" (combining dot above, U+0307). +toLower :: Text -> Text +toLower t = unstream (S.toLower (stream t)) +{-# INLINE toLower #-} + +-- | /O(n)/ Convert a string to upper case, using simple case +-- conversion. Subject to fusion. +-- +-- The result string may be longer than the input string. For +-- instance, the German \"ß\" (eszett, U+00DF) maps to the +-- two-letter sequence \"SS\". +toUpper :: Text -> Text +toUpper t = unstream (S.toUpper (stream t)) +{-# INLINE toUpper #-} + +-- | /O(n)/ Convert a string to title case, using simple case +-- conversion. Subject to fusion. +-- +-- The first letter of the input is converted to title case, as is +-- every subsequent letter that immediately follows a non-letter. +-- Every letter that immediately follows another letter is converted +-- to lower case. +-- +-- The result string may be longer than the input string. For example, +-- the Latin small ligature fl (U+FB02) is converted to the +-- sequence Latin capital letter F (U+0046) followed by Latin small +-- letter l (U+006C). +-- +-- /Note/: this function does not take language or culture specific +-- rules into account. 
For instance, in English, different style +-- guides disagree on whether the book name \"The Hill of the Red +-- Fox\" is correctly title cased—but this function will +-- capitalize /every/ word. +toTitle :: Text -> Text +toTitle t = unstream (S.toTitle (stream t)) +{-# INLINE toTitle #-} + +-- | /O(n)/ Left-justify a string to the given length, using the +-- specified fill character on the right. Subject to fusion. +-- Performs replacement on invalid scalar values. +-- +-- Examples: +-- +-- > justifyLeft 7 'x' "foo" == "fooxxxx" +-- > justifyLeft 3 'x' "foobar" == "foobar" +justifyLeft :: Int -> Char -> Text -> Text +justifyLeft k c t + | len >= k = t + | otherwise = t `append` replicateChar (k-len) c + where len = length t +{-# INLINE [1] justifyLeft #-} + +{-# RULES +"TEXT justifyLeft -> fused" [~1] forall k c t. + justifyLeft k c t = unstream (S.justifyLeftI k c (stream t)) +"TEXT justifyLeft -> unfused" [1] forall k c t. + unstream (S.justifyLeftI k c (stream t)) = justifyLeft k c t + #-} + +-- | /O(n)/ Right-justify a string to the given length, using the +-- specified fill character on the left. Performs replacement on +-- invalid scalar values. +-- +-- Examples: +-- +-- > justifyRight 7 'x' "bar" == "xxxxbar" +-- > justifyRight 3 'x' "foobar" == "foobar" +justifyRight :: Int -> Char -> Text -> Text +justifyRight k c t + | len >= k = t + | otherwise = replicateChar (k-len) c `append` t + where len = length t +{-# INLINE justifyRight #-} + +-- | /O(n)/ Center a string to the given length, using the specified +-- fill character on either side. Performs replacement on invalid +-- scalar values. 
+-- +-- Examples: +-- +-- > center 8 'x' "HS" = "xxxHSxxx" +center :: Int -> Char -> Text -> Text +center k c t + | len >= k = t + | otherwise = replicateChar l c `append` t `append` replicateChar r c + where len = length t + d = k - len + r = d `quot` 2 + l = d - r +{-# INLINE center #-} + +-- | /O(n)/ The 'transpose' function transposes the rows and columns +-- of its 'Text' argument. Note that this function uses 'pack', +-- 'unpack', and the list version of transpose, and is thus not very +-- efficient. +transpose :: [Text] -> [Text] +transpose ts = P.map pack (L.transpose (P.map unpack ts)) + +-- ----------------------------------------------------------------------------- +-- * Reducing 'Text's (folds) + +-- | /O(n)/ 'foldl', applied to a binary operator, a starting value +-- (typically the left-identity of the operator), and a 'Text', +-- reduces the 'Text' using the binary operator, from left to right. +-- Subject to fusion. +foldl :: (a -> Char -> a) -> a -> Text -> a +foldl f z t = S.foldl f z (stream t) +{-# INLINE foldl #-} + +-- | /O(n)/ A strict version of 'foldl'. Subject to fusion. +foldl' :: (a -> Char -> a) -> a -> Text -> a +foldl' f z t = S.foldl' f z (stream t) +{-# INLINE foldl' #-} + +-- | /O(n)/ A variant of 'foldl' that has no starting value argument, +-- and thus must be applied to a non-empty 'Text'. Subject to fusion. +foldl1 :: (Char -> Char -> Char) -> Text -> Char +foldl1 f t = S.foldl1 f (stream t) +{-# INLINE foldl1 #-} + +-- | /O(n)/ A strict version of 'foldl1'. Subject to fusion. +foldl1' :: (Char -> Char -> Char) -> Text -> Char +foldl1' f t = S.foldl1' f (stream t) +{-# INLINE foldl1' #-} + +-- | /O(n)/ 'foldr', applied to a binary operator, a starting value +-- (typically the right-identity of the operator), and a 'Text', +-- reduces the 'Text' using the binary operator, from right to left. +-- Subject to fusion. 
+foldr :: (Char -> a -> a) -> a -> Text -> a +foldr f z t = S.foldr f z (stream t) +{-# INLINE foldr #-} + +-- | /O(n)/ A variant of 'foldr' that has no starting value argument, +-- and thus must be applied to a non-empty 'Text'. Subject to +-- fusion. +foldr1 :: (Char -> Char -> Char) -> Text -> Char +foldr1 f t = S.foldr1 f (stream t) +{-# INLINE foldr1 #-} + +-- ----------------------------------------------------------------------------- +-- ** Special folds + +-- | /O(n)/ Concatenate a list of 'Text's. +concat :: [Text] -> Text +concat ts = case ts' of + [] -> empty + [t] -> t + _ -> Text (A.run go) 0 len + where + ts' = L.filter (not . null) ts + len = sumP "concat" $ L.map lengthWord16 ts' + go :: ST s (A.MArray s) + go = do + arr <- A.new len + let step i (Text a o l) = + let !j = i + l in A.copyI arr i a o j >> return j + foldM step 0 ts' >> return arr + +-- | /O(n)/ Map a function over a 'Text' that results in a 'Text', and +-- concatenate the results. +concatMap :: (Char -> Text) -> Text -> Text +concatMap f = concat . foldr ((:) . f) [] +{-# INLINE concatMap #-} + +-- | /O(n)/ 'any' @p@ @t@ determines whether any character in the +-- 'Text' @t@ satisfies the predicate @p@. Subject to fusion. +any :: (Char -> Bool) -> Text -> Bool +any p t = S.any p (stream t) +{-# INLINE any #-} + +-- | /O(n)/ 'all' @p@ @t@ determines whether all characters in the +-- 'Text' @t@ satisfy the predicate @p@. Subject to fusion. +all :: (Char -> Bool) -> Text -> Bool +all p t = S.all p (stream t) +{-# INLINE all #-} + +-- | /O(n)/ 'maximum' returns the maximum value from a 'Text', which +-- must be non-empty. Subject to fusion. +maximum :: Text -> Char +maximum t = S.maximum (stream t) +{-# INLINE maximum #-} + +-- | /O(n)/ 'minimum' returns the minimum value from a 'Text', which +-- must be non-empty. Subject to fusion. 
+minimum :: Text -> Char +minimum t = S.minimum (stream t) +{-# INLINE minimum #-} + +-- ----------------------------------------------------------------------------- +-- * Building 'Text's + +-- | /O(n)/ 'scanl' is similar to 'foldl', but returns a list of +-- successive reduced values from the left. Subject to fusion. +-- Performs replacement on invalid scalar values. +-- +-- > scanl f z [x1, x2, ...] == [z, z `f` x1, (z `f` x1) `f` x2, ...] +-- +-- Note that +-- +-- > last (scanl f z xs) == foldl f z xs. +scanl :: (Char -> Char -> Char) -> Char -> Text -> Text +scanl f z t = unstream (S.scanl g z (stream t)) + where g a b = safe (f a b) +{-# INLINE scanl #-} + +-- | /O(n)/ 'scanl1' is a variant of 'scanl' that has no starting +-- value argument. Subject to fusion. Performs replacement on +-- invalid scalar values. +-- +-- > scanl1 f [x1, x2, ...] == [x1, x1 `f` x2, ...] +scanl1 :: (Char -> Char -> Char) -> Text -> Text +scanl1 f t | null t = empty + | otherwise = scanl f (unsafeHead t) (unsafeTail t) +{-# INLINE scanl1 #-} + +-- | /O(n)/ 'scanr' is the right-to-left dual of 'scanl'. Performs +-- replacement on invalid scalar values. +-- +-- > scanr f v == reverse . scanl (flip f) v . reverse +scanr :: (Char -> Char -> Char) -> Char -> Text -> Text +scanr f z = S.reverse . S.reverseScanr g z . reverseStream + where g a b = safe (f a b) +{-# INLINE scanr #-} + +-- | /O(n)/ 'scanr1' is a variant of 'scanr' that has no starting +-- value argument. Subject to fusion. Performs replacement on +-- invalid scalar values. +scanr1 :: (Char -> Char -> Char) -> Text -> Text +scanr1 f t | null t = empty + | otherwise = scanr f (last t) (init t) +{-# INLINE scanr1 #-} + +-- | /O(n)/ Like a combination of 'map' and 'foldl''. Applies a +-- function to each element of a 'Text', passing an accumulating +-- parameter from left to right, and returns a final 'Text'. Performs +-- replacement on invalid scalar values. 
+mapAccumL :: (a -> Char -> (a,Char)) -> a -> Text -> (a, Text) +mapAccumL f z0 = S.mapAccumL g z0 . stream + where g a b = second safe (f a b) +{-# INLINE mapAccumL #-} + +-- | The 'mapAccumR' function behaves like a combination of 'map' and +-- a strict 'foldr'; it applies a function to each element of a +-- 'Text', passing an accumulating parameter from right to left, and +-- returning a final value of this accumulator together with the new +-- 'Text'. +-- Performs replacement on invalid scalar values. +mapAccumR :: (a -> Char -> (a,Char)) -> a -> Text -> (a, Text) +mapAccumR f z0 = second reverse . S.mapAccumL g z0 . reverseStream + where g a b = second safe (f a b) +{-# INLINE mapAccumR #-} + +-- ----------------------------------------------------------------------------- +-- ** Generating and unfolding 'Text's + +-- | /O(n*m)/ 'replicate' @n@ @t@ is a 'Text' consisting of the input +-- @t@ repeated @n@ times. +replicate :: Int -> Text -> Text +replicate n t@(Text a o l) + | n <= 0 || l <= 0 = empty + | n == 1 = t + | isSingleton t = replicateChar n (unsafeHead t) + | otherwise = Text (A.run x) 0 len + where + len = l `mul` n + x :: ST s (A.MArray s) + x = do + arr <- A.new len + let loop !d !i | i >= n = return arr + | otherwise = let m = d + l + in A.copyI arr d a o m >> loop m (i+1) + loop 0 0 +{-# INLINE [1] replicate #-} + +{-# RULES +"TEXT replicate/singleton -> replicateChar" [~1] forall n c. + replicate n (singleton c) = replicateChar n c + #-} + +-- | /O(n)/ 'replicateChar' @n@ @c@ is a 'Text' of length @n@ with @c@ the +-- value of every element. Subject to fusion. +replicateChar :: Int -> Char -> Text +replicateChar n c = unstream (S.replicateCharI n (safe c)) +{-# INLINE replicateChar #-} + +-- | /O(n)/, where @n@ is the length of the result. The 'unfoldr' +-- function is analogous to the List 'L.unfoldr'. 'unfoldr' builds a +-- 'Text' from a seed value. 
The function takes the element and +-- returns 'Nothing' if it is done producing the 'Text', otherwise +-- 'Just' @(a,b)@. In this case, @a@ is the next 'Char' in the +-- string, and @b@ is the seed value for further production. Subject +-- to fusion. Performs replacement on invalid scalar values. +unfoldr :: (a -> Maybe (Char,a)) -> a -> Text +unfoldr f s = unstream (S.unfoldr (firstf safe . f) s) +{-# INLINE unfoldr #-} + +-- | /O(n)/ Like 'unfoldr', 'unfoldrN' builds a 'Text' from a seed +-- value. However, the length of the result should be limited by the +-- first argument to 'unfoldrN'. This function is more efficient than +-- 'unfoldr' when the maximum length of the result is known and +-- correct, otherwise its performance is similar to 'unfoldr'. Subject +-- to fusion. Performs replacement on invalid scalar values. +unfoldrN :: Int -> (a -> Maybe (Char,a)) -> a -> Text +unfoldrN n f s = unstream (S.unfoldrN n (firstf safe . f) s) +{-# INLINE unfoldrN #-} + +-- ----------------------------------------------------------------------------- +-- * Substrings + +-- | /O(n)/ 'take' @n@, applied to a 'Text', returns the prefix of the +-- 'Text' of length @n@, or the 'Text' itself if @n@ is greater than +-- the length of the Text. Subject to fusion. +take :: Int -> Text -> Text +take n t@(Text arr off len) + | n <= 0 = empty + | n >= len = t + | otherwise = text arr off (iterN n t) +{-# INLINE [1] take #-} + +iterN :: Int -> Text -> Int +iterN n t@(Text _arr _off len) = loop 0 0 + where loop !i !cnt + | i >= len || cnt >= n = i + | otherwise = loop (i+d) (cnt+1) + where d = iter_ t i + +{-# RULES +"TEXT take -> fused" [~1] forall n t. + take n t = unstream (S.take n (stream t)) +"TEXT take -> unfused" [1] forall n t. + unstream (S.take n (stream t)) = take n t + #-} + +-- | /O(n)/ 'takeEnd' @n@ @t@ returns the suffix remaining after +-- taking @n@ characters from the end of @t@. 
+-- +-- Examples: +-- +-- > takeEnd 3 "foobar" == "bar" +takeEnd :: Int -> Text -> Text +takeEnd n t@(Text arr off len) + | n <= 0 = empty + | n >= len = t + | otherwise = text arr (off+i) (len-i) + where i = iterNEnd n t + +iterNEnd :: Int -> Text -> Int +iterNEnd n t@(Text _arr _off len) = loop (len-1) n + where loop i !m + | i <= 0 = 0 + | m <= 1 = i + | otherwise = loop (i+d) (m-1) + where d = reverseIter_ t i + +-- | /O(n)/ 'drop' @n@, applied to a 'Text', returns the suffix of the +-- 'Text' after the first @n@ characters, or the empty 'Text' if @n@ +-- is greater than the length of the 'Text'. Subject to fusion. +drop :: Int -> Text -> Text +drop n t@(Text arr off len) + | n <= 0 = t + | n >= len = empty + | otherwise = text arr (off+i) (len-i) + where i = iterN n t +{-# INLINE [1] drop #-} + +{-# RULES +"TEXT drop -> fused" [~1] forall n t. + drop n t = unstream (S.drop n (stream t)) +"TEXT drop -> unfused" [1] forall n t. + unstream (S.drop n (stream t)) = drop n t + #-} + +-- | /O(n)/ 'dropEnd' @n@ @t@ returns the prefix remaining after +-- dropping @n@ characters from the end of @t@. +-- +-- Examples: +-- +-- > dropEnd 3 "foobar" == "foo" +dropEnd :: Int -> Text -> Text +dropEnd n t@(Text arr off len) + | n <= 0 = t + | n >= len = empty + | otherwise = text arr off (iterNEnd n t) + +-- | /O(n)/ 'takeWhile', applied to a predicate @p@ and a 'Text', +-- returns the longest prefix (possibly empty) of elements that +-- satisfy @p@. Subject to fusion. +takeWhile :: (Char -> Bool) -> Text -> Text +takeWhile p t@(Text arr off len) = loop 0 + where loop !i | i >= len = t + | p c = loop (i+d) + | otherwise = text arr off i + where Iter c d = iter t i +{-# INLINE [1] takeWhile #-} + +{-# RULES +"TEXT takeWhile -> fused" [~1] forall p t. + takeWhile p t = unstream (S.takeWhile p (stream t)) +"TEXT takeWhile -> unfused" [1] forall p t. 
+ unstream (S.takeWhile p (stream t)) = takeWhile p t + #-} + +-- | /O(n)/ 'takeWhileEnd', applied to a predicate @p@ and a 'Text', +-- returns the longest suffix (possibly empty) of elements that +-- satisfy @p@. Subject to fusion. +-- Examples: +-- +-- > takeWhileEnd (=='o') "foo" == "oo" +takeWhileEnd :: (Char -> Bool) -> Text -> Text +takeWhileEnd p t@(Text arr off len) = loop (len-1) len + where loop !i !l | l <= 0 = t + | p c = loop (i+d) (l+d) + | otherwise = text arr (off+l) (len-l) + where (c,d) = reverseIter t i +{-# INLINE [1] takeWhileEnd #-} + +{-# RULES +"TEXT takeWhileEnd -> fused" [~1] forall p t. + takeWhileEnd p t = S.reverse (S.takeWhile p (S.reverseStream t)) +"TEXT takeWhileEnd -> unfused" [1] forall p t. + S.reverse (S.takeWhile p (S.reverseStream t)) = takeWhileEnd p t + #-} + +-- | /O(n)/ 'dropWhile' @p@ @t@ returns the suffix remaining after +-- 'takeWhile' @p@ @t@. Subject to fusion. +dropWhile :: (Char -> Bool) -> Text -> Text +dropWhile p t@(Text arr off len) = loop 0 0 + where loop !i !l | l >= len = empty + | p c = loop (i+d) (l+d) + | otherwise = Text arr (off+i) (len-l) + where Iter c d = iter t i +{-# INLINE [1] dropWhile #-} + +{-# RULES +"TEXT dropWhile -> fused" [~1] forall p t. + dropWhile p t = unstream (S.dropWhile p (stream t)) +"TEXT dropWhile -> unfused" [1] forall p t. + unstream (S.dropWhile p (stream t)) = dropWhile p t + #-} + +-- | /O(n)/ 'dropWhileEnd' @p@ @t@ returns the prefix remaining after +-- dropping characters that satisfy the predicate @p@ from the end of +-- @t@. Subject to fusion. +-- +-- Examples: +-- +-- > dropWhileEnd (=='.') "foo..." == "foo" +dropWhileEnd :: (Char -> Bool) -> Text -> Text +dropWhileEnd p t@(Text arr off len) = loop (len-1) len + where loop !i !l | l <= 0 = empty + | p c = loop (i+d) (l+d) + | otherwise = Text arr off l + where (c,d) = reverseIter t i +{-# INLINE [1] dropWhileEnd #-} + +{-# RULES +"TEXT dropWhileEnd -> fused" [~1] forall p t. 
+ dropWhileEnd p t = S.reverse (S.dropWhile p (S.reverseStream t)) +"TEXT dropWhileEnd -> unfused" [1] forall p t. + S.reverse (S.dropWhile p (S.reverseStream t)) = dropWhileEnd p t + #-} + +-- | /O(n)/ 'dropAround' @p@ @t@ returns the substring remaining after +-- dropping characters that satisfy the predicate @p@ from both the +-- beginning and end of @t@. Subject to fusion. +dropAround :: (Char -> Bool) -> Text -> Text +dropAround p = dropWhile p . dropWhileEnd p +{-# INLINE [1] dropAround #-} + +-- | /O(n)/ Remove leading white space from a string. Equivalent to: +-- +-- > dropWhile isSpace +stripStart :: Text -> Text +stripStart = dropWhile isSpace +{-# INLINE [1] stripStart #-} + +-- | /O(n)/ Remove trailing white space from a string. Equivalent to: +-- +-- > dropWhileEnd isSpace +stripEnd :: Text -> Text +stripEnd = dropWhileEnd isSpace +{-# INLINE [1] stripEnd #-} + +-- | /O(n)/ Remove leading and trailing white space from a string. +-- Equivalent to: +-- +-- > dropAround isSpace +strip :: Text -> Text +strip = dropAround isSpace +{-# INLINE [1] strip #-} + +-- | /O(n)/ 'splitAt' @n t@ returns a pair whose first element is a +-- prefix of @t@ of length @n@, and whose second is the remainder of +-- the string. It is equivalent to @('take' n t, 'drop' n t)@. +splitAt :: Int -> Text -> (Text, Text) +splitAt n t@(Text arr off len) + | n <= 0 = (empty, t) + | n >= len = (t, empty) + | otherwise = let k = iterN n t + in (text arr off k, text arr (off+k) (len-k)) + +-- | /O(n)/ 'span', applied to a predicate @p@ and text @t@, returns +-- a pair whose first element is the longest prefix (possibly empty) +-- of @t@ of elements that satisfy @p@, and whose second is the +-- remainder of the list. +span :: (Char -> Bool) -> Text -> (Text, Text) +span p t = case span_ p t of + (# hd,tl #) -> (hd,tl) +{-# INLINE span #-} + +-- | /O(n)/ 'break' is like 'span', but the prefix returned is +-- over elements that fail the predicate @p@. 
+break :: (Char -> Bool) -> Text -> (Text, Text) +break p = span (not . p) +{-# INLINE break #-} + +-- | /O(n)/ Group characters in a string according to a predicate. +groupBy :: (Char -> Char -> Bool) -> Text -> [Text] +groupBy p = loop + where + loop t@(Text arr off len) + | null t = [] + | otherwise = text arr off n : loop (text arr (off+n) (len-n)) + where Iter c d = iter t 0 + n = d + findAIndexOrEnd (not . p c) (Text arr (off+d) (len-d)) + +-- | Returns the /array/ index (in units of 'Word16') at which a +-- character may be found. This is /not/ the same as the logical +-- index returned by e.g. 'findIndex'. +findAIndexOrEnd :: (Char -> Bool) -> Text -> Int +findAIndexOrEnd q t@(Text _arr _off len) = go 0 + where go !i | i >= len || q c = i + | otherwise = go (i+d) + where Iter c d = iter t i + +-- | /O(n)/ Group characters in a string by equality. +group :: Text -> [Text] +group = groupBy (==) + +-- | /O(n)/ Return all initial segments of the given 'Text', shortest +-- first. +inits :: Text -> [Text] +inits t@(Text arr off len) = loop 0 + where loop i | i >= len = [t] + | otherwise = Text arr off i : loop (i + iter_ t i) + +-- | /O(n)/ Return all final segments of the given 'Text', longest +-- first. +tails :: Text -> [Text] +tails t | null t = [empty] + | otherwise = t : tails (unsafeTail t) + +-- $split +-- +-- Splitting functions in this library do not perform character-wise +-- copies to create substrings; they just construct new 'Text's that +-- are slices of the original. + +-- | /O(m+n)/ Break a 'Text' into pieces separated by the first 'Text' +-- argument (which cannot be empty), consuming the delimiter. An empty +-- delimiter is invalid, and will cause an error to be raised. +-- +-- Examples: +-- +-- > splitOn "\r\n" "a\r\nb\r\nd\r\ne" == ["a","b","d","e"] +-- > splitOn "aaa" "aaaXaaaXaaaXaaa" == ["","X","X","X",""] +-- > splitOn "x" "x" == ["",""] +-- +-- and +-- +-- > intercalate s . 
splitOn s == id +-- > splitOn (singleton c) == split (==c) +-- +-- (Note: the string @s@ to split on above cannot be empty.) +-- +-- In (unlikely) bad cases, this function's time complexity degrades +-- towards /O(n*m)/. +splitOn :: Text + -- ^ String to split on. If this string is empty, an error + -- will occur. + -> Text + -- ^ Input text. + -> [Text] +splitOn pat@(Text _ _ l) src@(Text arr off len) + | l <= 0 = emptyError "splitOn" + | isSingleton pat = split (== unsafeHead pat) src + | otherwise = go 0 (indices pat src) + where + go !s (x:xs) = text arr (s+off) (x-s) : go (x+l) xs + go s _ = [text arr (s+off) (len-s)] +{-# INLINE [1] splitOn #-} + +{-# RULES +"TEXT splitOn/singleton -> split/==" [~1] forall c t. + splitOn (singleton c) t = split (==c) t + #-} + +-- | /O(n)/ Splits a 'Text' into components delimited by separators, +-- where the predicate returns True for a separator element. The +-- resulting components do not contain the separators. Two adjacent +-- separators result in an empty component in the output. eg. +-- +-- > split (=='a') "aabbaca" == ["","","bb","c",""] +-- > split (=='a') "" == [""] +split :: (Char -> Bool) -> Text -> [Text] +split _ t@(Text _off _arr 0) = [t] +split p t = loop t + where loop s | null s' = [l] + | otherwise = l : loop (unsafeTail s') + where (# l, s' #) = span_ (not . p) s +{-# INLINE split #-} + +-- | /O(n)/ Splits a 'Text' into components of length @k@. The last +-- element may be shorter than the other chunks, depending on the +-- length of the input. 
Examples: +-- +-- > chunksOf 3 "foobarbaz" == ["foo","bar","baz"] +-- > chunksOf 4 "haskell.org" == ["hask","ell.","org"] +chunksOf :: Int -> Text -> [Text] +chunksOf k = go + where + go t = case splitAt k t of + (a,b) | null a -> [] + | otherwise -> a : go b +{-# INLINE chunksOf #-} + +-- ---------------------------------------------------------------------------- +-- * Searching + +------------------------------------------------------------------------------- +-- ** Searching with a predicate + +-- | /O(n)/ The 'find' function takes a predicate and a 'Text', and +-- returns the first element matching the predicate, or 'Nothing' if +-- there is no such element. +find :: (Char -> Bool) -> Text -> Maybe Char +find p t = S.findBy p (stream t) +{-# INLINE find #-} + +-- | /O(n)/ The 'partition' function takes a predicate and a 'Text', +-- and returns the pair of 'Text's with elements which do and do not +-- satisfy the predicate, respectively; i.e. +-- +-- > partition p t == (filter p t, filter (not . p) t) +partition :: (Char -> Bool) -> Text -> (Text, Text) +partition p t = (filter p t, filter (not . p) t) +{-# INLINE partition #-} + +-- | /O(n)/ 'filter', applied to a predicate and a 'Text', +-- returns a 'Text' containing those characters that satisfy the +-- predicate. +filter :: (Char -> Bool) -> Text -> Text +filter p t = unstream (S.filter p (stream t)) +{-# INLINE filter #-} + +-- | /O(n+m)/ Find the first instance of @needle@ (which must be +-- non-'null') in @haystack@. The first element of the returned tuple +-- is the prefix of @haystack@ before @needle@ is matched. The second +-- is the remainder of @haystack@, starting with the match. +-- +-- Examples: +-- +-- > breakOn "::" "a::b::c" ==> ("a", "::b::c") +-- > breakOn "/" "foobar" ==> ("foobar", "") +-- +-- Laws: +-- +-- > append prefix match == haystack +-- > where (prefix, match) = breakOn needle haystack +-- +-- If you need to break a string by a substring repeatedly (e.g. 
you +-- want to break on every instance of a substring), use 'breakOnAll' +-- instead, as it has lower startup overhead. +-- +-- In (unlikely) bad cases, this function's time complexity degrades +-- towards /O(n*m)/. +breakOn :: Text -> Text -> (Text, Text) +breakOn pat src@(Text arr off len) + | null pat = emptyError "breakOn" + | otherwise = case indices pat src of + [] -> (src, empty) + (x:_) -> (text arr off x, text arr (off+x) (len-x)) +{-# INLINE breakOn #-} + +-- | /O(n+m)/ Similar to 'breakOn', but searches from the end of the +-- string. +-- +-- The first element of the returned tuple is the prefix of @haystack@ +-- up to and including the last match of @needle@. The second is the +-- remainder of @haystack@, following the match. +-- +-- > breakOnEnd "::" "a::b::c" ==> ("a::b::", "c") +breakOnEnd :: Text -> Text -> (Text, Text) +breakOnEnd pat src = (reverse b, reverse a) + where (a,b) = breakOn (reverse pat) (reverse src) +{-# INLINE breakOnEnd #-} + +-- | /O(n+m)/ Find all non-overlapping instances of @needle@ in +-- @haystack@. Each element of the returned list consists of a pair: +-- +-- * The entire string prior to the /k/th match (i.e. the prefix) +-- +-- * The /k/th match, followed by the remainder of the string +-- +-- Examples: +-- +-- > breakOnAll "::" "" +-- > ==> [] +-- > breakOnAll "/" "a/b/c/" +-- > ==> [("a", "/b/c/"), ("a/b", "/c/"), ("a/b/c", "/")] +-- +-- In (unlikely) bad cases, this function's time complexity degrades +-- towards /O(n*m)/. +-- +-- The @needle@ parameter may not be empty. 
+breakOnAll :: Text -- ^ @needle@ to search for + -> Text -- ^ @haystack@ in which to search + -> [(Text, Text)] +breakOnAll pat src@(Text arr off slen) + | null pat = emptyError "breakOnAll" + | otherwise = L.map step (indices pat src) + where + step x = (chunk 0 x, chunk x (slen-x)) + chunk !n !l = text arr (n+off) l +{-# INLINE breakOnAll #-} + +------------------------------------------------------------------------------- +-- ** Indexing 'Text's + +-- $index +-- +-- If you think of a 'Text' value as an array of 'Char' values (which +-- it is not), you run the risk of writing inefficient code. +-- +-- An idiom that is common in some languages is to find the numeric +-- offset of a character or substring, then use that number to split +-- or trim the searched string. With a 'Text' value, this approach +-- would require two /O(n)/ operations: one to perform the search, and +-- one to operate from wherever the search ended. +-- +-- For example, suppose you have a string that you want to split on +-- the substring @\"::\"@, such as @\"foo::bar::quux\"@. Instead of +-- searching for the index of @\"::\"@ and taking the substrings +-- before and after that index, you would instead use @breakOnAll \"::\"@. + +-- | /O(n)/ 'Text' index (subscript) operator, starting from 0. +index :: Text -> Int -> Char +index t n = S.index (stream t) n +{-# INLINE index #-} + +-- | /O(n)/ The 'findIndex' function takes a predicate and a 'Text' +-- and returns the index of the first element in the 'Text' satisfying +-- the predicate. Subject to fusion. +findIndex :: (Char -> Bool) -> Text -> Maybe Int +findIndex p t = S.findIndex p (stream t) +{-# INLINE findIndex #-} + +-- | /O(n+m)/ The 'count' function returns the number of times the +-- query string appears in the given 'Text'. An empty query string is +-- invalid, and will cause an error to be raised. +-- +-- In (unlikely) bad cases, this function's time complexity degrades +-- towards /O(n*m)/. 
+count :: Text -> Text -> Int +count pat src + | null pat = emptyError "count" + | isSingleton pat = countChar (unsafeHead pat) src + | otherwise = L.length (indices pat src) +{-# INLINE [1] count #-} + +{-# RULES +"TEXT count/singleton -> countChar" [~1] forall c t. + count (singleton c) t = countChar c t + #-} + +-- | /O(n)/ The 'countChar' function returns the number of times the +-- query element appears in the given 'Text'. Subject to fusion. +countChar :: Char -> Text -> Int +countChar c t = S.countChar c (stream t) +{-# INLINE countChar #-} + +------------------------------------------------------------------------------- +-- * Zipping + +-- | /O(n)/ 'zip' takes two 'Text's and returns a list of +-- corresponding pairs of bytes. If one input 'Text' is short, +-- excess elements of the longer 'Text' are discarded. This is +-- equivalent to a pair of 'unpack' operations. +zip :: Text -> Text -> [(Char,Char)] +zip a b = S.unstreamList $ S.zipWith (,) (stream a) (stream b) +{-# INLINE zip #-} + +-- | /O(n)/ 'zipWith' generalises 'zip' by zipping with the function +-- given as the first argument, instead of a tupling function. +-- Performs replacement on invalid scalar values. +zipWith :: (Char -> Char -> Char) -> Text -> Text -> Text +zipWith f t1 t2 = unstream (S.zipWith g (stream t1) (stream t2)) + where g a b = safe (f a b) +{-# INLINE zipWith #-} + +-- | /O(n)/ Breaks a 'Text' up into a list of words, delimited by 'Char's +-- representing white space. +words :: Text -> [Text] +words t@(Text arr off len) = loop 0 0 + where + loop !start !n + | n >= len = if start == n + then [] + else [Text arr (start+off) (n-start)] + | isSpace c = + if start == n + then loop (start+1) (start+1) + else Text arr (start+off) (n-start) : loop (n+d) (n+d) + | otherwise = loop start (n+d) + where Iter c d = iter t n +{-# INLINE words #-} + +-- | /O(n)/ Breaks a 'Text' up into a list of 'Text's at +-- newline 'Char's. The resulting strings do not contain newlines. 
+lines :: Text -> [Text] +lines ps | null ps = [] + | otherwise = h : if null t + then [] + else lines (unsafeTail t) + where (# h,t #) = span_ (/= '\n') ps +{-# INLINE lines #-} + +{- +-- | /O(n)/ Portably breaks a 'Text' up into a list of 'Text's at line +-- boundaries. +-- +-- A line boundary is considered to be either a line feed, a carriage +-- return immediately followed by a line feed, or a carriage return. +-- This accounts for both Unix and Windows line ending conventions, +-- and for the old convention used on Mac OS 9 and earlier. +lines' :: Text -> [Text] +lines' ps | null ps = [] + | otherwise = h : case uncons t of + Nothing -> [] + Just (c,t') + | c == '\n' -> lines t' + | c == '\r' -> case uncons t' of + Just ('\n',t'') -> lines t'' + _ -> lines t' + where (h,t) = span notEOL ps + notEOL c = c /= '\n' && c /= '\r' +{-# INLINE lines' #-} +-} + +-- | /O(n)/ Joins lines, after appending a terminating newline to +-- each. +unlines :: [Text] -> Text +unlines = concat . L.map (`snoc` '\n') +{-# INLINE unlines #-} + +-- | /O(n)/ Joins words using single space characters. +unwords :: [Text] -> Text +unwords = intercalate (singleton ' ') +{-# INLINE unwords #-} + +-- | /O(n)/ The 'isPrefixOf' function takes two 'Text's and returns +-- 'True' iff the first is a prefix of the second. Subject to fusion. +isPrefixOf :: Text -> Text -> Bool +isPrefixOf a@(Text _ _ alen) b@(Text _ _ blen) = + alen <= blen && S.isPrefixOf (stream a) (stream b) +{-# INLINE [1] isPrefixOf #-} + +{-# RULES +"TEXT isPrefixOf -> fused" [~1] forall s t. + isPrefixOf s t = S.isPrefixOf (stream s) (stream t) + #-} + +-- | /O(n)/ The 'isSuffixOf' function takes two 'Text's and returns +-- 'True' iff the first is a suffix of the second. 
+isSuffixOf :: Text -> Text -> Bool +isSuffixOf a@(Text _aarr _aoff alen) b@(Text barr boff blen) = + d >= 0 && a == b' + where d = blen - alen + b' | d == 0 = b + | otherwise = Text barr (boff+d) alen +{-# INLINE isSuffixOf #-} + +-- | /O(n+m)/ The 'isInfixOf' function takes two 'Text's and returns +-- 'True' iff the first is contained, wholly and intact, anywhere +-- within the second. +-- +-- In (unlikely) bad cases, this function's time complexity degrades +-- towards /O(n*m)/. +isInfixOf :: Text -> Text -> Bool +isInfixOf needle haystack + | null needle = True + | isSingleton needle = S.elem (unsafeHead needle) . S.stream $ haystack + | otherwise = not . L.null . indices needle $ haystack +{-# INLINE [1] isInfixOf #-} + +{-# RULES +"TEXT isInfixOf/singleton -> S.elem/S.stream" [~1] forall n h. + isInfixOf (singleton n) h = S.elem n (S.stream h) + #-} + +------------------------------------------------------------------------------- +-- * View patterns + +-- | /O(n)/ Return the suffix of the second string if its prefix +-- matches the entire first string. +-- +-- Examples: +-- +-- > stripPrefix "foo" "foobar" == Just "bar" +-- > stripPrefix "" "baz" == Just "baz" +-- > stripPrefix "foo" "quux" == Nothing +-- +-- This is particularly useful with the @ViewPatterns@ extension to +-- GHC, as follows: +-- +-- > {-# LANGUAGE ViewPatterns #-} +-- > import Data.Text as T +-- > +-- > fnordLength :: Text -> Int +-- > fnordLength (stripPrefix "fnord" -> Just suf) = T.length suf +-- > fnordLength _ = -1 +stripPrefix :: Text -> Text -> Maybe Text +stripPrefix p@(Text _arr _off plen) t@(Text arr off len) + | p `isPrefixOf` t = Just $! text arr (off+plen) (len-plen) + | otherwise = Nothing + +-- | /O(n)/ Find the longest non-empty common prefix of two strings +-- and return it, along with the suffixes of each string at which they +-- no longer match. +-- +-- If the strings do not have a common prefix or either one is empty, +-- this function returns 'Nothing'. 
+-- +-- Examples: +-- +-- > commonPrefixes "foobar" "fooquux" == Just ("foo","bar","quux") +-- > commonPrefixes "veeble" "fetzer" == Nothing +-- > commonPrefixes "" "baz" == Nothing +commonPrefixes :: Text -> Text -> Maybe (Text,Text,Text) +commonPrefixes t0@(Text arr0 off0 len0) t1@(Text arr1 off1 len1) = go 0 0 + where + go !i !j | i < len0 && j < len1 && a == b = go (i+d0) (j+d1) + | i > 0 = Just (Text arr0 off0 i, + text arr0 (off0+i) (len0-i), + text arr1 (off1+j) (len1-j)) + | otherwise = Nothing + where Iter a d0 = iter t0 i + Iter b d1 = iter t1 j + +-- | /O(n)/ Return the prefix of the second string if its suffix +-- matches the entire first string. +-- +-- Examples: +-- +-- > stripSuffix "bar" "foobar" == Just "foo" +-- > stripSuffix "" "baz" == Just "baz" +-- > stripSuffix "foo" "quux" == Nothing +-- +-- This is particularly useful with the @ViewPatterns@ extension to +-- GHC, as follows: +-- +-- > {-# LANGUAGE ViewPatterns #-} +-- > import Data.Text as T +-- > +-- > quuxLength :: Text -> Int +-- > quuxLength (stripSuffix "quux" -> Just pre) = T.length pre +-- > quuxLength _ = -1 +stripSuffix :: Text -> Text -> Maybe Text +stripSuffix p@(Text _arr _off plen) t@(Text arr off len) + | p `isSuffixOf` t = Just $! text arr off (len-plen) + | otherwise = Nothing + +-- | Add a list of non-negative numbers. Errors out on overflow. +sumP :: String -> [Int] -> Int +sumP fun = go 0 + where go !a (x:xs) + | ax >= 0 = go ax xs + | otherwise = overflowError fun + where ax = a + x + go a _ = a + +emptyError :: String -> a +emptyError fun = P.error $ "Data.Text." ++ fun ++ ": empty input" + +overflowError :: String -> a +overflowError fun = P.error $ "Data.Text." ++ fun ++ ": size overflow" + +-- | /O(n)/ Make a distinct copy of the given string, sharing no +-- storage with the original string. +-- +-- As an example, suppose you read a large string, of which you need +-- only a small portion. 
If you do not use 'copy', the entire original +-- array will be kept alive in memory by the smaller string. Making a +-- copy \"breaks the link\" to the original array, allowing it to be +-- garbage collected if there are no other live references to it. +copy :: Text -> Text +copy (Text arr off len) = Text (A.run go) 0 len + where + go :: ST s (A.MArray s) + go = do + marr <- A.new len + A.copyI marr 0 arr off len + return marr diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/include/text_cbits.h cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/include/text_cbits.h --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/include/text_cbits.h 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/include/text_cbits.h 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,11 @@ +/* + * Copyright (c) 2013 Bryan O'Sullivan . + */ + +#ifndef _text_cbits_h +#define _text_cbits_h + +#define UTF8_ACCEPT 0 +#define UTF8_REJECT 12 + +#endif diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/LICENSE cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/LICENSE --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/LICENSE 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,26 @@ +Copyright (c) 2008-2009, Tom Harper +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/README.markdown cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/README.markdown --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/README.markdown 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/README.markdown 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,42 @@ +# Text: Fast, packed Unicode strings, using stream fusion + +This package provides the Data.Text library, a library for the space- +and time-efficient manipulation of Unicode text in Haskell. + + +# Normalization, conversion, and collation, oh my! + +This library intentionally provides a simple API based on the +Haskell prelude's list manipulation functions. For more complicated +real-world tasks, such as Unicode normalization, conversion to and +from a larger variety of encodings, and collation, use the [text-icu +package](http://hackage.haskell.org/cgi-bin/hackage-scripts/package/text-icu). + +That library uses the well-respected and liberally licensed ICU +library to provide these facilities. + + +# Get involved! + +Please report bugs via the +[github issue tracker](https://github.com/bos/text/issues). 
+ +Master [git repository](https://github.com/bos/text): + +* `git clone git://github.com/bos/text.git` + +There's also a [Mercurial mirror](https://bitbucket.org/bos/text): + +* `hg clone https://bitbucket.org/bos/text` + +(You can create and contribute changes using either Mercurial or git.) + + +# Authors + +The base code for this library was originally written by Tom Harper, +based on the stream fusion framework developed by Roman Leshchinskiy, +Duncan Coutts, and Don Stewart. + +The core library was fleshed out, debugged, and tested by Bryan +O'Sullivan , and he is the current maintainer. diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/scripts/ApiCompare.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/scripts/ApiCompare.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/scripts/ApiCompare.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/scripts/ApiCompare.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,28 @@ +-- This script compares the strict and lazy Text APIs to ensure that +-- they're reasonably in sync. + +{-# LANGUAGE OverloadedStrings #-} + +import qualified Data.Set as S +import qualified Data.Text as T +import System.Process + +main = do + let tidy pkg = (S.fromList . filter (T.isInfixOf "::") . T.lines . + T.replace "GHC.Int.Int64" "Int" . + T.replace "\n " "" . + T.replace (T.append (T.pack pkg) ".") "" . T.pack) `fmap` + readProcess "ghci" [] (":browse " ++ pkg) + let diff a b = mapM_ (putStrLn . (" "++) . T.unpack) . 
S.toList $ + S.difference a b + text <- tidy "Data.Text" + lazy <- tidy "Data.Text.Lazy" + list <- tidy "Data.List" + putStrLn "Text \\ List:" + diff text list + putStrLn "" + putStrLn "Text \\ Lazy:" + diff text lazy + putStrLn "" + putStrLn "Lazy \\ Text:" + diff lazy text diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/scripts/Arsec.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/scripts/Arsec.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/scripts/Arsec.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/scripts/Arsec.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,44 @@ +module Arsec + ( + Comment + , comment + , semi + , showC + , unichar + , unichars + , module Control.Applicative + , module Control.Monad + , module Data.Char + , module Text.ParserCombinators.Parsec.Char + , module Text.ParserCombinators.Parsec.Combinator + , module Text.ParserCombinators.Parsec.Error + , module Text.ParserCombinators.Parsec.Prim + ) where + +import Control.Monad +import Control.Applicative +import Data.Char +import Numeric +import Text.ParserCombinators.Parsec.Char hiding (lower, upper) +import Text.ParserCombinators.Parsec.Combinator hiding (optional) +import Text.ParserCombinators.Parsec.Error +import Text.ParserCombinators.Parsec.Prim hiding ((<|>), many) + +type Comment = String + +unichar :: Parser Char +unichar = chr . fst . head . 
readHex <$> many1 hexDigit + +unichars :: Parser [Char] +unichars = manyTill (unichar <* spaces) semi + +semi :: Parser () +semi = char ';' *> spaces *> pure () + +comment :: Parser Comment +comment = (char '#' *> manyTill anyToken (char '\n')) <|> string "\n" + +showC :: Char -> String +showC c = "'\\x" ++ d ++ "'" + where h = showHex (ord c) "" + d = replicate (4 - length h) '0' ++ h diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/scripts/CaseFolding.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/scripts/CaseFolding.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/scripts/CaseFolding.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/scripts/CaseFolding.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,46 @@ +-- This script processes the following source file: +-- +-- http://unicode.org/Public/UNIDATA/CaseFolding.txt + +module CaseFolding + ( + CaseFolding(..) + , Fold(..) + , parseCF + , mapCF + ) where + +import Arsec + +data Fold = Fold { + code :: Char + , status :: Char + , mapping :: [Char] + , name :: String + } deriving (Eq, Ord, Show) + +data CaseFolding = CF { cfComments :: [Comment], cfFolding :: [Fold] } + deriving (Show) + +entries :: Parser CaseFolding +entries = CF <$> many comment <*> many (entry <* many comment) + where + entry = Fold <$> unichar <* semi + <*> oneOf "CFST" <* semi + <*> unichars + <*> (string "# " *> manyTill anyToken (char '\n')) + +parseCF :: FilePath -> IO (Either ParseError CaseFolding) +parseCF name = parse entries name <$> readFile name + +mapCF :: CaseFolding -> [String] +mapCF (CF _ ms) = typ ++ (map nice . filter p $ ms) ++ [last] + where + typ = ["foldMapping :: forall s. Char -> s -> Step (CC s) Char" + ,"{-# NOINLINE foldMapping #-}"] + last = "foldMapping c s = Yield (toLower c) (CC s '\\0' '\\0')" + nice c = "-- " ++ name c ++ "\n" ++ + "foldMapping " ++ showC (code c) ++ " s = Yield " ++ x ++ " (CC s " ++ y ++ " " ++ z ++ ")" + where [x,y,z] = (map showC . 
take 3) (mapping c ++ repeat '\0') + p f = status f `elem` "CF" && + mapping f /= [toLower (code f)] diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/scripts/CaseMapping.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/scripts/CaseMapping.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/scripts/CaseMapping.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/scripts/CaseMapping.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,38 @@ +import System.Environment +import System.IO + +import Arsec +import CaseFolding +import SpecialCasing + +main = do + args <- getArgs + let oname = case args of + [] -> "../Data/Text/Internal/Fusion/CaseMapping.hs" + [o] -> o + psc <- parseSC "SpecialCasing.txt" + pcf <- parseCF "CaseFolding.txt" + scs <- case psc of + Left err -> print err >> return undefined + Right ms -> return ms + cfs <- case pcf of + Left err -> print err >> return undefined + Right ms -> return ms + h <- openFile oname WriteMode + let comments = map ("--" ++) $ + take 2 (cfComments cfs) ++ take 2 (scComments scs) + mapM_ (hPutStrLn h) $ + ["{-# LANGUAGE Rank2Types #-}" + ,"-- AUTOMATICALLY GENERATED - DO NOT EDIT" + ,"-- Generated by scripts/CaseMapping.hs"] ++ + comments ++ + ["" + ,"module Data.Text.Internal.Fusion.CaseMapping where" + ,"import Data.Char" + ,"import Data.Text.Internal.Fusion.Types" + ,""] + mapM_ (hPutStrLn h) (mapSC "upper" upper toUpper scs) + mapM_ (hPutStrLn h) (mapSC "lower" lower toLower scs) + mapM_ (hPutStrLn h) (mapSC "title" title toTitle scs) + mapM_ (hPutStrLn h) (mapCF cfs) + hClose h diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/scripts/SpecialCasing.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/scripts/SpecialCasing.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/scripts/SpecialCasing.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/scripts/SpecialCasing.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,56 @@ +-- This script processes 
the following source file: +-- +-- http://unicode.org/Public/UNIDATA/SpecialCasing.txt + +module SpecialCasing + ( + SpecialCasing(..) + , Case(..) + , parseSC + , mapSC + ) where + +import Arsec + +data SpecialCasing = SC { scComments :: [Comment], scCasing :: [Case] } + deriving (Show) + +data Case = Case { + code :: Char + , lower :: [Char] + , title :: [Char] + , upper :: [Char] + , conditions :: String + , name :: String + } deriving (Eq, Ord, Show) + +entries :: Parser SpecialCasing +entries = SC <$> many comment <*> many (entry <* many comment) + where + entry = Case <$> unichar <* semi + <*> unichars + <*> unichars + <*> unichars + <*> manyTill anyToken (string "# ") + <*> manyTill anyToken (char '\n') + +parseSC :: FilePath -> IO (Either ParseError SpecialCasing) +parseSC name = parse entries name <$> readFile name + +mapSC :: String -> (Case -> String) -> (Char -> Char) -> SpecialCasing + -> [String] +mapSC which access twiddle (SC _ ms) = + typ ++ (map nice . filter p $ ms) ++ [last] + where + typ = [which ++ "Mapping :: forall s. Char -> s -> Step (CC s) Char" + ,"{-# NOINLINE " ++ which ++ "Mapping #-}"] + last = which ++ "Mapping c s = Yield (to" ++ ucFirst which ++ " c) (CC s '\\0' '\\0')" + nice c = "-- " ++ name c ++ "\n" ++ + which ++ "Mapping " ++ showC (code c) ++ " s = Yield " ++ x ++ " (CC s " ++ y ++ " " ++ z ++ ")" + where [x,y,z] = (map showC . 
take 3) (access c ++ repeat '\0') + p c = [k] /= a && a /= [twiddle k] && null (conditions c) + where a = access c + k = code c + +ucFirst (c:cs) = toUpper c : cs +ucFirst [] = [] diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Setup.lhs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Setup.lhs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/Setup.lhs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/Setup.lhs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,3 @@ +#!/usr/bin/env runhaskell +> import Distribution.Simple +> main = defaultMain diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/tests/cabal.config cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/tests/cabal.config --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/tests/cabal.config 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/tests/cabal.config 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,6 @@ +-- These flags help to speed up building the test suite. + +documentation: False +executable-stripping: False +flags: developer +library-profiling: False diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/tests/.ghci cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/tests/.ghci --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/tests/.ghci 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/tests/.ghci 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1 @@ +:set -DHAVE_DEEPSEQ -isrc -i../.. 
diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/tests/LiteralRuleTest.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/tests/LiteralRuleTest.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/tests/LiteralRuleTest.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/tests/LiteralRuleTest.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,21 @@ +{-# LANGUAGE OverloadedStrings #-} + +module LiteralRuleTest where + +import Data.Text (Text) + +-- This should produce 8 firings of the "TEXT literal" rule +strings :: [Text] +strings = [ "abstime", "aclitem", "bit", "bool", "box", "bpchar", "bytea", "char" ] + +-- This should produce 7 firings of the "TEXT literal UTF8" rule +utf8Strings :: [Text] +utf8Strings = [ "\0abstime", "\0aclitem", "\xfefe bit", "\0bool", "\0box", "\0bpchar", "\0bytea" ] + +-- This should produce 4 firings of the "TEXT empty literal" rule +empties :: [Text] +empties = [ "", "", "", "" ] + +-- This should produce 5 firings of the "TEXT empty literal" rule +--singletons :: [Text] +--singletons = [ "a", "b", "c", "d", "e" ] diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/tests/Makefile cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/tests/Makefile --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/tests/Makefile 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/tests/Makefile 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,40 @@ +count = 1000 + +all: coverage literal-rule-test + +literal-rule-test: + ./literal-rule-test.sh + +coverage: build coverage/hpc_index.html + +build: text-test-data + cabal configure -fhpc + cabal build + +text-test-data: + hg clone https://bitbucket.org/bos/text-test-data + $(MAKE) -C text-test-data + +coverage/text-tests.tix: + -mkdir -p coverage + ./dist/build/text-tests/text-tests -a $(count) + mv text-tests.tix $@ + +coverage/text-tests-stdio.tix: + -mkdir -p coverage + ./scripts/cover-stdio.sh ./dist/build/text-tests-stdio/text-tests-stdio + mv 
text-tests-stdio.tix $@ + +coverage/coverage.tix: coverage/text-tests.tix coverage/text-tests-stdio.tix + hpc combine --output=$@ \ + --exclude=Main \ + coverage/text-tests.tix \ + coverage/text-tests-stdio.tix + +coverage/hpc_index.html: coverage/coverage.tix + hpc markup --destdir=coverage coverage/coverage.tix + +clean: + rm -rf dist coverage .hpc + +.PHONY: build coverage all literal-rule-test diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/tests/scripts/cover-stdio.sh cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/tests/scripts/cover-stdio.sh --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/tests/scripts/cover-stdio.sh 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/tests/scripts/cover-stdio.sh 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,62 @@ +#!/bin/bash + +if [[ $# < 1 ]]; then + echo "Usage: $0 " + exit 1 +fi + +exe=$1 + +rm -f $exe.tix + +f=$(mktemp stdio-f.XXXXXX) +g=$(mktemp stdio-g.XXXXXX) + +for t in T TL; do + echo $t.readFile > $f + $exe $t.readFile $f > $g + if ! diff -u $f $g; then + errs=$((errs+1)) + echo FAIL: $t.readFile 1>&2 + fi + + $exe $t.writeFile $f $t.writeFile + echo -n $t.writeFile > $g + if ! diff -u $f $g; then + errs=$((errs+1)) + echo FAIL: $t.writeFile 1>&2 + fi + + echo -n quux > $f + $exe $t.appendFile $f $t.appendFile + echo -n quux$t.appendFile > $g + if ! diff -u $f $g; then + errs=$((errs+1)) + echo FAIL: $t.appendFile 1>&2 + fi + + echo $t.interact | $exe $t.interact > $f + echo $t.interact > $g + if ! diff -u $f $g; then + errs=$((errs+1)) + echo FAIL: $t.interact 1>&2 + fi + + echo $t.getContents | $exe $t.getContents > $f + echo $t.getContents > $g + if ! diff -u $f $g; then + errs=$((errs+1)) + echo FAIL: $t.getContents 1>&2 + fi + + echo $t.getLine | $exe $t.getLine > $f + echo $t.getLine > $g + if ! 
diff -u $f $g; then + errs=$((errs+1)) + echo FAIL: $t.getLine 1>&2 + fi +done + +rm -f $f $g + +exit $errs diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/tests/Tests/IO.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/tests/Tests/IO.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/tests/Tests/IO.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/tests/Tests/IO.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,34 @@ +-- | Program which exposes some haskell functions as an exutable. The results +-- and coverage of this module is meant to be checked using a shell script. +-- +module Main + ( + main + ) where + +import System.Environment (getArgs) +import System.Exit (exitFailure) +import System.IO (hPutStrLn, stderr) +import qualified Data.Text as T +import qualified Data.Text.IO as T +import qualified Data.Text.Lazy as TL +import qualified Data.Text.Lazy.IO as TL + +main :: IO () +main = do + args <- getArgs + case args of + ["T.readFile", name] -> T.putStr =<< T.readFile name + ["T.writeFile", name, t] -> T.writeFile name (T.pack t) + ["T.appendFile", name, t] -> T.appendFile name (T.pack t) + ["T.interact"] -> T.interact id + ["T.getContents"] -> T.putStr =<< T.getContents + ["T.getLine"] -> T.putStrLn =<< T.getLine + + ["TL.readFile", name] -> TL.putStr =<< TL.readFile name + ["TL.writeFile", name, t] -> TL.writeFile name (TL.pack t) + ["TL.appendFile", name, t] -> TL.appendFile name (TL.pack t) + ["TL.interact"] -> TL.interact id + ["TL.getContents"] -> TL.putStr =<< TL.getContents + ["TL.getLine"] -> TL.putStrLn =<< TL.getLine + _ -> hPutStrLn stderr "invalid directive!" 
>> exitFailure diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/tests/Tests/Properties/Mul.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/tests/Tests/Properties/Mul.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/tests/Tests/Properties/Mul.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/tests/Tests/Properties/Mul.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,40 @@ +{-# LANGUAGE ScopedTypeVariables #-} + +module Tests.Properties.Mul (tests) where + +import Control.Applicative ((<$>), pure) +import Control.Exception as E (SomeException, catch, evaluate) +import Data.Int (Int32, Int64) +import Data.Text.Internal (mul, mul32, mul64) +import System.IO.Unsafe (unsafePerformIO) +import Test.Framework (Test) +import Test.Framework.Providers.QuickCheck2 (testProperty) +import Test.QuickCheck hiding ((.&.)) + +mulRef :: (Integral a, Bounded a) => a -> a -> Maybe a +mulRef a b + | ab < bot || ab > top = Nothing + | otherwise = Just (fromIntegral ab) + where ab = fromIntegral a * fromIntegral b + top = fromIntegral (maxBound `asTypeOf` a) :: Integer + bot = fromIntegral (minBound `asTypeOf` a) :: Integer + +eval :: (a -> b -> c) -> a -> b -> Maybe c +eval f a b = unsafePerformIO $ + (Just <$> evaluate (f a b)) `E.catch` (\(_::SomeException) -> pure Nothing) + +t_mul32 :: Int32 -> Int32 -> Property +t_mul32 a b = mulRef a b === eval mul32 a b + +t_mul64 :: Int64 -> Int64 -> Property +t_mul64 a b = mulRef a b === eval mul64 a b + +t_mul :: Int -> Int -> Property +t_mul a b = mulRef a b === eval mul a b + +tests :: [Test] +tests = [ + testProperty "t_mul" t_mul + , testProperty "t_mul32" t_mul32 + , testProperty "t_mul64" t_mul64 + ] diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/tests/Tests/Properties.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/tests/Tests/Properties.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/tests/Tests/Properties.hs 1970-01-01 00:00:00.000000000 +0000 +++ 
cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/tests/Tests/Properties.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,1363 @@ +-- | QuickCheck properties for the text library. + +{-# LANGUAGE BangPatterns, FlexibleInstances, OverloadedStrings, + ScopedTypeVariables, TypeSynonymInstances #-} +{-# OPTIONS_GHC -fno-enable-rewrite-rules -fno-warn-missing-signatures #-} +module Tests.Properties + ( + tests + ) where + +import Control.Applicative ((<$>), (<*>)) +import Control.Arrow ((***), second) +import Data.Bits ((.&.)) +import Data.Char (chr, isDigit, isHexDigit, isLower, isSpace, isUpper, ord) +import Data.Int (Int8, Int16, Int32, Int64) +import Data.Monoid (Monoid(..)) +import Data.String (IsString(fromString)) +import Data.Text.Encoding.Error +import Data.Text.Foreign +import Data.Text.Internal.Encoding.Utf8 +import Data.Text.Internal.Fusion.Size +import Data.Text.Internal.Search (indices) +import Data.Text.Lazy.Read as TL +import Data.Text.Read as T +import Data.Word (Word, Word8, Word16, Word32, Word64) +import Numeric (showEFloat, showFFloat, showGFloat, showHex) +import Prelude hiding (replicate) +import Test.Framework (Test, testGroup) +import Test.Framework.Providers.QuickCheck2 (testProperty) +import Test.QuickCheck hiding ((.&.)) +import Test.QuickCheck.Monadic +import Test.QuickCheck.Property (Property(..)) +import Tests.QuickCheckUtils +import Tests.Utils +import Text.Show.Functions () +import qualified Control.Exception as Exception +import qualified Data.Bits as Bits (shiftL, shiftR) +import qualified Data.ByteString as B +import qualified Data.ByteString.Lazy as BL +import qualified Data.List as L +import qualified Data.Text as T +import qualified Data.Text.Encoding as E +import qualified Data.Text.IO as T +import qualified Data.Text.Internal.Fusion as S +import qualified Data.Text.Internal.Fusion.Common as S +import qualified Data.Text.Internal.Lazy.Fusion as SL +import qualified Data.Text.Internal.Lazy.Search as S (indices) +import qualified 
Data.Text.Internal.Unsafe.Shift as U +import qualified Data.Text.Lazy as TL +import qualified Data.Text.Lazy.Builder as TB +import qualified Data.Text.Lazy.Builder.Int as TB +import qualified Data.Text.Lazy.Builder.RealFloat as TB +import qualified Data.Text.Lazy.Encoding as EL +import qualified Data.Text.Lazy.IO as TL +import qualified System.IO as IO +import qualified Tests.Properties.Mul as Mul +import qualified Tests.SlowFunctions as Slow + +t_pack_unpack = (T.unpack . T.pack) `eq` id +tl_pack_unpack = (TL.unpack . TL.pack) `eq` id +t_stream_unstream = (S.unstream . S.stream) `eq` id +tl_stream_unstream = (SL.unstream . SL.stream) `eq` id +t_reverse_stream t = (S.reverse . S.reverseStream) t == t +t_singleton c = [c] == (T.unpack . T.singleton) c +tl_singleton c = [c] == (TL.unpack . TL.singleton) c +tl_unstreamChunks x = f 11 x == f 1000 x + where f n = SL.unstreamChunks n . S.streamList +tl_chunk_unchunk = (TL.fromChunks . TL.toChunks) `eq` id +tl_from_to_strict = (TL.fromStrict . TL.toStrict) `eq` id + +-- Note: this silently truncates code-points > 255 to 8-bit due to 'B.pack' +encodeL1 :: T.Text -> B.ByteString +encodeL1 = B.pack . map (fromIntegral . fromEnum) . T.unpack +encodeLazyL1 :: TL.Text -> BL.ByteString +encodeLazyL1 = BL.fromChunks . map encodeL1 . TL.toChunks + +t_ascii t = E.decodeASCII (E.encodeUtf8 a) == a + where a = T.map (\c -> chr (ord c `mod` 128)) t +tl_ascii t = EL.decodeASCII (EL.encodeUtf8 a) == a + where a = TL.map (\c -> chr (ord c `mod` 128)) t +t_latin1 t = E.decodeLatin1 (encodeL1 a) == a + where a = T.map (\c -> chr (ord c `mod` 256)) t +tl_latin1 t = EL.decodeLatin1 (encodeLazyL1 a) == a + where a = TL.map (\c -> chr (ord c `mod` 256)) t +t_utf8 = forAll genUnicode $ (E.decodeUtf8 . E.encodeUtf8) `eq` id +t_utf8' = forAll genUnicode $ (E.decodeUtf8' . E.encodeUtf8) `eq` (id . Right) +tl_utf8 = forAll genUnicode $ (EL.decodeUtf8 . EL.encodeUtf8) `eq` id +tl_utf8' = forAll genUnicode $ (EL.decodeUtf8' . 
EL.encodeUtf8) `eq` (id . Right) +t_utf16LE = forAll genUnicode $ (E.decodeUtf16LE . E.encodeUtf16LE) `eq` id +tl_utf16LE = forAll genUnicode $ (EL.decodeUtf16LE . EL.encodeUtf16LE) `eq` id +t_utf16BE = forAll genUnicode $ (E.decodeUtf16BE . E.encodeUtf16BE) `eq` id +tl_utf16BE = forAll genUnicode $ (EL.decodeUtf16BE . EL.encodeUtf16BE) `eq` id +t_utf32LE = forAll genUnicode $ (E.decodeUtf32LE . E.encodeUtf32LE) `eq` id +tl_utf32LE = forAll genUnicode $ (EL.decodeUtf32LE . EL.encodeUtf32LE) `eq` id +t_utf32BE = forAll genUnicode $ (E.decodeUtf32BE . E.encodeUtf32BE) `eq` id +tl_utf32BE = forAll genUnicode $ (EL.decodeUtf32BE . EL.encodeUtf32BE) `eq` id + +t_utf8_incr = forAll genUnicode $ \s (Positive n) -> (recode n `eq` id) s + where recode n = T.concat . map fst . feedChunksOf n E.streamDecodeUtf8 . + E.encodeUtf8 + +feedChunksOf :: Int -> (B.ByteString -> E.Decoding) -> B.ByteString + -> [(T.Text, B.ByteString)] +feedChunksOf n f bs + | B.null bs = [] + | otherwise = let (x,y) = B.splitAt n bs + E.Some t b f' = f x + in (t,b) : feedChunksOf n f' y + +t_utf8_undecoded = forAll genUnicode $ \t -> + let b = E.encodeUtf8 t + ls = concatMap (leftover . E.encodeUtf8 . T.singleton) . T.unpack $ t + leftover = (++ [B.empty]) . init . tail . B.inits + in (map snd . feedChunksOf 1 E.streamDecodeUtf8) b == ls + +data Badness = Solo | Leading | Trailing + deriving (Eq, Show) + +instance Arbitrary Badness where + arbitrary = elements [Solo, Leading, Trailing] + +t_utf8_err :: Badness -> DecodeErr -> Property +t_utf8_err bad de = do + let gen = case bad of + Solo -> genInvalidUTF8 + Leading -> B.append <$> genInvalidUTF8 <*> genUTF8 + Trailing -> B.append <$> genUTF8 <*> genInvalidUTF8 + genUTF8 = E.encodeUtf8 <$> genUnicode + forAll gen $ \bs -> MkProperty $ do + onErr <- genDecodeErr de + unProperty . 
monadicIO $ do + l <- run $ let len = T.length (E.decodeUtf8With onErr bs) + in (len `seq` return (Right len)) `Exception.catch` + (\(e::UnicodeException) -> return (Left e)) + assert $ case l of + Left err -> length (show err) >= 0 + Right _ -> de /= Strict + +t_utf8_err' :: B.ByteString -> Property +t_utf8_err' bs = monadicIO . assert $ case E.decodeUtf8' bs of + Left err -> length (show err) >= 0 + Right t -> T.length t >= 0 + +genInvalidUTF8 :: Gen B.ByteString +genInvalidUTF8 = B.pack <$> oneof [ + -- invalid leading byte of a 2-byte sequence + (:) <$> choose (0xC0, 0xC1) <*> upTo 1 contByte + -- invalid leading byte of a 4-byte sequence + , (:) <$> choose (0xF5, 0xFF) <*> upTo 3 contByte + -- 4-byte sequence greater than U+10FFFF + , do k <- choose (0x11, 0x13) + let w0 = 0xF0 + (k `Bits.shiftR` 2) + w1 = 0x80 + ((k .&. 3) `Bits.shiftL` 4) + ([w0,w1]++) <$> vectorOf 2 contByte + -- continuation bytes without a start byte + , listOf1 contByte + -- short 2-byte sequence + , (:[]) <$> choose (0xC2, 0xDF) + -- short 3-byte sequence + , (:) <$> choose (0xE0, 0xEF) <*> upTo 1 contByte + -- short 4-byte sequence + , (:) <$> choose (0xF0, 0xF4) <*> upTo 2 contByte + -- overlong encoding + , do k <- choose (0,0xFFFF) + let c = chr k + case k of + _ | k < 0x80 -> oneof [ let (w,x) = ord2 c in return [w,x] + , let (w,x,y) = ord3 c in return [w,x,y] + , let (w,x,y,z) = ord4 c in return [w,x,y,z] ] + | k < 0x7FF -> oneof [ let (w,x,y) = ord3 c in return [w,x,y] + , let (w,x,y,z) = ord4 c in return [w,x,y,z] ] + | otherwise -> let (w,x,y,z) = ord4 c in return [w,x,y,z] + ] + where + contByte = (0x80 +) <$> choose (0, 0x3f) + upTo n gen = do + k <- choose (0,n) + vectorOf k gen + +s_Eq s = (s==) `eq` ((S.streamList s==) . S.streamList) + where _types = s :: String +sf_Eq p s = + ((L.filter p s==) . L.filter p) `eq` + (((S.filter p $ S.streamList s)==) . S.filter p . S.streamList) +t_Eq s = (s==) `eq` ((T.pack s==) . T.pack) +tl_Eq s = (s==) `eq` ((TL.pack s==) . 
TL.pack) +s_Ord s = (compare s) `eq` (compare (S.streamList s) . S.streamList) + where _types = s :: String +sf_Ord p s = + ((compare $ L.filter p s) . L.filter p) `eq` + (compare (S.filter p $ S.streamList s) . S.filter p . S.streamList) +t_Ord s = (compare s) `eq` (compare (T.pack s) . T.pack) +tl_Ord s = (compare s) `eq` (compare (TL.pack s) . TL.pack) +t_Read = id `eq` (T.unpack . read . show) +tl_Read = id `eq` (TL.unpack . read . show) +t_Show = show `eq` (show . T.pack) +tl_Show = show `eq` (show . TL.pack) +t_mappend s = mappend s`eqP` (unpackS . mappend (T.pack s)) +tl_mappend s = mappend s`eqP` (unpackS . mappend (TL.pack s)) +t_mconcat = unsquare $ + mconcat `eq` (unpackS . mconcat . L.map T.pack) +tl_mconcat = unsquare $ + mconcat `eq` (unpackS . mconcat . L.map TL.pack) +t_mempty = mempty == (unpackS (mempty :: T.Text)) +tl_mempty = mempty == (unpackS (mempty :: TL.Text)) +t_IsString = fromString `eqP` (T.unpack . fromString) +tl_IsString = fromString `eqP` (TL.unpack . fromString) + +s_cons x = (x:) `eqP` (unpackS . S.cons x) +s_cons_s x = (x:) `eqP` (unpackS . S.unstream . S.cons x) +sf_cons p x = ((x:) . L.filter p) `eqP` (unpackS . S.cons x . S.filter p) +t_cons x = (x:) `eqP` (unpackS . T.cons x) +tl_cons x = (x:) `eqP` (unpackS . TL.cons x) +s_snoc x = (++ [x]) `eqP` (unpackS . (flip S.snoc) x) +t_snoc x = (++ [x]) `eqP` (unpackS . (flip T.snoc) x) +tl_snoc x = (++ [x]) `eqP` (unpackS . (flip TL.snoc) x) +s_append s = (s++) `eqP` (unpackS . S.append (S.streamList s)) +s_append_s s = (s++) `eqP` + (unpackS . S.unstream . S.append (S.streamList s)) +sf_append p s = (L.filter p s++) `eqP` + (unpackS . S.append (S.filter p $ S.streamList s)) +t_append s = (s++) `eqP` (unpackS . T.append (packS s)) + +uncons (x:xs) = Just (x,xs) +uncons _ = Nothing + +s_uncons = uncons `eqP` (fmap (second unpackS) . S.uncons) +sf_uncons p = (uncons . L.filter p) `eqP` + (fmap (second unpackS) . S.uncons . S.filter p) +t_uncons = uncons `eqP` (fmap (second unpackS) . 
T.uncons) +tl_uncons = uncons `eqP` (fmap (second unpackS) . TL.uncons) +s_head = head `eqP` S.head +sf_head p = (head . L.filter p) `eqP` (S.head . S.filter p) +t_head = head `eqP` T.head +tl_head = head `eqP` TL.head +s_last = last `eqP` S.last +sf_last p = (last . L.filter p) `eqP` (S.last . S.filter p) +t_last = last `eqP` T.last +tl_last = last `eqP` TL.last +s_tail = tail `eqP` (unpackS . S.tail) +s_tail_s = tail `eqP` (unpackS . S.unstream . S.tail) +sf_tail p = (tail . L.filter p) `eqP` (unpackS . S.tail . S.filter p) +t_tail = tail `eqP` (unpackS . T.tail) +tl_tail = tail `eqP` (unpackS . TL.tail) +s_init = init `eqP` (unpackS . S.init) +s_init_s = init `eqP` (unpackS . S.unstream . S.init) +sf_init p = (init . L.filter p) `eqP` (unpackS . S.init . S.filter p) +t_init = init `eqP` (unpackS . T.init) +tl_init = init `eqP` (unpackS . TL.init) +s_null = null `eqP` S.null +sf_null p = (null . L.filter p) `eqP` (S.null . S.filter p) +t_null = null `eqP` T.null +tl_null = null `eqP` TL.null +s_length = length `eqP` S.length +sf_length p = (length . L.filter p) `eqP` (S.length . S.filter p) +sl_length = (fromIntegral . length) `eqP` SL.length +t_length = length `eqP` T.length +tl_length = L.genericLength `eqP` TL.length +t_compareLength t = (compare (T.length t)) `eq` T.compareLength t +tl_compareLength t= (compare (TL.length t)) `eq` TL.compareLength t + +s_map f = map f `eqP` (unpackS . S.map f) +s_map_s f = map f `eqP` (unpackS . S.unstream . S.map f) +sf_map p f = (map f . L.filter p) `eqP` (unpackS . S.map f . S.filter p) +t_map f = map f `eqP` (unpackS . T.map f) +tl_map f = map f `eqP` (unpackS . TL.map f) +s_intercalate c = unsquare $ + L.intercalate c `eq` + (unpackS . S.intercalate (packS c) . map packS) +t_intercalate c = unsquare $ + L.intercalate c `eq` + (unpackS . T.intercalate (packS c) . map packS) +tl_intercalate c = unsquare $ + L.intercalate c `eq` + (unpackS . TL.intercalate (TL.pack c) . 
map TL.pack) +s_intersperse c = L.intersperse c `eqP` + (unpackS . S.intersperse c) +s_intersperse_s c = L.intersperse c `eqP` + (unpackS . S.unstream . S.intersperse c) +sf_intersperse p c= (L.intersperse c . L.filter p) `eqP` + (unpackS . S.intersperse c . S.filter p) +t_intersperse c = unsquare $ + L.intersperse c `eqP` (unpackS . T.intersperse c) +tl_intersperse c = unsquare $ + L.intersperse c `eqP` (unpackS . TL.intersperse c) +t_transpose = unsquare $ + L.transpose `eq` (map unpackS . T.transpose . map packS) +tl_transpose = unsquare $ + L.transpose `eq` (map unpackS . TL.transpose . map TL.pack) +t_reverse = L.reverse `eqP` (unpackS . T.reverse) +tl_reverse = L.reverse `eqP` (unpackS . TL.reverse) +t_reverse_short n = L.reverse `eqP` (unpackS . S.reverse . shorten n . S.stream) + +t_replace s d = (L.intercalate d . splitOn s) `eqP` + (unpackS . T.replace (T.pack s) (T.pack d)) +tl_replace s d = (L.intercalate d . splitOn s) `eqP` + (unpackS . TL.replace (TL.pack s) (TL.pack d)) + +splitOn :: (Eq a) => [a] -> [a] -> [[a]] +splitOn pat src0 + | l == 0 = error "splitOn: empty" + | otherwise = go src0 + where + l = length pat + go src = search 0 src + where + search _ [] = [src] + search !n s@(_:s') + | pat `L.isPrefixOf` s = take n src : go (drop l s) + | otherwise = search (n+1) s' + +s_toCaseFold_length xs = S.length (S.toCaseFold s) >= length xs + where s = S.streamList xs +sf_toCaseFold_length p xs = + (S.length . S.toCaseFold . S.filter p $ s) >= (length . L.filter p $ xs) + where s = S.streamList xs +t_toCaseFold_length t = T.length (T.toCaseFold t) >= T.length t +tl_toCaseFold_length t = TL.length (TL.toCaseFold t) >= TL.length t +t_toLower_length t = T.length (T.toLower t) >= T.length t +t_toLower_lower t = p (T.toLower t) >= p t + where p = T.length . T.filter isLower +tl_toLower_lower t = p (TL.toLower t) >= p t + where p = TL.length . 
TL.filter isLower +t_toUpper_length t = T.length (T.toUpper t) >= T.length t +t_toUpper_upper t = p (T.toUpper t) >= p t + where p = T.length . T.filter isUpper +tl_toUpper_upper t = p (TL.toUpper t) >= p t + where p = TL.length . TL.filter isUpper + +justifyLeft k c xs = xs ++ L.replicate (k - length xs) c +justifyRight m n xs = L.replicate (m - length xs) n ++ xs +center k c xs + | len >= k = xs + | otherwise = L.replicate l c ++ xs ++ L.replicate r c + where len = length xs + d = k - len + r = d `div` 2 + l = d - r + +s_justifyLeft k c = justifyLeft j c `eqP` (unpackS . S.justifyLeftI j c) + where j = fromIntegral (k :: Word8) +s_justifyLeft_s k c = justifyLeft j c `eqP` + (unpackS . S.unstream . S.justifyLeftI j c) + where j = fromIntegral (k :: Word8) +sf_justifyLeft p k c = (justifyLeft j c . L.filter p) `eqP` + (unpackS . S.justifyLeftI j c . S.filter p) + where j = fromIntegral (k :: Word8) +t_justifyLeft k c = justifyLeft j c `eqP` (unpackS . T.justifyLeft j c) + where j = fromIntegral (k :: Word8) +tl_justifyLeft k c = justifyLeft j c `eqP` + (unpackS . TL.justifyLeft (fromIntegral j) c) + where j = fromIntegral (k :: Word8) +t_justifyRight k c = justifyRight j c `eqP` (unpackS . T.justifyRight j c) + where j = fromIntegral (k :: Word8) +tl_justifyRight k c = justifyRight j c `eqP` + (unpackS . TL.justifyRight (fromIntegral j) c) + where j = fromIntegral (k :: Word8) +t_center k c = center j c `eqP` (unpackS . T.center j c) + where j = fromIntegral (k :: Word8) +tl_center k c = center j c `eqP` (unpackS . TL.center (fromIntegral j) c) + where j = fromIntegral (k :: Word8) + +sf_foldl p f z = (L.foldl f z . L.filter p) `eqP` (S.foldl f z . S.filter p) + where _types = f :: Char -> Char -> Char +t_foldl f z = L.foldl f z `eqP` (T.foldl f z) + where _types = f :: Char -> Char -> Char +tl_foldl f z = L.foldl f z `eqP` (TL.foldl f z) + where _types = f :: Char -> Char -> Char +sf_foldl' p f z = (L.foldl' f z . L.filter p) `eqP` + (S.foldl' f z . 
S.filter p) + where _types = f :: Char -> Char -> Char +t_foldl' f z = L.foldl' f z `eqP` T.foldl' f z + where _types = f :: Char -> Char -> Char +tl_foldl' f z = L.foldl' f z `eqP` TL.foldl' f z + where _types = f :: Char -> Char -> Char +sf_foldl1 p f = (L.foldl1 f . L.filter p) `eqP` (S.foldl1 f . S.filter p) +t_foldl1 f = L.foldl1 f `eqP` T.foldl1 f +tl_foldl1 f = L.foldl1 f `eqP` TL.foldl1 f +sf_foldl1' p f = (L.foldl1' f . L.filter p) `eqP` (S.foldl1' f . S.filter p) +t_foldl1' f = L.foldl1' f `eqP` T.foldl1' f +tl_foldl1' f = L.foldl1' f `eqP` TL.foldl1' f +sf_foldr p f z = (L.foldr f z . L.filter p) `eqP` (S.foldr f z . S.filter p) + where _types = f :: Char -> Char -> Char +t_foldr f z = L.foldr f z `eqP` T.foldr f z + where _types = f :: Char -> Char -> Char +tl_foldr f z = unsquare $ + L.foldr f z `eqP` TL.foldr f z + where _types = f :: Char -> Char -> Char +sf_foldr1 p f = unsquare $ + (L.foldr1 f . L.filter p) `eqP` (S.foldr1 f . S.filter p) +t_foldr1 f = L.foldr1 f `eqP` T.foldr1 f +tl_foldr1 f = unsquare $ + L.foldr1 f `eqP` TL.foldr1 f + +s_concat_s = unsquare $ + L.concat `eq` (unpackS . S.unstream . S.concat . map packS) +sf_concat p = unsquare $ + (L.concat . map (L.filter p)) `eq` + (unpackS . S.concat . map (S.filter p . packS)) +t_concat = unsquare $ + L.concat `eq` (unpackS . T.concat . map packS) +tl_concat = unsquare $ + L.concat `eq` (unpackS . TL.concat . map TL.pack) +sf_concatMap p f = unsquare $ (L.concatMap f . L.filter p) `eqP` + (unpackS . S.concatMap (packS . f) . S.filter p) +t_concatMap f = unsquare $ + L.concatMap f `eqP` (unpackS . T.concatMap (packS . f)) +tl_concatMap f = unsquare $ + L.concatMap f `eqP` (unpackS . TL.concatMap (TL.pack . f)) +sf_any q p = (L.any p . L.filter q) `eqP` (S.any p . S.filter q) +t_any p = L.any p `eqP` T.any p +tl_any p = L.any p `eqP` TL.any p +sf_all q p = (L.all p . L.filter q) `eqP` (S.all p . 
S.filter q) +t_all p = L.all p `eqP` T.all p +tl_all p = L.all p `eqP` TL.all p +sf_maximum p = (L.maximum . L.filter p) `eqP` (S.maximum . S.filter p) +t_maximum = L.maximum `eqP` T.maximum +tl_maximum = L.maximum `eqP` TL.maximum +sf_minimum p = (L.minimum . L.filter p) `eqP` (S.minimum . S.filter p) +t_minimum = L.minimum `eqP` T.minimum +tl_minimum = L.minimum `eqP` TL.minimum + +sf_scanl p f z = (L.scanl f z . L.filter p) `eqP` + (unpackS . S.scanl f z . S.filter p) +t_scanl f z = L.scanl f z `eqP` (unpackS . T.scanl f z) +tl_scanl f z = L.scanl f z `eqP` (unpackS . TL.scanl f z) +t_scanl1 f = L.scanl1 f `eqP` (unpackS . T.scanl1 f) +tl_scanl1 f = L.scanl1 f `eqP` (unpackS . TL.scanl1 f) +t_scanr f z = L.scanr f z `eqP` (unpackS . T.scanr f z) +tl_scanr f z = L.scanr f z `eqP` (unpackS . TL.scanr f z) +t_scanr1 f = L.scanr1 f `eqP` (unpackS . T.scanr1 f) +tl_scanr1 f = L.scanr1 f `eqP` (unpackS . TL.scanr1 f) + +t_mapAccumL f z = L.mapAccumL f z `eqP` (second unpackS . T.mapAccumL f z) + where _types = f :: Int -> Char -> (Int,Char) +tl_mapAccumL f z = L.mapAccumL f z `eqP` (second unpackS . TL.mapAccumL f z) + where _types = f :: Int -> Char -> (Int,Char) +t_mapAccumR f z = L.mapAccumR f z `eqP` (second unpackS . T.mapAccumR f z) + where _types = f :: Int -> Char -> (Int,Char) +tl_mapAccumR f z = L.mapAccumR f z `eqP` (second unpackS . TL.mapAccumR f z) + where _types = f :: Int -> Char -> (Int,Char) + +tl_repeat n = (L.take m . L.repeat) `eq` + (unpackS . TL.take (fromIntegral m) . TL.repeat) + where m = fromIntegral (n :: Word8) + +replicate n l = concat (L.replicate n l) + +s_replicate n = replicate m `eq` + (unpackS . S.replicateI (fromIntegral m) . packS) + where m = fromIntegral (n :: Word8) +t_replicate n = replicate m `eq` (unpackS . T.replicate m . packS) + where m = fromIntegral (n :: Word8) +tl_replicate n = replicate m `eq` + (unpackS . TL.replicate (fromIntegral m) . packS) + where m = fromIntegral (n :: Word8) + +tl_cycle n = (L.take m . 
L.cycle) `eq` + (unpackS . TL.take (fromIntegral m) . TL.cycle . packS) + where m = fromIntegral (n :: Word8) + +tl_iterate f n = (L.take m . L.iterate f) `eq` + (unpackS . TL.take (fromIntegral m) . TL.iterate f) + where m = fromIntegral (n :: Word8) + +unf :: Int -> Char -> Maybe (Char, Char) +unf n c | fromEnum c * 100 > n = Nothing + | otherwise = Just (c, succ c) + +t_unfoldr n = L.unfoldr (unf m) `eq` (unpackS . T.unfoldr (unf m)) + where m = fromIntegral (n :: Word16) +tl_unfoldr n = L.unfoldr (unf m) `eq` (unpackS . TL.unfoldr (unf m)) + where m = fromIntegral (n :: Word16) +t_unfoldrN n m = (L.take i . L.unfoldr (unf j)) `eq` + (unpackS . T.unfoldrN i (unf j)) + where i = fromIntegral (n :: Word16) + j = fromIntegral (m :: Word16) +tl_unfoldrN n m = (L.take i . L.unfoldr (unf j)) `eq` + (unpackS . TL.unfoldrN (fromIntegral i) (unf j)) + where i = fromIntegral (n :: Word16) + j = fromIntegral (m :: Word16) + +unpack2 :: (Stringy s) => (s,s) -> (String,String) +unpack2 = unpackS *** unpackS + +s_take n = L.take n `eqP` (unpackS . S.take n) +s_take_s m = L.take n `eqP` (unpackS . S.unstream . S.take n) + where n = small m +sf_take p n = (L.take n . L.filter p) `eqP` + (unpackS . S.take n . S.filter p) +t_take n = L.take n `eqP` (unpackS . T.take n) +t_takeEnd n = (L.reverse . L.take n . L.reverse) `eqP` + (unpackS . T.takeEnd n) +tl_take n = L.take n `eqP` (unpackS . TL.take (fromIntegral n)) +tl_takeEnd n = (L.reverse . L.take (fromIntegral n) . L.reverse) `eqP` + (unpackS . TL.takeEnd n) +s_drop n = L.drop n `eqP` (unpackS . S.drop n) +s_drop_s m = L.drop n `eqP` (unpackS . S.unstream . S.drop n) + where n = small m +sf_drop p n = (L.drop n . L.filter p) `eqP` + (unpackS . S.drop n . S.filter p) +t_drop n = L.drop n `eqP` (unpackS . T.drop n) +t_dropEnd n = (L.reverse . L.drop n . L.reverse) `eqP` + (unpackS . T.dropEnd n) +tl_drop n = L.drop n `eqP` (unpackS . TL.drop (fromIntegral n)) +tl_dropEnd n = (L.reverse . L.drop n . L.reverse) `eqP` + (unpackS . 
TL.dropEnd (fromIntegral n)) +s_take_drop m = (L.take n . L.drop n) `eqP` (unpackS . S.take n . S.drop n) + where n = small m +s_take_drop_s m = (L.take n . L.drop n) `eqP` + (unpackS . S.unstream . S.take n . S.drop n) + where n = small m +s_takeWhile p = L.takeWhile p `eqP` (unpackS . S.takeWhile p) +s_takeWhile_s p = L.takeWhile p `eqP` (unpackS . S.unstream . S.takeWhile p) +sf_takeWhile q p = (L.takeWhile p . L.filter q) `eqP` + (unpackS . S.takeWhile p . S.filter q) +t_takeWhile p = L.takeWhile p `eqP` (unpackS . T.takeWhile p) +tl_takeWhile p = L.takeWhile p `eqP` (unpackS . TL.takeWhile p) +t_takeWhileEnd p = (L.reverse . L.takeWhile p . L.reverse) `eqP` + (unpackS . T.takeWhileEnd p) +tl_takeWhileEnd p = (L.reverse . L.takeWhile p . L.reverse) `eqP` + (unpackS . TL.takeWhileEnd p) +s_dropWhile p = L.dropWhile p `eqP` (unpackS . S.dropWhile p) +s_dropWhile_s p = L.dropWhile p `eqP` (unpackS . S.unstream . S.dropWhile p) +sf_dropWhile q p = (L.dropWhile p . L.filter q) `eqP` + (unpackS . S.dropWhile p . S.filter q) +t_dropWhile p = L.dropWhile p `eqP` (unpackS . T.dropWhile p) +tl_dropWhile p = L.dropWhile p `eqP` (unpackS . S.dropWhile p) +t_dropWhileEnd p = (L.reverse . L.dropWhile p . L.reverse) `eqP` + (unpackS . T.dropWhileEnd p) +tl_dropWhileEnd p = (L.reverse . L.dropWhile p . L.reverse) `eqP` + (unpackS . TL.dropWhileEnd p) +t_dropAround p = (L.dropWhile p . L.reverse . L.dropWhile p . L.reverse) + `eqP` (unpackS . T.dropAround p) +tl_dropAround p = (L.dropWhile p . L.reverse . L.dropWhile p . L.reverse) + `eqP` (unpackS . TL.dropAround p) +t_stripStart = T.dropWhile isSpace `eq` T.stripStart +tl_stripStart = TL.dropWhile isSpace `eq` TL.stripStart +t_stripEnd = T.dropWhileEnd isSpace `eq` T.stripEnd +tl_stripEnd = TL.dropWhileEnd isSpace `eq` TL.stripEnd +t_strip = T.dropAround isSpace `eq` T.strip +tl_strip = TL.dropAround isSpace `eq` TL.strip +t_splitAt n = L.splitAt n `eqP` (unpack2 . T.splitAt n) +tl_splitAt n = L.splitAt n `eqP` (unpack2 . 
TL.splitAt (fromIntegral n)) +t_span p = L.span p `eqP` (unpack2 . T.span p) +tl_span p = L.span p `eqP` (unpack2 . TL.span p) + +t_breakOn_id s = squid `eq` (uncurry T.append . T.breakOn s) + where squid t | T.null s = error "empty" + | otherwise = t +tl_breakOn_id s = squid `eq` (uncurry TL.append . TL.breakOn s) + where squid t | TL.null s = error "empty" + | otherwise = t +t_breakOn_start (NotEmpty s) t = + let (k,m) = T.breakOn s t + in k `T.isPrefixOf` t && (T.null m || s `T.isPrefixOf` m) +tl_breakOn_start (NotEmpty s) t = + let (k,m) = TL.breakOn s t + in k `TL.isPrefixOf` t && TL.null m || s `TL.isPrefixOf` m +t_breakOnEnd_end (NotEmpty s) t = + let (m,k) = T.breakOnEnd s t + in k `T.isSuffixOf` t && (T.null m || s `T.isSuffixOf` m) +tl_breakOnEnd_end (NotEmpty s) t = + let (m,k) = TL.breakOnEnd s t + in k `TL.isSuffixOf` t && (TL.null m || s `TL.isSuffixOf` m) +t_break p = L.break p `eqP` (unpack2 . T.break p) +tl_break p = L.break p `eqP` (unpack2 . TL.break p) +t_group = L.group `eqP` (map unpackS . T.group) +tl_group = L.group `eqP` (map unpackS . TL.group) +t_groupBy p = L.groupBy p `eqP` (map unpackS . T.groupBy p) +tl_groupBy p = L.groupBy p `eqP` (map unpackS . TL.groupBy p) +t_inits = L.inits `eqP` (map unpackS . T.inits) +tl_inits = L.inits `eqP` (map unpackS . TL.inits) +t_tails = L.tails `eqP` (map unpackS . T.tails) +tl_tails = unsquare $ + L.tails `eqP` (map unpackS . TL.tails) +t_findAppendId = unsquare $ \(NotEmpty s) ts -> + let t = T.intercalate s ts + in all (==t) $ map (uncurry T.append) (T.breakOnAll s t) +tl_findAppendId = unsquare $ \(NotEmpty s) ts -> + let t = TL.intercalate s ts + in all (==t) $ map (uncurry TL.append) (TL.breakOnAll s t) +t_findContains = unsquare $ \(NotEmpty s) -> + all (T.isPrefixOf s . snd) . T.breakOnAll s . T.intercalate s +tl_findContains = unsquare $ \(NotEmpty s) -> all (TL.isPrefixOf s . snd) . + TL.breakOnAll s . TL.intercalate s +sl_filterCount c = (L.genericLength . 
L.filter (==c)) `eqP` SL.countChar c +t_findCount s = (L.length . T.breakOnAll s) `eq` T.count s +tl_findCount s = (L.genericLength . TL.breakOnAll s) `eq` TL.count s + +t_splitOn_split s = unsquare $ + (T.splitOn s `eq` Slow.splitOn s) . T.intercalate s +tl_splitOn_split s = unsquare $ + ((TL.splitOn (TL.fromStrict s) . TL.fromStrict) `eq` + (map TL.fromStrict . T.splitOn s)) . T.intercalate s +t_splitOn_i (NotEmpty t) = id `eq` (T.intercalate t . T.splitOn t) +tl_splitOn_i (NotEmpty t) = id `eq` (TL.intercalate t . TL.splitOn t) + +t_split p = split p `eqP` (map unpackS . T.split p) +t_split_count c = (L.length . T.split (==c)) `eq` + ((1+) . T.count (T.singleton c)) +t_split_splitOn c = T.split (==c) `eq` T.splitOn (T.singleton c) +tl_split p = split p `eqP` (map unpackS . TL.split p) + +split :: (a -> Bool) -> [a] -> [[a]] +split _ [] = [[]] +split p xs = loop xs + where loop s | null s' = [l] + | otherwise = l : loop (tail s') + where (l, s') = break p s + +t_chunksOf_same_lengths k = all ((==k) . T.length) . ini . T.chunksOf k + where ini [] = [] + ini xs = init xs + +t_chunksOf_length k t = len == T.length t || (k <= 0 && len == 0) + where len = L.sum . L.map T.length $ T.chunksOf k t + +tl_chunksOf k = T.chunksOf k `eq` (map (T.concat . TL.toChunks) . + TL.chunksOf (fromIntegral k) . TL.fromStrict) + +t_lines = L.lines `eqP` (map unpackS . T.lines) +tl_lines = L.lines `eqP` (map unpackS . TL.lines) +{- +t_lines' = lines' `eqP` (map unpackS . T.lines') + where lines' "" = [] + lines' s = let (l, s') = break eol s + in l : case s' of + [] -> [] + ('\r':'\n':s'') -> lines' s'' + (_:s'') -> lines' s'' + eol c = c == '\r' || c == '\n' +-} +t_words = L.words `eqP` (map unpackS . T.words) + +tl_words = L.words `eqP` (map unpackS . TL.words) +t_unlines = unsquare $ + L.unlines `eq` (unpackS . T.unlines . map packS) +tl_unlines = unsquare $ + L.unlines `eq` (unpackS . TL.unlines . map packS) +t_unwords = unsquare $ + L.unwords `eq` (unpackS . T.unwords . 
map packS) +tl_unwords = unsquare $ + L.unwords `eq` (unpackS . TL.unwords . map packS) + +s_isPrefixOf s = L.isPrefixOf s `eqP` + (S.isPrefixOf (S.stream $ packS s) . S.stream) +sf_isPrefixOf p s = (L.isPrefixOf s . L.filter p) `eqP` + (S.isPrefixOf (S.stream $ packS s) . S.filter p . S.stream) +t_isPrefixOf s = L.isPrefixOf s`eqP` T.isPrefixOf (packS s) +tl_isPrefixOf s = L.isPrefixOf s`eqP` TL.isPrefixOf (packS s) +t_isSuffixOf s = L.isSuffixOf s`eqP` T.isSuffixOf (packS s) +tl_isSuffixOf s = L.isSuffixOf s`eqP` TL.isSuffixOf (packS s) +t_isInfixOf s = L.isInfixOf s `eqP` T.isInfixOf (packS s) +tl_isInfixOf s = L.isInfixOf s `eqP` TL.isInfixOf (packS s) + +t_stripPrefix s = (fmap packS . L.stripPrefix s) `eqP` T.stripPrefix (packS s) +tl_stripPrefix s = (fmap packS . L.stripPrefix s) `eqP` TL.stripPrefix (packS s) + +stripSuffix p t = reverse `fmap` L.stripPrefix (reverse p) (reverse t) + +t_stripSuffix s = (fmap packS . stripSuffix s) `eqP` T.stripSuffix (packS s) +tl_stripSuffix s = (fmap packS . stripSuffix s) `eqP` TL.stripSuffix (packS s) + +commonPrefixes a0@(_:_) b0@(_:_) = Just (go a0 b0 []) + where go (a:as) (b:bs) ps + | a == b = go as bs (a:ps) + go as bs ps = (reverse ps,as,bs) +commonPrefixes _ _ = Nothing + +t_commonPrefixes a b (NonEmpty p) + = commonPrefixes pa pb == + repack `fmap` T.commonPrefixes (packS pa) (packS pb) + where repack (x,y,z) = (unpackS x,unpackS y,unpackS z) + pa = p ++ a + pb = p ++ b + +tl_commonPrefixes a b (NonEmpty p) + = commonPrefixes pa pb == + repack `fmap` TL.commonPrefixes (packS pa) (packS pb) + where repack (x,y,z) = (unpackS x,unpackS y,unpackS z) + pa = p ++ a + pb = p ++ b + +sf_elem p c = (L.elem c . L.filter p) `eqP` (S.elem c . S.filter p) +sf_filter q p = (L.filter p . L.filter q) `eqP` + (unpackS . S.filter p . S.filter q) +t_filter p = L.filter p `eqP` (unpackS . T.filter p) +tl_filter p = L.filter p `eqP` (unpackS . TL.filter p) +sf_findBy q p = (L.find p . L.filter q) `eqP` (S.findBy p . 
S.filter q) +t_find p = L.find p `eqP` T.find p +tl_find p = L.find p `eqP` TL.find p +t_partition p = L.partition p `eqP` (unpack2 . T.partition p) +tl_partition p = L.partition p `eqP` (unpack2 . TL.partition p) + +sf_index p s = forAll (choose (-l,l*2)) + ((L.filter p s L.!!) `eq` S.index (S.filter p $ packS s)) + where l = L.length s +t_index s = forAll (choose (-l,l*2)) ((s L.!!) `eq` T.index (packS s)) + where l = L.length s + +tl_index s = forAll (choose (-l,l*2)) + ((s L.!!) `eq` (TL.index (packS s) . fromIntegral)) + where l = L.length s + +t_findIndex p = L.findIndex p `eqP` T.findIndex p +t_count (NotEmpty t) = (subtract 1 . L.length . T.splitOn t) `eq` T.count t +tl_count (NotEmpty t) = (subtract 1 . L.genericLength . TL.splitOn t) `eq` + TL.count t +t_zip s = L.zip s `eqP` T.zip (packS s) +tl_zip s = L.zip s `eqP` TL.zip (packS s) +sf_zipWith p c s = (L.zipWith c (L.filter p s) . L.filter p) `eqP` + (unpackS . S.zipWith c (S.filter p $ packS s) . S.filter p) +t_zipWith c s = L.zipWith c s `eqP` (unpackS . T.zipWith c (packS s)) +tl_zipWith c s = L.zipWith c s `eqP` (unpackS . TL.zipWith c (packS s)) + +t_indices (NotEmpty s) = Slow.indices s `eq` indices s +tl_indices (NotEmpty s) = lazyIndices s `eq` S.indices s + where lazyIndices ss t = map fromIntegral $ Slow.indices (conc ss) (conc t) + conc = T.concat . TL.toChunks +t_indices_occurs = unsquare $ \(NotEmpty t) ts -> + let s = T.intercalate t ts + in Slow.indices t s == indices t s + +-- Bit shifts. 
+shiftL w = forAll (choose (0,width-1)) $ \k -> Bits.shiftL w k == U.shiftL w k + where width = round (log (fromIntegral m) / log 2 :: Double) + (m,_) = (maxBound, m == w) +shiftR w = forAll (choose (0,width-1)) $ \k -> Bits.shiftR w k == U.shiftR w k + where width = round (log (fromIntegral m) / log 2 :: Double) + (m,_) = (maxBound, m == w) + +shiftL_Int = shiftL :: Int -> Property +shiftL_Word16 = shiftL :: Word16 -> Property +shiftL_Word32 = shiftL :: Word32 -> Property +shiftR_Int = shiftR :: Int -> Property +shiftR_Word16 = shiftR :: Word16 -> Property +shiftR_Word32 = shiftR :: Word32 -> Property + +-- Builder. + +tb_singleton = id `eqP` + (unpackS . TB.toLazyText . mconcat . map TB.singleton) +tb_fromText = L.concat `eq` (unpackS . TB.toLazyText . mconcat . + map (TB.fromText . packS)) +tb_associative s1 s2 s3 = + TB.toLazyText (b1 `mappend` (b2 `mappend` b3)) == + TB.toLazyText ((b1 `mappend` b2) `mappend` b3) + where b1 = TB.fromText (packS s1) + b2 = TB.fromText (packS s2) + b3 = TB.fromText (packS s3) + +-- Numeric builder stuff. + +tb_decimal :: (Integral a, Show a) => a -> Bool +tb_decimal = (TB.toLazyText . TB.decimal) `eq` (TL.pack . 
show) + +tb_decimal_integer (a::Integer) = tb_decimal a +tb_decimal_integer_big (Big a) = tb_decimal a +tb_decimal_int (a::Int) = tb_decimal a +tb_decimal_int8 (a::Int8) = tb_decimal a +tb_decimal_int16 (a::Int16) = tb_decimal a +tb_decimal_int32 (a::Int32) = tb_decimal a +tb_decimal_int64 (a::Int64) = tb_decimal a +tb_decimal_word (a::Word) = tb_decimal a +tb_decimal_word8 (a::Word8) = tb_decimal a +tb_decimal_word16 (a::Word16) = tb_decimal a +tb_decimal_word32 (a::Word32) = tb_decimal a +tb_decimal_word64 (a::Word64) = tb_decimal a + +tb_decimal_big_int (BigBounded (a::Int)) = tb_decimal a +tb_decimal_big_int64 (BigBounded (a::Int64)) = tb_decimal a +tb_decimal_big_word (BigBounded (a::Word)) = tb_decimal a +tb_decimal_big_word64 (BigBounded (a::Word64)) = tb_decimal a + +tb_hex :: (Integral a, Show a) => a -> Bool +tb_hex = (TB.toLazyText . TB.hexadecimal) `eq` (TL.pack . flip showHex "") + +tb_hexadecimal_integer (a::Integer) = tb_hex a +tb_hexadecimal_int (a::Int) = tb_hex a +tb_hexadecimal_int8 (a::Int8) = tb_hex a +tb_hexadecimal_int16 (a::Int16) = tb_hex a +tb_hexadecimal_int32 (a::Int32) = tb_hex a +tb_hexadecimal_int64 (a::Int64) = tb_hex a +tb_hexadecimal_word (a::Word) = tb_hex a +tb_hexadecimal_word8 (a::Word8) = tb_hex a +tb_hexadecimal_word16 (a::Word16) = tb_hex a +tb_hexadecimal_word32 (a::Word32) = tb_hex a +tb_hexadecimal_word64 (a::Word64) = tb_hex a + +tb_realfloat :: (RealFloat a, Show a) => a -> Bool +tb_realfloat = (TB.toLazyText . TB.realFloat) `eq` (TL.pack . 
show) + +tb_realfloat_float (a::Float) = tb_realfloat a +tb_realfloat_double (a::Double) = tb_realfloat a + +showFloat :: (RealFloat a) => TB.FPFormat -> Maybe Int -> a -> ShowS +showFloat TB.Exponent = showEFloat +showFloat TB.Fixed = showFFloat +showFloat TB.Generic = showGFloat + +tb_formatRealFloat :: (RealFloat a, Show a) => + a -> TB.FPFormat -> Precision a -> Property +tb_formatRealFloat a fmt prec = + TB.formatRealFloat fmt p a === + TB.fromString (showFloat fmt p a "") + where p = precision a prec + +tb_formatRealFloat_float (a::Float) = tb_formatRealFloat a +tb_formatRealFloat_double (a::Double) = tb_formatRealFloat a + +-- Reading. + +t_decimal (n::Int) s = + T.signed T.decimal (T.pack (show n) `T.append` t) == Right (n,t) + where t = T.dropWhile isDigit s +tl_decimal (n::Int) s = + TL.signed TL.decimal (TL.pack (show n) `TL.append` t) == Right (n,t) + where t = TL.dropWhile isDigit s +t_hexadecimal m s ox = + T.hexadecimal (T.concat [p, T.pack (showHex n ""), t]) == Right (n,t) + where t = T.dropWhile isHexDigit s + p = if ox then "0x" else "" + n = getPositive m :: Int +tl_hexadecimal m s ox = + TL.hexadecimal (TL.concat [p, TL.pack (showHex n ""), t]) == Right (n,t) + where t = TL.dropWhile isHexDigit s + p = if ox then "0x" else "" + n = getPositive m :: Int + +isFloaty c = c `elem` ("+-.0123456789eE" :: String) + +t_read_rational p tol (n::Double) s = + case p (T.pack (show n) `T.append` t) of + Left _err -> False + Right (n',t') -> t == t' && abs (n-n') <= tol + where t = T.dropWhile isFloaty s + +tl_read_rational p tol (n::Double) s = + case p (TL.pack (show n) `TL.append` t) of + Left _err -> False + Right (n',t') -> t == t' && abs (n-n') <= tol + where t = TL.dropWhile isFloaty s + +t_double = t_read_rational T.double 1e-13 +tl_double = tl_read_rational TL.double 1e-13 +t_rational = t_read_rational T.rational 1e-16 +tl_rational = tl_read_rational TL.rational 1e-16 + +-- Input and output. 
+ +t_put_get = write_read T.unlines T.filter put get + where put h = withRedirect h IO.stdout . T.putStr + get h = withRedirect h IO.stdin T.getContents +tl_put_get = write_read TL.unlines TL.filter put get + where put h = withRedirect h IO.stdout . TL.putStr + get h = withRedirect h IO.stdin TL.getContents +t_write_read = write_read T.unlines T.filter T.hPutStr T.hGetContents +tl_write_read = write_read TL.unlines TL.filter TL.hPutStr TL.hGetContents + +t_write_read_line e m b t = write_read head T.filter T.hPutStrLn + T.hGetLine e m b [t] +tl_write_read_line e m b t = write_read head TL.filter TL.hPutStrLn + TL.hGetLine e m b [t] + +-- Low-level. + +t_dropWord16 m t = dropWord16 m t `T.isSuffixOf` t +t_takeWord16 m t = takeWord16 m t `T.isPrefixOf` t +t_take_drop_16 m t = T.append (takeWord16 n t) (dropWord16 n t) == t + where n = small m +t_use_from t = monadicIO $ assert . (==t) =<< run (useAsPtr t fromPtr) + +t_copy t = T.copy t == t + +-- Regression tests. +s_filter_eq s = S.filter p t == S.streamList (filter p s) + where p = (/= S.last t) + t = S.streamList s + +-- Make a stream appear shorter than it really is, to ensure that +-- functions that consume inaccurately sized streams behave +-- themselves. 
+shorten :: Int -> S.Stream a -> S.Stream a +shorten n t@(S.Stream arr off len) + | n > 0 = S.Stream arr off (smaller (exactSize n) len) + | otherwise = t + +tests :: Test +tests = + testGroup "Properties" [ + testGroup "creation/elimination" [ + testProperty "t_pack_unpack" t_pack_unpack, + testProperty "tl_pack_unpack" tl_pack_unpack, + testProperty "t_stream_unstream" t_stream_unstream, + testProperty "tl_stream_unstream" tl_stream_unstream, + testProperty "t_reverse_stream" t_reverse_stream, + testProperty "t_singleton" t_singleton, + testProperty "tl_singleton" tl_singleton, + testProperty "tl_unstreamChunks" tl_unstreamChunks, + testProperty "tl_chunk_unchunk" tl_chunk_unchunk, + testProperty "tl_from_to_strict" tl_from_to_strict + ], + + testGroup "transcoding" [ + testProperty "t_ascii" t_ascii, + testProperty "tl_ascii" tl_ascii, + testProperty "t_latin1" t_latin1, + testProperty "tl_latin1" tl_latin1, + testProperty "t_utf8" t_utf8, + testProperty "t_utf8'" t_utf8', + testProperty "t_utf8_incr" t_utf8_incr, + testProperty "t_utf8_undecoded" t_utf8_undecoded, + testProperty "tl_utf8" tl_utf8, + testProperty "tl_utf8'" tl_utf8', + testProperty "t_utf16LE" t_utf16LE, + testProperty "tl_utf16LE" tl_utf16LE, + testProperty "t_utf16BE" t_utf16BE, + testProperty "tl_utf16BE" tl_utf16BE, + testProperty "t_utf32LE" t_utf32LE, + testProperty "tl_utf32LE" tl_utf32LE, + testProperty "t_utf32BE" t_utf32BE, + testProperty "tl_utf32BE" tl_utf32BE, + testGroup "errors" [ + testProperty "t_utf8_err" t_utf8_err, + testProperty "t_utf8_err'" t_utf8_err' + ] + ], + + testGroup "instances" [ + testProperty "s_Eq" s_Eq, + testProperty "sf_Eq" sf_Eq, + testProperty "t_Eq" t_Eq, + testProperty "tl_Eq" tl_Eq, + testProperty "s_Ord" s_Ord, + testProperty "sf_Ord" sf_Ord, + testProperty "t_Ord" t_Ord, + testProperty "tl_Ord" tl_Ord, + testProperty "t_Read" t_Read, + testProperty "tl_Read" tl_Read, + testProperty "t_Show" t_Show, + testProperty "tl_Show" tl_Show, + testProperty 
"t_mappend" t_mappend, + testProperty "tl_mappend" tl_mappend, + testProperty "t_mconcat" t_mconcat, + testProperty "tl_mconcat" tl_mconcat, + testProperty "t_mempty" t_mempty, + testProperty "tl_mempty" tl_mempty, + testProperty "t_IsString" t_IsString, + testProperty "tl_IsString" tl_IsString + ], + + testGroup "basics" [ + testProperty "s_cons" s_cons, + testProperty "s_cons_s" s_cons_s, + testProperty "sf_cons" sf_cons, + testProperty "t_cons" t_cons, + testProperty "tl_cons" tl_cons, + testProperty "s_snoc" s_snoc, + testProperty "t_snoc" t_snoc, + testProperty "tl_snoc" tl_snoc, + testProperty "s_append" s_append, + testProperty "s_append_s" s_append_s, + testProperty "sf_append" sf_append, + testProperty "t_append" t_append, + testProperty "s_uncons" s_uncons, + testProperty "sf_uncons" sf_uncons, + testProperty "t_uncons" t_uncons, + testProperty "tl_uncons" tl_uncons, + testProperty "s_head" s_head, + testProperty "sf_head" sf_head, + testProperty "t_head" t_head, + testProperty "tl_head" tl_head, + testProperty "s_last" s_last, + testProperty "sf_last" sf_last, + testProperty "t_last" t_last, + testProperty "tl_last" tl_last, + testProperty "s_tail" s_tail, + testProperty "s_tail_s" s_tail_s, + testProperty "sf_tail" sf_tail, + testProperty "t_tail" t_tail, + testProperty "tl_tail" tl_tail, + testProperty "s_init" s_init, + testProperty "s_init_s" s_init_s, + testProperty "sf_init" sf_init, + testProperty "t_init" t_init, + testProperty "tl_init" tl_init, + testProperty "s_null" s_null, + testProperty "sf_null" sf_null, + testProperty "t_null" t_null, + testProperty "tl_null" tl_null, + testProperty "s_length" s_length, + testProperty "sf_length" sf_length, + testProperty "sl_length" sl_length, + testProperty "t_length" t_length, + testProperty "tl_length" tl_length, + testProperty "t_compareLength" t_compareLength, + testProperty "tl_compareLength" tl_compareLength + ], + + testGroup "transformations" [ + testProperty "s_map" s_map, + testProperty 
"s_map_s" s_map_s, + testProperty "sf_map" sf_map, + testProperty "t_map" t_map, + testProperty "tl_map" tl_map, + testProperty "s_intercalate" s_intercalate, + testProperty "t_intercalate" t_intercalate, + testProperty "tl_intercalate" tl_intercalate, + testProperty "s_intersperse" s_intersperse, + testProperty "s_intersperse_s" s_intersperse_s, + testProperty "sf_intersperse" sf_intersperse, + testProperty "t_intersperse" t_intersperse, + testProperty "tl_intersperse" tl_intersperse, + testProperty "t_transpose" t_transpose, + testProperty "tl_transpose" tl_transpose, + testProperty "t_reverse" t_reverse, + testProperty "tl_reverse" tl_reverse, + testProperty "t_reverse_short" t_reverse_short, + testProperty "t_replace" t_replace, + testProperty "tl_replace" tl_replace, + + testGroup "case conversion" [ + testProperty "s_toCaseFold_length" s_toCaseFold_length, + testProperty "sf_toCaseFold_length" sf_toCaseFold_length, + testProperty "t_toCaseFold_length" t_toCaseFold_length, + testProperty "tl_toCaseFold_length" tl_toCaseFold_length, + testProperty "t_toLower_length" t_toLower_length, + testProperty "t_toLower_lower" t_toLower_lower, + testProperty "tl_toLower_lower" tl_toLower_lower, + testProperty "t_toUpper_length" t_toUpper_length, + testProperty "t_toUpper_upper" t_toUpper_upper, + testProperty "tl_toUpper_upper" tl_toUpper_upper + ], + + testGroup "justification" [ + testProperty "s_justifyLeft" s_justifyLeft, + testProperty "s_justifyLeft_s" s_justifyLeft_s, + testProperty "sf_justifyLeft" sf_justifyLeft, + testProperty "t_justifyLeft" t_justifyLeft, + testProperty "tl_justifyLeft" tl_justifyLeft, + testProperty "t_justifyRight" t_justifyRight, + testProperty "tl_justifyRight" tl_justifyRight, + testProperty "t_center" t_center, + testProperty "tl_center" tl_center + ] + ], + + testGroup "folds" [ + testProperty "sf_foldl" sf_foldl, + testProperty "t_foldl" t_foldl, + testProperty "tl_foldl" tl_foldl, + testProperty "sf_foldl'" sf_foldl', + testProperty 
"t_foldl'" t_foldl', + testProperty "tl_foldl'" tl_foldl', + testProperty "sf_foldl1" sf_foldl1, + testProperty "t_foldl1" t_foldl1, + testProperty "tl_foldl1" tl_foldl1, + testProperty "t_foldl1'" t_foldl1', + testProperty "sf_foldl1'" sf_foldl1', + testProperty "tl_foldl1'" tl_foldl1', + testProperty "sf_foldr" sf_foldr, + testProperty "t_foldr" t_foldr, + testProperty "tl_foldr" tl_foldr, + testProperty "sf_foldr1" sf_foldr1, + testProperty "t_foldr1" t_foldr1, + testProperty "tl_foldr1" tl_foldr1, + + testGroup "special" [ + testProperty "s_concat_s" s_concat_s, + testProperty "sf_concat" sf_concat, + testProperty "t_concat" t_concat, + testProperty "tl_concat" tl_concat, + testProperty "sf_concatMap" sf_concatMap, + testProperty "t_concatMap" t_concatMap, + testProperty "tl_concatMap" tl_concatMap, + testProperty "sf_any" sf_any, + testProperty "t_any" t_any, + testProperty "tl_any" tl_any, + testProperty "sf_all" sf_all, + testProperty "t_all" t_all, + testProperty "tl_all" tl_all, + testProperty "sf_maximum" sf_maximum, + testProperty "t_maximum" t_maximum, + testProperty "tl_maximum" tl_maximum, + testProperty "sf_minimum" sf_minimum, + testProperty "t_minimum" t_minimum, + testProperty "tl_minimum" tl_minimum + ] + ], + + testGroup "construction" [ + testGroup "scans" [ + testProperty "sf_scanl" sf_scanl, + testProperty "t_scanl" t_scanl, + testProperty "tl_scanl" tl_scanl, + testProperty "t_scanl1" t_scanl1, + testProperty "tl_scanl1" tl_scanl1, + testProperty "t_scanr" t_scanr, + testProperty "tl_scanr" tl_scanr, + testProperty "t_scanr1" t_scanr1, + testProperty "tl_scanr1" tl_scanr1 + ], + + testGroup "mapAccum" [ + testProperty "t_mapAccumL" t_mapAccumL, + testProperty "tl_mapAccumL" tl_mapAccumL, + testProperty "t_mapAccumR" t_mapAccumR, + testProperty "tl_mapAccumR" tl_mapAccumR + ], + + testGroup "unfolds" [ + testProperty "tl_repeat" tl_repeat, + testProperty "s_replicate" s_replicate, + testProperty "t_replicate" t_replicate, + testProperty 
"tl_replicate" tl_replicate, + testProperty "tl_cycle" tl_cycle, + testProperty "tl_iterate" tl_iterate, + testProperty "t_unfoldr" t_unfoldr, + testProperty "tl_unfoldr" tl_unfoldr, + testProperty "t_unfoldrN" t_unfoldrN, + testProperty "tl_unfoldrN" tl_unfoldrN + ] + ], + + testGroup "substrings" [ + testGroup "breaking" [ + testProperty "s_take" s_take, + testProperty "s_take_s" s_take_s, + testProperty "sf_take" sf_take, + testProperty "t_take" t_take, + testProperty "t_takeEnd" t_takeEnd, + testProperty "tl_take" tl_take, + testProperty "tl_takeEnd" tl_takeEnd, + testProperty "s_drop" s_drop, + testProperty "s_drop_s" s_drop_s, + testProperty "sf_drop" sf_drop, + testProperty "t_drop" t_drop, + testProperty "t_dropEnd" t_dropEnd, + testProperty "tl_drop" tl_drop, + testProperty "tl_dropEnd" tl_dropEnd, + testProperty "s_take_drop" s_take_drop, + testProperty "s_take_drop_s" s_take_drop_s, + testProperty "s_takeWhile" s_takeWhile, + testProperty "s_takeWhile_s" s_takeWhile_s, + testProperty "sf_takeWhile" sf_takeWhile, + testProperty "t_takeWhile" t_takeWhile, + testProperty "tl_takeWhile" tl_takeWhile, + testProperty "t_takeWhileEnd" t_takeWhileEnd, + testProperty "tl_takeWhileEnd" tl_takeWhileEnd, + testProperty "sf_dropWhile" sf_dropWhile, + testProperty "s_dropWhile" s_dropWhile, + testProperty "s_dropWhile_s" s_dropWhile_s, + testProperty "t_dropWhile" t_dropWhile, + testProperty "tl_dropWhile" tl_dropWhile, + testProperty "t_dropWhileEnd" t_dropWhileEnd, + testProperty "tl_dropWhileEnd" tl_dropWhileEnd, + testProperty "t_dropAround" t_dropAround, + testProperty "tl_dropAround" tl_dropAround, + testProperty "t_stripStart" t_stripStart, + testProperty "tl_stripStart" tl_stripStart, + testProperty "t_stripEnd" t_stripEnd, + testProperty "tl_stripEnd" tl_stripEnd, + testProperty "t_strip" t_strip, + testProperty "tl_strip" tl_strip, + testProperty "t_splitAt" t_splitAt, + testProperty "tl_splitAt" tl_splitAt, + testProperty "t_span" t_span, + testProperty 
"tl_span" tl_span, + testProperty "t_breakOn_id" t_breakOn_id, + testProperty "tl_breakOn_id" tl_breakOn_id, + testProperty "t_breakOn_start" t_breakOn_start, + testProperty "tl_breakOn_start" tl_breakOn_start, + testProperty "t_breakOnEnd_end" t_breakOnEnd_end, + testProperty "tl_breakOnEnd_end" tl_breakOnEnd_end, + testProperty "t_break" t_break, + testProperty "tl_break" tl_break, + testProperty "t_group" t_group, + testProperty "tl_group" tl_group, + testProperty "t_groupBy" t_groupBy, + testProperty "tl_groupBy" tl_groupBy, + testProperty "t_inits" t_inits, + testProperty "tl_inits" tl_inits, + testProperty "t_tails" t_tails, + testProperty "tl_tails" tl_tails + ], + + testGroup "breaking many" [ + testProperty "t_findAppendId" t_findAppendId, + testProperty "tl_findAppendId" tl_findAppendId, + testProperty "t_findContains" t_findContains, + testProperty "tl_findContains" tl_findContains, + testProperty "sl_filterCount" sl_filterCount, + testProperty "t_findCount" t_findCount, + testProperty "tl_findCount" tl_findCount, + testProperty "t_splitOn_split" t_splitOn_split, + testProperty "tl_splitOn_split" tl_splitOn_split, + testProperty "t_splitOn_i" t_splitOn_i, + testProperty "tl_splitOn_i" tl_splitOn_i, + testProperty "t_split" t_split, + testProperty "t_split_count" t_split_count, + testProperty "t_split_splitOn" t_split_splitOn, + testProperty "tl_split" tl_split, + testProperty "t_chunksOf_same_lengths" t_chunksOf_same_lengths, + testProperty "t_chunksOf_length" t_chunksOf_length, + testProperty "tl_chunksOf" tl_chunksOf + ], + + testGroup "lines and words" [ + testProperty "t_lines" t_lines, + testProperty "tl_lines" tl_lines, + --testProperty "t_lines'" t_lines', + testProperty "t_words" t_words, + testProperty "tl_words" tl_words, + testProperty "t_unlines" t_unlines, + testProperty "tl_unlines" tl_unlines, + testProperty "t_unwords" t_unwords, + testProperty "tl_unwords" tl_unwords + ] + ], + + testGroup "predicates" [ + testProperty "s_isPrefixOf" 
s_isPrefixOf, + testProperty "sf_isPrefixOf" sf_isPrefixOf, + testProperty "t_isPrefixOf" t_isPrefixOf, + testProperty "tl_isPrefixOf" tl_isPrefixOf, + testProperty "t_isSuffixOf" t_isSuffixOf, + testProperty "tl_isSuffixOf" tl_isSuffixOf, + testProperty "t_isInfixOf" t_isInfixOf, + testProperty "tl_isInfixOf" tl_isInfixOf, + + testGroup "view" [ + testProperty "t_stripPrefix" t_stripPrefix, + testProperty "tl_stripPrefix" tl_stripPrefix, + testProperty "t_stripSuffix" t_stripSuffix, + testProperty "tl_stripSuffix" tl_stripSuffix, + testProperty "t_commonPrefixes" t_commonPrefixes, + testProperty "tl_commonPrefixes" tl_commonPrefixes + ] + ], + + testGroup "searching" [ + testProperty "sf_elem" sf_elem, + testProperty "sf_filter" sf_filter, + testProperty "t_filter" t_filter, + testProperty "tl_filter" tl_filter, + testProperty "sf_findBy" sf_findBy, + testProperty "t_find" t_find, + testProperty "tl_find" tl_find, + testProperty "t_partition" t_partition, + testProperty "tl_partition" tl_partition + ], + + testGroup "indexing" [ + testProperty "sf_index" sf_index, + testProperty "t_index" t_index, + testProperty "tl_index" tl_index, + testProperty "t_findIndex" t_findIndex, + testProperty "t_count" t_count, + testProperty "tl_count" tl_count, + testProperty "t_indices" t_indices, + testProperty "tl_indices" tl_indices, + testProperty "t_indices_occurs" t_indices_occurs + ], + + testGroup "zips" [ + testProperty "t_zip" t_zip, + testProperty "tl_zip" tl_zip, + testProperty "sf_zipWith" sf_zipWith, + testProperty "t_zipWith" t_zipWith, + testProperty "tl_zipWith" tl_zipWith + ], + + testGroup "regressions" [ + testProperty "s_filter_eq" s_filter_eq + ], + + testGroup "shifts" [ + testProperty "shiftL_Int" shiftL_Int, + testProperty "shiftL_Word16" shiftL_Word16, + testProperty "shiftL_Word32" shiftL_Word32, + testProperty "shiftR_Int" shiftR_Int, + testProperty "shiftR_Word16" shiftR_Word16, + testProperty "shiftR_Word32" shiftR_Word32 + ], + + testGroup "builder" [ 
+ testProperty "tb_associative" tb_associative, + testGroup "decimal" [ + testProperty "tb_decimal_int" tb_decimal_int, + testProperty "tb_decimal_int8" tb_decimal_int8, + testProperty "tb_decimal_int16" tb_decimal_int16, + testProperty "tb_decimal_int32" tb_decimal_int32, + testProperty "tb_decimal_int64" tb_decimal_int64, + testProperty "tb_decimal_integer" tb_decimal_integer, + testProperty "tb_decimal_integer_big" tb_decimal_integer_big, + testProperty "tb_decimal_word" tb_decimal_word, + testProperty "tb_decimal_word8" tb_decimal_word8, + testProperty "tb_decimal_word16" tb_decimal_word16, + testProperty "tb_decimal_word32" tb_decimal_word32, + testProperty "tb_decimal_word64" tb_decimal_word64, + testProperty "tb_decimal_big_int" tb_decimal_big_int, + testProperty "tb_decimal_big_word" tb_decimal_big_word, + testProperty "tb_decimal_big_int64" tb_decimal_big_int64, + testProperty "tb_decimal_big_word64" tb_decimal_big_word64 + ], + testGroup "hexadecimal" [ + testProperty "tb_hexadecimal_int" tb_hexadecimal_int, + testProperty "tb_hexadecimal_int8" tb_hexadecimal_int8, + testProperty "tb_hexadecimal_int16" tb_hexadecimal_int16, + testProperty "tb_hexadecimal_int32" tb_hexadecimal_int32, + testProperty "tb_hexadecimal_int64" tb_hexadecimal_int64, + testProperty "tb_hexadecimal_integer" tb_hexadecimal_integer, + testProperty "tb_hexadecimal_word" tb_hexadecimal_word, + testProperty "tb_hexadecimal_word8" tb_hexadecimal_word8, + testProperty "tb_hexadecimal_word16" tb_hexadecimal_word16, + testProperty "tb_hexadecimal_word32" tb_hexadecimal_word32, + testProperty "tb_hexadecimal_word64" tb_hexadecimal_word64 + ], + testGroup "realfloat" [ + testProperty "tb_realfloat_double" tb_realfloat_double, + testProperty "tb_realfloat_float" tb_realfloat_float, + testProperty "tb_formatRealFloat_float" tb_formatRealFloat_float, + testProperty "tb_formatRealFloat_double" tb_formatRealFloat_double + ], + testProperty "tb_fromText" tb_fromText, + testProperty "tb_singleton" 
tb_singleton + ], + + testGroup "read" [ + testProperty "t_decimal" t_decimal, + testProperty "tl_decimal" tl_decimal, + testProperty "t_hexadecimal" t_hexadecimal, + testProperty "tl_hexadecimal" tl_hexadecimal, + testProperty "t_double" t_double, + testProperty "tl_double" tl_double, + testProperty "t_rational" t_rational, + testProperty "tl_rational" tl_rational + ], + + {- + testGroup "input-output" [ + testProperty "t_write_read" t_write_read, + testProperty "tl_write_read" tl_write_read, + testProperty "t_write_read_line" t_write_read_line, + testProperty "tl_write_read_line" tl_write_read_line + -- These tests are subject to I/O race conditions when run under + -- test-framework-quickcheck2. + -- testProperty "t_put_get" t_put_get + -- testProperty "tl_put_get" tl_put_get + ], + -} + + testGroup "lowlevel" [ + testProperty "t_dropWord16" t_dropWord16, + testProperty "t_takeWord16" t_takeWord16, + testProperty "t_take_drop_16" t_take_drop_16, + testProperty "t_use_from" t_use_from, + testProperty "t_copy" t_copy + ], + + testGroup "mul" Mul.tests + ] diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/tests/Tests/QuickCheckUtils.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/tests/Tests/QuickCheckUtils.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/tests/Tests/QuickCheckUtils.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/tests/Tests/QuickCheckUtils.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,360 @@ +-- | This module provides quickcheck utilities, e.g. arbitrary and show +-- instances, and comparison functions, so we can focus on the actual properties +-- in the 'Tests.Properties' module. +-- +{-# LANGUAGE CPP, FlexibleInstances, TypeSynonymInstances #-} +{-# OPTIONS_GHC -fno-warn-orphans #-} +module Tests.QuickCheckUtils + ( + genUnicode + , unsquare + , smallArbitrary + + , BigBounded(..) + , BigInt(..) + , NotEmpty(..) + + , Small(..) + , small + + , Precision(..) 
+ , precision + + , integralRandomR + + , DecodeErr(..) + , genDecodeErr + + , Stringy(..) + , eq + , eqP + + , Encoding(..) + + , write_read + ) where + +import Control.Applicative ((<$>)) +import Control.Arrow (first, (***)) +import Control.DeepSeq (NFData (..), deepseq) +import Control.Exception (bracket) +import Data.String (IsString, fromString) +import Data.Text.Foreign (I16) +import Data.Text.Lazy.Builder.RealFloat (FPFormat(..)) +import Data.Word (Word8, Word16) +import Debug.Trace (trace) +import System.Random (Random(..), RandomGen) +import Test.QuickCheck hiding (Fixed(..), Small (..), (.&.)) +import Test.QuickCheck.Monadic (assert, monadicIO, run) +import Test.QuickCheck.Unicode (string) +import Tests.Utils +import qualified Data.ByteString as B +import qualified Data.Text as T +import qualified Data.Text.Encoding.Error as T +import qualified Data.Text.Internal.Fusion as TF +import qualified Data.Text.Internal.Fusion.Common as TF +import qualified Data.Text.Internal.Lazy as TL +import qualified Data.Text.Internal.Lazy.Fusion as TLF +import qualified Data.Text.Lazy as TL +import qualified System.IO as IO + +#if !MIN_VERSION_base(4,4,0) +import Data.Int (Int64) +import Data.Word (Word, Word64) +#endif + +genUnicode :: IsString a => Gen a +genUnicode = fromString <$> string + +instance Random I16 where + randomR = integralRandomR + random = randomR (minBound,maxBound) + +instance Arbitrary I16 where + arbitrary = arbitrarySizedIntegral + shrink = shrinkIntegral + +instance Arbitrary B.ByteString where + arbitrary = B.pack `fmap` arbitrary + shrink = map B.pack . shrink . 
B.unpack + +#if !MIN_VERSION_base(4,4,0) +instance Random Int64 where + randomR = integralRandomR + random = randomR (minBound,maxBound) + +instance Random Word where + randomR = integralRandomR + random = randomR (minBound,maxBound) + +instance Random Word8 where + randomR = integralRandomR + random = randomR (minBound,maxBound) + +instance Random Word64 where + randomR = integralRandomR + random = randomR (minBound,maxBound) +#endif + +-- For tests that have O(n^2) running times or input sizes, resize +-- their inputs to the square root of the originals. +unsquare :: (Arbitrary a, Show a, Testable b) => (a -> b) -> Property +unsquare = forAll smallArbitrary + +smallArbitrary :: (Arbitrary a, Show a) => Gen a +smallArbitrary = sized $ \n -> resize (smallish n) arbitrary + where smallish = round . (sqrt :: Double -> Double) . fromIntegral . abs + +instance Arbitrary T.Text where + arbitrary = T.pack `fmap` arbitrary + shrink = map T.pack . shrink . T.unpack + +instance Arbitrary TL.Text where + arbitrary = (TL.fromChunks . map notEmpty) `fmap` smallArbitrary + shrink = map TL.pack . shrink . 
TL.unpack + +newtype BigInt = Big Integer + deriving (Eq, Show) + +instance Arbitrary BigInt where + arbitrary = choose (1::Int,200) >>= \e -> Big <$> choose (10^(e-1),10^e) + shrink (Big a) = [Big (a `div` 2^(l-e)) | e <- shrink l] + where l = truncate (log (fromIntegral a) / log 2 :: Double) :: Integer + +newtype BigBounded a = BigBounded a + deriving (Eq, Show) + +instance (Bounded a, Random a, Arbitrary a) => Arbitrary (BigBounded a) where + arbitrary = BigBounded <$> choose (minBound, maxBound) + +newtype NotEmpty a = NotEmpty { notEmpty :: a } + deriving (Eq, Ord) + +instance Show a => Show (NotEmpty a) where + show (NotEmpty a) = show a + +instance Functor NotEmpty where + fmap f (NotEmpty a) = NotEmpty (f a) + +instance Arbitrary a => Arbitrary (NotEmpty [a]) where + arbitrary = sized (\n -> NotEmpty `fmap` (choose (1,n+1) >>= vector)) + shrink = shrinkNotEmpty null + +instance Arbitrary (NotEmpty T.Text) where + arbitrary = (fmap T.pack) `fmap` arbitrary + shrink = shrinkNotEmpty T.null + +instance Arbitrary (NotEmpty TL.Text) where + arbitrary = (fmap TL.pack) `fmap` arbitrary + shrink = shrinkNotEmpty TL.null + +instance Arbitrary (NotEmpty B.ByteString) where + arbitrary = (fmap B.pack) `fmap` arbitrary + shrink = shrinkNotEmpty B.null + +shrinkNotEmpty :: Arbitrary a => (a -> Bool) -> NotEmpty a -> [NotEmpty a] +shrinkNotEmpty isNull (NotEmpty xs) = + [ NotEmpty xs' | xs' <- shrink xs, not (isNull xs') ] + +data Small = S0 | S1 | S2 | S3 | S4 | S5 | S6 | S7 + | S8 | S9 | S10 | S11 | S12 | S13 | S14 | S15 + | S16 | S17 | S18 | S19 | S20 | S21 | S22 | S23 + | S24 | S25 | S26 | S27 | S28 | S29 | S30 | S31 + deriving (Eq, Ord, Enum, Bounded) + +small :: Integral a => Small -> a +small = fromIntegral . fromEnum + +intf :: (Int -> Int -> Int) -> Small -> Small -> Small +intf f a b = toEnum ((fromEnum a `f` fromEnum b) `mod` 32) + +instance Show Small where + show = show . fromEnum + +instance Read Small where + readsPrec n = map (first toEnum) . 
readsPrec n + +instance Num Small where + fromInteger = toEnum . fromIntegral + signum _ = 1 + abs = id + (+) = intf (+) + (-) = intf (-) + (*) = intf (*) + +instance Real Small where + toRational = toRational . fromEnum + +instance Integral Small where + toInteger = toInteger . fromEnum + quotRem a b = (toEnum x, toEnum y) + where (x, y) = fromEnum a `quotRem` fromEnum b + +instance Random Small where + randomR = integralRandomR + random = randomR (minBound,maxBound) + +instance Arbitrary Small where + arbitrary = choose (minBound, maxBound) + shrink = shrinkIntegral + +integralRandomR :: (Integral a, RandomGen g) => (a,a) -> g -> (a,g) +integralRandomR (a,b) g = case randomR (fromIntegral a :: Integer, + fromIntegral b :: Integer) g of + (x,h) -> (fromIntegral x, h) + +data DecodeErr = Lenient | Ignore | Strict | Replace + deriving (Show, Eq) + +genDecodeErr :: DecodeErr -> Gen T.OnDecodeError +genDecodeErr Lenient = return T.lenientDecode +genDecodeErr Ignore = return T.ignore +genDecodeErr Strict = return T.strictDecode +genDecodeErr Replace = arbitrary + +instance Arbitrary DecodeErr where + arbitrary = elements [Lenient, Ignore, Strict, Replace] + +class Stringy s where + packS :: String -> s + unpackS :: s -> String + splitAtS :: Int -> s -> (s,s) + packSChunkSize :: Int -> String -> s + packSChunkSize _ = packS + +instance Stringy String where + packS = id + unpackS = id + splitAtS = splitAt + +instance Stringy (TF.Stream Char) where + packS = TF.streamList + unpackS = TF.unstreamList + splitAtS n s = (TF.take n s, TF.drop n s) + +instance Stringy T.Text where + packS = T.pack + unpackS = T.unpack + splitAtS = T.splitAt + +instance Stringy TL.Text where + packSChunkSize k = TLF.unstreamChunks k . TF.streamList + packS = TL.pack + unpackS = TL.unpack + splitAtS = ((TL.lazyInvariant *** TL.lazyInvariant) .) . + TL.splitAt . fromIntegral + +-- Do two functions give the same answer? 
+eq :: (Eq a, Show a) => (t -> a) -> (t -> a) -> t -> Bool +eq a b s = a s =^= b s + +-- What about with the RHS packed? +eqP :: (Eq a, Show a, Stringy s) => + (String -> a) -> (s -> a) -> String -> Word8 -> Bool +eqP f g s w = eql "orig" (f s) (g t) && + eql "mini" (f s) (g mini) && + eql "head" (f sa) (g ta) && + eql "tail" (f sb) (g tb) + where t = packS s + mini = packSChunkSize 10 s + (sa,sb) = splitAt m s + (ta,tb) = splitAtS m t + l = length s + m | l == 0 = n + | otherwise = n `mod` l + n = fromIntegral w + eql d a b + | a =^= b = True + | otherwise = trace (d ++ ": " ++ show a ++ " /= " ++ show b) False + +instance Arbitrary FPFormat where + arbitrary = elements [Exponent, Fixed, Generic] + +newtype Precision a = Precision (Maybe Int) + deriving (Eq, Show) + +precision :: a -> Precision a -> Maybe Int +precision _ (Precision prec) = prec + +arbitraryPrecision :: Int -> Gen (Precision a) +arbitraryPrecision maxDigits = Precision <$> do + n <- choose (-1,maxDigits) + return $ if n == -1 + then Nothing + else Just n + +instance Arbitrary (Precision Float) where + arbitrary = arbitraryPrecision 11 + shrink = map Precision . shrink . precision undefined + +instance Arbitrary (Precision Double) where + arbitrary = arbitraryPrecision 22 + shrink = map Precision . shrink . precision undefined + +-- Work around lack of Show instance for TextEncoding. +data Encoding = E String IO.TextEncoding + +instance Show Encoding where show (E n _) = "utf" ++ n + +instance Arbitrary Encoding where + arbitrary = oneof . map return $ + [ E "8" IO.utf8, E "8_bom" IO.utf8_bom, E "16" IO.utf16 + , E "16le" IO.utf16le, E "16be" IO.utf16be, E "32" IO.utf32 + , E "32le" IO.utf32le, E "32be" IO.utf32be + ] + +windowsNewlineMode :: IO.NewlineMode +windowsNewlineMode = IO.NewlineMode + { IO.inputNL = IO.CRLF, IO.outputNL = IO.CRLF + } + +instance Arbitrary IO.NewlineMode where + arbitrary = oneof . 
map return $ + [ IO.noNewlineTranslation, IO.universalNewlineMode, IO.nativeNewlineMode + , windowsNewlineMode + ] + +instance Arbitrary IO.BufferMode where + arbitrary = oneof [ return IO.NoBuffering, + return IO.LineBuffering, + return (IO.BlockBuffering Nothing), + (IO.BlockBuffering . Just . (+1) . fromIntegral) `fmap` + (arbitrary :: Gen Word16) ] + +-- This test harness is complex! What property are we checking? +-- +-- Reading after writing a multi-line file should give the same +-- results as were written. +-- +-- What do we vary while checking this property? +-- * The lines themselves, scrubbed to contain neither CR nor LF. (By +-- working with a list of lines, we ensure that the data will +-- sometimes contain line endings.) +-- * Encoding. +-- * Newline translation mode. +-- * Buffering. +write_read :: (NFData a, Eq a) + => ([b] -> a) + -> ((Char -> Bool) -> a -> b) + -> (IO.Handle -> a -> IO ()) + -> (IO.Handle -> IO a) + -> Encoding + -> IO.NewlineMode + -> IO.BufferMode + -> [a] + -> Property +write_read unline filt writer reader (E _ _) nl buf ts = + monadicIO $ assert . (==t) =<< run act + where t = unline . map (filt (not . (`elem` "\r\n"))) $ ts + act = withTempFile $ \path h -> do + -- hSetEncoding h enc + IO.hSetNewlineMode h nl + IO.hSetBuffering h buf + () <- writer h t + IO.hClose h + bracket (IO.openFile path IO.ReadMode) IO.hClose $ \h' -> do + -- hSetEncoding h' enc + IO.hSetNewlineMode h' nl + IO.hSetBuffering h' buf + r <- reader h' + r `deepseq` return r diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/tests/Tests/Regressions.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/tests/Tests/Regressions.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/tests/Tests/Regressions.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/tests/Tests/Regressions.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,82 @@ +-- | Regression tests for specific bugs. 
+-- +{-# LANGUAGE OverloadedStrings, ScopedTypeVariables #-} +module Tests.Regressions + ( + tests + ) where + +import Control.Exception (SomeException, handle) +import System.IO +import Test.HUnit (assertBool, assertEqual, assertFailure) +import qualified Data.ByteString as B +import Data.ByteString.Char8 () +import qualified Data.ByteString.Lazy as LB +import qualified Data.Text as T +import qualified Data.Text.Encoding as TE +import qualified Data.Text.IO as T +import qualified Data.Text.Lazy as LT +import qualified Data.Text.Lazy.Encoding as LE +import qualified Data.Text.Unsafe as T +import qualified Test.Framework as F +import qualified Test.Framework.Providers.HUnit as F + +import Tests.Utils (withTempFile) + +-- Reported by Michael Snoyman: UTF-8 encoding a large lazy bytestring +-- caused either a segfault or attempt to allocate a negative number +-- of bytes. +lazy_encode_crash :: IO () +lazy_encode_crash = withTempFile $ \ _ h -> + LB.hPut h . LE.encodeUtf8 . LT.pack . replicate 100000 $ 'a' + +-- Reported by Pieter Laeremans: attempting to read an incorrectly +-- encoded file can result in a crash in the RTS (i.e. not merely an +-- exception). +hGetContents_crash :: IO () +hGetContents_crash = withTempFile $ \ path h -> do + B.hPut h (B.pack [0x78, 0xc4 ,0x0a]) >> hClose h + h' <- openFile path ReadMode + hSetEncoding h' utf8 + handle (\(_::SomeException) -> return ()) $ + T.hGetContents h' >> assertFailure "T.hGetContents should crash" + +-- Reported by Ian Lynagh: attempting to allocate a sufficiently large +-- string (via either Array.new or Text.replicate) could result in an +-- integer overflow. 
+replicate_crash :: IO () +replicate_crash = handle (\(_::SomeException) -> return ()) $ + T.replicate (2^power) "0123456789abcdef" `seq` + assertFailure "T.replicate should crash" + where + power | maxBound == (2147483647::Int) = 28 + | otherwise = 60 :: Int + +-- Reported by John Millikin: a UTF-8 decode error handler could +-- return a bogus substitution character, which we would write without +-- checking. +utf8_decode_unsafe :: IO () +utf8_decode_unsafe = do + let t = TE.decodeUtf8With (\_ _ -> Just '\xdc00') "\x80" + assertBool "broken error recovery shouldn't break us" (t == "\xfffd") + +-- Reported by Eric Seidel: we mishandled mapping Chars that fit in a +-- single Word16 to Chars that require two. +mapAccumL_resize :: IO () +mapAccumL_resize = do + let f a _ = (a, '\65536') + count = 5 + val = T.mapAccumL f (0::Int) (T.replicate count "a") + assertEqual "mapAccumL should correctly fill buffers for two-word results" + (0, T.replicate count "\65536") val + assertEqual "mapAccumL should correctly size buffers for two-word results" + (count * 2) (T.lengthWord16 (snd val)) + +tests :: F.Test +tests = F.testGroup "Regressions" + [ F.testCase "hGetContents_crash" hGetContents_crash + , F.testCase "lazy_encode_crash" lazy_encode_crash + , F.testCase "mapAccumL_resize" mapAccumL_resize + , F.testCase "replicate_crash" replicate_crash + , F.testCase "utf8_decode_unsafe" utf8_decode_unsafe + ] diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/tests/Tests/SlowFunctions.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/tests/Tests/SlowFunctions.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/tests/Tests/SlowFunctions.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/tests/Tests/SlowFunctions.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,39 @@ +{-# LANGUAGE BangPatterns #-} +module Tests.SlowFunctions + ( + indices + , splitOn + ) where + +import qualified Data.Text as T +import Data.Text.Internal (Text(..)) +import 
Data.Text.Unsafe (iter_, unsafeHead, unsafeTail) + +indices :: T.Text -- ^ Substring to search for (@needle@) + -> T.Text -- ^ Text to search in (@haystack@) + -> [Int] +indices needle@(Text _narr _noff nlen) haystack@(Text harr hoff hlen) + | T.null needle = [] + | otherwise = scan 0 + where + scan i | i >= hlen = [] + | needle `T.isPrefixOf` t = i : scan (i+nlen) + | otherwise = scan (i+d) + where t = Text harr (hoff+i) (hlen-i) + d = iter_ haystack i + +splitOn :: T.Text -- ^ Text to split on + -> T.Text -- ^ Input text + -> [T.Text] +splitOn pat src0 + | T.null pat = error "splitOn: empty" + | l == 1 = T.split (== (unsafeHead pat)) src0 + | otherwise = go src0 + where + l = T.length pat + go src = search 0 src + where + search !n !s + | T.null s = [src] -- not found + | pat `T.isPrefixOf` s = T.take n src : go (T.drop l s) + | otherwise = search (n+1) (unsafeTail s) diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/tests/Tests/Utils.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/tests/Tests/Utils.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/tests/Tests/Utils.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/tests/Tests/Utils.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,52 @@ +-- | Miscellaneous testing utilities +-- +{-# LANGUAGE ScopedTypeVariables #-} +module Tests.Utils + ( + (=^=) + , withRedirect + , withTempFile + ) where + +import Control.Exception (SomeException, bracket, bracket_, evaluate, try) +import Control.Monad (when) +import Debug.Trace (trace) +import GHC.IO.Handle.Internals (withHandle) +import System.Directory (removeFile) +import System.IO (Handle, hClose, hFlush, hIsOpen, hIsWritable, openTempFile) +import System.IO.Unsafe (unsafePerformIO) + +-- Ensure that two potentially bottom values (in the sense of crashing +-- for some inputs, not looping infinitely) either both crash, or both +-- give comparable results for some input. 
+(=^=) :: (Eq a, Show a) => a -> a -> Bool +i =^= j = unsafePerformIO $ do + x <- try (evaluate i) + y <- try (evaluate j) + case (x,y) of + (Left (_ :: SomeException), Left (_ :: SomeException)) + -> return True + (Right a, Right b) -> return (a == b) + e -> trace ("*** Divergence: " ++ show e) return False +infix 4 =^= +{-# NOINLINE (=^=) #-} + +withTempFile :: (FilePath -> Handle -> IO a) -> IO a +withTempFile = bracket (openTempFile "." "crashy.txt") cleanupTemp . uncurry + where + cleanupTemp (path,h) = do + open <- hIsOpen h + when open (hClose h) + removeFile path + +withRedirect :: Handle -> Handle -> IO a -> IO a +withRedirect tmp h = bracket_ swap swap + where + whenM p a = p >>= (`when` a) + swap = do + whenM (hIsOpen tmp) $ whenM (hIsWritable tmp) $ hFlush tmp + whenM (hIsOpen h) $ whenM (hIsWritable h) $ hFlush h + withHandle "spam" tmp $ \tmph -> do + hh <- withHandle "spam" h $ \hh -> + return (tmph,hh) + return (hh,()) diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/tests/Tests.hs cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/tests/Tests.hs --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/tests/Tests.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/tests/Tests.hs 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,13 @@ +-- | Provides a simple main function which runs all the tests +-- +module Main + ( main + ) where + +import Test.Framework (defaultMain) + +import qualified Tests.Properties as Properties +import qualified Tests.Regressions as Regressions + +main :: IO () +main = defaultMain [Properties.tests, Regressions.tests] diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/tests/text-tests.cabal cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/tests/text-tests.cabal --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/tests/text-tests.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/tests/text-tests.cabal 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,143 @@ +name: 
text-tests +version: 0.0.0.0 +synopsis: Functional tests for the text package +description: Functional tests for the text package +homepage: https://github.com/bos/text +license: BSD3 +license-file: ../LICENSE +author: Jasper Van der Jeugt , + Bryan O'Sullivan , + Tom Harper , + Duncan Coutts +maintainer: Bryan O'Sullivan +category: Text +build-type: Simple + +cabal-version: >=1.8 + +flag hpc + description: Enable HPC to generate coverage reports + default: False + manual: True + +executable text-tests + main-is: Tests.hs + + other-modules: + Tests.IO + Tests.Properties + Tests.Properties.Mul + Tests.QuickCheckUtils + Tests.Regressions + Tests.SlowFunctions + Tests.Utils + + ghc-options: + -Wall -threaded -O0 -rtsopts + + if flag(hpc) + ghc-options: + -fhpc + + cpp-options: + -DTEST_SUITE + -DASSERTS + -DHAVE_DEEPSEQ + + build-depends: + HUnit >= 1.2, + QuickCheck >= 2.7, + base == 4.*, + bytestring, + deepseq, + directory, + quickcheck-unicode, + random, + test-framework >= 0.4, + test-framework-hunit >= 0.2, + test-framework-quickcheck2 >= 0.2, + text-tests + +executable text-tests-stdio + main-is: Tests/IO.hs + + ghc-options: + -Wall -threaded -rtsopts + + -- Optional HPC support + if flag(hpc) + ghc-options: + -fhpc + + build-depends: + text-tests, + base >= 4 && < 5 + +library + hs-source-dirs: .. 
+ c-sources: ../cbits/cbits.c + include-dirs: ../include + ghc-options: -Wall + exposed-modules: + Data.Text + Data.Text.Array + Data.Text.Encoding + Data.Text.Encoding.Error + Data.Text.Internal.Encoding.Fusion + Data.Text.Internal.Encoding.Fusion.Common + Data.Text.Internal.Encoding.Utf16 + Data.Text.Internal.Encoding.Utf32 + Data.Text.Internal.Encoding.Utf8 + Data.Text.Foreign + Data.Text.Internal.Fusion + Data.Text.Internal.Fusion.CaseMapping + Data.Text.Internal.Fusion.Common + Data.Text.Internal.Fusion.Size + Data.Text.Internal.Fusion.Types + Data.Text.IO + Data.Text.Internal.IO + Data.Text.Internal + Data.Text.Lazy + Data.Text.Lazy.Builder + Data.Text.Internal.Builder.Functions + Data.Text.Lazy.Builder.Int + Data.Text.Internal.Builder.Int.Digits + Data.Text.Internal.Builder + Data.Text.Lazy.Builder.RealFloat + Data.Text.Internal.Builder.RealFloat.Functions + Data.Text.Lazy.Encoding + Data.Text.Internal.Lazy.Encoding.Fusion + Data.Text.Internal.Lazy.Fusion + Data.Text.Lazy.IO + Data.Text.Internal.Lazy + Data.Text.Lazy.Read + Data.Text.Internal.Lazy.Search + Data.Text.Internal.Private + Data.Text.Read + Data.Text.Show + Data.Text.Internal.Read + Data.Text.Internal.Search + Data.Text.Unsafe + Data.Text.Internal.Unsafe + Data.Text.Internal.Unsafe.Char + Data.Text.Internal.Unsafe.Shift + Data.Text.Internal.Functions + + if flag(hpc) + ghc-options: + -fhpc + + cpp-options: + -DTEST_SUITE + -DHAVE_DEEPSEQ + -DASSERTS + -DINTEGER_GMP + + build-depends: + array, + base == 4.*, + binary, + bytestring, + deepseq, + ghc-prim, + integer-gmp diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/tests-and-benchmarks.markdown cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/tests-and-benchmarks.markdown --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/tests-and-benchmarks.markdown 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/tests-and-benchmarks.markdown 2016-03-17 17:53:39.000000000 +0000 @@ -0,0 +1,63 @@ +Tests and benchmarks 
+==================== + +Prerequisites +------------- + +To run the tests and benchmarks, you will need the test data, which +you can clone from one of the following locations: + +* Mercurial master repository: + [bitbucket.org/bos/text-test-data](https://bitbucket.org/bos/text-test-data) + +* Git mirror repository: + [github.com/bos/text-test-data](https://github.com/bos/text-test-data) + +You should clone that repository into the `tests` subdirectory (your +clone must be named `text-test-data` locally), then run `make -C +tests/text-test-data` to uncompress the test files. Many tests and +benchmarks will fail if the test files are missing. + +Functional tests +---------------- + +The functional tests are located in the `tests` subdirectory. An overview of +what's in that directory: + + Makefile Has targets for common tasks + Tests Source files of the testing code + scripts Various utility scripts + text-tests.cabal Cabal file that compiles all benchmarks + +The `text-tests.cabal` builds: + +- A copy of the text library, sharing the source code, but exposing all internal + modules, for testing purposes +- The different test suites + +To compile, run all tests, and generate a coverage report, simply use `make`. + +Benchmarks +---------- + +The benchmarks are located in the `benchmarks` subdirectory. An overview of +what's in that directory: + + Makefile Has targets for common tasks + haskell Source files of the haskell benchmarks + python Python implementations of some benchmarks + ruby Ruby implementations of some benchmarks + text-benchmarks.cabal Cabal file which compiles all benchmarks + +To compile the benchmarks, navigate to the `benchmarks` subdirectory and run +`cabal configure && cabal build`. Then, you can run the benchmarks using: + + ./dist/build/text-benchmarks/text-benchmarks + +However, since there's quite a lot of benchmarks, you usually don't want to +run them all. 
Instead, use the `-l` flag to get a list of benchmarks: + + ./dist/build/text-benchmarks/text-benchmarks + +And run the ones you want to inspect. If you want to configure the benchmarks +further, the exact parameters can be changed in `Benchmarks.hs`. diff -Nru cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/text.cabal cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/text.cabal --- cabal-install-1.22-1.22.6.0/src/text-1.2.2.1/text.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/text-1.2.2.1/text.cabal 2016-06-02 07:15:42.000000000 +0000 @@ -0,0 +1,197 @@ +name: text +version: 1.2.2.1 +homepage: https://github.com/bos/text +bug-reports: https://github.com/bos/text/issues +synopsis: An efficient packed Unicode text type. +description: + . + An efficient packed, immutable Unicode text type (both strict and + lazy), with a powerful loop fusion optimization framework. + . + The 'Text' type represents Unicode character strings, in a time and + space-efficient manner. This package provides text processing + capabilities that are optimized for performance critical use, both + in terms of large data quantities and high speed. + . + The 'Text' type provides character-encoding, type-safe case + conversion via whole-string case conversion functions. It also + provides a range of functions for converting 'Text' values to and from + 'ByteStrings', using several standard encodings. + . + Efficient locale-sensitive support for text IO is also supported. + . + These modules are intended to be imported qualified, to avoid name + clashes with Prelude functions, e.g. + . + > import qualified Data.Text as T + . 
+ To use an extended and very rich family of functions for working + with Unicode text (including normalization, regular expressions, + non-standard encodings, text breaking, and locales), see + the @text-icu@ package: + + +license: BSD3 +license-file: LICENSE +author: Bryan O'Sullivan +maintainer: Bryan O'Sullivan +copyright: 2009-2011 Bryan O'Sullivan, 2008-2009 Tom Harper +category: Data, Text +build-type: Simple +cabal-version: >= 1.8 +extra-source-files: + -- scripts/CaseFolding.txt + -- scripts/SpecialCasing.txt + README.markdown + benchmarks/Setup.hs + benchmarks/cbits/*.c + benchmarks/haskell/*.hs + benchmarks/haskell/Benchmarks/*.hs + benchmarks/haskell/Benchmarks/Programs/*.hs + benchmarks/python/*.py + benchmarks/ruby/*.rb + benchmarks/text-benchmarks.cabal + changelog.md + include/*.h + scripts/*.hs + tests-and-benchmarks.markdown + tests/*.hs + tests/.ghci + tests/Makefile + tests/Tests/*.hs + tests/Tests/Properties/*.hs + tests/cabal.config + tests/scripts/*.sh + tests/text-tests.cabal + +flag developer + description: operate in developer mode + default: False + manual: True + +flag integer-simple + description: Use the simple integer library instead of GMP + default: False + manual: False + +library + c-sources: cbits/cbits.c + include-dirs: include + + exposed-modules: + Data.Text + Data.Text.Array + Data.Text.Encoding + Data.Text.Encoding.Error + Data.Text.Foreign + Data.Text.IO + Data.Text.Internal + Data.Text.Internal.Builder + Data.Text.Internal.Builder.Functions + Data.Text.Internal.Builder.Int.Digits + Data.Text.Internal.Builder.RealFloat.Functions + Data.Text.Internal.Encoding.Fusion + Data.Text.Internal.Encoding.Fusion.Common + Data.Text.Internal.Encoding.Utf16 + Data.Text.Internal.Encoding.Utf32 + Data.Text.Internal.Encoding.Utf8 + Data.Text.Internal.Functions + Data.Text.Internal.Fusion + Data.Text.Internal.Fusion.CaseMapping + Data.Text.Internal.Fusion.Common + Data.Text.Internal.Fusion.Size + Data.Text.Internal.Fusion.Types + 
Data.Text.Internal.IO + Data.Text.Internal.Lazy + Data.Text.Internal.Lazy.Encoding.Fusion + Data.Text.Internal.Lazy.Fusion + Data.Text.Internal.Lazy.Search + Data.Text.Internal.Private + Data.Text.Internal.Read + Data.Text.Internal.Search + Data.Text.Internal.Unsafe + Data.Text.Internal.Unsafe.Char + Data.Text.Internal.Unsafe.Shift + Data.Text.Lazy + Data.Text.Lazy.Builder + Data.Text.Lazy.Builder.Int + Data.Text.Lazy.Builder.RealFloat + Data.Text.Lazy.Encoding + Data.Text.Lazy.IO + Data.Text.Lazy.Internal + Data.Text.Lazy.Read + Data.Text.Read + Data.Text.Unsafe + + other-modules: + Data.Text.Show + + build-depends: + array >= 0.3, + base >= 4.2 && < 5, + binary, + deepseq >= 1.1.0.0, + ghc-prim >= 0.2 + + if impl(ghc >= 7.7) + build-depends: bytestring >= 0.10.4.0 + else + build-depends: bytestring >= 0.9 + + cpp-options: -DHAVE_DEEPSEQ + ghc-options: -Wall -fwarn-tabs -funbox-strict-fields -O2 + if flag(developer) + ghc-prof-options: -auto-all + ghc-options: -Werror + cpp-options: -DASSERTS + + if flag(integer-simple) + cpp-options: -DINTEGER_SIMPLE + build-depends: integer-simple >= 0.1 && < 0.5 + else + cpp-options: -DINTEGER_GMP + build-depends: integer-gmp >= 0.2 + +test-suite tests + type: exitcode-stdio-1.0 + hs-source-dirs: tests . 
+ main-is: Tests.hs + c-sources: cbits/cbits.c + include-dirs: include + + ghc-options: + -Wall -threaded -O0 -rtsopts + + cpp-options: + -DASSERTS -DHAVE_DEEPSEQ -DTEST_SUITE + + build-depends: + HUnit >= 1.2, + QuickCheck >= 2.7, + array, + base, + binary, + bytestring, + deepseq, + directory, + ghc-prim, + quickcheck-unicode, + random, + test-framework >= 0.4, + test-framework-hunit >= 0.2, + test-framework-quickcheck2 >= 0.2 + + if flag(integer-simple) + cpp-options: -DINTEGER_SIMPLE + build-depends: integer-simple >= 0.1 && < 0.5 + else + cpp-options: -DINTEGER_GMP + build-depends: integer-gmp >= 0.2 + +source-repository head + type: git + location: https://github.com/bos/text + +source-repository head + type: mercurial + location: https://bitbucket.org/bos/text diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/adler32.c cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/adler32.c --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/adler32.c 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/adler32.c 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,179 @@ +/* adler32.c -- compute the Adler-32 checksum of a data stream + * Copyright (C) 1995-2011 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* @(#) $Id$ */ + +#include "zutil.h" + +#define local static + +local uLong adler32_combine_ OF((uLong adler1, uLong adler2, z_off64_t len2)); + +#define BASE 65521 /* largest prime smaller than 65536 */ +#define NMAX 5552 +/* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 */ + +#define DO1(buf,i) {adler += (buf)[i]; sum2 += adler;} +#define DO2(buf,i) DO1(buf,i); DO1(buf,i+1); +#define DO4(buf,i) DO2(buf,i); DO2(buf,i+2); +#define DO8(buf,i) DO4(buf,i); DO4(buf,i+4); +#define DO16(buf) DO8(buf,0); DO8(buf,8); + +/* use NO_DIVIDE if your processor does not do division in hardware -- + try it both ways to see which is faster */ +#ifdef NO_DIVIDE +/* 
note that this assumes BASE is 65521, where 65536 % 65521 == 15 + (thank you to John Reiser for pointing this out) */ +# define CHOP(a) \ + do { \ + unsigned long tmp = a >> 16; \ + a &= 0xffffUL; \ + a += (tmp << 4) - tmp; \ + } while (0) +# define MOD28(a) \ + do { \ + CHOP(a); \ + if (a >= BASE) a -= BASE; \ + } while (0) +# define MOD(a) \ + do { \ + CHOP(a); \ + MOD28(a); \ + } while (0) +# define MOD63(a) \ + do { /* this assumes a is not negative */ \ + z_off64_t tmp = a >> 32; \ + a &= 0xffffffffL; \ + a += (tmp << 8) - (tmp << 5) + tmp; \ + tmp = a >> 16; \ + a &= 0xffffL; \ + a += (tmp << 4) - tmp; \ + tmp = a >> 16; \ + a &= 0xffffL; \ + a += (tmp << 4) - tmp; \ + if (a >= BASE) a -= BASE; \ + } while (0) +#else +# define MOD(a) a %= BASE +# define MOD28(a) a %= BASE +# define MOD63(a) a %= BASE +#endif + +/* ========================================================================= */ +uLong ZEXPORT adler32(adler, buf, len) + uLong adler; + const Bytef *buf; + uInt len; +{ + unsigned long sum2; + unsigned n; + + /* split Adler-32 into component sums */ + sum2 = (adler >> 16) & 0xffff; + adler &= 0xffff; + + /* in case user likes doing a byte at a time, keep it fast */ + if (len == 1) { + adler += buf[0]; + if (adler >= BASE) + adler -= BASE; + sum2 += adler; + if (sum2 >= BASE) + sum2 -= BASE; + return adler | (sum2 << 16); + } + + /* initial Adler-32 value (deferred check for len == 1 speed) */ + if (buf == Z_NULL) + return 1L; + + /* in case short lengths are provided, keep it somewhat fast */ + if (len < 16) { + while (len--) { + adler += *buf++; + sum2 += adler; + } + if (adler >= BASE) + adler -= BASE; + MOD28(sum2); /* only added so many BASE's */ + return adler | (sum2 << 16); + } + + /* do length NMAX blocks -- requires just one modulo operation */ + while (len >= NMAX) { + len -= NMAX; + n = NMAX / 16; /* NMAX is divisible by 16 */ + do { + DO16(buf); /* 16 sums unrolled */ + buf += 16; + } while (--n); + MOD(adler); + MOD(sum2); + } + + /* do 
remaining bytes (less than NMAX, still just one modulo) */ + if (len) { /* avoid modulos if none remaining */ + while (len >= 16) { + len -= 16; + DO16(buf); + buf += 16; + } + while (len--) { + adler += *buf++; + sum2 += adler; + } + MOD(adler); + MOD(sum2); + } + + /* return recombined sums */ + return adler | (sum2 << 16); +} + +/* ========================================================================= */ +local uLong adler32_combine_(adler1, adler2, len2) + uLong adler1; + uLong adler2; + z_off64_t len2; +{ + unsigned long sum1; + unsigned long sum2; + unsigned rem; + + /* for negative len, return invalid adler32 as a clue for debugging */ + if (len2 < 0) + return 0xffffffffUL; + + /* the derivation of this formula is left as an exercise for the reader */ + MOD63(len2); /* assumes len2 >= 0 */ + rem = (unsigned)len2; + sum1 = adler1 & 0xffff; + sum2 = rem * sum1; + MOD(sum2); + sum1 += (adler2 & 0xffff) + BASE - 1; + sum2 += ((adler1 >> 16) & 0xffff) + ((adler2 >> 16) & 0xffff) + BASE - rem; + if (sum1 >= BASE) sum1 -= BASE; + if (sum1 >= BASE) sum1 -= BASE; + if (sum2 >= (BASE << 1)) sum2 -= (BASE << 1); + if (sum2 >= BASE) sum2 -= BASE; + return sum1 | (sum2 << 16); +} + +/* ========================================================================= */ +uLong ZEXPORT adler32_combine(adler1, adler2, len2) + uLong adler1; + uLong adler2; + z_off_t len2; +{ + return adler32_combine_(adler1, adler2, len2); +} + +uLong ZEXPORT adler32_combine64(adler1, adler2, len2) + uLong adler1; + uLong adler2; + z_off64_t len2; +{ + return adler32_combine_(adler1, adler2, len2); +} diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/compress.c cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/compress.c --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/compress.c 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/compress.c 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,80 @@ +/* compress.c -- compress a memory buffer + * 
Copyright (C) 1995-2005 Jean-loup Gailly. + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* @(#) $Id$ */ + +#define ZLIB_INTERNAL +#include "zlib.h" + +/* =========================================================================== + Compresses the source buffer into the destination buffer. The level + parameter has the same meaning as in deflateInit. sourceLen is the byte + length of the source buffer. Upon entry, destLen is the total size of the + destination buffer, which must be at least 0.1% larger than sourceLen plus + 12 bytes. Upon exit, destLen is the actual size of the compressed buffer. + + compress2 returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_BUF_ERROR if there was not enough room in the output buffer, + Z_STREAM_ERROR if the level parameter is invalid. +*/ +int ZEXPORT compress2 (dest, destLen, source, sourceLen, level) + Bytef *dest; + uLongf *destLen; + const Bytef *source; + uLong sourceLen; + int level; +{ + z_stream stream; + int err; + + stream.next_in = (z_const Bytef *)source; + stream.avail_in = (uInt)sourceLen; +#ifdef MAXSEG_64K + /* Check for source > 64K on 16-bit machine: */ + if ((uLong)stream.avail_in != sourceLen) return Z_BUF_ERROR; +#endif + stream.next_out = dest; + stream.avail_out = (uInt)*destLen; + if ((uLong)stream.avail_out != *destLen) return Z_BUF_ERROR; + + stream.zalloc = (alloc_func)0; + stream.zfree = (free_func)0; + stream.opaque = (voidpf)0; + + err = deflateInit(&stream, level); + if (err != Z_OK) return err; + + err = deflate(&stream, Z_FINISH); + if (err != Z_STREAM_END) { + deflateEnd(&stream); + return err == Z_OK ? 
Z_BUF_ERROR : err; + } + *destLen = stream.total_out; + + err = deflateEnd(&stream); + return err; +} + +/* =========================================================================== + */ +int ZEXPORT compress (dest, destLen, source, sourceLen) + Bytef *dest; + uLongf *destLen; + const Bytef *source; + uLong sourceLen; +{ + return compress2(dest, destLen, source, sourceLen, Z_DEFAULT_COMPRESSION); +} + +/* =========================================================================== + If the default memLevel or windowBits for deflateInit() is changed, then + this function needs to be updated. + */ +uLong ZEXPORT compressBound (sourceLen) + uLong sourceLen; +{ + return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) + + (sourceLen >> 25) + 13; +} diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/crc32.c cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/crc32.c --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/crc32.c 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/crc32.c 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,425 @@ +/* crc32.c -- compute the CRC-32 of a data stream + * Copyright (C) 1995-2006, 2010, 2011, 2012 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + * + * Thanks to Rodney Brown for his contribution of faster + * CRC methods: exclusive-oring 32 bits of data at a time, and pre-computing + * tables for updating the shift register in one step with three exclusive-ors + * instead of four steps with four exclusive-ors. This results in about a + * factor of two increase in speed on a Power PC G4 (PPC7455) using gcc -O3. + */ + +/* @(#) $Id$ */ + +/* + Note on the use of DYNAMIC_CRC_TABLE: there is no mutex or semaphore + protection on the static variables used to control the first-use generation + of the crc tables. 
Therefore, if you #define DYNAMIC_CRC_TABLE, you should + first call get_crc_table() to initialize the tables before allowing more than + one thread to use crc32(). + + DYNAMIC_CRC_TABLE and MAKECRCH can be #defined to write out crc32.h. + */ + +#ifdef MAKECRCH +# include +# ifndef DYNAMIC_CRC_TABLE +# define DYNAMIC_CRC_TABLE +# endif /* !DYNAMIC_CRC_TABLE */ +#endif /* MAKECRCH */ + +#include "zutil.h" /* for STDC and FAR definitions */ + +#define local static + +/* Definitions for doing the crc four data bytes at a time. */ +#if !defined(NOBYFOUR) && defined(Z_U4) +# define BYFOUR +#endif +#ifdef BYFOUR + local unsigned long crc32_little OF((unsigned long, + const unsigned char FAR *, unsigned)); + local unsigned long crc32_big OF((unsigned long, + const unsigned char FAR *, unsigned)); +# define TBLS 8 +#else +# define TBLS 1 +#endif /* BYFOUR */ + +/* Local functions for crc concatenation */ +local unsigned long gf2_matrix_times OF((unsigned long *mat, + unsigned long vec)); +local void gf2_matrix_square OF((unsigned long *square, unsigned long *mat)); +local uLong crc32_combine_ OF((uLong crc1, uLong crc2, z_off64_t len2)); + + +#ifdef DYNAMIC_CRC_TABLE + +local volatile int crc_table_empty = 1; +local z_crc_t FAR crc_table[TBLS][256]; +local void make_crc_table OF((void)); +#ifdef MAKECRCH + local void write_table OF((FILE *, const z_crc_t FAR *)); +#endif /* MAKECRCH */ +/* + Generate tables for a byte-wise 32-bit CRC calculation on the polynomial: + x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x+1. + + Polynomials over GF(2) are represented in binary, one bit per coefficient, + with the lowest powers in the most significant bit. Then adding polynomials + is just exclusive-or, and multiplying a polynomial by x is a right shift by + one. 
If we call the above polynomial p, and represent a byte as the + polynomial q, also with the lowest power in the most significant bit (so the + byte 0xb1 is the polynomial x^7+x^3+x+1), then the CRC is (q*x^32) mod p, + where a mod b means the remainder after dividing a by b. + + This calculation is done using the shift-register method of multiplying and + taking the remainder. The register is initialized to zero, and for each + incoming bit, x^32 is added mod p to the register if the bit is a one (where + x^32 mod p is p+x^32 = x^26+...+1), and the register is multiplied mod p by + x (which is shifting right by one and adding x^32 mod p if the bit shifted + out is a one). We start with the highest power (least significant bit) of + q and repeat for all eight bits of q. + + The first table is simply the CRC of all possible eight bit values. This is + all the information needed to generate CRCs on data a byte at a time for all + combinations of CRC register values and incoming bytes. The remaining tables + allow for word-at-a-time CRC calculation for both big-endian and little- + endian machines, where a word is four bytes. 
+*/ +local void make_crc_table() +{ + z_crc_t c; + int n, k; + z_crc_t poly; /* polynomial exclusive-or pattern */ + /* terms of polynomial defining this crc (except x^32): */ + static volatile int first = 1; /* flag to limit concurrent making */ + static const unsigned char p[] = {0,1,2,4,5,7,8,10,11,12,16,22,23,26}; + + /* See if another task is already doing this (not thread-safe, but better + than nothing -- significantly reduces duration of vulnerability in + case the advice about DYNAMIC_CRC_TABLE is ignored) */ + if (first) { + first = 0; + + /* make exclusive-or pattern from polynomial (0xedb88320UL) */ + poly = 0; + for (n = 0; n < (int)(sizeof(p)/sizeof(unsigned char)); n++) + poly |= (z_crc_t)1 << (31 - p[n]); + + /* generate a crc for every 8-bit value */ + for (n = 0; n < 256; n++) { + c = (z_crc_t)n; + for (k = 0; k < 8; k++) + c = c & 1 ? poly ^ (c >> 1) : c >> 1; + crc_table[0][n] = c; + } + +#ifdef BYFOUR + /* generate crc for each value followed by one, two, and three zeros, + and then the byte reversal of those as well as the first table */ + for (n = 0; n < 256; n++) { + c = crc_table[0][n]; + crc_table[4][n] = ZSWAP32(c); + for (k = 1; k < 4; k++) { + c = crc_table[0][c & 0xff] ^ (c >> 8); + crc_table[k][n] = c; + crc_table[k + 4][n] = ZSWAP32(c); + } + } +#endif /* BYFOUR */ + + crc_table_empty = 0; + } + else { /* not first */ + /* wait for the other guy to finish (not efficient, but rare) */ + while (crc_table_empty) + ; + } + +#ifdef MAKECRCH + /* write out CRC tables to crc32.h */ + { + FILE *out; + + out = fopen("crc32.h", "w"); + if (out == NULL) return; + fprintf(out, "/* crc32.h -- tables for rapid CRC calculation\n"); + fprintf(out, " * Generated automatically by crc32.c\n */\n\n"); + fprintf(out, "local const z_crc_t FAR "); + fprintf(out, "crc_table[TBLS][256] =\n{\n {\n"); + write_table(out, crc_table[0]); +# ifdef BYFOUR + fprintf(out, "#ifdef BYFOUR\n"); + for (k = 1; k < 8; k++) { + fprintf(out, " },\n {\n"); + write_table(out, 
crc_table[k]); + } + fprintf(out, "#endif\n"); +# endif /* BYFOUR */ + fprintf(out, " }\n};\n"); + fclose(out); + } +#endif /* MAKECRCH */ +} + +#ifdef MAKECRCH +local void write_table(out, table) + FILE *out; + const z_crc_t FAR *table; +{ + int n; + + for (n = 0; n < 256; n++) + fprintf(out, "%s0x%08lxUL%s", n % 5 ? "" : " ", + (unsigned long)(table[n]), + n == 255 ? "\n" : (n % 5 == 4 ? ",\n" : ", ")); +} +#endif /* MAKECRCH */ + +#else /* !DYNAMIC_CRC_TABLE */ +/* ======================================================================== + * Tables of CRC-32s of all single-byte values, made by make_crc_table(). + */ +#include "crc32.h" +#endif /* DYNAMIC_CRC_TABLE */ + +/* ========================================================================= + * This function can be used by asm versions of crc32() + */ +const z_crc_t FAR * ZEXPORT get_crc_table() +{ +#ifdef DYNAMIC_CRC_TABLE + if (crc_table_empty) + make_crc_table(); +#endif /* DYNAMIC_CRC_TABLE */ + return (const z_crc_t FAR *)crc_table; +} + +/* ========================================================================= */ +#define DO1 crc = crc_table[0][((int)crc ^ (*buf++)) & 0xff] ^ (crc >> 8) +#define DO8 DO1; DO1; DO1; DO1; DO1; DO1; DO1; DO1 + +/* ========================================================================= */ +unsigned long ZEXPORT crc32(crc, buf, len) + unsigned long crc; + const unsigned char FAR *buf; + uInt len; +{ + if (buf == Z_NULL) return 0UL; + +#ifdef DYNAMIC_CRC_TABLE + if (crc_table_empty) + make_crc_table(); +#endif /* DYNAMIC_CRC_TABLE */ + +#ifdef BYFOUR + if (sizeof(void *) == sizeof(ptrdiff_t)) { + z_crc_t endian; + + endian = 1; + if (*((unsigned char *)(&endian))) + return crc32_little(crc, buf, len); + else + return crc32_big(crc, buf, len); + } +#endif /* BYFOUR */ + crc = crc ^ 0xffffffffUL; + while (len >= 8) { + DO8; + len -= 8; + } + if (len) do { + DO1; + } while (--len); + return crc ^ 0xffffffffUL; +} + +#ifdef BYFOUR + +/* 
========================================================================= */ +#define DOLIT4 c ^= *buf4++; \ + c = crc_table[3][c & 0xff] ^ crc_table[2][(c >> 8) & 0xff] ^ \ + crc_table[1][(c >> 16) & 0xff] ^ crc_table[0][c >> 24] +#define DOLIT32 DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4 + +/* ========================================================================= */ +local unsigned long crc32_little(crc, buf, len) + unsigned long crc; + const unsigned char FAR *buf; + unsigned len; +{ + register z_crc_t c; + register const z_crc_t FAR *buf4; + + c = (z_crc_t)crc; + c = ~c; + while (len && ((ptrdiff_t)buf & 3)) { + c = crc_table[0][(c ^ *buf++) & 0xff] ^ (c >> 8); + len--; + } + + buf4 = (const z_crc_t FAR *)(const void FAR *)buf; + while (len >= 32) { + DOLIT32; + len -= 32; + } + while (len >= 4) { + DOLIT4; + len -= 4; + } + buf = (const unsigned char FAR *)buf4; + + if (len) do { + c = crc_table[0][(c ^ *buf++) & 0xff] ^ (c >> 8); + } while (--len); + c = ~c; + return (unsigned long)c; +} + +/* ========================================================================= */ +#define DOBIG4 c ^= *++buf4; \ + c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \ + crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24] +#define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4 + +/* ========================================================================= */ +local unsigned long crc32_big(crc, buf, len) + unsigned long crc; + const unsigned char FAR *buf; + unsigned len; +{ + register z_crc_t c; + register const z_crc_t FAR *buf4; + + c = ZSWAP32((z_crc_t)crc); + c = ~c; + while (len && ((ptrdiff_t)buf & 3)) { + c = crc_table[4][(c >> 24) ^ *buf++] ^ (c << 8); + len--; + } + + buf4 = (const z_crc_t FAR *)(const void FAR *)buf; + buf4--; + while (len >= 32) { + DOBIG32; + len -= 32; + } + while (len >= 4) { + DOBIG4; + len -= 4; + } + buf4++; + buf = (const unsigned char FAR *)buf4; + + if (len) do { + c = 
crc_table[4][(c >> 24) ^ *buf++] ^ (c << 8); + } while (--len); + c = ~c; + return (unsigned long)(ZSWAP32(c)); +} + +#endif /* BYFOUR */ + +#define GF2_DIM 32 /* dimension of GF(2) vectors (length of CRC) */ + +/* ========================================================================= */ +local unsigned long gf2_matrix_times(mat, vec) + unsigned long *mat; + unsigned long vec; +{ + unsigned long sum; + + sum = 0; + while (vec) { + if (vec & 1) + sum ^= *mat; + vec >>= 1; + mat++; + } + return sum; +} + +/* ========================================================================= */ +local void gf2_matrix_square(square, mat) + unsigned long *square; + unsigned long *mat; +{ + int n; + + for (n = 0; n < GF2_DIM; n++) + square[n] = gf2_matrix_times(mat, mat[n]); +} + +/* ========================================================================= */ +local uLong crc32_combine_(crc1, crc2, len2) + uLong crc1; + uLong crc2; + z_off64_t len2; +{ + int n; + unsigned long row; + unsigned long even[GF2_DIM]; /* even-power-of-two zeros operator */ + unsigned long odd[GF2_DIM]; /* odd-power-of-two zeros operator */ + + /* degenerate case (also disallow negative lengths) */ + if (len2 <= 0) + return crc1; + + /* put operator for one zero bit in odd */ + odd[0] = 0xedb88320UL; /* CRC-32 polynomial */ + row = 1; + for (n = 1; n < GF2_DIM; n++) { + odd[n] = row; + row <<= 1; + } + + /* put operator for two zero bits in even */ + gf2_matrix_square(even, odd); + + /* put operator for four zero bits in odd */ + gf2_matrix_square(odd, even); + + /* apply len2 zeros to crc1 (first square will put the operator for one + zero byte, eight zero bits, in even) */ + do { + /* apply zeros operator for this bit of len2 */ + gf2_matrix_square(even, odd); + if (len2 & 1) + crc1 = gf2_matrix_times(even, crc1); + len2 >>= 1; + + /* if no more bits set, then done */ + if (len2 == 0) + break; + + /* another iteration of the loop with odd and even swapped */ + gf2_matrix_square(odd, even); + if 
(len2 & 1) + crc1 = gf2_matrix_times(odd, crc1); + len2 >>= 1; + + /* if no more bits set, then done */ + } while (len2 != 0); + + /* return combined crc */ + crc1 ^= crc2; + return crc1; +} + +/* ========================================================================= */ +uLong ZEXPORT crc32_combine(crc1, crc2, len2) + uLong crc1; + uLong crc2; + z_off_t len2; +{ + return crc32_combine_(crc1, crc2, len2); +} + +uLong ZEXPORT crc32_combine64(crc1, crc2, len2) + uLong crc1; + uLong crc2; + z_off64_t len2; +{ + return crc32_combine_(crc1, crc2, len2); +} diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/crc32.h cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/crc32.h --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/crc32.h 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/crc32.h 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,441 @@ +/* crc32.h -- tables for rapid CRC calculation + * Generated automatically by crc32.c + */ + +local const z_crc_t FAR crc_table[TBLS][256] = +{ + { + 0x00000000UL, 0x77073096UL, 0xee0e612cUL, 0x990951baUL, 0x076dc419UL, + 0x706af48fUL, 0xe963a535UL, 0x9e6495a3UL, 0x0edb8832UL, 0x79dcb8a4UL, + 0xe0d5e91eUL, 0x97d2d988UL, 0x09b64c2bUL, 0x7eb17cbdUL, 0xe7b82d07UL, + 0x90bf1d91UL, 0x1db71064UL, 0x6ab020f2UL, 0xf3b97148UL, 0x84be41deUL, + 0x1adad47dUL, 0x6ddde4ebUL, 0xf4d4b551UL, 0x83d385c7UL, 0x136c9856UL, + 0x646ba8c0UL, 0xfd62f97aUL, 0x8a65c9ecUL, 0x14015c4fUL, 0x63066cd9UL, + 0xfa0f3d63UL, 0x8d080df5UL, 0x3b6e20c8UL, 0x4c69105eUL, 0xd56041e4UL, + 0xa2677172UL, 0x3c03e4d1UL, 0x4b04d447UL, 0xd20d85fdUL, 0xa50ab56bUL, + 0x35b5a8faUL, 0x42b2986cUL, 0xdbbbc9d6UL, 0xacbcf940UL, 0x32d86ce3UL, + 0x45df5c75UL, 0xdcd60dcfUL, 0xabd13d59UL, 0x26d930acUL, 0x51de003aUL, + 0xc8d75180UL, 0xbfd06116UL, 0x21b4f4b5UL, 0x56b3c423UL, 0xcfba9599UL, + 0xb8bda50fUL, 0x2802b89eUL, 0x5f058808UL, 0xc60cd9b2UL, 0xb10be924UL, + 0x2f6f7c87UL, 0x58684c11UL, 0xc1611dabUL, 0xb6662d3dUL, 0x76dc4190UL, + 
0x01db7106UL, 0x98d220bcUL, 0xefd5102aUL, 0x71b18589UL, 0x06b6b51fUL, + 0x9fbfe4a5UL, 0xe8b8d433UL, 0x7807c9a2UL, 0x0f00f934UL, 0x9609a88eUL, + 0xe10e9818UL, 0x7f6a0dbbUL, 0x086d3d2dUL, 0x91646c97UL, 0xe6635c01UL, + 0x6b6b51f4UL, 0x1c6c6162UL, 0x856530d8UL, 0xf262004eUL, 0x6c0695edUL, + 0x1b01a57bUL, 0x8208f4c1UL, 0xf50fc457UL, 0x65b0d9c6UL, 0x12b7e950UL, + 0x8bbeb8eaUL, 0xfcb9887cUL, 0x62dd1ddfUL, 0x15da2d49UL, 0x8cd37cf3UL, + 0xfbd44c65UL, 0x4db26158UL, 0x3ab551ceUL, 0xa3bc0074UL, 0xd4bb30e2UL, + 0x4adfa541UL, 0x3dd895d7UL, 0xa4d1c46dUL, 0xd3d6f4fbUL, 0x4369e96aUL, + 0x346ed9fcUL, 0xad678846UL, 0xda60b8d0UL, 0x44042d73UL, 0x33031de5UL, + 0xaa0a4c5fUL, 0xdd0d7cc9UL, 0x5005713cUL, 0x270241aaUL, 0xbe0b1010UL, + 0xc90c2086UL, 0x5768b525UL, 0x206f85b3UL, 0xb966d409UL, 0xce61e49fUL, + 0x5edef90eUL, 0x29d9c998UL, 0xb0d09822UL, 0xc7d7a8b4UL, 0x59b33d17UL, + 0x2eb40d81UL, 0xb7bd5c3bUL, 0xc0ba6cadUL, 0xedb88320UL, 0x9abfb3b6UL, + 0x03b6e20cUL, 0x74b1d29aUL, 0xead54739UL, 0x9dd277afUL, 0x04db2615UL, + 0x73dc1683UL, 0xe3630b12UL, 0x94643b84UL, 0x0d6d6a3eUL, 0x7a6a5aa8UL, + 0xe40ecf0bUL, 0x9309ff9dUL, 0x0a00ae27UL, 0x7d079eb1UL, 0xf00f9344UL, + 0x8708a3d2UL, 0x1e01f268UL, 0x6906c2feUL, 0xf762575dUL, 0x806567cbUL, + 0x196c3671UL, 0x6e6b06e7UL, 0xfed41b76UL, 0x89d32be0UL, 0x10da7a5aUL, + 0x67dd4accUL, 0xf9b9df6fUL, 0x8ebeeff9UL, 0x17b7be43UL, 0x60b08ed5UL, + 0xd6d6a3e8UL, 0xa1d1937eUL, 0x38d8c2c4UL, 0x4fdff252UL, 0xd1bb67f1UL, + 0xa6bc5767UL, 0x3fb506ddUL, 0x48b2364bUL, 0xd80d2bdaUL, 0xaf0a1b4cUL, + 0x36034af6UL, 0x41047a60UL, 0xdf60efc3UL, 0xa867df55UL, 0x316e8eefUL, + 0x4669be79UL, 0xcb61b38cUL, 0xbc66831aUL, 0x256fd2a0UL, 0x5268e236UL, + 0xcc0c7795UL, 0xbb0b4703UL, 0x220216b9UL, 0x5505262fUL, 0xc5ba3bbeUL, + 0xb2bd0b28UL, 0x2bb45a92UL, 0x5cb36a04UL, 0xc2d7ffa7UL, 0xb5d0cf31UL, + 0x2cd99e8bUL, 0x5bdeae1dUL, 0x9b64c2b0UL, 0xec63f226UL, 0x756aa39cUL, + 0x026d930aUL, 0x9c0906a9UL, 0xeb0e363fUL, 0x72076785UL, 0x05005713UL, + 0x95bf4a82UL, 0xe2b87a14UL, 0x7bb12baeUL, 0x0cb61b38UL, 
0x92d28e9bUL, + 0xe5d5be0dUL, 0x7cdcefb7UL, 0x0bdbdf21UL, 0x86d3d2d4UL, 0xf1d4e242UL, + 0x68ddb3f8UL, 0x1fda836eUL, 0x81be16cdUL, 0xf6b9265bUL, 0x6fb077e1UL, + 0x18b74777UL, 0x88085ae6UL, 0xff0f6a70UL, 0x66063bcaUL, 0x11010b5cUL, + 0x8f659effUL, 0xf862ae69UL, 0x616bffd3UL, 0x166ccf45UL, 0xa00ae278UL, + 0xd70dd2eeUL, 0x4e048354UL, 0x3903b3c2UL, 0xa7672661UL, 0xd06016f7UL, + 0x4969474dUL, 0x3e6e77dbUL, 0xaed16a4aUL, 0xd9d65adcUL, 0x40df0b66UL, + 0x37d83bf0UL, 0xa9bcae53UL, 0xdebb9ec5UL, 0x47b2cf7fUL, 0x30b5ffe9UL, + 0xbdbdf21cUL, 0xcabac28aUL, 0x53b39330UL, 0x24b4a3a6UL, 0xbad03605UL, + 0xcdd70693UL, 0x54de5729UL, 0x23d967bfUL, 0xb3667a2eUL, 0xc4614ab8UL, + 0x5d681b02UL, 0x2a6f2b94UL, 0xb40bbe37UL, 0xc30c8ea1UL, 0x5a05df1bUL, + 0x2d02ef8dUL +#ifdef BYFOUR + }, + { + 0x00000000UL, 0x191b3141UL, 0x32366282UL, 0x2b2d53c3UL, 0x646cc504UL, + 0x7d77f445UL, 0x565aa786UL, 0x4f4196c7UL, 0xc8d98a08UL, 0xd1c2bb49UL, + 0xfaefe88aUL, 0xe3f4d9cbUL, 0xacb54f0cUL, 0xb5ae7e4dUL, 0x9e832d8eUL, + 0x87981ccfUL, 0x4ac21251UL, 0x53d92310UL, 0x78f470d3UL, 0x61ef4192UL, + 0x2eaed755UL, 0x37b5e614UL, 0x1c98b5d7UL, 0x05838496UL, 0x821b9859UL, + 0x9b00a918UL, 0xb02dfadbUL, 0xa936cb9aUL, 0xe6775d5dUL, 0xff6c6c1cUL, + 0xd4413fdfUL, 0xcd5a0e9eUL, 0x958424a2UL, 0x8c9f15e3UL, 0xa7b24620UL, + 0xbea97761UL, 0xf1e8e1a6UL, 0xe8f3d0e7UL, 0xc3de8324UL, 0xdac5b265UL, + 0x5d5daeaaUL, 0x44469febUL, 0x6f6bcc28UL, 0x7670fd69UL, 0x39316baeUL, + 0x202a5aefUL, 0x0b07092cUL, 0x121c386dUL, 0xdf4636f3UL, 0xc65d07b2UL, + 0xed705471UL, 0xf46b6530UL, 0xbb2af3f7UL, 0xa231c2b6UL, 0x891c9175UL, + 0x9007a034UL, 0x179fbcfbUL, 0x0e848dbaUL, 0x25a9de79UL, 0x3cb2ef38UL, + 0x73f379ffUL, 0x6ae848beUL, 0x41c51b7dUL, 0x58de2a3cUL, 0xf0794f05UL, + 0xe9627e44UL, 0xc24f2d87UL, 0xdb541cc6UL, 0x94158a01UL, 0x8d0ebb40UL, + 0xa623e883UL, 0xbf38d9c2UL, 0x38a0c50dUL, 0x21bbf44cUL, 0x0a96a78fUL, + 0x138d96ceUL, 0x5ccc0009UL, 0x45d73148UL, 0x6efa628bUL, 0x77e153caUL, + 0xbabb5d54UL, 0xa3a06c15UL, 0x888d3fd6UL, 0x91960e97UL, 0xded79850UL, + 
0xc7cca911UL, 0xece1fad2UL, 0xf5facb93UL, 0x7262d75cUL, 0x6b79e61dUL, + 0x4054b5deUL, 0x594f849fUL, 0x160e1258UL, 0x0f152319UL, 0x243870daUL, + 0x3d23419bUL, 0x65fd6ba7UL, 0x7ce65ae6UL, 0x57cb0925UL, 0x4ed03864UL, + 0x0191aea3UL, 0x188a9fe2UL, 0x33a7cc21UL, 0x2abcfd60UL, 0xad24e1afUL, + 0xb43fd0eeUL, 0x9f12832dUL, 0x8609b26cUL, 0xc94824abUL, 0xd05315eaUL, + 0xfb7e4629UL, 0xe2657768UL, 0x2f3f79f6UL, 0x362448b7UL, 0x1d091b74UL, + 0x04122a35UL, 0x4b53bcf2UL, 0x52488db3UL, 0x7965de70UL, 0x607eef31UL, + 0xe7e6f3feUL, 0xfefdc2bfUL, 0xd5d0917cUL, 0xcccba03dUL, 0x838a36faUL, + 0x9a9107bbUL, 0xb1bc5478UL, 0xa8a76539UL, 0x3b83984bUL, 0x2298a90aUL, + 0x09b5fac9UL, 0x10aecb88UL, 0x5fef5d4fUL, 0x46f46c0eUL, 0x6dd93fcdUL, + 0x74c20e8cUL, 0xf35a1243UL, 0xea412302UL, 0xc16c70c1UL, 0xd8774180UL, + 0x9736d747UL, 0x8e2de606UL, 0xa500b5c5UL, 0xbc1b8484UL, 0x71418a1aUL, + 0x685abb5bUL, 0x4377e898UL, 0x5a6cd9d9UL, 0x152d4f1eUL, 0x0c367e5fUL, + 0x271b2d9cUL, 0x3e001cddUL, 0xb9980012UL, 0xa0833153UL, 0x8bae6290UL, + 0x92b553d1UL, 0xddf4c516UL, 0xc4eff457UL, 0xefc2a794UL, 0xf6d996d5UL, + 0xae07bce9UL, 0xb71c8da8UL, 0x9c31de6bUL, 0x852aef2aUL, 0xca6b79edUL, + 0xd37048acUL, 0xf85d1b6fUL, 0xe1462a2eUL, 0x66de36e1UL, 0x7fc507a0UL, + 0x54e85463UL, 0x4df36522UL, 0x02b2f3e5UL, 0x1ba9c2a4UL, 0x30849167UL, + 0x299fa026UL, 0xe4c5aeb8UL, 0xfdde9ff9UL, 0xd6f3cc3aUL, 0xcfe8fd7bUL, + 0x80a96bbcUL, 0x99b25afdUL, 0xb29f093eUL, 0xab84387fUL, 0x2c1c24b0UL, + 0x350715f1UL, 0x1e2a4632UL, 0x07317773UL, 0x4870e1b4UL, 0x516bd0f5UL, + 0x7a468336UL, 0x635db277UL, 0xcbfad74eUL, 0xd2e1e60fUL, 0xf9ccb5ccUL, + 0xe0d7848dUL, 0xaf96124aUL, 0xb68d230bUL, 0x9da070c8UL, 0x84bb4189UL, + 0x03235d46UL, 0x1a386c07UL, 0x31153fc4UL, 0x280e0e85UL, 0x674f9842UL, + 0x7e54a903UL, 0x5579fac0UL, 0x4c62cb81UL, 0x8138c51fUL, 0x9823f45eUL, + 0xb30ea79dUL, 0xaa1596dcUL, 0xe554001bUL, 0xfc4f315aUL, 0xd7626299UL, + 0xce7953d8UL, 0x49e14f17UL, 0x50fa7e56UL, 0x7bd72d95UL, 0x62cc1cd4UL, + 0x2d8d8a13UL, 0x3496bb52UL, 0x1fbbe891UL, 0x06a0d9d0UL, 
0x5e7ef3ecUL, + 0x4765c2adUL, 0x6c48916eUL, 0x7553a02fUL, 0x3a1236e8UL, 0x230907a9UL, + 0x0824546aUL, 0x113f652bUL, 0x96a779e4UL, 0x8fbc48a5UL, 0xa4911b66UL, + 0xbd8a2a27UL, 0xf2cbbce0UL, 0xebd08da1UL, 0xc0fdde62UL, 0xd9e6ef23UL, + 0x14bce1bdUL, 0x0da7d0fcUL, 0x268a833fUL, 0x3f91b27eUL, 0x70d024b9UL, + 0x69cb15f8UL, 0x42e6463bUL, 0x5bfd777aUL, 0xdc656bb5UL, 0xc57e5af4UL, + 0xee530937UL, 0xf7483876UL, 0xb809aeb1UL, 0xa1129ff0UL, 0x8a3fcc33UL, + 0x9324fd72UL + }, + { + 0x00000000UL, 0x01c26a37UL, 0x0384d46eUL, 0x0246be59UL, 0x0709a8dcUL, + 0x06cbc2ebUL, 0x048d7cb2UL, 0x054f1685UL, 0x0e1351b8UL, 0x0fd13b8fUL, + 0x0d9785d6UL, 0x0c55efe1UL, 0x091af964UL, 0x08d89353UL, 0x0a9e2d0aUL, + 0x0b5c473dUL, 0x1c26a370UL, 0x1de4c947UL, 0x1fa2771eUL, 0x1e601d29UL, + 0x1b2f0bacUL, 0x1aed619bUL, 0x18abdfc2UL, 0x1969b5f5UL, 0x1235f2c8UL, + 0x13f798ffUL, 0x11b126a6UL, 0x10734c91UL, 0x153c5a14UL, 0x14fe3023UL, + 0x16b88e7aUL, 0x177ae44dUL, 0x384d46e0UL, 0x398f2cd7UL, 0x3bc9928eUL, + 0x3a0bf8b9UL, 0x3f44ee3cUL, 0x3e86840bUL, 0x3cc03a52UL, 0x3d025065UL, + 0x365e1758UL, 0x379c7d6fUL, 0x35dac336UL, 0x3418a901UL, 0x3157bf84UL, + 0x3095d5b3UL, 0x32d36beaUL, 0x331101ddUL, 0x246be590UL, 0x25a98fa7UL, + 0x27ef31feUL, 0x262d5bc9UL, 0x23624d4cUL, 0x22a0277bUL, 0x20e69922UL, + 0x2124f315UL, 0x2a78b428UL, 0x2bbade1fUL, 0x29fc6046UL, 0x283e0a71UL, + 0x2d711cf4UL, 0x2cb376c3UL, 0x2ef5c89aUL, 0x2f37a2adUL, 0x709a8dc0UL, + 0x7158e7f7UL, 0x731e59aeUL, 0x72dc3399UL, 0x7793251cUL, 0x76514f2bUL, + 0x7417f172UL, 0x75d59b45UL, 0x7e89dc78UL, 0x7f4bb64fUL, 0x7d0d0816UL, + 0x7ccf6221UL, 0x798074a4UL, 0x78421e93UL, 0x7a04a0caUL, 0x7bc6cafdUL, + 0x6cbc2eb0UL, 0x6d7e4487UL, 0x6f38fadeUL, 0x6efa90e9UL, 0x6bb5866cUL, + 0x6a77ec5bUL, 0x68315202UL, 0x69f33835UL, 0x62af7f08UL, 0x636d153fUL, + 0x612bab66UL, 0x60e9c151UL, 0x65a6d7d4UL, 0x6464bde3UL, 0x662203baUL, + 0x67e0698dUL, 0x48d7cb20UL, 0x4915a117UL, 0x4b531f4eUL, 0x4a917579UL, + 0x4fde63fcUL, 0x4e1c09cbUL, 0x4c5ab792UL, 0x4d98dda5UL, 0x46c49a98UL, + 0x4706f0afUL, 
0x45404ef6UL, 0x448224c1UL, 0x41cd3244UL, 0x400f5873UL, + 0x4249e62aUL, 0x438b8c1dUL, 0x54f16850UL, 0x55330267UL, 0x5775bc3eUL, + 0x56b7d609UL, 0x53f8c08cUL, 0x523aaabbUL, 0x507c14e2UL, 0x51be7ed5UL, + 0x5ae239e8UL, 0x5b2053dfUL, 0x5966ed86UL, 0x58a487b1UL, 0x5deb9134UL, + 0x5c29fb03UL, 0x5e6f455aUL, 0x5fad2f6dUL, 0xe1351b80UL, 0xe0f771b7UL, + 0xe2b1cfeeUL, 0xe373a5d9UL, 0xe63cb35cUL, 0xe7fed96bUL, 0xe5b86732UL, + 0xe47a0d05UL, 0xef264a38UL, 0xeee4200fUL, 0xeca29e56UL, 0xed60f461UL, + 0xe82fe2e4UL, 0xe9ed88d3UL, 0xebab368aUL, 0xea695cbdUL, 0xfd13b8f0UL, + 0xfcd1d2c7UL, 0xfe976c9eUL, 0xff5506a9UL, 0xfa1a102cUL, 0xfbd87a1bUL, + 0xf99ec442UL, 0xf85cae75UL, 0xf300e948UL, 0xf2c2837fUL, 0xf0843d26UL, + 0xf1465711UL, 0xf4094194UL, 0xf5cb2ba3UL, 0xf78d95faUL, 0xf64fffcdUL, + 0xd9785d60UL, 0xd8ba3757UL, 0xdafc890eUL, 0xdb3ee339UL, 0xde71f5bcUL, + 0xdfb39f8bUL, 0xddf521d2UL, 0xdc374be5UL, 0xd76b0cd8UL, 0xd6a966efUL, + 0xd4efd8b6UL, 0xd52db281UL, 0xd062a404UL, 0xd1a0ce33UL, 0xd3e6706aUL, + 0xd2241a5dUL, 0xc55efe10UL, 0xc49c9427UL, 0xc6da2a7eUL, 0xc7184049UL, + 0xc25756ccUL, 0xc3953cfbUL, 0xc1d382a2UL, 0xc011e895UL, 0xcb4dafa8UL, + 0xca8fc59fUL, 0xc8c97bc6UL, 0xc90b11f1UL, 0xcc440774UL, 0xcd866d43UL, + 0xcfc0d31aUL, 0xce02b92dUL, 0x91af9640UL, 0x906dfc77UL, 0x922b422eUL, + 0x93e92819UL, 0x96a63e9cUL, 0x976454abUL, 0x9522eaf2UL, 0x94e080c5UL, + 0x9fbcc7f8UL, 0x9e7eadcfUL, 0x9c381396UL, 0x9dfa79a1UL, 0x98b56f24UL, + 0x99770513UL, 0x9b31bb4aUL, 0x9af3d17dUL, 0x8d893530UL, 0x8c4b5f07UL, + 0x8e0de15eUL, 0x8fcf8b69UL, 0x8a809decUL, 0x8b42f7dbUL, 0x89044982UL, + 0x88c623b5UL, 0x839a6488UL, 0x82580ebfUL, 0x801eb0e6UL, 0x81dcdad1UL, + 0x8493cc54UL, 0x8551a663UL, 0x8717183aUL, 0x86d5720dUL, 0xa9e2d0a0UL, + 0xa820ba97UL, 0xaa6604ceUL, 0xaba46ef9UL, 0xaeeb787cUL, 0xaf29124bUL, + 0xad6fac12UL, 0xacadc625UL, 0xa7f18118UL, 0xa633eb2fUL, 0xa4755576UL, + 0xa5b73f41UL, 0xa0f829c4UL, 0xa13a43f3UL, 0xa37cfdaaUL, 0xa2be979dUL, + 0xb5c473d0UL, 0xb40619e7UL, 0xb640a7beUL, 0xb782cd89UL, 0xb2cddb0cUL, 
+ 0xb30fb13bUL, 0xb1490f62UL, 0xb08b6555UL, 0xbbd72268UL, 0xba15485fUL, + 0xb853f606UL, 0xb9919c31UL, 0xbcde8ab4UL, 0xbd1ce083UL, 0xbf5a5edaUL, + 0xbe9834edUL + }, + { + 0x00000000UL, 0xb8bc6765UL, 0xaa09c88bUL, 0x12b5afeeUL, 0x8f629757UL, + 0x37def032UL, 0x256b5fdcUL, 0x9dd738b9UL, 0xc5b428efUL, 0x7d084f8aUL, + 0x6fbde064UL, 0xd7018701UL, 0x4ad6bfb8UL, 0xf26ad8ddUL, 0xe0df7733UL, + 0x58631056UL, 0x5019579fUL, 0xe8a530faUL, 0xfa109f14UL, 0x42acf871UL, + 0xdf7bc0c8UL, 0x67c7a7adUL, 0x75720843UL, 0xcdce6f26UL, 0x95ad7f70UL, + 0x2d111815UL, 0x3fa4b7fbUL, 0x8718d09eUL, 0x1acfe827UL, 0xa2738f42UL, + 0xb0c620acUL, 0x087a47c9UL, 0xa032af3eUL, 0x188ec85bUL, 0x0a3b67b5UL, + 0xb28700d0UL, 0x2f503869UL, 0x97ec5f0cUL, 0x8559f0e2UL, 0x3de59787UL, + 0x658687d1UL, 0xdd3ae0b4UL, 0xcf8f4f5aUL, 0x7733283fUL, 0xeae41086UL, + 0x525877e3UL, 0x40edd80dUL, 0xf851bf68UL, 0xf02bf8a1UL, 0x48979fc4UL, + 0x5a22302aUL, 0xe29e574fUL, 0x7f496ff6UL, 0xc7f50893UL, 0xd540a77dUL, + 0x6dfcc018UL, 0x359fd04eUL, 0x8d23b72bUL, 0x9f9618c5UL, 0x272a7fa0UL, + 0xbafd4719UL, 0x0241207cUL, 0x10f48f92UL, 0xa848e8f7UL, 0x9b14583dUL, + 0x23a83f58UL, 0x311d90b6UL, 0x89a1f7d3UL, 0x1476cf6aUL, 0xaccaa80fUL, + 0xbe7f07e1UL, 0x06c36084UL, 0x5ea070d2UL, 0xe61c17b7UL, 0xf4a9b859UL, + 0x4c15df3cUL, 0xd1c2e785UL, 0x697e80e0UL, 0x7bcb2f0eUL, 0xc377486bUL, + 0xcb0d0fa2UL, 0x73b168c7UL, 0x6104c729UL, 0xd9b8a04cUL, 0x446f98f5UL, + 0xfcd3ff90UL, 0xee66507eUL, 0x56da371bUL, 0x0eb9274dUL, 0xb6054028UL, + 0xa4b0efc6UL, 0x1c0c88a3UL, 0x81dbb01aUL, 0x3967d77fUL, 0x2bd27891UL, + 0x936e1ff4UL, 0x3b26f703UL, 0x839a9066UL, 0x912f3f88UL, 0x299358edUL, + 0xb4446054UL, 0x0cf80731UL, 0x1e4da8dfUL, 0xa6f1cfbaUL, 0xfe92dfecUL, + 0x462eb889UL, 0x549b1767UL, 0xec277002UL, 0x71f048bbUL, 0xc94c2fdeUL, + 0xdbf98030UL, 0x6345e755UL, 0x6b3fa09cUL, 0xd383c7f9UL, 0xc1366817UL, + 0x798a0f72UL, 0xe45d37cbUL, 0x5ce150aeUL, 0x4e54ff40UL, 0xf6e89825UL, + 0xae8b8873UL, 0x1637ef16UL, 0x048240f8UL, 0xbc3e279dUL, 0x21e91f24UL, + 0x99557841UL, 0x8be0d7afUL, 
0x335cb0caUL, 0xed59b63bUL, 0x55e5d15eUL, + 0x47507eb0UL, 0xffec19d5UL, 0x623b216cUL, 0xda874609UL, 0xc832e9e7UL, + 0x708e8e82UL, 0x28ed9ed4UL, 0x9051f9b1UL, 0x82e4565fUL, 0x3a58313aUL, + 0xa78f0983UL, 0x1f336ee6UL, 0x0d86c108UL, 0xb53aa66dUL, 0xbd40e1a4UL, + 0x05fc86c1UL, 0x1749292fUL, 0xaff54e4aUL, 0x322276f3UL, 0x8a9e1196UL, + 0x982bbe78UL, 0x2097d91dUL, 0x78f4c94bUL, 0xc048ae2eUL, 0xd2fd01c0UL, + 0x6a4166a5UL, 0xf7965e1cUL, 0x4f2a3979UL, 0x5d9f9697UL, 0xe523f1f2UL, + 0x4d6b1905UL, 0xf5d77e60UL, 0xe762d18eUL, 0x5fdeb6ebUL, 0xc2098e52UL, + 0x7ab5e937UL, 0x680046d9UL, 0xd0bc21bcUL, 0x88df31eaUL, 0x3063568fUL, + 0x22d6f961UL, 0x9a6a9e04UL, 0x07bda6bdUL, 0xbf01c1d8UL, 0xadb46e36UL, + 0x15080953UL, 0x1d724e9aUL, 0xa5ce29ffUL, 0xb77b8611UL, 0x0fc7e174UL, + 0x9210d9cdUL, 0x2aacbea8UL, 0x38191146UL, 0x80a57623UL, 0xd8c66675UL, + 0x607a0110UL, 0x72cfaefeUL, 0xca73c99bUL, 0x57a4f122UL, 0xef189647UL, + 0xfdad39a9UL, 0x45115eccUL, 0x764dee06UL, 0xcef18963UL, 0xdc44268dUL, + 0x64f841e8UL, 0xf92f7951UL, 0x41931e34UL, 0x5326b1daUL, 0xeb9ad6bfUL, + 0xb3f9c6e9UL, 0x0b45a18cUL, 0x19f00e62UL, 0xa14c6907UL, 0x3c9b51beUL, + 0x842736dbUL, 0x96929935UL, 0x2e2efe50UL, 0x2654b999UL, 0x9ee8defcUL, + 0x8c5d7112UL, 0x34e11677UL, 0xa9362eceUL, 0x118a49abUL, 0x033fe645UL, + 0xbb838120UL, 0xe3e09176UL, 0x5b5cf613UL, 0x49e959fdUL, 0xf1553e98UL, + 0x6c820621UL, 0xd43e6144UL, 0xc68bceaaUL, 0x7e37a9cfUL, 0xd67f4138UL, + 0x6ec3265dUL, 0x7c7689b3UL, 0xc4caeed6UL, 0x591dd66fUL, 0xe1a1b10aUL, + 0xf3141ee4UL, 0x4ba87981UL, 0x13cb69d7UL, 0xab770eb2UL, 0xb9c2a15cUL, + 0x017ec639UL, 0x9ca9fe80UL, 0x241599e5UL, 0x36a0360bUL, 0x8e1c516eUL, + 0x866616a7UL, 0x3eda71c2UL, 0x2c6fde2cUL, 0x94d3b949UL, 0x090481f0UL, + 0xb1b8e695UL, 0xa30d497bUL, 0x1bb12e1eUL, 0x43d23e48UL, 0xfb6e592dUL, + 0xe9dbf6c3UL, 0x516791a6UL, 0xccb0a91fUL, 0x740cce7aUL, 0x66b96194UL, + 0xde0506f1UL + }, + { + 0x00000000UL, 0x96300777UL, 0x2c610eeeUL, 0xba510999UL, 0x19c46d07UL, + 0x8ff46a70UL, 0x35a563e9UL, 0xa395649eUL, 0x3288db0eUL, 
0xa4b8dc79UL, + 0x1ee9d5e0UL, 0x88d9d297UL, 0x2b4cb609UL, 0xbd7cb17eUL, 0x072db8e7UL, + 0x911dbf90UL, 0x6410b71dUL, 0xf220b06aUL, 0x4871b9f3UL, 0xde41be84UL, + 0x7dd4da1aUL, 0xebe4dd6dUL, 0x51b5d4f4UL, 0xc785d383UL, 0x56986c13UL, + 0xc0a86b64UL, 0x7af962fdUL, 0xecc9658aUL, 0x4f5c0114UL, 0xd96c0663UL, + 0x633d0ffaUL, 0xf50d088dUL, 0xc8206e3bUL, 0x5e10694cUL, 0xe44160d5UL, + 0x727167a2UL, 0xd1e4033cUL, 0x47d4044bUL, 0xfd850dd2UL, 0x6bb50aa5UL, + 0xfaa8b535UL, 0x6c98b242UL, 0xd6c9bbdbUL, 0x40f9bcacUL, 0xe36cd832UL, + 0x755cdf45UL, 0xcf0dd6dcUL, 0x593dd1abUL, 0xac30d926UL, 0x3a00de51UL, + 0x8051d7c8UL, 0x1661d0bfUL, 0xb5f4b421UL, 0x23c4b356UL, 0x9995bacfUL, + 0x0fa5bdb8UL, 0x9eb80228UL, 0x0888055fUL, 0xb2d90cc6UL, 0x24e90bb1UL, + 0x877c6f2fUL, 0x114c6858UL, 0xab1d61c1UL, 0x3d2d66b6UL, 0x9041dc76UL, + 0x0671db01UL, 0xbc20d298UL, 0x2a10d5efUL, 0x8985b171UL, 0x1fb5b606UL, + 0xa5e4bf9fUL, 0x33d4b8e8UL, 0xa2c90778UL, 0x34f9000fUL, 0x8ea80996UL, + 0x18980ee1UL, 0xbb0d6a7fUL, 0x2d3d6d08UL, 0x976c6491UL, 0x015c63e6UL, + 0xf4516b6bUL, 0x62616c1cUL, 0xd8306585UL, 0x4e0062f2UL, 0xed95066cUL, + 0x7ba5011bUL, 0xc1f40882UL, 0x57c40ff5UL, 0xc6d9b065UL, 0x50e9b712UL, + 0xeab8be8bUL, 0x7c88b9fcUL, 0xdf1ddd62UL, 0x492dda15UL, 0xf37cd38cUL, + 0x654cd4fbUL, 0x5861b24dUL, 0xce51b53aUL, 0x7400bca3UL, 0xe230bbd4UL, + 0x41a5df4aUL, 0xd795d83dUL, 0x6dc4d1a4UL, 0xfbf4d6d3UL, 0x6ae96943UL, + 0xfcd96e34UL, 0x468867adUL, 0xd0b860daUL, 0x732d0444UL, 0xe51d0333UL, + 0x5f4c0aaaUL, 0xc97c0dddUL, 0x3c710550UL, 0xaa410227UL, 0x10100bbeUL, + 0x86200cc9UL, 0x25b56857UL, 0xb3856f20UL, 0x09d466b9UL, 0x9fe461ceUL, + 0x0ef9de5eUL, 0x98c9d929UL, 0x2298d0b0UL, 0xb4a8d7c7UL, 0x173db359UL, + 0x810db42eUL, 0x3b5cbdb7UL, 0xad6cbac0UL, 0x2083b8edUL, 0xb6b3bf9aUL, + 0x0ce2b603UL, 0x9ad2b174UL, 0x3947d5eaUL, 0xaf77d29dUL, 0x1526db04UL, + 0x8316dc73UL, 0x120b63e3UL, 0x843b6494UL, 0x3e6a6d0dUL, 0xa85a6a7aUL, + 0x0bcf0ee4UL, 0x9dff0993UL, 0x27ae000aUL, 0xb19e077dUL, 0x44930ff0UL, + 0xd2a30887UL, 0x68f2011eUL, 
0xfec20669UL, 0x5d5762f7UL, 0xcb676580UL, + 0x71366c19UL, 0xe7066b6eUL, 0x761bd4feUL, 0xe02bd389UL, 0x5a7ada10UL, + 0xcc4add67UL, 0x6fdfb9f9UL, 0xf9efbe8eUL, 0x43beb717UL, 0xd58eb060UL, + 0xe8a3d6d6UL, 0x7e93d1a1UL, 0xc4c2d838UL, 0x52f2df4fUL, 0xf167bbd1UL, + 0x6757bca6UL, 0xdd06b53fUL, 0x4b36b248UL, 0xda2b0dd8UL, 0x4c1b0aafUL, + 0xf64a0336UL, 0x607a0441UL, 0xc3ef60dfUL, 0x55df67a8UL, 0xef8e6e31UL, + 0x79be6946UL, 0x8cb361cbUL, 0x1a8366bcUL, 0xa0d26f25UL, 0x36e26852UL, + 0x95770cccUL, 0x03470bbbUL, 0xb9160222UL, 0x2f260555UL, 0xbe3bbac5UL, + 0x280bbdb2UL, 0x925ab42bUL, 0x046ab35cUL, 0xa7ffd7c2UL, 0x31cfd0b5UL, + 0x8b9ed92cUL, 0x1daede5bUL, 0xb0c2649bUL, 0x26f263ecUL, 0x9ca36a75UL, + 0x0a936d02UL, 0xa906099cUL, 0x3f360eebUL, 0x85670772UL, 0x13570005UL, + 0x824abf95UL, 0x147ab8e2UL, 0xae2bb17bUL, 0x381bb60cUL, 0x9b8ed292UL, + 0x0dbed5e5UL, 0xb7efdc7cUL, 0x21dfdb0bUL, 0xd4d2d386UL, 0x42e2d4f1UL, + 0xf8b3dd68UL, 0x6e83da1fUL, 0xcd16be81UL, 0x5b26b9f6UL, 0xe177b06fUL, + 0x7747b718UL, 0xe65a0888UL, 0x706a0fffUL, 0xca3b0666UL, 0x5c0b0111UL, + 0xff9e658fUL, 0x69ae62f8UL, 0xd3ff6b61UL, 0x45cf6c16UL, 0x78e20aa0UL, + 0xeed20dd7UL, 0x5483044eUL, 0xc2b30339UL, 0x612667a7UL, 0xf71660d0UL, + 0x4d476949UL, 0xdb776e3eUL, 0x4a6ad1aeUL, 0xdc5ad6d9UL, 0x660bdf40UL, + 0xf03bd837UL, 0x53aebca9UL, 0xc59ebbdeUL, 0x7fcfb247UL, 0xe9ffb530UL, + 0x1cf2bdbdUL, 0x8ac2bacaUL, 0x3093b353UL, 0xa6a3b424UL, 0x0536d0baUL, + 0x9306d7cdUL, 0x2957de54UL, 0xbf67d923UL, 0x2e7a66b3UL, 0xb84a61c4UL, + 0x021b685dUL, 0x942b6f2aUL, 0x37be0bb4UL, 0xa18e0cc3UL, 0x1bdf055aUL, + 0x8def022dUL + }, + { + 0x00000000UL, 0x41311b19UL, 0x82623632UL, 0xc3532d2bUL, 0x04c56c64UL, + 0x45f4777dUL, 0x86a75a56UL, 0xc796414fUL, 0x088ad9c8UL, 0x49bbc2d1UL, + 0x8ae8effaUL, 0xcbd9f4e3UL, 0x0c4fb5acUL, 0x4d7eaeb5UL, 0x8e2d839eUL, + 0xcf1c9887UL, 0x5112c24aUL, 0x1023d953UL, 0xd370f478UL, 0x9241ef61UL, + 0x55d7ae2eUL, 0x14e6b537UL, 0xd7b5981cUL, 0x96848305UL, 0x59981b82UL, + 0x18a9009bUL, 0xdbfa2db0UL, 0x9acb36a9UL, 0x5d5d77e6UL, 
0x1c6c6cffUL, + 0xdf3f41d4UL, 0x9e0e5acdUL, 0xa2248495UL, 0xe3159f8cUL, 0x2046b2a7UL, + 0x6177a9beUL, 0xa6e1e8f1UL, 0xe7d0f3e8UL, 0x2483dec3UL, 0x65b2c5daUL, + 0xaaae5d5dUL, 0xeb9f4644UL, 0x28cc6b6fUL, 0x69fd7076UL, 0xae6b3139UL, + 0xef5a2a20UL, 0x2c09070bUL, 0x6d381c12UL, 0xf33646dfUL, 0xb2075dc6UL, + 0x715470edUL, 0x30656bf4UL, 0xf7f32abbUL, 0xb6c231a2UL, 0x75911c89UL, + 0x34a00790UL, 0xfbbc9f17UL, 0xba8d840eUL, 0x79dea925UL, 0x38efb23cUL, + 0xff79f373UL, 0xbe48e86aUL, 0x7d1bc541UL, 0x3c2ade58UL, 0x054f79f0UL, + 0x447e62e9UL, 0x872d4fc2UL, 0xc61c54dbUL, 0x018a1594UL, 0x40bb0e8dUL, + 0x83e823a6UL, 0xc2d938bfUL, 0x0dc5a038UL, 0x4cf4bb21UL, 0x8fa7960aUL, + 0xce968d13UL, 0x0900cc5cUL, 0x4831d745UL, 0x8b62fa6eUL, 0xca53e177UL, + 0x545dbbbaUL, 0x156ca0a3UL, 0xd63f8d88UL, 0x970e9691UL, 0x5098d7deUL, + 0x11a9ccc7UL, 0xd2fae1ecUL, 0x93cbfaf5UL, 0x5cd76272UL, 0x1de6796bUL, + 0xdeb55440UL, 0x9f844f59UL, 0x58120e16UL, 0x1923150fUL, 0xda703824UL, + 0x9b41233dUL, 0xa76bfd65UL, 0xe65ae67cUL, 0x2509cb57UL, 0x6438d04eUL, + 0xa3ae9101UL, 0xe29f8a18UL, 0x21cca733UL, 0x60fdbc2aUL, 0xafe124adUL, + 0xeed03fb4UL, 0x2d83129fUL, 0x6cb20986UL, 0xab2448c9UL, 0xea1553d0UL, + 0x29467efbUL, 0x687765e2UL, 0xf6793f2fUL, 0xb7482436UL, 0x741b091dUL, + 0x352a1204UL, 0xf2bc534bUL, 0xb38d4852UL, 0x70de6579UL, 0x31ef7e60UL, + 0xfef3e6e7UL, 0xbfc2fdfeUL, 0x7c91d0d5UL, 0x3da0cbccUL, 0xfa368a83UL, + 0xbb07919aUL, 0x7854bcb1UL, 0x3965a7a8UL, 0x4b98833bUL, 0x0aa99822UL, + 0xc9fab509UL, 0x88cbae10UL, 0x4f5def5fUL, 0x0e6cf446UL, 0xcd3fd96dUL, + 0x8c0ec274UL, 0x43125af3UL, 0x022341eaUL, 0xc1706cc1UL, 0x804177d8UL, + 0x47d73697UL, 0x06e62d8eUL, 0xc5b500a5UL, 0x84841bbcUL, 0x1a8a4171UL, + 0x5bbb5a68UL, 0x98e87743UL, 0xd9d96c5aUL, 0x1e4f2d15UL, 0x5f7e360cUL, + 0x9c2d1b27UL, 0xdd1c003eUL, 0x120098b9UL, 0x533183a0UL, 0x9062ae8bUL, + 0xd153b592UL, 0x16c5f4ddUL, 0x57f4efc4UL, 0x94a7c2efUL, 0xd596d9f6UL, + 0xe9bc07aeUL, 0xa88d1cb7UL, 0x6bde319cUL, 0x2aef2a85UL, 0xed796bcaUL, + 0xac4870d3UL, 0x6f1b5df8UL, 
0x2e2a46e1UL, 0xe136de66UL, 0xa007c57fUL, + 0x6354e854UL, 0x2265f34dUL, 0xe5f3b202UL, 0xa4c2a91bUL, 0x67918430UL, + 0x26a09f29UL, 0xb8aec5e4UL, 0xf99fdefdUL, 0x3accf3d6UL, 0x7bfde8cfUL, + 0xbc6ba980UL, 0xfd5ab299UL, 0x3e099fb2UL, 0x7f3884abUL, 0xb0241c2cUL, + 0xf1150735UL, 0x32462a1eUL, 0x73773107UL, 0xb4e17048UL, 0xf5d06b51UL, + 0x3683467aUL, 0x77b25d63UL, 0x4ed7facbUL, 0x0fe6e1d2UL, 0xccb5ccf9UL, + 0x8d84d7e0UL, 0x4a1296afUL, 0x0b238db6UL, 0xc870a09dUL, 0x8941bb84UL, + 0x465d2303UL, 0x076c381aUL, 0xc43f1531UL, 0x850e0e28UL, 0x42984f67UL, + 0x03a9547eUL, 0xc0fa7955UL, 0x81cb624cUL, 0x1fc53881UL, 0x5ef42398UL, + 0x9da70eb3UL, 0xdc9615aaUL, 0x1b0054e5UL, 0x5a314ffcUL, 0x996262d7UL, + 0xd85379ceUL, 0x174fe149UL, 0x567efa50UL, 0x952dd77bUL, 0xd41ccc62UL, + 0x138a8d2dUL, 0x52bb9634UL, 0x91e8bb1fUL, 0xd0d9a006UL, 0xecf37e5eUL, + 0xadc26547UL, 0x6e91486cUL, 0x2fa05375UL, 0xe836123aUL, 0xa9070923UL, + 0x6a542408UL, 0x2b653f11UL, 0xe479a796UL, 0xa548bc8fUL, 0x661b91a4UL, + 0x272a8abdUL, 0xe0bccbf2UL, 0xa18dd0ebUL, 0x62defdc0UL, 0x23efe6d9UL, + 0xbde1bc14UL, 0xfcd0a70dUL, 0x3f838a26UL, 0x7eb2913fUL, 0xb924d070UL, + 0xf815cb69UL, 0x3b46e642UL, 0x7a77fd5bUL, 0xb56b65dcUL, 0xf45a7ec5UL, + 0x370953eeUL, 0x763848f7UL, 0xb1ae09b8UL, 0xf09f12a1UL, 0x33cc3f8aUL, + 0x72fd2493UL + }, + { + 0x00000000UL, 0x376ac201UL, 0x6ed48403UL, 0x59be4602UL, 0xdca80907UL, + 0xebc2cb06UL, 0xb27c8d04UL, 0x85164f05UL, 0xb851130eUL, 0x8f3bd10fUL, + 0xd685970dUL, 0xe1ef550cUL, 0x64f91a09UL, 0x5393d808UL, 0x0a2d9e0aUL, + 0x3d475c0bUL, 0x70a3261cUL, 0x47c9e41dUL, 0x1e77a21fUL, 0x291d601eUL, + 0xac0b2f1bUL, 0x9b61ed1aUL, 0xc2dfab18UL, 0xf5b56919UL, 0xc8f23512UL, + 0xff98f713UL, 0xa626b111UL, 0x914c7310UL, 0x145a3c15UL, 0x2330fe14UL, + 0x7a8eb816UL, 0x4de47a17UL, 0xe0464d38UL, 0xd72c8f39UL, 0x8e92c93bUL, + 0xb9f80b3aUL, 0x3cee443fUL, 0x0b84863eUL, 0x523ac03cUL, 0x6550023dUL, + 0x58175e36UL, 0x6f7d9c37UL, 0x36c3da35UL, 0x01a91834UL, 0x84bf5731UL, + 0xb3d59530UL, 0xea6bd332UL, 0xdd011133UL, 0x90e56b24UL, 
0xa78fa925UL, + 0xfe31ef27UL, 0xc95b2d26UL, 0x4c4d6223UL, 0x7b27a022UL, 0x2299e620UL, + 0x15f32421UL, 0x28b4782aUL, 0x1fdeba2bUL, 0x4660fc29UL, 0x710a3e28UL, + 0xf41c712dUL, 0xc376b32cUL, 0x9ac8f52eUL, 0xada2372fUL, 0xc08d9a70UL, + 0xf7e75871UL, 0xae591e73UL, 0x9933dc72UL, 0x1c259377UL, 0x2b4f5176UL, + 0x72f11774UL, 0x459bd575UL, 0x78dc897eUL, 0x4fb64b7fUL, 0x16080d7dUL, + 0x2162cf7cUL, 0xa4748079UL, 0x931e4278UL, 0xcaa0047aUL, 0xfdcac67bUL, + 0xb02ebc6cUL, 0x87447e6dUL, 0xdefa386fUL, 0xe990fa6eUL, 0x6c86b56bUL, + 0x5bec776aUL, 0x02523168UL, 0x3538f369UL, 0x087faf62UL, 0x3f156d63UL, + 0x66ab2b61UL, 0x51c1e960UL, 0xd4d7a665UL, 0xe3bd6464UL, 0xba032266UL, + 0x8d69e067UL, 0x20cbd748UL, 0x17a11549UL, 0x4e1f534bUL, 0x7975914aUL, + 0xfc63de4fUL, 0xcb091c4eUL, 0x92b75a4cUL, 0xa5dd984dUL, 0x989ac446UL, + 0xaff00647UL, 0xf64e4045UL, 0xc1248244UL, 0x4432cd41UL, 0x73580f40UL, + 0x2ae64942UL, 0x1d8c8b43UL, 0x5068f154UL, 0x67023355UL, 0x3ebc7557UL, + 0x09d6b756UL, 0x8cc0f853UL, 0xbbaa3a52UL, 0xe2147c50UL, 0xd57ebe51UL, + 0xe839e25aUL, 0xdf53205bUL, 0x86ed6659UL, 0xb187a458UL, 0x3491eb5dUL, + 0x03fb295cUL, 0x5a456f5eUL, 0x6d2fad5fUL, 0x801b35e1UL, 0xb771f7e0UL, + 0xeecfb1e2UL, 0xd9a573e3UL, 0x5cb33ce6UL, 0x6bd9fee7UL, 0x3267b8e5UL, + 0x050d7ae4UL, 0x384a26efUL, 0x0f20e4eeUL, 0x569ea2ecUL, 0x61f460edUL, + 0xe4e22fe8UL, 0xd388ede9UL, 0x8a36abebUL, 0xbd5c69eaUL, 0xf0b813fdUL, + 0xc7d2d1fcUL, 0x9e6c97feUL, 0xa90655ffUL, 0x2c101afaUL, 0x1b7ad8fbUL, + 0x42c49ef9UL, 0x75ae5cf8UL, 0x48e900f3UL, 0x7f83c2f2UL, 0x263d84f0UL, + 0x115746f1UL, 0x944109f4UL, 0xa32bcbf5UL, 0xfa958df7UL, 0xcdff4ff6UL, + 0x605d78d9UL, 0x5737bad8UL, 0x0e89fcdaUL, 0x39e33edbUL, 0xbcf571deUL, + 0x8b9fb3dfUL, 0xd221f5ddUL, 0xe54b37dcUL, 0xd80c6bd7UL, 0xef66a9d6UL, + 0xb6d8efd4UL, 0x81b22dd5UL, 0x04a462d0UL, 0x33cea0d1UL, 0x6a70e6d3UL, + 0x5d1a24d2UL, 0x10fe5ec5UL, 0x27949cc4UL, 0x7e2adac6UL, 0x494018c7UL, + 0xcc5657c2UL, 0xfb3c95c3UL, 0xa282d3c1UL, 0x95e811c0UL, 0xa8af4dcbUL, + 0x9fc58fcaUL, 0xc67bc9c8UL, 
0xf1110bc9UL, 0x740744ccUL, 0x436d86cdUL, + 0x1ad3c0cfUL, 0x2db902ceUL, 0x4096af91UL, 0x77fc6d90UL, 0x2e422b92UL, + 0x1928e993UL, 0x9c3ea696UL, 0xab546497UL, 0xf2ea2295UL, 0xc580e094UL, + 0xf8c7bc9fUL, 0xcfad7e9eUL, 0x9613389cUL, 0xa179fa9dUL, 0x246fb598UL, + 0x13057799UL, 0x4abb319bUL, 0x7dd1f39aUL, 0x3035898dUL, 0x075f4b8cUL, + 0x5ee10d8eUL, 0x698bcf8fUL, 0xec9d808aUL, 0xdbf7428bUL, 0x82490489UL, + 0xb523c688UL, 0x88649a83UL, 0xbf0e5882UL, 0xe6b01e80UL, 0xd1dadc81UL, + 0x54cc9384UL, 0x63a65185UL, 0x3a181787UL, 0x0d72d586UL, 0xa0d0e2a9UL, + 0x97ba20a8UL, 0xce0466aaUL, 0xf96ea4abUL, 0x7c78ebaeUL, 0x4b1229afUL, + 0x12ac6fadUL, 0x25c6adacUL, 0x1881f1a7UL, 0x2feb33a6UL, 0x765575a4UL, + 0x413fb7a5UL, 0xc429f8a0UL, 0xf3433aa1UL, 0xaafd7ca3UL, 0x9d97bea2UL, + 0xd073c4b5UL, 0xe71906b4UL, 0xbea740b6UL, 0x89cd82b7UL, 0x0cdbcdb2UL, + 0x3bb10fb3UL, 0x620f49b1UL, 0x55658bb0UL, 0x6822d7bbUL, 0x5f4815baUL, + 0x06f653b8UL, 0x319c91b9UL, 0xb48adebcUL, 0x83e01cbdUL, 0xda5e5abfUL, + 0xed3498beUL + }, + { + 0x00000000UL, 0x6567bcb8UL, 0x8bc809aaUL, 0xeeafb512UL, 0x5797628fUL, + 0x32f0de37UL, 0xdc5f6b25UL, 0xb938d79dUL, 0xef28b4c5UL, 0x8a4f087dUL, + 0x64e0bd6fUL, 0x018701d7UL, 0xb8bfd64aUL, 0xddd86af2UL, 0x3377dfe0UL, + 0x56106358UL, 0x9f571950UL, 0xfa30a5e8UL, 0x149f10faUL, 0x71f8ac42UL, + 0xc8c07bdfUL, 0xada7c767UL, 0x43087275UL, 0x266fcecdUL, 0x707fad95UL, + 0x1518112dUL, 0xfbb7a43fUL, 0x9ed01887UL, 0x27e8cf1aUL, 0x428f73a2UL, + 0xac20c6b0UL, 0xc9477a08UL, 0x3eaf32a0UL, 0x5bc88e18UL, 0xb5673b0aUL, + 0xd00087b2UL, 0x6938502fUL, 0x0c5fec97UL, 0xe2f05985UL, 0x8797e53dUL, + 0xd1878665UL, 0xb4e03addUL, 0x5a4f8fcfUL, 0x3f283377UL, 0x8610e4eaUL, + 0xe3775852UL, 0x0dd8ed40UL, 0x68bf51f8UL, 0xa1f82bf0UL, 0xc49f9748UL, + 0x2a30225aUL, 0x4f579ee2UL, 0xf66f497fUL, 0x9308f5c7UL, 0x7da740d5UL, + 0x18c0fc6dUL, 0x4ed09f35UL, 0x2bb7238dUL, 0xc518969fUL, 0xa07f2a27UL, + 0x1947fdbaUL, 0x7c204102UL, 0x928ff410UL, 0xf7e848a8UL, 0x3d58149bUL, + 0x583fa823UL, 0xb6901d31UL, 0xd3f7a189UL, 0x6acf7614UL, 
0x0fa8caacUL, + 0xe1077fbeUL, 0x8460c306UL, 0xd270a05eUL, 0xb7171ce6UL, 0x59b8a9f4UL, + 0x3cdf154cUL, 0x85e7c2d1UL, 0xe0807e69UL, 0x0e2fcb7bUL, 0x6b4877c3UL, + 0xa20f0dcbUL, 0xc768b173UL, 0x29c70461UL, 0x4ca0b8d9UL, 0xf5986f44UL, + 0x90ffd3fcUL, 0x7e5066eeUL, 0x1b37da56UL, 0x4d27b90eUL, 0x284005b6UL, + 0xc6efb0a4UL, 0xa3880c1cUL, 0x1ab0db81UL, 0x7fd76739UL, 0x9178d22bUL, + 0xf41f6e93UL, 0x03f7263bUL, 0x66909a83UL, 0x883f2f91UL, 0xed589329UL, + 0x546044b4UL, 0x3107f80cUL, 0xdfa84d1eUL, 0xbacff1a6UL, 0xecdf92feUL, + 0x89b82e46UL, 0x67179b54UL, 0x027027ecUL, 0xbb48f071UL, 0xde2f4cc9UL, + 0x3080f9dbUL, 0x55e74563UL, 0x9ca03f6bUL, 0xf9c783d3UL, 0x176836c1UL, + 0x720f8a79UL, 0xcb375de4UL, 0xae50e15cUL, 0x40ff544eUL, 0x2598e8f6UL, + 0x73888baeUL, 0x16ef3716UL, 0xf8408204UL, 0x9d273ebcUL, 0x241fe921UL, + 0x41785599UL, 0xafd7e08bUL, 0xcab05c33UL, 0x3bb659edUL, 0x5ed1e555UL, + 0xb07e5047UL, 0xd519ecffUL, 0x6c213b62UL, 0x094687daUL, 0xe7e932c8UL, + 0x828e8e70UL, 0xd49eed28UL, 0xb1f95190UL, 0x5f56e482UL, 0x3a31583aUL, + 0x83098fa7UL, 0xe66e331fUL, 0x08c1860dUL, 0x6da63ab5UL, 0xa4e140bdUL, + 0xc186fc05UL, 0x2f294917UL, 0x4a4ef5afUL, 0xf3762232UL, 0x96119e8aUL, + 0x78be2b98UL, 0x1dd99720UL, 0x4bc9f478UL, 0x2eae48c0UL, 0xc001fdd2UL, + 0xa566416aUL, 0x1c5e96f7UL, 0x79392a4fUL, 0x97969f5dUL, 0xf2f123e5UL, + 0x05196b4dUL, 0x607ed7f5UL, 0x8ed162e7UL, 0xebb6de5fUL, 0x528e09c2UL, + 0x37e9b57aUL, 0xd9460068UL, 0xbc21bcd0UL, 0xea31df88UL, 0x8f566330UL, + 0x61f9d622UL, 0x049e6a9aUL, 0xbda6bd07UL, 0xd8c101bfUL, 0x366eb4adUL, + 0x53090815UL, 0x9a4e721dUL, 0xff29cea5UL, 0x11867bb7UL, 0x74e1c70fUL, + 0xcdd91092UL, 0xa8beac2aUL, 0x46111938UL, 0x2376a580UL, 0x7566c6d8UL, + 0x10017a60UL, 0xfeaecf72UL, 0x9bc973caUL, 0x22f1a457UL, 0x479618efUL, + 0xa939adfdUL, 0xcc5e1145UL, 0x06ee4d76UL, 0x6389f1ceUL, 0x8d2644dcUL, + 0xe841f864UL, 0x51792ff9UL, 0x341e9341UL, 0xdab12653UL, 0xbfd69aebUL, + 0xe9c6f9b3UL, 0x8ca1450bUL, 0x620ef019UL, 0x07694ca1UL, 0xbe519b3cUL, + 0xdb362784UL, 0x35999296UL, 
0x50fe2e2eUL, 0x99b95426UL, 0xfcdee89eUL, + 0x12715d8cUL, 0x7716e134UL, 0xce2e36a9UL, 0xab498a11UL, 0x45e63f03UL, + 0x208183bbUL, 0x7691e0e3UL, 0x13f65c5bUL, 0xfd59e949UL, 0x983e55f1UL, + 0x2106826cUL, 0x44613ed4UL, 0xaace8bc6UL, 0xcfa9377eUL, 0x38417fd6UL, + 0x5d26c36eUL, 0xb389767cUL, 0xd6eecac4UL, 0x6fd61d59UL, 0x0ab1a1e1UL, + 0xe41e14f3UL, 0x8179a84bUL, 0xd769cb13UL, 0xb20e77abUL, 0x5ca1c2b9UL, + 0x39c67e01UL, 0x80fea99cUL, 0xe5991524UL, 0x0b36a036UL, 0x6e511c8eUL, + 0xa7166686UL, 0xc271da3eUL, 0x2cde6f2cUL, 0x49b9d394UL, 0xf0810409UL, + 0x95e6b8b1UL, 0x7b490da3UL, 0x1e2eb11bUL, 0x483ed243UL, 0x2d596efbUL, + 0xc3f6dbe9UL, 0xa6916751UL, 0x1fa9b0ccUL, 0x7ace0c74UL, 0x9461b966UL, + 0xf10605deUL +#endif + } +}; diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/deflate.c cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/deflate.c --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/deflate.c 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/deflate.c 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,1967 @@ +/* deflate.c -- compress data using the deflation algorithm + * Copyright (C) 1995-2013 Jean-loup Gailly and Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* + * ALGORITHM + * + * The "deflation" process depends on being able to identify portions + * of the input text which are identical to earlier input (within a + * sliding window trailing behind the input currently being processed). + * + * The most straightforward technique turns out to be the fastest for + * most input files: try all possible matches and select the longest. + * The key feature of this algorithm is that insertions into the string + * dictionary are very simple and thus fast, and deletions are avoided + * completely. Insertions are performed at each input character, whereas + * string matches are performed only when the previous match ends. 
So it + * is preferable to spend more time in matches to allow very fast string + * insertions and avoid deletions. The matching algorithm for small + * strings is inspired from that of Rabin & Karp. A brute force approach + * is used to find longer strings when a small match has been found. + * A similar algorithm is used in comic (by Jan-Mark Wams) and freeze + * (by Leonid Broukhis). + * A previous version of this file used a more sophisticated algorithm + * (by Fiala and Greene) which is guaranteed to run in linear amortized + * time, but has a larger average cost, uses more memory and is patented. + * However the F&G algorithm may be faster for some highly redundant + * files if the parameter max_chain_length (described below) is too large. + * + * ACKNOWLEDGEMENTS + * + * The idea of lazy evaluation of matches is due to Jan-Mark Wams, and + * I found it in 'freeze' written by Leonid Broukhis. + * Thanks to many people for bug reports and testing. + * + * REFERENCES + * + * Deutsch, L.P.,"DEFLATE Compressed Data Format Specification". + * Available in http://tools.ietf.org/html/rfc1951 + * + * A description of the Rabin and Karp algorithm is given in the book + * "Algorithms" by R. Sedgewick, Addison-Wesley, p252. + * + * Fiala,E.R., and Greene,D.H. + * Data Compression with Finite Windows, Comm.ACM, 32,4 (1989) 490-595 + * + */ + +/* @(#) $Id$ */ + +#include "deflate.h" + +const char deflate_copyright[] = + " deflate 1.2.8 Copyright 1995-2013 Jean-loup Gailly and Mark Adler "; +/* + If you use the zlib library in a product, an acknowledgment is welcome + in the documentation of your product. If for some reason you cannot + include such an acknowledgment, I would appreciate that you keep this + copyright string in the executable of your product. + */ + +/* =========================================================================== + * Function prototypes. 
+ */ +typedef enum { + need_more, /* block not completed, need more input or more output */ + block_done, /* block flush performed */ + finish_started, /* finish started, need only more output at next deflate */ + finish_done /* finish done, accept no more input or output */ +} block_state; + +typedef block_state (*compress_func) OF((deflate_state *s, int flush)); +/* Compression function. Returns the block state after the call. */ + +local void fill_window OF((deflate_state *s)); +local block_state deflate_stored OF((deflate_state *s, int flush)); +local block_state deflate_fast OF((deflate_state *s, int flush)); +#ifndef FASTEST +local block_state deflate_slow OF((deflate_state *s, int flush)); +#endif +local block_state deflate_rle OF((deflate_state *s, int flush)); +local block_state deflate_huff OF((deflate_state *s, int flush)); +local void lm_init OF((deflate_state *s)); +local void putShortMSB OF((deflate_state *s, uInt b)); +local void flush_pending OF((z_streamp strm)); +local int read_buf OF((z_streamp strm, Bytef *buf, unsigned size)); +#ifdef ASMV + void match_init OF((void)); /* asm code initialization */ + uInt longest_match OF((deflate_state *s, IPos cur_match)); +#else +local uInt longest_match OF((deflate_state *s, IPos cur_match)); +#endif + +#ifdef DEBUG +local void check_match OF((deflate_state *s, IPos start, IPos match, + int length)); +#endif + +/* =========================================================================== + * Local data + */ + +#define NIL 0 +/* Tail of hash chains */ + +#ifndef TOO_FAR +# define TOO_FAR 4096 +#endif +/* Matches of length 3 are discarded if their distance exceeds TOO_FAR */ + +/* Values for max_lazy_match, good_match and max_chain_length, depending on + * the desired pack level (0..9). The values given below have been tuned to + * exclude worst case performance for pathological files. Better values may be + * found for specific files. 
+ */ +typedef struct config_s { + ush good_length; /* reduce lazy search above this match length */ + ush max_lazy; /* do not perform lazy search above this match length */ + ush nice_length; /* quit search above this match length */ + ush max_chain; + compress_func func; +} config; + +#ifdef FASTEST +local const config configuration_table[2] = { +/* good lazy nice chain */ +/* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */ +/* 1 */ {4, 4, 8, 4, deflate_fast}}; /* max speed, no lazy matches */ +#else +local const config configuration_table[10] = { +/* good lazy nice chain */ +/* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */ +/* 1 */ {4, 4, 8, 4, deflate_fast}, /* max speed, no lazy matches */ +/* 2 */ {4, 5, 16, 8, deflate_fast}, +/* 3 */ {4, 6, 32, 32, deflate_fast}, + +/* 4 */ {4, 4, 16, 16, deflate_slow}, /* lazy matches */ +/* 5 */ {8, 16, 32, 32, deflate_slow}, +/* 6 */ {8, 16, 128, 128, deflate_slow}, +/* 7 */ {8, 32, 128, 256, deflate_slow}, +/* 8 */ {32, 128, 258, 1024, deflate_slow}, +/* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* max compression */ +#endif + +/* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4 + * For deflate_fast() (levels <= 3) good is ignored and lazy has a different + * meaning. + */ + +#define EQUAL 0 +/* result of memcmp for equal strings */ + +#ifndef NO_DUMMY_DECL +struct static_tree_desc_s {int dummy;}; /* for buggy compilers */ +#endif + +/* rank Z_BLOCK between Z_NO_FLUSH and Z_PARTIAL_FLUSH */ +#define RANK(f) (((f) << 1) - ((f) > 4 ? 9 : 0)) + +/* =========================================================================== + * Update a hash value with the given input byte + * IN assertion: all calls to to UPDATE_HASH are made with consecutive + * input characters, so that a running hash key can be computed from the + * previous key instead of complete recalculation each time. 
+ */ +#define UPDATE_HASH(s,h,c) (h = (((h)<hash_shift) ^ (c)) & s->hash_mask) + + +/* =========================================================================== + * Insert string str in the dictionary and set match_head to the previous head + * of the hash chain (the most recent string with same hash key). Return + * the previous length of the hash chain. + * If this file is compiled with -DFASTEST, the compression level is forced + * to 1, and no hash chains are maintained. + * IN assertion: all calls to to INSERT_STRING are made with consecutive + * input characters and the first MIN_MATCH bytes of str are valid + * (except for the last MIN_MATCH-1 bytes of the input file). + */ +#ifdef FASTEST +#define INSERT_STRING(s, str, match_head) \ + (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \ + match_head = s->head[s->ins_h], \ + s->head[s->ins_h] = (Pos)(str)) +#else +#define INSERT_STRING(s, str, match_head) \ + (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \ + match_head = s->prev[(str) & s->w_mask] = s->head[s->ins_h], \ + s->head[s->ins_h] = (Pos)(str)) +#endif + +/* =========================================================================== + * Initialize the hash table (avoiding 64K overflow for 16 bit systems). + * prev[] will be initialized on the fly. 
+ */ +#define CLEAR_HASH(s) \ + s->head[s->hash_size-1] = NIL; \ + zmemzero((Bytef *)s->head, (unsigned)(s->hash_size-1)*sizeof(*s->head)); + +/* ========================================================================= */ +int ZEXPORT deflateInit_(strm, level, version, stream_size) + z_streamp strm; + int level; + const char *version; + int stream_size; +{ + return deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL, + Z_DEFAULT_STRATEGY, version, stream_size); + /* To do: ignore strm->next_in if we use it as window */ +} + +/* ========================================================================= */ +int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy, + version, stream_size) + z_streamp strm; + int level; + int method; + int windowBits; + int memLevel; + int strategy; + const char *version; + int stream_size; +{ + deflate_state *s; + int wrap = 1; + static const char my_version[] = ZLIB_VERSION; + + ushf *overlay; + /* We overlay pending_buf and d_buf+l_buf. This works since the average + * output size for (length,distance) codes is <= 24 bits. 
+ */ + + if (version == Z_NULL || version[0] != my_version[0] || + stream_size != sizeof(z_stream)) { + return Z_VERSION_ERROR; + } + if (strm == Z_NULL) return Z_STREAM_ERROR; + + strm->msg = Z_NULL; + if (strm->zalloc == (alloc_func)0) { +#ifdef Z_SOLO + return Z_STREAM_ERROR; +#else + strm->zalloc = zcalloc; + strm->opaque = (voidpf)0; +#endif + } + if (strm->zfree == (free_func)0) +#ifdef Z_SOLO + return Z_STREAM_ERROR; +#else + strm->zfree = zcfree; +#endif + +#ifdef FASTEST + if (level != 0) level = 1; +#else + if (level == Z_DEFAULT_COMPRESSION) level = 6; +#endif + + if (windowBits < 0) { /* suppress zlib wrapper */ + wrap = 0; + windowBits = -windowBits; + } +#ifdef GZIP + else if (windowBits > 15) { + wrap = 2; /* write gzip wrapper instead */ + windowBits -= 16; + } +#endif + if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED || + windowBits < 8 || windowBits > 15 || level < 0 || level > 9 || + strategy < 0 || strategy > Z_FIXED) { + return Z_STREAM_ERROR; + } + if (windowBits == 8) windowBits = 9; /* until 256-byte window bug fixed */ + s = (deflate_state *) ZALLOC(strm, 1, sizeof(deflate_state)); + if (s == Z_NULL) return Z_MEM_ERROR; + strm->state = (struct internal_state FAR *)s; + s->strm = strm; + + s->wrap = wrap; + s->gzhead = Z_NULL; + s->w_bits = windowBits; + s->w_size = 1 << s->w_bits; + s->w_mask = s->w_size - 1; + + s->hash_bits = memLevel + 7; + s->hash_size = 1 << s->hash_bits; + s->hash_mask = s->hash_size - 1; + s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH); + + s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte)); + s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos)); + s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos)); + + s->high_water = 0; /* nothing written to s->window yet */ + + s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */ + + overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2); + s->pending_buf = (uchf *) overlay; + s->pending_buf_size = 
(ulg)s->lit_bufsize * (sizeof(ush)+2L); + + if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL || + s->pending_buf == Z_NULL) { + s->status = FINISH_STATE; + strm->msg = ERR_MSG(Z_MEM_ERROR); + deflateEnd (strm); + return Z_MEM_ERROR; + } + s->d_buf = overlay + s->lit_bufsize/sizeof(ush); + s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize; + + s->level = level; + s->strategy = strategy; + s->method = (Byte)method; + + return deflateReset(strm); +} + +/* ========================================================================= */ +int ZEXPORT deflateSetDictionary (strm, dictionary, dictLength) + z_streamp strm; + const Bytef *dictionary; + uInt dictLength; +{ + deflate_state *s; + uInt str, n; + int wrap; + unsigned avail; + z_const unsigned char *next; + + if (strm == Z_NULL || strm->state == Z_NULL || dictionary == Z_NULL) + return Z_STREAM_ERROR; + s = strm->state; + wrap = s->wrap; + if (wrap == 2 || (wrap == 1 && s->status != INIT_STATE) || s->lookahead) + return Z_STREAM_ERROR; + + /* when using zlib wrappers, compute Adler-32 for provided dictionary */ + if (wrap == 1) + strm->adler = adler32(strm->adler, dictionary, dictLength); + s->wrap = 0; /* avoid computing Adler-32 in read_buf */ + + /* if dictionary would fill window, just replace the history */ + if (dictLength >= s->w_size) { + if (wrap == 0) { /* already empty otherwise */ + CLEAR_HASH(s); + s->strstart = 0; + s->block_start = 0L; + s->insert = 0; + } + dictionary += dictLength - s->w_size; /* use the tail */ + dictLength = s->w_size; + } + + /* insert dictionary into window and hash */ + avail = strm->avail_in; + next = strm->next_in; + strm->avail_in = dictLength; + strm->next_in = (z_const Bytef *)dictionary; + fill_window(s); + while (s->lookahead >= MIN_MATCH) { + str = s->strstart; + n = s->lookahead - (MIN_MATCH-1); + do { + UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); +#ifndef FASTEST + s->prev[str & s->w_mask] = s->head[s->ins_h]; +#endif + 
s->head[s->ins_h] = (Pos)str; + str++; + } while (--n); + s->strstart = str; + s->lookahead = MIN_MATCH-1; + fill_window(s); + } + s->strstart += s->lookahead; + s->block_start = (long)s->strstart; + s->insert = s->lookahead; + s->lookahead = 0; + s->match_length = s->prev_length = MIN_MATCH-1; + s->match_available = 0; + strm->next_in = next; + strm->avail_in = avail; + s->wrap = wrap; + return Z_OK; +} + +/* ========================================================================= */ +int ZEXPORT deflateResetKeep (strm) + z_streamp strm; +{ + deflate_state *s; + + if (strm == Z_NULL || strm->state == Z_NULL || + strm->zalloc == (alloc_func)0 || strm->zfree == (free_func)0) { + return Z_STREAM_ERROR; + } + + strm->total_in = strm->total_out = 0; + strm->msg = Z_NULL; /* use zfree if we ever allocate msg dynamically */ + strm->data_type = Z_UNKNOWN; + + s = (deflate_state *)strm->state; + s->pending = 0; + s->pending_out = s->pending_buf; + + if (s->wrap < 0) { + s->wrap = -s->wrap; /* was made negative by deflate(..., Z_FINISH); */ + } + s->status = s->wrap ? INIT_STATE : BUSY_STATE; + strm->adler = +#ifdef GZIP + s->wrap == 2 ? 
crc32(0L, Z_NULL, 0) : +#endif + adler32(0L, Z_NULL, 0); + s->last_flush = Z_NO_FLUSH; + + _tr_init(s); + + return Z_OK; +} + +/* ========================================================================= */ +int ZEXPORT deflateReset (strm) + z_streamp strm; +{ + int ret; + + ret = deflateResetKeep(strm); + if (ret == Z_OK) + lm_init(strm->state); + return ret; +} + +/* ========================================================================= */ +int ZEXPORT deflateSetHeader (strm, head) + z_streamp strm; + gz_headerp head; +{ + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + if (strm->state->wrap != 2) return Z_STREAM_ERROR; + strm->state->gzhead = head; + return Z_OK; +} + +/* ========================================================================= */ +int ZEXPORT deflatePending (strm, pending, bits) + unsigned *pending; + int *bits; + z_streamp strm; +{ + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + if (pending != Z_NULL) + *pending = strm->state->pending; + if (bits != Z_NULL) + *bits = strm->state->bi_valid; + return Z_OK; +} + +/* ========================================================================= */ +int ZEXPORT deflatePrime (strm, bits, value) + z_streamp strm; + int bits; + int value; +{ + deflate_state *s; + int put; + + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + s = strm->state; + if ((Bytef *)(s->d_buf) < s->pending_out + ((Buf_size + 7) >> 3)) + return Z_BUF_ERROR; + do { + put = Buf_size - s->bi_valid; + if (put > bits) + put = bits; + s->bi_buf |= (ush)((value & ((1 << put) - 1)) << s->bi_valid); + s->bi_valid += put; + _tr_flush_bits(s); + value >>= put; + bits -= put; + } while (bits); + return Z_OK; +} + +/* ========================================================================= */ +int ZEXPORT deflateParams(strm, level, strategy) + z_streamp strm; + int level; + int strategy; +{ + deflate_state *s; + compress_func func; + int err = Z_OK; + + if (strm == Z_NULL || 
strm->state == Z_NULL) return Z_STREAM_ERROR; + s = strm->state; + +#ifdef FASTEST + if (level != 0) level = 1; +#else + if (level == Z_DEFAULT_COMPRESSION) level = 6; +#endif + if (level < 0 || level > 9 || strategy < 0 || strategy > Z_FIXED) { + return Z_STREAM_ERROR; + } + func = configuration_table[s->level].func; + + if ((strategy != s->strategy || func != configuration_table[level].func) && + strm->total_in != 0) { + /* Flush the last buffer: */ + err = deflate(strm, Z_BLOCK); + if (err == Z_BUF_ERROR && s->pending == 0) + err = Z_OK; + } + if (s->level != level) { + s->level = level; + s->max_lazy_match = configuration_table[level].max_lazy; + s->good_match = configuration_table[level].good_length; + s->nice_match = configuration_table[level].nice_length; + s->max_chain_length = configuration_table[level].max_chain; + } + s->strategy = strategy; + return err; +} + +/* ========================================================================= */ +int ZEXPORT deflateTune(strm, good_length, max_lazy, nice_length, max_chain) + z_streamp strm; + int good_length; + int max_lazy; + int nice_length; + int max_chain; +{ + deflate_state *s; + + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + s = strm->state; + s->good_match = good_length; + s->max_lazy_match = max_lazy; + s->nice_match = nice_length; + s->max_chain_length = max_chain; + return Z_OK; +} + +/* ========================================================================= + * For the default windowBits of 15 and memLevel of 8, this function returns + * a close to exact, as well as small, upper bound on the compressed size. + * They are coded as constants here for a reason--if the #define's are + * changed, then this function needs to be changed as well. The return + * value for 15 and 8 only works for those exact settings. 
+ * + * For any setting other than those defaults for windowBits and memLevel, + * the value returned is a conservative worst case for the maximum expansion + * resulting from using fixed blocks instead of stored blocks, which deflate + * can emit on compressed data for some combinations of the parameters. + * + * This function could be more sophisticated to provide closer upper bounds for + * every combination of windowBits and memLevel. But even the conservative + * upper bound of about 14% expansion does not seem onerous for output buffer + * allocation. + */ +uLong ZEXPORT deflateBound(strm, sourceLen) + z_streamp strm; + uLong sourceLen; +{ + deflate_state *s; + uLong complen, wraplen; + Bytef *str; + + /* conservative upper bound for compressed data */ + complen = sourceLen + + ((sourceLen + 7) >> 3) + ((sourceLen + 63) >> 6) + 5; + + /* if can't get parameters, return conservative bound plus zlib wrapper */ + if (strm == Z_NULL || strm->state == Z_NULL) + return complen + 6; + + /* compute wrapper length */ + s = strm->state; + switch (s->wrap) { + case 0: /* raw deflate */ + wraplen = 0; + break; + case 1: /* zlib wrapper */ + wraplen = 6 + (s->strstart ? 
4 : 0); + break; + case 2: /* gzip wrapper */ + wraplen = 18; + if (s->gzhead != Z_NULL) { /* user-supplied gzip header */ + if (s->gzhead->extra != Z_NULL) + wraplen += 2 + s->gzhead->extra_len; + str = s->gzhead->name; + if (str != Z_NULL) + do { + wraplen++; + } while (*str++); + str = s->gzhead->comment; + if (str != Z_NULL) + do { + wraplen++; + } while (*str++); + if (s->gzhead->hcrc) + wraplen += 2; + } + break; + default: /* for compiler happiness */ + wraplen = 6; + } + + /* if not default parameters, return conservative bound */ + if (s->w_bits != 15 || s->hash_bits != 8 + 7) + return complen + wraplen; + + /* default settings: return tight bound for that case */ + return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) + + (sourceLen >> 25) + 13 - 6 + wraplen; +} + +/* ========================================================================= + * Put a short in the pending buffer. The 16-bit value is put in MSB order. + * IN assertion: the stream state is correct and there is enough room in + * pending_buf. + */ +local void putShortMSB (s, b) + deflate_state *s; + uInt b; +{ + put_byte(s, (Byte)(b >> 8)); + put_byte(s, (Byte)(b & 0xff)); +} + +/* ========================================================================= + * Flush as much pending output as possible. All deflate() output goes + * through this function so some applications may wish to modify it + * to avoid allocating a large strm->next_out buffer and copying into it. + * (See also read_buf()). 
+ */ +local void flush_pending(strm) + z_streamp strm; +{ + unsigned len; + deflate_state *s = strm->state; + + _tr_flush_bits(s); + len = s->pending; + if (len > strm->avail_out) len = strm->avail_out; + if (len == 0) return; + + zmemcpy(strm->next_out, s->pending_out, len); + strm->next_out += len; + s->pending_out += len; + strm->total_out += len; + strm->avail_out -= len; + s->pending -= len; + if (s->pending == 0) { + s->pending_out = s->pending_buf; + } +} + +/* ========================================================================= */ +int ZEXPORT deflate (strm, flush) + z_streamp strm; + int flush; +{ + int old_flush; /* value of flush param for previous deflate call */ + deflate_state *s; + + if (strm == Z_NULL || strm->state == Z_NULL || + flush > Z_BLOCK || flush < 0) { + return Z_STREAM_ERROR; + } + s = strm->state; + + if (strm->next_out == Z_NULL || + (strm->next_in == Z_NULL && strm->avail_in != 0) || + (s->status == FINISH_STATE && flush != Z_FINISH)) { + ERR_RETURN(strm, Z_STREAM_ERROR); + } + if (strm->avail_out == 0) ERR_RETURN(strm, Z_BUF_ERROR); + + s->strm = strm; /* just in case */ + old_flush = s->last_flush; + s->last_flush = flush; + + /* Write the header */ + if (s->status == INIT_STATE) { +#ifdef GZIP + if (s->wrap == 2) { + strm->adler = crc32(0L, Z_NULL, 0); + put_byte(s, 31); + put_byte(s, 139); + put_byte(s, 8); + if (s->gzhead == Z_NULL) { + put_byte(s, 0); + put_byte(s, 0); + put_byte(s, 0); + put_byte(s, 0); + put_byte(s, 0); + put_byte(s, s->level == 9 ? 2 : + (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ? + 4 : 0)); + put_byte(s, OS_CODE); + s->status = BUSY_STATE; + } + else { + put_byte(s, (s->gzhead->text ? 1 : 0) + + (s->gzhead->hcrc ? 2 : 0) + + (s->gzhead->extra == Z_NULL ? 0 : 4) + + (s->gzhead->name == Z_NULL ? 0 : 8) + + (s->gzhead->comment == Z_NULL ? 
0 : 16) + ); + put_byte(s, (Byte)(s->gzhead->time & 0xff)); + put_byte(s, (Byte)((s->gzhead->time >> 8) & 0xff)); + put_byte(s, (Byte)((s->gzhead->time >> 16) & 0xff)); + put_byte(s, (Byte)((s->gzhead->time >> 24) & 0xff)); + put_byte(s, s->level == 9 ? 2 : + (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ? + 4 : 0)); + put_byte(s, s->gzhead->os & 0xff); + if (s->gzhead->extra != Z_NULL) { + put_byte(s, s->gzhead->extra_len & 0xff); + put_byte(s, (s->gzhead->extra_len >> 8) & 0xff); + } + if (s->gzhead->hcrc) + strm->adler = crc32(strm->adler, s->pending_buf, + s->pending); + s->gzindex = 0; + s->status = EXTRA_STATE; + } + } + else +#endif + { + uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8; + uInt level_flags; + + if (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2) + level_flags = 0; + else if (s->level < 6) + level_flags = 1; + else if (s->level == 6) + level_flags = 2; + else + level_flags = 3; + header |= (level_flags << 6); + if (s->strstart != 0) header |= PRESET_DICT; + header += 31 - (header % 31); + + s->status = BUSY_STATE; + putShortMSB(s, header); + + /* Save the adler32 of the preset dictionary: */ + if (s->strstart != 0) { + putShortMSB(s, (uInt)(strm->adler >> 16)); + putShortMSB(s, (uInt)(strm->adler & 0xffff)); + } + strm->adler = adler32(0L, Z_NULL, 0); + } + } +#ifdef GZIP + if (s->status == EXTRA_STATE) { + if (s->gzhead->extra != Z_NULL) { + uInt beg = s->pending; /* start of bytes to update crc */ + + while (s->gzindex < (s->gzhead->extra_len & 0xffff)) { + if (s->pending == s->pending_buf_size) { + if (s->gzhead->hcrc && s->pending > beg) + strm->adler = crc32(strm->adler, s->pending_buf + beg, + s->pending - beg); + flush_pending(strm); + beg = s->pending; + if (s->pending == s->pending_buf_size) + break; + } + put_byte(s, s->gzhead->extra[s->gzindex]); + s->gzindex++; + } + if (s->gzhead->hcrc && s->pending > beg) + strm->adler = crc32(strm->adler, s->pending_buf + beg, + s->pending - beg); + if (s->gzindex == s->gzhead->extra_len) { 
+ s->gzindex = 0; + s->status = NAME_STATE; + } + } + else + s->status = NAME_STATE; + } + if (s->status == NAME_STATE) { + if (s->gzhead->name != Z_NULL) { + uInt beg = s->pending; /* start of bytes to update crc */ + int val; + + do { + if (s->pending == s->pending_buf_size) { + if (s->gzhead->hcrc && s->pending > beg) + strm->adler = crc32(strm->adler, s->pending_buf + beg, + s->pending - beg); + flush_pending(strm); + beg = s->pending; + if (s->pending == s->pending_buf_size) { + val = 1; + break; + } + } + val = s->gzhead->name[s->gzindex++]; + put_byte(s, val); + } while (val != 0); + if (s->gzhead->hcrc && s->pending > beg) + strm->adler = crc32(strm->adler, s->pending_buf + beg, + s->pending - beg); + if (val == 0) { + s->gzindex = 0; + s->status = COMMENT_STATE; + } + } + else + s->status = COMMENT_STATE; + } + if (s->status == COMMENT_STATE) { + if (s->gzhead->comment != Z_NULL) { + uInt beg = s->pending; /* start of bytes to update crc */ + int val; + + do { + if (s->pending == s->pending_buf_size) { + if (s->gzhead->hcrc && s->pending > beg) + strm->adler = crc32(strm->adler, s->pending_buf + beg, + s->pending - beg); + flush_pending(strm); + beg = s->pending; + if (s->pending == s->pending_buf_size) { + val = 1; + break; + } + } + val = s->gzhead->comment[s->gzindex++]; + put_byte(s, val); + } while (val != 0); + if (s->gzhead->hcrc && s->pending > beg) + strm->adler = crc32(strm->adler, s->pending_buf + beg, + s->pending - beg); + if (val == 0) + s->status = HCRC_STATE; + } + else + s->status = HCRC_STATE; + } + if (s->status == HCRC_STATE) { + if (s->gzhead->hcrc) { + if (s->pending + 2 > s->pending_buf_size) + flush_pending(strm); + if (s->pending + 2 <= s->pending_buf_size) { + put_byte(s, (Byte)(strm->adler & 0xff)); + put_byte(s, (Byte)((strm->adler >> 8) & 0xff)); + strm->adler = crc32(0L, Z_NULL, 0); + s->status = BUSY_STATE; + } + } + else + s->status = BUSY_STATE; + } +#endif + + /* Flush as much pending output as possible */ + if (s->pending 
!= 0) { + flush_pending(strm); + if (strm->avail_out == 0) { + /* Since avail_out is 0, deflate will be called again with + * more output space, but possibly with both pending and + * avail_in equal to zero. There won't be anything to do, + * but this is not an error situation so make sure we + * return OK instead of BUF_ERROR at next call of deflate: + */ + s->last_flush = -1; + return Z_OK; + } + + /* Make sure there is something to do and avoid duplicate consecutive + * flushes. For repeated and useless calls with Z_FINISH, we keep + * returning Z_STREAM_END instead of Z_BUF_ERROR. + */ + } else if (strm->avail_in == 0 && RANK(flush) <= RANK(old_flush) && + flush != Z_FINISH) { + ERR_RETURN(strm, Z_BUF_ERROR); + } + + /* User must not provide more input after the first FINISH: */ + if (s->status == FINISH_STATE && strm->avail_in != 0) { + ERR_RETURN(strm, Z_BUF_ERROR); + } + + /* Start a new block or continue the current one. + */ + if (strm->avail_in != 0 || s->lookahead != 0 || + (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) { + block_state bstate; + + bstate = s->strategy == Z_HUFFMAN_ONLY ? deflate_huff(s, flush) : + (s->strategy == Z_RLE ? deflate_rle(s, flush) : + (*(configuration_table[s->level].func))(s, flush)); + + if (bstate == finish_started || bstate == finish_done) { + s->status = FINISH_STATE; + } + if (bstate == need_more || bstate == finish_started) { + if (strm->avail_out == 0) { + s->last_flush = -1; /* avoid BUF_ERROR next call, see above */ + } + return Z_OK; + /* If flush != Z_NO_FLUSH && avail_out == 0, the next call + * of deflate should use the same flush parameter to make sure + * that the flush is complete. So we don't have to output an + * empty block here, this will be done at next call. This also + * ensures that for a very small output buffer, we emit at most + * one empty block. 
+ */ + } + if (bstate == block_done) { + if (flush == Z_PARTIAL_FLUSH) { + _tr_align(s); + } else if (flush != Z_BLOCK) { /* FULL_FLUSH or SYNC_FLUSH */ + _tr_stored_block(s, (char*)0, 0L, 0); + /* For a full flush, this empty block will be recognized + * as a special marker by inflate_sync(). + */ + if (flush == Z_FULL_FLUSH) { + CLEAR_HASH(s); /* forget history */ + if (s->lookahead == 0) { + s->strstart = 0; + s->block_start = 0L; + s->insert = 0; + } + } + } + flush_pending(strm); + if (strm->avail_out == 0) { + s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */ + return Z_OK; + } + } + } + Assert(strm->avail_out > 0, "bug2"); + + if (flush != Z_FINISH) return Z_OK; + if (s->wrap <= 0) return Z_STREAM_END; + + /* Write the trailer */ +#ifdef GZIP + if (s->wrap == 2) { + put_byte(s, (Byte)(strm->adler & 0xff)); + put_byte(s, (Byte)((strm->adler >> 8) & 0xff)); + put_byte(s, (Byte)((strm->adler >> 16) & 0xff)); + put_byte(s, (Byte)((strm->adler >> 24) & 0xff)); + put_byte(s, (Byte)(strm->total_in & 0xff)); + put_byte(s, (Byte)((strm->total_in >> 8) & 0xff)); + put_byte(s, (Byte)((strm->total_in >> 16) & 0xff)); + put_byte(s, (Byte)((strm->total_in >> 24) & 0xff)); + } + else +#endif + { + putShortMSB(s, (uInt)(strm->adler >> 16)); + putShortMSB(s, (uInt)(strm->adler & 0xffff)); + } + flush_pending(strm); + /* If avail_out is zero, the application will call deflate again + * to flush the rest. + */ + if (s->wrap > 0) s->wrap = -s->wrap; /* write the trailer only once! */ + return s->pending != 0 ? 
Z_OK : Z_STREAM_END; +} + +/* ========================================================================= */ +int ZEXPORT deflateEnd (strm) + z_streamp strm; +{ + int status; + + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + + status = strm->state->status; + if (status != INIT_STATE && + status != EXTRA_STATE && + status != NAME_STATE && + status != COMMENT_STATE && + status != HCRC_STATE && + status != BUSY_STATE && + status != FINISH_STATE) { + return Z_STREAM_ERROR; + } + + /* Deallocate in reverse order of allocations: */ + TRY_FREE(strm, strm->state->pending_buf); + TRY_FREE(strm, strm->state->head); + TRY_FREE(strm, strm->state->prev); + TRY_FREE(strm, strm->state->window); + + ZFREE(strm, strm->state); + strm->state = Z_NULL; + + return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK; +} + +/* ========================================================================= + * Copy the source state to the destination state. + * To simplify the source, this is not supported for 16-bit MSDOS (which + * doesn't have enough memory anyway to duplicate compression states). 
+ */ +int ZEXPORT deflateCopy (dest, source) + z_streamp dest; + z_streamp source; +{ +#ifdef MAXSEG_64K + return Z_STREAM_ERROR; +#else + deflate_state *ds; + deflate_state *ss; + ushf *overlay; + + + if (source == Z_NULL || dest == Z_NULL || source->state == Z_NULL) { + return Z_STREAM_ERROR; + } + + ss = source->state; + + zmemcpy((voidpf)dest, (voidpf)source, sizeof(z_stream)); + + ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state)); + if (ds == Z_NULL) return Z_MEM_ERROR; + dest->state = (struct internal_state FAR *) ds; + zmemcpy((voidpf)ds, (voidpf)ss, sizeof(deflate_state)); + ds->strm = dest; + + ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte)); + ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos)); + ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos)); + overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2); + ds->pending_buf = (uchf *) overlay; + + if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL || + ds->pending_buf == Z_NULL) { + deflateEnd (dest); + return Z_MEM_ERROR; + } + /* following zmemcpy do not work for 16-bit MSDOS */ + zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte)); + zmemcpy((voidpf)ds->prev, (voidpf)ss->prev, ds->w_size * sizeof(Pos)); + zmemcpy((voidpf)ds->head, (voidpf)ss->head, ds->hash_size * sizeof(Pos)); + zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size); + + ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf); + ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush); + ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize; + + ds->l_desc.dyn_tree = ds->dyn_ltree; + ds->d_desc.dyn_tree = ds->dyn_dtree; + ds->bl_desc.dyn_tree = ds->bl_tree; + + return Z_OK; +#endif /* MAXSEG_64K */ +} + +/* =========================================================================== + * Read a new buffer from the current input stream, update the adler32 + * and total number of bytes read. 
All deflate() input goes through + * this function so some applications may wish to modify it to avoid + * allocating a large strm->next_in buffer and copying from it. + * (See also flush_pending()). + */ +local int read_buf(strm, buf, size) + z_streamp strm; + Bytef *buf; + unsigned size; +{ + unsigned len = strm->avail_in; + + if (len > size) len = size; + if (len == 0) return 0; + + strm->avail_in -= len; + + zmemcpy(buf, strm->next_in, len); + if (strm->state->wrap == 1) { + strm->adler = adler32(strm->adler, buf, len); + } +#ifdef GZIP + else if (strm->state->wrap == 2) { + strm->adler = crc32(strm->adler, buf, len); + } +#endif + strm->next_in += len; + strm->total_in += len; + + return (int)len; +} + +/* =========================================================================== + * Initialize the "longest match" routines for a new zlib stream + */ +local void lm_init (s) + deflate_state *s; +{ + s->window_size = (ulg)2L*s->w_size; + + CLEAR_HASH(s); + + /* Set the default configuration parameters: + */ + s->max_lazy_match = configuration_table[s->level].max_lazy; + s->good_match = configuration_table[s->level].good_length; + s->nice_match = configuration_table[s->level].nice_length; + s->max_chain_length = configuration_table[s->level].max_chain; + + s->strstart = 0; + s->block_start = 0L; + s->lookahead = 0; + s->insert = 0; + s->match_length = s->prev_length = MIN_MATCH-1; + s->match_available = 0; + s->ins_h = 0; +#ifndef FASTEST +#ifdef ASMV + match_init(); /* initialize the asm code */ +#endif +#endif +} + +#ifndef FASTEST +/* =========================================================================== + * Set match_start to the longest match starting at the given string and + * return its length. Matches shorter or equal to prev_length are discarded, + * in which case the result is equal to prev_length and match_start is + * garbage. 
+ * IN assertions: cur_match is the head of the hash chain for the current + * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1 + * OUT assertion: the match length is not greater than s->lookahead. + */ +#ifndef ASMV +/* For 80x86 and 680x0, an optimized version will be provided in match.asm or + * match.S. The code will be functionally equivalent. + */ +local uInt longest_match(s, cur_match) + deflate_state *s; + IPos cur_match; /* current match */ +{ + unsigned chain_length = s->max_chain_length;/* max hash chain length */ + register Bytef *scan = s->window + s->strstart; /* current string */ + register Bytef *match; /* matched string */ + register int len; /* length of current match */ + int best_len = s->prev_length; /* best match length so far */ + int nice_match = s->nice_match; /* stop if match long enough */ + IPos limit = s->strstart > (IPos)MAX_DIST(s) ? + s->strstart - (IPos)MAX_DIST(s) : NIL; + /* Stop when cur_match becomes <= limit. To simplify the code, + * we prevent matches with the string of window index 0. + */ + Posf *prev = s->prev; + uInt wmask = s->w_mask; + +#ifdef UNALIGNED_OK + /* Compare two bytes at a time. Note: this is not always beneficial. + * Try with and without -DUNALIGNED_OK to check. + */ + register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1; + register ush scan_start = *(ushf*)scan; + register ush scan_end = *(ushf*)(scan+best_len-1); +#else + register Bytef *strend = s->window + s->strstart + MAX_MATCH; + register Byte scan_end1 = scan[best_len-1]; + register Byte scan_end = scan[best_len]; +#endif + + /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16. + * It is easy to get rid of this optimization if necessary. 
+ */ + Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever"); + + /* Do not waste too much time if we already have a good match: */ + if (s->prev_length >= s->good_match) { + chain_length >>= 2; + } + /* Do not look for matches beyond the end of the input. This is necessary + * to make deflate deterministic. + */ + if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead; + + Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead"); + + do { + Assert(cur_match < s->strstart, "no future"); + match = s->window + cur_match; + + /* Skip to next match if the match length cannot increase + * or if the match length is less than 2. Note that the checks below + * for insufficient lookahead only occur occasionally for performance + * reasons. Therefore uninitialized memory will be accessed, and + * conditional jumps will be made that depend on those values. + * However the length of the match is limited to the lookahead, so + * the output of deflate is not affected by the uninitialized values. + */ +#if (defined(UNALIGNED_OK) && MAX_MATCH == 258) + /* This code assumes sizeof(unsigned short) == 2. Do not use + * UNALIGNED_OK if your compiler uses a different size. + */ + if (*(ushf*)(match+best_len-1) != scan_end || + *(ushf*)match != scan_start) continue; + + /* It is not necessary to compare scan[2] and match[2] since they are + * always equal when the other bytes match, given that the hash keys + * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at + * strstart+3, +5, ... up to strstart+257. We check for insufficient + * lookahead only every 4th comparison; the 128th check will be made + * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is + * necessary to put more guard bytes at the end of the window, or + * to check more often for insufficient lookahead. 
+ */ + Assert(scan[2] == match[2], "scan[2]?"); + scan++, match++; + do { + } while (*(ushf*)(scan+=2) == *(ushf*)(match+=2) && + *(ushf*)(scan+=2) == *(ushf*)(match+=2) && + *(ushf*)(scan+=2) == *(ushf*)(match+=2) && + *(ushf*)(scan+=2) == *(ushf*)(match+=2) && + scan < strend); + /* The funny "do {}" generates better code on most compilers */ + + /* Here, scan <= window+strstart+257 */ + Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); + if (*scan == *match) scan++; + + len = (MAX_MATCH - 1) - (int)(strend-scan); + scan = strend - (MAX_MATCH-1); + +#else /* UNALIGNED_OK */ + + if (match[best_len] != scan_end || + match[best_len-1] != scan_end1 || + *match != *scan || + *++match != scan[1]) continue; + + /* The check at best_len-1 can be removed because it will be made + * again later. (This heuristic is not always a win.) + * It is not necessary to compare scan[2] and match[2] since they + * are always equal when the other bytes match, given that + * the hash keys are equal and that HASH_BITS >= 8. + */ + scan += 2, match++; + Assert(*scan == *match, "match[2]?"); + + /* We check for insufficient lookahead only every 8th comparison; + * the 256th check will be made at strstart+258. 
+ */ + do { + } while (*++scan == *++match && *++scan == *++match && + *++scan == *++match && *++scan == *++match && + *++scan == *++match && *++scan == *++match && + *++scan == *++match && *++scan == *++match && + scan < strend); + + Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); + + len = MAX_MATCH - (int)(strend - scan); + scan = strend - MAX_MATCH; + +#endif /* UNALIGNED_OK */ + + if (len > best_len) { + s->match_start = cur_match; + best_len = len; + if (len >= nice_match) break; +#ifdef UNALIGNED_OK + scan_end = *(ushf*)(scan+best_len-1); +#else + scan_end1 = scan[best_len-1]; + scan_end = scan[best_len]; +#endif + } + } while ((cur_match = prev[cur_match & wmask]) > limit + && --chain_length != 0); + + if ((uInt)best_len <= s->lookahead) return (uInt)best_len; + return s->lookahead; +} +#endif /* ASMV */ + +#else /* FASTEST */ + +/* --------------------------------------------------------------------------- + * Optimized version for FASTEST only + */ +local uInt longest_match(s, cur_match) + deflate_state *s; + IPos cur_match; /* current match */ +{ + register Bytef *scan = s->window + s->strstart; /* current string */ + register Bytef *match; /* matched string */ + register int len; /* length of current match */ + register Bytef *strend = s->window + s->strstart + MAX_MATCH; + + /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16. + * It is easy to get rid of this optimization if necessary. + */ + Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever"); + + Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead"); + + Assert(cur_match < s->strstart, "no future"); + + match = s->window + cur_match; + + /* Return failure if the match length is less than 2: + */ + if (match[0] != scan[0] || match[1] != scan[1]) return MIN_MATCH-1; + + /* The check at best_len-1 can be removed because it will be made + * again later. (This heuristic is not always a win.) 
+ * It is not necessary to compare scan[2] and match[2] since they + * are always equal when the other bytes match, given that + * the hash keys are equal and that HASH_BITS >= 8. + */ + scan += 2, match += 2; + Assert(*scan == *match, "match[2]?"); + + /* We check for insufficient lookahead only every 8th comparison; + * the 256th check will be made at strstart+258. + */ + do { + } while (*++scan == *++match && *++scan == *++match && + *++scan == *++match && *++scan == *++match && + *++scan == *++match && *++scan == *++match && + *++scan == *++match && *++scan == *++match && + scan < strend); + + Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); + + len = MAX_MATCH - (int)(strend - scan); + + if (len < MIN_MATCH) return MIN_MATCH - 1; + + s->match_start = cur_match; + return (uInt)len <= s->lookahead ? (uInt)len : s->lookahead; +} + +#endif /* FASTEST */ + +#ifdef DEBUG +/* =========================================================================== + * Check that the match at match_start is indeed a match. + */ +local void check_match(s, start, match, length) + deflate_state *s; + IPos start, match; + int length; +{ + /* check that the match is indeed a match */ + if (zmemcmp(s->window + match, + s->window + start, length) != EQUAL) { + fprintf(stderr, " start %u, match %u, length %d\n", + start, match, length); + do { + fprintf(stderr, "%c%c", s->window[match++], s->window[start++]); + } while (--length != 0); + z_error("invalid match"); + } + if (z_verbose > 1) { + fprintf(stderr,"\\[%d,%d]", start-match, length); + do { putc(s->window[start++], stderr); } while (--length != 0); + } +} +#else +# define check_match(s, start, match, length) +#endif /* DEBUG */ + +/* =========================================================================== + * Fill the window when the lookahead becomes insufficient. + * Updates strstart and lookahead. 
+ * + * IN assertion: lookahead < MIN_LOOKAHEAD + * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD + * At least one byte has been read, or avail_in == 0; reads are + * performed for at least two bytes (required for the zip translate_eol + * option -- not supported here). + */ +local void fill_window(s) + deflate_state *s; +{ + register unsigned n, m; + register Posf *p; + unsigned more; /* Amount of free space at the end of the window. */ + uInt wsize = s->w_size; + + Assert(s->lookahead < MIN_LOOKAHEAD, "already enough lookahead"); + + do { + more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart); + + /* Deal with !@#$% 64K limit: */ + if (sizeof(int) <= 2) { + if (more == 0 && s->strstart == 0 && s->lookahead == 0) { + more = wsize; + + } else if (more == (unsigned)(-1)) { + /* Very unlikely, but possible on 16 bit machine if + * strstart == 0 && lookahead == 1 (input done a byte at time) + */ + more--; + } + } + + /* If the window is almost full and there is insufficient lookahead, + * move the upper half to the lower one to make room in the upper half. + */ + if (s->strstart >= wsize+MAX_DIST(s)) { + + zmemcpy(s->window, s->window+wsize, (unsigned)wsize); + s->match_start -= wsize; + s->strstart -= wsize; /* we now have strstart >= MAX_DIST */ + s->block_start -= (long) wsize; + + /* Slide the hash table (could be avoided with 32 bit values + at the expense of memory usage). We slide even when level == 0 + to keep the hash table consistent if we switch back to level > 0 + later. (Using level 0 permanently is not an optimal usage of + zlib, so we don't care about this pathological case.) + */ + n = s->hash_size; + p = &s->head[n]; + do { + m = *--p; + *p = (Pos)(m >= wsize ? m-wsize : NIL); + } while (--n); + + n = wsize; +#ifndef FASTEST + p = &s->prev[n]; + do { + m = *--p; + *p = (Pos)(m >= wsize ? m-wsize : NIL); + /* If n is not on any hash chain, prev[n] is garbage but + * its value will never be used. 
+ */ + } while (--n); +#endif + more += wsize; + } + if (s->strm->avail_in == 0) break; + + /* If there was no sliding: + * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 && + * more == window_size - lookahead - strstart + * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1) + * => more >= window_size - 2*WSIZE + 2 + * In the BIG_MEM or MMAP case (not yet supported), + * window_size == input_size + MIN_LOOKAHEAD && + * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD. + * Otherwise, window_size == 2*WSIZE so more >= 2. + * If there was sliding, more >= WSIZE. So in all cases, more >= 2. + */ + Assert(more >= 2, "more < 2"); + + n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more); + s->lookahead += n; + + /* Initialize the hash value now that we have some input: */ + if (s->lookahead + s->insert >= MIN_MATCH) { + uInt str = s->strstart - s->insert; + s->ins_h = s->window[str]; + UPDATE_HASH(s, s->ins_h, s->window[str + 1]); +#if MIN_MATCH != 3 + Call UPDATE_HASH() MIN_MATCH-3 more times +#endif + while (s->insert) { + UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); +#ifndef FASTEST + s->prev[str & s->w_mask] = s->head[s->ins_h]; +#endif + s->head[s->ins_h] = (Pos)str; + str++; + s->insert--; + if (s->lookahead + s->insert < MIN_MATCH) + break; + } + } + /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage, + * but this is not important since only literal bytes will be emitted. + */ + + } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0); + + /* If the WIN_INIT bytes after the end of the current data have never been + * written, then zero those bytes in order to avoid memory check reports of + * the use of uninitialized (or uninitialised as Julian writes) bytes by + * the longest match routines. Update the high water mark for the next + * time through here. 
WIN_INIT is set to MAX_MATCH since the longest match + * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead. + */ + if (s->high_water < s->window_size) { + ulg curr = s->strstart + (ulg)(s->lookahead); + ulg init; + + if (s->high_water < curr) { + /* Previous high water mark below current data -- zero WIN_INIT + * bytes or up to end of window, whichever is less. + */ + init = s->window_size - curr; + if (init > WIN_INIT) + init = WIN_INIT; + zmemzero(s->window + curr, (unsigned)init); + s->high_water = curr + init; + } + else if (s->high_water < (ulg)curr + WIN_INIT) { + /* High water mark at or above current data, but below current data + * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up + * to end of window, whichever is less. + */ + init = (ulg)curr + WIN_INIT - s->high_water; + if (init > s->window_size - s->high_water) + init = s->window_size - s->high_water; + zmemzero(s->window + s->high_water, (unsigned)init); + s->high_water += init; + } + } + + Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD, + "not enough room for search"); +} + +/* =========================================================================== + * Flush the current block, with given end-of-file flag. + * IN assertion: strstart is set to the end of the current match. + */ +#define FLUSH_BLOCK_ONLY(s, last) { \ + _tr_flush_block(s, (s->block_start >= 0L ? \ + (charf *)&s->window[(unsigned)s->block_start] : \ + (charf *)Z_NULL), \ + (ulg)((long)s->strstart - s->block_start), \ + (last)); \ + s->block_start = s->strstart; \ + flush_pending(s->strm); \ + Tracev((stderr,"[FLUSH]")); \ +} + +/* Same but force premature exit if necessary. */ +#define FLUSH_BLOCK(s, last) { \ + FLUSH_BLOCK_ONLY(s, last); \ + if (s->strm->avail_out == 0) return (last) ? 
finish_started : need_more; \ +} + +/* =========================================================================== + * Copy without compression as much as possible from the input stream, return + * the current block state. + * This function does not insert new strings in the dictionary since + * uncompressible data is probably not useful. This function is used + * only for the level=0 compression option. + * NOTE: this function should be optimized to avoid extra copying from + * window to pending_buf. + */ +local block_state deflate_stored(s, flush) + deflate_state *s; + int flush; +{ + /* Stored blocks are limited to 0xffff bytes, pending_buf is limited + * to pending_buf_size, and each stored block has a 5 byte header: + */ + ulg max_block_size = 0xffff; + ulg max_start; + + if (max_block_size > s->pending_buf_size - 5) { + max_block_size = s->pending_buf_size - 5; + } + + /* Copy as much as possible from input to output: */ + for (;;) { + /* Fill the window as much as possible: */ + if (s->lookahead <= 1) { + + Assert(s->strstart < s->w_size+MAX_DIST(s) || + s->block_start >= (long)s->w_size, "slide too late"); + + fill_window(s); + if (s->lookahead == 0 && flush == Z_NO_FLUSH) return need_more; + + if (s->lookahead == 0) break; /* flush the current block */ + } + Assert(s->block_start >= 0L, "block gone"); + + s->strstart += s->lookahead; + s->lookahead = 0; + + /* Emit a stored block if pending_buf will be full: */ + max_start = s->block_start + max_block_size; + if (s->strstart == 0 || (ulg)s->strstart >= max_start) { + /* strstart == 0 is possible when wraparound on 16-bit machine */ + s->lookahead = (uInt)(s->strstart - max_start); + s->strstart = (uInt)max_start; + FLUSH_BLOCK(s, 0); + } + /* Flush if we may have to slide, otherwise block_start may become + * negative and the data will be gone: + */ + if (s->strstart - (uInt)s->block_start >= MAX_DIST(s)) { + FLUSH_BLOCK(s, 0); + } + } + s->insert = 0; + if (flush == Z_FINISH) { + FLUSH_BLOCK(s, 1); + 
return finish_done; + } + if ((long)s->strstart > s->block_start) + FLUSH_BLOCK(s, 0); + return block_done; +} + +/* =========================================================================== + * Compress as much as possible from the input stream, return the current + * block state. + * This function does not perform lazy evaluation of matches and inserts + * new strings in the dictionary only for unmatched strings or for short + * matches. It is used only for the fast compression options. + */ +local block_state deflate_fast(s, flush) + deflate_state *s; + int flush; +{ + IPos hash_head; /* head of the hash chain */ + int bflush; /* set if current block must be flushed */ + + for (;;) { + /* Make sure that we always have enough lookahead, except + * at the end of the input file. We need MAX_MATCH bytes + * for the next match, plus MIN_MATCH bytes to insert the + * string following the next match. + */ + if (s->lookahead < MIN_LOOKAHEAD) { + fill_window(s); + if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) { + return need_more; + } + if (s->lookahead == 0) break; /* flush the current block */ + } + + /* Insert the string window[strstart .. strstart+2] in the + * dictionary, and set hash_head to the head of the hash chain: + */ + hash_head = NIL; + if (s->lookahead >= MIN_MATCH) { + INSERT_STRING(s, s->strstart, hash_head); + } + + /* Find the longest match, discarding those <= prev_length. + * At this point we have always match_length < MIN_MATCH + */ + if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) { + /* To simplify the code, we prevent matches with the string + * of window index 0 (in particular we have to avoid a match + * of the string with itself at the start of the input file). 
+ */ + s->match_length = longest_match (s, hash_head); + /* longest_match() sets match_start */ + } + if (s->match_length >= MIN_MATCH) { + check_match(s, s->strstart, s->match_start, s->match_length); + + _tr_tally_dist(s, s->strstart - s->match_start, + s->match_length - MIN_MATCH, bflush); + + s->lookahead -= s->match_length; + + /* Insert new strings in the hash table only if the match length + * is not too large. This saves time but degrades compression. + */ +#ifndef FASTEST + if (s->match_length <= s->max_insert_length && + s->lookahead >= MIN_MATCH) { + s->match_length--; /* string at strstart already in table */ + do { + s->strstart++; + INSERT_STRING(s, s->strstart, hash_head); + /* strstart never exceeds WSIZE-MAX_MATCH, so there are + * always MIN_MATCH bytes ahead. + */ + } while (--s->match_length != 0); + s->strstart++; + } else +#endif + { + s->strstart += s->match_length; + s->match_length = 0; + s->ins_h = s->window[s->strstart]; + UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]); +#if MIN_MATCH != 3 + Call UPDATE_HASH() MIN_MATCH-3 more times +#endif + /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not + * matter since it will be recomputed at next deflate call. + */ + } + } else { + /* No match, output a literal byte */ + Tracevv((stderr,"%c", s->window[s->strstart])); + _tr_tally_lit (s, s->window[s->strstart], bflush); + s->lookahead--; + s->strstart++; + } + if (bflush) FLUSH_BLOCK(s, 0); + } + s->insert = s->strstart < MIN_MATCH-1 ? s->strstart : MIN_MATCH-1; + if (flush == Z_FINISH) { + FLUSH_BLOCK(s, 1); + return finish_done; + } + if (s->last_lit) + FLUSH_BLOCK(s, 0); + return block_done; +} + +#ifndef FASTEST +/* =========================================================================== + * Same as above, but achieves better compression. We use a lazy + * evaluation for matches: a match is finally adopted only if there is + * no better match at the next window position. 
+ */ +local block_state deflate_slow(s, flush) + deflate_state *s; + int flush; +{ + IPos hash_head; /* head of hash chain */ + int bflush; /* set if current block must be flushed */ + + /* Process the input block. */ + for (;;) { + /* Make sure that we always have enough lookahead, except + * at the end of the input file. We need MAX_MATCH bytes + * for the next match, plus MIN_MATCH bytes to insert the + * string following the next match. + */ + if (s->lookahead < MIN_LOOKAHEAD) { + fill_window(s); + if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) { + return need_more; + } + if (s->lookahead == 0) break; /* flush the current block */ + } + + /* Insert the string window[strstart .. strstart+2] in the + * dictionary, and set hash_head to the head of the hash chain: + */ + hash_head = NIL; + if (s->lookahead >= MIN_MATCH) { + INSERT_STRING(s, s->strstart, hash_head); + } + + /* Find the longest match, discarding those <= prev_length. + */ + s->prev_length = s->match_length, s->prev_match = s->match_start; + s->match_length = MIN_MATCH-1; + + if (hash_head != NIL && s->prev_length < s->max_lazy_match && + s->strstart - hash_head <= MAX_DIST(s)) { + /* To simplify the code, we prevent matches with the string + * of window index 0 (in particular we have to avoid a match + * of the string with itself at the start of the input file). + */ + s->match_length = longest_match (s, hash_head); + /* longest_match() sets match_start */ + + if (s->match_length <= 5 && (s->strategy == Z_FILTERED +#if TOO_FAR <= 32767 + || (s->match_length == MIN_MATCH && + s->strstart - s->match_start > TOO_FAR) +#endif + )) { + + /* If prev_match is also MIN_MATCH, match_start is garbage + * but we will ignore the current match anyway. 
+ */ + s->match_length = MIN_MATCH-1; + } + } + /* If there was a match at the previous step and the current + * match is not better, output the previous match: + */ + if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) { + uInt max_insert = s->strstart + s->lookahead - MIN_MATCH; + /* Do not insert strings in hash table beyond this. */ + + check_match(s, s->strstart-1, s->prev_match, s->prev_length); + + _tr_tally_dist(s, s->strstart -1 - s->prev_match, + s->prev_length - MIN_MATCH, bflush); + + /* Insert in hash table all strings up to the end of the match. + * strstart-1 and strstart are already inserted. If there is not + * enough lookahead, the last two strings are not inserted in + * the hash table. + */ + s->lookahead -= s->prev_length-1; + s->prev_length -= 2; + do { + if (++s->strstart <= max_insert) { + INSERT_STRING(s, s->strstart, hash_head); + } + } while (--s->prev_length != 0); + s->match_available = 0; + s->match_length = MIN_MATCH-1; + s->strstart++; + + if (bflush) FLUSH_BLOCK(s, 0); + + } else if (s->match_available) { + /* If there was no match at the previous position, output a + * single literal. If there was a match but the current match + * is longer, truncate the previous match to a single literal. + */ + Tracevv((stderr,"%c", s->window[s->strstart-1])); + _tr_tally_lit(s, s->window[s->strstart-1], bflush); + if (bflush) { + FLUSH_BLOCK_ONLY(s, 0); + } + s->strstart++; + s->lookahead--; + if (s->strm->avail_out == 0) return need_more; + } else { + /* There is no previous match to compare with, wait for + * the next step to decide. + */ + s->match_available = 1; + s->strstart++; + s->lookahead--; + } + } + Assert (flush != Z_NO_FLUSH, "no flush?"); + if (s->match_available) { + Tracevv((stderr,"%c", s->window[s->strstart-1])); + _tr_tally_lit(s, s->window[s->strstart-1], bflush); + s->match_available = 0; + } + s->insert = s->strstart < MIN_MATCH-1 ? 
s->strstart : MIN_MATCH-1; + if (flush == Z_FINISH) { + FLUSH_BLOCK(s, 1); + return finish_done; + } + if (s->last_lit) + FLUSH_BLOCK(s, 0); + return block_done; +} +#endif /* FASTEST */ + +/* =========================================================================== + * For Z_RLE, simply look for runs of bytes, generate matches only of distance + * one. Do not maintain a hash table. (It will be regenerated if this run of + * deflate switches away from Z_RLE.) + */ +local block_state deflate_rle(s, flush) + deflate_state *s; + int flush; +{ + int bflush; /* set if current block must be flushed */ + uInt prev; /* byte at distance one to match */ + Bytef *scan, *strend; /* scan goes up to strend for length of run */ + + for (;;) { + /* Make sure that we always have enough lookahead, except + * at the end of the input file. We need MAX_MATCH bytes + * for the longest run, plus one for the unrolled loop. + */ + if (s->lookahead <= MAX_MATCH) { + fill_window(s); + if (s->lookahead <= MAX_MATCH && flush == Z_NO_FLUSH) { + return need_more; + } + if (s->lookahead == 0) break; /* flush the current block */ + } + + /* See how many times the previous byte repeats */ + s->match_length = 0; + if (s->lookahead >= MIN_MATCH && s->strstart > 0) { + scan = s->window + s->strstart - 1; + prev = *scan; + if (prev == *++scan && prev == *++scan && prev == *++scan) { + strend = s->window + s->strstart + MAX_MATCH; + do { + } while (prev == *++scan && prev == *++scan && + prev == *++scan && prev == *++scan && + prev == *++scan && prev == *++scan && + prev == *++scan && prev == *++scan && + scan < strend); + s->match_length = MAX_MATCH - (int)(strend - scan); + if (s->match_length > s->lookahead) + s->match_length = s->lookahead; + } + Assert(scan <= s->window+(uInt)(s->window_size-1), "wild scan"); + } + + /* Emit match if have run of MIN_MATCH or longer, else emit literal */ + if (s->match_length >= MIN_MATCH) { + check_match(s, s->strstart, s->strstart - 1, s->match_length); + + 
_tr_tally_dist(s, 1, s->match_length - MIN_MATCH, bflush); + + s->lookahead -= s->match_length; + s->strstart += s->match_length; + s->match_length = 0; + } else { + /* No match, output a literal byte */ + Tracevv((stderr,"%c", s->window[s->strstart])); + _tr_tally_lit (s, s->window[s->strstart], bflush); + s->lookahead--; + s->strstart++; + } + if (bflush) FLUSH_BLOCK(s, 0); + } + s->insert = 0; + if (flush == Z_FINISH) { + FLUSH_BLOCK(s, 1); + return finish_done; + } + if (s->last_lit) + FLUSH_BLOCK(s, 0); + return block_done; +} + +/* =========================================================================== + * For Z_HUFFMAN_ONLY, do not look for matches. Do not maintain a hash table. + * (It will be regenerated if this run of deflate switches away from Huffman.) + */ +local block_state deflate_huff(s, flush) + deflate_state *s; + int flush; +{ + int bflush; /* set if current block must be flushed */ + + for (;;) { + /* Make sure that we have a literal to write. */ + if (s->lookahead == 0) { + fill_window(s); + if (s->lookahead == 0) { + if (flush == Z_NO_FLUSH) + return need_more; + break; /* flush the current block */ + } + } + + /* Output a literal byte */ + s->match_length = 0; + Tracevv((stderr,"%c", s->window[s->strstart])); + _tr_tally_lit (s, s->window[s->strstart], bflush); + s->lookahead--; + s->strstart++; + if (bflush) FLUSH_BLOCK(s, 0); + } + s->insert = 0; + if (flush == Z_FINISH) { + FLUSH_BLOCK(s, 1); + return finish_done; + } + if (s->last_lit) + FLUSH_BLOCK(s, 0); + return block_done; +} diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/deflate.h cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/deflate.h --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/deflate.h 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/deflate.h 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,346 @@ +/* deflate.h -- internal compression state + * Copyright (C) 1995-2012 Jean-loup Gailly + * For conditions of 
distribution and use, see copyright notice in zlib.h + */ + +/* WARNING: this file should *not* be used by applications. It is + part of the implementation of the compression library and is + subject to change. Applications should only use zlib.h. + */ + +/* @(#) $Id$ */ + +#ifndef DEFLATE_H +#define DEFLATE_H + +#include "zutil.h" + +/* define NO_GZIP when compiling if you want to disable gzip header and + trailer creation by deflate(). NO_GZIP would be used to avoid linking in + the crc code when it is not needed. For shared libraries, gzip encoding + should be left enabled. */ +#ifndef NO_GZIP +# define GZIP +#endif + +/* =========================================================================== + * Internal compression state. + */ + +#define LENGTH_CODES 29 +/* number of length codes, not counting the special END_BLOCK code */ + +#define LITERALS 256 +/* number of literal bytes 0..255 */ + +#define L_CODES (LITERALS+1+LENGTH_CODES) +/* number of Literal or Length codes, including the END_BLOCK code */ + +#define D_CODES 30 +/* number of distance codes */ + +#define BL_CODES 19 +/* number of codes used to transfer the bit lengths */ + +#define HEAP_SIZE (2*L_CODES+1) +/* maximum heap size */ + +#define MAX_BITS 15 +/* All codes must not exceed MAX_BITS bits */ + +#define Buf_size 16 +/* size of bit buffer in bi_buf */ + +#define INIT_STATE 42 +#define EXTRA_STATE 69 +#define NAME_STATE 73 +#define COMMENT_STATE 91 +#define HCRC_STATE 103 +#define BUSY_STATE 113 +#define FINISH_STATE 666 +/* Stream status */ + + +/* Data structure describing a single value and its code string. 
*/ +typedef struct ct_data_s { + union { + ush freq; /* frequency count */ + ush code; /* bit string */ + } fc; + union { + ush dad; /* father node in Huffman tree */ + ush len; /* length of bit string */ + } dl; +} FAR ct_data; + +#define Freq fc.freq +#define Code fc.code +#define Dad dl.dad +#define Len dl.len + +typedef struct static_tree_desc_s static_tree_desc; + +typedef struct tree_desc_s { + ct_data *dyn_tree; /* the dynamic tree */ + int max_code; /* largest code with non zero frequency */ + static_tree_desc *stat_desc; /* the corresponding static tree */ +} FAR tree_desc; + +typedef ush Pos; +typedef Pos FAR Posf; +typedef unsigned IPos; + +/* A Pos is an index in the character window. We use short instead of int to + * save space in the various tables. IPos is used only for parameter passing. + */ + +typedef struct internal_state { + z_streamp strm; /* pointer back to this zlib stream */ + int status; /* as the name implies */ + Bytef *pending_buf; /* output still pending */ + ulg pending_buf_size; /* size of pending_buf */ + Bytef *pending_out; /* next pending byte to output to the stream */ + uInt pending; /* nb of bytes in the pending buffer */ + int wrap; /* bit 0 true for zlib, bit 1 true for gzip */ + gz_headerp gzhead; /* gzip header information to write */ + uInt gzindex; /* where in extra, name, or comment */ + Byte method; /* can only be DEFLATED */ + int last_flush; /* value of flush param for previous deflate call */ + + /* used by deflate.c: */ + + uInt w_size; /* LZ77 window size (32K by default) */ + uInt w_bits; /* log2(w_size) (8..16) */ + uInt w_mask; /* w_size - 1 */ + + Bytef *window; + /* Sliding window. Input bytes are read into the second half of the window, + * and move to the first half later to keep a dictionary of at least wSize + * bytes. With this organization, matches are limited to a distance of + * wSize-MAX_MATCH bytes, but this ensures that IO is always + * performed with a length multiple of the block size. 
Also, it limits + * the window size to 64K, which is quite useful on MSDOS. + * To do: use the user input buffer as sliding window. + */ + + ulg window_size; + /* Actual size of window: 2*wSize, except when the user input buffer + * is directly used as sliding window. + */ + + Posf *prev; + /* Link to older string with same hash index. To limit the size of this + * array to 64K, this link is maintained only for the last 32K strings. + * An index in this array is thus a window index modulo 32K. + */ + + Posf *head; /* Heads of the hash chains or NIL. */ + + uInt ins_h; /* hash index of string to be inserted */ + uInt hash_size; /* number of elements in hash table */ + uInt hash_bits; /* log2(hash_size) */ + uInt hash_mask; /* hash_size-1 */ + + uInt hash_shift; + /* Number of bits by which ins_h must be shifted at each input + * step. It must be such that after MIN_MATCH steps, the oldest + * byte no longer takes part in the hash key, that is: + * hash_shift * MIN_MATCH >= hash_bits + */ + + long block_start; + /* Window position at the beginning of the current output block. Gets + * negative when the window is moved backwards. + */ + + uInt match_length; /* length of best match */ + IPos prev_match; /* previous match */ + int match_available; /* set if previous match exists */ + uInt strstart; /* start of string to insert */ + uInt match_start; /* start of matching string */ + uInt lookahead; /* number of valid bytes ahead in window */ + + uInt prev_length; + /* Length of the best match at previous step. Matches not greater than this + * are discarded. This is used in the lazy match evaluation. + */ + + uInt max_chain_length; + /* To speed up deflation, hash chains are never searched beyond this + * length. A higher limit improves compression ratio but degrades the + * speed. + */ + + uInt max_lazy_match; + /* Attempt to find a better match only when the current match is strictly + * smaller than this value. 
This mechanism is used only for compression + * levels >= 4. + */ +# define max_insert_length max_lazy_match + /* Insert new strings in the hash table only if the match length is not + * greater than this length. This saves time but degrades compression. + * max_insert_length is used only for compression levels <= 3. + */ + + int level; /* compression level (1..9) */ + int strategy; /* favor or force Huffman coding*/ + + uInt good_match; + /* Use a faster search when the previous match is longer than this */ + + int nice_match; /* Stop searching when current match exceeds this */ + + /* used by trees.c: */ + /* Didn't use ct_data typedef below to suppress compiler warning */ + struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */ + struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */ + struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */ + + struct tree_desc_s l_desc; /* desc. for literal tree */ + struct tree_desc_s d_desc; /* desc. for distance tree */ + struct tree_desc_s bl_desc; /* desc. for bit length tree */ + + ush bl_count[MAX_BITS+1]; + /* number of codes at each bit length for an optimal tree */ + + int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */ + int heap_len; /* number of elements in the heap */ + int heap_max; /* element of largest frequency */ + /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used. + * The same heap array is used to build all trees. + */ + + uch depth[2*L_CODES+1]; + /* Depth of each subtree used as tie breaker for trees of equal frequency + */ + + uchf *l_buf; /* buffer for literals or lengths */ + + uInt lit_bufsize; + /* Size of match buffer for literals/lengths. 
There are 4 reasons for + * limiting lit_bufsize to 64K: + * - frequencies can be kept in 16 bit counters + * - if compression is not successful for the first block, all input + * data is still in the window so we can still emit a stored block even + * when input comes from standard input. (This can also be done for + * all blocks if lit_bufsize is not greater than 32K.) + * - if compression is not successful for a file smaller than 64K, we can + * even emit a stored file instead of a stored block (saving 5 bytes). + * This is applicable only for zip (not gzip or zlib). + * - creating new Huffman trees less frequently may not provide fast + * adaptation to changes in the input data statistics. (Take for + * example a binary file with poorly compressible code followed by + * a highly compressible string table.) Smaller buffer sizes give + * fast adaptation but have of course the overhead of transmitting + * trees more frequently. + * - I can't count above 4 + */ + + uInt last_lit; /* running index in l_buf */ + + ushf *d_buf; + /* Buffer for distances. To simplify the code, d_buf and l_buf have + * the same number of elements. To use different lengths, an extra flag + * array would be necessary. + */ + + ulg opt_len; /* bit length of current block with optimal trees */ + ulg static_len; /* bit length of current block with static trees */ + uInt matches; /* number of string matches in current block */ + uInt insert; /* bytes at end of window left to insert */ + +#ifdef DEBUG + ulg compressed_len; /* total bit length of compressed file mod 2^32 */ + ulg bits_sent; /* bit length of compressed data sent mod 2^32 */ +#endif + + ush bi_buf; + /* Output buffer. bits are inserted starting at the bottom (least + * significant bits). + */ + int bi_valid; + /* Number of valid bits in bi_buf. All bits above the last valid bit + * are always zero. 
+ */ + + ulg high_water; + /* High water mark offset in window for initialized bytes -- bytes above + * this are set to zero in order to avoid memory check warnings when + * longest match routines access bytes past the input. This is then + * updated to the new high water mark. + */ + +} FAR deflate_state; + +/* Output a byte on the stream. + * IN assertion: there is enough room in pending_buf. + */ +#define put_byte(s, c) {s->pending_buf[s->pending++] = (c);} + + +#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1) +/* Minimum amount of lookahead, except at the end of the input file. + * See deflate.c for comments about the MIN_MATCH+1. + */ + +#define MAX_DIST(s) ((s)->w_size-MIN_LOOKAHEAD) +/* In order to simplify the code, particularly on 16 bit machines, match + * distances are limited to MAX_DIST instead of WSIZE. + */ + +#define WIN_INIT MAX_MATCH +/* Number of bytes after end of data in window to initialize in order to avoid + memory checker errors from longest match routines */ + + /* in trees.c */ +void ZLIB_INTERNAL _tr_init OF((deflate_state *s)); +int ZLIB_INTERNAL _tr_tally OF((deflate_state *s, unsigned dist, unsigned lc)); +void ZLIB_INTERNAL _tr_flush_block OF((deflate_state *s, charf *buf, + ulg stored_len, int last)); +void ZLIB_INTERNAL _tr_flush_bits OF((deflate_state *s)); +void ZLIB_INTERNAL _tr_align OF((deflate_state *s)); +void ZLIB_INTERNAL _tr_stored_block OF((deflate_state *s, charf *buf, + ulg stored_len, int last)); + +#define d_code(dist) \ + ((dist) < 256 ? _dist_code[dist] : _dist_code[256+((dist)>>7)]) +/* Mapping from a distance to a distance code. dist is the distance - 1 and + * must not have side effects. _dist_code[256] and _dist_code[257] are never + * used. 
+ */ + +#ifndef DEBUG +/* Inline versions of _tr_tally for speed: */ + +#if defined(GEN_TREES_H) || !defined(STDC) + extern uch ZLIB_INTERNAL _length_code[]; + extern uch ZLIB_INTERNAL _dist_code[]; +#else + extern const uch ZLIB_INTERNAL _length_code[]; + extern const uch ZLIB_INTERNAL _dist_code[]; +#endif + +# define _tr_tally_lit(s, c, flush) \ + { uch cc = (c); \ + s->d_buf[s->last_lit] = 0; \ + s->l_buf[s->last_lit++] = cc; \ + s->dyn_ltree[cc].Freq++; \ + flush = (s->last_lit == s->lit_bufsize-1); \ + } +# define _tr_tally_dist(s, distance, length, flush) \ + { uch len = (length); \ + ush dist = (distance); \ + s->d_buf[s->last_lit] = dist; \ + s->l_buf[s->last_lit++] = len; \ + dist--; \ + s->dyn_ltree[_length_code[len]+LITERALS+1].Freq++; \ + s->dyn_dtree[d_code(dist)].Freq++; \ + flush = (s->last_lit == s->lit_bufsize-1); \ + } +#else +# define _tr_tally_lit(s, c, flush) flush = _tr_tally(s, 0, c) +# define _tr_tally_dist(s, distance, length, flush) \ + flush = _tr_tally(s, distance, length) +#endif + +#endif /* DEFLATE_H */ diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/gzguts.h cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/gzguts.h --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/gzguts.h 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/gzguts.h 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,209 @@ +/* gzguts.h -- zlib internal header definitions for gz* operations + * Copyright (C) 2004, 2005, 2010, 2011, 2012, 2013 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +#ifdef _LARGEFILE64_SOURCE +# ifndef _LARGEFILE_SOURCE +# define _LARGEFILE_SOURCE 1 +# endif +# ifdef _FILE_OFFSET_BITS +# undef _FILE_OFFSET_BITS +# endif +#endif + +#ifdef HAVE_HIDDEN +# define ZLIB_INTERNAL __attribute__((visibility ("hidden"))) +#else +# define ZLIB_INTERNAL +#endif + +#include +#include "zlib.h" +#ifdef STDC +# include +# include +# include +#endif +#include + 
+#ifdef _WIN32 +# include +#endif + +#if defined(__TURBOC__) || defined(_MSC_VER) || defined(_WIN32) +# include +#endif + +#ifdef WINAPI_FAMILY +# define open _open +# define read _read +# define write _write +# define close _close +#endif + +#ifdef NO_DEFLATE /* for compatibility with old definition */ +# define NO_GZCOMPRESS +#endif + +#if defined(STDC99) || (defined(__TURBOC__) && __TURBOC__ >= 0x550) +# ifndef HAVE_VSNPRINTF +# define HAVE_VSNPRINTF +# endif +#endif + +#if defined(__CYGWIN__) +# ifndef HAVE_VSNPRINTF +# define HAVE_VSNPRINTF +# endif +#endif + +#if defined(MSDOS) && defined(__BORLANDC__) && (BORLANDC > 0x410) +# ifndef HAVE_VSNPRINTF +# define HAVE_VSNPRINTF +# endif +#endif + +#ifndef HAVE_VSNPRINTF +# ifdef MSDOS +/* vsnprintf may exist on some MS-DOS compilers (DJGPP?), + but for now we just assume it doesn't. */ +# define NO_vsnprintf +# endif +# ifdef __TURBOC__ +# define NO_vsnprintf +# endif +# ifdef WIN32 +/* In Win32, vsnprintf is available as the "non-ANSI" _vsnprintf. 
*/ +# if !defined(vsnprintf) && !defined(NO_vsnprintf) +# if !defined(_MSC_VER) || ( defined(_MSC_VER) && _MSC_VER < 1500 ) +# define vsnprintf _vsnprintf +# endif +# endif +# endif +# ifdef __SASC +# define NO_vsnprintf +# endif +# ifdef VMS +# define NO_vsnprintf +# endif +# ifdef __OS400__ +# define NO_vsnprintf +# endif +# ifdef __MVS__ +# define NO_vsnprintf +# endif +#endif + +/* unlike snprintf (which is required in C99, yet still not supported by + Microsoft more than a decade later!), _snprintf does not guarantee null + termination of the result -- however this is only used in gzlib.c where + the result is assured to fit in the space provided */ +#ifdef _MSC_VER +# define snprintf _snprintf +#endif + +#ifndef local +# define local static +#endif +/* compile with -Dlocal if your debugger can't find static symbols */ + +/* gz* functions always use library allocation functions */ +#ifndef STDC + extern voidp malloc OF((uInt size)); + extern void free OF((voidpf ptr)); +#endif + +/* get errno and strerror definition */ +#if defined UNDER_CE +# include +# define zstrerror() gz_strwinerror((DWORD)GetLastError()) +#else +# ifndef NO_STRERROR +# include +# define zstrerror() strerror(errno) +# else +# define zstrerror() "stdio error (consult errno)" +# endif +#endif + +/* provide prototypes for these when building zlib without LFS */ +#if !defined(_LARGEFILE64_SOURCE) || _LFS64_LARGEFILE-0 == 0 + ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *)); + ZEXTERN z_off64_t ZEXPORT gzseek64 OF((gzFile, z_off64_t, int)); + ZEXTERN z_off64_t ZEXPORT gztell64 OF((gzFile)); + ZEXTERN z_off64_t ZEXPORT gzoffset64 OF((gzFile)); +#endif + +/* default memLevel */ +#if MAX_MEM_LEVEL >= 8 +# define DEF_MEM_LEVEL 8 +#else +# define DEF_MEM_LEVEL MAX_MEM_LEVEL +#endif + +/* default i/o buffer size -- double this for output when reading (this and + twice this must be able to fit in an unsigned type) */ +#define GZBUFSIZE 8192 + +/* gzip modes, also provide a little 
integrity check on the passed structure */ +#define GZ_NONE 0 +#define GZ_READ 7247 +#define GZ_WRITE 31153 +#define GZ_APPEND 1 /* mode set to GZ_WRITE after the file is opened */ + +/* values for gz_state how */ +#define LOOK 0 /* look for a gzip header */ +#define COPY 1 /* copy input directly */ +#define GZIP 2 /* decompress a gzip stream */ + +/* internal gzip file state data structure */ +typedef struct { + /* exposed contents for gzgetc() macro */ + struct gzFile_s x; /* "x" for exposed */ + /* x.have: number of bytes available at x.next */ + /* x.next: next output data to deliver or write */ + /* x.pos: current position in uncompressed data */ + /* used for both reading and writing */ + int mode; /* see gzip modes above */ + int fd; /* file descriptor */ + char *path; /* path or fd for error messages */ + unsigned size; /* buffer size, zero if not allocated yet */ + unsigned want; /* requested buffer size, default is GZBUFSIZE */ + unsigned char *in; /* input buffer */ + unsigned char *out; /* output buffer (double-sized when reading) */ + int direct; /* 0 if processing gzip, 1 if transparent */ + /* just for reading */ + int how; /* 0: get header, 1: copy, 2: decompress */ + z_off64_t start; /* where the gzip data started, for rewinding */ + int eof; /* true if end of input file reached */ + int past; /* true if read requested past end */ + /* just for writing */ + int level; /* compression level */ + int strategy; /* compression strategy */ + /* seek request */ + z_off64_t skip; /* amount to skip (already rewound if backwards) */ + int seek; /* true if seek request pending */ + /* error information */ + int err; /* error code */ + char *msg; /* error message */ + /* zlib inflate or deflate stream */ + z_stream strm; /* stream structure in-place (not a pointer) */ +} gz_state; +typedef gz_state FAR *gz_statep; + +/* shared functions */ +void ZLIB_INTERNAL gz_error OF((gz_statep, int, const char *)); +#if defined UNDER_CE +char ZLIB_INTERNAL *gz_strwinerror 
OF((DWORD error)); +#endif + +/* GT_OFF(x), where x is an unsigned value, is true if x > maximum z_off64_t + value -- needed when comparing unsigned to z_off64_t, which is signed + (possible z_off64_t types off_t, off64_t, and long are all signed) */ +#ifdef INT_MAX +# define GT_OFF(x) (sizeof(int) == sizeof(z_off64_t) && (x) > INT_MAX) +#else +unsigned ZLIB_INTERNAL gz_intmax OF((void)); +# define GT_OFF(x) (sizeof(int) == sizeof(z_off64_t) && (x) > gz_intmax()) +#endif diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/infback.c cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/infback.c --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/infback.c 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/infback.c 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,640 @@ +/* infback.c -- inflate using a call-back interface + * Copyright (C) 1995-2011 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* + This code is largely copied from inflate.c. Normally either infback.o or + inflate.o would be linked into an application--not both. The interface + with inffast.c is retained so that optimized assembler-coded versions of + inflate_fast() can be used with either inflate.c or infback.c. + */ + +#include "zutil.h" +#include "inftrees.h" +#include "inflate.h" +#include "inffast.h" + +/* function prototypes */ +local void fixedtables OF((struct inflate_state FAR *state)); + +/* + strm provides memory allocation functions in zalloc and zfree, or + Z_NULL to use the library memory allocation functions. + + windowBits is in the range 8..15, and window is a user-supplied + window and output buffer that is 2**windowBits bytes. 
+ */ +int ZEXPORT inflateBackInit_(strm, windowBits, window, version, stream_size) +z_streamp strm; +int windowBits; +unsigned char FAR *window; +const char *version; +int stream_size; +{ + struct inflate_state FAR *state; + + if (version == Z_NULL || version[0] != ZLIB_VERSION[0] || + stream_size != (int)(sizeof(z_stream))) + return Z_VERSION_ERROR; + if (strm == Z_NULL || window == Z_NULL || + windowBits < 8 || windowBits > 15) + return Z_STREAM_ERROR; + strm->msg = Z_NULL; /* in case we return an error */ + if (strm->zalloc == (alloc_func)0) { +#ifdef Z_SOLO + return Z_STREAM_ERROR; +#else + strm->zalloc = zcalloc; + strm->opaque = (voidpf)0; +#endif + } + if (strm->zfree == (free_func)0) +#ifdef Z_SOLO + return Z_STREAM_ERROR; +#else + strm->zfree = zcfree; +#endif + state = (struct inflate_state FAR *)ZALLOC(strm, 1, + sizeof(struct inflate_state)); + if (state == Z_NULL) return Z_MEM_ERROR; + Tracev((stderr, "inflate: allocated\n")); + strm->state = (struct internal_state FAR *)state; + state->dmax = 32768U; + state->wbits = windowBits; + state->wsize = 1U << windowBits; + state->window = window; + state->wnext = 0; + state->whave = 0; + return Z_OK; +} + +/* + Return state with length and distance decoding tables and index sizes set to + fixed code decoding. Normally this returns fixed tables from inffixed.h. + If BUILDFIXED is defined, then instead this routine builds the tables the + first time it's called, and returns those tables the first time and + thereafter. This reduces the size of the code by about 2K bytes, in + exchange for a little execution time. However, BUILDFIXED should not be + used for threaded applications, since the rewriting of the tables and virgin + may not be thread-safe. 
+ */ +local void fixedtables(state) +struct inflate_state FAR *state; +{ +#ifdef BUILDFIXED + static int virgin = 1; + static code *lenfix, *distfix; + static code fixed[544]; + + /* build fixed huffman tables if first call (may not be thread safe) */ + if (virgin) { + unsigned sym, bits; + static code *next; + + /* literal/length table */ + sym = 0; + while (sym < 144) state->lens[sym++] = 8; + while (sym < 256) state->lens[sym++] = 9; + while (sym < 280) state->lens[sym++] = 7; + while (sym < 288) state->lens[sym++] = 8; + next = fixed; + lenfix = next; + bits = 9; + inflate_table(LENS, state->lens, 288, &(next), &(bits), state->work); + + /* distance table */ + sym = 0; + while (sym < 32) state->lens[sym++] = 5; + distfix = next; + bits = 5; + inflate_table(DISTS, state->lens, 32, &(next), &(bits), state->work); + + /* do this just once */ + virgin = 0; + } +#else /* !BUILDFIXED */ +# include "inffixed.h" +#endif /* BUILDFIXED */ + state->lencode = lenfix; + state->lenbits = 9; + state->distcode = distfix; + state->distbits = 5; +} + +/* Macros for inflateBack(): */ + +/* Load returned state from inflate_fast() */ +#define LOAD() \ + do { \ + put = strm->next_out; \ + left = strm->avail_out; \ + next = strm->next_in; \ + have = strm->avail_in; \ + hold = state->hold; \ + bits = state->bits; \ + } while (0) + +/* Set state from registers for inflate_fast() */ +#define RESTORE() \ + do { \ + strm->next_out = put; \ + strm->avail_out = left; \ + strm->next_in = next; \ + strm->avail_in = have; \ + state->hold = hold; \ + state->bits = bits; \ + } while (0) + +/* Clear the input bit accumulator */ +#define INITBITS() \ + do { \ + hold = 0; \ + bits = 0; \ + } while (0) + +/* Assure that some input is available. If input is requested, but denied, + then return a Z_BUF_ERROR from inflateBack(). 
*/ +#define PULL() \ + do { \ + if (have == 0) { \ + have = in(in_desc, &next); \ + if (have == 0) { \ + next = Z_NULL; \ + ret = Z_BUF_ERROR; \ + goto inf_leave; \ + } \ + } \ + } while (0) + +/* Get a byte of input into the bit accumulator, or return from inflateBack() + with an error if there is no input available. */ +#define PULLBYTE() \ + do { \ + PULL(); \ + have--; \ + hold += (unsigned long)(*next++) << bits; \ + bits += 8; \ + } while (0) + +/* Assure that there are at least n bits in the bit accumulator. If there is + not enough available input to do that, then return from inflateBack() with + an error. */ +#define NEEDBITS(n) \ + do { \ + while (bits < (unsigned)(n)) \ + PULLBYTE(); \ + } while (0) + +/* Return the low n bits of the bit accumulator (n < 16) */ +#define BITS(n) \ + ((unsigned)hold & ((1U << (n)) - 1)) + +/* Remove n bits from the bit accumulator */ +#define DROPBITS(n) \ + do { \ + hold >>= (n); \ + bits -= (unsigned)(n); \ + } while (0) + +/* Remove zero to seven bits as needed to go to a byte boundary */ +#define BYTEBITS() \ + do { \ + hold >>= bits & 7; \ + bits -= bits & 7; \ + } while (0) + +/* Assure that some output space is available, by writing out the window + if it's full. If the write fails, return from inflateBack() with a + Z_BUF_ERROR. */ +#define ROOM() \ + do { \ + if (left == 0) { \ + put = state->window; \ + left = state->wsize; \ + state->whave = left; \ + if (out(out_desc, put, left)) { \ + ret = Z_BUF_ERROR; \ + goto inf_leave; \ + } \ + } \ + } while (0) + +/* + strm provides the memory allocation functions and window buffer on input, + and provides information on the unused input on return. For Z_DATA_ERROR + returns, strm will also provide an error message. + + in() and out() are the call-back input and output functions. When + inflateBack() needs more input, it calls in(). 
When inflateBack() has + filled the window with output, or when it completes with data in the + window, it calls out() to write out the data. The application must not + change the provided input until in() is called again or inflateBack() + returns. The application must not change the window/output buffer until + inflateBack() returns. + + in() and out() are called with a descriptor parameter provided in the + inflateBack() call. This parameter can be a structure that provides the + information required to do the read or write, as well as accumulated + information on the input and output such as totals and check values. + + in() should return zero on failure. out() should return non-zero on + failure. If either in() or out() fails, than inflateBack() returns a + Z_BUF_ERROR. strm->next_in can be checked for Z_NULL to see whether it + was in() or out() that caused in the error. Otherwise, inflateBack() + returns Z_STREAM_END on success, Z_DATA_ERROR for an deflate format + error, or Z_MEM_ERROR if it could not allocate memory for the state. + inflateBack() can also return Z_STREAM_ERROR if the input parameters + are not correct, i.e. strm is Z_NULL or the state was not initialized. 
+ */ +int ZEXPORT inflateBack(strm, in, in_desc, out, out_desc) +z_streamp strm; +in_func in; +void FAR *in_desc; +out_func out; +void FAR *out_desc; +{ + struct inflate_state FAR *state; + z_const unsigned char FAR *next; /* next input */ + unsigned char FAR *put; /* next output */ + unsigned have, left; /* available input and output */ + unsigned long hold; /* bit buffer */ + unsigned bits; /* bits in bit buffer */ + unsigned copy; /* number of stored or match bytes to copy */ + unsigned char FAR *from; /* where to copy match bytes from */ + code here; /* current decoding table entry */ + code last; /* parent table entry */ + unsigned len; /* length to copy for repeats, bits to drop */ + int ret; /* return code */ + static const unsigned short order[19] = /* permutation of code lengths */ + {16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; + + /* Check that the strm exists and that the state was initialized */ + if (strm == Z_NULL || strm->state == Z_NULL) + return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; + + /* Reset the state */ + strm->msg = Z_NULL; + state->mode = TYPE; + state->last = 0; + state->whave = 0; + next = strm->next_in; + have = next != Z_NULL ? strm->avail_in : 0; + hold = 0; + bits = 0; + put = state->window; + left = state->wsize; + + /* Inflate until end of block marked as last */ + for (;;) + switch (state->mode) { + case TYPE: + /* determine and dispatch block type */ + if (state->last) { + BYTEBITS(); + state->mode = DONE; + break; + } + NEEDBITS(3); + state->last = BITS(1); + DROPBITS(1); + switch (BITS(2)) { + case 0: /* stored block */ + Tracev((stderr, "inflate: stored block%s\n", + state->last ? " (last)" : "")); + state->mode = STORED; + break; + case 1: /* fixed block */ + fixedtables(state); + Tracev((stderr, "inflate: fixed codes block%s\n", + state->last ? 
" (last)" : "")); + state->mode = LEN; /* decode codes */ + break; + case 2: /* dynamic block */ + Tracev((stderr, "inflate: dynamic codes block%s\n", + state->last ? " (last)" : "")); + state->mode = TABLE; + break; + case 3: + strm->msg = (char *)"invalid block type"; + state->mode = BAD; + } + DROPBITS(2); + break; + + case STORED: + /* get and verify stored block length */ + BYTEBITS(); /* go to byte boundary */ + NEEDBITS(32); + if ((hold & 0xffff) != ((hold >> 16) ^ 0xffff)) { + strm->msg = (char *)"invalid stored block lengths"; + state->mode = BAD; + break; + } + state->length = (unsigned)hold & 0xffff; + Tracev((stderr, "inflate: stored length %u\n", + state->length)); + INITBITS(); + + /* copy stored block from input to output */ + while (state->length != 0) { + copy = state->length; + PULL(); + ROOM(); + if (copy > have) copy = have; + if (copy > left) copy = left; + zmemcpy(put, next, copy); + have -= copy; + next += copy; + left -= copy; + put += copy; + state->length -= copy; + } + Tracev((stderr, "inflate: stored end\n")); + state->mode = TYPE; + break; + + case TABLE: + /* get dynamic table entries descriptor */ + NEEDBITS(14); + state->nlen = BITS(5) + 257; + DROPBITS(5); + state->ndist = BITS(5) + 1; + DROPBITS(5); + state->ncode = BITS(4) + 4; + DROPBITS(4); +#ifndef PKZIP_BUG_WORKAROUND + if (state->nlen > 286 || state->ndist > 30) { + strm->msg = (char *)"too many length or distance symbols"; + state->mode = BAD; + break; + } +#endif + Tracev((stderr, "inflate: table sizes ok\n")); + + /* get code length code lengths (not a typo) */ + state->have = 0; + while (state->have < state->ncode) { + NEEDBITS(3); + state->lens[order[state->have++]] = (unsigned short)BITS(3); + DROPBITS(3); + } + while (state->have < 19) + state->lens[order[state->have++]] = 0; + state->next = state->codes; + state->lencode = (code const FAR *)(state->next); + state->lenbits = 7; + ret = inflate_table(CODES, state->lens, 19, &(state->next), + &(state->lenbits), 
state->work); + if (ret) { + strm->msg = (char *)"invalid code lengths set"; + state->mode = BAD; + break; + } + Tracev((stderr, "inflate: code lengths ok\n")); + + /* get length and distance code code lengths */ + state->have = 0; + while (state->have < state->nlen + state->ndist) { + for (;;) { + here = state->lencode[BITS(state->lenbits)]; + if ((unsigned)(here.bits) <= bits) break; + PULLBYTE(); + } + if (here.val < 16) { + DROPBITS(here.bits); + state->lens[state->have++] = here.val; + } + else { + if (here.val == 16) { + NEEDBITS(here.bits + 2); + DROPBITS(here.bits); + if (state->have == 0) { + strm->msg = (char *)"invalid bit length repeat"; + state->mode = BAD; + break; + } + len = (unsigned)(state->lens[state->have - 1]); + copy = 3 + BITS(2); + DROPBITS(2); + } + else if (here.val == 17) { + NEEDBITS(here.bits + 3); + DROPBITS(here.bits); + len = 0; + copy = 3 + BITS(3); + DROPBITS(3); + } + else { + NEEDBITS(here.bits + 7); + DROPBITS(here.bits); + len = 0; + copy = 11 + BITS(7); + DROPBITS(7); + } + if (state->have + copy > state->nlen + state->ndist) { + strm->msg = (char *)"invalid bit length repeat"; + state->mode = BAD; + break; + } + while (copy--) + state->lens[state->have++] = (unsigned short)len; + } + } + + /* handle error breaks in while */ + if (state->mode == BAD) break; + + /* check for end-of-block code (better have one) */ + if (state->lens[256] == 0) { + strm->msg = (char *)"invalid code -- missing end-of-block"; + state->mode = BAD; + break; + } + + /* build code tables -- note: do not change the lenbits or distbits + values here (9 and 6) without reading the comments in inftrees.h + concerning the ENOUGH constants, which depend on those values */ + state->next = state->codes; + state->lencode = (code const FAR *)(state->next); + state->lenbits = 9; + ret = inflate_table(LENS, state->lens, state->nlen, &(state->next), + &(state->lenbits), state->work); + if (ret) { + strm->msg = (char *)"invalid literal/lengths set"; + state->mode = 
BAD; + break; + } + state->distcode = (code const FAR *)(state->next); + state->distbits = 6; + ret = inflate_table(DISTS, state->lens + state->nlen, state->ndist, + &(state->next), &(state->distbits), state->work); + if (ret) { + strm->msg = (char *)"invalid distances set"; + state->mode = BAD; + break; + } + Tracev((stderr, "inflate: codes ok\n")); + state->mode = LEN; + + case LEN: + /* use inflate_fast() if we have enough input and output */ + if (have >= 6 && left >= 258) { + RESTORE(); + if (state->whave < state->wsize) + state->whave = state->wsize - left; + inflate_fast(strm, state->wsize); + LOAD(); + break; + } + + /* get a literal, length, or end-of-block code */ + for (;;) { + here = state->lencode[BITS(state->lenbits)]; + if ((unsigned)(here.bits) <= bits) break; + PULLBYTE(); + } + if (here.op && (here.op & 0xf0) == 0) { + last = here; + for (;;) { + here = state->lencode[last.val + + (BITS(last.bits + last.op) >> last.bits)]; + if ((unsigned)(last.bits + here.bits) <= bits) break; + PULLBYTE(); + } + DROPBITS(last.bits); + } + DROPBITS(here.bits); + state->length = (unsigned)here.val; + + /* process literal */ + if (here.op == 0) { + Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? 
+ "inflate: literal '%c'\n" : + "inflate: literal 0x%02x\n", here.val)); + ROOM(); + *put++ = (unsigned char)(state->length); + left--; + state->mode = LEN; + break; + } + + /* process end of block */ + if (here.op & 32) { + Tracevv((stderr, "inflate: end of block\n")); + state->mode = TYPE; + break; + } + + /* invalid code */ + if (here.op & 64) { + strm->msg = (char *)"invalid literal/length code"; + state->mode = BAD; + break; + } + + /* length code -- get extra bits, if any */ + state->extra = (unsigned)(here.op) & 15; + if (state->extra != 0) { + NEEDBITS(state->extra); + state->length += BITS(state->extra); + DROPBITS(state->extra); + } + Tracevv((stderr, "inflate: length %u\n", state->length)); + + /* get distance code */ + for (;;) { + here = state->distcode[BITS(state->distbits)]; + if ((unsigned)(here.bits) <= bits) break; + PULLBYTE(); + } + if ((here.op & 0xf0) == 0) { + last = here; + for (;;) { + here = state->distcode[last.val + + (BITS(last.bits + last.op) >> last.bits)]; + if ((unsigned)(last.bits + here.bits) <= bits) break; + PULLBYTE(); + } + DROPBITS(last.bits); + } + DROPBITS(here.bits); + if (here.op & 64) { + strm->msg = (char *)"invalid distance code"; + state->mode = BAD; + break; + } + state->offset = (unsigned)here.val; + + /* get distance extra bits, if any */ + state->extra = (unsigned)(here.op) & 15; + if (state->extra != 0) { + NEEDBITS(state->extra); + state->offset += BITS(state->extra); + DROPBITS(state->extra); + } + if (state->offset > state->wsize - (state->whave < state->wsize ? 
+ left : 0)) { + strm->msg = (char *)"invalid distance too far back"; + state->mode = BAD; + break; + } + Tracevv((stderr, "inflate: distance %u\n", state->offset)); + + /* copy match from window to output */ + do { + ROOM(); + copy = state->wsize - state->offset; + if (copy < left) { + from = put + copy; + copy = left - copy; + } + else { + from = put - state->offset; + copy = left; + } + if (copy > state->length) copy = state->length; + state->length -= copy; + left -= copy; + do { + *put++ = *from++; + } while (--copy); + } while (state->length != 0); + break; + + case DONE: + /* inflate stream terminated properly -- write leftover output */ + ret = Z_STREAM_END; + if (left < state->wsize) { + if (out(out_desc, state->window, state->wsize - left)) + ret = Z_BUF_ERROR; + } + goto inf_leave; + + case BAD: + ret = Z_DATA_ERROR; + goto inf_leave; + + default: /* can't happen, but makes compilers happy */ + ret = Z_STREAM_ERROR; + goto inf_leave; + } + + /* Return unused input */ + inf_leave: + strm->next_in = next; + strm->avail_in = have; + return ret; +} + +int ZEXPORT inflateBackEnd(strm) +z_streamp strm; +{ + if (strm == Z_NULL || strm->state == Z_NULL || strm->zfree == (free_func)0) + return Z_STREAM_ERROR; + ZFREE(strm, strm->state); + strm->state = Z_NULL; + Tracev((stderr, "inflate: end\n")); + return Z_OK; +} diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/inffast.c cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/inffast.c --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/inffast.c 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/inffast.c 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,340 @@ +/* inffast.c -- fast decoding + * Copyright (C) 1995-2008, 2010, 2013 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +#include "zutil.h" +#include "inftrees.h" +#include "inflate.h" +#include "inffast.h" + +#ifndef ASMINF + +/* Allow machine dependent 
optimization for post-increment or pre-increment. + Based on testing to date, + Pre-increment preferred for: + - PowerPC G3 (Adler) + - MIPS R5000 (Randers-Pehrson) + Post-increment preferred for: + - none + No measurable difference: + - Pentium III (Anderson) + - M68060 (Nikl) + */ +#ifdef POSTINC +# define OFF 0 +# define PUP(a) *(a)++ +#else +# define OFF 1 +# define PUP(a) *++(a) +#endif + +/* + Decode literal, length, and distance codes and write out the resulting + literal and match bytes until either not enough input or output is + available, an end-of-block is encountered, or a data error is encountered. + When large enough input and output buffers are supplied to inflate(), for + example, a 16K input buffer and a 64K output buffer, more than 95% of the + inflate execution time is spent in this routine. + + Entry assumptions: + + state->mode == LEN + strm->avail_in >= 6 + strm->avail_out >= 258 + start >= strm->avail_out + state->bits < 8 + + On return, state->mode is one of: + + LEN -- ran out of enough output space or enough available input + TYPE -- reached end of block code, inflate() to interpret next block + BAD -- error in block data + + Notes: + + - The maximum input bits used by a length/distance pair is 15 bits for the + length code, 5 bits for the length extra, 15 bits for the distance code, + and 13 bits for the distance extra. This totals 48 bits, or six bytes. + Therefore if strm->avail_in >= 6, then there is enough input to avoid + checking for available input while decoding. + + - The maximum bytes that a single length/distance pair can output is 258 + bytes, which is the maximum length that can be coded. inflate_fast() + requires strm->avail_out >= 258 for each loop to avoid checking for + output space. 
+ */ +void ZLIB_INTERNAL inflate_fast(strm, start) +z_streamp strm; +unsigned start; /* inflate()'s starting value for strm->avail_out */ +{ + struct inflate_state FAR *state; + z_const unsigned char FAR *in; /* local strm->next_in */ + z_const unsigned char FAR *last; /* have enough input while in < last */ + unsigned char FAR *out; /* local strm->next_out */ + unsigned char FAR *beg; /* inflate()'s initial strm->next_out */ + unsigned char FAR *end; /* while out < end, enough space available */ +#ifdef INFLATE_STRICT + unsigned dmax; /* maximum distance from zlib header */ +#endif + unsigned wsize; /* window size or zero if not using window */ + unsigned whave; /* valid bytes in the window */ + unsigned wnext; /* window write index */ + unsigned char FAR *window; /* allocated sliding window, if wsize != 0 */ + unsigned long hold; /* local strm->hold */ + unsigned bits; /* local strm->bits */ + code const FAR *lcode; /* local strm->lencode */ + code const FAR *dcode; /* local strm->distcode */ + unsigned lmask; /* mask for first level of length codes */ + unsigned dmask; /* mask for first level of distance codes */ + code here; /* retrieved table entry */ + unsigned op; /* code bits, operation, extra bits, or */ + /* window position, window bytes to copy */ + unsigned len; /* match length, unused bytes */ + unsigned dist; /* match distance */ + unsigned char FAR *from; /* where to copy match from */ + + /* copy state to local variables */ + state = (struct inflate_state FAR *)strm->state; + in = strm->next_in - OFF; + last = in + (strm->avail_in - 5); + out = strm->next_out - OFF; + beg = out - (start - strm->avail_out); + end = out + (strm->avail_out - 257); +#ifdef INFLATE_STRICT + dmax = state->dmax; +#endif + wsize = state->wsize; + whave = state->whave; + wnext = state->wnext; + window = state->window; + hold = state->hold; + bits = state->bits; + lcode = state->lencode; + dcode = state->distcode; + lmask = (1U << state->lenbits) - 1; + dmask = (1U << 
state->distbits) - 1; + + /* decode literals and length/distances until end-of-block or not enough + input data or output space */ + do { + if (bits < 15) { + hold += (unsigned long)(PUP(in)) << bits; + bits += 8; + hold += (unsigned long)(PUP(in)) << bits; + bits += 8; + } + here = lcode[hold & lmask]; + dolen: + op = (unsigned)(here.bits); + hold >>= op; + bits -= op; + op = (unsigned)(here.op); + if (op == 0) { /* literal */ + Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? + "inflate: literal '%c'\n" : + "inflate: literal 0x%02x\n", here.val)); + PUP(out) = (unsigned char)(here.val); + } + else if (op & 16) { /* length base */ + len = (unsigned)(here.val); + op &= 15; /* number of extra bits */ + if (op) { + if (bits < op) { + hold += (unsigned long)(PUP(in)) << bits; + bits += 8; + } + len += (unsigned)hold & ((1U << op) - 1); + hold >>= op; + bits -= op; + } + Tracevv((stderr, "inflate: length %u\n", len)); + if (bits < 15) { + hold += (unsigned long)(PUP(in)) << bits; + bits += 8; + hold += (unsigned long)(PUP(in)) << bits; + bits += 8; + } + here = dcode[hold & dmask]; + dodist: + op = (unsigned)(here.bits); + hold >>= op; + bits -= op; + op = (unsigned)(here.op); + if (op & 16) { /* distance base */ + dist = (unsigned)(here.val); + op &= 15; /* number of extra bits */ + if (bits < op) { + hold += (unsigned long)(PUP(in)) << bits; + bits += 8; + if (bits < op) { + hold += (unsigned long)(PUP(in)) << bits; + bits += 8; + } + } + dist += (unsigned)hold & ((1U << op) - 1); +#ifdef INFLATE_STRICT + if (dist > dmax) { + strm->msg = (char *)"invalid distance too far back"; + state->mode = BAD; + break; + } +#endif + hold >>= op; + bits -= op; + Tracevv((stderr, "inflate: distance %u\n", dist)); + op = (unsigned)(out - beg); /* max distance in output */ + if (dist > op) { /* see if copy from window */ + op = dist - op; /* distance back in window */ + if (op > whave) { + if (state->sane) { + strm->msg = + (char *)"invalid distance too far back"; + 
state->mode = BAD; + break; + } +#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR + if (len <= op - whave) { + do { + PUP(out) = 0; + } while (--len); + continue; + } + len -= op - whave; + do { + PUP(out) = 0; + } while (--op > whave); + if (op == 0) { + from = out - dist; + do { + PUP(out) = PUP(from); + } while (--len); + continue; + } +#endif + } + from = window - OFF; + if (wnext == 0) { /* very common case */ + from += wsize - op; + if (op < len) { /* some from window */ + len -= op; + do { + PUP(out) = PUP(from); + } while (--op); + from = out - dist; /* rest from output */ + } + } + else if (wnext < op) { /* wrap around window */ + from += wsize + wnext - op; + op -= wnext; + if (op < len) { /* some from end of window */ + len -= op; + do { + PUP(out) = PUP(from); + } while (--op); + from = window - OFF; + if (wnext < len) { /* some from start of window */ + op = wnext; + len -= op; + do { + PUP(out) = PUP(from); + } while (--op); + from = out - dist; /* rest from output */ + } + } + } + else { /* contiguous in window */ + from += wnext - op; + if (op < len) { /* some from window */ + len -= op; + do { + PUP(out) = PUP(from); + } while (--op); + from = out - dist; /* rest from output */ + } + } + while (len > 2) { + PUP(out) = PUP(from); + PUP(out) = PUP(from); + PUP(out) = PUP(from); + len -= 3; + } + if (len) { + PUP(out) = PUP(from); + if (len > 1) + PUP(out) = PUP(from); + } + } + else { + from = out - dist; /* copy direct from output */ + do { /* minimum length is three */ + PUP(out) = PUP(from); + PUP(out) = PUP(from); + PUP(out) = PUP(from); + len -= 3; + } while (len > 2); + if (len) { + PUP(out) = PUP(from); + if (len > 1) + PUP(out) = PUP(from); + } + } + } + else if ((op & 64) == 0) { /* 2nd level distance code */ + here = dcode[here.val + (hold & ((1U << op) - 1))]; + goto dodist; + } + else { + strm->msg = (char *)"invalid distance code"; + state->mode = BAD; + break; + } + } + else if ((op & 64) == 0) { /* 2nd level length code */ + here = 
lcode[here.val + (hold & ((1U << op) - 1))]; + goto dolen; + } + else if (op & 32) { /* end-of-block */ + Tracevv((stderr, "inflate: end of block\n")); + state->mode = TYPE; + break; + } + else { + strm->msg = (char *)"invalid literal/length code"; + state->mode = BAD; + break; + } + } while (in < last && out < end); + + /* return unused bytes (on entry, bits < 8, so in won't go too far back) */ + len = bits >> 3; + in -= len; + bits -= len << 3; + hold &= (1U << bits) - 1; + + /* update state and return */ + strm->next_in = in + OFF; + strm->next_out = out + OFF; + strm->avail_in = (unsigned)(in < last ? 5 + (last - in) : 5 - (in - last)); + strm->avail_out = (unsigned)(out < end ? + 257 + (end - out) : 257 - (out - end)); + state->hold = hold; + state->bits = bits; + return; +} + +/* + inflate_fast() speedups that turned out slower (on a PowerPC G3 750CXe): + - Using bit fields for code structure + - Different op definition to avoid & for extra bits (do & for table bits) + - Three separate decoding do-loops for direct, window, and wnext == 0 + - Special case for distance > 1 copies to do overlapped load and store copy + - Explicit branch predictions (based on measured branch probabilities) + - Deferring match copy and interspersed it with decoding subsequent codes + - Swapping literal/length else + - Swapping window/direct else + - Larger unrolled copy loops (three is about right) + - Moving len -= 3 statement into middle of loop + */ + +#endif /* !ASMINF */ diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/inffast.h cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/inffast.h --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/inffast.h 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/inffast.h 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,11 @@ +/* inffast.h -- header to use inffast.c + * Copyright (C) 1995-2003, 2010 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + 
*/ + +/* WARNING: this file should *not* be used by applications. It is + part of the implementation of the compression library and is + subject to change. Applications should only use zlib.h. + */ + +void ZLIB_INTERNAL inflate_fast OF((z_streamp strm, unsigned start)); diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/inffixed.h cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/inffixed.h --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/inffixed.h 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/inffixed.h 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,94 @@ + /* inffixed.h -- table for decoding fixed codes + * Generated automatically by makefixed(). + */ + + /* WARNING: this file should *not* be used by applications. + It is part of the implementation of this library and is + subject to change. Applications should only use zlib.h. + */ + + static const code lenfix[512] = { + {96,7,0},{0,8,80},{0,8,16},{20,8,115},{18,7,31},{0,8,112},{0,8,48}, + {0,9,192},{16,7,10},{0,8,96},{0,8,32},{0,9,160},{0,8,0},{0,8,128}, + {0,8,64},{0,9,224},{16,7,6},{0,8,88},{0,8,24},{0,9,144},{19,7,59}, + {0,8,120},{0,8,56},{0,9,208},{17,7,17},{0,8,104},{0,8,40},{0,9,176}, + {0,8,8},{0,8,136},{0,8,72},{0,9,240},{16,7,4},{0,8,84},{0,8,20}, + {21,8,227},{19,7,43},{0,8,116},{0,8,52},{0,9,200},{17,7,13},{0,8,100}, + {0,8,36},{0,9,168},{0,8,4},{0,8,132},{0,8,68},{0,9,232},{16,7,8}, + {0,8,92},{0,8,28},{0,9,152},{20,7,83},{0,8,124},{0,8,60},{0,9,216}, + {18,7,23},{0,8,108},{0,8,44},{0,9,184},{0,8,12},{0,8,140},{0,8,76}, + {0,9,248},{16,7,3},{0,8,82},{0,8,18},{21,8,163},{19,7,35},{0,8,114}, + {0,8,50},{0,9,196},{17,7,11},{0,8,98},{0,8,34},{0,9,164},{0,8,2}, + {0,8,130},{0,8,66},{0,9,228},{16,7,7},{0,8,90},{0,8,26},{0,9,148}, + {20,7,67},{0,8,122},{0,8,58},{0,9,212},{18,7,19},{0,8,106},{0,8,42}, + {0,9,180},{0,8,10},{0,8,138},{0,8,74},{0,9,244},{16,7,5},{0,8,86}, + {0,8,22},{64,8,0},{19,7,51},{0,8,118},{0,8,54},{0,9,204},{17,7,15}, + 
{0,8,102},{0,8,38},{0,9,172},{0,8,6},{0,8,134},{0,8,70},{0,9,236}, + {16,7,9},{0,8,94},{0,8,30},{0,9,156},{20,7,99},{0,8,126},{0,8,62}, + {0,9,220},{18,7,27},{0,8,110},{0,8,46},{0,9,188},{0,8,14},{0,8,142}, + {0,8,78},{0,9,252},{96,7,0},{0,8,81},{0,8,17},{21,8,131},{18,7,31}, + {0,8,113},{0,8,49},{0,9,194},{16,7,10},{0,8,97},{0,8,33},{0,9,162}, + {0,8,1},{0,8,129},{0,8,65},{0,9,226},{16,7,6},{0,8,89},{0,8,25}, + {0,9,146},{19,7,59},{0,8,121},{0,8,57},{0,9,210},{17,7,17},{0,8,105}, + {0,8,41},{0,9,178},{0,8,9},{0,8,137},{0,8,73},{0,9,242},{16,7,4}, + {0,8,85},{0,8,21},{16,8,258},{19,7,43},{0,8,117},{0,8,53},{0,9,202}, + {17,7,13},{0,8,101},{0,8,37},{0,9,170},{0,8,5},{0,8,133},{0,8,69}, + {0,9,234},{16,7,8},{0,8,93},{0,8,29},{0,9,154},{20,7,83},{0,8,125}, + {0,8,61},{0,9,218},{18,7,23},{0,8,109},{0,8,45},{0,9,186},{0,8,13}, + {0,8,141},{0,8,77},{0,9,250},{16,7,3},{0,8,83},{0,8,19},{21,8,195}, + {19,7,35},{0,8,115},{0,8,51},{0,9,198},{17,7,11},{0,8,99},{0,8,35}, + {0,9,166},{0,8,3},{0,8,131},{0,8,67},{0,9,230},{16,7,7},{0,8,91}, + {0,8,27},{0,9,150},{20,7,67},{0,8,123},{0,8,59},{0,9,214},{18,7,19}, + {0,8,107},{0,8,43},{0,9,182},{0,8,11},{0,8,139},{0,8,75},{0,9,246}, + {16,7,5},{0,8,87},{0,8,23},{64,8,0},{19,7,51},{0,8,119},{0,8,55}, + {0,9,206},{17,7,15},{0,8,103},{0,8,39},{0,9,174},{0,8,7},{0,8,135}, + {0,8,71},{0,9,238},{16,7,9},{0,8,95},{0,8,31},{0,9,158},{20,7,99}, + {0,8,127},{0,8,63},{0,9,222},{18,7,27},{0,8,111},{0,8,47},{0,9,190}, + {0,8,15},{0,8,143},{0,8,79},{0,9,254},{96,7,0},{0,8,80},{0,8,16}, + {20,8,115},{18,7,31},{0,8,112},{0,8,48},{0,9,193},{16,7,10},{0,8,96}, + {0,8,32},{0,9,161},{0,8,0},{0,8,128},{0,8,64},{0,9,225},{16,7,6}, + {0,8,88},{0,8,24},{0,9,145},{19,7,59},{0,8,120},{0,8,56},{0,9,209}, + {17,7,17},{0,8,104},{0,8,40},{0,9,177},{0,8,8},{0,8,136},{0,8,72}, + {0,9,241},{16,7,4},{0,8,84},{0,8,20},{21,8,227},{19,7,43},{0,8,116}, + {0,8,52},{0,9,201},{17,7,13},{0,8,100},{0,8,36},{0,9,169},{0,8,4}, + 
{0,8,132},{0,8,68},{0,9,233},{16,7,8},{0,8,92},{0,8,28},{0,9,153}, + {20,7,83},{0,8,124},{0,8,60},{0,9,217},{18,7,23},{0,8,108},{0,8,44}, + {0,9,185},{0,8,12},{0,8,140},{0,8,76},{0,9,249},{16,7,3},{0,8,82}, + {0,8,18},{21,8,163},{19,7,35},{0,8,114},{0,8,50},{0,9,197},{17,7,11}, + {0,8,98},{0,8,34},{0,9,165},{0,8,2},{0,8,130},{0,8,66},{0,9,229}, + {16,7,7},{0,8,90},{0,8,26},{0,9,149},{20,7,67},{0,8,122},{0,8,58}, + {0,9,213},{18,7,19},{0,8,106},{0,8,42},{0,9,181},{0,8,10},{0,8,138}, + {0,8,74},{0,9,245},{16,7,5},{0,8,86},{0,8,22},{64,8,0},{19,7,51}, + {0,8,118},{0,8,54},{0,9,205},{17,7,15},{0,8,102},{0,8,38},{0,9,173}, + {0,8,6},{0,8,134},{0,8,70},{0,9,237},{16,7,9},{0,8,94},{0,8,30}, + {0,9,157},{20,7,99},{0,8,126},{0,8,62},{0,9,221},{18,7,27},{0,8,110}, + {0,8,46},{0,9,189},{0,8,14},{0,8,142},{0,8,78},{0,9,253},{96,7,0}, + {0,8,81},{0,8,17},{21,8,131},{18,7,31},{0,8,113},{0,8,49},{0,9,195}, + {16,7,10},{0,8,97},{0,8,33},{0,9,163},{0,8,1},{0,8,129},{0,8,65}, + {0,9,227},{16,7,6},{0,8,89},{0,8,25},{0,9,147},{19,7,59},{0,8,121}, + {0,8,57},{0,9,211},{17,7,17},{0,8,105},{0,8,41},{0,9,179},{0,8,9}, + {0,8,137},{0,8,73},{0,9,243},{16,7,4},{0,8,85},{0,8,21},{16,8,258}, + {19,7,43},{0,8,117},{0,8,53},{0,9,203},{17,7,13},{0,8,101},{0,8,37}, + {0,9,171},{0,8,5},{0,8,133},{0,8,69},{0,9,235},{16,7,8},{0,8,93}, + {0,8,29},{0,9,155},{20,7,83},{0,8,125},{0,8,61},{0,9,219},{18,7,23}, + {0,8,109},{0,8,45},{0,9,187},{0,8,13},{0,8,141},{0,8,77},{0,9,251}, + {16,7,3},{0,8,83},{0,8,19},{21,8,195},{19,7,35},{0,8,115},{0,8,51}, + {0,9,199},{17,7,11},{0,8,99},{0,8,35},{0,9,167},{0,8,3},{0,8,131}, + {0,8,67},{0,9,231},{16,7,7},{0,8,91},{0,8,27},{0,9,151},{20,7,67}, + {0,8,123},{0,8,59},{0,9,215},{18,7,19},{0,8,107},{0,8,43},{0,9,183}, + {0,8,11},{0,8,139},{0,8,75},{0,9,247},{16,7,5},{0,8,87},{0,8,23}, + {64,8,0},{19,7,51},{0,8,119},{0,8,55},{0,9,207},{17,7,15},{0,8,103}, + {0,8,39},{0,9,175},{0,8,7},{0,8,135},{0,8,71},{0,9,239},{16,7,9}, + 
{0,8,95},{0,8,31},{0,9,159},{20,7,99},{0,8,127},{0,8,63},{0,9,223}, + {18,7,27},{0,8,111},{0,8,47},{0,9,191},{0,8,15},{0,8,143},{0,8,79}, + {0,9,255} + }; + + static const code distfix[32] = { + {16,5,1},{23,5,257},{19,5,17},{27,5,4097},{17,5,5},{25,5,1025}, + {21,5,65},{29,5,16385},{16,5,3},{24,5,513},{20,5,33},{28,5,8193}, + {18,5,9},{26,5,2049},{22,5,129},{64,5,0},{16,5,2},{23,5,385}, + {19,5,25},{27,5,6145},{17,5,7},{25,5,1537},{21,5,97},{29,5,24577}, + {16,5,4},{24,5,769},{20,5,49},{28,5,12289},{18,5,13},{26,5,3073}, + {22,5,193},{64,5,0} + }; diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/inflate.c cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/inflate.c --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/inflate.c 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/inflate.c 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,1512 @@ +/* inflate.c -- zlib decompression + * Copyright (C) 1995-2012 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* + * Change history: + * + * 1.2.beta0 24 Nov 2002 + * - First version -- complete rewrite of inflate to simplify code, avoid + * creation of window when not needed, minimize use of window when it is + * needed, make inffast.c even faster, implement gzip decoding, and to + * improve code readability and style over the previous zlib inflate code + * + * 1.2.beta1 25 Nov 2002 + * - Use pointers for available input and output checking in inffast.c + * - Remove input and output counters in inffast.c + * - Change inffast.c entry and loop from avail_in >= 7 to >= 6 + * - Remove unnecessary second byte pull from length extra in inffast.c + * - Unroll direct copy to three copies per loop in inffast.c + * + * 1.2.beta2 4 Dec 2002 + * - Change external routine names to reduce potential conflicts + * - Correct filename to inffixed.h for fixed tables in inflate.c + * - Make hbuf[] unsigned char to match parameter type in 
inflate.c + * - Change strm->next_out[-state->offset] to *(strm->next_out - state->offset) + * to avoid negation problem on Alphas (64 bit) in inflate.c + * + * 1.2.beta3 22 Dec 2002 + * - Add comments on state->bits assertion in inffast.c + * - Add comments on op field in inftrees.h + * - Fix bug in reuse of allocated window after inflateReset() + * - Remove bit fields--back to byte structure for speed + * - Remove distance extra == 0 check in inflate_fast()--only helps for lengths + * - Change post-increments to pre-increments in inflate_fast(), PPC biased? + * - Add compile time option, POSTINC, to use post-increments instead (Intel?) + * - Make MATCH copy in inflate() much faster for when inflate_fast() not used + * - Use local copies of stream next and avail values, as well as local bit + * buffer and bit count in inflate()--for speed when inflate_fast() not used + * + * 1.2.beta4 1 Jan 2003 + * - Split ptr - 257 statements in inflate_table() to avoid compiler warnings + * - Move a comment on output buffer sizes from inffast.c to inflate.c + * - Add comments in inffast.c to introduce the inflate_fast() routine + * - Rearrange window copies in inflate_fast() for speed and simplification + * - Unroll last copy for window match in inflate_fast() + * - Use local copies of window variables in inflate_fast() for speed + * - Pull out common wnext == 0 case for speed in inflate_fast() + * - Make op and len in inflate_fast() unsigned for consistency + * - Add FAR to lcode and dcode declarations in inflate_fast() + * - Simplified bad distance check in inflate_fast() + * - Added inflateBackInit(), inflateBack(), and inflateBackEnd() in new + * source file infback.c to provide a call-back interface to inflate for + * programs like gzip and unzip -- uses window as output buffer to avoid + * window copying + * + * 1.2.beta5 1 Jan 2003 + * - Improved inflateBack() interface to allow the caller to provide initial + * input in strm. 
+ * - Fixed stored blocks bug in inflateBack() + * + * 1.2.beta6 4 Jan 2003 + * - Added comments in inffast.c on effectiveness of POSTINC + * - Typecasting all around to reduce compiler warnings + * - Changed loops from while (1) or do {} while (1) to for (;;), again to + * make compilers happy + * - Changed type of window in inflateBackInit() to unsigned char * + * + * 1.2.beta7 27 Jan 2003 + * - Changed many types to unsigned or unsigned short to avoid warnings + * - Added inflateCopy() function + * + * 1.2.0 9 Mar 2003 + * - Changed inflateBack() interface to provide separate opaque descriptors + * for the in() and out() functions + * - Changed inflateBack() argument and in_func typedef to swap the length + * and buffer address return values for the input function + * - Check next_in and next_out for Z_NULL on entry to inflate() + * + * The history for versions after 1.2.0 are in ChangeLog in zlib distribution. + */ + +#include "zutil.h" +#include "inftrees.h" +#include "inflate.h" +#include "inffast.h" + +#ifdef MAKEFIXED +# ifndef BUILDFIXED +# define BUILDFIXED +# endif +#endif + +/* function prototypes */ +local void fixedtables OF((struct inflate_state FAR *state)); +local int updatewindow OF((z_streamp strm, const unsigned char FAR *end, + unsigned copy)); +#ifdef BUILDFIXED + void makefixed OF((void)); +#endif +local unsigned syncsearch OF((unsigned FAR *have, const unsigned char FAR *buf, + unsigned len)); + +int ZEXPORT inflateResetKeep(strm) +z_streamp strm; +{ + struct inflate_state FAR *state; + + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; + strm->total_in = strm->total_out = state->total = 0; + strm->msg = Z_NULL; + if (state->wrap) /* to support ill-conceived Java test suite */ + strm->adler = state->wrap & 1; + state->mode = HEAD; + state->last = 0; + state->havedict = 0; + state->dmax = 32768U; + state->head = Z_NULL; + state->hold = 0; + state->bits = 0; + state->lencode 
= state->distcode = state->next = state->codes; + state->sane = 1; + state->back = -1; + Tracev((stderr, "inflate: reset\n")); + return Z_OK; +} + +int ZEXPORT inflateReset(strm) +z_streamp strm; +{ + struct inflate_state FAR *state; + + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; + state->wsize = 0; + state->whave = 0; + state->wnext = 0; + return inflateResetKeep(strm); +} + +int ZEXPORT inflateReset2(strm, windowBits) +z_streamp strm; +int windowBits; +{ + int wrap; + struct inflate_state FAR *state; + + /* get the state */ + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; + + /* extract wrap request from windowBits parameter */ + if (windowBits < 0) { + wrap = 0; + windowBits = -windowBits; + } + else { + wrap = (windowBits >> 4) + 1; +#ifdef GUNZIP + if (windowBits < 48) + windowBits &= 15; +#endif + } + + /* set number of window bits, free window if different */ + if (windowBits && (windowBits < 8 || windowBits > 15)) + return Z_STREAM_ERROR; + if (state->window != Z_NULL && state->wbits != (unsigned)windowBits) { + ZFREE(strm, state->window); + state->window = Z_NULL; + } + + /* update state and reset the rest of it */ + state->wrap = wrap; + state->wbits = (unsigned)windowBits; + return inflateReset(strm); +} + +int ZEXPORT inflateInit2_(strm, windowBits, version, stream_size) +z_streamp strm; +int windowBits; +const char *version; +int stream_size; +{ + int ret; + struct inflate_state FAR *state; + + if (version == Z_NULL || version[0] != ZLIB_VERSION[0] || + stream_size != (int)(sizeof(z_stream))) + return Z_VERSION_ERROR; + if (strm == Z_NULL) return Z_STREAM_ERROR; + strm->msg = Z_NULL; /* in case we return an error */ + if (strm->zalloc == (alloc_func)0) { +#ifdef Z_SOLO + return Z_STREAM_ERROR; +#else + strm->zalloc = zcalloc; + strm->opaque = (voidpf)0; +#endif + } + if (strm->zfree == (free_func)0) +#ifdef 
Z_SOLO + return Z_STREAM_ERROR; +#else + strm->zfree = zcfree; +#endif + state = (struct inflate_state FAR *) + ZALLOC(strm, 1, sizeof(struct inflate_state)); + if (state == Z_NULL) return Z_MEM_ERROR; + Tracev((stderr, "inflate: allocated\n")); + strm->state = (struct internal_state FAR *)state; + state->window = Z_NULL; + ret = inflateReset2(strm, windowBits); + if (ret != Z_OK) { + ZFREE(strm, state); + strm->state = Z_NULL; + } + return ret; +} + +int ZEXPORT inflateInit_(strm, version, stream_size) +z_streamp strm; +const char *version; +int stream_size; +{ + return inflateInit2_(strm, DEF_WBITS, version, stream_size); +} + +int ZEXPORT inflatePrime(strm, bits, value) +z_streamp strm; +int bits; +int value; +{ + struct inflate_state FAR *state; + + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; + if (bits < 0) { + state->hold = 0; + state->bits = 0; + return Z_OK; + } + if (bits > 16 || state->bits + bits > 32) return Z_STREAM_ERROR; + value &= (1L << bits) - 1; + state->hold += value << state->bits; + state->bits += bits; + return Z_OK; +} + +/* + Return state with length and distance decoding tables and index sizes set to + fixed code decoding. Normally this returns fixed tables from inffixed.h. + If BUILDFIXED is defined, then instead this routine builds the tables the + first time it's called, and returns those tables the first time and + thereafter. This reduces the size of the code by about 2K bytes, in + exchange for a little execution time. However, BUILDFIXED should not be + used for threaded applications, since the rewriting of the tables and virgin + may not be thread-safe. 
+ */ +local void fixedtables(state) +struct inflate_state FAR *state; +{ +#ifdef BUILDFIXED + static int virgin = 1; + static code *lenfix, *distfix; + static code fixed[544]; + + /* build fixed huffman tables if first call (may not be thread safe) */ + if (virgin) { + unsigned sym, bits; + static code *next; + + /* literal/length table */ + sym = 0; + while (sym < 144) state->lens[sym++] = 8; + while (sym < 256) state->lens[sym++] = 9; + while (sym < 280) state->lens[sym++] = 7; + while (sym < 288) state->lens[sym++] = 8; + next = fixed; + lenfix = next; + bits = 9; + inflate_table(LENS, state->lens, 288, &(next), &(bits), state->work); + + /* distance table */ + sym = 0; + while (sym < 32) state->lens[sym++] = 5; + distfix = next; + bits = 5; + inflate_table(DISTS, state->lens, 32, &(next), &(bits), state->work); + + /* do this just once */ + virgin = 0; + } +#else /* !BUILDFIXED */ +# include "inffixed.h" +#endif /* BUILDFIXED */ + state->lencode = lenfix; + state->lenbits = 9; + state->distcode = distfix; + state->distbits = 5; +} + +#ifdef MAKEFIXED +#include + +/* + Write out the inffixed.h that is #include'd above. Defining MAKEFIXED also + defines BUILDFIXED, so the tables are built on the fly. makefixed() writes + those tables to stdout, which would be piped to inffixed.h. A small program + can simply call makefixed to do this: + + void makefixed(void); + + int main(void) + { + makefixed(); + return 0; + } + + Then that can be linked with zlib built with MAKEFIXED defined and run: + + a.out > inffixed.h + */ +void makefixed() +{ + unsigned low, size; + struct inflate_state state; + + fixedtables(&state); + puts(" /* inffixed.h -- table for decoding fixed codes"); + puts(" * Generated automatically by makefixed()."); + puts(" */"); + puts(""); + puts(" /* WARNING: this file should *not* be used by applications."); + puts(" It is part of the implementation of this library and is"); + puts(" subject to change. 
Applications should only use zlib.h."); + puts(" */"); + puts(""); + size = 1U << 9; + printf(" static const code lenfix[%u] = {", size); + low = 0; + for (;;) { + if ((low % 7) == 0) printf("\n "); + printf("{%u,%u,%d}", (low & 127) == 99 ? 64 : state.lencode[low].op, + state.lencode[low].bits, state.lencode[low].val); + if (++low == size) break; + putchar(','); + } + puts("\n };"); + size = 1U << 5; + printf("\n static const code distfix[%u] = {", size); + low = 0; + for (;;) { + if ((low % 6) == 0) printf("\n "); + printf("{%u,%u,%d}", state.distcode[low].op, state.distcode[low].bits, + state.distcode[low].val); + if (++low == size) break; + putchar(','); + } + puts("\n };"); +} +#endif /* MAKEFIXED */ + +/* + Update the window with the last wsize (normally 32K) bytes written before + returning. If window does not exist yet, create it. This is only called + when a window is already in use, or when output has been written during this + inflate call, but the end of the deflate stream has not been reached yet. + It is also called to create a window for dictionary data when a dictionary + is loaded. + + Providing output buffers larger than 32K to inflate() should provide a speed + advantage, since only the last 32K of output is copied to the sliding window + upon return from inflate(), and since all distances after the first 32K of + output will fall in the output data, making match copies simpler and faster. + The advantage may be dependent on the size of the processor's data caches. 
+ */ +local int updatewindow(strm, end, copy) +z_streamp strm; +const Bytef *end; +unsigned copy; +{ + struct inflate_state FAR *state; + unsigned dist; + + state = (struct inflate_state FAR *)strm->state; + + /* if it hasn't been done already, allocate space for the window */ + if (state->window == Z_NULL) { + state->window = (unsigned char FAR *) + ZALLOC(strm, 1U << state->wbits, + sizeof(unsigned char)); + if (state->window == Z_NULL) return 1; + } + + /* if window not in use yet, initialize */ + if (state->wsize == 0) { + state->wsize = 1U << state->wbits; + state->wnext = 0; + state->whave = 0; + } + + /* copy state->wsize or less output bytes into the circular window */ + if (copy >= state->wsize) { + zmemcpy(state->window, end - state->wsize, state->wsize); + state->wnext = 0; + state->whave = state->wsize; + } + else { + dist = state->wsize - state->wnext; + if (dist > copy) dist = copy; + zmemcpy(state->window + state->wnext, end - copy, dist); + copy -= dist; + if (copy) { + zmemcpy(state->window, end - copy, copy); + state->wnext = copy; + state->whave = state->wsize; + } + else { + state->wnext += dist; + if (state->wnext == state->wsize) state->wnext = 0; + if (state->whave < state->wsize) state->whave += dist; + } + } + return 0; +} + +/* Macros for inflate(): */ + +/* check function to use adler32() for zlib or crc32() for gzip */ +#ifdef GUNZIP +# define UPDATE(check, buf, len) \ + (state->flags ? 
crc32(check, buf, len) : adler32(check, buf, len)) +#else +# define UPDATE(check, buf, len) adler32(check, buf, len) +#endif + +/* check macros for header crc */ +#ifdef GUNZIP +# define CRC2(check, word) \ + do { \ + hbuf[0] = (unsigned char)(word); \ + hbuf[1] = (unsigned char)((word) >> 8); \ + check = crc32(check, hbuf, 2); \ + } while (0) + +# define CRC4(check, word) \ + do { \ + hbuf[0] = (unsigned char)(word); \ + hbuf[1] = (unsigned char)((word) >> 8); \ + hbuf[2] = (unsigned char)((word) >> 16); \ + hbuf[3] = (unsigned char)((word) >> 24); \ + check = crc32(check, hbuf, 4); \ + } while (0) +#endif + +/* Load registers with state in inflate() for speed */ +#define LOAD() \ + do { \ + put = strm->next_out; \ + left = strm->avail_out; \ + next = strm->next_in; \ + have = strm->avail_in; \ + hold = state->hold; \ + bits = state->bits; \ + } while (0) + +/* Restore state from registers in inflate() */ +#define RESTORE() \ + do { \ + strm->next_out = put; \ + strm->avail_out = left; \ + strm->next_in = next; \ + strm->avail_in = have; \ + state->hold = hold; \ + state->bits = bits; \ + } while (0) + +/* Clear the input bit accumulator */ +#define INITBITS() \ + do { \ + hold = 0; \ + bits = 0; \ + } while (0) + +/* Get a byte of input into the bit accumulator, or return from inflate() + if there is no input available. */ +#define PULLBYTE() \ + do { \ + if (have == 0) goto inf_leave; \ + have--; \ + hold += (unsigned long)(*next++) << bits; \ + bits += 8; \ + } while (0) + +/* Assure that there are at least n bits in the bit accumulator. If there is + not enough available input to do that, then return from inflate(). 
*/ +#define NEEDBITS(n) \ + do { \ + while (bits < (unsigned)(n)) \ + PULLBYTE(); \ + } while (0) + +/* Return the low n bits of the bit accumulator (n < 16) */ +#define BITS(n) \ + ((unsigned)hold & ((1U << (n)) - 1)) + +/* Remove n bits from the bit accumulator */ +#define DROPBITS(n) \ + do { \ + hold >>= (n); \ + bits -= (unsigned)(n); \ + } while (0) + +/* Remove zero to seven bits as needed to go to a byte boundary */ +#define BYTEBITS() \ + do { \ + hold >>= bits & 7; \ + bits -= bits & 7; \ + } while (0) + +/* + inflate() uses a state machine to process as much input data and generate as + much output data as possible before returning. The state machine is + structured roughly as follows: + + for (;;) switch (state) { + ... + case STATEn: + if (not enough input data or output space to make progress) + return; + ... make progress ... + state = STATEm; + break; + ... + } + + so when inflate() is called again, the same case is attempted again, and + if the appropriate resources are provided, the machine proceeds to the + next state. The NEEDBITS() macro is usually the way the state evaluates + whether it can proceed or should return. NEEDBITS() does the return if + the requested bits are not available. The typical use of the BITS macros + is: + + NEEDBITS(n); + ... do something with BITS(n) ... + DROPBITS(n); + + where NEEDBITS(n) either returns from inflate() if there isn't enough + input left to load n bits into the accumulator, or it continues. BITS(n) + gives the low n bits in the accumulator. When done, DROPBITS(n) drops + the low n bits off the accumulator. INITBITS() clears the accumulator + and sets the number of available bits to zero. BYTEBITS() discards just + enough bits to put the accumulator on a byte boundary. After BYTEBITS() + and a NEEDBITS(8), then BITS(8) would return the next byte in the stream. + + NEEDBITS(n) uses PULLBYTE() to get an available byte of input, or to return + if there is no input available. 
The decoding of variable length codes uses + PULLBYTE() directly in order to pull just enough bytes to decode the next + code, and no more. + + Some states loop until they get enough input, making sure that enough + state information is maintained to continue the loop where it left off + if NEEDBITS() returns in the loop. For example, want, need, and keep + would all have to actually be part of the saved state in case NEEDBITS() + returns: + + case STATEw: + while (want < need) { + NEEDBITS(n); + keep[want++] = BITS(n); + DROPBITS(n); + } + state = STATEx; + case STATEx: + + As shown above, if the next state is also the next case, then the break + is omitted. + + A state may also return if there is not enough output space available to + complete that state. Those states are copying stored data, writing a + literal byte, and copying a matching string. + + When returning, a "goto inf_leave" is used to update the total counters, + update the check value, and determine whether any progress has been made + during that inflate() call in order to return the proper return code. + Progress is defined as a change in either strm->avail_in or strm->avail_out. + When there is a window, goto inf_leave will update the window with the last + output written. If a goto inf_leave occurs in the middle of decompression + and there is no window currently, goto inf_leave will create one and copy + output to the window for the next call of inflate(). + + In this implementation, the flush parameter of inflate() only affects the + return code (per zlib.h). inflate() always writes as much as possible to + strm->next_out, given the space available and the provided input--the effect + documented in zlib.h of Z_SYNC_FLUSH. Furthermore, inflate() always defers + the allocation of and copying into a sliding window until necessary, which + provides the effect documented in zlib.h for Z_FINISH when the entire input + stream available. 
So the only thing the flush parameter actually does is: + when flush is set to Z_FINISH, inflate() cannot return Z_OK. Instead it + will return Z_BUF_ERROR if it has not reached the end of the stream. + */ + +int ZEXPORT inflate(strm, flush) +z_streamp strm; +int flush; +{ + struct inflate_state FAR *state; + z_const unsigned char FAR *next; /* next input */ + unsigned char FAR *put; /* next output */ + unsigned have, left; /* available input and output */ + unsigned long hold; /* bit buffer */ + unsigned bits; /* bits in bit buffer */ + unsigned in, out; /* save starting available input and output */ + unsigned copy; /* number of stored or match bytes to copy */ + unsigned char FAR *from; /* where to copy match bytes from */ + code here; /* current decoding table entry */ + code last; /* parent table entry */ + unsigned len; /* length to copy for repeats, bits to drop */ + int ret; /* return code */ +#ifdef GUNZIP + unsigned char hbuf[4]; /* buffer for gzip header crc calculation */ +#endif + static const unsigned short order[19] = /* permutation of code lengths */ + {16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; + + if (strm == Z_NULL || strm->state == Z_NULL || strm->next_out == Z_NULL || + (strm->next_in == Z_NULL && strm->avail_in != 0)) + return Z_STREAM_ERROR; + + state = (struct inflate_state FAR *)strm->state; + if (state->mode == TYPE) state->mode = TYPEDO; /* skip check */ + LOAD(); + in = have; + out = left; + ret = Z_OK; + for (;;) + switch (state->mode) { + case HEAD: + if (state->wrap == 0) { + state->mode = TYPEDO; + break; + } + NEEDBITS(16); +#ifdef GUNZIP + if ((state->wrap & 2) && hold == 0x8b1f) { /* gzip header */ + state->check = crc32(0L, Z_NULL, 0); + CRC2(state->check, hold); + INITBITS(); + state->mode = FLAGS; + break; + } + state->flags = 0; /* expect zlib header */ + if (state->head != Z_NULL) + state->head->done = -1; + if (!(state->wrap & 1) || /* check if zlib header allowed */ +#else + if ( +#endif + ((BITS(8) 
<< 8) + (hold >> 8)) % 31) { + strm->msg = (char *)"incorrect header check"; + state->mode = BAD; + break; + } + if (BITS(4) != Z_DEFLATED) { + strm->msg = (char *)"unknown compression method"; + state->mode = BAD; + break; + } + DROPBITS(4); + len = BITS(4) + 8; + if (state->wbits == 0) + state->wbits = len; + else if (len > state->wbits) { + strm->msg = (char *)"invalid window size"; + state->mode = BAD; + break; + } + state->dmax = 1U << len; + Tracev((stderr, "inflate: zlib header ok\n")); + strm->adler = state->check = adler32(0L, Z_NULL, 0); + state->mode = hold & 0x200 ? DICTID : TYPE; + INITBITS(); + break; +#ifdef GUNZIP + case FLAGS: + NEEDBITS(16); + state->flags = (int)(hold); + if ((state->flags & 0xff) != Z_DEFLATED) { + strm->msg = (char *)"unknown compression method"; + state->mode = BAD; + break; + } + if (state->flags & 0xe000) { + strm->msg = (char *)"unknown header flags set"; + state->mode = BAD; + break; + } + if (state->head != Z_NULL) + state->head->text = (int)((hold >> 8) & 1); + if (state->flags & 0x0200) CRC2(state->check, hold); + INITBITS(); + state->mode = TIME; + case TIME: + NEEDBITS(32); + if (state->head != Z_NULL) + state->head->time = hold; + if (state->flags & 0x0200) CRC4(state->check, hold); + INITBITS(); + state->mode = OS; + case OS: + NEEDBITS(16); + if (state->head != Z_NULL) { + state->head->xflags = (int)(hold & 0xff); + state->head->os = (int)(hold >> 8); + } + if (state->flags & 0x0200) CRC2(state->check, hold); + INITBITS(); + state->mode = EXLEN; + case EXLEN: + if (state->flags & 0x0400) { + NEEDBITS(16); + state->length = (unsigned)(hold); + if (state->head != Z_NULL) + state->head->extra_len = (unsigned)hold; + if (state->flags & 0x0200) CRC2(state->check, hold); + INITBITS(); + } + else if (state->head != Z_NULL) + state->head->extra = Z_NULL; + state->mode = EXTRA; + case EXTRA: + if (state->flags & 0x0400) { + copy = state->length; + if (copy > have) copy = have; + if (copy) { + if (state->head != Z_NULL && + 
state->head->extra != Z_NULL) { + len = state->head->extra_len - state->length; + zmemcpy(state->head->extra + len, next, + len + copy > state->head->extra_max ? + state->head->extra_max - len : copy); + } + if (state->flags & 0x0200) + state->check = crc32(state->check, next, copy); + have -= copy; + next += copy; + state->length -= copy; + } + if (state->length) goto inf_leave; + } + state->length = 0; + state->mode = NAME; + case NAME: + if (state->flags & 0x0800) { + if (have == 0) goto inf_leave; + copy = 0; + do { + len = (unsigned)(next[copy++]); + if (state->head != Z_NULL && + state->head->name != Z_NULL && + state->length < state->head->name_max) + state->head->name[state->length++] = len; + } while (len && copy < have); + if (state->flags & 0x0200) + state->check = crc32(state->check, next, copy); + have -= copy; + next += copy; + if (len) goto inf_leave; + } + else if (state->head != Z_NULL) + state->head->name = Z_NULL; + state->length = 0; + state->mode = COMMENT; + case COMMENT: + if (state->flags & 0x1000) { + if (have == 0) goto inf_leave; + copy = 0; + do { + len = (unsigned)(next[copy++]); + if (state->head != Z_NULL && + state->head->comment != Z_NULL && + state->length < state->head->comm_max) + state->head->comment[state->length++] = len; + } while (len && copy < have); + if (state->flags & 0x0200) + state->check = crc32(state->check, next, copy); + have -= copy; + next += copy; + if (len) goto inf_leave; + } + else if (state->head != Z_NULL) + state->head->comment = Z_NULL; + state->mode = HCRC; + case HCRC: + if (state->flags & 0x0200) { + NEEDBITS(16); + if (hold != (state->check & 0xffff)) { + strm->msg = (char *)"header crc mismatch"; + state->mode = BAD; + break; + } + INITBITS(); + } + if (state->head != Z_NULL) { + state->head->hcrc = (int)((state->flags >> 9) & 1); + state->head->done = 1; + } + strm->adler = state->check = crc32(0L, Z_NULL, 0); + state->mode = TYPE; + break; +#endif + case DICTID: + NEEDBITS(32); + strm->adler = 
state->check = ZSWAP32(hold); + INITBITS(); + state->mode = DICT; + case DICT: + if (state->havedict == 0) { + RESTORE(); + return Z_NEED_DICT; + } + strm->adler = state->check = adler32(0L, Z_NULL, 0); + state->mode = TYPE; + case TYPE: + if (flush == Z_BLOCK || flush == Z_TREES) goto inf_leave; + case TYPEDO: + if (state->last) { + BYTEBITS(); + state->mode = CHECK; + break; + } + NEEDBITS(3); + state->last = BITS(1); + DROPBITS(1); + switch (BITS(2)) { + case 0: /* stored block */ + Tracev((stderr, "inflate: stored block%s\n", + state->last ? " (last)" : "")); + state->mode = STORED; + break; + case 1: /* fixed block */ + fixedtables(state); + Tracev((stderr, "inflate: fixed codes block%s\n", + state->last ? " (last)" : "")); + state->mode = LEN_; /* decode codes */ + if (flush == Z_TREES) { + DROPBITS(2); + goto inf_leave; + } + break; + case 2: /* dynamic block */ + Tracev((stderr, "inflate: dynamic codes block%s\n", + state->last ? " (last)" : "")); + state->mode = TABLE; + break; + case 3: + strm->msg = (char *)"invalid block type"; + state->mode = BAD; + } + DROPBITS(2); + break; + case STORED: + BYTEBITS(); /* go to byte boundary */ + NEEDBITS(32); + if ((hold & 0xffff) != ((hold >> 16) ^ 0xffff)) { + strm->msg = (char *)"invalid stored block lengths"; + state->mode = BAD; + break; + } + state->length = (unsigned)hold & 0xffff; + Tracev((stderr, "inflate: stored length %u\n", + state->length)); + INITBITS(); + state->mode = COPY_; + if (flush == Z_TREES) goto inf_leave; + case COPY_: + state->mode = COPY; + case COPY: + copy = state->length; + if (copy) { + if (copy > have) copy = have; + if (copy > left) copy = left; + if (copy == 0) goto inf_leave; + zmemcpy(put, next, copy); + have -= copy; + next += copy; + left -= copy; + put += copy; + state->length -= copy; + break; + } + Tracev((stderr, "inflate: stored end\n")); + state->mode = TYPE; + break; + case TABLE: + NEEDBITS(14); + state->nlen = BITS(5) + 257; + DROPBITS(5); + state->ndist = BITS(5) + 1; 
+ DROPBITS(5); + state->ncode = BITS(4) + 4; + DROPBITS(4); +#ifndef PKZIP_BUG_WORKAROUND + if (state->nlen > 286 || state->ndist > 30) { + strm->msg = (char *)"too many length or distance symbols"; + state->mode = BAD; + break; + } +#endif + Tracev((stderr, "inflate: table sizes ok\n")); + state->have = 0; + state->mode = LENLENS; + case LENLENS: + while (state->have < state->ncode) { + NEEDBITS(3); + state->lens[order[state->have++]] = (unsigned short)BITS(3); + DROPBITS(3); + } + while (state->have < 19) + state->lens[order[state->have++]] = 0; + state->next = state->codes; + state->lencode = (const code FAR *)(state->next); + state->lenbits = 7; + ret = inflate_table(CODES, state->lens, 19, &(state->next), + &(state->lenbits), state->work); + if (ret) { + strm->msg = (char *)"invalid code lengths set"; + state->mode = BAD; + break; + } + Tracev((stderr, "inflate: code lengths ok\n")); + state->have = 0; + state->mode = CODELENS; + case CODELENS: + while (state->have < state->nlen + state->ndist) { + for (;;) { + here = state->lencode[BITS(state->lenbits)]; + if ((unsigned)(here.bits) <= bits) break; + PULLBYTE(); + } + if (here.val < 16) { + DROPBITS(here.bits); + state->lens[state->have++] = here.val; + } + else { + if (here.val == 16) { + NEEDBITS(here.bits + 2); + DROPBITS(here.bits); + if (state->have == 0) { + strm->msg = (char *)"invalid bit length repeat"; + state->mode = BAD; + break; + } + len = state->lens[state->have - 1]; + copy = 3 + BITS(2); + DROPBITS(2); + } + else if (here.val == 17) { + NEEDBITS(here.bits + 3); + DROPBITS(here.bits); + len = 0; + copy = 3 + BITS(3); + DROPBITS(3); + } + else { + NEEDBITS(here.bits + 7); + DROPBITS(here.bits); + len = 0; + copy = 11 + BITS(7); + DROPBITS(7); + } + if (state->have + copy > state->nlen + state->ndist) { + strm->msg = (char *)"invalid bit length repeat"; + state->mode = BAD; + break; + } + while (copy--) + state->lens[state->have++] = (unsigned short)len; + } + } + + /* handle error breaks in 
while */ + if (state->mode == BAD) break; + + /* check for end-of-block code (better have one) */ + if (state->lens[256] == 0) { + strm->msg = (char *)"invalid code -- missing end-of-block"; + state->mode = BAD; + break; + } + + /* build code tables -- note: do not change the lenbits or distbits + values here (9 and 6) without reading the comments in inftrees.h + concerning the ENOUGH constants, which depend on those values */ + state->next = state->codes; + state->lencode = (const code FAR *)(state->next); + state->lenbits = 9; + ret = inflate_table(LENS, state->lens, state->nlen, &(state->next), + &(state->lenbits), state->work); + if (ret) { + strm->msg = (char *)"invalid literal/lengths set"; + state->mode = BAD; + break; + } + state->distcode = (const code FAR *)(state->next); + state->distbits = 6; + ret = inflate_table(DISTS, state->lens + state->nlen, state->ndist, + &(state->next), &(state->distbits), state->work); + if (ret) { + strm->msg = (char *)"invalid distances set"; + state->mode = BAD; + break; + } + Tracev((stderr, "inflate: codes ok\n")); + state->mode = LEN_; + if (flush == Z_TREES) goto inf_leave; + case LEN_: + state->mode = LEN; + case LEN: + if (have >= 6 && left >= 258) { + RESTORE(); + inflate_fast(strm, out); + LOAD(); + if (state->mode == TYPE) + state->back = -1; + break; + } + state->back = 0; + for (;;) { + here = state->lencode[BITS(state->lenbits)]; + if ((unsigned)(here.bits) <= bits) break; + PULLBYTE(); + } + if (here.op && (here.op & 0xf0) == 0) { + last = here; + for (;;) { + here = state->lencode[last.val + + (BITS(last.bits + last.op) >> last.bits)]; + if ((unsigned)(last.bits + here.bits) <= bits) break; + PULLBYTE(); + } + DROPBITS(last.bits); + state->back += last.bits; + } + DROPBITS(here.bits); + state->back += here.bits; + state->length = (unsigned)here.val; + if ((int)(here.op) == 0) { + Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? 
+ "inflate: literal '%c'\n" : + "inflate: literal 0x%02x\n", here.val)); + state->mode = LIT; + break; + } + if (here.op & 32) { + Tracevv((stderr, "inflate: end of block\n")); + state->back = -1; + state->mode = TYPE; + break; + } + if (here.op & 64) { + strm->msg = (char *)"invalid literal/length code"; + state->mode = BAD; + break; + } + state->extra = (unsigned)(here.op) & 15; + state->mode = LENEXT; + case LENEXT: + if (state->extra) { + NEEDBITS(state->extra); + state->length += BITS(state->extra); + DROPBITS(state->extra); + state->back += state->extra; + } + Tracevv((stderr, "inflate: length %u\n", state->length)); + state->was = state->length; + state->mode = DIST; + case DIST: + for (;;) { + here = state->distcode[BITS(state->distbits)]; + if ((unsigned)(here.bits) <= bits) break; + PULLBYTE(); + } + if ((here.op & 0xf0) == 0) { + last = here; + for (;;) { + here = state->distcode[last.val + + (BITS(last.bits + last.op) >> last.bits)]; + if ((unsigned)(last.bits + here.bits) <= bits) break; + PULLBYTE(); + } + DROPBITS(last.bits); + state->back += last.bits; + } + DROPBITS(here.bits); + state->back += here.bits; + if (here.op & 64) { + strm->msg = (char *)"invalid distance code"; + state->mode = BAD; + break; + } + state->offset = (unsigned)here.val; + state->extra = (unsigned)(here.op) & 15; + state->mode = DISTEXT; + case DISTEXT: + if (state->extra) { + NEEDBITS(state->extra); + state->offset += BITS(state->extra); + DROPBITS(state->extra); + state->back += state->extra; + } +#ifdef INFLATE_STRICT + if (state->offset > state->dmax) { + strm->msg = (char *)"invalid distance too far back"; + state->mode = BAD; + break; + } +#endif + Tracevv((stderr, "inflate: distance %u\n", state->offset)); + state->mode = MATCH; + case MATCH: + if (left == 0) goto inf_leave; + copy = out - left; + if (state->offset > copy) { /* copy from window */ + copy = state->offset - copy; + if (copy > state->whave) { + if (state->sane) { + strm->msg = (char *)"invalid distance 
too far back"; + state->mode = BAD; + break; + } +#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR + Trace((stderr, "inflate.c too far\n")); + copy -= state->whave; + if (copy > state->length) copy = state->length; + if (copy > left) copy = left; + left -= copy; + state->length -= copy; + do { + *put++ = 0; + } while (--copy); + if (state->length == 0) state->mode = LEN; + break; +#endif + } + if (copy > state->wnext) { + copy -= state->wnext; + from = state->window + (state->wsize - copy); + } + else + from = state->window + (state->wnext - copy); + if (copy > state->length) copy = state->length; + } + else { /* copy from output */ + from = put - state->offset; + copy = state->length; + } + if (copy > left) copy = left; + left -= copy; + state->length -= copy; + do { + *put++ = *from++; + } while (--copy); + if (state->length == 0) state->mode = LEN; + break; + case LIT: + if (left == 0) goto inf_leave; + *put++ = (unsigned char)(state->length); + left--; + state->mode = LEN; + break; + case CHECK: + if (state->wrap) { + NEEDBITS(32); + out -= left; + strm->total_out += out; + state->total += out; + if (out) + strm->adler = state->check = + UPDATE(state->check, put - out, out); + out = left; + if (( +#ifdef GUNZIP + state->flags ? 
hold : +#endif + ZSWAP32(hold)) != state->check) { + strm->msg = (char *)"incorrect data check"; + state->mode = BAD; + break; + } + INITBITS(); + Tracev((stderr, "inflate: check matches trailer\n")); + } +#ifdef GUNZIP + state->mode = LENGTH; + case LENGTH: + if (state->wrap && state->flags) { + NEEDBITS(32); + if (hold != (state->total & 0xffffffffUL)) { + strm->msg = (char *)"incorrect length check"; + state->mode = BAD; + break; + } + INITBITS(); + Tracev((stderr, "inflate: length matches trailer\n")); + } +#endif + state->mode = DONE; + case DONE: + ret = Z_STREAM_END; + goto inf_leave; + case BAD: + ret = Z_DATA_ERROR; + goto inf_leave; + case MEM: + return Z_MEM_ERROR; + case SYNC: + default: + return Z_STREAM_ERROR; + } + + /* + Return from inflate(), updating the total counts and the check value. + If there was no progress during the inflate() call, return a buffer + error. Call updatewindow() to create and/or update the window state. + Note: a memory error from inflate() is non-recoverable. + */ + inf_leave: + RESTORE(); + if (state->wsize || (out != strm->avail_out && state->mode < BAD && + (state->mode < CHECK || flush != Z_FINISH))) + if (updatewindow(strm, strm->next_out, out - strm->avail_out)) { + state->mode = MEM; + return Z_MEM_ERROR; + } + in -= strm->avail_in; + out -= strm->avail_out; + strm->total_in += in; + strm->total_out += out; + state->total += out; + if (state->wrap && out) + strm->adler = state->check = + UPDATE(state->check, strm->next_out - out, out); + strm->data_type = state->bits + (state->last ? 64 : 0) + + (state->mode == TYPE ? 128 : 0) + + (state->mode == LEN_ || state->mode == COPY_ ? 
256 : 0); + if (((in == 0 && out == 0) || flush == Z_FINISH) && ret == Z_OK) + ret = Z_BUF_ERROR; + return ret; +} + +int ZEXPORT inflateEnd(strm) +z_streamp strm; +{ + struct inflate_state FAR *state; + if (strm == Z_NULL || strm->state == Z_NULL || strm->zfree == (free_func)0) + return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; + if (state->window != Z_NULL) ZFREE(strm, state->window); + ZFREE(strm, strm->state); + strm->state = Z_NULL; + Tracev((stderr, "inflate: end\n")); + return Z_OK; +} + +int ZEXPORT inflateGetDictionary(strm, dictionary, dictLength) +z_streamp strm; +Bytef *dictionary; +uInt *dictLength; +{ + struct inflate_state FAR *state; + + /* check state */ + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; + + /* copy dictionary */ + if (state->whave && dictionary != Z_NULL) { + zmemcpy(dictionary, state->window + state->wnext, + state->whave - state->wnext); + zmemcpy(dictionary + state->whave - state->wnext, + state->window, state->wnext); + } + if (dictLength != Z_NULL) + *dictLength = state->whave; + return Z_OK; +} + +int ZEXPORT inflateSetDictionary(strm, dictionary, dictLength) +z_streamp strm; +const Bytef *dictionary; +uInt dictLength; +{ + struct inflate_state FAR *state; + unsigned long dictid; + int ret; + + /* check state */ + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; + if (state->wrap != 0 && state->mode != DICT) + return Z_STREAM_ERROR; + + /* check for correct dictionary identifier */ + if (state->mode == DICT) { + dictid = adler32(0L, Z_NULL, 0); + dictid = adler32(dictid, dictionary, dictLength); + if (dictid != state->check) + return Z_DATA_ERROR; + } + + /* copy dictionary to window using updatewindow(), which will amend the + existing dictionary if appropriate */ + ret = updatewindow(strm, dictionary + dictLength, dictLength); + if (ret) { + state->mode = MEM; 
+ return Z_MEM_ERROR; + } + state->havedict = 1; + Tracev((stderr, "inflate: dictionary set\n")); + return Z_OK; +} + +int ZEXPORT inflateGetHeader(strm, head) +z_streamp strm; +gz_headerp head; +{ + struct inflate_state FAR *state; + + /* check state */ + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; + if ((state->wrap & 2) == 0) return Z_STREAM_ERROR; + + /* save header structure */ + state->head = head; + head->done = 0; + return Z_OK; +} + +/* + Search buf[0..len-1] for the pattern: 0, 0, 0xff, 0xff. Return when found + or when out of input. When called, *have is the number of pattern bytes + found in order so far, in 0..3. On return *have is updated to the new + state. If on return *have equals four, then the pattern was found and the + return value is how many bytes were read including the last byte of the + pattern. If *have is less than four, then the pattern has not been found + yet and the return value is len. In the latter case, syncsearch() can be + called again with more data and the *have state. *have is initialized to + zero for the first call. + */ +local unsigned syncsearch(have, buf, len) +unsigned FAR *have; +const unsigned char FAR *buf; +unsigned len; +{ + unsigned got; + unsigned next; + + got = *have; + next = 0; + while (next < len && got < 4) { + if ((int)(buf[next]) == (got < 2 ? 
0 : 0xff)) + got++; + else if (buf[next]) + got = 0; + else + got = 4 - got; + next++; + } + *have = got; + return next; +} + +int ZEXPORT inflateSync(strm) +z_streamp strm; +{ + unsigned len; /* number of bytes to look at or looked at */ + unsigned long in, out; /* temporary to save total_in and total_out */ + unsigned char buf[4]; /* to restore bit buffer to byte string */ + struct inflate_state FAR *state; + + /* check parameters */ + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; + if (strm->avail_in == 0 && state->bits < 8) return Z_BUF_ERROR; + + /* if first time, start search in bit buffer */ + if (state->mode != SYNC) { + state->mode = SYNC; + state->hold <<= state->bits & 7; + state->bits -= state->bits & 7; + len = 0; + while (state->bits >= 8) { + buf[len++] = (unsigned char)(state->hold); + state->hold >>= 8; + state->bits -= 8; + } + state->have = 0; + syncsearch(&(state->have), buf, len); + } + + /* search available input */ + len = syncsearch(&(state->have), strm->next_in, strm->avail_in); + strm->avail_in -= len; + strm->next_in += len; + strm->total_in += len; + + /* return no joy or set up to restart inflate() on a new block */ + if (state->have != 4) return Z_DATA_ERROR; + in = strm->total_in; out = strm->total_out; + inflateReset(strm); + strm->total_in = in; strm->total_out = out; + state->mode = TYPE; + return Z_OK; +} + +/* + Returns true if inflate is currently at the end of a block generated by + Z_SYNC_FLUSH or Z_FULL_FLUSH. This function is used by one PPP + implementation to provide an additional safety check. PPP uses + Z_SYNC_FLUSH but removes the length bytes of the resulting empty stored + block. When decompressing, PPP checks that at the end of input packet, + inflate is waiting for these length bytes. 
+ */ +int ZEXPORT inflateSyncPoint(strm) +z_streamp strm; +{ + struct inflate_state FAR *state; + + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; + return state->mode == STORED && state->bits == 0; +} + +int ZEXPORT inflateCopy(dest, source) +z_streamp dest; +z_streamp source; +{ + struct inflate_state FAR *state; + struct inflate_state FAR *copy; + unsigned char FAR *window; + unsigned wsize; + + /* check input */ + if (dest == Z_NULL || source == Z_NULL || source->state == Z_NULL || + source->zalloc == (alloc_func)0 || source->zfree == (free_func)0) + return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)source->state; + + /* allocate space */ + copy = (struct inflate_state FAR *) + ZALLOC(source, 1, sizeof(struct inflate_state)); + if (copy == Z_NULL) return Z_MEM_ERROR; + window = Z_NULL; + if (state->window != Z_NULL) { + window = (unsigned char FAR *) + ZALLOC(source, 1U << state->wbits, sizeof(unsigned char)); + if (window == Z_NULL) { + ZFREE(source, copy); + return Z_MEM_ERROR; + } + } + + /* copy state */ + zmemcpy((voidpf)dest, (voidpf)source, sizeof(z_stream)); + zmemcpy((voidpf)copy, (voidpf)state, sizeof(struct inflate_state)); + if (state->lencode >= state->codes && + state->lencode <= state->codes + ENOUGH - 1) { + copy->lencode = copy->codes + (state->lencode - state->codes); + copy->distcode = copy->codes + (state->distcode - state->codes); + } + copy->next = copy->codes + (state->next - state->codes); + if (window != Z_NULL) { + wsize = 1U << state->wbits; + zmemcpy(window, state->window, wsize); + } + copy->window = window; + dest->state = (struct internal_state FAR *)copy; + return Z_OK; +} + +int ZEXPORT inflateUndermine(strm, subvert) +z_streamp strm; +int subvert; +{ + struct inflate_state FAR *state; + + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; + state->sane = !subvert; +#ifdef 
INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR + return Z_OK; +#else + state->sane = 1; + return Z_DATA_ERROR; +#endif +} + +long ZEXPORT inflateMark(strm) +z_streamp strm; +{ + struct inflate_state FAR *state; + + if (strm == Z_NULL || strm->state == Z_NULL) return -1L << 16; + state = (struct inflate_state FAR *)strm->state; + return ((long)(state->back) << 16) + + (state->mode == COPY ? state->length : + (state->mode == MATCH ? state->was - state->length : 0)); +} diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/inflate.h cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/inflate.h --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/inflate.h 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/inflate.h 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,122 @@ +/* inflate.h -- internal inflate state definition + * Copyright (C) 1995-2009 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* WARNING: this file should *not* be used by applications. It is + part of the implementation of the compression library and is + subject to change. Applications should only use zlib.h. + */ + +/* define NO_GZIP when compiling if you want to disable gzip header and + trailer decoding by inflate(). NO_GZIP would be used to avoid linking in + the crc code when it is not needed. For shared libraries, gzip decoding + should be left enabled. 
*/ +#ifndef NO_GZIP +# define GUNZIP +#endif + +/* Possible inflate modes between inflate() calls */ +typedef enum { + HEAD, /* i: waiting for magic header */ + FLAGS, /* i: waiting for method and flags (gzip) */ + TIME, /* i: waiting for modification time (gzip) */ + OS, /* i: waiting for extra flags and operating system (gzip) */ + EXLEN, /* i: waiting for extra length (gzip) */ + EXTRA, /* i: waiting for extra bytes (gzip) */ + NAME, /* i: waiting for end of file name (gzip) */ + COMMENT, /* i: waiting for end of comment (gzip) */ + HCRC, /* i: waiting for header crc (gzip) */ + DICTID, /* i: waiting for dictionary check value */ + DICT, /* waiting for inflateSetDictionary() call */ + TYPE, /* i: waiting for type bits, including last-flag bit */ + TYPEDO, /* i: same, but skip check to exit inflate on new block */ + STORED, /* i: waiting for stored size (length and complement) */ + COPY_, /* i/o: same as COPY below, but only first time in */ + COPY, /* i/o: waiting for input or output to copy stored block */ + TABLE, /* i: waiting for dynamic block table lengths */ + LENLENS, /* i: waiting for code length code lengths */ + CODELENS, /* i: waiting for length/lit and distance code lengths */ + LEN_, /* i: same as LEN below, but only first time in */ + LEN, /* i: waiting for length/lit/eob code */ + LENEXT, /* i: waiting for length extra bits */ + DIST, /* i: waiting for distance code */ + DISTEXT, /* i: waiting for distance extra bits */ + MATCH, /* o: waiting for output space to copy string */ + LIT, /* o: waiting for output space to write literal */ + CHECK, /* i: waiting for 32-bit check value */ + LENGTH, /* i: waiting for 32-bit length (gzip) */ + DONE, /* finished check, done -- remain here until reset */ + BAD, /* got a data error -- remain here until reset */ + MEM, /* got an inflate() memory error -- remain here until reset */ + SYNC /* looking for synchronization bytes to restart inflate() */ +} inflate_mode; + +/* + State transitions between above modes 
- + + (most modes can go to BAD or MEM on error -- not shown for clarity) + + Process header: + HEAD -> (gzip) or (zlib) or (raw) + (gzip) -> FLAGS -> TIME -> OS -> EXLEN -> EXTRA -> NAME -> COMMENT -> + HCRC -> TYPE + (zlib) -> DICTID or TYPE + DICTID -> DICT -> TYPE + (raw) -> TYPEDO + Read deflate blocks: + TYPE -> TYPEDO -> STORED or TABLE or LEN_ or CHECK + STORED -> COPY_ -> COPY -> TYPE + TABLE -> LENLENS -> CODELENS -> LEN_ + LEN_ -> LEN + Read deflate codes in fixed or dynamic block: + LEN -> LENEXT or LIT or TYPE + LENEXT -> DIST -> DISTEXT -> MATCH -> LEN + LIT -> LEN + Process trailer: + CHECK -> LENGTH -> DONE + */ + +/* state maintained between inflate() calls. Approximately 10K bytes. */ +struct inflate_state { + inflate_mode mode; /* current inflate mode */ + int last; /* true if processing last block */ + int wrap; /* bit 0 true for zlib, bit 1 true for gzip */ + int havedict; /* true if dictionary provided */ + int flags; /* gzip header method and flags (0 if zlib) */ + unsigned dmax; /* zlib header max distance (INFLATE_STRICT) */ + unsigned long check; /* protected copy of check value */ + unsigned long total; /* protected copy of output count */ + gz_headerp head; /* where to save gzip header information */ + /* sliding window */ + unsigned wbits; /* log base 2 of requested window size */ + unsigned wsize; /* window size or zero if not using window */ + unsigned whave; /* valid bytes in the window */ + unsigned wnext; /* window write index */ + unsigned char FAR *window; /* allocated sliding window, if needed */ + /* bit accumulator */ + unsigned long hold; /* input bit accumulator */ + unsigned bits; /* number of bits in "in" */ + /* for string and stored block copying */ + unsigned length; /* literal or length of data to copy */ + unsigned offset; /* distance back to copy string from */ + /* for table and code decoding */ + unsigned extra; /* extra bits needed */ + /* fixed and dynamic code tables */ + code const FAR *lencode; /* starting 
table for length/literal codes */ + code const FAR *distcode; /* starting table for distance codes */ + unsigned lenbits; /* index bits for lencode */ + unsigned distbits; /* index bits for distcode */ + /* dynamic table building */ + unsigned ncode; /* number of code length code lengths */ + unsigned nlen; /* number of length code lengths */ + unsigned ndist; /* number of distance code lengths */ + unsigned have; /* number of code lengths in lens[] */ + code FAR *next; /* next available space in codes[] */ + unsigned short lens[320]; /* temporary storage for code lengths */ + unsigned short work[288]; /* work area for code table building */ + code codes[ENOUGH]; /* space for code tables */ + int sane; /* if false, allow invalid distance too far */ + int back; /* bits back of last unprocessed length/lit */ + unsigned was; /* initial length of match */ +}; diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/inftrees.c cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/inftrees.c --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/inftrees.c 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/inftrees.c 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,306 @@ +/* inftrees.c -- generate Huffman trees for efficient decoding + * Copyright (C) 1995-2013 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +#include "zutil.h" +#include "inftrees.h" + +#define MAXBITS 15 + +const char inflate_copyright[] = + " inflate 1.2.8 Copyright 1995-2013 Mark Adler "; +/* + If you use the zlib library in a product, an acknowledgment is welcome + in the documentation of your product. If for some reason you cannot + include such an acknowledgment, I would appreciate that you keep this + copyright string in the executable of your product. + */ + +/* + Build a set of tables to decode the provided canonical Huffman code. + The code lengths are lens[0..codes-1]. 
The result starts at *table, + whose indices are 0..2^bits-1. work is a writable array of at least + lens shorts, which is used as a work area. type is the type of code + to be generated, CODES, LENS, or DISTS. On return, zero is success, + -1 is an invalid code, and +1 means that ENOUGH isn't enough. table + on return points to the next available entry's address. bits is the + requested root table index bits, and on return it is the actual root + table index bits. It will differ if the request is greater than the + longest code or if it is less than the shortest code. + */ +int ZLIB_INTERNAL inflate_table(type, lens, codes, table, bits, work) +codetype type; +unsigned short FAR *lens; +unsigned codes; +code FAR * FAR *table; +unsigned FAR *bits; +unsigned short FAR *work; +{ + unsigned len; /* a code's length in bits */ + unsigned sym; /* index of code symbols */ + unsigned min, max; /* minimum and maximum code lengths */ + unsigned root; /* number of index bits for root table */ + unsigned curr; /* number of index bits for current table */ + unsigned drop; /* code bits to drop for sub-table */ + int left; /* number of prefix codes available */ + unsigned used; /* code entries in table used */ + unsigned huff; /* Huffman code */ + unsigned incr; /* for incrementing code, index */ + unsigned fill; /* index for replicating entries */ + unsigned low; /* low bits for current root entry */ + unsigned mask; /* mask for low root bits */ + code here; /* table entry for duplication */ + code FAR *next; /* next available space in table */ + const unsigned short FAR *base; /* base value table to use */ + const unsigned short FAR *extra; /* extra bits table to use */ + int end; /* use base and extra for symbol > end */ + unsigned short count[MAXBITS+1]; /* number of codes of each length */ + unsigned short offs[MAXBITS+1]; /* offsets in table for each length */ + static const unsigned short lbase[31] = { /* Length codes 257..285 base */ + 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 
17, 19, 23, 27, 31, + 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; + static const unsigned short lext[31] = { /* Length codes 257..285 extra */ + 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18, + 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 72, 78}; + static const unsigned short dbase[32] = { /* Distance codes 0..29 base */ + 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, + 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, + 8193, 12289, 16385, 24577, 0, 0}; + static const unsigned short dext[32] = { /* Distance codes 0..29 extra */ + 16, 16, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, + 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, + 28, 28, 29, 29, 64, 64}; + + /* + Process a set of code lengths to create a canonical Huffman code. The + code lengths are lens[0..codes-1]. Each length corresponds to the + symbols 0..codes-1. The Huffman code is generated by first sorting the + symbols by length from short to long, and retaining the symbol order + for codes with equal lengths. Then the code starts with all zero bits + for the first code of the shortest length, and the codes are integer + increments for the same length, and zeros are appended as the length + increases. For the deflate format, these bits are stored backwards + from their more natural integer increment ordering, and so when the + decoding tables are built in the large loop below, the integer codes + are incremented backwards. + + This routine assumes, but does not check, that all of the entries in + lens[] are in the range 0..MAXBITS. The caller must assure this. + 1..MAXBITS is interpreted as that code length. zero means that that + symbol does not occur in this code. + + The codes are sorted by computing a count of codes for each length, + creating from that a table of starting indices for each length in the + sorted table, and then entering the symbols in order in the sorted + table. 
The sorted table is work[], with that space being provided by + the caller. + + The length counts are used for other purposes as well, i.e. finding + the minimum and maximum length codes, determining if there are any + codes at all, checking for a valid set of lengths, and looking ahead + at length counts to determine sub-table sizes when building the + decoding tables. + */ + + /* accumulate lengths for codes (assumes lens[] all in 0..MAXBITS) */ + for (len = 0; len <= MAXBITS; len++) + count[len] = 0; + for (sym = 0; sym < codes; sym++) + count[lens[sym]]++; + + /* bound code lengths, force root to be within code lengths */ + root = *bits; + for (max = MAXBITS; max >= 1; max--) + if (count[max] != 0) break; + if (root > max) root = max; + if (max == 0) { /* no symbols to code at all */ + here.op = (unsigned char)64; /* invalid code marker */ + here.bits = (unsigned char)1; + here.val = (unsigned short)0; + *(*table)++ = here; /* make a table to force an error */ + *(*table)++ = here; + *bits = 1; + return 0; /* no symbols, but wait for decoding to report error */ + } + for (min = 1; min < max; min++) + if (count[min] != 0) break; + if (root < min) root = min; + + /* check for an over-subscribed or incomplete set of lengths */ + left = 1; + for (len = 1; len <= MAXBITS; len++) { + left <<= 1; + left -= count[len]; + if (left < 0) return -1; /* over-subscribed */ + } + if (left > 0 && (type == CODES || max != 1)) + return -1; /* incomplete set */ + + /* generate offsets into symbol table for each length for sorting */ + offs[1] = 0; + for (len = 1; len < MAXBITS; len++) + offs[len + 1] = offs[len] + count[len]; + + /* sort symbols by length, by symbol order within each length */ + for (sym = 0; sym < codes; sym++) + if (lens[sym] != 0) work[offs[lens[sym]]++] = (unsigned short)sym; + + /* + Create and fill in decoding tables. In this loop, the table being + filled is at next and has curr index bits. The code being used is huff + with length len. 
That code is converted to an index by dropping drop + bits off of the bottom. For codes where len is less than drop + curr, + those top drop + curr - len bits are incremented through all values to + fill the table with replicated entries. + + root is the number of index bits for the root table. When len exceeds + root, sub-tables are created pointed to by the root entry with an index + of the low root bits of huff. This is saved in low to check for when a + new sub-table should be started. drop is zero when the root table is + being filled, and drop is root when sub-tables are being filled. + + When a new sub-table is needed, it is necessary to look ahead in the + code lengths to determine what size sub-table is needed. The length + counts are used for this, and so count[] is decremented as codes are + entered in the tables. + + used keeps track of how many table entries have been allocated from the + provided *table space. It is checked for LENS and DIST tables against + the constants ENOUGH_LENS and ENOUGH_DISTS to guard against changes in + the initial root table size constants. See the comments in inftrees.h + for more information. + + sym increments through all symbols, and the loop terminates when + all codes of length max, i.e. all codes, have been processed. This + routine permits incomplete codes, so another loop after this one fills + in the rest of the decoding tables with invalid code markers. 
+ */ + + /* set up for code type */ + switch (type) { + case CODES: + base = extra = work; /* dummy value--not used */ + end = 19; + break; + case LENS: + base = lbase; + base -= 257; + extra = lext; + extra -= 257; + end = 256; + break; + default: /* DISTS */ + base = dbase; + extra = dext; + end = -1; + } + + /* initialize state for loop */ + huff = 0; /* starting code */ + sym = 0; /* starting code symbol */ + len = min; /* starting code length */ + next = *table; /* current table to fill in */ + curr = root; /* current table index bits */ + drop = 0; /* current bits to drop from code for index */ + low = (unsigned)(-1); /* trigger new sub-table when len > root */ + used = 1U << root; /* use root table entries */ + mask = used - 1; /* mask for comparing low */ + + /* check available table space */ + if ((type == LENS && used > ENOUGH_LENS) || + (type == DISTS && used > ENOUGH_DISTS)) + return 1; + + /* process all codes and make table entries */ + for (;;) { + /* create table entry */ + here.bits = (unsigned char)(len - drop); + if ((int)(work[sym]) < end) { + here.op = (unsigned char)0; + here.val = work[sym]; + } + else if ((int)(work[sym]) > end) { + here.op = (unsigned char)(extra[work[sym]]); + here.val = base[work[sym]]; + } + else { + here.op = (unsigned char)(32 + 64); /* end of block */ + here.val = 0; + } + + /* replicate for those indices with low len bits equal to huff */ + incr = 1U << (len - drop); + fill = 1U << curr; + min = fill; /* save offset to next table */ + do { + fill -= incr; + next[(huff >> drop) + fill] = here; + } while (fill != 0); + + /* backwards increment the len-bit code huff */ + incr = 1U << (len - 1); + while (huff & incr) + incr >>= 1; + if (incr != 0) { + huff &= incr - 1; + huff += incr; + } + else + huff = 0; + + /* go to next symbol, update count, len */ + sym++; + if (--(count[len]) == 0) { + if (len == max) break; + len = lens[work[sym]]; + } + + /* create new sub-table if needed */ + if (len > root && (huff & mask) != 
low) { + /* if first time, transition to sub-tables */ + if (drop == 0) + drop = root; + + /* increment past last table */ + next += min; /* here min is 1 << curr */ + + /* determine length of next table */ + curr = len - drop; + left = (int)(1 << curr); + while (curr + drop < max) { + left -= count[curr + drop]; + if (left <= 0) break; + curr++; + left <<= 1; + } + + /* check for enough space */ + used += 1U << curr; + if ((type == LENS && used > ENOUGH_LENS) || + (type == DISTS && used > ENOUGH_DISTS)) + return 1; + + /* point entry in root table to sub-table */ + low = huff & mask; + (*table)[low].op = (unsigned char)curr; + (*table)[low].bits = (unsigned char)root; + (*table)[low].val = (unsigned short)(next - *table); + } + } + + /* fill in remaining table entry if code is incomplete (guaranteed to have + at most one remaining entry, since if the code is incomplete, the + maximum code length that was allowed to get this far is one bit) */ + if (huff != 0) { + here.op = (unsigned char)64; /* invalid code marker */ + here.bits = (unsigned char)(len - drop); + here.val = (unsigned short)0; + next[huff] = here; + } + + /* set return parameters */ + *table += used; + *bits = root; + return 0; +} diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/inftrees.h cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/inftrees.h --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/inftrees.h 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/inftrees.h 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,62 @@ +/* inftrees.h -- header to use inftrees.c + * Copyright (C) 1995-2005, 2010 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* WARNING: this file should *not* be used by applications. It is + part of the implementation of the compression library and is + subject to change. Applications should only use zlib.h. + */ + +/* Structure for decoding tables. 
Each entry provides either the + information needed to do the operation requested by the code that + indexed that table entry, or it provides a pointer to another + table that indexes more bits of the code. op indicates whether + the entry is a pointer to another table, a literal, a length or + distance, an end-of-block, or an invalid code. For a table + pointer, the low four bits of op is the number of index bits of + that table. For a length or distance, the low four bits of op + is the number of extra bits to get after the code. bits is + the number of bits in this code or part of the code to drop off + of the bit buffer. val is the actual byte to output in the case + of a literal, the base length or distance, or the offset from + the current table to the next table. Each entry is four bytes. */ +typedef struct { + unsigned char op; /* operation, extra bits, table bits */ + unsigned char bits; /* bits in this part of the code */ + unsigned short val; /* offset in table or code value */ +} code; + +/* op values as set by inflate_table(): + 00000000 - literal + 0000tttt - table link, tttt != 0 is the number of table index bits + 0001eeee - length or distance, eeee is the number of extra bits + 01100000 - end of block + 01000000 - invalid code + */ + +/* Maximum size of the dynamic table. The maximum number of code structures is + 1444, which is the sum of 852 for literal/length codes and 592 for distance + codes. These values were found by exhaustive searches using the program + examples/enough.c found in the zlib distribtution. The arguments to that + program are the number of symbols, the initial root table size, and the + maximum bit length of a code. "enough 286 9 15" for literal/length codes + returns returns 852, and "enough 30 6 15" for distance codes returns 592. + The initial root table size (9 or 6) is found in the fifth argument of the + inflate_table() calls in inflate.c and infback.c. 
If the root table size is + changed, then these maximum sizes would be need to be recalculated and + updated. */ +#define ENOUGH_LENS 852 +#define ENOUGH_DISTS 592 +#define ENOUGH (ENOUGH_LENS+ENOUGH_DISTS) + +/* Type of code to build for inflate_table() */ +typedef enum { + CODES, + LENS, + DISTS +} codetype; + +int ZLIB_INTERNAL inflate_table OF((codetype type, unsigned short FAR *lens, + unsigned codes, code FAR * FAR *table, + unsigned FAR *bits, unsigned short FAR *work)); diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/trees.c cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/trees.c --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/trees.c 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/trees.c 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,1226 @@ +/* trees.c -- output deflated data using Huffman coding + * Copyright (C) 1995-2012 Jean-loup Gailly + * detect_data_type() function provided freely by Cosmin Truta, 2006 + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* + * ALGORITHM + * + * The "deflation" process uses several Huffman trees. The more + * common source values are represented by shorter bit sequences. + * + * Each code tree is stored in a compressed form which is itself + * a Huffman encoding of the lengths of all the code strings (in + * ascending order by source values). The actual code strings are + * reconstructed from the lengths in the inflate process, as described + * in the deflate specification. + * + * REFERENCES + * + * Deutsch, L.P.,"'Deflate' Compressed Data Format Specification". + * Available in ftp.uu.net:/pub/archiving/zip/doc/deflate-1.1.doc + * + * Storer, James A. + * Data Compression: Methods and Theory, pp. 49-50. + * Computer Science Press, 1988. ISBN 0-7167-8156-5. + * + * Sedgewick, R. + * Algorithms, p290. + * Addison-Wesley, 1983. ISBN 0-201-06672-6. 
+ */ + +/* @(#) $Id$ */ + +/* #define GEN_TREES_H */ + +#include "deflate.h" + +#ifdef DEBUG +# include +#endif + +/* =========================================================================== + * Constants + */ + +#define MAX_BL_BITS 7 +/* Bit length codes must not exceed MAX_BL_BITS bits */ + +#define END_BLOCK 256 +/* end of block literal code */ + +#define REP_3_6 16 +/* repeat previous bit length 3-6 times (2 bits of repeat count) */ + +#define REPZ_3_10 17 +/* repeat a zero length 3-10 times (3 bits of repeat count) */ + +#define REPZ_11_138 18 +/* repeat a zero length 11-138 times (7 bits of repeat count) */ + +local const int extra_lbits[LENGTH_CODES] /* extra bits for each length code */ + = {0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0}; + +local const int extra_dbits[D_CODES] /* extra bits for each distance code */ + = {0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13}; + +local const int extra_blbits[BL_CODES]/* extra bits for each bit length code */ + = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7}; + +local const uch bl_order[BL_CODES] + = {16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15}; +/* The lengths of the bit length codes are sent in order of decreasing + * probability, to avoid transmitting the lengths for unused bit length codes. + */ + +/* =========================================================================== + * Local data. These are initialized only once. + */ + +#define DIST_CODE_LEN 512 /* see definition of array dist_code below */ + +#if defined(GEN_TREES_H) || !defined(STDC) +/* non ANSI compilers may not accept trees.h */ + +local ct_data static_ltree[L_CODES+2]; +/* The static literal tree. Since the bit lengths are imposed, there is no + * need for the L_CODES extra codes used during heap construction. However + * The codes 286 and 287 are needed to build a canonical tree (see _tr_init + * below). + */ + +local ct_data static_dtree[D_CODES]; +/* The static distance tree. 
(Actually a trivial tree since all codes use + * 5 bits.) + */ + +uch _dist_code[DIST_CODE_LEN]; +/* Distance codes. The first 256 values correspond to the distances + * 3 .. 258, the last 256 values correspond to the top 8 bits of + * the 15 bit distances. + */ + +uch _length_code[MAX_MATCH-MIN_MATCH+1]; +/* length code for each normalized match length (0 == MIN_MATCH) */ + +local int base_length[LENGTH_CODES]; +/* First normalized length for each code (0 = MIN_MATCH) */ + +local int base_dist[D_CODES]; +/* First normalized distance for each code (0 = distance of 1) */ + +#else +# include "trees.h" +#endif /* GEN_TREES_H */ + +struct static_tree_desc_s { + const ct_data *static_tree; /* static tree or NULL */ + const intf *extra_bits; /* extra bits for each code or NULL */ + int extra_base; /* base index for extra_bits */ + int elems; /* max number of elements in the tree */ + int max_length; /* max bit length for the codes */ +}; + +local static_tree_desc static_l_desc = +{static_ltree, extra_lbits, LITERALS+1, L_CODES, MAX_BITS}; + +local static_tree_desc static_d_desc = +{static_dtree, extra_dbits, 0, D_CODES, MAX_BITS}; + +local static_tree_desc static_bl_desc = +{(const ct_data *)0, extra_blbits, 0, BL_CODES, MAX_BL_BITS}; + +/* =========================================================================== + * Local (static) routines in this file. 
+ */ + +local void tr_static_init OF((void)); +local void init_block OF((deflate_state *s)); +local void pqdownheap OF((deflate_state *s, ct_data *tree, int k)); +local void gen_bitlen OF((deflate_state *s, tree_desc *desc)); +local void gen_codes OF((ct_data *tree, int max_code, ushf *bl_count)); +local void build_tree OF((deflate_state *s, tree_desc *desc)); +local void scan_tree OF((deflate_state *s, ct_data *tree, int max_code)); +local void send_tree OF((deflate_state *s, ct_data *tree, int max_code)); +local int build_bl_tree OF((deflate_state *s)); +local void send_all_trees OF((deflate_state *s, int lcodes, int dcodes, + int blcodes)); +local void compress_block OF((deflate_state *s, const ct_data *ltree, + const ct_data *dtree)); +local int detect_data_type OF((deflate_state *s)); +local unsigned bi_reverse OF((unsigned value, int length)); +local void bi_windup OF((deflate_state *s)); +local void bi_flush OF((deflate_state *s)); +local void copy_block OF((deflate_state *s, charf *buf, unsigned len, + int header)); + +#ifdef GEN_TREES_H +local void gen_trees_header OF((void)); +#endif + +#ifndef DEBUG +# define send_code(s, c, tree) send_bits(s, tree[c].Code, tree[c].Len) + /* Send a code of the given tree. c and tree must not have side effects */ + +#else /* DEBUG */ +# define send_code(s, c, tree) \ + { if (z_verbose>2) fprintf(stderr,"\ncd %3d ",(c)); \ + send_bits(s, tree[c].Code, tree[c].Len); } +#endif + +/* =========================================================================== + * Output a short LSB first on the stream. + * IN assertion: there is enough room in pendingBuf. + */ +#define put_short(s, w) { \ + put_byte(s, (uch)((w) & 0xff)); \ + put_byte(s, (uch)((ush)(w) >> 8)); \ +} + +/* =========================================================================== + * Send a value on a given number of bits. + * IN assertion: length <= 16 and value fits in length bits. 
+ */ +#ifdef DEBUG +local void send_bits OF((deflate_state *s, int value, int length)); + +local void send_bits(s, value, length) + deflate_state *s; + int value; /* value to send */ + int length; /* number of bits */ +{ + Tracevv((stderr," l %2d v %4x ", length, value)); + Assert(length > 0 && length <= 15, "invalid length"); + s->bits_sent += (ulg)length; + + /* If not enough room in bi_buf, use (valid) bits from bi_buf and + * (16 - bi_valid) bits from value, leaving (width - (16-bi_valid)) + * unused bits in value. + */ + if (s->bi_valid > (int)Buf_size - length) { + s->bi_buf |= (ush)value << s->bi_valid; + put_short(s, s->bi_buf); + s->bi_buf = (ush)value >> (Buf_size - s->bi_valid); + s->bi_valid += length - Buf_size; + } else { + s->bi_buf |= (ush)value << s->bi_valid; + s->bi_valid += length; + } +} +#else /* !DEBUG */ + +#define send_bits(s, value, length) \ +{ int len = length;\ + if (s->bi_valid > (int)Buf_size - len) {\ + int val = value;\ + s->bi_buf |= (ush)val << s->bi_valid;\ + put_short(s, s->bi_buf);\ + s->bi_buf = (ush)val >> (Buf_size - s->bi_valid);\ + s->bi_valid += len - Buf_size;\ + } else {\ + s->bi_buf |= (ush)(value) << s->bi_valid;\ + s->bi_valid += len;\ + }\ +} +#endif /* DEBUG */ + + +/* the arguments must not have side effects */ + +/* =========================================================================== + * Initialize the various 'constant' tables. 
+ */ +local void tr_static_init() +{ +#if defined(GEN_TREES_H) || !defined(STDC) + static int static_init_done = 0; + int n; /* iterates over tree elements */ + int bits; /* bit counter */ + int length; /* length value */ + int code; /* code value */ + int dist; /* distance index */ + ush bl_count[MAX_BITS+1]; + /* number of codes at each bit length for an optimal tree */ + + if (static_init_done) return; + + /* For some embedded targets, global variables are not initialized: */ +#ifdef NO_INIT_GLOBAL_POINTERS + static_l_desc.static_tree = static_ltree; + static_l_desc.extra_bits = extra_lbits; + static_d_desc.static_tree = static_dtree; + static_d_desc.extra_bits = extra_dbits; + static_bl_desc.extra_bits = extra_blbits; +#endif + + /* Initialize the mapping length (0..255) -> length code (0..28) */ + length = 0; + for (code = 0; code < LENGTH_CODES-1; code++) { + base_length[code] = length; + for (n = 0; n < (1< dist code (0..29) */ + dist = 0; + for (code = 0 ; code < 16; code++) { + base_dist[code] = dist; + for (n = 0; n < (1<>= 7; /* from now on, all distances are divided by 128 */ + for ( ; code < D_CODES; code++) { + base_dist[code] = dist << 7; + for (n = 0; n < (1<<(extra_dbits[code]-7)); n++) { + _dist_code[256 + dist++] = (uch)code; + } + } + Assert (dist == 256, "tr_static_init: 256+dist != 512"); + + /* Construct the codes of the static literal tree */ + for (bits = 0; bits <= MAX_BITS; bits++) bl_count[bits] = 0; + n = 0; + while (n <= 143) static_ltree[n++].Len = 8, bl_count[8]++; + while (n <= 255) static_ltree[n++].Len = 9, bl_count[9]++; + while (n <= 279) static_ltree[n++].Len = 7, bl_count[7]++; + while (n <= 287) static_ltree[n++].Len = 8, bl_count[8]++; + /* Codes 286 and 287 do not exist, but we must include them in the + * tree construction to get a canonical Huffman tree (longest code + * all ones) + */ + gen_codes((ct_data *)static_ltree, L_CODES+1, bl_count); + + /* The static distance tree is trivial: */ + for (n = 0; n < D_CODES; n++) 
{ + static_dtree[n].Len = 5; + static_dtree[n].Code = bi_reverse((unsigned)n, 5); + } + static_init_done = 1; + +# ifdef GEN_TREES_H + gen_trees_header(); +# endif +#endif /* defined(GEN_TREES_H) || !defined(STDC) */ +} + +/* =========================================================================== + * Genererate the file trees.h describing the static trees. + */ +#ifdef GEN_TREES_H +# ifndef DEBUG +# include +# endif + +# define SEPARATOR(i, last, width) \ + ((i) == (last)? "\n};\n\n" : \ + ((i) % (width) == (width)-1 ? ",\n" : ", ")) + +void gen_trees_header() +{ + FILE *header = fopen("trees.h", "w"); + int i; + + Assert (header != NULL, "Can't open trees.h"); + fprintf(header, + "/* header created automatically with -DGEN_TREES_H */\n\n"); + + fprintf(header, "local const ct_data static_ltree[L_CODES+2] = {\n"); + for (i = 0; i < L_CODES+2; i++) { + fprintf(header, "{{%3u},{%3u}}%s", static_ltree[i].Code, + static_ltree[i].Len, SEPARATOR(i, L_CODES+1, 5)); + } + + fprintf(header, "local const ct_data static_dtree[D_CODES] = {\n"); + for (i = 0; i < D_CODES; i++) { + fprintf(header, "{{%2u},{%2u}}%s", static_dtree[i].Code, + static_dtree[i].Len, SEPARATOR(i, D_CODES-1, 5)); + } + + fprintf(header, "const uch ZLIB_INTERNAL _dist_code[DIST_CODE_LEN] = {\n"); + for (i = 0; i < DIST_CODE_LEN; i++) { + fprintf(header, "%2u%s", _dist_code[i], + SEPARATOR(i, DIST_CODE_LEN-1, 20)); + } + + fprintf(header, + "const uch ZLIB_INTERNAL _length_code[MAX_MATCH-MIN_MATCH+1]= {\n"); + for (i = 0; i < MAX_MATCH-MIN_MATCH+1; i++) { + fprintf(header, "%2u%s", _length_code[i], + SEPARATOR(i, MAX_MATCH-MIN_MATCH, 20)); + } + + fprintf(header, "local const int base_length[LENGTH_CODES] = {\n"); + for (i = 0; i < LENGTH_CODES; i++) { + fprintf(header, "%1u%s", base_length[i], + SEPARATOR(i, LENGTH_CODES-1, 20)); + } + + fprintf(header, "local const int base_dist[D_CODES] = {\n"); + for (i = 0; i < D_CODES; i++) { + fprintf(header, "%5u%s", base_dist[i], + SEPARATOR(i, D_CODES-1, 
10)); + } + + fclose(header); +} +#endif /* GEN_TREES_H */ + +/* =========================================================================== + * Initialize the tree data structures for a new zlib stream. + */ +void ZLIB_INTERNAL _tr_init(s) + deflate_state *s; +{ + tr_static_init(); + + s->l_desc.dyn_tree = s->dyn_ltree; + s->l_desc.stat_desc = &static_l_desc; + + s->d_desc.dyn_tree = s->dyn_dtree; + s->d_desc.stat_desc = &static_d_desc; + + s->bl_desc.dyn_tree = s->bl_tree; + s->bl_desc.stat_desc = &static_bl_desc; + + s->bi_buf = 0; + s->bi_valid = 0; +#ifdef DEBUG + s->compressed_len = 0L; + s->bits_sent = 0L; +#endif + + /* Initialize the first block of the first file: */ + init_block(s); +} + +/* =========================================================================== + * Initialize a new block. + */ +local void init_block(s) + deflate_state *s; +{ + int n; /* iterates over tree elements */ + + /* Initialize the trees. */ + for (n = 0; n < L_CODES; n++) s->dyn_ltree[n].Freq = 0; + for (n = 0; n < D_CODES; n++) s->dyn_dtree[n].Freq = 0; + for (n = 0; n < BL_CODES; n++) s->bl_tree[n].Freq = 0; + + s->dyn_ltree[END_BLOCK].Freq = 1; + s->opt_len = s->static_len = 0L; + s->last_lit = s->matches = 0; +} + +#define SMALLEST 1 +/* Index within the heap array of least frequent node in the Huffman tree */ + + +/* =========================================================================== + * Remove the smallest element from the heap and recreate the heap with + * one less element. Updates heap and heap_len. + */ +#define pqremove(s, tree, top) \ +{\ + top = s->heap[SMALLEST]; \ + s->heap[SMALLEST] = s->heap[s->heap_len--]; \ + pqdownheap(s, tree, SMALLEST); \ +} + +/* =========================================================================== + * Compares to subtrees, using the tree depth as tie breaker when + * the subtrees have equal frequency. This minimizes the worst case length. 
+ */ +#define smaller(tree, n, m, depth) \ + (tree[n].Freq < tree[m].Freq || \ + (tree[n].Freq == tree[m].Freq && depth[n] <= depth[m])) + +/* =========================================================================== + * Restore the heap property by moving down the tree starting at node k, + * exchanging a node with the smallest of its two sons if necessary, stopping + * when the heap property is re-established (each father smaller than its + * two sons). + */ +local void pqdownheap(s, tree, k) + deflate_state *s; + ct_data *tree; /* the tree to restore */ + int k; /* node to move down */ +{ + int v = s->heap[k]; + int j = k << 1; /* left son of k */ + while (j <= s->heap_len) { + /* Set j to the smallest of the two sons: */ + if (j < s->heap_len && + smaller(tree, s->heap[j+1], s->heap[j], s->depth)) { + j++; + } + /* Exit if v is smaller than both sons */ + if (smaller(tree, v, s->heap[j], s->depth)) break; + + /* Exchange v with the smallest son */ + s->heap[k] = s->heap[j]; k = j; + + /* And continue down the tree, setting j to the left son of k */ + j <<= 1; + } + s->heap[k] = v; +} + +/* =========================================================================== + * Compute the optimal bit lengths for a tree and update the total bit length + * for the current block. + * IN assertion: the fields freq and dad are set, heap[heap_max] and + * above are the tree nodes sorted by increasing frequency. + * OUT assertions: the field len is set to the optimal bit length, the + * array bl_count contains the frequencies for each bit length. + * The length opt_len is updated; static_len is also updated if stree is + * not null. 
+ */ +local void gen_bitlen(s, desc) + deflate_state *s; + tree_desc *desc; /* the tree descriptor */ +{ + ct_data *tree = desc->dyn_tree; + int max_code = desc->max_code; + const ct_data *stree = desc->stat_desc->static_tree; + const intf *extra = desc->stat_desc->extra_bits; + int base = desc->stat_desc->extra_base; + int max_length = desc->stat_desc->max_length; + int h; /* heap index */ + int n, m; /* iterate over the tree elements */ + int bits; /* bit length */ + int xbits; /* extra bits */ + ush f; /* frequency */ + int overflow = 0; /* number of elements with bit length too large */ + + for (bits = 0; bits <= MAX_BITS; bits++) s->bl_count[bits] = 0; + + /* In a first pass, compute the optimal bit lengths (which may + * overflow in the case of the bit length tree). + */ + tree[s->heap[s->heap_max]].Len = 0; /* root of the heap */ + + for (h = s->heap_max+1; h < HEAP_SIZE; h++) { + n = s->heap[h]; + bits = tree[tree[n].Dad].Len + 1; + if (bits > max_length) bits = max_length, overflow++; + tree[n].Len = (ush)bits; + /* We overwrite tree[n].Dad which is no longer needed */ + + if (n > max_code) continue; /* not a leaf node */ + + s->bl_count[bits]++; + xbits = 0; + if (n >= base) xbits = extra[n-base]; + f = tree[n].Freq; + s->opt_len += (ulg)f * (bits + xbits); + if (stree) s->static_len += (ulg)f * (stree[n].Len + xbits); + } + if (overflow == 0) return; + + Trace((stderr,"\nbit length overflow\n")); + /* This happens for example on obj2 and pic of the Calgary corpus */ + + /* Find the first bit length which could increase: */ + do { + bits = max_length-1; + while (s->bl_count[bits] == 0) bits--; + s->bl_count[bits]--; /* move one leaf down the tree */ + s->bl_count[bits+1] += 2; /* move one overflow item as its brother */ + s->bl_count[max_length]--; + /* The brother of the overflow item also moves one step up, + * but this does not affect bl_count[max_length] + */ + overflow -= 2; + } while (overflow > 0); + + /* Now recompute all bit lengths, scanning in 
increasing frequency. + * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all + * lengths instead of fixing only the wrong ones. This idea is taken + * from 'ar' written by Haruhiko Okumura.) + */ + for (bits = max_length; bits != 0; bits--) { + n = s->bl_count[bits]; + while (n != 0) { + m = s->heap[--h]; + if (m > max_code) continue; + if ((unsigned) tree[m].Len != (unsigned) bits) { + Trace((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits)); + s->opt_len += ((long)bits - (long)tree[m].Len) + *(long)tree[m].Freq; + tree[m].Len = (ush)bits; + } + n--; + } + } +} + +/* =========================================================================== + * Generate the codes for a given tree and bit counts (which need not be + * optimal). + * IN assertion: the array bl_count contains the bit length statistics for + * the given tree and the field len is set for all tree elements. + * OUT assertion: the field code is set for all tree elements of non + * zero code length. + */ +local void gen_codes (tree, max_code, bl_count) + ct_data *tree; /* the tree to decorate */ + int max_code; /* largest code with non zero frequency */ + ushf *bl_count; /* number of codes at each bit length */ +{ + ush next_code[MAX_BITS+1]; /* next code value for each bit length */ + ush code = 0; /* running code value */ + int bits; /* bit index */ + int n; /* code index */ + + /* The distribution counts are first used to generate the code values + * without bit reversal. + */ + for (bits = 1; bits <= MAX_BITS; bits++) { + next_code[bits] = code = (code + bl_count[bits-1]) << 1; + } + /* Check that the bit counts in bl_count are consistent. The last code + * must be all ones. 
+ */ + Assert (code + bl_count[MAX_BITS]-1 == (1<dyn_tree; + const ct_data *stree = desc->stat_desc->static_tree; + int elems = desc->stat_desc->elems; + int n, m; /* iterate over heap elements */ + int max_code = -1; /* largest code with non zero frequency */ + int node; /* new node being created */ + + /* Construct the initial heap, with least frequent element in + * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1]. + * heap[0] is not used. + */ + s->heap_len = 0, s->heap_max = HEAP_SIZE; + + for (n = 0; n < elems; n++) { + if (tree[n].Freq != 0) { + s->heap[++(s->heap_len)] = max_code = n; + s->depth[n] = 0; + } else { + tree[n].Len = 0; + } + } + + /* The pkzip format requires that at least one distance code exists, + * and that at least one bit should be sent even if there is only one + * possible code. So to avoid special checks later on we force at least + * two codes of non zero frequency. + */ + while (s->heap_len < 2) { + node = s->heap[++(s->heap_len)] = (max_code < 2 ? ++max_code : 0); + tree[node].Freq = 1; + s->depth[node] = 0; + s->opt_len--; if (stree) s->static_len -= stree[node].Len; + /* node is 0 or 1 so it does not have extra bits */ + } + desc->max_code = max_code; + + /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree, + * establish sub-heaps of increasing lengths: + */ + for (n = s->heap_len/2; n >= 1; n--) pqdownheap(s, tree, n); + + /* Construct the Huffman tree by repeatedly combining the least two + * frequent nodes. + */ + node = elems; /* next internal node of the tree */ + do { + pqremove(s, tree, n); /* n = node of least frequency */ + m = s->heap[SMALLEST]; /* m = node of next least frequency */ + + s->heap[--(s->heap_max)] = n; /* keep the nodes sorted by frequency */ + s->heap[--(s->heap_max)] = m; + + /* Create a new node father of n and m */ + tree[node].Freq = tree[n].Freq + tree[m].Freq; + s->depth[node] = (uch)((s->depth[n] >= s->depth[m] ? 
+ s->depth[n] : s->depth[m]) + 1); + tree[n].Dad = tree[m].Dad = (ush)node; +#ifdef DUMP_BL_TREE + if (tree == s->bl_tree) { + fprintf(stderr,"\nnode %d(%d), sons %d(%d) %d(%d)", + node, tree[node].Freq, n, tree[n].Freq, m, tree[m].Freq); + } +#endif + /* and insert the new node in the heap */ + s->heap[SMALLEST] = node++; + pqdownheap(s, tree, SMALLEST); + + } while (s->heap_len >= 2); + + s->heap[--(s->heap_max)] = s->heap[SMALLEST]; + + /* At this point, the fields freq and dad are set. We can now + * generate the bit lengths. + */ + gen_bitlen(s, (tree_desc *)desc); + + /* The field len is now set, we can generate the bit codes */ + gen_codes ((ct_data *)tree, max_code, s->bl_count); +} + +/* =========================================================================== + * Scan a literal or distance tree to determine the frequencies of the codes + * in the bit length tree. + */ +local void scan_tree (s, tree, max_code) + deflate_state *s; + ct_data *tree; /* the tree to be scanned */ + int max_code; /* and its largest code of non zero frequency */ +{ + int n; /* iterates over all tree elements */ + int prevlen = -1; /* last emitted length */ + int curlen; /* length of current code */ + int nextlen = tree[0].Len; /* length of next code */ + int count = 0; /* repeat count of the current code */ + int max_count = 7; /* max repeat count */ + int min_count = 4; /* min repeat count */ + + if (nextlen == 0) max_count = 138, min_count = 3; + tree[max_code+1].Len = (ush)0xffff; /* guard */ + + for (n = 0; n <= max_code; n++) { + curlen = nextlen; nextlen = tree[n+1].Len; + if (++count < max_count && curlen == nextlen) { + continue; + } else if (count < min_count) { + s->bl_tree[curlen].Freq += count; + } else if (curlen != 0) { + if (curlen != prevlen) s->bl_tree[curlen].Freq++; + s->bl_tree[REP_3_6].Freq++; + } else if (count <= 10) { + s->bl_tree[REPZ_3_10].Freq++; + } else { + s->bl_tree[REPZ_11_138].Freq++; + } + count = 0; prevlen = curlen; + if (nextlen == 0) { + 
max_count = 138, min_count = 3; + } else if (curlen == nextlen) { + max_count = 6, min_count = 3; + } else { + max_count = 7, min_count = 4; + } + } +} + +/* =========================================================================== + * Send a literal or distance tree in compressed form, using the codes in + * bl_tree. + */ +local void send_tree (s, tree, max_code) + deflate_state *s; + ct_data *tree; /* the tree to be scanned */ + int max_code; /* and its largest code of non zero frequency */ +{ + int n; /* iterates over all tree elements */ + int prevlen = -1; /* last emitted length */ + int curlen; /* length of current code */ + int nextlen = tree[0].Len; /* length of next code */ + int count = 0; /* repeat count of the current code */ + int max_count = 7; /* max repeat count */ + int min_count = 4; /* min repeat count */ + + /* tree[max_code+1].Len = -1; */ /* guard already set */ + if (nextlen == 0) max_count = 138, min_count = 3; + + for (n = 0; n <= max_code; n++) { + curlen = nextlen; nextlen = tree[n+1].Len; + if (++count < max_count && curlen == nextlen) { + continue; + } else if (count < min_count) { + do { send_code(s, curlen, s->bl_tree); } while (--count != 0); + + } else if (curlen != 0) { + if (curlen != prevlen) { + send_code(s, curlen, s->bl_tree); count--; + } + Assert(count >= 3 && count <= 6, " 3_6?"); + send_code(s, REP_3_6, s->bl_tree); send_bits(s, count-3, 2); + + } else if (count <= 10) { + send_code(s, REPZ_3_10, s->bl_tree); send_bits(s, count-3, 3); + + } else { + send_code(s, REPZ_11_138, s->bl_tree); send_bits(s, count-11, 7); + } + count = 0; prevlen = curlen; + if (nextlen == 0) { + max_count = 138, min_count = 3; + } else if (curlen == nextlen) { + max_count = 6, min_count = 3; + } else { + max_count = 7, min_count = 4; + } + } +} + +/* =========================================================================== + * Construct the Huffman tree for the bit lengths and return the index in + * bl_order of the last bit length code to 
send. + */ +local int build_bl_tree(s) + deflate_state *s; +{ + int max_blindex; /* index of last bit length code of non zero freq */ + + /* Determine the bit length frequencies for literal and distance trees */ + scan_tree(s, (ct_data *)s->dyn_ltree, s->l_desc.max_code); + scan_tree(s, (ct_data *)s->dyn_dtree, s->d_desc.max_code); + + /* Build the bit length tree: */ + build_tree(s, (tree_desc *)(&(s->bl_desc))); + /* opt_len now includes the length of the tree representations, except + * the lengths of the bit lengths codes and the 5+5+4 bits for the counts. + */ + + /* Determine the number of bit length codes to send. The pkzip format + * requires that at least 4 bit length codes be sent. (appnote.txt says + * 3 but the actual value used is 4.) + */ + for (max_blindex = BL_CODES-1; max_blindex >= 3; max_blindex--) { + if (s->bl_tree[bl_order[max_blindex]].Len != 0) break; + } + /* Update opt_len to include the bit length tree and counts */ + s->opt_len += 3*(max_blindex+1) + 5+5+4; + Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld", + s->opt_len, s->static_len)); + + return max_blindex; +} + +/* =========================================================================== + * Send the header for a block using dynamic Huffman trees: the counts, the + * lengths of the bit length codes, the literal tree and the distance tree. + * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4. 
+ */ +local void send_all_trees(s, lcodes, dcodes, blcodes) + deflate_state *s; + int lcodes, dcodes, blcodes; /* number of codes for each tree */ +{ + int rank; /* index in bl_order */ + + Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes"); + Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES, + "too many codes"); + Tracev((stderr, "\nbl counts: ")); + send_bits(s, lcodes-257, 5); /* not +255 as stated in appnote.txt */ + send_bits(s, dcodes-1, 5); + send_bits(s, blcodes-4, 4); /* not -3 as stated in appnote.txt */ + for (rank = 0; rank < blcodes; rank++) { + Tracev((stderr, "\nbl code %2d ", bl_order[rank])); + send_bits(s, s->bl_tree[bl_order[rank]].Len, 3); + } + Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent)); + + send_tree(s, (ct_data *)s->dyn_ltree, lcodes-1); /* literal tree */ + Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent)); + + send_tree(s, (ct_data *)s->dyn_dtree, dcodes-1); /* distance tree */ + Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent)); +} + +/* =========================================================================== + * Send a stored block + */ +void ZLIB_INTERNAL _tr_stored_block(s, buf, stored_len, last) + deflate_state *s; + charf *buf; /* input block */ + ulg stored_len; /* length of input block */ + int last; /* one if this is the last block for a file */ +{ + send_bits(s, (STORED_BLOCK<<1)+last, 3); /* send block type */ +#ifdef DEBUG + s->compressed_len = (s->compressed_len + 3 + 7) & (ulg)~7L; + s->compressed_len += (stored_len + 4) << 3; +#endif + copy_block(s, buf, (unsigned)stored_len, 1); /* with header */ +} + +/* =========================================================================== + * Flush the bits in the bit buffer to pending output (leaves at most 7 bits) + */ +void ZLIB_INTERNAL _tr_flush_bits(s) + deflate_state *s; +{ + bi_flush(s); +} + +/* =========================================================================== + * Send one empty static block to 
give enough lookahead for inflate. + * This takes 10 bits, of which 7 may remain in the bit buffer. + */ +void ZLIB_INTERNAL _tr_align(s) + deflate_state *s; +{ + send_bits(s, STATIC_TREES<<1, 3); + send_code(s, END_BLOCK, static_ltree); +#ifdef DEBUG + s->compressed_len += 10L; /* 3 for block type, 7 for EOB */ +#endif + bi_flush(s); +} + +/* =========================================================================== + * Determine the best encoding for the current block: dynamic trees, static + * trees or store, and output the encoded block to the zip file. + */ +void ZLIB_INTERNAL _tr_flush_block(s, buf, stored_len, last) + deflate_state *s; + charf *buf; /* input block, or NULL if too old */ + ulg stored_len; /* length of input block */ + int last; /* one if this is the last block for a file */ +{ + ulg opt_lenb, static_lenb; /* opt_len and static_len in bytes */ + int max_blindex = 0; /* index of last bit length code of non zero freq */ + + /* Build the Huffman trees unless a stored block is forced */ + if (s->level > 0) { + + /* Check if the file is binary or text */ + if (s->strm->data_type == Z_UNKNOWN) + s->strm->data_type = detect_data_type(s); + + /* Construct the literal and distance trees */ + build_tree(s, (tree_desc *)(&(s->l_desc))); + Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len, + s->static_len)); + + build_tree(s, (tree_desc *)(&(s->d_desc))); + Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len, + s->static_len)); + /* At this point, opt_len and static_len are the total bit lengths of + * the compressed block data, excluding the tree representations. + */ + + /* Build the bit length tree for the above two trees, and get the index + * in bl_order of the last bit length code to send. + */ + max_blindex = build_bl_tree(s); + + /* Determine the best encoding. Compute the block lengths in bytes. 
*/ + opt_lenb = (s->opt_len+3+7)>>3; + static_lenb = (s->static_len+3+7)>>3; + + Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ", + opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len, + s->last_lit)); + + if (static_lenb <= opt_lenb) opt_lenb = static_lenb; + + } else { + Assert(buf != (char*)0, "lost buf"); + opt_lenb = static_lenb = stored_len + 5; /* force a stored block */ + } + +#ifdef FORCE_STORED + if (buf != (char*)0) { /* force stored block */ +#else + if (stored_len+4 <= opt_lenb && buf != (char*)0) { + /* 4: two words for the lengths */ +#endif + /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE. + * Otherwise we can't have processed more than WSIZE input bytes since + * the last block flush, because compression would have been + * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to + * transform a block into a stored block. + */ + _tr_stored_block(s, buf, stored_len, last); + +#ifdef FORCE_STATIC + } else if (static_lenb >= 0) { /* force static trees */ +#else + } else if (s->strategy == Z_FIXED || static_lenb == opt_lenb) { +#endif + send_bits(s, (STATIC_TREES<<1)+last, 3); + compress_block(s, (const ct_data *)static_ltree, + (const ct_data *)static_dtree); +#ifdef DEBUG + s->compressed_len += 3 + s->static_len; +#endif + } else { + send_bits(s, (DYN_TREES<<1)+last, 3); + send_all_trees(s, s->l_desc.max_code+1, s->d_desc.max_code+1, + max_blindex+1); + compress_block(s, (const ct_data *)s->dyn_ltree, + (const ct_data *)s->dyn_dtree); +#ifdef DEBUG + s->compressed_len += 3 + s->opt_len; +#endif + } + Assert (s->compressed_len == s->bits_sent, "bad compressed size"); + /* The above check is made mod 2^32, for files larger than 512 MB + * and uLong implemented on 32 bits. 
+ */ + init_block(s); + + if (last) { + bi_windup(s); +#ifdef DEBUG + s->compressed_len += 7; /* align on byte boundary */ +#endif + } + Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3, + s->compressed_len-7*last)); +} + +/* =========================================================================== + * Save the match info and tally the frequency counts. Return true if + * the current block must be flushed. + */ +int ZLIB_INTERNAL _tr_tally (s, dist, lc) + deflate_state *s; + unsigned dist; /* distance of matched string */ + unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */ +{ + s->d_buf[s->last_lit] = (ush)dist; + s->l_buf[s->last_lit++] = (uch)lc; + if (dist == 0) { + /* lc is the unmatched char */ + s->dyn_ltree[lc].Freq++; + } else { + s->matches++; + /* Here, lc is the match length - MIN_MATCH */ + dist--; /* dist = match distance - 1 */ + Assert((ush)dist < (ush)MAX_DIST(s) && + (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) && + (ush)d_code(dist) < (ush)D_CODES, "_tr_tally: bad match"); + + s->dyn_ltree[_length_code[lc]+LITERALS+1].Freq++; + s->dyn_dtree[d_code(dist)].Freq++; + } + +#ifdef TRUNCATE_BLOCK + /* Try to guess if it is profitable to stop the current block here */ + if ((s->last_lit & 0x1fff) == 0 && s->level > 2) { + /* Compute an upper bound for the compressed length */ + ulg out_length = (ulg)s->last_lit*8L; + ulg in_length = (ulg)((long)s->strstart - s->block_start); + int dcode; + for (dcode = 0; dcode < D_CODES; dcode++) { + out_length += (ulg)s->dyn_dtree[dcode].Freq * + (5L+extra_dbits[dcode]); + } + out_length >>= 3; + Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ", + s->last_lit, in_length, out_length, + 100L - out_length*100L/in_length)); + if (s->matches < s->last_lit/2 && out_length < in_length/2) return 1; + } +#endif + return (s->last_lit == s->lit_bufsize-1); + /* We avoid equality with lit_bufsize because of wraparound at 64K + * on 16 bit machines and because stored blocks are restricted to + * 
64K-1 bytes. + */ +} + +/* =========================================================================== + * Send the block data compressed using the given Huffman trees + */ +local void compress_block(s, ltree, dtree) + deflate_state *s; + const ct_data *ltree; /* literal tree */ + const ct_data *dtree; /* distance tree */ +{ + unsigned dist; /* distance of matched string */ + int lc; /* match length or unmatched char (if dist == 0) */ + unsigned lx = 0; /* running index in l_buf */ + unsigned code; /* the code to send */ + int extra; /* number of extra bits to send */ + + if (s->last_lit != 0) do { + dist = s->d_buf[lx]; + lc = s->l_buf[lx++]; + if (dist == 0) { + send_code(s, lc, ltree); /* send a literal byte */ + Tracecv(isgraph(lc), (stderr," '%c' ", lc)); + } else { + /* Here, lc is the match length - MIN_MATCH */ + code = _length_code[lc]; + send_code(s, code+LITERALS+1, ltree); /* send the length code */ + extra = extra_lbits[code]; + if (extra != 0) { + lc -= base_length[code]; + send_bits(s, lc, extra); /* send the extra length bits */ + } + dist--; /* dist is now the match distance - 1 */ + code = d_code(dist); + Assert (code < D_CODES, "bad d_code"); + + send_code(s, code, dtree); /* send the distance code */ + extra = extra_dbits[code]; + if (extra != 0) { + dist -= base_dist[code]; + send_bits(s, dist, extra); /* send the extra distance bits */ + } + } /* literal or match pair ? */ + + /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */ + Assert((uInt)(s->pending) < s->lit_bufsize + 2*lx, + "pendingBuf overflow"); + + } while (lx < s->last_lit); + + send_code(s, END_BLOCK, ltree); +} + +/* =========================================================================== + * Check if the data type is TEXT or BINARY, using the following algorithm: + * - TEXT if the two conditions below are satisfied: + * a) There are no non-portable control characters belonging to the + * "black list" (0..6, 14..25, 28..31). 
+ * b) There is at least one printable character belonging to the + * "white list" (9 {TAB}, 10 {LF}, 13 {CR}, 32..255). + * - BINARY otherwise. + * - The following partially-portable control characters form a + * "gray list" that is ignored in this detection algorithm: + * (7 {BEL}, 8 {BS}, 11 {VT}, 12 {FF}, 26 {SUB}, 27 {ESC}). + * IN assertion: the fields Freq of dyn_ltree are set. + */ +local int detect_data_type(s) + deflate_state *s; +{ + /* black_mask is the bit mask of black-listed bytes + * set bits 0..6, 14..25, and 28..31 + * 0xf3ffc07f = binary 11110011111111111100000001111111 + */ + unsigned long black_mask = 0xf3ffc07fUL; + int n; + + /* Check for non-textual ("black-listed") bytes. */ + for (n = 0; n <= 31; n++, black_mask >>= 1) + if ((black_mask & 1) && (s->dyn_ltree[n].Freq != 0)) + return Z_BINARY; + + /* Check for textual ("white-listed") bytes. */ + if (s->dyn_ltree[9].Freq != 0 || s->dyn_ltree[10].Freq != 0 + || s->dyn_ltree[13].Freq != 0) + return Z_TEXT; + for (n = 32; n < LITERALS; n++) + if (s->dyn_ltree[n].Freq != 0) + return Z_TEXT; + + /* There are no "black-listed" or "white-listed" bytes: + * this stream either is empty or has tolerated ("gray-listed") bytes only. + */ + return Z_BINARY; +} + +/* =========================================================================== + * Reverse the first len bits of a code, using straightforward code (a faster + * method would use a table) + * IN assertion: 1 <= len <= 15 + */ +local unsigned bi_reverse(code, len) + unsigned code; /* the value to invert */ + int len; /* its bit length */ +{ + register unsigned res = 0; + do { + res |= code & 1; + code >>= 1, res <<= 1; + } while (--len > 0); + return res >> 1; +} + +/* =========================================================================== + * Flush the bit buffer, keeping at most 7 bits in it. 
+ */ +local void bi_flush(s) + deflate_state *s; +{ + if (s->bi_valid == 16) { + put_short(s, s->bi_buf); + s->bi_buf = 0; + s->bi_valid = 0; + } else if (s->bi_valid >= 8) { + put_byte(s, (Byte)s->bi_buf); + s->bi_buf >>= 8; + s->bi_valid -= 8; + } +} + +/* =========================================================================== + * Flush the bit buffer and align the output on a byte boundary + */ +local void bi_windup(s) + deflate_state *s; +{ + if (s->bi_valid > 8) { + put_short(s, s->bi_buf); + } else if (s->bi_valid > 0) { + put_byte(s, (Byte)s->bi_buf); + } + s->bi_buf = 0; + s->bi_valid = 0; +#ifdef DEBUG + s->bits_sent = (s->bits_sent+7) & ~7; +#endif +} + +/* =========================================================================== + * Copy a stored block, storing first the length and its + * one's complement if requested. + */ +local void copy_block(s, buf, len, header) + deflate_state *s; + charf *buf; /* the input data */ + unsigned len; /* its length */ + int header; /* true if block header must be written */ +{ + bi_windup(s); /* align on byte boundary */ + + if (header) { + put_short(s, (ush)len); + put_short(s, (ush)~len); +#ifdef DEBUG + s->bits_sent += 2*16; +#endif + } +#ifdef DEBUG + s->bits_sent += (ulg)len<<3; +#endif + while (len--) { + put_byte(s, *buf++); + } +} diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/trees.h cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/trees.h --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/trees.h 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/trees.h 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,128 @@ +/* header created automatically with -DGEN_TREES_H */ + +local const ct_data static_ltree[L_CODES+2] = { +{{ 12},{ 8}}, {{140},{ 8}}, {{ 76},{ 8}}, {{204},{ 8}}, {{ 44},{ 8}}, +{{172},{ 8}}, {{108},{ 8}}, {{236},{ 8}}, {{ 28},{ 8}}, {{156},{ 8}}, +{{ 92},{ 8}}, {{220},{ 8}}, {{ 60},{ 8}}, {{188},{ 8}}, {{124},{ 8}}, +{{252},{ 8}}, {{ 2},{ 
8}}, {{130},{ 8}}, {{ 66},{ 8}}, {{194},{ 8}}, +{{ 34},{ 8}}, {{162},{ 8}}, {{ 98},{ 8}}, {{226},{ 8}}, {{ 18},{ 8}}, +{{146},{ 8}}, {{ 82},{ 8}}, {{210},{ 8}}, {{ 50},{ 8}}, {{178},{ 8}}, +{{114},{ 8}}, {{242},{ 8}}, {{ 10},{ 8}}, {{138},{ 8}}, {{ 74},{ 8}}, +{{202},{ 8}}, {{ 42},{ 8}}, {{170},{ 8}}, {{106},{ 8}}, {{234},{ 8}}, +{{ 26},{ 8}}, {{154},{ 8}}, {{ 90},{ 8}}, {{218},{ 8}}, {{ 58},{ 8}}, +{{186},{ 8}}, {{122},{ 8}}, {{250},{ 8}}, {{ 6},{ 8}}, {{134},{ 8}}, +{{ 70},{ 8}}, {{198},{ 8}}, {{ 38},{ 8}}, {{166},{ 8}}, {{102},{ 8}}, +{{230},{ 8}}, {{ 22},{ 8}}, {{150},{ 8}}, {{ 86},{ 8}}, {{214},{ 8}}, +{{ 54},{ 8}}, {{182},{ 8}}, {{118},{ 8}}, {{246},{ 8}}, {{ 14},{ 8}}, +{{142},{ 8}}, {{ 78},{ 8}}, {{206},{ 8}}, {{ 46},{ 8}}, {{174},{ 8}}, +{{110},{ 8}}, {{238},{ 8}}, {{ 30},{ 8}}, {{158},{ 8}}, {{ 94},{ 8}}, +{{222},{ 8}}, {{ 62},{ 8}}, {{190},{ 8}}, {{126},{ 8}}, {{254},{ 8}}, +{{ 1},{ 8}}, {{129},{ 8}}, {{ 65},{ 8}}, {{193},{ 8}}, {{ 33},{ 8}}, +{{161},{ 8}}, {{ 97},{ 8}}, {{225},{ 8}}, {{ 17},{ 8}}, {{145},{ 8}}, +{{ 81},{ 8}}, {{209},{ 8}}, {{ 49},{ 8}}, {{177},{ 8}}, {{113},{ 8}}, +{{241},{ 8}}, {{ 9},{ 8}}, {{137},{ 8}}, {{ 73},{ 8}}, {{201},{ 8}}, +{{ 41},{ 8}}, {{169},{ 8}}, {{105},{ 8}}, {{233},{ 8}}, {{ 25},{ 8}}, +{{153},{ 8}}, {{ 89},{ 8}}, {{217},{ 8}}, {{ 57},{ 8}}, {{185},{ 8}}, +{{121},{ 8}}, {{249},{ 8}}, {{ 5},{ 8}}, {{133},{ 8}}, {{ 69},{ 8}}, +{{197},{ 8}}, {{ 37},{ 8}}, {{165},{ 8}}, {{101},{ 8}}, {{229},{ 8}}, +{{ 21},{ 8}}, {{149},{ 8}}, {{ 85},{ 8}}, {{213},{ 8}}, {{ 53},{ 8}}, +{{181},{ 8}}, {{117},{ 8}}, {{245},{ 8}}, {{ 13},{ 8}}, {{141},{ 8}}, +{{ 77},{ 8}}, {{205},{ 8}}, {{ 45},{ 8}}, {{173},{ 8}}, {{109},{ 8}}, +{{237},{ 8}}, {{ 29},{ 8}}, {{157},{ 8}}, {{ 93},{ 8}}, {{221},{ 8}}, +{{ 61},{ 8}}, {{189},{ 8}}, {{125},{ 8}}, {{253},{ 8}}, {{ 19},{ 9}}, +{{275},{ 9}}, {{147},{ 9}}, {{403},{ 9}}, {{ 83},{ 9}}, {{339},{ 9}}, +{{211},{ 9}}, {{467},{ 9}}, {{ 51},{ 9}}, {{307},{ 9}}, {{179},{ 9}}, +{{435},{ 9}}, {{115},{ 9}}, {{371},{ 
9}}, {{243},{ 9}}, {{499},{ 9}}, +{{ 11},{ 9}}, {{267},{ 9}}, {{139},{ 9}}, {{395},{ 9}}, {{ 75},{ 9}}, +{{331},{ 9}}, {{203},{ 9}}, {{459},{ 9}}, {{ 43},{ 9}}, {{299},{ 9}}, +{{171},{ 9}}, {{427},{ 9}}, {{107},{ 9}}, {{363},{ 9}}, {{235},{ 9}}, +{{491},{ 9}}, {{ 27},{ 9}}, {{283},{ 9}}, {{155},{ 9}}, {{411},{ 9}}, +{{ 91},{ 9}}, {{347},{ 9}}, {{219},{ 9}}, {{475},{ 9}}, {{ 59},{ 9}}, +{{315},{ 9}}, {{187},{ 9}}, {{443},{ 9}}, {{123},{ 9}}, {{379},{ 9}}, +{{251},{ 9}}, {{507},{ 9}}, {{ 7},{ 9}}, {{263},{ 9}}, {{135},{ 9}}, +{{391},{ 9}}, {{ 71},{ 9}}, {{327},{ 9}}, {{199},{ 9}}, {{455},{ 9}}, +{{ 39},{ 9}}, {{295},{ 9}}, {{167},{ 9}}, {{423},{ 9}}, {{103},{ 9}}, +{{359},{ 9}}, {{231},{ 9}}, {{487},{ 9}}, {{ 23},{ 9}}, {{279},{ 9}}, +{{151},{ 9}}, {{407},{ 9}}, {{ 87},{ 9}}, {{343},{ 9}}, {{215},{ 9}}, +{{471},{ 9}}, {{ 55},{ 9}}, {{311},{ 9}}, {{183},{ 9}}, {{439},{ 9}}, +{{119},{ 9}}, {{375},{ 9}}, {{247},{ 9}}, {{503},{ 9}}, {{ 15},{ 9}}, +{{271},{ 9}}, {{143},{ 9}}, {{399},{ 9}}, {{ 79},{ 9}}, {{335},{ 9}}, +{{207},{ 9}}, {{463},{ 9}}, {{ 47},{ 9}}, {{303},{ 9}}, {{175},{ 9}}, +{{431},{ 9}}, {{111},{ 9}}, {{367},{ 9}}, {{239},{ 9}}, {{495},{ 9}}, +{{ 31},{ 9}}, {{287},{ 9}}, {{159},{ 9}}, {{415},{ 9}}, {{ 95},{ 9}}, +{{351},{ 9}}, {{223},{ 9}}, {{479},{ 9}}, {{ 63},{ 9}}, {{319},{ 9}}, +{{191},{ 9}}, {{447},{ 9}}, {{127},{ 9}}, {{383},{ 9}}, {{255},{ 9}}, +{{511},{ 9}}, {{ 0},{ 7}}, {{ 64},{ 7}}, {{ 32},{ 7}}, {{ 96},{ 7}}, +{{ 16},{ 7}}, {{ 80},{ 7}}, {{ 48},{ 7}}, {{112},{ 7}}, {{ 8},{ 7}}, +{{ 72},{ 7}}, {{ 40},{ 7}}, {{104},{ 7}}, {{ 24},{ 7}}, {{ 88},{ 7}}, +{{ 56},{ 7}}, {{120},{ 7}}, {{ 4},{ 7}}, {{ 68},{ 7}}, {{ 36},{ 7}}, +{{100},{ 7}}, {{ 20},{ 7}}, {{ 84},{ 7}}, {{ 52},{ 7}}, {{116},{ 7}}, +{{ 3},{ 8}}, {{131},{ 8}}, {{ 67},{ 8}}, {{195},{ 8}}, {{ 35},{ 8}}, +{{163},{ 8}}, {{ 99},{ 8}}, {{227},{ 8}} +}; + +local const ct_data static_dtree[D_CODES] = { +{{ 0},{ 5}}, {{16},{ 5}}, {{ 8},{ 5}}, {{24},{ 5}}, {{ 4},{ 5}}, +{{20},{ 5}}, {{12},{ 5}}, {{28},{ 
5}}, {{ 2},{ 5}}, {{18},{ 5}}, +{{10},{ 5}}, {{26},{ 5}}, {{ 6},{ 5}}, {{22},{ 5}}, {{14},{ 5}}, +{{30},{ 5}}, {{ 1},{ 5}}, {{17},{ 5}}, {{ 9},{ 5}}, {{25},{ 5}}, +{{ 5},{ 5}}, {{21},{ 5}}, {{13},{ 5}}, {{29},{ 5}}, {{ 3},{ 5}}, +{{19},{ 5}}, {{11},{ 5}}, {{27},{ 5}}, {{ 7},{ 5}}, {{23},{ 5}} +}; + +const uch ZLIB_INTERNAL _dist_code[DIST_CODE_LEN] = { + 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, + 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, +10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, +11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, +12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, +13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, +13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, +14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, +14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, +14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, +15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, +15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, +15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 0, 0, 16, 17, +18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, +23, 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, +24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, +26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, +26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, +27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, +27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, +28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 
28, 28, 28, 28, 28, 28, +28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, +28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, +29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, +29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, +29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29 +}; + +const uch ZLIB_INTERNAL _length_code[MAX_MATCH-MIN_MATCH+1]= { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, +13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, +17, 17, 17, 17, 17, 17, 17, 17, 18, 18, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19, +19, 19, 19, 19, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, +21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 22, 22, 22, 22, +22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23, +23, 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, +24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, +25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, +25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, +26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, +26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, +27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28 +}; + +local const int base_length[LENGTH_CODES] = { +0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, +64, 80, 96, 112, 128, 160, 192, 224, 0 +}; + +local const int base_dist[D_CODES] = { + 0, 1, 2, 3, 4, 6, 8, 12, 16, 24, + 32, 48, 64, 96, 128, 192, 256, 384, 512, 768, + 1024, 1536, 2048, 3072, 4096, 6144, 8192, 12288, 16384, 24576 +}; + diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/uncompr.c cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/uncompr.c --- 
cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/uncompr.c 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/uncompr.c 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,59 @@ +/* uncompr.c -- decompress a memory buffer + * Copyright (C) 1995-2003, 2010 Jean-loup Gailly. + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* @(#) $Id$ */ + +#define ZLIB_INTERNAL +#include "zlib.h" + +/* =========================================================================== + Decompresses the source buffer into the destination buffer. sourceLen is + the byte length of the source buffer. Upon entry, destLen is the total + size of the destination buffer, which must be large enough to hold the + entire uncompressed data. (The size of the uncompressed data must have + been saved previously by the compressor and transmitted to the decompressor + by some mechanism outside the scope of this compression library.) + Upon exit, destLen is the actual size of the compressed buffer. + + uncompress returns Z_OK if success, Z_MEM_ERROR if there was not + enough memory, Z_BUF_ERROR if there was not enough room in the output + buffer, or Z_DATA_ERROR if the input data was corrupted. 
+*/ +int ZEXPORT uncompress (dest, destLen, source, sourceLen) + Bytef *dest; + uLongf *destLen; + const Bytef *source; + uLong sourceLen; +{ + z_stream stream; + int err; + + stream.next_in = (z_const Bytef *)source; + stream.avail_in = (uInt)sourceLen; + /* Check for source > 64K on 16-bit machine: */ + if ((uLong)stream.avail_in != sourceLen) return Z_BUF_ERROR; + + stream.next_out = dest; + stream.avail_out = (uInt)*destLen; + if ((uLong)stream.avail_out != *destLen) return Z_BUF_ERROR; + + stream.zalloc = (alloc_func)0; + stream.zfree = (free_func)0; + + err = inflateInit(&stream); + if (err != Z_OK) return err; + + err = inflate(&stream, Z_FINISH); + if (err != Z_STREAM_END) { + inflateEnd(&stream); + if (err == Z_NEED_DICT || (err == Z_BUF_ERROR && stream.avail_in == 0)) + return Z_DATA_ERROR; + return err; + } + *destLen = stream.total_out; + + err = inflateEnd(&stream); + return err; +} diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/zconf.h cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/zconf.h --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/zconf.h 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/zconf.h 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,511 @@ +/* zconf.h -- configuration of the zlib compression library + * Copyright (C) 1995-2013 Jean-loup Gailly. + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* @(#) $Id$ */ + +#ifndef ZCONF_H +#define ZCONF_H + +/* + * If you *really* need a unique prefix for all types and library functions, + * compile with -DZ_PREFIX. The "standard" zlib should be compiled without it. + * Even better than compiling with -DZ_PREFIX would be to use configure to set + * this permanently in zconf.h using "./configure --zprefix". 
+ */ +#ifdef Z_PREFIX /* may be set to #if 1 by ./configure */ +# define Z_PREFIX_SET + +/* all linked symbols */ +# define _dist_code z__dist_code +# define _length_code z__length_code +# define _tr_align z__tr_align +# define _tr_flush_bits z__tr_flush_bits +# define _tr_flush_block z__tr_flush_block +# define _tr_init z__tr_init +# define _tr_stored_block z__tr_stored_block +# define _tr_tally z__tr_tally +# define adler32 z_adler32 +# define adler32_combine z_adler32_combine +# define adler32_combine64 z_adler32_combine64 +# ifndef Z_SOLO +# define compress z_compress +# define compress2 z_compress2 +# define compressBound z_compressBound +# endif +# define crc32 z_crc32 +# define crc32_combine z_crc32_combine +# define crc32_combine64 z_crc32_combine64 +# define deflate z_deflate +# define deflateBound z_deflateBound +# define deflateCopy z_deflateCopy +# define deflateEnd z_deflateEnd +# define deflateInit2_ z_deflateInit2_ +# define deflateInit_ z_deflateInit_ +# define deflateParams z_deflateParams +# define deflatePending z_deflatePending +# define deflatePrime z_deflatePrime +# define deflateReset z_deflateReset +# define deflateResetKeep z_deflateResetKeep +# define deflateSetDictionary z_deflateSetDictionary +# define deflateSetHeader z_deflateSetHeader +# define deflateTune z_deflateTune +# define deflate_copyright z_deflate_copyright +# define get_crc_table z_get_crc_table +# ifndef Z_SOLO +# define gz_error z_gz_error +# define gz_intmax z_gz_intmax +# define gz_strwinerror z_gz_strwinerror +# define gzbuffer z_gzbuffer +# define gzclearerr z_gzclearerr +# define gzclose z_gzclose +# define gzclose_r z_gzclose_r +# define gzclose_w z_gzclose_w +# define gzdirect z_gzdirect +# define gzdopen z_gzdopen +# define gzeof z_gzeof +# define gzerror z_gzerror +# define gzflush z_gzflush +# define gzgetc z_gzgetc +# define gzgetc_ z_gzgetc_ +# define gzgets z_gzgets +# define gzoffset z_gzoffset +# define gzoffset64 z_gzoffset64 +# define gzopen z_gzopen +# 
define gzopen64 z_gzopen64 +# ifdef _WIN32 +# define gzopen_w z_gzopen_w +# endif +# define gzprintf z_gzprintf +# define gzvprintf z_gzvprintf +# define gzputc z_gzputc +# define gzputs z_gzputs +# define gzread z_gzread +# define gzrewind z_gzrewind +# define gzseek z_gzseek +# define gzseek64 z_gzseek64 +# define gzsetparams z_gzsetparams +# define gztell z_gztell +# define gztell64 z_gztell64 +# define gzungetc z_gzungetc +# define gzwrite z_gzwrite +# endif +# define inflate z_inflate +# define inflateBack z_inflateBack +# define inflateBackEnd z_inflateBackEnd +# define inflateBackInit_ z_inflateBackInit_ +# define inflateCopy z_inflateCopy +# define inflateEnd z_inflateEnd +# define inflateGetHeader z_inflateGetHeader +# define inflateInit2_ z_inflateInit2_ +# define inflateInit_ z_inflateInit_ +# define inflateMark z_inflateMark +# define inflatePrime z_inflatePrime +# define inflateReset z_inflateReset +# define inflateReset2 z_inflateReset2 +# define inflateSetDictionary z_inflateSetDictionary +# define inflateGetDictionary z_inflateGetDictionary +# define inflateSync z_inflateSync +# define inflateSyncPoint z_inflateSyncPoint +# define inflateUndermine z_inflateUndermine +# define inflateResetKeep z_inflateResetKeep +# define inflate_copyright z_inflate_copyright +# define inflate_fast z_inflate_fast +# define inflate_table z_inflate_table +# ifndef Z_SOLO +# define uncompress z_uncompress +# endif +# define zError z_zError +# ifndef Z_SOLO +# define zcalloc z_zcalloc +# define zcfree z_zcfree +# endif +# define zlibCompileFlags z_zlibCompileFlags +# define zlibVersion z_zlibVersion + +/* all zlib typedefs in zlib.h and zconf.h */ +# define Byte z_Byte +# define Bytef z_Bytef +# define alloc_func z_alloc_func +# define charf z_charf +# define free_func z_free_func +# ifndef Z_SOLO +# define gzFile z_gzFile +# endif +# define gz_header z_gz_header +# define gz_headerp z_gz_headerp +# define in_func z_in_func +# define intf z_intf +# define out_func 
z_out_func +# define uInt z_uInt +# define uIntf z_uIntf +# define uLong z_uLong +# define uLongf z_uLongf +# define voidp z_voidp +# define voidpc z_voidpc +# define voidpf z_voidpf + +/* all zlib structs in zlib.h and zconf.h */ +# define gz_header_s z_gz_header_s +# define internal_state z_internal_state + +#endif + +#if defined(__MSDOS__) && !defined(MSDOS) +# define MSDOS +#endif +#if (defined(OS_2) || defined(__OS2__)) && !defined(OS2) +# define OS2 +#endif +#if defined(_WINDOWS) && !defined(WINDOWS) +# define WINDOWS +#endif +#if defined(_WIN32) || defined(_WIN32_WCE) || defined(__WIN32__) +# ifndef WIN32 +# define WIN32 +# endif +#endif +#if (defined(MSDOS) || defined(OS2) || defined(WINDOWS)) && !defined(WIN32) +# if !defined(__GNUC__) && !defined(__FLAT__) && !defined(__386__) +# ifndef SYS16BIT +# define SYS16BIT +# endif +# endif +#endif + +/* + * Compile with -DMAXSEG_64K if the alloc function cannot allocate more + * than 64k bytes at a time (needed on systems with 16-bit int). + */ +#ifdef SYS16BIT +# define MAXSEG_64K +#endif +#ifdef MSDOS +# define UNALIGNED_OK +#endif + +#ifdef __STDC_VERSION__ +# ifndef STDC +# define STDC +# endif +# if __STDC_VERSION__ >= 199901L +# ifndef STDC99 +# define STDC99 +# endif +# endif +#endif +#if !defined(STDC) && (defined(__STDC__) || defined(__cplusplus)) +# define STDC +#endif +#if !defined(STDC) && (defined(__GNUC__) || defined(__BORLANDC__)) +# define STDC +#endif +#if !defined(STDC) && (defined(MSDOS) || defined(WINDOWS) || defined(WIN32)) +# define STDC +#endif +#if !defined(STDC) && (defined(OS2) || defined(__HOS_AIX__)) +# define STDC +#endif + +#if defined(__OS400__) && !defined(STDC) /* iSeries (formerly AS/400). 
*/ +# define STDC +#endif + +#ifndef STDC +# ifndef const /* cannot use !defined(STDC) && !defined(const) on Mac */ +# define const /* note: need a more gentle solution here */ +# endif +#endif + +#if defined(ZLIB_CONST) && !defined(z_const) +# define z_const const +#else +# define z_const +#endif + +/* Some Mac compilers merge all .h files incorrectly: */ +#if defined(__MWERKS__)||defined(applec)||defined(THINK_C)||defined(__SC__) +# define NO_DUMMY_DECL +#endif + +/* Maximum value for memLevel in deflateInit2 */ +#ifndef MAX_MEM_LEVEL +# ifdef MAXSEG_64K +# define MAX_MEM_LEVEL 8 +# else +# define MAX_MEM_LEVEL 9 +# endif +#endif + +/* Maximum value for windowBits in deflateInit2 and inflateInit2. + * WARNING: reducing MAX_WBITS makes minigzip unable to extract .gz files + * created by gzip. (Files created by minigzip can still be extracted by + * gzip.) + */ +#ifndef MAX_WBITS +# define MAX_WBITS 15 /* 32K LZ77 window */ +#endif + +/* The memory requirements for deflate are (in bytes): + (1 << (windowBits+2)) + (1 << (memLevel+9)) + that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values) + plus a few kilobytes for small objects. For example, if you want to reduce + the default memory requirements from 256K to 128K, compile with + make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7" + Of course this will generally degrade compression (there's no free lunch). + + The memory requirements for inflate are (in bytes) 1 << windowBits + that is, 32K for windowBits=15 (default value) plus a few kilobytes + for small objects. 
+*/ + + /* Type declarations */ + +#ifndef OF /* function prototypes */ +# ifdef STDC +# define OF(args) args +# else +# define OF(args) () +# endif +#endif + +#ifndef Z_ARG /* function prototypes for stdarg */ +# if defined(STDC) || defined(Z_HAVE_STDARG_H) +# define Z_ARG(args) args +# else +# define Z_ARG(args) () +# endif +#endif + +/* The following definitions for FAR are needed only for MSDOS mixed + * model programming (small or medium model with some far allocations). + * This was tested only with MSC; for other MSDOS compilers you may have + * to define NO_MEMCPY in zutil.h. If you don't need the mixed model, + * just define FAR to be empty. + */ +#ifdef SYS16BIT +# if defined(M_I86SM) || defined(M_I86MM) + /* MSC small or medium model */ +# define SMALL_MEDIUM +# ifdef _MSC_VER +# define FAR _far +# else +# define FAR far +# endif +# endif +# if (defined(__SMALL__) || defined(__MEDIUM__)) + /* Turbo C small or medium model */ +# define SMALL_MEDIUM +# ifdef __BORLANDC__ +# define FAR _far +# else +# define FAR far +# endif +# endif +#endif + +#if defined(WINDOWS) || defined(WIN32) + /* If building or using zlib as a DLL, define ZLIB_DLL. + * This is not mandatory, but it offers a little performance increase. + */ +# ifdef ZLIB_DLL +# if defined(WIN32) && (!defined(__BORLANDC__) || (__BORLANDC__ >= 0x500)) +# ifdef ZLIB_INTERNAL +# define ZEXTERN extern __declspec(dllexport) +# else +# define ZEXTERN extern __declspec(dllimport) +# endif +# endif +# endif /* ZLIB_DLL */ + /* If building or using zlib with the WINAPI/WINAPIV calling convention, + * define ZLIB_WINAPI. + * Caution: the standard ZLIB1.DLL is NOT compiled using ZLIB_WINAPI. + */ +# ifdef ZLIB_WINAPI +# ifdef FAR +# undef FAR +# endif +# include + /* No need for _export, use ZLIB.DEF instead. */ + /* For complete Windows compatibility, use WINAPI, not __stdcall. 
*/ +# define ZEXPORT WINAPI +# ifdef WIN32 +# define ZEXPORTVA WINAPIV +# else +# define ZEXPORTVA FAR CDECL +# endif +# endif +#endif + +#if defined (__BEOS__) +# ifdef ZLIB_DLL +# ifdef ZLIB_INTERNAL +# define ZEXPORT __declspec(dllexport) +# define ZEXPORTVA __declspec(dllexport) +# else +# define ZEXPORT __declspec(dllimport) +# define ZEXPORTVA __declspec(dllimport) +# endif +# endif +#endif + +#ifndef ZEXTERN +# define ZEXTERN extern +#endif +#ifndef ZEXPORT +# define ZEXPORT +#endif +#ifndef ZEXPORTVA +# define ZEXPORTVA +#endif + +#ifndef FAR +# define FAR +#endif + +#if !defined(__MACTYPES__) +typedef unsigned char Byte; /* 8 bits */ +#endif +typedef unsigned int uInt; /* 16 bits or more */ +typedef unsigned long uLong; /* 32 bits or more */ + +#ifdef SMALL_MEDIUM + /* Borland C/C++ and some old MSC versions ignore FAR inside typedef */ +# define Bytef Byte FAR +#else + typedef Byte FAR Bytef; +#endif +typedef char FAR charf; +typedef int FAR intf; +typedef uInt FAR uIntf; +typedef uLong FAR uLongf; + +#ifdef STDC + typedef void const *voidpc; + typedef void FAR *voidpf; + typedef void *voidp; +#else + typedef Byte const *voidpc; + typedef Byte FAR *voidpf; + typedef Byte *voidp; +#endif + +#if !defined(Z_U4) && !defined(Z_SOLO) && defined(STDC) +# include +# if (UINT_MAX == 0xffffffffUL) +# define Z_U4 unsigned +# elif (ULONG_MAX == 0xffffffffUL) +# define Z_U4 unsigned long +# elif (USHRT_MAX == 0xffffffffUL) +# define Z_U4 unsigned short +# endif +#endif + +#ifdef Z_U4 + typedef Z_U4 z_crc_t; +#else + typedef unsigned long z_crc_t; +#endif + +#ifdef HAVE_UNISTD_H /* may be set to #if 1 by ./configure */ +# define Z_HAVE_UNISTD_H +#endif + +#ifdef HAVE_STDARG_H /* may be set to #if 1 by ./configure */ +# define Z_HAVE_STDARG_H +#endif + +#ifdef STDC +# ifndef Z_SOLO +# include /* for off_t */ +# endif +#endif + +#if defined(STDC) || defined(Z_HAVE_STDARG_H) +# ifndef Z_SOLO +# include /* for va_list */ +# endif +#endif + +#ifdef _WIN32 +# ifndef Z_SOLO 
+# include /* for wchar_t */ +# endif +#endif + +/* a little trick to accommodate both "#define _LARGEFILE64_SOURCE" and + * "#define _LARGEFILE64_SOURCE 1" as requesting 64-bit operations, (even + * though the former does not conform to the LFS document), but considering + * both "#undef _LARGEFILE64_SOURCE" and "#define _LARGEFILE64_SOURCE 0" as + * equivalently requesting no 64-bit operations + */ +#if defined(_LARGEFILE64_SOURCE) && -_LARGEFILE64_SOURCE - -1 == 1 +# undef _LARGEFILE64_SOURCE +#endif + +#if defined(__WATCOMC__) && !defined(Z_HAVE_UNISTD_H) +# define Z_HAVE_UNISTD_H +#endif +#ifndef Z_SOLO +# if defined(Z_HAVE_UNISTD_H) || defined(_LARGEFILE64_SOURCE) +# include /* for SEEK_*, off_t, and _LFS64_LARGEFILE */ +# ifdef VMS +# include /* for off_t */ +# endif +# ifndef z_off_t +# define z_off_t off_t +# endif +# endif +#endif + +#if defined(_LFS64_LARGEFILE) && _LFS64_LARGEFILE-0 +# define Z_LFS64 +#endif + +#if defined(_LARGEFILE64_SOURCE) && defined(Z_LFS64) +# define Z_LARGE64 +#endif + +#if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS-0 == 64 && defined(Z_LFS64) +# define Z_WANT64 +#endif + +#if !defined(SEEK_SET) && !defined(Z_SOLO) +# define SEEK_SET 0 /* Seek from beginning of file. */ +# define SEEK_CUR 1 /* Seek from current position. 
*/ +# define SEEK_END 2 /* Set file pointer to EOF plus "offset" */ +#endif + +#ifndef z_off_t +# define z_off_t long +#endif + +#if !defined(_WIN32) && defined(Z_LARGE64) +# define z_off64_t off64_t +#else +# if defined(_WIN32) && !defined(__GNUC__) && !defined(Z_SOLO) +# define z_off64_t __int64 +# else +# define z_off64_t z_off_t +# endif +#endif + +/* MVS linker does not support external names larger than 8 bytes */ +#if defined(__MVS__) + #pragma map(deflateInit_,"DEIN") + #pragma map(deflateInit2_,"DEIN2") + #pragma map(deflateEnd,"DEEND") + #pragma map(deflateBound,"DEBND") + #pragma map(inflateInit_,"ININ") + #pragma map(inflateInit2_,"ININ2") + #pragma map(inflateEnd,"INEND") + #pragma map(inflateSync,"INSY") + #pragma map(inflateSetDictionary,"INSEDI") + #pragma map(compressBound,"CMBND") + #pragma map(inflate_table,"INTABL") + #pragma map(inflate_fast,"INFA") + #pragma map(inflate_copyright,"INCOPY") +#endif + +#endif /* ZCONF_H */ diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/zlib.h cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/zlib.h --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/zlib.h 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/zlib.h 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,1768 @@ +/* zlib.h -- interface of the 'zlib' general purpose compression library + version 1.2.8, April 28th, 2013 + + Copyright (C) 1995-2013 Jean-loup Gailly and Mark Adler + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. 
If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + + Jean-loup Gailly Mark Adler + jloup@gzip.org madler@alumni.caltech.edu + + + The data format used by the zlib library is described by RFCs (Request for + Comments) 1950 to 1952 in the files http://tools.ietf.org/html/rfc1950 + (zlib format), rfc1951 (deflate format) and rfc1952 (gzip format). +*/ + +#ifndef ZLIB_H +#define ZLIB_H + +#include "zconf.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define ZLIB_VERSION "1.2.8" +#define ZLIB_VERNUM 0x1280 +#define ZLIB_VER_MAJOR 1 +#define ZLIB_VER_MINOR 2 +#define ZLIB_VER_REVISION 8 +#define ZLIB_VER_SUBREVISION 0 + +/* + The 'zlib' compression library provides in-memory compression and + decompression functions, including integrity checks of the uncompressed data. + This version of the library supports only one compression method (deflation) + but other algorithms will be added later and will have the same stream + interface. + + Compression can be done in a single step if the buffers are large enough, + or can be done by repeated calls of the compression function. In the latter + case, the application must provide more input and/or consume the output + (providing more output space) before each call. + + The compressed data format used by default by the in-memory functions is + the zlib format, which is a zlib wrapper documented in RFC 1950, wrapped + around a deflate stream, which is itself documented in RFC 1951. + + The library also supports reading and writing files in gzip (.gz) format + with an interface similar to that of stdio using the functions that start + with "gz". The gzip format is different from the zlib format. 
gzip is a + gzip wrapper, documented in RFC 1952, wrapped around a deflate stream. + + This library can optionally read and write gzip streams in memory as well. + + The zlib format was designed to be compact and fast for use in memory + and on communications channels. The gzip format was designed for single- + file compression on file systems, has a larger header than zlib to maintain + directory information, and uses a different, slower check method than zlib. + + The library does not install any signal handler. The decoder checks + the consistency of the compressed data, so the library should never crash + even in case of corrupted input. +*/ + +typedef voidpf (*alloc_func) OF((voidpf opaque, uInt items, uInt size)); +typedef void (*free_func) OF((voidpf opaque, voidpf address)); + +struct internal_state; + +typedef struct z_stream_s { + z_const Bytef *next_in; /* next input byte */ + uInt avail_in; /* number of bytes available at next_in */ + uLong total_in; /* total number of input bytes read so far */ + + Bytef *next_out; /* next output byte should be put there */ + uInt avail_out; /* remaining free space at next_out */ + uLong total_out; /* total number of bytes output so far */ + + z_const char *msg; /* last error message, NULL if no error */ + struct internal_state FAR *state; /* not visible by applications */ + + alloc_func zalloc; /* used to allocate the internal state */ + free_func zfree; /* used to free the internal state */ + voidpf opaque; /* private data object passed to zalloc and zfree */ + + int data_type; /* best guess about the data type: binary or text */ + uLong adler; /* adler32 value of the uncompressed data */ + uLong reserved; /* reserved for future use */ +} z_stream; + +typedef z_stream FAR *z_streamp; + +/* + gzip header information passed to and from zlib routines. See RFC 1952 + for more details on the meanings of these fields. 
+*/ +typedef struct gz_header_s { + int text; /* true if compressed data believed to be text */ + uLong time; /* modification time */ + int xflags; /* extra flags (not used when writing a gzip file) */ + int os; /* operating system */ + Bytef *extra; /* pointer to extra field or Z_NULL if none */ + uInt extra_len; /* extra field length (valid if extra != Z_NULL) */ + uInt extra_max; /* space at extra (only when reading header) */ + Bytef *name; /* pointer to zero-terminated file name or Z_NULL */ + uInt name_max; /* space at name (only when reading header) */ + Bytef *comment; /* pointer to zero-terminated comment or Z_NULL */ + uInt comm_max; /* space at comment (only when reading header) */ + int hcrc; /* true if there was or will be a header crc */ + int done; /* true when done reading gzip header (not used + when writing a gzip file) */ +} gz_header; + +typedef gz_header FAR *gz_headerp; + +/* + The application must update next_in and avail_in when avail_in has dropped + to zero. It must update next_out and avail_out when avail_out has dropped + to zero. The application must initialize zalloc, zfree and opaque before + calling the init function. All other fields are set by the compression + library and must not be updated by the application. + + The opaque value provided by the application will be passed as the first + parameter for calls of zalloc and zfree. This can be useful for custom + memory management. The compression library attaches no meaning to the + opaque value. + + zalloc must return Z_NULL if there is not enough memory for the object. + If zlib is used in a multi-threaded application, zalloc and zfree must be + thread safe. + + On 16-bit systems, the functions zalloc and zfree must be able to allocate + exactly 65536 bytes, but will not be required to allocate more than this if + the symbol MAXSEG_64K is defined (see zconf.h). 
WARNING: On MSDOS, pointers + returned by zalloc for objects of exactly 65536 bytes *must* have their + offset normalized to zero. The default allocation function provided by this + library ensures this (see zutil.c). To reduce memory requirements and avoid + any allocation of 64K objects, at the expense of compression ratio, compile + the library with -DMAX_WBITS=14 (see zconf.h). + + The fields total_in and total_out can be used for statistics or progress + reports. After compression, total_in holds the total size of the + uncompressed data and may be saved for use in the decompressor (particularly + if the decompressor wants to decompress everything in a single step). +*/ + + /* constants */ + +#define Z_NO_FLUSH 0 +#define Z_PARTIAL_FLUSH 1 +#define Z_SYNC_FLUSH 2 +#define Z_FULL_FLUSH 3 +#define Z_FINISH 4 +#define Z_BLOCK 5 +#define Z_TREES 6 +/* Allowed flush values; see deflate() and inflate() below for details */ + +#define Z_OK 0 +#define Z_STREAM_END 1 +#define Z_NEED_DICT 2 +#define Z_ERRNO (-1) +#define Z_STREAM_ERROR (-2) +#define Z_DATA_ERROR (-3) +#define Z_MEM_ERROR (-4) +#define Z_BUF_ERROR (-5) +#define Z_VERSION_ERROR (-6) +/* Return codes for the compression/decompression functions. Negative values + * are errors, positive values are used for special but normal events. 
+ */ + +#define Z_NO_COMPRESSION 0 +#define Z_BEST_SPEED 1 +#define Z_BEST_COMPRESSION 9 +#define Z_DEFAULT_COMPRESSION (-1) +/* compression levels */ + +#define Z_FILTERED 1 +#define Z_HUFFMAN_ONLY 2 +#define Z_RLE 3 +#define Z_FIXED 4 +#define Z_DEFAULT_STRATEGY 0 +/* compression strategy; see deflateInit2() below for details */ + +#define Z_BINARY 0 +#define Z_TEXT 1 +#define Z_ASCII Z_TEXT /* for compatibility with 1.2.2 and earlier */ +#define Z_UNKNOWN 2 +/* Possible values of the data_type field (though see inflate()) */ + +#define Z_DEFLATED 8 +/* The deflate compression method (the only one supported in this version) */ + +#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */ + +#define zlib_version zlibVersion() +/* for compatibility with versions < 1.0.2 */ + + + /* basic functions */ + +ZEXTERN const char * ZEXPORT zlibVersion OF((void)); +/* The application can compare zlibVersion and ZLIB_VERSION for consistency. + If the first character differs, the library code actually used is not + compatible with the zlib.h header file used by the application. This check + is automatically made by deflateInit and inflateInit. + */ + +/* +ZEXTERN int ZEXPORT deflateInit OF((z_streamp strm, int level)); + + Initializes the internal stream state for compression. The fields + zalloc, zfree and opaque must be initialized before by the caller. If + zalloc and zfree are set to Z_NULL, deflateInit updates them to use default + allocation functions. + + The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9: + 1 gives best speed, 9 gives best compression, 0 gives no compression at all + (the input data is simply copied a block at a time). Z_DEFAULT_COMPRESSION + requests a default compromise between speed and compression (currently + equivalent to level 6). 
+ + deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_STREAM_ERROR if level is not a valid compression level, or + Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible + with the version assumed by the caller (ZLIB_VERSION). msg is set to null + if there is no error message. deflateInit does not perform any compression: + this will be done by deflate(). +*/ + + +ZEXTERN int ZEXPORT deflate OF((z_streamp strm, int flush)); +/* + deflate compresses as much data as possible, and stops when the input + buffer becomes empty or the output buffer becomes full. It may introduce + some output latency (reading input without producing any output) except when + forced to flush. + + The detailed semantics are as follows. deflate performs one or both of the + following actions: + + - Compress more input starting at next_in and update next_in and avail_in + accordingly. If not all input can be processed (because there is not + enough room in the output buffer), next_in and avail_in are updated and + processing will resume at this point for the next call of deflate(). + + - Provide more output starting at next_out and update next_out and avail_out + accordingly. This action is forced if the parameter flush is non zero. + Forcing flush frequently degrades the compression ratio, so this parameter + should be set only when necessary (in interactive applications). Some + output may be provided even if flush is not set. + + Before the call of deflate(), the application should ensure that at least + one of the actions is possible, by providing more input and/or consuming more + output, and updating avail_in or avail_out accordingly; avail_out should + never be zero before the call. The application can consume the compressed + output when it wants, for example when the output buffer is full (avail_out + == 0), or after each call of deflate(). 
If deflate returns Z_OK and with + zero avail_out, it must be called again after making room in the output + buffer because there might be more output pending. + + Normally the parameter flush is set to Z_NO_FLUSH, which allows deflate to + decide how much data to accumulate before producing output, in order to + maximize compression. + + If the parameter flush is set to Z_SYNC_FLUSH, all pending output is + flushed to the output buffer and the output is aligned on a byte boundary, so + that the decompressor can get all input data available so far. (In + particular avail_in is zero after the call if enough output space has been + provided before the call.) Flushing may degrade compression for some + compression algorithms and so it should be used only when necessary. This + completes the current deflate block and follows it with an empty stored block + that is three bits plus filler bits to the next byte, followed by four bytes + (00 00 ff ff). + + If flush is set to Z_PARTIAL_FLUSH, all pending output is flushed to the + output buffer, but the output is not aligned to a byte boundary. All of the + input data so far will be available to the decompressor, as for Z_SYNC_FLUSH. + This completes the current deflate block and follows it with an empty fixed + codes block that is 10 bits long. This assures that enough bytes are output + in order for the decompressor to finish the block before the empty fixed code + block. + + If flush is set to Z_BLOCK, a deflate block is completed and emitted, as + for Z_SYNC_FLUSH, but the output is not aligned on a byte boundary, and up to + seven bits of the current block are held to be written as the next byte after + the next deflate block is completed. In this case, the decompressor may not + be provided enough bits at this point in order to complete decompression of + the data provided so far to the compressor. It may need to wait for the next + block to be emitted. 
This is for advanced applications that need to control + the emission of deflate blocks. + + If flush is set to Z_FULL_FLUSH, all output is flushed as with + Z_SYNC_FLUSH, and the compression state is reset so that decompression can + restart from this point if previous compressed data has been damaged or if + random access is desired. Using Z_FULL_FLUSH too often can seriously degrade + compression. + + If deflate returns with avail_out == 0, this function must be called again + with the same value of the flush parameter and more output space (updated + avail_out), until the flush is complete (deflate returns with non-zero + avail_out). In the case of a Z_FULL_FLUSH or Z_SYNC_FLUSH, make sure that + avail_out is greater than six to avoid repeated flush markers due to + avail_out == 0 on return. + + If the parameter flush is set to Z_FINISH, pending input is processed, + pending output is flushed and deflate returns with Z_STREAM_END if there was + enough output space; if deflate returns with Z_OK, this function must be + called again with Z_FINISH and more output space (updated avail_out) but no + more input data, until it returns with Z_STREAM_END or an error. After + deflate has returned Z_STREAM_END, the only possible operations on the stream + are deflateReset or deflateEnd. + + Z_FINISH can be used immediately after deflateInit if all the compression + is to be done in a single step. In this case, avail_out must be at least the + value returned by deflateBound (see below). Then deflate is guaranteed to + return Z_STREAM_END. If not enough output space is provided, deflate will + not return Z_STREAM_END, and it must be called again as described above. + + deflate() sets strm->adler to the adler32 checksum of all input read + so far (that is, total_in bytes). + + deflate() may update strm->data_type if it can make a good guess about + the input data type (Z_BINARY or Z_TEXT). In doubt, the data is considered + binary. 
This field is only for information purposes and does not affect the + compression algorithm in any manner. + + deflate() returns Z_OK if some progress has been made (more input + processed or more output produced), Z_STREAM_END if all input has been + consumed and all output has been produced (only when flush is set to + Z_FINISH), Z_STREAM_ERROR if the stream state was inconsistent (for example + if next_in or next_out was Z_NULL), Z_BUF_ERROR if no progress is possible + (for example avail_in or avail_out was zero). Note that Z_BUF_ERROR is not + fatal, and deflate() can be called again with more input and more output + space to continue compressing. +*/ + + +ZEXTERN int ZEXPORT deflateEnd OF((z_streamp strm)); +/* + All dynamically allocated data structures for this stream are freed. + This function discards any unprocessed input and does not flush any pending + output. + + deflateEnd returns Z_OK if success, Z_STREAM_ERROR if the + stream state was inconsistent, Z_DATA_ERROR if the stream was freed + prematurely (some input or output was discarded). In the error case, msg + may be set but then points to a static string (which must not be + deallocated). +*/ + + +/* +ZEXTERN int ZEXPORT inflateInit OF((z_streamp strm)); + + Initializes the internal stream state for decompression. The fields + next_in, avail_in, zalloc, zfree and opaque must be initialized before by + the caller. If next_in is not Z_NULL and avail_in is large enough (the + exact value depends on the compression method), inflateInit determines the + compression method from the zlib header and allocates all data structures + accordingly; otherwise the allocation will be deferred to the first call of + inflate. If zalloc and zfree are set to Z_NULL, inflateInit updates them to + use default allocation functions. 
+ + inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_VERSION_ERROR if the zlib library version is incompatible with the + version assumed by the caller, or Z_STREAM_ERROR if the parameters are + invalid, such as a null pointer to the structure. msg is set to null if + there is no error message. inflateInit does not perform any decompression + apart from possibly reading the zlib header if present: actual decompression + will be done by inflate(). (So next_in and avail_in may be modified, but + next_out and avail_out are unused and unchanged.) The current implementation + of inflateInit() does not process any header information -- that is deferred + until inflate() is called. +*/ + + +ZEXTERN int ZEXPORT inflate OF((z_streamp strm, int flush)); +/* + inflate decompresses as much data as possible, and stops when the input + buffer becomes empty or the output buffer becomes full. It may introduce + some output latency (reading input without producing any output) except when + forced to flush. + + The detailed semantics are as follows. inflate performs one or both of the + following actions: + + - Decompress more input starting at next_in and update next_in and avail_in + accordingly. If not all input can be processed (because there is not + enough room in the output buffer), next_in is updated and processing will + resume at this point for the next call of inflate(). + + - Provide more output starting at next_out and update next_out and avail_out + accordingly. inflate() provides as much output as possible, until there is + no more input data or no more space in the output buffer (see below about + the flush parameter). + + Before the call of inflate(), the application should ensure that at least + one of the actions is possible, by providing more input and/or consuming more + output, and updating the next_* and avail_* values accordingly. 
The + application can consume the uncompressed output when it wants, for example + when the output buffer is full (avail_out == 0), or after each call of + inflate(). If inflate returns Z_OK and with zero avail_out, it must be + called again after making room in the output buffer because there might be + more output pending. + + The flush parameter of inflate() can be Z_NO_FLUSH, Z_SYNC_FLUSH, Z_FINISH, + Z_BLOCK, or Z_TREES. Z_SYNC_FLUSH requests that inflate() flush as much + output as possible to the output buffer. Z_BLOCK requests that inflate() + stop if and when it gets to the next deflate block boundary. When decoding + the zlib or gzip format, this will cause inflate() to return immediately + after the header and before the first block. When doing a raw inflate, + inflate() will go ahead and process the first block, and will return when it + gets to the end of that block, or when it runs out of data. + + The Z_BLOCK option assists in appending to or combining deflate streams. + Also to assist in this, on return inflate() will set strm->data_type to the + number of unused bits in the last byte taken from strm->next_in, plus 64 if + inflate() is currently decoding the last block in the deflate stream, plus + 128 if inflate() returned immediately after decoding an end-of-block code or + decoding the complete header up to just before the first byte of the deflate + stream. The end-of-block will not be indicated until all of the uncompressed + data from that block has been written to strm->next_out. The number of + unused bits may in general be greater than seven, except when bit 7 of + data_type is set, in which case the number of unused bits will be less than + eight. data_type is set as noted here every time inflate() returns for all + flush options, and so can be used to determine the amount of currently + consumed input in bits. 
+ + The Z_TREES option behaves as Z_BLOCK does, but it also returns when the + end of each deflate block header is reached, before any actual data in that + block is decoded. This allows the caller to determine the length of the + deflate block header for later use in random access within a deflate block. + 256 is added to the value of strm->data_type when inflate() returns + immediately after reaching the end of the deflate block header. + + inflate() should normally be called until it returns Z_STREAM_END or an + error. However if all decompression is to be performed in a single step (a + single call of inflate), the parameter flush should be set to Z_FINISH. In + this case all pending input is processed and all pending output is flushed; + avail_out must be large enough to hold all of the uncompressed data for the + operation to complete. (The size of the uncompressed data may have been + saved by the compressor for this purpose.) The use of Z_FINISH is not + required to perform an inflation in one step. However it may be used to + inform inflate that a faster approach can be used for the single inflate() + call. Z_FINISH also informs inflate to not maintain a sliding window if the + stream completes, which reduces inflate's memory footprint. If the stream + does not complete, either because not all of the stream is provided or not + enough output space is provided, then a sliding window will be allocated and + inflate() can be called again to continue the operation as if Z_NO_FLUSH had + been used. + + In this implementation, inflate() always flushes as much output as + possible to the output buffer, and always uses the faster approach on the + first call. So the effects of the flush parameter in this implementation are + on the return value of inflate() as noted below, when inflate() returns early + when Z_BLOCK or Z_TREES is used, and when inflate() avoids the allocation of + memory for a sliding window when Z_FINISH is used. 
+
+ If a preset dictionary is needed after this call (see inflateSetDictionary
+ below), inflate sets strm->adler to the Adler-32 checksum of the dictionary
+ chosen by the compressor and returns Z_NEED_DICT; otherwise it sets
+ strm->adler to the Adler-32 checksum of all output produced so far (that is,
+ total_out bytes) and returns Z_OK, Z_STREAM_END or an error code as described
+ below. At the end of the stream, inflate() checks that its computed adler32
+ checksum is equal to that saved by the compressor and returns Z_STREAM_END
+ only if the checksum is correct.
+
+ inflate() can decompress and check either zlib-wrapped or gzip-wrapped
+ deflate data. The header type is detected automatically, if requested when
+ initializing with inflateInit2(). Any information contained in the gzip
+ header is not retained, so applications that need that information should
+ instead use raw inflate, see inflateInit2() below, or inflateBack() and
+ perform their own processing of the gzip header and trailer. When processing
+ gzip-wrapped deflate data, strm->adler is set to the CRC-32 of the output
+ produced so far. The CRC-32 is checked against the gzip trailer.
+
+ inflate() returns Z_OK if some progress has been made (more input processed
+ or more output produced), Z_STREAM_END if the end of the compressed data has
+ been reached and all uncompressed output has been produced, Z_NEED_DICT if a
+ preset dictionary is needed at this point, Z_DATA_ERROR if the input data was
+ corrupted (input stream not conforming to the zlib format or incorrect check
+ value), Z_STREAM_ERROR if the stream structure was inconsistent (for example
+ next_in or next_out was Z_NULL), Z_MEM_ERROR if there was not enough memory,
+ Z_BUF_ERROR if no progress is possible or if there was not enough room in the
+ output buffer when Z_FINISH is used. Note that Z_BUF_ERROR is not fatal, and
+ inflate() can be called again with more input and more output space to
+ continue decompressing. 
If Z_DATA_ERROR is returned, the application may + then call inflateSync() to look for a good compression block if a partial + recovery of the data is desired. +*/ + + +ZEXTERN int ZEXPORT inflateEnd OF((z_streamp strm)); +/* + All dynamically allocated data structures for this stream are freed. + This function discards any unprocessed input and does not flush any pending + output. + + inflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state + was inconsistent. In the error case, msg may be set but then points to a + static string (which must not be deallocated). +*/ + + + /* Advanced functions */ + +/* + The following functions are needed only in some special applications. +*/ + +/* +ZEXTERN int ZEXPORT deflateInit2 OF((z_streamp strm, + int level, + int method, + int windowBits, + int memLevel, + int strategy)); + + This is another version of deflateInit with more compression options. The + fields next_in, zalloc, zfree and opaque must be initialized before by the + caller. + + The method parameter is the compression method. It must be Z_DEFLATED in + this version of the library. + + The windowBits parameter is the base two logarithm of the window size + (the size of the history buffer). It should be in the range 8..15 for this + version of the library. Larger values of this parameter result in better + compression at the expense of memory usage. The default value is 15 if + deflateInit is used instead. + + windowBits can also be -8..-15 for raw deflate. In this case, -windowBits + determines the window size. deflate() will then generate raw deflate data + with no zlib header or trailer, and will not compute an adler32 check value. + + windowBits can also be greater than 15 for optional gzip encoding. Add + 16 to windowBits to write a simple gzip header and trailer around the + compressed data instead of a zlib wrapper. 
The gzip header will have no + file name, no extra data, no comment, no modification time (set to zero), no + header crc, and the operating system will be set to 255 (unknown). If a + gzip stream is being written, strm->adler is a crc32 instead of an adler32. + + The memLevel parameter specifies how much memory should be allocated + for the internal compression state. memLevel=1 uses minimum memory but is + slow and reduces compression ratio; memLevel=9 uses maximum memory for + optimal speed. The default value is 8. See zconf.h for total memory usage + as a function of windowBits and memLevel. + + The strategy parameter is used to tune the compression algorithm. Use the + value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a + filter (or predictor), Z_HUFFMAN_ONLY to force Huffman encoding only (no + string match), or Z_RLE to limit match distances to one (run-length + encoding). Filtered data consists mostly of small values with a somewhat + random distribution. In this case, the compression algorithm is tuned to + compress them better. The effect of Z_FILTERED is to force more Huffman + coding and less string matching; it is somewhat intermediate between + Z_DEFAULT_STRATEGY and Z_HUFFMAN_ONLY. Z_RLE is designed to be almost as + fast as Z_HUFFMAN_ONLY, but give better compression for PNG image data. The + strategy parameter only affects the compression ratio but not the + correctness of the compressed output even if it is not set appropriately. + Z_FIXED prevents the use of dynamic Huffman codes, allowing for a simpler + decoder for special applications. + + deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_STREAM_ERROR if any parameter is invalid (such as an invalid + method), or Z_VERSION_ERROR if the zlib library version (zlib_version) is + incompatible with the version assumed by the caller (ZLIB_VERSION). msg is + set to null if there is no error message. 
deflateInit2 does not perform any + compression: this will be done by deflate(). +*/ + +ZEXTERN int ZEXPORT deflateSetDictionary OF((z_streamp strm, + const Bytef *dictionary, + uInt dictLength)); +/* + Initializes the compression dictionary from the given byte sequence + without producing any compressed output. When using the zlib format, this + function must be called immediately after deflateInit, deflateInit2 or + deflateReset, and before any call of deflate. When doing raw deflate, this + function must be called either before any call of deflate, or immediately + after the completion of a deflate block, i.e. after all input has been + consumed and all output has been delivered when using any of the flush + options Z_BLOCK, Z_PARTIAL_FLUSH, Z_SYNC_FLUSH, or Z_FULL_FLUSH. The + compressor and decompressor must use exactly the same dictionary (see + inflateSetDictionary). + + The dictionary should consist of strings (byte sequences) that are likely + to be encountered later in the data to be compressed, with the most commonly + used strings preferably put towards the end of the dictionary. Using a + dictionary is most useful when the data to be compressed is short and can be + predicted with good accuracy; the data can then be compressed better than + with the default empty dictionary. + + Depending on the size of the compression data structures selected by + deflateInit or deflateInit2, a part of the dictionary may in effect be + discarded, for example if the dictionary is larger than the window size + provided in deflateInit or deflateInit2. Thus the strings most likely to be + useful should be put at the end of the dictionary, not at the front. In + addition, the current implementation of deflate will use at most the window + size minus 262 bytes of the provided dictionary. 
+ + Upon return of this function, strm->adler is set to the adler32 value + of the dictionary; the decompressor may later use this value to determine + which dictionary has been used by the compressor. (The adler32 value + applies to the whole dictionary even if only a subset of the dictionary is + actually used by the compressor.) If a raw deflate was requested, then the + adler32 value is not computed and strm->adler is not set. + + deflateSetDictionary returns Z_OK if success, or Z_STREAM_ERROR if a + parameter is invalid (e.g. dictionary being Z_NULL) or the stream state is + inconsistent (for example if deflate has already been called for this stream + or if not at a block boundary for raw deflate). deflateSetDictionary does + not perform any compression: this will be done by deflate(). +*/ + +ZEXTERN int ZEXPORT deflateCopy OF((z_streamp dest, + z_streamp source)); +/* + Sets the destination stream as a complete copy of the source stream. + + This function can be useful when several compression strategies will be + tried, for example when there are several ways of pre-processing the input + data with a filter. The streams that will be discarded should then be freed + by calling deflateEnd. Note that deflateCopy duplicates the internal + compression state which can be quite large, so this strategy is slow and can + consume lots of memory. + + deflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not + enough memory, Z_STREAM_ERROR if the source stream state was inconsistent + (such as zalloc being Z_NULL). msg is left unchanged in both source and + destination. +*/ + +ZEXTERN int ZEXPORT deflateReset OF((z_streamp strm)); +/* + This function is equivalent to deflateEnd followed by deflateInit, + but does not free and reallocate all the internal compression state. The + stream will keep the same compression level and any other attributes that + may have been set by deflateInit2. 
+ + deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent (such as zalloc or state being Z_NULL). +*/ + +ZEXTERN int ZEXPORT deflateParams OF((z_streamp strm, + int level, + int strategy)); +/* + Dynamically update the compression level and compression strategy. The + interpretation of level and strategy is as in deflateInit2. This can be + used to switch between compression and straight copy of the input data, or + to switch to a different kind of input data requiring a different strategy. + If the compression level is changed, the input available so far is + compressed with the old level (and may be flushed); the new level will take + effect only at the next call of deflate(). + + Before the call of deflateParams, the stream state must be set as for + a call of deflate(), since the currently available input may have to be + compressed and flushed. In particular, strm->avail_out must be non-zero. + + deflateParams returns Z_OK if success, Z_STREAM_ERROR if the source + stream state was inconsistent or if a parameter was invalid, Z_BUF_ERROR if + strm->avail_out was zero. +*/ + +ZEXTERN int ZEXPORT deflateTune OF((z_streamp strm, + int good_length, + int max_lazy, + int nice_length, + int max_chain)); +/* + Fine tune deflate's internal compression parameters. This should only be + used by someone who understands the algorithm used by zlib's deflate for + searching for the best matching string, and even then only by the most + fanatic optimizer trying to squeeze out the last compressed bit for their + specific input data. Read the deflate.c source code for the meaning of the + max_lazy, good_length, nice_length, and max_chain parameters. + + deflateTune() can be called after deflateInit() or deflateInit2(), and + returns Z_OK on success, or Z_STREAM_ERROR for an invalid deflate stream. 
+ */ + +ZEXTERN uLong ZEXPORT deflateBound OF((z_streamp strm, + uLong sourceLen)); +/* + deflateBound() returns an upper bound on the compressed size after + deflation of sourceLen bytes. It must be called after deflateInit() or + deflateInit2(), and after deflateSetHeader(), if used. This would be used + to allocate an output buffer for deflation in a single pass, and so would be + called before deflate(). If that first deflate() call is provided the + sourceLen input bytes, an output buffer allocated to the size returned by + deflateBound(), and the flush value Z_FINISH, then deflate() is guaranteed + to return Z_STREAM_END. Note that it is possible for the compressed size to + be larger than the value returned by deflateBound() if flush options other + than Z_FINISH or Z_NO_FLUSH are used. +*/ + +ZEXTERN int ZEXPORT deflatePending OF((z_streamp strm, + unsigned *pending, + int *bits)); +/* + deflatePending() returns the number of bytes and bits of output that have + been generated, but not yet provided in the available output. The bytes not + provided would be due to the available output space having being consumed. + The number of bits of output not provided are between 0 and 7, where they + await more bits to join them in order to fill out a full byte. If pending + or bits are Z_NULL, then those values are not set. + + deflatePending returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent. + */ + +ZEXTERN int ZEXPORT deflatePrime OF((z_streamp strm, + int bits, + int value)); +/* + deflatePrime() inserts bits in the deflate output stream. The intent + is that this function is used to start off the deflate output with the bits + leftover from a previous deflate stream when appending to it. As such, this + function can only be used for raw deflate, and must be used before the first + deflate() call after a deflateInit2() or deflateReset(). 
bits must be less + than or equal to 16, and that many of the least significant bits of value + will be inserted in the output. + + deflatePrime returns Z_OK if success, Z_BUF_ERROR if there was not enough + room in the internal buffer to insert the bits, or Z_STREAM_ERROR if the + source stream state was inconsistent. +*/ + +ZEXTERN int ZEXPORT deflateSetHeader OF((z_streamp strm, + gz_headerp head)); +/* + deflateSetHeader() provides gzip header information for when a gzip + stream is requested by deflateInit2(). deflateSetHeader() may be called + after deflateInit2() or deflateReset() and before the first call of + deflate(). The text, time, os, extra field, name, and comment information + in the provided gz_header structure are written to the gzip header (xflag is + ignored -- the extra flags are set according to the compression level). The + caller must assure that, if not Z_NULL, name and comment are terminated with + a zero byte, and that if extra is not Z_NULL, that extra_len bytes are + available there. If hcrc is true, a gzip header crc is included. Note that + the current versions of the command-line version of gzip (up through version + 1.3.x) do not support header crc's, and will report that it is a "multi-part + gzip file" and give up. + + If deflateSetHeader is not used, the default gzip header has text false, + the time set to zero, and os set to 255, with no extra, name, or comment + fields. The gzip header is returned to the default state by deflateReset(). + + deflateSetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent. +*/ + +/* +ZEXTERN int ZEXPORT inflateInit2 OF((z_streamp strm, + int windowBits)); + + This is another version of inflateInit with an extra parameter. The + fields next_in, avail_in, zalloc, zfree and opaque must be initialized + before by the caller. + + The windowBits parameter is the base two logarithm of the maximum window + size (the size of the history buffer). 
It should be in the range 8..15 for
+ this version of the library. The default value is 15 if inflateInit is used
+ instead. windowBits must be greater than or equal to the windowBits value
+ provided to deflateInit2() while compressing, or it must be equal to 15 if
+ deflateInit2() was not used. If a compressed stream with a larger window
+ size is given as input, inflate() will return with the error code
+ Z_DATA_ERROR instead of trying to allocate a larger window.
+
+ windowBits can also be zero to request that inflate use the window size in
+ the zlib header of the compressed stream.
+
+ windowBits can also be -8..-15 for raw inflate. In this case, -windowBits
+ determines the window size. inflate() will then process raw deflate data,
+ not looking for a zlib or gzip header, not generating a check value, and not
+ looking for any check values for comparison at the end of the stream. This
+ is for use with other formats that use the deflate compressed data format
+ such as zip. Those formats provide their own check values. If a custom
+ format is developed using the raw deflate format for compressed data, it is
+ recommended that a check value such as an adler32 or a crc32 be applied to
+ the uncompressed data as is done in the zlib, gzip, and zip formats. For
+ most applications, the zlib format should be used as is. Note that comments
+ above on the use in deflateInit2() apply to the magnitude of windowBits.
+
+ windowBits can also be greater than 15 for optional gzip decoding. Add
+ 32 to windowBits to enable zlib and gzip decoding with automatic header
+ detection, or add 16 to decode only the gzip format (the zlib format will
+ return a Z_DATA_ERROR). If a gzip stream is being decoded, strm->adler is a
+ crc32 instead of an adler32. 
+
+ inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
+ memory, Z_VERSION_ERROR if the zlib library version is incompatible with the
+ version assumed by the caller, or Z_STREAM_ERROR if the parameters are
+ invalid, such as a null pointer to the structure. msg is set to null if
+ there is no error message. inflateInit2 does not perform any decompression
+ apart from possibly reading the zlib header if present: actual decompression
+ will be done by inflate(). (So next_in and avail_in may be modified, but
+ next_out and avail_out are unused and unchanged.) The current implementation
+ of inflateInit2() does not process any header information -- that is
+ deferred until inflate() is called.
+*/
+
+ZEXTERN int ZEXPORT inflateSetDictionary OF((z_streamp strm,
+ const Bytef *dictionary,
+ uInt dictLength));
+/*
+ Initializes the decompression dictionary from the given uncompressed byte
+ sequence. This function must be called immediately after a call of inflate,
+ if that call returned Z_NEED_DICT. The dictionary chosen by the compressor
+ can be determined from the adler32 value returned by that call of inflate.
+ The compressor and decompressor must use exactly the same dictionary (see
+ deflateSetDictionary). For raw inflate, this function can be called at any
+ time to set the dictionary. If the provided dictionary is smaller than the
+ window and there is already data in the window, then the provided dictionary
+ will amend what's there. The application must ensure that the dictionary
+ that was used for compression is provided.
+
+ inflateSetDictionary returns Z_OK if success, Z_STREAM_ERROR if a
+ parameter is invalid (e.g. dictionary being Z_NULL) or the stream state is
+ inconsistent, Z_DATA_ERROR if the given dictionary doesn't match the
+ expected one (incorrect adler32 value). inflateSetDictionary does not
+ perform any decompression: this will be done by subsequent calls of
+ inflate(). 
+*/
+
+ZEXTERN int ZEXPORT inflateGetDictionary OF((z_streamp strm,
+ Bytef *dictionary,
+ uInt *dictLength));
+/*
+ Returns the sliding dictionary being maintained by inflate. dictLength is
+ set to the number of bytes in the dictionary, and that many bytes are copied
+ to dictionary. dictionary must have enough space, where 32768 bytes is
+ always enough. If inflateGetDictionary() is called with dictionary equal to
+ Z_NULL, then only the dictionary length is returned, and nothing is copied.
+ Similarly, if dictLength is Z_NULL, then it is not set.
+
+ inflateGetDictionary returns Z_OK on success, or Z_STREAM_ERROR if the
+ stream state is inconsistent.
+*/
+
+ZEXTERN int ZEXPORT inflateSync OF((z_streamp strm));
+/*
+ Skips invalid compressed data until a possible full flush point (see above
+ for the description of deflate with Z_FULL_FLUSH) can be found, or until all
+ available input is skipped. No output is provided.
+
+ inflateSync searches for a 00 00 FF FF pattern in the compressed data.
+ All full flush points have this pattern, but not all occurrences of this
+ pattern are full flush points.
+
+ inflateSync returns Z_OK if a possible full flush point has been found,
+ Z_BUF_ERROR if no more input was provided, Z_DATA_ERROR if no flush point
+ has been found, or Z_STREAM_ERROR if the stream structure was inconsistent.
+ In the success case, the application may save the current value of
+ total_in which indicates where valid compressed data was found. In the
+ error case, the application may repeatedly call inflateSync, providing more
+ input each time, until success or end of the input data.
+*/
+
+ZEXTERN int ZEXPORT inflateCopy OF((z_streamp dest,
+ z_streamp source));
+/*
+ Sets the destination stream as a complete copy of the source stream.
+
+ This function can be useful when randomly accessing a large stream. 
The + first pass through the stream can periodically record the inflate state, + allowing restarting inflate at those points when randomly accessing the + stream. + + inflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not + enough memory, Z_STREAM_ERROR if the source stream state was inconsistent + (such as zalloc being Z_NULL). msg is left unchanged in both source and + destination. +*/ + +ZEXTERN int ZEXPORT inflateReset OF((z_streamp strm)); +/* + This function is equivalent to inflateEnd followed by inflateInit, + but does not free and reallocate all the internal decompression state. The + stream will keep attributes that may have been set by inflateInit2. + + inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent (such as zalloc or state being Z_NULL). +*/ + +ZEXTERN int ZEXPORT inflateReset2 OF((z_streamp strm, + int windowBits)); +/* + This function is the same as inflateReset, but it also permits changing + the wrap and window size requests. The windowBits parameter is interpreted + the same as it is for inflateInit2. + + inflateReset2 returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent (such as zalloc or state being Z_NULL), or if + the windowBits parameter is invalid. +*/ + +ZEXTERN int ZEXPORT inflatePrime OF((z_streamp strm, + int bits, + int value)); +/* + This function inserts bits in the inflate input stream. The intent is + that this function is used to start inflating at a bit position in the + middle of a byte. The provided bits will be used before any bytes are used + from next_in. This function should only be used with raw inflate, and + should be used before the first inflate() call after inflateInit2() or + inflateReset(). bits must be less than or equal to 16, and that many of the + least significant bits of value will be inserted in the input. + + If bits is negative, then the input stream bit buffer is emptied. 
Then + inflatePrime() can be called again to put bits in the buffer. This is used + to clear out bits leftover after feeding inflate a block description prior + to feeding inflate codes. + + inflatePrime returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent. +*/ + +ZEXTERN long ZEXPORT inflateMark OF((z_streamp strm)); +/* + This function returns two values, one in the lower 16 bits of the return + value, and the other in the remaining upper bits, obtained by shifting the + return value down 16 bits. If the upper value is -1 and the lower value is + zero, then inflate() is currently decoding information outside of a block. + If the upper value is -1 and the lower value is non-zero, then inflate is in + the middle of a stored block, with the lower value equaling the number of + bytes from the input remaining to copy. If the upper value is not -1, then + it is the number of bits back from the current bit position in the input of + the code (literal or length/distance pair) currently being processed. In + that case the lower value is the number of bytes already emitted for that + code. + + A code is being processed if inflate is waiting for more input to complete + decoding of the code, or if it has completed decoding but is waiting for + more output space to write the literal or match data. + + inflateMark() is used to mark locations in the input data for random + access, which may be at bit positions, and to note those cases where the + output of a code may span boundaries of random access blocks. The current + location in the input stream can be determined from avail_in and data_type + as noted in the description for the Z_BLOCK flush parameter for inflate. + + inflateMark returns the value noted above or -1 << 16 if the provided + source stream state was inconsistent. 
+*/ + +ZEXTERN int ZEXPORT inflateGetHeader OF((z_streamp strm, + gz_headerp head)); +/* + inflateGetHeader() requests that gzip header information be stored in the + provided gz_header structure. inflateGetHeader() may be called after + inflateInit2() or inflateReset(), and before the first call of inflate(). + As inflate() processes the gzip stream, head->done is zero until the header + is completed, at which time head->done is set to one. If a zlib stream is + being decoded, then head->done is set to -1 to indicate that there will be + no gzip header information forthcoming. Note that Z_BLOCK or Z_TREES can be + used to force inflate() to return immediately after header processing is + complete and before any actual data is decompressed. + + The text, time, xflags, and os fields are filled in with the gzip header + contents. hcrc is set to true if there is a header CRC. (The header CRC + was valid if done is set to one.) If extra is not Z_NULL, then extra_max + contains the maximum number of bytes to write to extra. Once done is true, + extra_len contains the actual extra field length, and extra contains the + extra field, or that field truncated if extra_max is less than extra_len. + If name is not Z_NULL, then up to name_max characters are written there, + terminated with a zero unless the length is greater than name_max. If + comment is not Z_NULL, then up to comm_max characters are written there, + terminated with a zero unless the length is greater than comm_max. When any + of extra, name, or comment are not Z_NULL and the respective field is not + present in the header, then that field is set to Z_NULL to signal its + absence. This allows the use of deflateSetHeader() with the returned + structure to duplicate the header. However if those fields are set to + allocated memory, then the application will need to save those pointers + elsewhere so that they can be eventually freed. 
+ + If inflateGetHeader is not used, then the header information is simply + discarded. The header is always checked for validity, including the header + CRC if present. inflateReset() will reset the process to discard the header + information. The application would need to call inflateGetHeader() again to + retrieve the header from the next gzip stream. + + inflateGetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent. +*/ + +/* +ZEXTERN int ZEXPORT inflateBackInit OF((z_streamp strm, int windowBits, + unsigned char FAR *window)); + + Initialize the internal stream state for decompression using inflateBack() + calls. The fields zalloc, zfree and opaque in strm must be initialized + before the call. If zalloc and zfree are Z_NULL, then the default library- + derived memory allocation routines are used. windowBits is the base two + logarithm of the window size, in the range 8..15. window is a caller + supplied buffer of that size. Except for special applications where it is + assured that deflate was used with small window sizes, windowBits must be 15 + and a 32K byte window must be supplied to be able to decompress general + deflate streams. + + See inflateBack() for the usage of these routines. + + inflateBackInit will return Z_OK on success, Z_STREAM_ERROR if any of + the parameters are invalid, Z_MEM_ERROR if the internal state could not be + allocated, or Z_VERSION_ERROR if the version of the library does not match + the version of the header file. +*/ + +typedef unsigned (*in_func) OF((void FAR *, + z_const unsigned char FAR * FAR *)); +typedef int (*out_func) OF((void FAR *, unsigned char FAR *, unsigned)); + +ZEXTERN int ZEXPORT inflateBack OF((z_streamp strm, + in_func in, void FAR *in_desc, + out_func out, void FAR *out_desc)); +/* + inflateBack() does a raw inflate with a single call using a call-back + interface for input and output. 
This is potentially more efficient than + inflate() for file i/o applications, in that it avoids copying between the + output and the sliding window by simply making the window itself the output + buffer. inflate() can be faster on modern CPUs when used with large + buffers. inflateBack() trusts the application to not change the output + buffer passed by the output function, at least until inflateBack() returns. + + inflateBackInit() must be called first to allocate the internal state + and to initialize the state with the user-provided window buffer. + inflateBack() may then be used multiple times to inflate a complete, raw + deflate stream with each call. inflateBackEnd() is then called to free the + allocated state. + + A raw deflate stream is one with no zlib or gzip header or trailer. + This routine would normally be used in a utility that reads zip or gzip + files and writes out uncompressed files. The utility would decode the + header and process the trailer on its own, hence this routine expects only + the raw deflate stream to decompress. This is different from the normal + behavior of inflate(), which expects either a zlib or gzip header and + trailer around the deflate stream. + + inflateBack() uses two subroutines supplied by the caller that are then + called by inflateBack() for input and output. inflateBack() calls those + routines until it reads a complete deflate stream and writes out all of the + uncompressed data, or until it encounters an error. The function's + parameters and return types are defined above in the in_func and out_func + typedefs. inflateBack() will call in(in_desc, &buf) which should return the + number of bytes of provided input, and a pointer to that input in buf. If + there is no input available, in() must return zero--buf is ignored in that + case--and inflateBack() will return a buffer error. inflateBack() will call + out(out_desc, buf, len) to write the uncompressed data buf[0..len-1]. 
out() + should return zero on success, or non-zero on failure. If out() returns + non-zero, inflateBack() will return with an error. Neither in() nor out() + are permitted to change the contents of the window provided to + inflateBackInit(), which is also the buffer that out() uses to write from. + The length written by out() will be at most the window size. Any non-zero + amount of input may be provided by in(). + + For convenience, inflateBack() can be provided input on the first call by + setting strm->next_in and strm->avail_in. If that input is exhausted, then + in() will be called. Therefore strm->next_in must be initialized before + calling inflateBack(). If strm->next_in is Z_NULL, then in() will be called + immediately for input. If strm->next_in is not Z_NULL, then strm->avail_in + must also be initialized, and then if strm->avail_in is not zero, input will + initially be taken from strm->next_in[0 .. strm->avail_in - 1]. + + The in_desc and out_desc parameters of inflateBack() is passed as the + first parameter of in() and out() respectively when they are called. These + descriptors can be optionally used to pass any information that the caller- + supplied in() and out() functions need to do their job. + + On return, inflateBack() will set strm->next_in and strm->avail_in to + pass back any unused input that was provided by the last in() call. The + return values of inflateBack() can be Z_STREAM_END on success, Z_BUF_ERROR + if in() or out() returned an error, Z_DATA_ERROR if there was a format error + in the deflate stream (in which case strm->msg is set to indicate the nature + of the error), or Z_STREAM_ERROR if the stream was not properly initialized. + In the case of Z_BUF_ERROR, an input or output error can be distinguished + using strm->next_in which will be Z_NULL only if in() returned an error. If + strm->next_in is not Z_NULL, then the Z_BUF_ERROR was due to out() returning + non-zero. 
(in() will always be called before out(), so strm->next_in is + assured to be defined if out() returns non-zero.) Note that inflateBack() + cannot return Z_OK. +*/ + +ZEXTERN int ZEXPORT inflateBackEnd OF((z_streamp strm)); +/* + All memory allocated by inflateBackInit() is freed. + + inflateBackEnd() returns Z_OK on success, or Z_STREAM_ERROR if the stream + state was inconsistent. +*/ + +ZEXTERN uLong ZEXPORT zlibCompileFlags OF((void)); +/* Return flags indicating compile-time options. + + Type sizes, two bits each, 00 = 16 bits, 01 = 32, 10 = 64, 11 = other: + 1.0: size of uInt + 3.2: size of uLong + 5.4: size of voidpf (pointer) + 7.6: size of z_off_t + + Compiler, assembler, and debug options: + 8: DEBUG + 9: ASMV or ASMINF -- use ASM code + 10: ZLIB_WINAPI -- exported functions use the WINAPI calling convention + 11: 0 (reserved) + + One-time table building (smaller code, but not thread-safe if true): + 12: BUILDFIXED -- build static block decoding tables when needed + 13: DYNAMIC_CRC_TABLE -- build CRC calculation tables when needed + 14,15: 0 (reserved) + + Library content (indicates missing functionality): + 16: NO_GZCOMPRESS -- gz* functions cannot compress (to avoid linking + deflate code when not needed) + 17: NO_GZIP -- deflate can't write gzip streams, and inflate can't detect + and decode gzip streams (to avoid linking crc code) + 18-19: 0 (reserved) + + Operation variations (changes in library functionality): + 20: PKZIP_BUG_WORKAROUND -- slightly more permissive inflate + 21: FASTEST -- deflate algorithm with only one, lowest compression level + 22,23: 0 (reserved) + + The sprintf variant used by gzprintf (zero is best): + 24: 0 = vs*, 1 = s* -- 1 means limited to 20 arguments after the format + 25: 0 = *nprintf, 1 = *printf -- 1 means gzprintf() not secure! 
+ 26: 0 = returns value, 1 = void -- 1 means inferred string length returned + + Remainder: + 27-31: 0 (reserved) + */ + +#ifndef Z_SOLO + + /* utility functions */ + +/* + The following utility functions are implemented on top of the basic + stream-oriented functions. To simplify the interface, some default options + are assumed (compression level and memory usage, standard memory allocation + functions). The source code of these utility functions can be modified if + you need special options. +*/ + +ZEXTERN int ZEXPORT compress OF((Bytef *dest, uLongf *destLen, + const Bytef *source, uLong sourceLen)); +/* + Compresses the source buffer into the destination buffer. sourceLen is + the byte length of the source buffer. Upon entry, destLen is the total size + of the destination buffer, which must be at least the value returned by + compressBound(sourceLen). Upon exit, destLen is the actual size of the + compressed buffer. + + compress returns Z_OK if success, Z_MEM_ERROR if there was not + enough memory, Z_BUF_ERROR if there was not enough room in the output + buffer. +*/ + +ZEXTERN int ZEXPORT compress2 OF((Bytef *dest, uLongf *destLen, + const Bytef *source, uLong sourceLen, + int level)); +/* + Compresses the source buffer into the destination buffer. The level + parameter has the same meaning as in deflateInit. sourceLen is the byte + length of the source buffer. Upon entry, destLen is the total size of the + destination buffer, which must be at least the value returned by + compressBound(sourceLen). Upon exit, destLen is the actual size of the + compressed buffer. + + compress2 returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_BUF_ERROR if there was not enough room in the output buffer, + Z_STREAM_ERROR if the level parameter is invalid. +*/ + +ZEXTERN uLong ZEXPORT compressBound OF((uLong sourceLen)); +/* + compressBound() returns an upper bound on the compressed size after + compress() or compress2() on sourceLen bytes. 
It would be used before a + compress() or compress2() call to allocate the destination buffer. +*/ + +ZEXTERN int ZEXPORT uncompress OF((Bytef *dest, uLongf *destLen, + const Bytef *source, uLong sourceLen)); +/* + Decompresses the source buffer into the destination buffer. sourceLen is + the byte length of the source buffer. Upon entry, destLen is the total size + of the destination buffer, which must be large enough to hold the entire + uncompressed data. (The size of the uncompressed data must have been saved + previously by the compressor and transmitted to the decompressor by some + mechanism outside the scope of this compression library.) Upon exit, destLen + is the actual size of the uncompressed buffer. + + uncompress returns Z_OK if success, Z_MEM_ERROR if there was not + enough memory, Z_BUF_ERROR if there was not enough room in the output + buffer, or Z_DATA_ERROR if the input data was corrupted or incomplete. In + the case where there is not enough room, uncompress() will fill the output + buffer with the uncompressed data up to that point. +*/ + + /* gzip file access functions */ + +/* + This library supports reading and writing files in gzip (.gz) format with + an interface similar to that of stdio, using the functions that start with + "gz". The gzip format is different from the zlib format. gzip is a gzip + wrapper, documented in RFC 1952, wrapped around a deflate stream. +*/ + +typedef struct gzFile_s *gzFile; /* semi-opaque gzip file descriptor */ + +/* +ZEXTERN gzFile ZEXPORT gzopen OF((const char *path, const char *mode)); + + Opens a gzip (.gz) file for reading or writing. The mode parameter is as + in fopen ("rb" or "wb") but can also include a compression level ("wb9") or + a strategy: 'f' for filtered data as in "wb6f", 'h' for Huffman-only + compression as in "wb1h", 'R' for run-length encoding as in "wb1R", or 'F' + for fixed code compression as in "wb9F". 
(See the description of + deflateInit2 for more information about the strategy parameter.) 'T' will + request transparent writing or appending with no compression and not using + the gzip format. + + "a" can be used instead of "w" to request that the gzip stream that will + be written be appended to the file. "+" will result in an error, since + reading and writing to the same gzip file is not supported. The addition of + "x" when writing will create the file exclusively, which fails if the file + already exists. On systems that support it, the addition of "e" when + reading or writing will set the flag to close the file on an execve() call. + + These functions, as well as gzip, will read and decode a sequence of gzip + streams in a file. The append function of gzopen() can be used to create + such a file. (Also see gzflush() for another way to do this.) When + appending, gzopen does not test whether the file begins with a gzip stream, + nor does it look for the end of the gzip streams to begin appending. gzopen + will simply append a gzip stream to the existing file. + + gzopen can be used to read a file which is not in gzip format; in this + case gzread will directly read from the file without decompression. When + reading, this will be detected automatically by looking for the magic two- + byte gzip header. + + gzopen returns NULL if the file could not be opened, if there was + insufficient memory to allocate the gzFile state, or if an invalid mode was + specified (an 'r', 'w', or 'a' was not provided, or '+' was provided). + errno can be checked to determine if the reason gzopen failed was that the + file could not be opened. +*/ + +ZEXTERN gzFile ZEXPORT gzdopen OF((int fd, const char *mode)); +/* + gzdopen associates a gzFile with the file descriptor fd. File descriptors + are obtained from calls like open, dup, creat, pipe or fileno (if the file + has been previously opened with fopen). The mode parameter is as in gzopen. 
+ + The next call of gzclose on the returned gzFile will also close the file + descriptor fd, just like fclose(fdopen(fd, mode)) closes the file descriptor + fd. If you want to keep fd open, use fd = dup(fd_keep); gz = gzdopen(fd, + mode);. The duplicated descriptor should be saved to avoid a leak, since + gzdopen does not close fd if it fails. If you are using fileno() to get the + file descriptor from a FILE *, then you will have to use dup() to avoid + double-close()ing the file descriptor. Both gzclose() and fclose() will + close the associated file descriptor, so they need to have different file + descriptors. + + gzdopen returns NULL if there was insufficient memory to allocate the + gzFile state, if an invalid mode was specified (an 'r', 'w', or 'a' was not + provided, or '+' was provided), or if fd is -1. The file descriptor is not + used until the next gz* read, write, seek, or close operation, so gzdopen + will not detect if fd is invalid (unless fd is -1). +*/ + +ZEXTERN int ZEXPORT gzbuffer OF((gzFile file, unsigned size)); +/* + Set the internal buffer size used by this library's functions. The + default buffer size is 8192 bytes. This function must be called after + gzopen() or gzdopen(), and before any other calls that read or write the + file. The buffer memory allocation is always deferred to the first read or + write. Two buffers are allocated, either both of the specified size when + writing, or one of the specified size and the other twice that size when + reading. A larger buffer size of, for example, 64K or 128K bytes will + noticeably increase the speed of decompression (reading). + + The new buffer size also affects the maximum length for gzprintf(). + + gzbuffer() returns 0 on success, or -1 on failure, such as being called + too late. +*/ + +ZEXTERN int ZEXPORT gzsetparams OF((gzFile file, int level, int strategy)); +/* + Dynamically update the compression level or strategy. 
See the description + of deflateInit2 for the meaning of these parameters. + + gzsetparams returns Z_OK if success, or Z_STREAM_ERROR if the file was not + opened for writing. +*/ + +ZEXTERN int ZEXPORT gzread OF((gzFile file, voidp buf, unsigned len)); +/* + Reads the given number of uncompressed bytes from the compressed file. If + the input file is not in gzip format, gzread copies the given number of + bytes into the buffer directly from the file. + + After reaching the end of a gzip stream in the input, gzread will continue + to read, looking for another gzip stream. Any number of gzip streams may be + concatenated in the input file, and will all be decompressed by gzread(). + If something other than a gzip stream is encountered after a gzip stream, + that remaining trailing garbage is ignored (and no error is returned). + + gzread can be used to read a gzip file that is being concurrently written. + Upon reaching the end of the input, gzread will return with the available + data. If the error code returned by gzerror is Z_OK or Z_BUF_ERROR, then + gzclearerr can be used to clear the end of file indicator in order to permit + gzread to be tried again. Z_OK indicates that a gzip stream was completed + on the last gzread. Z_BUF_ERROR indicates that the input file ended in the + middle of a gzip stream. Note that gzread does not return -1 in the event + of an incomplete gzip stream. This error is deferred until gzclose(), which + will return Z_BUF_ERROR if the last gzread ended in the middle of a gzip + stream. Alternatively, gzerror can be used before gzclose to detect this + case. + + gzread returns the number of uncompressed bytes actually read, less than + len for end of file, or -1 for error. +*/ + +ZEXTERN int ZEXPORT gzwrite OF((gzFile file, + voidpc buf, unsigned len)); +/* + Writes the given number of uncompressed bytes into the compressed file. + gzwrite returns the number of uncompressed bytes written or 0 in case of + error. 
+*/ + +ZEXTERN int ZEXPORTVA gzprintf Z_ARG((gzFile file, const char *format, ...)); +/* + Converts, formats, and writes the arguments to the compressed file under + control of the format string, as in fprintf. gzprintf returns the number of + uncompressed bytes actually written, or 0 in case of error. The number of + uncompressed bytes written is limited to 8191, or one less than the buffer + size given to gzbuffer(). The caller should assure that this limit is not + exceeded. If it is exceeded, then gzprintf() will return an error (0) with + nothing written. In this case, there may also be a buffer overflow with + unpredictable consequences, which is possible only if zlib was compiled with + the insecure functions sprintf() or vsprintf() because the secure snprintf() + or vsnprintf() functions were not available. This can be determined using + zlibCompileFlags(). +*/ + +ZEXTERN int ZEXPORT gzputs OF((gzFile file, const char *s)); +/* + Writes the given null-terminated string to the compressed file, excluding + the terminating null character. + + gzputs returns the number of characters written, or -1 in case of error. +*/ + +ZEXTERN char * ZEXPORT gzgets OF((gzFile file, char *buf, int len)); +/* + Reads bytes from the compressed file until len-1 characters are read, or a + newline character is read and transferred to buf, or an end-of-file + condition is encountered. If any characters are read or if len == 1, the + string is terminated with a null character. If no characters are read due + to an end-of-file or len < 1, then the buffer is left untouched. + + gzgets returns buf which is a null-terminated string, or it returns NULL + for end-of-file or in case of error. If there was an error, the contents at + buf are indeterminate. +*/ + +ZEXTERN int ZEXPORT gzputc OF((gzFile file, int c)); +/* + Writes c, converted to an unsigned char, into the compressed file. gzputc + returns the value that was written, or -1 in case of error. 
+*/ + +ZEXTERN int ZEXPORT gzgetc OF((gzFile file)); +/* + Reads one byte from the compressed file. gzgetc returns this byte or -1 + in case of end of file or error. This is implemented as a macro for speed. + As such, it does not do all of the checking the other functions do. I.e. + it does not check to see if file is NULL, nor whether the structure file + points to has been clobbered or not. +*/ + +ZEXTERN int ZEXPORT gzungetc OF((int c, gzFile file)); +/* + Push one character back onto the stream to be read as the first character + on the next read. At least one character of push-back is allowed. + gzungetc() returns the character pushed, or -1 on failure. gzungetc() will + fail if c is -1, and may fail if a character has been pushed but not read + yet. If gzungetc is used immediately after gzopen or gzdopen, at least the + output buffer size of pushed characters is allowed. (See gzbuffer above.) + The pushed character will be discarded if the stream is repositioned with + gzseek() or gzrewind(). +*/ + +ZEXTERN int ZEXPORT gzflush OF((gzFile file, int flush)); +/* + Flushes all pending output into the compressed file. The parameter flush + is as in the deflate() function. The return value is the zlib error number + (see function gzerror below). gzflush is only permitted when writing. + + If the flush parameter is Z_FINISH, the remaining data is written and the + gzip stream is completed in the output. If gzwrite() is called again, a new + gzip stream will be started in the output. gzread() is able to read such + concatented gzip streams. + + gzflush should be called only when strictly necessary because it will + degrade compression if called too often. +*/ + +/* +ZEXTERN z_off_t ZEXPORT gzseek OF((gzFile file, + z_off_t offset, int whence)); + + Sets the starting position for the next gzread or gzwrite on the given + compressed file. The offset represents a number of bytes in the + uncompressed data stream. 
The whence parameter is defined as in lseek(2); + the value SEEK_END is not supported. + + If the file is opened for reading, this function is emulated but can be + extremely slow. If the file is opened for writing, only forward seeks are + supported; gzseek then compresses a sequence of zeroes up to the new + starting position. + + gzseek returns the resulting offset location as measured in bytes from + the beginning of the uncompressed stream, or -1 in case of error, in + particular if the file is opened for writing and the new starting position + would be before the current position. +*/ + +ZEXTERN int ZEXPORT gzrewind OF((gzFile file)); +/* + Rewinds the given file. This function is supported only for reading. + + gzrewind(file) is equivalent to (int)gzseek(file, 0L, SEEK_SET) +*/ + +/* +ZEXTERN z_off_t ZEXPORT gztell OF((gzFile file)); + + Returns the starting position for the next gzread or gzwrite on the given + compressed file. This position represents a number of bytes in the + uncompressed data stream, and is zero when starting, even if appending or + reading a gzip stream from the middle of a file using gzdopen(). + + gztell(file) is equivalent to gzseek(file, 0L, SEEK_CUR) +*/ + +/* +ZEXTERN z_off_t ZEXPORT gzoffset OF((gzFile file)); + + Returns the current offset in the file being read or written. This offset + includes the count of bytes that precede the gzip stream, for example when + appending or when using gzdopen() for reading. When reading, the offset + does not include as yet unused buffered input. This information can be used + for a progress indicator. On error, gzoffset() returns -1. +*/ + +ZEXTERN int ZEXPORT gzeof OF((gzFile file)); +/* + Returns true (1) if the end-of-file indicator has been set while reading, + false (0) otherwise. Note that the end-of-file indicator is set only if the + read tried to go past the end of the input, but came up short. 
Therefore, + just like feof(), gzeof() may return false even if there is no more data to + read, in the event that the last read request was for the exact number of + bytes remaining in the input file. This will happen if the input file size + is an exact multiple of the buffer size. + + If gzeof() returns true, then the read functions will return no more data, + unless the end-of-file indicator is reset by gzclearerr() and the input file + has grown since the previous end of file was detected. +*/ + +ZEXTERN int ZEXPORT gzdirect OF((gzFile file)); +/* + Returns true (1) if file is being copied directly while reading, or false + (0) if file is a gzip stream being decompressed. + + If the input file is empty, gzdirect() will return true, since the input + does not contain a gzip stream. + + If gzdirect() is used immediately after gzopen() or gzdopen() it will + cause buffers to be allocated to allow reading the file to determine if it + is a gzip file. Therefore if gzbuffer() is used, it should be called before + gzdirect(). + + When writing, gzdirect() returns true (1) if transparent writing was + requested ("wT" for the gzopen() mode), or false (0) otherwise. (Note: + gzdirect() is not needed when writing. Transparent writing must be + explicitly requested, so the application already knows the answer. When + linking statically, using gzdirect() will include all of the zlib code for + gzip file reading and decompression, which may not be desired.) +*/ + +ZEXTERN int ZEXPORT gzclose OF((gzFile file)); +/* + Flushes all pending output if necessary, closes the compressed file and + deallocates the (de)compression state. Note that once file is closed, you + cannot call gzerror with file, since its structures have been deallocated. + gzclose must not be called more than once on the same file, just as free + must not be called more than once on the same allocation. 
+ + gzclose will return Z_STREAM_ERROR if file is not valid, Z_ERRNO on a + file operation error, Z_MEM_ERROR if out of memory, Z_BUF_ERROR if the + last read ended in the middle of a gzip stream, or Z_OK on success. +*/ + +ZEXTERN int ZEXPORT gzclose_r OF((gzFile file)); +ZEXTERN int ZEXPORT gzclose_w OF((gzFile file)); +/* + Same as gzclose(), but gzclose_r() is only for use when reading, and + gzclose_w() is only for use when writing or appending. The advantage to + using these instead of gzclose() is that they avoid linking in zlib + compression or decompression code that is not used when only reading or only + writing respectively. If gzclose() is used, then both compression and + decompression code will be included the application when linking to a static + zlib library. +*/ + +ZEXTERN const char * ZEXPORT gzerror OF((gzFile file, int *errnum)); +/* + Returns the error message for the last error which occurred on the given + compressed file. errnum is set to zlib error number. If an error occurred + in the file system and not in the compression library, errnum is set to + Z_ERRNO and the application may consult errno to get the exact error code. + + The application must not modify the returned string. Future calls to + this function may invalidate the previously returned string. If file is + closed, then the string previously returned by gzerror will no longer be + available. + + gzerror() should be used to distinguish errors from end-of-file for those + functions above that do not distinguish those cases in their return values. +*/ + +ZEXTERN void ZEXPORT gzclearerr OF((gzFile file)); +/* + Clears the error and end-of-file flags for file. This is analogous to the + clearerr() function in stdio. This is useful for continuing to read a gzip + file that is being written concurrently. 
+*/ + +#endif /* !Z_SOLO */ + + /* checksum functions */ + +/* + These functions are not related to compression but are exported + anyway because they might be useful in applications using the compression + library. +*/ + +ZEXTERN uLong ZEXPORT adler32 OF((uLong adler, const Bytef *buf, uInt len)); +/* + Update a running Adler-32 checksum with the bytes buf[0..len-1] and + return the updated checksum. If buf is Z_NULL, this function returns the + required initial value for the checksum. + + An Adler-32 checksum is almost as reliable as a CRC32 but can be computed + much faster. + + Usage example: + + uLong adler = adler32(0L, Z_NULL, 0); + + while (read_buffer(buffer, length) != EOF) { + adler = adler32(adler, buffer, length); + } + if (adler != original_adler) error(); +*/ + +/* +ZEXTERN uLong ZEXPORT adler32_combine OF((uLong adler1, uLong adler2, + z_off_t len2)); + + Combine two Adler-32 checksums into one. For two sequences of bytes, seq1 + and seq2 with lengths len1 and len2, Adler-32 checksums were calculated for + each, adler1 and adler2. adler32_combine() returns the Adler-32 checksum of + seq1 and seq2 concatenated, requiring only adler1, adler2, and len2. Note + that the z_off_t type (like off_t) is a signed integer. If len2 is + negative, the result has no meaning or utility. +*/ + +ZEXTERN uLong ZEXPORT crc32 OF((uLong crc, const Bytef *buf, uInt len)); +/* + Update a running CRC-32 with the bytes buf[0..len-1] and return the + updated CRC-32. If buf is Z_NULL, this function returns the required + initial value for the crc. Pre- and post-conditioning (one's complement) is + performed within this function so it shouldn't be done by the application. 
+ + Usage example: + + uLong crc = crc32(0L, Z_NULL, 0); + + while (read_buffer(buffer, length) != EOF) { + crc = crc32(crc, buffer, length); + } + if (crc != original_crc) error(); +*/ + +/* +ZEXTERN uLong ZEXPORT crc32_combine OF((uLong crc1, uLong crc2, z_off_t len2)); + + Combine two CRC-32 check values into one. For two sequences of bytes, + seq1 and seq2 with lengths len1 and len2, CRC-32 check values were + calculated for each, crc1 and crc2. crc32_combine() returns the CRC-32 + check value of seq1 and seq2 concatenated, requiring only crc1, crc2, and + len2. +*/ + + + /* various hacks, don't look :) */ + +/* deflateInit and inflateInit are macros to allow checking the zlib version + * and the compiler's view of z_stream: + */ +ZEXTERN int ZEXPORT deflateInit_ OF((z_streamp strm, int level, + const char *version, int stream_size)); +ZEXTERN int ZEXPORT inflateInit_ OF((z_streamp strm, + const char *version, int stream_size)); +ZEXTERN int ZEXPORT deflateInit2_ OF((z_streamp strm, int level, int method, + int windowBits, int memLevel, + int strategy, const char *version, + int stream_size)); +ZEXTERN int ZEXPORT inflateInit2_ OF((z_streamp strm, int windowBits, + const char *version, int stream_size)); +ZEXTERN int ZEXPORT inflateBackInit_ OF((z_streamp strm, int windowBits, + unsigned char FAR *window, + const char *version, + int stream_size)); +#define deflateInit(strm, level) \ + deflateInit_((strm), (level), ZLIB_VERSION, (int)sizeof(z_stream)) +#define inflateInit(strm) \ + inflateInit_((strm), ZLIB_VERSION, (int)sizeof(z_stream)) +#define deflateInit2(strm, level, method, windowBits, memLevel, strategy) \ + deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\ + (strategy), ZLIB_VERSION, (int)sizeof(z_stream)) +#define inflateInit2(strm, windowBits) \ + inflateInit2_((strm), (windowBits), ZLIB_VERSION, \ + (int)sizeof(z_stream)) +#define inflateBackInit(strm, windowBits, window) \ + inflateBackInit_((strm), (windowBits), (window), \ + 
ZLIB_VERSION, (int)sizeof(z_stream)) + +#ifndef Z_SOLO + +/* gzgetc() macro and its supporting function and exposed data structure. Note + * that the real internal state is much larger than the exposed structure. + * This abbreviated structure exposes just enough for the gzgetc() macro. The + * user should not mess with these exposed elements, since their names or + * behavior could change in the future, perhaps even capriciously. They can + * only be used by the gzgetc() macro. You have been warned. + */ +struct gzFile_s { + unsigned have; + unsigned char *next; + z_off64_t pos; +}; +ZEXTERN int ZEXPORT gzgetc_ OF((gzFile file)); /* backward compatibility */ +#ifdef Z_PREFIX_SET +# undef z_gzgetc +# define z_gzgetc(g) \ + ((g)->have ? ((g)->have--, (g)->pos++, *((g)->next)++) : gzgetc(g)) +#else +# define gzgetc(g) \ + ((g)->have ? ((g)->have--, (g)->pos++, *((g)->next)++) : gzgetc(g)) +#endif + +/* provide 64-bit offset functions if _LARGEFILE64_SOURCE defined, and/or + * change the regular functions to 64 bits if _FILE_OFFSET_BITS is 64 (if + * both are true, the application gets the *64 functions, and the regular + * functions are changed to 64 bits) -- in case these are set on systems + * without large file support, _LFS64_LARGEFILE must also be true + */ +#ifdef Z_LARGE64 + ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *)); + ZEXTERN z_off64_t ZEXPORT gzseek64 OF((gzFile, z_off64_t, int)); + ZEXTERN z_off64_t ZEXPORT gztell64 OF((gzFile)); + ZEXTERN z_off64_t ZEXPORT gzoffset64 OF((gzFile)); + ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off64_t)); + ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off64_t)); +#endif + +#if !defined(ZLIB_INTERNAL) && defined(Z_WANT64) +# ifdef Z_PREFIX_SET +# define z_gzopen z_gzopen64 +# define z_gzseek z_gzseek64 +# define z_gztell z_gztell64 +# define z_gzoffset z_gzoffset64 +# define z_adler32_combine z_adler32_combine64 +# define z_crc32_combine z_crc32_combine64 +# else +# define 
gzopen gzopen64 +# define gzseek gzseek64 +# define gztell gztell64 +# define gzoffset gzoffset64 +# define adler32_combine adler32_combine64 +# define crc32_combine crc32_combine64 +# endif +# ifndef Z_LARGE64 + ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *)); + ZEXTERN z_off_t ZEXPORT gzseek64 OF((gzFile, z_off_t, int)); + ZEXTERN z_off_t ZEXPORT gztell64 OF((gzFile)); + ZEXTERN z_off_t ZEXPORT gzoffset64 OF((gzFile)); + ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off_t)); + ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off_t)); +# endif +#else + ZEXTERN gzFile ZEXPORT gzopen OF((const char *, const char *)); + ZEXTERN z_off_t ZEXPORT gzseek OF((gzFile, z_off_t, int)); + ZEXTERN z_off_t ZEXPORT gztell OF((gzFile)); + ZEXTERN z_off_t ZEXPORT gzoffset OF((gzFile)); + ZEXTERN uLong ZEXPORT adler32_combine OF((uLong, uLong, z_off_t)); + ZEXTERN uLong ZEXPORT crc32_combine OF((uLong, uLong, z_off_t)); +#endif + +#else /* Z_SOLO */ + + ZEXTERN uLong ZEXPORT adler32_combine OF((uLong, uLong, z_off_t)); + ZEXTERN uLong ZEXPORT crc32_combine OF((uLong, uLong, z_off_t)); + +#endif /* !Z_SOLO */ + +/* hack for buggy compilers */ +#if !defined(ZUTIL_H) && !defined(NO_DUMMY_DECL) + struct internal_state {int dummy;}; +#endif + +/* undocumented functions */ +ZEXTERN const char * ZEXPORT zError OF((int)); +ZEXTERN int ZEXPORT inflateSyncPoint OF((z_streamp)); +ZEXTERN const z_crc_t FAR * ZEXPORT get_crc_table OF((void)); +ZEXTERN int ZEXPORT inflateUndermine OF((z_streamp, int)); +ZEXTERN int ZEXPORT inflateResetKeep OF((z_streamp)); +ZEXTERN int ZEXPORT deflateResetKeep OF((z_streamp)); +#if defined(_WIN32) && !defined(Z_SOLO) +ZEXTERN gzFile ZEXPORT gzopen_w OF((const wchar_t *path, + const char *mode)); +#endif +#if defined(STDC) || defined(Z_HAVE_STDARG_H) +# ifndef Z_SOLO +ZEXTERN int ZEXPORTVA gzvprintf Z_ARG((gzFile file, + const char *format, + va_list va)); +# endif +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* 
ZLIB_H */ diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/zutil.c cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/zutil.c --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/zutil.c 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/zutil.c 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,324 @@ +/* zutil.c -- target dependent utility functions for the compression library + * Copyright (C) 1995-2005, 2010, 2011, 2012 Jean-loup Gailly. + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* @(#) $Id$ */ + +#include "zutil.h" +#ifndef Z_SOLO +# include "gzguts.h" +#endif + +#ifndef NO_DUMMY_DECL +struct internal_state {int dummy;}; /* for buggy compilers */ +#endif + +z_const char * const z_errmsg[10] = { +"need dictionary", /* Z_NEED_DICT 2 */ +"stream end", /* Z_STREAM_END 1 */ +"", /* Z_OK 0 */ +"file error", /* Z_ERRNO (-1) */ +"stream error", /* Z_STREAM_ERROR (-2) */ +"data error", /* Z_DATA_ERROR (-3) */ +"insufficient memory", /* Z_MEM_ERROR (-4) */ +"buffer error", /* Z_BUF_ERROR (-5) */ +"incompatible version",/* Z_VERSION_ERROR (-6) */ +""}; + + +const char * ZEXPORT zlibVersion() +{ + return ZLIB_VERSION; +} + +uLong ZEXPORT zlibCompileFlags() +{ + uLong flags; + + flags = 0; + switch ((int)(sizeof(uInt))) { + case 2: break; + case 4: flags += 1; break; + case 8: flags += 2; break; + default: flags += 3; + } + switch ((int)(sizeof(uLong))) { + case 2: break; + case 4: flags += 1 << 2; break; + case 8: flags += 2 << 2; break; + default: flags += 3 << 2; + } + switch ((int)(sizeof(voidpf))) { + case 2: break; + case 4: flags += 1 << 4; break; + case 8: flags += 2 << 4; break; + default: flags += 3 << 4; + } + switch ((int)(sizeof(z_off_t))) { + case 2: break; + case 4: flags += 1 << 6; break; + case 8: flags += 2 << 6; break; + default: flags += 3 << 6; + } +#ifdef DEBUG + flags += 1 << 8; +#endif +#if defined(ASMV) || defined(ASMINF) + flags += 1 << 9; +#endif +#ifdef 
ZLIB_WINAPI + flags += 1 << 10; +#endif +#ifdef BUILDFIXED + flags += 1 << 12; +#endif +#ifdef DYNAMIC_CRC_TABLE + flags += 1 << 13; +#endif +#ifdef NO_GZCOMPRESS + flags += 1L << 16; +#endif +#ifdef NO_GZIP + flags += 1L << 17; +#endif +#ifdef PKZIP_BUG_WORKAROUND + flags += 1L << 20; +#endif +#ifdef FASTEST + flags += 1L << 21; +#endif +#if defined(STDC) || defined(Z_HAVE_STDARG_H) +# ifdef NO_vsnprintf + flags += 1L << 25; +# ifdef HAS_vsprintf_void + flags += 1L << 26; +# endif +# else +# ifdef HAS_vsnprintf_void + flags += 1L << 26; +# endif +# endif +#else + flags += 1L << 24; +# ifdef NO_snprintf + flags += 1L << 25; +# ifdef HAS_sprintf_void + flags += 1L << 26; +# endif +# else +# ifdef HAS_snprintf_void + flags += 1L << 26; +# endif +# endif +#endif + return flags; +} + +#ifdef DEBUG + +# ifndef verbose +# define verbose 0 +# endif +int ZLIB_INTERNAL z_verbose = verbose; + +void ZLIB_INTERNAL z_error (m) + char *m; +{ + fprintf(stderr, "%s\n", m); + exit(1); +} +#endif + +/* exported to allow conversion of error code to string for compress() and + * uncompress() + */ +const char * ZEXPORT zError(err) + int err; +{ + return ERR_MSG(err); +} + +#if defined(_WIN32_WCE) + /* The Microsoft C Run-Time Library for Windows CE doesn't have + * errno. We define it as a global variable to simplify porting. + * Its value is always 0 and should not be used. + */ + int errno = 0; +#endif + +#ifndef HAVE_MEMCPY + +void ZLIB_INTERNAL zmemcpy(dest, source, len) + Bytef* dest; + const Bytef* source; + uInt len; +{ + if (len == 0) return; + do { + *dest++ = *source++; /* ??? to be unrolled */ + } while (--len != 0); +} + +int ZLIB_INTERNAL zmemcmp(s1, s2, len) + const Bytef* s1; + const Bytef* s2; + uInt len; +{ + uInt j; + + for (j = 0; j < len; j++) { + if (s1[j] != s2[j]) return 2*(s1[j] > s2[j])-1; + } + return 0; +} + +void ZLIB_INTERNAL zmemzero(dest, len) + Bytef* dest; + uInt len; +{ + if (len == 0) return; + do { + *dest++ = 0; /* ??? 
to be unrolled */ + } while (--len != 0); +} +#endif + +#ifndef Z_SOLO + +#ifdef SYS16BIT + +#ifdef __TURBOC__ +/* Turbo C in 16-bit mode */ + +# define MY_ZCALLOC + +/* Turbo C malloc() does not allow dynamic allocation of 64K bytes + * and farmalloc(64K) returns a pointer with an offset of 8, so we + * must fix the pointer. Warning: the pointer must be put back to its + * original form in order to free it, use zcfree(). + */ + +#define MAX_PTR 10 +/* 10*64K = 640K */ + +local int next_ptr = 0; + +typedef struct ptr_table_s { + voidpf org_ptr; + voidpf new_ptr; +} ptr_table; + +local ptr_table table[MAX_PTR]; +/* This table is used to remember the original form of pointers + * to large buffers (64K). Such pointers are normalized with a zero offset. + * Since MSDOS is not a preemptive multitasking OS, this table is not + * protected from concurrent access. This hack doesn't work anyway on + * a protected system like OS/2. Use Microsoft C instead. + */ + +voidpf ZLIB_INTERNAL zcalloc (voidpf opaque, unsigned items, unsigned size) +{ + voidpf buf = opaque; /* just to make some compilers happy */ + ulg bsize = (ulg)items*size; + + /* If we allocate less than 65520 bytes, we assume that farmalloc + * will return a usable pointer which doesn't have to be normalized. 
+ */ + if (bsize < 65520L) { + buf = farmalloc(bsize); + if (*(ush*)&buf != 0) return buf; + } else { + buf = farmalloc(bsize + 16L); + } + if (buf == NULL || next_ptr >= MAX_PTR) return NULL; + table[next_ptr].org_ptr = buf; + + /* Normalize the pointer to seg:0 */ + *((ush*)&buf+1) += ((ush)((uch*)buf-0) + 15) >> 4; + *(ush*)&buf = 0; + table[next_ptr++].new_ptr = buf; + return buf; +} + +void ZLIB_INTERNAL zcfree (voidpf opaque, voidpf ptr) +{ + int n; + if (*(ush*)&ptr != 0) { /* object < 64K */ + farfree(ptr); + return; + } + /* Find the original pointer */ + for (n = 0; n < next_ptr; n++) { + if (ptr != table[n].new_ptr) continue; + + farfree(table[n].org_ptr); + while (++n < next_ptr) { + table[n-1] = table[n]; + } + next_ptr--; + return; + } + ptr = opaque; /* just to make some compilers happy */ + Assert(0, "zcfree: ptr not found"); +} + +#endif /* __TURBOC__ */ + + +#ifdef M_I86 +/* Microsoft C in 16-bit mode */ + +# define MY_ZCALLOC + +#if (!defined(_MSC_VER) || (_MSC_VER <= 600)) +# define _halloc halloc +# define _hfree hfree +#endif + +voidpf ZLIB_INTERNAL zcalloc (voidpf opaque, uInt items, uInt size) +{ + if (opaque) opaque = 0; /* to make compiler happy */ + return _halloc((long)items, size); +} + +void ZLIB_INTERNAL zcfree (voidpf opaque, voidpf ptr) +{ + if (opaque) opaque = 0; /* to make compiler happy */ + _hfree(ptr); +} + +#endif /* M_I86 */ + +#endif /* SYS16BIT */ + + +#ifndef MY_ZCALLOC /* Any system without a special alloc function */ + +#ifndef STDC +extern voidp malloc OF((uInt size)); +extern voidp calloc OF((uInt items, uInt size)); +extern void free OF((voidpf ptr)); +#endif + +voidpf ZLIB_INTERNAL zcalloc (opaque, items, size) + voidpf opaque; + unsigned items; + unsigned size; +{ + if (opaque) items += size - size; /* make compiler happy */ + return sizeof(uInt) > 2 ? 
(voidpf)malloc(items * size) : + (voidpf)calloc(items, size); +} + +void ZLIB_INTERNAL zcfree (opaque, ptr) + voidpf opaque; + voidpf ptr; +{ + free(ptr); + if (opaque) return; /* make compiler happy */ +} + +#endif /* MY_ZCALLOC */ + +#endif /* !Z_SOLO */ diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/zutil.h cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/zutil.h --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/cbits/zutil.h 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/cbits/zutil.h 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,253 @@ +/* zutil.h -- internal interface and configuration of the compression library + * Copyright (C) 1995-2013 Jean-loup Gailly. + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* WARNING: this file should *not* be used by applications. It is + part of the implementation of the compression library and is + subject to change. Applications should only use zlib.h. + */ + +/* @(#) $Id$ */ + +#ifndef ZUTIL_H +#define ZUTIL_H + +#ifdef HAVE_HIDDEN +# define ZLIB_INTERNAL __attribute__((visibility ("hidden"))) +#else +# define ZLIB_INTERNAL +#endif + +#include "zlib.h" + +#if defined(STDC) && !defined(Z_SOLO) +# if !(defined(_WIN32_WCE) && defined(_MSC_VER)) +# include +# endif +# include +# include +#endif + +#ifdef Z_SOLO + typedef long ptrdiff_t; /* guess -- will be caught if guess is wrong */ +#endif + +#ifndef local +# define local static +#endif +/* compile with -Dlocal if your debugger can't find static symbols */ + +typedef unsigned char uch; +typedef uch FAR uchf; +typedef unsigned short ush; +typedef ush FAR ushf; +typedef unsigned long ulg; + +extern z_const char * const z_errmsg[10]; /* indexed by 2-zlib_error */ +/* (size given to avoid silly warnings with Visual C++) */ + +#define ERR_MSG(err) z_errmsg[Z_NEED_DICT-(err)] + +#define ERR_RETURN(strm,err) \ + return (strm->msg = ERR_MSG(err), (err)) +/* To be used only when the state is 
known to be valid */ + + /* common constants */ + +#ifndef DEF_WBITS +# define DEF_WBITS MAX_WBITS +#endif +/* default windowBits for decompression. MAX_WBITS is for compression only */ + +#if MAX_MEM_LEVEL >= 8 +# define DEF_MEM_LEVEL 8 +#else +# define DEF_MEM_LEVEL MAX_MEM_LEVEL +#endif +/* default memLevel */ + +#define STORED_BLOCK 0 +#define STATIC_TREES 1 +#define DYN_TREES 2 +/* The three kinds of block type */ + +#define MIN_MATCH 3 +#define MAX_MATCH 258 +/* The minimum and maximum match lengths */ + +#define PRESET_DICT 0x20 /* preset dictionary flag in zlib header */ + + /* target dependencies */ + +#if defined(MSDOS) || (defined(WINDOWS) && !defined(WIN32)) +# define OS_CODE 0x00 +# ifndef Z_SOLO +# if defined(__TURBOC__) || defined(__BORLANDC__) +# if (__STDC__ == 1) && (defined(__LARGE__) || defined(__COMPACT__)) + /* Allow compilation with ANSI keywords only enabled */ + void _Cdecl farfree( void *block ); + void *_Cdecl farmalloc( unsigned long nbytes ); +# else +# include +# endif +# else /* MSC or DJGPP */ +# include +# endif +# endif +#endif + +#ifdef AMIGA +# define OS_CODE 0x01 +#endif + +#if defined(VAXC) || defined(VMS) +# define OS_CODE 0x02 +# define F_OPEN(name, mode) \ + fopen((name), (mode), "mbc=60", "ctx=stm", "rfm=fix", "mrs=512") +#endif + +#if defined(ATARI) || defined(atarist) +# define OS_CODE 0x05 +#endif + +#ifdef OS2 +# define OS_CODE 0x06 +# if defined(M_I86) && !defined(Z_SOLO) +# include +# endif +#endif + +#if defined(MACOS) || defined(TARGET_OS_MAC) +# define OS_CODE 0x07 +# ifndef Z_SOLO +# if defined(__MWERKS__) && __dest_os != __be_os && __dest_os != __win32_os +# include /* for fdopen */ +# else +# ifndef fdopen +# define fdopen(fd,mode) NULL /* No fdopen() */ +# endif +# endif +# endif +#endif + +#ifdef TOPS20 +# define OS_CODE 0x0a +#endif + +#ifdef WIN32 +# ifndef __CYGWIN__ /* Cygwin is Unix, not Win32 */ +# define OS_CODE 0x0b +# endif +#endif + +#ifdef __50SERIES /* Prime/PRIMOS */ +# define OS_CODE 0x0f +#endif 
+ +#if defined(_BEOS_) || defined(RISCOS) +# define fdopen(fd,mode) NULL /* No fdopen() */ +#endif + +#if (defined(_MSC_VER) && (_MSC_VER > 600)) && !defined __INTERIX +# if defined(_WIN32_WCE) +# define fdopen(fd,mode) NULL /* No fdopen() */ +# ifndef _PTRDIFF_T_DEFINED + typedef int ptrdiff_t; +# define _PTRDIFF_T_DEFINED +# endif +# else +# define fdopen(fd,type) _fdopen(fd,type) +# endif +#endif + +#if defined(__BORLANDC__) && !defined(MSDOS) + #pragma warn -8004 + #pragma warn -8008 + #pragma warn -8066 +#endif + +/* provide prototypes for these when building zlib without LFS */ +#if !defined(_WIN32) && \ + (!defined(_LARGEFILE64_SOURCE) || _LFS64_LARGEFILE-0 == 0) + ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off_t)); + ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off_t)); +#endif + + /* common defaults */ + +#ifndef OS_CODE +# define OS_CODE 0x03 /* assume Unix */ +#endif + +#ifndef F_OPEN +# define F_OPEN(name, mode) fopen((name), (mode)) +#endif + + /* functions */ + +#if defined(pyr) || defined(Z_SOLO) +# define NO_MEMCPY +#endif +#if defined(SMALL_MEDIUM) && !defined(_MSC_VER) && !defined(__SC__) + /* Use our own functions for small and medium model with MSC <= 5.0. + * You may have to use the same strategy for Borland C (untested). + * The __SC__ check is for Symantec. 
+ */ +# define NO_MEMCPY +#endif +#if defined(STDC) && !defined(HAVE_MEMCPY) && !defined(NO_MEMCPY) +# define HAVE_MEMCPY +#endif +#ifdef HAVE_MEMCPY +# ifdef SMALL_MEDIUM /* MSDOS small or medium model */ +# define zmemcpy _fmemcpy +# define zmemcmp _fmemcmp +# define zmemzero(dest, len) _fmemset(dest, 0, len) +# else +# define zmemcpy memcpy +# define zmemcmp memcmp +# define zmemzero(dest, len) memset(dest, 0, len) +# endif +#else + void ZLIB_INTERNAL zmemcpy OF((Bytef* dest, const Bytef* source, uInt len)); + int ZLIB_INTERNAL zmemcmp OF((const Bytef* s1, const Bytef* s2, uInt len)); + void ZLIB_INTERNAL zmemzero OF((Bytef* dest, uInt len)); +#endif + +/* Diagnostic functions */ +#ifdef DEBUG +# include + extern int ZLIB_INTERNAL z_verbose; + extern void ZLIB_INTERNAL z_error OF((char *m)); +# define Assert(cond,msg) {if(!(cond)) z_error(msg);} +# define Trace(x) {if (z_verbose>=0) fprintf x ;} +# define Tracev(x) {if (z_verbose>0) fprintf x ;} +# define Tracevv(x) {if (z_verbose>1) fprintf x ;} +# define Tracec(c,x) {if (z_verbose>0 && (c)) fprintf x ;} +# define Tracecv(c,x) {if (z_verbose>1 && (c)) fprintf x ;} +#else +# define Assert(cond,msg) +# define Trace(x) +# define Tracev(x) +# define Tracevv(x) +# define Tracec(c,x) +# define Tracecv(c,x) +#endif + +#ifndef Z_SOLO + voidpf ZLIB_INTERNAL zcalloc OF((voidpf opaque, unsigned items, + unsigned size)); + void ZLIB_INTERNAL zcfree OF((voidpf opaque, voidpf ptr)); +#endif + +#define ZALLOC(strm, items, size) \ + (*((strm)->zalloc))((strm)->opaque, (items), (size)) +#define ZFREE(strm, addr) (*((strm)->zfree))((strm)->opaque, (voidpf)(addr)) +#define TRY_FREE(s, p) {if (p) ZFREE(s, p);} + +/* Reverse the bytes in a 32-bit value */ +#define ZSWAP32(q) ((((q) >> 24) & 0xff) + (((q) >> 8) & 0xff00) + \ + (((q) & 0xff00) << 8) + (((q) & 0xff) << 24)) + +#endif /* ZUTIL_H */ diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/changelog cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/changelog --- 
cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/changelog 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/changelog 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,39 @@ +0.6.1.1 Duncan Coutts April 2015 + + * Fixed building with GHC 7.0 and 7.2 + +0.6.0.2 Duncan Coutts April 2015 + + * Fixed building with GHC 7.0 and 7.2 + +0.6.1.0 Duncan Coutts April 2015 + + * Support for concatenated gzip files (multiple back-to-back streams) + +0.6.0.1 Duncan Coutts April 2015 + + * Fixed building with older GHC + * Fixed warnings with new GHC + * Fixed building on Windows + * Fixed testsuite + +0.6.0.0 Duncan Coutts April 2015 + + * New incremental interface for compression and decompression + * Provide access to unconsumed trailing data + * Simplified structured error type, and instance of Exception + * Updated bundled zlib C code to 1.2.8 (used on Windows) + * Fixed memory leak of zlib z_stream C structure + * More derivied instances (Eq, Show, Typeable, Generic) + +0.5.4.2 Duncan Coutts November 2014 + + * Builds with GHC 7.10 + +0.5.4.1 Duncan Coutts February 2013 + + * Force tail of input when finished decompressing, to help lazy IO + +0.5.4.0 Duncan Coutts September 2012 + + * New support for zlib custom dictionaries diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/Codec/Compression/GZip.hs cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/Codec/Compression/GZip.hs --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/Codec/Compression/GZip.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/Codec/Compression/GZip.hs 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,128 @@ +----------------------------------------------------------------------------- +-- | +-- Copyright : (c) 2006-2014 Duncan Coutts +-- License : BSD-style +-- +-- Maintainer : duncan@community.haskell.org +-- +-- Compression and decompression of data streams in the gzip format. 
+-- +-- The format is described in detail in RFC #1952: +-- +-- +-- See also the zlib home page: +-- +----------------------------------------------------------------------------- +module Codec.Compression.GZip ( + + -- | This module provides pure functions for compressing and decompressing + -- streams of data in the gzip format and represented by lazy 'ByteString's. + -- This makes it easy to use either in memory or with disk or network IO. + -- + -- For example a simple gzip compression program is just: + -- + -- > import qualified Data.ByteString.Lazy as ByteString + -- > import qualified Codec.Compression.GZip as GZip + -- > + -- > main = ByteString.interact GZip.compress + -- + -- Or you could lazily read in and decompress a @.gz@ file using: + -- + -- > content <- fmap GZip.decompress (readFile file) + -- + + -- * Simple compression and decompression + compress, + decompress, + + -- * Extended api with control over compression parameters + compressWith, + decompressWith, + + CompressParams(..), defaultCompressParams, + DecompressParams(..), defaultDecompressParams, + + -- ** The compression parameter types + CompressionLevel(..), + defaultCompression, + noCompression, + bestSpeed, + bestCompression, + compressionLevel, + Method(..), + deflateMethod, + WindowBits(..), + defaultWindowBits, + windowBits, + MemoryLevel(..), + defaultMemoryLevel, + minMemoryLevel, + maxMemoryLevel, + memoryLevel, + CompressionStrategy(..), + defaultStrategy, + filteredStrategy, + huffmanOnlyStrategy, + + ) where + +import Data.ByteString.Lazy (ByteString) + +import qualified Codec.Compression.Zlib.Internal as Internal +import Codec.Compression.Zlib.Internal hiding (compress, decompress) + + +-- | Decompress a stream of data in the gzip format. +-- +-- There are a number of errors that can occur. In each case an exception will +-- be thrown. 
The possible error conditions are: +-- +-- * if the stream does not start with a valid gzip header +-- +-- * if the compressed stream is corrupted +-- +-- * if the compressed stream ends permaturely +-- +-- Note that the decompression is performed /lazily/. Errors in the data stream +-- may not be detected until the end of the stream is demanded (since it is +-- only at the end that the final checksum can be checked). If this is +-- important to you, you must make sure to consume the whole decompressed +-- stream before doing any IO action that depends on it. +-- +decompress :: ByteString -> ByteString +decompress = decompressWith defaultDecompressParams + + +-- | Like 'decompress' but with the ability to specify various decompression +-- parameters. Typical usage: +-- +-- > decompressWith defaultCompressParams { ... } +-- +decompressWith :: DecompressParams -> ByteString -> ByteString +decompressWith = Internal.decompress gzipFormat + + +-- | Compress a stream of data into the gzip format. +-- +-- This uses the default compression parameters. In partiular it uses the +-- default compression level which favours a higher compression ratio over +-- compression speed, though it does not use the maximum compression level. +-- +-- Use 'compressWith' to adjust the compression level or other compression +-- parameters. +-- +compress :: ByteString -> ByteString +compress = compressWith defaultCompressParams + + +-- | Like 'compress' but with the ability to specify various compression +-- parameters. Typical usage: +-- +-- > compressWith defaultCompressParams { ... 
} +-- +-- In particular you can set the compression level: +-- +-- > compressWith defaultCompressParams { compressLevel = BestCompression } +-- +compressWith :: CompressParams -> ByteString -> ByteString +compressWith = Internal.compress gzipFormat diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/Codec/Compression/Zlib/Internal.hs cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/Codec/Compression/Zlib/Internal.hs --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/Codec/Compression/Zlib/Internal.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/Codec/Compression/Zlib/Internal.hs 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,947 @@ +{-# LANGUAGE CPP, RankNTypes, DeriveDataTypeable, BangPatterns #-} +#if __GLASGOW_HASKELL__ >= 702 +{-# LANGUAGE Trustworthy #-} +#endif +----------------------------------------------------------------------------- +-- | +-- Copyright : (c) 2006-2015 Duncan Coutts +-- License : BSD-style +-- +-- Maintainer : duncan@community.haskell.org +-- +-- Pure and IO stream based interfaces to lower level zlib wrapper +-- +----------------------------------------------------------------------------- +module Codec.Compression.Zlib.Internal ( + + -- * Pure interface + compress, + decompress, + + -- * Monadic incremental interface + -- $incremental-compression + + -- ** Using incremental compression + -- $using-incremental-compression + + CompressStream(..), + compressST, + compressIO, + foldCompressStream, + foldCompressStreamWithInput, + + -- ** Using incremental decompression + -- $using-incremental-decompression + + DecompressStream(..), + DecompressError(..), + decompressST, + decompressIO, + foldDecompressStream, + foldDecompressStreamWithInput, + + -- * The compression parameter types + CompressParams(..), + defaultCompressParams, + DecompressParams(..), + defaultDecompressParams, + Stream.Format(..), + Stream.gzipFormat, + Stream.zlibFormat, + Stream.rawFormat, + Stream.gzipOrZlibFormat, + 
Stream.CompressionLevel(..), + Stream.defaultCompression, + Stream.noCompression, + Stream.bestSpeed, + Stream.bestCompression, + Stream.compressionLevel, + Stream.Method(..), + Stream.deflateMethod, + Stream.WindowBits(..), + Stream.defaultWindowBits, + Stream.windowBits, + Stream.MemoryLevel(..), + Stream.defaultMemoryLevel, + Stream.minMemoryLevel, + Stream.maxMemoryLevel, + Stream.memoryLevel, + Stream.CompressionStrategy(..), + Stream.defaultStrategy, + Stream.filteredStrategy, + Stream.huffmanOnlyStrategy, + + ) where + +import Prelude hiding (length) +import Control.Monad (when) +import Control.Exception (Exception, throw, assert) +import Control.Monad.ST.Lazy hiding (stToIO) +import Control.Monad.ST.Strict (stToIO) +import Data.Typeable (Typeable) +import qualified Data.ByteString.Lazy as L +import qualified Data.ByteString.Lazy.Internal as L +import qualified Data.ByteString as S +import qualified Data.ByteString.Internal as S +import Data.Word (Word8) + +import qualified Codec.Compression.Zlib.Stream as Stream +import Codec.Compression.Zlib.Stream (Stream) + +-- | The full set of parameters for compression. The defaults are +-- 'defaultCompressParams'. +-- +-- The 'compressBufferSize' is the size of the first output buffer containing +-- the compressed data. If you know an approximate upper bound on the size of +-- the compressed data then setting this parameter can save memory. The default +-- compression output buffer size is @16k@. If your extimate is wrong it does +-- not matter too much, the default buffer size will be used for the remaining +-- chunks. 
+-- +data CompressParams = CompressParams { + compressLevel :: !Stream.CompressionLevel, + compressMethod :: !Stream.Method, + compressWindowBits :: !Stream.WindowBits, + compressMemoryLevel :: !Stream.MemoryLevel, + compressStrategy :: !Stream.CompressionStrategy, + compressBufferSize :: !Int, + compressDictionary :: Maybe S.ByteString +} deriving Show + +-- | The full set of parameters for decompression. The defaults are +-- 'defaultDecompressParams'. +-- +-- The 'decompressBufferSize' is the size of the first output buffer, +-- containing the uncompressed data. If you know an exact or approximate upper +-- bound on the size of the decompressed data then setting this parameter can +-- save memory. The default decompression output buffer size is @32k@. If your +-- extimate is wrong it does not matter too much, the default buffer size will +-- be used for the remaining chunks. +-- +-- One particular use case for setting the 'decompressBufferSize' is if you +-- know the exact size of the decompressed data and want to produce a strict +-- 'Data.ByteString.ByteString'. The compression and deccompression functions +-- use lazy 'Data.ByteString.Lazy.ByteString's but if you set the +-- 'decompressBufferSize' correctly then you can generate a lazy +-- 'Data.ByteString.Lazy.ByteString' with exactly one chunk, which can be +-- converted to a strict 'Data.ByteString.ByteString' in @O(1)@ time using +-- @'Data.ByteString.concat' . 'Data.ByteString.Lazy.toChunks'@. +-- +data DecompressParams = DecompressParams { + decompressWindowBits :: !Stream.WindowBits, + decompressBufferSize :: !Int, + decompressDictionary :: Maybe S.ByteString, + decompressAllMembers :: Bool +} deriving Show + +-- | The default set of parameters for compression. This is typically used with +-- the @compressWith@ function with specific parameters overridden. 
+-- +defaultCompressParams :: CompressParams +defaultCompressParams = CompressParams { + compressLevel = Stream.defaultCompression, + compressMethod = Stream.deflateMethod, + compressWindowBits = Stream.defaultWindowBits, + compressMemoryLevel = Stream.defaultMemoryLevel, + compressStrategy = Stream.defaultStrategy, + compressBufferSize = defaultCompressBufferSize, + compressDictionary = Nothing +} + +-- | The default set of parameters for decompression. This is typically used with +-- the @compressWith@ function with specific parameters overridden. +-- +defaultDecompressParams :: DecompressParams +defaultDecompressParams = DecompressParams { + decompressWindowBits = Stream.defaultWindowBits, + decompressBufferSize = defaultDecompressBufferSize, + decompressDictionary = Nothing, + decompressAllMembers = True +} + +-- | The default chunk sizes for the output of compression and decompression +-- are 16k and 32k respectively (less a small accounting overhead). +-- +defaultCompressBufferSize, defaultDecompressBufferSize :: Int +defaultCompressBufferSize = 16 * 1024 - L.chunkOverhead +defaultDecompressBufferSize = 32 * 1024 - L.chunkOverhead + +-- | The unfolding of the decompression process, where you provide a sequence +-- of compressed data chunks as input and receive a sequence of uncompressed +-- data chunks as output. The process is incremental, in that the demand for +-- input and provision of output are interleaved. +-- +-- To indicate the end of the input supply an empty input chunk. Note that +-- for 'gzipFormat' with the default 'decompressAllMembers' @True@ you will +-- have to do this, as the decompressor will look for any following members. +-- With 'decompressAllMembers' @False@ the decompressor knows when the data +-- ends and will produce 'DecompressStreamEnd' without you having to supply an +-- empty chunk to indicate the end of the input. 
+-- +data DecompressStream m = + + DecompressInputRequired { + decompressSupplyInput :: S.ByteString -> m (DecompressStream m) + } + + | DecompressOutputAvailable { + decompressOutput :: !S.ByteString, + decompressNext :: m (DecompressStream m) + } + + -- | Includes any trailing unconsumed /input/ data. + | DecompressStreamEnd { + decompressUnconsumedInput :: S.ByteString + } + + -- | An error code + | DecompressStreamError { + decompressStreamError :: DecompressError + } + +-- | The possible error cases when decompressing a stream. +-- +-- This can be 'show'n to give a human readable error message. +-- +data DecompressError = + -- | The compressed data stream ended prematurely. This may happen if the + -- input data stream was truncated. + TruncatedInput + + -- | It is possible to do zlib compression with a custom dictionary. This + -- allows slightly higher compression ratios for short files. However such + -- compressed streams require the same dictionary when decompressing. This + -- error is for when we encounter a compressed stream that needs a + -- dictionary, and it's not provided. + | DictionaryRequired + + -- | If the stream requires a dictionary and you provide one with the + -- wrong 'DictionaryHash' then you will get this error. + | DictionaryMismatch + + -- | If the compressed data stream is corrupted in any way then you will + -- get this error, for example if the input data just isn't a compressed + -- zlib data stream. In particular if the data checksum turns out to be + -- wrong then you will get all the decompressed data but this error at the + -- end, instead of the normal sucessful 'StreamEnd'. 
+ | DataFormatError String + deriving (Eq, Typeable) + +instance Show DecompressError where + show TruncatedInput = modprefix "premature end of compressed data stream" + show DictionaryRequired = modprefix "compressed data stream requires custom dictionary" + show DictionaryMismatch = modprefix "given dictionary does not match the expected one" + show (DataFormatError detail) = modprefix ("compressed data stream format error (" ++ detail ++ ")") + +modprefix :: ShowS +modprefix = ("Codec.Compression.Zlib: " ++) + +instance Exception DecompressError + +-- | A fold over the 'DecompressStream' in the given monad. +-- +-- One way to look at this is that it runs the stream, using callback functions +-- for the four stream events. +-- +foldDecompressStream :: Monad m + => ((S.ByteString -> m a) -> m a) + -> (S.ByteString -> m a -> m a) + -> (S.ByteString -> m a) + -> (DecompressError -> m a) + -> DecompressStream m -> m a +foldDecompressStream input output end err = fold + where + fold (DecompressInputRequired next) = + input (\x -> next x >>= fold) + + fold (DecompressOutputAvailable outchunk next) = + output outchunk (next >>= fold) + + fold (DecompressStreamEnd inchunk) = end inchunk + fold (DecompressStreamError derr) = err derr + +-- | A variant on 'foldCompressStream' that is pure rather than operating in a +-- monad and where the input is provided by a lazy 'L.ByteString'. So we only +-- have to deal with the output, end and error parts, making it like a foldr on +-- a list of output chunks. +-- +-- For example: +-- +-- > toChunks = foldDecompressStreamWithInput (:) [] throw +-- +foldDecompressStreamWithInput :: (S.ByteString -> a -> a) + -> (L.ByteString -> a) + -> (DecompressError -> a) + -> (forall s. 
DecompressStream (ST s)) + -> L.ByteString + -> a +foldDecompressStreamWithInput chunk end err = \s lbs -> + runST (fold s (L.toChunks lbs)) + where + fold (DecompressInputRequired next) [] = + next S.empty >>= \strm -> fold strm [] + + fold (DecompressInputRequired next) (inchunk:inchunks) = + next inchunk >>= \s -> fold s inchunks + + fold (DecompressOutputAvailable outchunk next) inchunks = do + r <- next >>= \s -> fold s inchunks + return $ chunk outchunk r + + fold (DecompressStreamEnd inchunk) inchunks = + return $ end (L.fromChunks (inchunk:inchunks)) + + fold (DecompressStreamError derr) _ = + return $ err derr + + +-- $incremental-compression +-- The pure 'compress' and 'decompress' functions are streaming in the sense +-- that they can produce output without demanding all input, however they need +-- the input data stream as a lazy 'L.ByteString'. Having the input data +-- stream as a lazy 'L.ByteString' often requires using lazy I\/O which is not +-- appropriate in all cicumstances. +-- +-- For these cases an incremental interface is more appropriate. This interface +-- allows both incremental input and output. Chunks of input data are supplied +-- one by one (e.g. as they are obtained from an input source like a file or +-- network source). Output is also produced chunk by chunk. +-- +-- The incremental input and output is managed via the 'CompressStream' and +-- 'DecompressStream' types. They represents the unfolding of the process of +-- compressing and decompressing. They operates in either the 'ST' or 'IO' +-- monads. They can be lifted into other incremental abstractions like pipes or +-- conduits, or they can be used directly in the following style. 
+ +-- $using-incremental-compression +-- +-- In a loop: +-- +-- * Inspect the status of the stream +-- +-- * When it is 'CompressInputRequired' then you should call the action, +-- passing a chunk of input (or 'BS.empty' when no more input is available) +-- to get the next state of the stream and continue the loop. +-- +-- * When it is 'CompressOutputAvailable' then do something with the given +-- chunk of output, and call the action to get the next state of the stream +-- and continue the loop. +-- +-- * When it is 'CompressStreamEnd' then terminate the loop. +-- +-- Note that you cannot stop as soon as you have no more input, you need to +-- carry on until all the output has been collected, i.e. until you get to +-- 'CompressStreamEnd'. +-- +-- Here is an example where we get input from one file handle and send the +-- compressed output to another file handle. +-- +-- > go :: Handle -> Handle -> CompressStream IO -> IO () +-- > go inh outh (CompressInputRequired next) = do +-- > inchunk <- BS.hGet inh 4096 +-- > go inh outh =<< next inchunk +-- > go inh outh (CompressOutputAvailable outchunk next) = +-- > BS.hPut outh outchunk +-- > go inh outh =<< next +-- > go _ _ CompressStreamEnd = return () +-- +-- The same can be achieved with 'foldCompressStream': +-- +-- > foldCompressStream +-- > (\next -> do inchunk <- BS.hGet inh 4096; next inchunk) +-- > (\outchunk next -> do BS.hPut outh outchunk; next) +-- > (return ()) + +-- $using-incremental-decompression +-- +-- The use of 'DecompressStream' is very similar to 'CompressStream' but with +-- a few differences: +-- +-- * There is the extra possibility of a 'DecompressStreamError' +-- +-- * There can be extra trailing data after a compressed stream, and the +-- 'DecompressStreamEnd' includes that. +-- +-- Otherwise the same loop style applies, and there are fold functions. 
+ +-- | The unfolding of the compression process, where you provide a sequence +-- of uncompressed data chunks as input and receive a sequence of compressed +-- data chunks as output. The process is incremental, in that the demand for +-- input and provision of output are interleaved. +-- +data CompressStream m = + CompressInputRequired { + compressSupplyInput :: S.ByteString -> m (CompressStream m) + } + + | CompressOutputAvailable { + compressOutput :: !S.ByteString, + compressNext :: m (CompressStream m) + } + + | CompressStreamEnd + +-- | A fold over the 'CompressStream' in the given monad. +-- +-- One way to look at this is that it runs the stream, using callback functions +-- for the three stream events. +-- +foldCompressStream :: Monad m + => ((S.ByteString -> m a) -> m a) + -> (S.ByteString -> m a -> m a) + -> m a + -> CompressStream m -> m a +foldCompressStream input output end = fold + where + fold (CompressInputRequired next) = + input (\x -> next x >>= fold) + + fold (CompressOutputAvailable outchunk next) = + output outchunk (next >>= fold) + + fold CompressStreamEnd = + end + +-- | A variant on 'foldCompressStream' that is pure rather than operating in a +-- monad and where the input is provided by a lazy 'L.ByteString'. So we only +-- have to deal with the output and end parts, making it just like a foldr on a +-- list of output chunks. +-- +-- For example: +-- +-- > toChunks = foldCompressStreamWithInput (:) [] +-- +foldCompressStreamWithInput :: (S.ByteString -> a -> a) + -> a + -> (forall s. 
CompressStream (ST s)) + -> L.ByteString + -> a +foldCompressStreamWithInput chunk end = \s lbs -> + runST (fold s (L.toChunks lbs)) + where + fold (CompressInputRequired next) [] = + next S.empty >>= \strm -> fold strm [] + + fold (CompressInputRequired next) (inchunk:inchunks) = + next inchunk >>= \s -> fold s inchunks + + fold (CompressOutputAvailable outchunk next) inchunks = do + r <- next >>= \s -> fold s inchunks + return $ chunk outchunk r + + fold CompressStreamEnd _inchunks = + return end + + +-- | Compress a data stream provided as a lazy 'L.ByteString'. +-- +-- There are no expected error conditions. All input data streams are valid. It +-- is possible for unexpected errors to occur, such as running out of memory, +-- or finding the wrong version of the zlib C library, these are thrown as +-- exceptions. +-- +compress :: Stream.Format -> CompressParams -> L.ByteString -> L.ByteString + +-- | Incremental compression in the 'ST' monad. Using 'ST' makes it possible +-- to write pure /lazy/ functions while making use of incremental compression. +-- +compressST :: Stream.Format -> CompressParams -> CompressStream (ST s) + +-- | Incremental compression in the 'IO' monad. 
+-- +compressIO :: Stream.Format -> CompressParams -> CompressStream IO + +compress format params = foldCompressStreamWithInput + L.Chunk L.Empty + (compressStreamST format params) +compressST format params = compressStreamST format params +compressIO format params = compressStreamIO format params + +compressStream :: Stream.Format -> CompressParams -> S.ByteString + -> Stream (CompressStream Stream) +compressStream format (CompressParams compLevel method bits memLevel + strategy initChunkSize mdict) = + + \chunk -> do + Stream.deflateInit format compLevel method bits memLevel strategy + setDictionary mdict + case chunk of + _ | S.null chunk -> + fillBuffers 20 --gzip header is 20 bytes, others even smaller + + S.PS inFPtr offset length -> do + Stream.pushInputBuffer inFPtr offset length + fillBuffers initChunkSize + + where + -- we flick between two states: + -- * where one or other buffer is empty + -- - in which case we refill one or both + -- * where both buffers are non-empty + -- - in which case we compress until a buffer is empty + + fillBuffers :: Int -> Stream (CompressStream Stream) + fillBuffers outChunkSize = do +#ifdef DEBUG + Stream.consistencyCheck +#endif + + -- in this state there are two possabilities: + -- * no outbut buffer space is available + -- - in which case we must make more available + -- * no input buffer is available + -- - in which case we must supply more + inputBufferEmpty <- Stream.inputBufferEmpty + outputBufferFull <- Stream.outputBufferFull + + assert (inputBufferEmpty || outputBufferFull) $ return () + + when outputBufferFull $ do + outFPtr <- Stream.unsafeLiftIO (S.mallocByteString outChunkSize) + Stream.pushOutputBuffer outFPtr 0 outChunkSize + + if inputBufferEmpty + then return $ CompressInputRequired $ \chunk -> + case chunk of + _ | S.null chunk -> drainBuffers True + S.PS inFPtr offset length -> do + Stream.pushInputBuffer inFPtr offset length + drainBuffers False + else drainBuffers False + + + drainBuffers :: Bool -> 
Stream (CompressStream Stream) + drainBuffers lastChunk = do + + inputBufferEmpty' <- Stream.inputBufferEmpty + outputBufferFull' <- Stream.outputBufferFull + assert(not outputBufferFull' + && (lastChunk || not inputBufferEmpty')) $ return () + -- this invariant guarantees we can always make forward progress + -- and that therefore a BufferError is impossible + + let flush = if lastChunk then Stream.Finish else Stream.NoFlush + status <- Stream.deflate flush + + case status of + Stream.Ok -> do + outputBufferFull <- Stream.outputBufferFull + if outputBufferFull + then do (outFPtr, offset, length) <- Stream.popOutputBuffer + let chunk = S.PS outFPtr offset length + return $ CompressOutputAvailable chunk $ do + fillBuffers defaultCompressBufferSize + else do fillBuffers defaultCompressBufferSize + + Stream.StreamEnd -> do + inputBufferEmpty <- Stream.inputBufferEmpty + assert inputBufferEmpty $ return () + outputBufferBytesAvailable <- Stream.outputBufferBytesAvailable + if outputBufferBytesAvailable > 0 + then do (outFPtr, offset, length) <- Stream.popOutputBuffer + let chunk = S.PS outFPtr offset length + Stream.finalise + return $ CompressOutputAvailable chunk (return CompressStreamEnd) + else do Stream.finalise + return CompressStreamEnd + + Stream.Error code msg -> case code of + Stream.BufferError -> fail "BufferError should be impossible!" + Stream.NeedDict _ -> fail "NeedDict is impossible!" + _ -> fail msg + + -- Set the custom dictionary, if we were provided with one + -- and if the format supports it (zlib and raw, not gzip). + setDictionary :: Maybe S.ByteString -> Stream () + setDictionary (Just dict) + | Stream.formatSupportsDictionary format = do + status <- Stream.deflateSetDictionary dict + case status of + Stream.Ok -> return () + Stream.Error _ msg -> fail msg + _ -> fail "error when setting deflate dictionary" + setDictionary _ = return () + + +-- | Decompress a data stream provided as a lazy 'L.ByteString'. 
+-- +-- It will throw an exception if any error is encountered in the input data. +-- If you need more control over error handling then use one the incremental +-- versions, 'decompressST' or 'decompressIO'. +-- +decompress :: Stream.Format -> DecompressParams -> L.ByteString -> L.ByteString + +-- | Incremental decompression in the 'ST' monad. Using 'ST' makes it possible +-- to write pure /lazy/ functions while making use of incremental decompression. +-- +decompressST :: Stream.Format -> DecompressParams -> DecompressStream (ST s) + +-- | Incremental decompression in the 'IO' monad. +-- +decompressIO :: Stream.Format -> DecompressParams -> DecompressStream IO + +decompress format params = foldDecompressStreamWithInput + L.Chunk (const L.Empty) throw + (decompressStreamST format params) +decompressST format params = decompressStreamST format params +decompressIO format params = decompressStreamIO format params + + +decompressStream :: Stream.Format -> DecompressParams + -> Bool -> S.ByteString + -> Stream (DecompressStream Stream) +decompressStream format (DecompressParams bits initChunkSize mdict allMembers) + resume = + + \chunk -> do + inputBufferEmpty <- Stream.inputBufferEmpty + outputBufferFull <- Stream.outputBufferFull + assert inputBufferEmpty $ + if resume then assert (format == Stream.gzipFormat && allMembers) $ + Stream.inflateReset + else assert outputBufferFull $ + Stream.inflateInit format bits + case chunk of + _ | S.null chunk -> do + -- special case to avoid demanding more input again + -- always an error anyway + when outputBufferFull $ do + let outChunkSize = 1 + outFPtr <- Stream.unsafeLiftIO (S.mallocByteString outChunkSize) + Stream.pushOutputBuffer outFPtr 0 outChunkSize + drainBuffers True + + S.PS inFPtr offset length -> do + Stream.pushInputBuffer inFPtr offset length + -- Normally we start with no output buffer (so counts as full) but + -- if we're resuming then we'll usually still have output buffer + -- space available + assert (if 
not resume then outputBufferFull else True) $ return () + if outputBufferFull + then fillBuffers initChunkSize + else drainBuffers False + + where + -- we flick between two states: + -- * where one or other buffer is empty + -- - in which case we refill one or both + -- * where both buffers are non-empty + -- - in which case we compress until a buffer is empty + + fillBuffers :: Int + -> Stream (DecompressStream Stream) + fillBuffers outChunkSize = do +#ifdef DEBUG + Stream.consistencyCheck +#endif + + -- in this state there are two possabilities: + -- * no outbut buffer space is available + -- - in which case we must make more available + -- * no input buffer is available + -- - in which case we must supply more + inputBufferEmpty <- Stream.inputBufferEmpty + outputBufferFull <- Stream.outputBufferFull + + assert (inputBufferEmpty || outputBufferFull) $ return () + + when outputBufferFull $ do + outFPtr <- Stream.unsafeLiftIO (S.mallocByteString outChunkSize) + Stream.pushOutputBuffer outFPtr 0 outChunkSize + + if inputBufferEmpty + then return $ DecompressInputRequired $ \chunk -> + case chunk of + _ | S.null chunk -> drainBuffers True + S.PS inFPtr offset length -> do + Stream.pushInputBuffer inFPtr offset length + drainBuffers False + else drainBuffers False + + + drainBuffers :: Bool -> Stream (DecompressStream Stream) + drainBuffers lastChunk = do + + inputBufferEmpty' <- Stream.inputBufferEmpty + outputBufferFull' <- Stream.outputBufferFull + assert(not outputBufferFull' + && (lastChunk || not inputBufferEmpty')) $ return () + -- this invariant guarantees we can always make forward progress or at + -- least if a BufferError does occur that it must be due to a premature EOF + + status <- Stream.inflate Stream.NoFlush + + case status of + Stream.Ok -> do + outputBufferFull <- Stream.outputBufferFull + if outputBufferFull + then do (outFPtr, offset, length) <- Stream.popOutputBuffer + let chunk = S.PS outFPtr offset length + return $ DecompressOutputAvailable 
chunk $ do + fillBuffers defaultDecompressBufferSize + else do fillBuffers defaultDecompressBufferSize + + Stream.StreamEnd -> do + -- The decompressor tells us we're done. + -- Note that there may be input bytes still available if the stream is + -- embeded in some other data stream, so we return any trailing data. + inputBufferEmpty <- Stream.inputBufferEmpty + if inputBufferEmpty + then do finish (DecompressStreamEnd S.empty) + else do (inFPtr, offset, length) <- Stream.popRemainingInputBuffer + let inchunk = S.PS inFPtr offset length + finish (DecompressStreamEnd inchunk) + + Stream.Error code msg -> case code of + Stream.BufferError -> finish (DecompressStreamError TruncatedInput) + Stream.NeedDict adler -> do + err <- setDictionary adler mdict + case err of + Just streamErr -> finish streamErr + Nothing -> drainBuffers lastChunk + Stream.DataError -> finish (DecompressStreamError (DataFormatError msg)) + _ -> fail msg + + -- Note even if we end with an error we still try to flush the last chunk if + -- there is one. The user just has to decide what they want to trust. 
+ finish end = do + outputBufferBytesAvailable <- Stream.outputBufferBytesAvailable + if outputBufferBytesAvailable > 0 + then do (outFPtr, offset, length) <- Stream.popOutputBuffer + return (DecompressOutputAvailable (S.PS outFPtr offset length) (return end)) + else return end + + setDictionary :: Stream.DictionaryHash -> Maybe S.ByteString + -> Stream (Maybe (DecompressStream Stream)) + setDictionary _adler Nothing = + return $ Just (DecompressStreamError DictionaryRequired) + setDictionary _adler (Just dict) = do + status <- Stream.inflateSetDictionary dict + case status of + Stream.Ok -> return Nothing + Stream.Error Stream.DataError _ -> + return $ Just (DecompressStreamError DictionaryMismatch) + _ -> fail "error when setting inflate dictionary" + + +------------------------------------------------------------------------------ + +mkStateST :: ST s (Stream.State s) +mkStateIO :: IO (Stream.State RealWorld) +mkStateST = strictToLazyST Stream.mkState +mkStateIO = stToIO Stream.mkState + +runStreamST :: Stream a -> Stream.State s -> ST s (a, Stream.State s) +runStreamIO :: Stream a -> Stream.State RealWorld -> IO (a, Stream.State RealWorld) +runStreamST strm zstate = strictToLazyST (Stream.runStream strm zstate) +runStreamIO strm zstate = stToIO (Stream.runStream strm zstate) + +compressStreamIO :: Stream.Format -> CompressParams -> CompressStream IO +compressStreamIO format params = + CompressInputRequired { + compressSupplyInput = \chunk -> do + zstate <- mkStateIO + let next = compressStream format params + (strm', zstate') <- runStreamIO (next chunk) zstate + return (go strm' zstate') + } + where + go :: CompressStream Stream -> Stream.State RealWorld -> CompressStream IO + go (CompressInputRequired next) zstate = + CompressInputRequired { + compressSupplyInput = \chunk -> do + (strm', zstate') <- runStreamIO (next chunk) zstate + return (go strm' zstate') + } + + go (CompressOutputAvailable chunk next) zstate = + CompressOutputAvailable chunk $ do + (strm', 
zstate') <- runStreamIO next zstate + return (go strm' zstate') + + go CompressStreamEnd _ = CompressStreamEnd + +compressStreamST :: Stream.Format -> CompressParams -> CompressStream (ST s) +compressStreamST format params = + CompressInputRequired { + compressSupplyInput = \chunk -> do + zstate <- mkStateST + let next = compressStream format params + (strm', zstate') <- runStreamST (next chunk) zstate + return (go strm' zstate') + } + where + go :: CompressStream Stream -> Stream.State s -> CompressStream (ST s) + go (CompressInputRequired next) zstate = + CompressInputRequired { + compressSupplyInput = \chunk -> do + (strm', zstate') <- runStreamST (next chunk) zstate + return (go strm' zstate') + } + + go (CompressOutputAvailable chunk next) zstate = + CompressOutputAvailable chunk $ do + (strm', zstate') <- runStreamST next zstate + return (go strm' zstate') + + go CompressStreamEnd _ = CompressStreamEnd + + +decompressStreamIO :: Stream.Format -> DecompressParams -> DecompressStream IO +decompressStreamIO format params = + DecompressInputRequired $ \chunk -> do + zstate <- mkStateIO + let next = decompressStream format params False + (strm', zstate') <- runStreamIO (next chunk) zstate + go strm' zstate' (S.null chunk) + where + go :: DecompressStream Stream -> Stream.State RealWorld -> Bool + -> IO (DecompressStream IO) + go (DecompressInputRequired next) zstate !_ = + return $ DecompressInputRequired $ \chunk -> do + (strm', zstate') <- runStreamIO (next chunk) zstate + go strm' zstate' (S.null chunk) + + go (DecompressOutputAvailable chunk next) zstate !eof = + return $ DecompressOutputAvailable chunk $ do + (strm', zstate') <- runStreamIO next zstate + go strm' zstate' eof + + go (DecompressStreamEnd unconsumed) zstate !eof + | format == Stream.gzipFormat + , decompressAllMembers params + , not eof = tryFollowingStream unconsumed zstate + | otherwise = finaliseStreamEnd unconsumed zstate + + go (DecompressStreamError err) zstate !_ = finaliseStreamError err 
zstate + + tryFollowingStream :: S.ByteString -> Stream.State RealWorld -> IO (DecompressStream IO) + tryFollowingStream chunk zstate = case S.length chunk of + 0 -> return $ DecompressInputRequired $ \chunk' -> case S.length chunk' of + 0 -> finaliseStreamEnd S.empty zstate + 1 | S.head chunk' /= 0x1f + -> finaliseStreamEnd chunk' zstate + 1 -> return $ DecompressInputRequired $ \chunk'' -> case S.length chunk'' of + 0 -> finaliseStreamEnd chunk' zstate + _ -> checkHeaderSplit (S.head chunk') chunk'' zstate + _ -> checkHeader chunk' zstate + 1 -> return $ DecompressInputRequired $ \chunk' -> case S.length chunk' of + 0 -> finaliseStreamEnd chunk zstate + _ -> checkHeaderSplit (S.head chunk) chunk' zstate + _ -> checkHeader chunk zstate + + checkHeaderSplit :: Word8 -> S.ByteString -> Stream.State RealWorld -> IO (DecompressStream IO) + checkHeaderSplit 0x1f chunk zstate + | S.head chunk == 0x8b = do + let resume = decompressStream format params True (S.pack [0x1f, 0x8b]) + if S.length chunk > 1 + then do + -- have to handle the remaining data in this chunk + (DecompressInputRequired next, zstate') <- runStreamIO resume zstate + (strm', zstate'') <- runStreamIO (next (S.tail chunk)) zstate' + go strm' zstate'' False + else do + -- subtle special case when the chunk tail is empty + -- yay for QC tests + (strm, zstate') <- runStreamIO resume zstate + go strm zstate' False + checkHeaderSplit byte chunk zstate = + finaliseStreamEnd (S.cons byte chunk) zstate + + checkHeader :: S.ByteString -> Stream.State RealWorld -> IO (DecompressStream IO) + checkHeader chunk zstate + | S.index chunk 0 == 0x1f + , S.index chunk 1 == 0x8b = do + let resume = decompressStream format params True chunk + (strm', zstate') <- runStreamIO resume zstate + go strm' zstate' False + checkHeader chunk zstate = finaliseStreamEnd chunk zstate + + finaliseStreamEnd unconsumed zstate = do + _ <- runStreamIO Stream.finalise zstate + return (DecompressStreamEnd unconsumed) + + finaliseStreamError err 
zstate = do + _ <- runStreamIO Stream.finalise zstate + return (DecompressStreamError err) + + +decompressStreamST :: Stream.Format -> DecompressParams -> DecompressStream (ST s) +decompressStreamST format params = + DecompressInputRequired $ \chunk -> do + zstate <- mkStateST + let next = decompressStream format params False + (strm', zstate') <- runStreamST (next chunk) zstate + go strm' zstate' (S.null chunk) + where + go :: DecompressStream Stream -> Stream.State s -> Bool + -> ST s (DecompressStream (ST s)) + go (DecompressInputRequired next) zstate !_ = + return $ DecompressInputRequired $ \chunk -> do + (strm', zstate') <- runStreamST (next chunk) zstate + go strm' zstate' (S.null chunk) + + go (DecompressOutputAvailable chunk next) zstate !eof = + return $ DecompressOutputAvailable chunk $ do + (strm', zstate') <- runStreamST next zstate + go strm' zstate' eof + + go (DecompressStreamEnd unconsumed) zstate !eof + | format == Stream.gzipFormat + , decompressAllMembers params + , not eof = tryFollowingStream unconsumed zstate + | otherwise = finaliseStreamEnd unconsumed zstate + + go (DecompressStreamError err) zstate !_ = finaliseStreamError err zstate + + + tryFollowingStream :: S.ByteString -> Stream.State s -> ST s (DecompressStream (ST s)) + tryFollowingStream chunk zstate = + case S.length chunk of + 0 -> return $ DecompressInputRequired $ \chunk' -> case S.length chunk' of + 0 -> finaliseStreamEnd S.empty zstate + 1 | S.head chunk' /= 0x1f + -> finaliseStreamEnd chunk' zstate + 1 -> return $ DecompressInputRequired $ \chunk'' -> case S.length chunk'' of + 0 -> finaliseStreamEnd chunk' zstate + _ -> checkHeaderSplit (S.head chunk') chunk'' zstate + _ -> checkHeader chunk' zstate + 1 -> return $ DecompressInputRequired $ \chunk' -> case S.length chunk' of + 0 -> finaliseStreamEnd chunk zstate + _ -> checkHeaderSplit (S.head chunk) chunk' zstate + _ -> checkHeader chunk zstate + + checkHeaderSplit :: Word8 -> S.ByteString -> Stream.State s -> ST s 
(DecompressStream (ST s)) + checkHeaderSplit 0x1f chunk zstate + | S.head chunk == 0x8b = do + let resume = decompressStream format params True (S.pack [0x1f, 0x8b]) + if S.length chunk > 1 + then do + -- have to handle the remaining data in this chunk + (DecompressInputRequired next, zstate') <- runStreamST resume zstate + (strm', zstate'') <- runStreamST (next (S.tail chunk)) zstate' + go strm' zstate'' False + else do + -- subtle special case when the chunk tail is empty + -- yay for QC tests + (strm, zstate') <- runStreamST resume zstate + go strm zstate' False + checkHeaderSplit byte chunk zstate = + finaliseStreamEnd (S.cons byte chunk) zstate + + checkHeader :: S.ByteString -> Stream.State s -> ST s (DecompressStream (ST s)) + checkHeader chunk zstate + | S.index chunk 0 == 0x1f + , S.index chunk 1 == 0x8b = do + let resume = decompressStream format params True chunk + (strm', zstate') <- runStreamST resume zstate + go strm' zstate' False + checkHeader chunk zstate = finaliseStreamEnd chunk zstate + + finaliseStreamEnd unconsumed zstate = do + _ <- runStreamST Stream.finalise zstate + return (DecompressStreamEnd unconsumed) + + finaliseStreamError err zstate = do + _ <- runStreamST Stream.finalise zstate + return (DecompressStreamError err) diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/Codec/Compression/Zlib/Raw.hs cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/Codec/Compression/Zlib/Raw.hs --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/Codec/Compression/Zlib/Raw.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/Codec/Compression/Zlib/Raw.hs 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,68 @@ +----------------------------------------------------------------------------- +-- | +-- Copyright : (c) 2006-2014 Duncan Coutts +-- License : BSD-style +-- +-- Maintainer : duncan@community.haskell.org +-- +-- Compression and decompression of data streams in the raw deflate format. 
+-- +-- The format is described in detail in RFC #1951: +-- +-- +-- See also the zlib home page: +-- +----------------------------------------------------------------------------- +module Codec.Compression.Zlib.Raw ( + + -- * Simple compression and decompression + compress, + decompress, + + -- * Extended api with control over compression parameters + compressWith, + decompressWith, + + CompressParams(..), defaultCompressParams, + DecompressParams(..), defaultDecompressParams, + + -- ** The compression parameter types + CompressionLevel(..), + defaultCompression, + noCompression, + bestSpeed, + bestCompression, + compressionLevel, + Method(..), + deflateMethod, + WindowBits(..), + defaultWindowBits, + windowBits, + MemoryLevel(..), + defaultMemoryLevel, + minMemoryLevel, + maxMemoryLevel, + memoryLevel, + CompressionStrategy(..), + defaultStrategy, + filteredStrategy, + huffmanOnlyStrategy, + + ) where + +import Data.ByteString.Lazy (ByteString) + +import qualified Codec.Compression.Zlib.Internal as Internal +import Codec.Compression.Zlib.Internal hiding (compress, decompress) + +decompress :: ByteString -> ByteString +decompress = decompressWith defaultDecompressParams + +decompressWith :: DecompressParams -> ByteString -> ByteString +decompressWith = Internal.decompress rawFormat + +compress :: ByteString -> ByteString +compress = compressWith defaultCompressParams + +compressWith :: CompressParams -> ByteString -> ByteString +compressWith = Internal.compress rawFormat diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/Codec/Compression/Zlib/Stream.hsc cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/Codec/Compression/Zlib/Stream.hsc --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/Codec/Compression/Zlib/Stream.hsc 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/Codec/Compression/Zlib/Stream.hsc 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,1065 @@ +{-# LANGUAGE CPP, ForeignFunctionInterface, DeriveDataTypeable #-} +#if 
__GLASGOW_HASKELL__ >= 702 +{-# LANGUAGE DeriveGeneric #-} +#endif +----------------------------------------------------------------------------- +-- | +-- Copyright : (c) 2006-2015 Duncan Coutts +-- License : BSD-style +-- +-- Maintainer : duncan@community.haskell.org +-- +-- Zlib wrapper layer +-- +----------------------------------------------------------------------------- +module Codec.Compression.Zlib.Stream ( + + -- * The Zlib state monad + Stream, + State, + mkState, + runStream, + unsafeLiftIO, + finalise, + + -- * Initialisation + deflateInit, + inflateInit, + + -- ** Initialisation parameters + Format(..), + gzipFormat, + zlibFormat, + rawFormat, + gzipOrZlibFormat, + formatSupportsDictionary, + CompressionLevel(..), + defaultCompression, + noCompression, + bestSpeed, + bestCompression, + compressionLevel, + Method(..), + deflateMethod, + WindowBits(..), + defaultWindowBits, + windowBits, + MemoryLevel(..), + defaultMemoryLevel, + minMemoryLevel, + maxMemoryLevel, + memoryLevel, + CompressionStrategy(..), + defaultStrategy, + filteredStrategy, + huffmanOnlyStrategy, + + -- * The buisness + deflate, + inflate, + Status(..), + Flush(..), + ErrorCode(..), + -- ** Special operations + inflateReset, + + -- * Buffer management + -- ** Input buffer + pushInputBuffer, + inputBufferEmpty, + popRemainingInputBuffer, + + -- ** Output buffer + pushOutputBuffer, + popOutputBuffer, + outputBufferBytesAvailable, + outputBufferSpaceRemaining, + outputBufferFull, + + -- ** Dictionary + deflateSetDictionary, + inflateSetDictionary, + + -- ** Dictionary hashes + DictionaryHash, + dictionaryHash, + zeroDictionaryHash, + +#ifdef DEBUG + -- * Debugging + consistencyCheck, + dump, + trace, +#endif + + ) where + +-- Note we don't use the MIN_VERSION_* macros here for compatability with +-- old Cabal versions that come with old GHC, that didn't provide these +-- macros for .hsc files. So we use __GLASGOW_HASKELL__ as a proxy. 
+ +import Foreign + ( Word8, Ptr, nullPtr, plusPtr, peekByteOff, pokeByteOff + , ForeignPtr, FinalizerPtr, mallocForeignPtrBytes, addForeignPtrFinalizer + , withForeignPtr, touchForeignPtr, minusPtr ) +#if __GLASGOW_HASKELL__ >= 702 +import Foreign.ForeignPtr.Unsafe ( unsafeForeignPtrToPtr ) +import System.IO.Unsafe ( unsafePerformIO ) +#else +import Foreign ( unsafeForeignPtrToPtr, unsafePerformIO ) +#endif +#ifdef __GLASGOW_HASKELL__ +import Foreign + ( finalizeForeignPtr ) +#endif +import Foreign.C +import Data.ByteString.Internal (nullForeignPtr) +import qualified Data.ByteString.Unsafe as B +import Data.ByteString (ByteString) +#if !(__GLASGOW_HASKELL__ >= 710) +import Control.Applicative (Applicative(..)) +#endif +import Control.Monad (ap,liftM) +#if __GLASGOW_HASKELL__ >= 702 +#if __GLASGOW_HASKELL__ >= 708 +import Control.Monad.ST.Strict +#else +import Control.Monad.ST.Strict hiding (unsafeIOToST) +#endif +import Control.Monad.ST.Unsafe +#else +import Control.Monad.ST.Strict +#endif +import Control.Exception (assert) +import Data.Typeable (Typeable) +#if __GLASGOW_HASKELL__ >= 702 +import GHC.Generics (Generic) +#endif +#ifdef DEBUG +import System.IO (hPutStrLn, stderr) +#endif + +import Prelude hiding (length) + +#include "zlib.h" + + +pushInputBuffer :: ForeignPtr Word8 -> Int -> Int -> Stream () +pushInputBuffer inBuf' offset length = do + + -- must not push a new input buffer if the last one is not used up + inAvail <- getInAvail + assert (inAvail == 0) $ return () + + -- Now that we're setting a new input buffer, we can be sure that zlib no + -- longer has a reference to the old one. Therefore this is the last point + -- at which the old buffer had to be retained. It's safe to release now. + inBuf <- getInBuf + unsafeLiftIO $ touchForeignPtr inBuf + + -- now set the available input buffer ptr and length + setInBuf inBuf' + setInAvail length + setInNext (unsafeForeignPtrToPtr inBuf' `plusPtr` offset) + -- Note the 'unsafe'. 
We are passing the raw ptr inside inBuf' to zlib. + -- To make this safe we need to hold on to the ForeignPtr for at least as + -- long as zlib is using the underlying raw ptr. + + +inputBufferEmpty :: Stream Bool +inputBufferEmpty = getInAvail >>= return . (==0) + + +popRemainingInputBuffer :: Stream (ForeignPtr Word8, Int, Int) +popRemainingInputBuffer = do + + inBuf <- getInBuf + inNext <- getInNext + inAvail <- getInAvail + + -- there really should be something to pop, otherwise it's silly + assert (inAvail > 0) $ return () + setInAvail 0 + + return (inBuf, inNext `minusPtr` unsafeForeignPtrToPtr inBuf, inAvail) + + +pushOutputBuffer :: ForeignPtr Word8 -> Int -> Int -> Stream () +pushOutputBuffer outBuf' offset length = do + + --must not push a new buffer if there is still data in the old one + outAvail <- getOutAvail + assert (outAvail == 0) $ return () + -- Note that there may still be free space in the output buffer, that's ok, + -- you might not want to bother completely filling the output buffer say if + -- there's only a few free bytes left. + + outBuf <- getOutBuf + unsafeLiftIO $ touchForeignPtr outBuf + + -- now set the available input buffer ptr and length + setOutBuf outBuf' + setOutFree length + setOutNext (unsafeForeignPtrToPtr outBuf' `plusPtr` offset) + + setOutOffset offset + setOutAvail 0 + + +-- get that part of the output buffer that is currently full +-- (might be 0, use outputBufferBytesAvailable to check) +-- this may leave some space remaining in the buffer, use +-- outputBufferSpaceRemaining to check. 
+popOutputBuffer :: Stream (ForeignPtr Word8, Int, Int) +popOutputBuffer = do + + outBuf <- getOutBuf + outOffset <- getOutOffset + outAvail <- getOutAvail + + -- there really should be something to pop, otherwise it's silly + assert (outAvail > 0) $ return () + + setOutOffset (outOffset + outAvail) + setOutAvail 0 + + return (outBuf, outOffset, outAvail) + + +-- this is the number of bytes available in the output buffer +outputBufferBytesAvailable :: Stream Int +outputBufferBytesAvailable = getOutAvail + + +-- you needen't get all the output immediately, you can continue until +-- there is no more output space available, this tells you that amount +outputBufferSpaceRemaining :: Stream Int +outputBufferSpaceRemaining = getOutFree + + +-- you only need to supply a new buffer when there is no more output buffer +-- space remaining +outputBufferFull :: Stream Bool +outputBufferFull = liftM (==0) outputBufferSpaceRemaining + + +-- you can only run this when the output buffer is not empty +-- you can run it when the input buffer is empty but it doesn't do anything +-- after running deflate either the output buffer will be full +-- or the input buffer will be empty (or both) +deflate :: Flush -> Stream Status +deflate flush = do + + outFree <- getOutFree + + -- deflate needs free space in the output buffer + assert (outFree > 0) $ return () + + result <- deflate_ flush + outFree' <- getOutFree + + -- number of bytes of extra output there is available as a result of + -- the call to deflate: + let outExtra = outFree - outFree' + + outAvail <- getOutAvail + setOutAvail (outAvail + outExtra) + return result + + +inflate :: Flush -> Stream Status +inflate flush = do + + outFree <- getOutFree + + -- inflate needs free space in the output buffer + assert (outFree > 0) $ return () + + result <- inflate_ flush + outFree' <- getOutFree + + -- number of bytes of extra output there is available as a result of + -- the call to inflate: + let outExtra = outFree - outFree' + + 
outAvail <- getOutAvail + setOutAvail (outAvail + outExtra) + return result + + +inflateReset :: Stream () +inflateReset = do + + outAvail <- getOutAvail + inAvail <- getInAvail + -- At the point where this is used, all the output should have been consumed + -- and any trailing input should be extracted and resupplied explicitly, not + -- just left. + assert (outAvail == 0 && inAvail == 0) $ return () + + err <- withStreamState $ \zstream -> + c_inflateReset zstream + failIfError err + + + +deflateSetDictionary :: ByteString -> Stream Status +deflateSetDictionary dict = do + err <- withStreamState $ \zstream -> + B.unsafeUseAsCStringLen dict $ \(ptr, len) -> + c_deflateSetDictionary zstream ptr (fromIntegral len) + toStatus err + +inflateSetDictionary :: ByteString -> Stream Status +inflateSetDictionary dict = do + err <- withStreamState $ \zstream -> do + B.unsafeUseAsCStringLen dict $ \(ptr, len) -> + c_inflateSetDictionary zstream ptr (fromIntegral len) + toStatus err + +-- | A hash of a custom compression dictionary. These hashes are used by +-- zlib as dictionary identifiers. +-- (The particular hash function used is Adler32.) +-- +newtype DictionaryHash = DictHash CULong + deriving (Eq, Ord, Read, Show) + +-- | Update a running 'DictionaryHash'. You can generate a 'DictionaryHash' +-- from one or more 'ByteString's by starting from 'zeroDictionaryHash', e.g. +-- +-- > dictionaryHash zeroDictionaryHash :: ByteString -> DictionaryHash +-- +-- or +-- +-- > foldl' dictionaryHash zeroDictionaryHash :: [ByteString] -> DictionaryHash +-- +dictionaryHash :: DictionaryHash -> ByteString -> DictionaryHash +dictionaryHash (DictHash adler) dict = + unsafePerformIO $ + B.unsafeUseAsCStringLen dict $ \(ptr, len) -> + liftM DictHash $ c_adler32 adler ptr (fromIntegral len) + +-- | A zero 'DictionaryHash' to use as the initial value with 'dictionaryHash'. 
+-- +zeroDictionaryHash :: DictionaryHash +zeroDictionaryHash = DictHash 0 + +---------------------------- +-- Stream monad +-- + +newtype Stream a = Z { + unZ :: ForeignPtr StreamState + -> ForeignPtr Word8 + -> ForeignPtr Word8 + -> Int -> Int + -> IO (ForeignPtr Word8 + ,ForeignPtr Word8 + ,Int, Int, a) + } + +instance Functor Stream where + fmap = liftM + +instance Applicative Stream where + pure = return + (<*>) = ap + +instance Monad Stream where + (>>=) = thenZ +-- m >>= f = (m `thenZ` \a -> consistencyCheck `thenZ_` returnZ a) `thenZ` f + (>>) = thenZ_ + return = returnZ + fail = (finalise >>) . failZ + +returnZ :: a -> Stream a +returnZ a = Z $ \_ inBuf outBuf outOffset outLength -> + return (inBuf, outBuf, outOffset, outLength, a) +{-# INLINE returnZ #-} + +thenZ :: Stream a -> (a -> Stream b) -> Stream b +thenZ (Z m) f = + Z $ \stream inBuf outBuf outOffset outLength -> + m stream inBuf outBuf outOffset outLength >>= + \(inBuf', outBuf', outOffset', outLength', a) -> + unZ (f a) stream inBuf' outBuf' outOffset' outLength' +{-# INLINE thenZ #-} + +thenZ_ :: Stream a -> Stream b -> Stream b +thenZ_ (Z m) f = + Z $ \stream inBuf outBuf outOffset outLength -> + m stream inBuf outBuf outOffset outLength >>= + \(inBuf', outBuf', outOffset', outLength', _) -> + unZ f stream inBuf' outBuf' outOffset' outLength' +{-# INLINE thenZ_ #-} + +failZ :: String -> Stream a +failZ msg = Z (\_ _ _ _ _ -> fail ("Codec.Compression.Zlib: " ++ msg)) + +data State s = State !(ForeignPtr StreamState) + !(ForeignPtr Word8) + !(ForeignPtr Word8) + {-# UNPACK #-} !Int + {-# UNPACK #-} !Int + +mkState :: ST s (State s) +mkState = unsafeIOToST $ do + stream <- mallocForeignPtrBytes (#{const sizeof(z_stream)}) + withForeignPtr stream $ \ptr -> do + #{poke z_stream, msg} ptr nullPtr + #{poke z_stream, zalloc} ptr nullPtr + #{poke z_stream, zfree} ptr nullPtr + #{poke z_stream, opaque} ptr nullPtr + #{poke z_stream, next_in} ptr nullPtr + #{poke z_stream, next_out} ptr nullPtr + #{poke 
z_stream, avail_in} ptr (0 :: CUInt) + #{poke z_stream, avail_out} ptr (0 :: CUInt) + return (State stream nullForeignPtr nullForeignPtr 0 0) + +runStream :: Stream a -> State s -> ST s (a, State s) +runStream (Z m) (State stream inBuf outBuf outOffset outLength) = + unsafeIOToST $ + m stream inBuf outBuf outOffset outLength >>= + \(inBuf', outBuf', outOffset', outLength', a) -> + return (a, State stream inBuf' outBuf' outOffset' outLength') + +-- This is marked as unsafe because runStream uses unsafeIOToST so anything +-- lifted here can end up being unsafePerformIO'd. +unsafeLiftIO :: IO a -> Stream a +unsafeLiftIO m = Z $ \_stream inBuf outBuf outOffset outLength -> do + a <- m + return (inBuf, outBuf, outOffset, outLength, a) + +getStreamState :: Stream (ForeignPtr StreamState) +getStreamState = Z $ \stream inBuf outBuf outOffset outLength -> do + return (inBuf, outBuf, outOffset, outLength, stream) + +getInBuf :: Stream (ForeignPtr Word8) +getInBuf = Z $ \_stream inBuf outBuf outOffset outLength -> do + return (inBuf, outBuf, outOffset, outLength, inBuf) + +getOutBuf :: Stream (ForeignPtr Word8) +getOutBuf = Z $ \_stream inBuf outBuf outOffset outLength -> do + return (inBuf, outBuf, outOffset, outLength, outBuf) + +getOutOffset :: Stream Int +getOutOffset = Z $ \_stream inBuf outBuf outOffset outLength -> do + return (inBuf, outBuf, outOffset, outLength, outOffset) + +getOutAvail :: Stream Int +getOutAvail = Z $ \_stream inBuf outBuf outOffset outLength -> do + return (inBuf, outBuf, outOffset, outLength, outLength) + +setInBuf :: ForeignPtr Word8 -> Stream () +setInBuf inBuf = Z $ \_stream _ outBuf outOffset outLength -> do + return (inBuf, outBuf, outOffset, outLength, ()) + +setOutBuf :: ForeignPtr Word8 -> Stream () +setOutBuf outBuf = Z $ \_stream inBuf _ outOffset outLength -> do + return (inBuf, outBuf, outOffset, outLength, ()) + +setOutOffset :: Int -> Stream () +setOutOffset outOffset = Z $ \_stream inBuf outBuf _ outLength -> do + return (inBuf, 
outBuf, outOffset, outLength, ()) + +setOutAvail :: Int -> Stream () +setOutAvail outLength = Z $ \_stream inBuf outBuf outOffset _ -> do + return (inBuf, outBuf, outOffset, outLength, ()) + +---------------------------- +-- Debug stuff +-- + +#ifdef DEBUG +trace :: String -> Stream () +trace = unsafeLiftIO . hPutStrLn stderr + +dump :: Stream () +dump = do + inNext <- getInNext + inAvail <- getInAvail + + outNext <- getOutNext + outFree <- getOutFree + outAvail <- getOutAvail + outOffset <- getOutOffset + + unsafeLiftIO $ hPutStrLn stderr $ + "Stream {\n" ++ + " inNext = " ++ show inNext ++ ",\n" ++ + " inAvail = " ++ show inAvail ++ ",\n" ++ + "\n" ++ + " outNext = " ++ show outNext ++ ",\n" ++ + " outFree = " ++ show outFree ++ ",\n" ++ + " outAvail = " ++ show outAvail ++ ",\n" ++ + " outOffset = " ++ show outOffset ++ "\n" ++ + "}" + + consistencyCheck + +consistencyCheck :: Stream () +consistencyCheck = do + + outBuf <- getOutBuf + outOffset <- getOutOffset + outAvail <- getOutAvail + outNext <- getOutNext + + let outBufPtr = unsafeForeignPtrToPtr outBuf + + assert (outBufPtr `plusPtr` (outOffset + outAvail) == outNext) $ return () +#endif + + +---------------------------- +-- zlib wrapper layer +-- + +data Status = + Ok + | StreamEnd + | Error ErrorCode String + +data ErrorCode = + NeedDict DictionaryHash + | FileError + | StreamError + | DataError + | MemoryError + | BufferError -- ^ No progress was possible or there was not enough room in + -- the output buffer when 'Finish' is used. Note that + -- 'BuferError' is not fatal, and 'inflate' can be called + -- again with more input and more output space to continue. 
+ | VersionError + | Unexpected + +toStatus :: CInt -> Stream Status +toStatus errno = case errno of + (#{const Z_OK}) -> return Ok + (#{const Z_STREAM_END}) -> return StreamEnd + (#{const Z_NEED_DICT}) -> do + adler <- withStreamPtr (#{peek z_stream, adler}) + err (NeedDict (DictHash adler)) "custom dictionary needed" + (#{const Z_BUF_ERROR}) -> err BufferError "buffer error" + (#{const Z_ERRNO}) -> err FileError "file error" + (#{const Z_STREAM_ERROR}) -> err StreamError "stream error" + (#{const Z_DATA_ERROR}) -> err DataError "data error" + (#{const Z_MEM_ERROR}) -> err MemoryError "insufficient memory" + (#{const Z_VERSION_ERROR}) -> err VersionError "incompatible zlib version" + other -> return $ Error Unexpected + ("unexpected zlib status: " ++ show other) + where + err errCode altMsg = liftM (Error errCode) $ do + msgPtr <- withStreamPtr (#{peek z_stream, msg}) + if msgPtr /= nullPtr + then unsafeLiftIO (peekCAString msgPtr) + else return altMsg + +failIfError :: CInt -> Stream () +failIfError errno = toStatus errno >>= \status -> case status of + (Error _ msg) -> fail msg + _ -> return () + + +data Flush = + NoFlush + | SyncFlush + | FullFlush + | Finish +-- | Block -- only available in zlib 1.2 and later, uncomment if you need it. + +fromFlush :: Flush -> CInt +fromFlush NoFlush = #{const Z_NO_FLUSH} +fromFlush SyncFlush = #{const Z_SYNC_FLUSH} +fromFlush FullFlush = #{const Z_FULL_FLUSH} +fromFlush Finish = #{const Z_FINISH} +-- fromFlush Block = #{const Z_BLOCK} + + +-- | The format used for compression or decompression. There are three +-- variations. +-- +data Format = GZip | Zlib | Raw | GZipOrZlib + deriving (Eq, Ord, Enum, Bounded, Show, Typeable +#if __GLASGOW_HASKELL__ >= 702 + , Generic +#endif + ) + +{-# DEPRECATED GZip "Use gzipFormat. Format constructors will be hidden in version 0.7" #-} +{-# DEPRECATED Zlib "Use zlibFormat. Format constructors will be hidden in version 0.7" #-} +{-# DEPRECATED Raw "Use rawFormat. 
Format constructors will be hidden in version 0.7" #-} +{-# DEPRECATED GZipOrZlib "Use gzipOrZlibFormat. Format constructors will be hidden in version 0.7" #-} + +-- | The gzip format uses a header with a checksum and some optional meta-data +-- about the compressed file. It is intended primarily for compressing +-- individual files but is also sometimes used for network protocols such as +-- HTTP. The format is described in detail in RFC #1952 +-- +-- +gzipFormat :: Format +gzipFormat = GZip + +-- | The zlib format uses a minimal header with a checksum but no other +-- meta-data. It is especially designed for use in network protocols. The +-- format is described in detail in RFC #1950 +-- +-- +zlibFormat :: Format +zlibFormat = Zlib + +-- | The \'raw\' format is just the compressed data stream without any +-- additional header, meta-data or data-integrity checksum. The format is +-- described in detail in RFC #1951 +-- +rawFormat :: Format +rawFormat = Raw + +-- | This is not a format as such. It enabled zlib or gzip decoding with +-- automatic header detection. This only makes sense for decompression. +-- +gzipOrZlibFormat :: Format +gzipOrZlibFormat = GZipOrZlib + +formatSupportsDictionary :: Format -> Bool +formatSupportsDictionary Zlib = True +formatSupportsDictionary Raw = True +formatSupportsDictionary _ = False + +-- | The compression method +-- +data Method = Deflated + deriving (Eq, Ord, Enum, Bounded, Show, Typeable +#if __GLASGOW_HASKELL__ >= 702 + , Generic +#endif + ) + +{-# DEPRECATED Deflated "Use deflateMethod. Method constructors will be hidden in version 0.7" #-} + +-- | \'Deflate\' is the only method supported in this version of zlib. +-- Indeed it is likely to be the only method that ever will be supported. +-- +deflateMethod :: Method +deflateMethod = Deflated + +fromMethod :: Method -> CInt +fromMethod Deflated = #{const Z_DEFLATED} + + +-- | The compression level parameter controls the amount of compression. 
This +-- is a trade-off between the amount of compression and the time required to do +-- the compression. +-- +data CompressionLevel = + DefaultCompression + | NoCompression + | BestSpeed + | BestCompression + | CompressionLevel Int + deriving (Eq, Show, Typeable +#if __GLASGOW_HASKELL__ >= 702 + , Generic +#endif + ) + +{-# DEPRECATED DefaultCompression "Use defaultCompression. CompressionLevel constructors will be hidden in version 0.7" #-} +{-# DEPRECATED NoCompression "Use noCompression. CompressionLevel constructors will be hidden in version 0.7" #-} +{-# DEPRECATED BestSpeed "Use bestSpeed. CompressionLevel constructors will be hidden in version 0.7" #-} +{-# DEPRECATED BestCompression "Use bestCompression. CompressionLevel constructors will be hidden in version 0.7" #-} +--FIXME: cannot deprecate constructor named the same as the type +{- DEPRECATED CompressionLevel "Use compressionLevel. CompressionLevel constructors will be hidden in version 0.7" -} + +-- | The default compression level is 6 (that is, biased towards higher +-- compression at expense of speed). +defaultCompression :: CompressionLevel +defaultCompression = DefaultCompression + +-- | No compression, just a block copy. +noCompression :: CompressionLevel +noCompression = CompressionLevel 0 + +-- | The fastest compression method (less compression) +bestSpeed :: CompressionLevel +bestSpeed = CompressionLevel 1 + +-- | The slowest compression method (best compression). +bestCompression :: CompressionLevel +bestCompression = CompressionLevel 9 + +-- | A specific compression level between 0 and 9. 
+compressionLevel :: Int -> CompressionLevel +compressionLevel n + | n >= 0 && n <= 9 = CompressionLevel n + | otherwise = error "CompressionLevel must be in the range 0..9" + +fromCompressionLevel :: CompressionLevel -> CInt +fromCompressionLevel DefaultCompression = -1 +fromCompressionLevel NoCompression = 0 +fromCompressionLevel BestSpeed = 1 +fromCompressionLevel BestCompression = 9 +fromCompressionLevel (CompressionLevel n) + | n >= 0 && n <= 9 = fromIntegral n + | otherwise = error "CompressLevel must be in the range 1..9" + + +-- | This specifies the size of the compression window. Larger values of this +-- parameter result in better compression at the expense of higher memory +-- usage. +-- +-- The compression window size is the value of the the window bits raised to +-- the power 2. The window bits must be in the range @8..15@ which corresponds +-- to compression window sizes of 256b to 32Kb. The default is 15 which is also +-- the maximum size. +-- +-- The total amount of memory used depends on the window bits and the +-- 'MemoryLevel'. See the 'MemoryLevel' for the details. +-- +data WindowBits = WindowBits Int + | DefaultWindowBits -- This constructor must be last to make + -- the Ord instance work. The Ord instance + -- is used by the tests. + -- It makse sense because the default value + -- is is also the max value at 15. + deriving (Eq, Ord, Show, Typeable +#if __GLASGOW_HASKELL__ >= 702 + , Generic +#endif + ) + +{-# DEPRECATED DefaultWindowBits "Use defaultWindowBits. WindowBits constructors will be hidden in version 0.7" #-} +--FIXME: cannot deprecate constructor named the same as the type +{- DEPRECATED WindowBits "Use windowBits. WindowBits constructors will be hidden in version 0.7" -} + +-- | The default 'WindowBits' is 15 which is also the maximum size. 
+-- +defaultWindowBits :: WindowBits +defaultWindowBits = WindowBits 15 + +-- | A specific compression window size, specified in bits in the range @8..15@ +-- +windowBits :: Int -> WindowBits +windowBits n + | n >= 8 && n <= 15 = WindowBits n + | otherwise = error "WindowBits must be in the range 8..15" + +fromWindowBits :: Format -> WindowBits-> CInt +fromWindowBits format bits = (formatModifier format) (checkWindowBits bits) + where checkWindowBits DefaultWindowBits = 15 + checkWindowBits (WindowBits n) + | n >= 8 && n <= 15 = fromIntegral n + | otherwise = error "WindowBits must be in the range 8..15" + formatModifier Zlib = id + formatModifier GZip = (+16) + formatModifier GZipOrZlib = (+32) + formatModifier Raw = negate + + +-- | The 'MemoryLevel' parameter specifies how much memory should be allocated +-- for the internal compression state. It is a tradoff between memory usage, +-- compression ratio and compression speed. Using more memory allows faster +-- compression and a better compression ratio. +-- +-- The total amount of memory used for compression depends on the 'WindowBits' +-- and the 'MemoryLevel'. For decompression it depends only on the +-- 'WindowBits'. The totals are given by the functions: +-- +-- > compressTotal windowBits memLevel = 4 * 2^windowBits + 512 * 2^memLevel +-- > decompressTotal windowBits = 2^windowBits +-- +-- For example, for compression with the default @windowBits = 15@ and +-- @memLevel = 8@ uses @256Kb@. So for example a network server with 100 +-- concurrent compressed streams would use @25Mb@. The memory per stream can be +-- halved (at the cost of somewhat degraded and slower compressionby) by +-- reducing the @windowBits@ and @memLevel@ by one. +-- +-- Decompression takes less memory, the default @windowBits = 15@ corresponds +-- to just @32Kb@. 
+-- +data MemoryLevel = + DefaultMemoryLevel + | MinMemoryLevel + | MaxMemoryLevel + | MemoryLevel Int + deriving (Eq, Show, Typeable +#if __GLASGOW_HASKELL__ >= 702 + , Generic +#endif + ) + +{-# DEPRECATED DefaultMemoryLevel "Use defaultMemoryLevel. MemoryLevel constructors will be hidden in version 0.7" #-} +{-# DEPRECATED MinMemoryLevel "Use minMemoryLevel. MemoryLevel constructors will be hidden in version 0.7" #-} +{-# DEPRECATED MaxMemoryLevel "Use maxMemoryLevel. MemoryLevel constructors will be hidden in version 0.7" #-} +--FIXME: cannot deprecate constructor named the same as the type +{- DEPRECATED MemoryLevel "Use memoryLevel. MemoryLevel constructors will be hidden in version 0.7" -} + +-- | The default memory level. (Equivalent to @'memoryLevel' 8@) +-- +defaultMemoryLevel :: MemoryLevel +defaultMemoryLevel = MemoryLevel 8 + +-- | Use minimum memory. This is slow and reduces the compression ratio. +-- (Equivalent to @'memoryLevel' 1@) +-- +minMemoryLevel :: MemoryLevel +minMemoryLevel = MemoryLevel 1 + +-- | Use maximum memory for optimal compression speed. +-- (Equivalent to @'memoryLevel' 9@) +-- +maxMemoryLevel :: MemoryLevel +maxMemoryLevel = MemoryLevel 9 + +-- | A specific level in the range @1..9@ +-- +memoryLevel :: Int -> MemoryLevel +memoryLevel n + | n >= 1 && n <= 9 = MemoryLevel n + | otherwise = error "MemoryLevel must be in the range 1..9" + +fromMemoryLevel :: MemoryLevel -> CInt +fromMemoryLevel DefaultMemoryLevel = 8 +fromMemoryLevel MinMemoryLevel = 1 +fromMemoryLevel MaxMemoryLevel = 9 +fromMemoryLevel (MemoryLevel n) + | n >= 1 && n <= 9 = fromIntegral n + | otherwise = error "MemoryLevel must be in the range 1..9" + + +-- | The strategy parameter is used to tune the compression algorithm. +-- +-- The strategy parameter only affects the compression ratio but not the +-- correctness of the compressed output even if it is not set appropriately. 
+-- +data CompressionStrategy = + DefaultStrategy + | Filtered + | HuffmanOnly + deriving (Eq, Ord, Enum, Bounded, Show, Typeable +#if __GLASGOW_HASKELL__ >= 702 + , Generic +#endif + ) + +{- +-- -- only available in zlib 1.2 and later, uncomment if you need it. + | RLE -- ^ Use 'RLE' to limit match distances to one (run-length + -- encoding). 'RLE' is designed to be almost as fast as + -- 'HuffmanOnly', but give better compression for PNG + -- image data. + | Fixed -- ^ 'Fixed' prevents the use of dynamic Huffman codes, + -- allowing for a simpler decoder for special applications. +-} + +{-# DEPRECATED DefaultStrategy "Use defaultStrategy. CompressionStrategy constructors will be hidden in version 0.7" #-} +{-# DEPRECATED Filtered "Use filteredStrategy. CompressionStrategy constructors will be hidden in version 0.7" #-} +{-# DEPRECATED HuffmanOnly "Use huffmanOnlyStrategy. CompressionStrategy constructors will be hidden in version 0.7" #-} + +-- | Use this default compression strategy for normal data. +-- +defaultStrategy :: CompressionStrategy +defaultStrategy = DefaultStrategy + +-- | Use the filtered compression strategy for data produced by a filter (or +-- predictor). Filtered data consists mostly of small values with a somewhat +-- random distribution. In this case, the compression algorithm is tuned to +-- compress them better. The effect of this strategy is to force more Huffman +-- coding and less string matching; it is somewhat intermediate between +-- 'defaultCompressionStrategy' and 'huffmanOnlyCompressionStrategy'. +-- +filteredStrategy :: CompressionStrategy +filteredStrategy = Filtered + +-- | Use the Huffman-only compression strategy to force Huffman encoding only +-- (no string match). 
+-- +huffmanOnlyStrategy :: CompressionStrategy +huffmanOnlyStrategy = HuffmanOnly + + +fromCompressionStrategy :: CompressionStrategy -> CInt +fromCompressionStrategy DefaultStrategy = #{const Z_DEFAULT_STRATEGY} +fromCompressionStrategy Filtered = #{const Z_FILTERED} +fromCompressionStrategy HuffmanOnly = #{const Z_HUFFMAN_ONLY} +--fromCompressionStrategy RLE = #{const Z_RLE} +--fromCompressionStrategy Fixed = #{const Z_FIXED} + + +withStreamPtr :: (Ptr StreamState -> IO a) -> Stream a +withStreamPtr f = do + stream <- getStreamState + unsafeLiftIO (withForeignPtr stream f) + +withStreamState :: (StreamState -> IO a) -> Stream a +withStreamState f = do + stream <- getStreamState + unsafeLiftIO (withForeignPtr stream (f . StreamState)) + +setInAvail :: Int -> Stream () +setInAvail val = withStreamPtr $ \ptr -> + #{poke z_stream, avail_in} ptr (fromIntegral val :: CUInt) + +getInAvail :: Stream Int +getInAvail = liftM (fromIntegral :: CUInt -> Int) $ + withStreamPtr (#{peek z_stream, avail_in}) + +setInNext :: Ptr Word8 -> Stream () +setInNext val = withStreamPtr (\ptr -> #{poke z_stream, next_in} ptr val) + +getInNext :: Stream (Ptr Word8) +getInNext = withStreamPtr (#{peek z_stream, next_in}) + +setOutFree :: Int -> Stream () +setOutFree val = withStreamPtr $ \ptr -> + #{poke z_stream, avail_out} ptr (fromIntegral val :: CUInt) + +getOutFree :: Stream Int +getOutFree = liftM (fromIntegral :: CUInt -> Int) $ + withStreamPtr (#{peek z_stream, avail_out}) + +setOutNext :: Ptr Word8 -> Stream () +setOutNext val = withStreamPtr (\ptr -> #{poke z_stream, next_out} ptr val) + +#ifdef DEBUG +getOutNext :: Stream (Ptr Word8) +getOutNext = withStreamPtr (#{peek z_stream, next_out}) +#endif + +inflateInit :: Format -> WindowBits -> Stream () +inflateInit format bits = do + checkFormatSupported format + err <- withStreamState $ \zstream -> + c_inflateInit2 zstream (fromIntegral (fromWindowBits format bits)) + failIfError err + getStreamState >>= unsafeLiftIO . 
addForeignPtrFinalizer c_inflateEnd + +deflateInit :: Format + -> CompressionLevel + -> Method + -> WindowBits + -> MemoryLevel + -> CompressionStrategy + -> Stream () +deflateInit format compLevel method bits memLevel strategy = do + checkFormatSupported format + err <- withStreamState $ \zstream -> + c_deflateInit2 zstream + (fromCompressionLevel compLevel) + (fromMethod method) + (fromWindowBits format bits) + (fromMemoryLevel memLevel) + (fromCompressionStrategy strategy) + failIfError err + getStreamState >>= unsafeLiftIO . addForeignPtrFinalizer c_deflateEnd + +inflate_ :: Flush -> Stream Status +inflate_ flush = do + err <- withStreamState $ \zstream -> + c_inflate zstream (fromFlush flush) + toStatus err + +deflate_ :: Flush -> Stream Status +deflate_ flush = do + err <- withStreamState $ \zstream -> + c_deflate zstream (fromFlush flush) + toStatus err + +-- | This never needs to be used as the stream's resources will be released +-- automatically when no longer needed, however this can be used to release +-- them early. Only use this when you can guarantee that the stream will no +-- longer be needed, for example if an error occurs or if the stream ends. +-- +finalise :: Stream () +#ifdef __GLASGOW_HASKELL__ +--TODO: finalizeForeignPtr is ghc-only +finalise = getStreamState >>= unsafeLiftIO . finalizeForeignPtr +#else +finalise = return () +#endif + +checkFormatSupported :: Format -> Stream () +checkFormatSupported format = do + version <- unsafeLiftIO (peekCAString =<< c_zlibVersion) + case version of + ('1':'.':'1':'.':_) + | format == GZip + || format == GZipOrZlib + -> fail $ "version 1.1.x of the zlib C library does not support the" + ++ " 'gzip' format via the in-memory api, only the 'raw' and " + ++ " 'zlib' formats." 
+ _ -> return () + +---------------------- +-- The foreign imports + +newtype StreamState = StreamState (Ptr StreamState) + +-- inflateInit2 and deflateInit2 are actually defined as macros in zlib.h +-- They are defined in terms of inflateInit2_ and deflateInit2_ passing two +-- additional arguments used to detect compatability problems. They pass the +-- version of zlib as a char * and the size of the z_stream struct. +-- If we compile via C then we can avoid this hassle however thats not really +-- kosher since the Haskell FFI is defined at the C ABI level, not the C +-- language level. There is no requirement to compile via C and pick up C +-- headers. So it's much better if we can make it work properly and that'd +-- also allow compiling via ghc's ncg which is a good thing since the C +-- backend is not going to be around forever. +-- +-- So we define c_inflateInit2 and c_deflateInit2 here as wrappers around +-- their _ counterparts and pass the extra args. + +foreign import ccall unsafe "zlib.h inflateInit2_" + c_inflateInit2_ :: StreamState -> CInt -> Ptr CChar -> CInt -> IO CInt + +c_inflateInit2 :: StreamState -> CInt -> IO CInt +c_inflateInit2 z n = + withCAString #{const_str ZLIB_VERSION} $ \versionStr -> + c_inflateInit2_ z n versionStr (#{const sizeof(z_stream)} :: CInt) + +foreign import ccall unsafe "zlib.h inflate" + c_inflate :: StreamState -> CInt -> IO CInt + +foreign import ccall unsafe "zlib.h &inflateEnd" + c_inflateEnd :: FinalizerPtr StreamState + +foreign import ccall unsafe "zlib.h inflateReset" + c_inflateReset :: StreamState -> IO CInt + +foreign import ccall unsafe "zlib.h deflateInit2_" + c_deflateInit2_ :: StreamState + -> CInt -> CInt -> CInt -> CInt -> CInt + -> Ptr CChar -> CInt + -> IO CInt + +c_deflateInit2 :: StreamState + -> CInt -> CInt -> CInt -> CInt -> CInt -> IO CInt +c_deflateInit2 z a b c d e = + withCAString #{const_str ZLIB_VERSION} $ \versionStr -> + c_deflateInit2_ z a b c d e versionStr (#{const sizeof(z_stream)} :: 
CInt) + +foreign import ccall unsafe "zlib.h deflateSetDictionary" + c_deflateSetDictionary :: StreamState + -> Ptr CChar + -> CUInt + -> IO CInt + +foreign import ccall unsafe "zlib.h inflateSetDictionary" + c_inflateSetDictionary :: StreamState + -> Ptr CChar + -> CUInt + -> IO CInt + +foreign import ccall unsafe "zlib.h deflate" + c_deflate :: StreamState -> CInt -> IO CInt + +foreign import ccall unsafe "zlib.h &deflateEnd" + c_deflateEnd :: FinalizerPtr StreamState + +foreign import ccall unsafe "zlib.h zlibVersion" + c_zlibVersion :: IO CString + +foreign import ccall unsafe "zlib.h adler32" + c_adler32 :: CULong + -> Ptr CChar + -> CUInt + -> IO CULong diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/Codec/Compression/Zlib.hs cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/Codec/Compression/Zlib.hs --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/Codec/Compression/Zlib.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/Codec/Compression/Zlib.hs 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,116 @@ +----------------------------------------------------------------------------- +-- | +-- Copyright : (c) 2006-2014 Duncan Coutts +-- License : BSD-style +-- +-- Maintainer : duncan@community.haskell.org +-- +-- Compression and decompression of data streams in the zlib format. +-- +-- The format is described in detail in RFC #1950: +-- +-- +-- See also the zlib home page: +-- +----------------------------------------------------------------------------- +module Codec.Compression.Zlib ( + + -- | This module provides pure functions for compressing and decompressing + -- streams of data in the zlib format and represented by lazy 'ByteString's. + -- This makes it easy to use either in memory or with disk or network IO. 
+ + -- * Simple compression and decompression + compress, + decompress, + + -- * Extended api with control over compression parameters + compressWith, + decompressWith, + + CompressParams(..), defaultCompressParams, + DecompressParams(..), defaultDecompressParams, + + -- ** The compression parameter types + CompressionLevel(..), + defaultCompression, + noCompression, + bestSpeed, + bestCompression, + compressionLevel, + Method(..), + deflateMethod, + WindowBits(..), + defaultWindowBits, + windowBits, + MemoryLevel(..), + defaultMemoryLevel, + minMemoryLevel, + maxMemoryLevel, + memoryLevel, + CompressionStrategy(..), + defaultStrategy, + filteredStrategy, + huffmanOnlyStrategy, + + ) where + +import Data.ByteString.Lazy (ByteString) + +import qualified Codec.Compression.Zlib.Internal as Internal +import Codec.Compression.Zlib.Internal hiding (compress, decompress) + + +-- | Decompress a stream of data in the zlib format. +-- +-- There are a number of errors that can occur. In each case an exception will +-- be thrown. The possible error conditions are: +-- +-- * if the stream does not start with a valid gzip header +-- +-- * if the compressed stream is corrupted +-- +-- * if the compressed stream ends permaturely +-- +-- Note that the decompression is performed /lazily/. Errors in the data stream +-- may not be detected until the end of the stream is demanded (since it is +-- only at the end that the final checksum can be checked). If this is +-- important to you, you must make sure to consume the whole decompressed +-- stream before doing any IO action that depends on it. +-- +decompress :: ByteString -> ByteString +decompress = decompressWith defaultDecompressParams + + +-- | Like 'decompress' but with the ability to specify various decompression +-- parameters. Typical usage: +-- +-- > decompressWith defaultCompressParams { ... 
} +-- +decompressWith :: DecompressParams -> ByteString -> ByteString +decompressWith = Internal.decompress zlibFormat + + +-- | Compress a stream of data into the zlib format. +-- +-- This uses the default compression parameters. In partiular it uses the +-- default compression level which favours a higher compression ratio over +-- compression speed, though it does not use the maximum compression level. +-- +-- Use 'compressWith' to adjust the compression level or other compression +-- parameters. +-- +compress :: ByteString -> ByteString +compress = compressWith defaultCompressParams + + +-- | Like 'compress' but with the ability to specify various compression +-- parameters. Typical usage: +-- +-- > compressWith defaultCompressParams { ... } +-- +-- In particular you can set the compression level: +-- +-- > compressWith defaultCompressParams { compressLevel = BestCompression } +-- +compressWith :: CompressParams -> ByteString -> ByteString +compressWith = Internal.compress zlibFormat diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/examples/gunzip.hs cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/examples/gunzip.hs --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/examples/gunzip.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/examples/gunzip.hs 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,6 @@ +module Main where + +import qualified Data.ByteString.Lazy as B +import qualified Codec.Compression.GZip as GZip + +main = B.interact GZip.decompress diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/examples/gzip.hs cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/examples/gzip.hs --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/examples/gzip.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/examples/gzip.hs 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,8 @@ +module Main where + +import qualified Data.ByteString.Lazy as B +import qualified Codec.Compression.GZip as GZip + +main = 
B.interact $ GZip.compressWith GZip.defaultCompressParams { + GZip.compressLevel = GZip.BestCompression + } diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/LICENSE cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/LICENSE --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/LICENSE 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,24 @@ +Copyright (c) 2006-2015, Duncan Coutts +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +3. This clause is intentionally left blank. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/Setup.hs cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/Setup.hs --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/Setup.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/Setup.hs 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,2 @@ +import Distribution.Simple +main = defaultMain Binary files /tmp/tmpVBg9ir/PzIulGlZus/cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/test/data/bad-crc.gz and /tmp/tmpVBg9ir/pYw8BlU4Qo/cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/test/data/bad-crc.gz differ Binary files /tmp/tmpVBg9ir/PzIulGlZus/cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/test/data/custom-dict.zlib and /tmp/tmpVBg9ir/pYw8BlU4Qo/cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/test/data/custom-dict.zlib differ diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/test/data/custom-dict.zlib-dict cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/test/data/custom-dict.zlib-dict --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/test/data/custom-dict.zlib-dict 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/test/data/custom-dict.zlib-dict 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1 @@ +Haskell \ No newline at end of file Binary files /tmp/tmpVBg9ir/PzIulGlZus/cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/test/data/hello.gz and /tmp/tmpVBg9ir/pYw8BlU4Qo/cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/test/data/hello.gz differ diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/test/data/not-gzip cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/test/data/not-gzip --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/test/data/not-gzip 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/test/data/not-gzip 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1 @@ +This is not a gzip file! 
Binary files /tmp/tmpVBg9ir/PzIulGlZus/cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/test/data/two-files.gz and /tmp/tmpVBg9ir/pYw8BlU4Qo/cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/test/data/two-files.gz differ diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/test/Test/Codec/Compression/Zlib/Internal.hs cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/test/Test/Codec/Compression/Zlib/Internal.hs --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/test/Test/Codec/Compression/Zlib/Internal.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/test/Test/Codec/Compression/Zlib/Internal.hs 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,32 @@ +{-# OPTIONS_GHC -fno-warn-orphans #-} + +-- | Test code and properties for "Codec.Compression.Zlib.Internal" +-- +module Test.Codec.Compression.Zlib.Internal where + +import Codec.Compression.Zlib.Internal +import Test.Codec.Compression.Zlib.Stream () +import Test.QuickCheck + +import Control.Monad (ap) + + +instance Arbitrary CompressParams where + arbitrary = return CompressParams `ap` arbitrary `ap` arbitrary + `ap` arbitrary `ap` arbitrary + `ap` arbitrary `ap` arbitraryBufferSize + `ap` return Nothing + +arbitraryBufferSize :: Gen Int +arbitraryBufferSize = frequency $ [(10, return n) | n <- [1..1024]] ++ + [(20, return n) | n <- [1025..8192]] ++ + [(40, return n) | n <- [8193..131072]] ++ + [(1, return n) | n <- [131072..1048576]] + + +instance Arbitrary DecompressParams where + arbitrary = return DecompressParams `ap` arbitrary + `ap` arbitraryBufferSize + `ap` return Nothing + `ap` arbitrary + diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/test/Test/Codec/Compression/Zlib/Stream.hs cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/test/Test/Codec/Compression/Zlib/Stream.hs --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/test/Test/Codec/Compression/Zlib/Stream.hs 1970-01-01 00:00:00.000000000 +0000 +++ 
cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/test/Test/Codec/Compression/Zlib/Stream.hs 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,40 @@ +{-# OPTIONS_GHC -fno-warn-orphans #-} + +-- | Test code and properties for "Codec.Compression.Zlib.Stream" +-- +module Test.Codec.Compression.Zlib.Stream where + +import Codec.Compression.Zlib.Internal +import Test.QuickCheck + + +instance Arbitrary Format where + -- GZipOrZlib omitted since it's not symmetric + arbitrary = elements [gzipFormat, zlibFormat, rawFormat] + + +instance Arbitrary Method where + arbitrary = return deflateMethod + + +instance Arbitrary CompressionLevel where + arbitrary = elements $ [defaultCompression, noCompression, + bestCompression, bestSpeed] + ++ map compressionLevel [1..9] + + +instance Arbitrary WindowBits where + arbitrary = elements $ defaultWindowBits:map windowBits [8..15] + + +instance Arbitrary MemoryLevel where + arbitrary = elements $ [defaultMemoryLevel, minMemoryLevel, maxMemoryLevel] + ++ [memoryLevel n | n <- [1..9]] + + + +instance Arbitrary CompressionStrategy where + arbitrary = elements $ [defaultStrategy, filteredStrategy, huffmanOnlyStrategy] + -- These are disabled by default in the package + -- as they are only available with zlib >=1.2 + -- ++ [RLE, Fixed] diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/test/Test.hs cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/test/Test.hs --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/test/Test.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/test/Test.hs 2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,340 @@ +{-# LANGUAGE CPP #-} + +module Main where + +import Codec.Compression.Zlib.Internal +import qualified Codec.Compression.Zlib as Zlib +import qualified Codec.Compression.GZip as GZip +import qualified Codec.Compression.Zlib.Raw as Raw + +import Test.Codec.Compression.Zlib.Internal () +import Test.Codec.Compression.Zlib.Stream () + +import Test.QuickCheck +import Test.HUnit +import 
Test.Tasty +import Test.Tasty.QuickCheck +import Test.Tasty.HUnit +import Utils () + +import Control.Monad +import Control.Exception +import qualified Data.ByteString.Char8 as BS.Char8 +import qualified Data.ByteString.Lazy as BL +import qualified Data.ByteString as BS +import System.IO +#if !(MIN_VERSION_base(4,6,0)) +import Prelude hiding (catch) +#endif + + +main :: IO () +main = defaultMain $ + testGroup "zlib tests" [ + testGroup "property tests" [ + testProperty "decompress . compress = id (standard)" prop_decompress_after_compress, + testProperty "decompress . compress = id (Zlib -> GZipOrZLib)" prop_gziporzlib1, + testProperty "decompress . compress = id (GZip -> GZipOrZlib)" prop_gziporzlib2, + testProperty "concatenated gzip members" prop_gzip_concat, + testProperty "multiple gzip members, boundaries (all 2-chunks)" prop_multiple_members_boundary2, + testProperty "multiple gzip members, boundaries (all 3-chunks)" prop_multiple_members_boundary3, + testProperty "prefixes of valid stream detected as truncated" prop_truncated + ], + testGroup "unit tests" [ + testCase "simple gzip case" test_simple_gzip, + testCase "detect bad crc" test_bad_crc, + testCase "detect non-gzip" test_non_gzip, + testCase "detect custom dictionary" test_custom_dict, + testCase "dectect inflate with wrong dict" test_wrong_dictionary, + testCase "dectect inflate with right dict" test_right_dictionary, + testCase "handle trailing data" test_trailing_data, + testCase "multiple gzip members" test_multiple_members, + testCase "check small input chunks" test_small_chunks, + testCase "check empty input" test_empty, + testCase "check exception raised" test_exception + ] + ] + + +prop_decompress_after_compress :: Format + -> CompressParams + -> DecompressParams + -> Property +prop_decompress_after_compress w cp dp = + (w /= zlibFormat || decompressWindowBits dp >= compressWindowBits cp) && + -- Zlib decompression has been observed to fail with both compress and decompress + -- window bits = 
8. This seems to be contrary to the docs and to a quick reading + -- of the zlib source code. + (decompressWindowBits dp > compressWindowBits cp || decompressWindowBits dp > WindowBits 8) && + decompressBufferSize dp > 0 && compressBufferSize cp > 0 ==> + liftM2 (==) (decompress w dp . compress w cp) id + + +prop_gziporzlib1 :: CompressParams + -> DecompressParams + -> Property +prop_gziporzlib1 cp dp = + decompressWindowBits dp > compressWindowBits cp && + decompressBufferSize dp > 0 && compressBufferSize cp > 0 ==> + liftM2 (==) (decompress gzipOrZlibFormat dp . compress zlibFormat cp) id + + +prop_gziporzlib2 :: CompressParams + -> DecompressParams + -> Property +prop_gziporzlib2 cp dp = + decompressWindowBits dp >= compressWindowBits cp && + decompressBufferSize dp > 0 && compressBufferSize cp > 0 ==> + liftM2 (==) (decompress gzipOrZlibFormat dp . compress gzipFormat cp) id + +prop_gzip_concat :: CompressParams + -> DecompressParams + -> BL.ByteString + -> Property +prop_gzip_concat cp dp input = + decompressWindowBits dp >= compressWindowBits cp && + decompressBufferSize dp > 0 && compressBufferSize cp > 0 ==> + let catComp = BL.concat (replicate 5 (compress gzipFormat cp input)) + compCat = compress gzipFormat cp (BL.concat (replicate 5 input)) + + in decompress gzipFormat dp { decompressAllMembers = True } catComp + == decompress gzipFormat dp { decompressAllMembers = True } compCat + +prop_multiple_members_boundary2 :: Property +prop_multiple_members_boundary2 = + forAll shortStrings $ \bs -> + all (\c -> decomp c == BL.append bs bs) + (twoChunkSplits (comp bs `BL.append` comp bs)) + where + comp = compress gzipFormat defaultCompressParams + decomp = decompress gzipFormat defaultDecompressParams + + shortStrings = fmap BL.pack $ listOf arbitrary + +prop_multiple_members_boundary3 :: Property +prop_multiple_members_boundary3 = + forAll shortStrings $ \bs -> + all (\c -> decomp c == BL.append bs bs) + (threeChunkSplits (comp bs `BL.append` comp bs)) + where 
+ comp = compress gzipFormat defaultCompressParams + decomp = decompress gzipFormat defaultDecompressParams + + shortStrings = sized $ \sz -> resize (sz `div` 10) $ + fmap BL.pack $ listOf arbitrary + +prop_truncated :: Format -> Property +prop_truncated format = + forAll shortStrings $ \bs -> + all (truncated decomp) + (init (BL.inits (comp bs))) + -- All the initial prefixes of a valid compressed stream should be detected + -- as truncated. + where + comp = compress format defaultCompressParams + decomp = decompressST format defaultDecompressParams + truncated = foldDecompressStreamWithInput (\_ r -> r) (\_ -> False) + (\err -> case err of TruncatedInput -> True; _ -> False) + + shortStrings = sized $ \sz -> resize (sz `div` 6) arbitrary + + +test_simple_gzip :: Assertion +test_simple_gzip = + withSampleData "hello.gz" $ \hnd -> + let decomp = decompressIO gzipFormat defaultDecompressParams + in assertDecompressOk hnd decomp + +test_bad_crc :: Assertion +test_bad_crc = + withSampleData "bad-crc.gz" $ \hnd -> do + let decomp = decompressIO gzipFormat defaultDecompressParams + err <- assertDecompressError hnd decomp + msg <- assertDataFormatError err + msg @?= "incorrect data check" + +test_non_gzip :: Assertion +test_non_gzip = do + withSampleData "not-gzip" $ \hnd -> do + let decomp = decompressIO gzipFormat defaultDecompressParams + err <- assertDecompressError hnd decomp + msg <- assertDataFormatError err + msg @?= "incorrect header check" + + withSampleData "not-gzip" $ \hnd -> do + let decomp = decompressIO zlibFormat defaultDecompressParams + err <- assertDecompressError hnd decomp + msg <- assertDataFormatError err + msg @?= "incorrect header check" + + withSampleData "not-gzip" $ \hnd -> do + let decomp = decompressIO rawFormat defaultDecompressParams + err <- assertDecompressError hnd decomp + msg <- assertDataFormatError err + msg @?= "invalid code lengths set" + + withSampleData "not-gzip" $ \hnd -> do + let decomp = decompressIO gzipOrZlibFormat 
defaultDecompressParams + err <- assertDecompressError hnd decomp + msg <- assertDataFormatError err + msg @?= "incorrect header check" + +test_custom_dict :: Assertion +test_custom_dict = + withSampleData "custom-dict.zlib" $ \hnd -> do + let decomp = decompressIO zlibFormat defaultDecompressParams + err <- assertDecompressError hnd decomp + err @?= DictionaryRequired + +test_wrong_dictionary :: Assertion +test_wrong_dictionary = do + withSampleData "custom-dict.zlib" $ \hnd -> do + let decomp = decompressIO zlibFormat defaultDecompressParams { + decompressDictionary = -- wrong dict! + Just (BS.pack [65,66,67]) + } + + err <- assertDecompressError hnd decomp + err @?= DictionaryMismatch + +test_right_dictionary :: Assertion +test_right_dictionary = do + withSampleData "custom-dict.zlib" $ \hnd -> do + dict <- readSampleData "custom-dict.zlib-dict" + let decomp = decompressIO zlibFormat defaultDecompressParams { + decompressDictionary = + Just (toStrict dict) + } + assertDecompressOk hnd decomp + +test_trailing_data :: Assertion +test_trailing_data = + withSampleData "two-files.gz" $ \hnd -> do + let decomp = decompressIO gzipFormat defaultDecompressParams { + decompressAllMembers = False + } + chunks <- assertDecompressOkChunks hnd decomp + case chunks of + [chunk] -> chunk @?= BS.Char8.pack "Test 1" + _ -> assertFailure "expected single chunk" + +test_multiple_members :: Assertion +test_multiple_members = + withSampleData "two-files.gz" $ \hnd -> do + let decomp = decompressIO gzipFormat defaultDecompressParams { + decompressAllMembers = True + } + chunks <- assertDecompressOkChunks hnd decomp + case chunks of + [chunk1, + chunk2] -> do chunk1 @?= BS.Char8.pack "Test 1" + chunk2 @?= BS.Char8.pack "Test 2" + _ -> assertFailure "expected two chunks" + +test_small_chunks :: Assertion +test_small_chunks = do + uncompressedFile <- readSampleData "not-gzip" + GZip.compress (smallChunks uncompressedFile) @?= GZip.compress uncompressedFile + Zlib.compress (smallChunks 
uncompressedFile) @?= Zlib.compress uncompressedFile + Raw.compress (smallChunks uncompressedFile) @?= Raw.compress uncompressedFile + + GZip.decompress (smallChunks (GZip.compress uncompressedFile)) @?= uncompressedFile + Zlib.decompress (smallChunks (Zlib.compress uncompressedFile)) @?= uncompressedFile + Raw.decompress (smallChunks (Raw.compress uncompressedFile)) @?= uncompressedFile + + compressedFile <- readSampleData "hello.gz" + (GZip.decompress . smallChunks) compressedFile @?= GZip.decompress compressedFile + +test_empty :: Assertion +test_empty = do + -- Regression test to make sure we only ask for input once in the case of + -- initially empty input. We previously asked for input twice before + -- returning the error. + let decomp = decompressIO zlibFormat defaultDecompressParams + case decomp of + DecompressInputRequired next -> do + decomp' <- next BS.empty + case decomp' of + DecompressStreamError TruncatedInput -> return () + _ -> assertFailure "expected truncated error" + + _ -> assertFailure "expected input" + +test_exception :: Assertion +test_exception = + (do + compressedFile <- readSampleData "bad-crc.gz" + _ <- evaluate (BL.length (GZip.decompress compressedFile)) + assertFailure "expected exception") + + `catch` \err -> do + msg <- assertDataFormatError err + msg @?= "incorrect data check" + +toStrict :: BL.ByteString -> BS.ByteString +#if MIN_VERSION_bytestring(0,10,0) +toStrict = BL.toStrict +#else +toStrict = BS.concat . BL.toChunks +#endif + +----------------------- +-- Chunk boundary utils + +smallChunks :: BL.ByteString -> BL.ByteString +smallChunks = BL.fromChunks . map (\c -> BS.pack [c]) . 
BL.unpack + +twoChunkSplits :: BL.ByteString -> [BL.ByteString] +twoChunkSplits bs = zipWith (\a b -> BL.fromChunks [a,b]) (BS.inits sbs) (BS.tails sbs) + where + sbs = toStrict bs + +threeChunkSplits :: BL.ByteString -> [BL.ByteString] +threeChunkSplits bs = + [ BL.fromChunks [a,b,c] + | (a,x) <- zip (BS.inits sbs) (BS.tails sbs) + , (b,c) <- zip (BS.inits x) (BS.tails x) ] + where + sbs = toStrict bs + +-------------- +-- HUnit Utils + +readSampleData :: FilePath -> IO BL.ByteString +readSampleData file = BL.readFile ("test/data/" ++ file) + +withSampleData :: FilePath -> (Handle -> IO a) -> IO a +withSampleData file = withFile ("test/data/" ++ file) ReadMode + +expected :: String -> String -> IO a +expected e g = assertFailure ("expected: " ++ e ++ "\nbut got: " ++ g) + >> fail "" + +assertDecompressOk :: Handle -> DecompressStream IO -> Assertion +assertDecompressOk hnd = + foldDecompressStream + (BS.hGet hnd 4000 >>=) + (\_ r -> r) + (\_ -> return ()) + (\err -> expected "decompress ok" (show err)) + +assertDecompressOkChunks :: Handle -> DecompressStream IO -> IO [BS.ByteString] +assertDecompressOkChunks hnd = + foldDecompressStream + (BS.hGet hnd 4000 >>=) + (\chunk -> liftM (chunk:)) + (\_ -> return []) + (\err -> expected "decompress ok" (show err)) + +assertDecompressError :: Handle -> DecompressStream IO -> IO DecompressError +assertDecompressError hnd = + foldDecompressStream + (BS.hGet hnd 4000 >>=) + (\_ r -> r) + (\_ -> expected "StreamError" "StreamEnd") + return + +assertDataFormatError :: DecompressError -> IO String +assertDataFormatError (DataFormatError detail) = return detail +assertDataFormatError _ = assertFailure "expected DataError" + >> return "" diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/test/Utils.hs cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/test/Utils.hs --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/test/Utils.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/test/Utils.hs 
2015-05-16 13:12:10.000000000 +0000 @@ -0,0 +1,28 @@ +{-# OPTIONS_GHC -fno-warn-orphans #-} +module Utils where + +import qualified Data.ByteString.Lazy as BL +import qualified Data.ByteString as BS + +import Test.QuickCheck + +------------------- +-- QuickCheck Utils + +maxStrSize :: Double +maxStrSize = 500 + +-- convert a QC size parameter into one for generating long lists, +-- growing inverse exponentially up to maxStrSize +strSize :: Int -> Int +strSize n = floor (maxStrSize * (1 - 2 ** (-fromIntegral n/100))) + +instance Arbitrary BL.ByteString where + arbitrary = sized $ \sz -> fmap BL.fromChunks $ listOf $ resize (sz `div` 2) arbitrary + shrink = map BL.pack . shrink . BL.unpack + +instance Arbitrary BS.ByteString where + arbitrary = sized $ \sz -> resize (strSize sz) $ fmap BS.pack $ listOf $ arbitrary + shrink = map BS.pack . shrink . BS.unpack + + diff -Nru cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/zlib.cabal cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/zlib.cabal --- cabal-install-1.22-1.22.6.0/src/zlib-0.6.1.1/zlib.cabal 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/src/zlib-0.6.1.1/zlib.cabal 2016-06-02 07:15:44.000000000 +0000 @@ -0,0 +1,87 @@ +name: zlib +version: 0.6.1.1 +x-revision: 3 +copyright: (c) 2006-2015 Duncan Coutts +license: BSD3 +license-file: LICENSE +author: Duncan Coutts +maintainer: Duncan Coutts +bug-reports: https://github.com/haskell/zlib/issues +category: Codec +synopsis: Compression and decompression in the gzip and zlib formats +description: This package provides a pure interface for compressing and + decompressing streams of data represented as lazy + 'ByteString's. It uses the zlib C library so it has high + performance. It supports the \"zlib\", \"gzip\" and \"raw\" + compression formats. + . + It provides a convenient high level API suitable for most + tasks and for the few cases where more control is needed it + provides access to the full zlib feature set. 
+build-type: Simple +cabal-version: >= 1.10 +extra-source-files: changelog + -- zlib C sources (for Windows) + cbits/crc32.h cbits/inffast.h cbits/inflate.h + cbits/trees.h cbits/deflate.h cbits/inffixed.h + cbits/inftrees.h cbits/zutil.h cbits/gzguts.h + -- test data files + test/data/bad-crc.gz test/data/custom-dict.zlib + test/data/custom-dict.zlib-dict test/data/hello.gz + test/data/not-gzip test/data/two-files.gz + -- demo programs: + examples/gzip.hs examples/gunzip.hs + +source-repository head + type: git + location: https://github.com/haskell/zlib.git + +library + exposed-modules: Codec.Compression.GZip, + Codec.Compression.Zlib, + Codec.Compression.Zlib.Raw, + Codec.Compression.Zlib.Internal + other-modules: Codec.Compression.Zlib.Stream + if impl(ghc < 7) + default-language: Haskell98 + default-extensions: PatternGuards + else + default-language: Haskell2010 + other-extensions: CPP, ForeignFunctionInterface, RankNTypes, BangPatterns, + DeriveDataTypeable + if impl(ghc >= 7.2) + other-extensions: DeriveGeneric + build-depends: base >= 4 && < 5, + bytestring >= 0.9 && < 0.12 + if impl(ghc >= 7.2 && < 7.6) + build-depends: ghc-prim + includes: zlib.h + ghc-options: -Wall -fwarn-tabs + if !os(windows) + -- Normally we use the the standard system zlib: + extra-libraries: z + else + -- However for the benefit of users of Windows (which does not have zlib + -- by default) we bundle a complete copy of the C sources of zlib-1.2.8 + c-sources: cbits/adler32.c cbits/compress.c cbits/crc32.c + cbits/deflate.c cbits/infback.c + cbits/inffast.c cbits/inflate.c cbits/inftrees.c + cbits/trees.c cbits/uncompr.c cbits/zutil.c + include-dirs: cbits + install-includes: zlib.h zconf.h + +test-suite tests + type: exitcode-stdio-1.0 + main-is: Test.hs + other-modules: Utils, + Test.Codec.Compression.Zlib.Internal, + Test.Codec.Compression.Zlib.Stream + hs-source-dirs: test + default-language: Haskell2010 + build-depends: base, bytestring, zlib, + QuickCheck == 2.*, + HUnit >= 
1.2 && <1.4, + tasty >= 0.8 && < 0.12, + tasty-quickcheck == 0.8.*, + tasty-hunit >= 0.8 && < 0.9 + ghc-options: -Wall Binary files /tmp/tmpVBg9ir/PzIulGlZus/cabal-install-1.22-1.22.6.0/stm-2.4.4.tar.gz and /tmp/tmpVBg9ir/pYw8BlU4Qo/cabal-install-1.22-1.22.9.0/stm-2.4.4.tar.gz differ diff -Nru cabal-install-1.22-1.22.6.0/tests/PackageTests/Exec/Check.hs cabal-install-1.22-1.22.9.0/tests/PackageTests/Exec/Check.hs --- cabal-install-1.22-1.22.6.0/tests/PackageTests/Exec/Check.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/tests/PackageTests/Exec/Check.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,146 +0,0 @@ -{-# LANGUAGE CPP #-} -module PackageTests.Exec.Check - ( tests - ) where - - -import PackageTests.PackageTester - -import Test.Framework as TF (Test) -import Test.Framework.Providers.HUnit (testCase) -import Test.HUnit (assertBool) - -#if !MIN_VERSION_base(4,8,0) -import Control.Applicative ((<$>)) -#endif -import Control.Monad (when) -import Data.List (intercalate, isInfixOf) -import System.FilePath (()) -import System.Directory (getDirectoryContents) - -dir :: FilePath -dir = packageTestsDirectory "Exec" - -tests :: TestsPaths -> [TF.Test] -tests paths = - [ testCase "exits with failure if given no argument" $ do - result <- cabal_exec paths dir [] - assertExecFailed result - - , testCase "prints error message if given no argument" $ do - result <- cabal_exec paths dir [] - assertExecFailed result - let output = outputText result - expected = "specify an executable to run" - errMsg = "should have requested an executable be specified\n" ++ - output - assertBool errMsg $ - expected `isInfixOf` (intercalate " " . lines $ output) - - , testCase "runs the given command" $ do - result <- cabal_exec paths dir ["echo", "this", "string"] - assertExecSucceeded result - let output = outputText result - expected = "this string" - errMsg = "should have ran the given command\n" ++ output - assertBool errMsg $ - expected `isInfixOf` (intercalate " " . 
lines $ output) - - , testCase "can run executables installed in the sandbox" $ do - -- Test that an executable installed into the sandbox can be found. - -- We do this by removing any existing sandbox. Checking that the - -- executable cannot be found. Creating a new sandbox. Installing - -- the executable and checking it can be run. - - cleanPreviousBuilds paths - assertMyExecutableNotFound paths - assertPackageInstall paths - - result <- cabal_exec paths dir ["my-executable"] - assertExecSucceeded result - let output = outputText result - expected = "This is my-executable" - errMsg = "should have found a my-executable\n" ++ output - assertBool errMsg $ - expected `isInfixOf` (intercalate " " . lines $ output) - - , testCase "adds the sandbox bin directory to the PATH" $ do - cleanPreviousBuilds paths - assertMyExecutableNotFound paths - assertPackageInstall paths - - result <- cabal_exec paths dir ["bash", "--", "-c", "my-executable"] - assertExecSucceeded result - let output = outputText result - expected = "This is my-executable" - errMsg = "should have found a my-executable\n" ++ output - assertBool errMsg $ - expected `isInfixOf` (intercalate " " . lines $ output) - - , testCase "configures GHC to use the sandbox" $ do - let libNameAndVersion = "my-0.1" - - cleanPreviousBuilds paths - assertPackageInstall paths - - assertMyLibIsNotAvailableOutsideofSandbox paths libNameAndVersion - - result <- cabal_exec paths dir ["ghc-pkg", "list"] - assertExecSucceeded result - let output = outputText result - errMsg = "my library should have been found" - assertBool errMsg $ - libNameAndVersion `isInfixOf` (intercalate " " . 
lines $ output) - - - -- , testCase "can find executables built from the package" $ do - - , testCase "configures cabal to use the sandbox" $ do - let libNameAndVersion = "my-0.1" - - cleanPreviousBuilds paths - assertPackageInstall paths - - assertMyLibIsNotAvailableOutsideofSandbox paths libNameAndVersion - - result <- cabal_exec paths dir ["bash", "--", "-c", "cd subdir ; cabal sandbox hc-pkg list"] - assertExecSucceeded result - let output = outputText result - errMsg = "my library should have been found" - assertBool errMsg $ - libNameAndVersion `isInfixOf` (intercalate " " . lines $ output) - ] - -cleanPreviousBuilds :: TestsPaths -> IO () -cleanPreviousBuilds paths = do - sandboxExists <- not . null . filter (== "cabal.sandbox.config") <$> - getDirectoryContents dir - assertCleanSucceeded =<< cabal_clean paths dir [] - when sandboxExists $ do - assertSandboxSucceeded =<< cabal_sandbox paths dir ["delete"] - - -assertPackageInstall :: TestsPaths -> IO () -assertPackageInstall paths = do - assertSandboxSucceeded =<< cabal_sandbox paths dir ["init"] - assertInstallSucceeded =<< cabal_install paths dir [] - - -assertMyExecutableNotFound :: TestsPaths -> IO () -assertMyExecutableNotFound paths = do - result <- cabal_exec paths dir ["my-executable"] - assertExecFailed result - let output = outputText result - expected = "cabal: The program 'my-executable' is required but it " ++ - "could not be found" - errMsg = "should not have found a my-executable\n" ++ output - assertBool errMsg $ - expected `isInfixOf` (intercalate " " . lines $ output) - - - -assertMyLibIsNotAvailableOutsideofSandbox :: TestsPaths -> String -> IO () -assertMyLibIsNotAvailableOutsideofSandbox paths libNameAndVersion = do - (_, _, output) <- run (Just $ dir) (ghcPkgPath paths) ["list"] - assertBool "my library should not have been found" $ not $ - libNameAndVersion `isInfixOf` (intercalate " " . 
lines $ output) diff -Nru cabal-install-1.22-1.22.6.0/tests/PackageTests/Freeze/Check.hs cabal-install-1.22-1.22.9.0/tests/PackageTests/Freeze/Check.hs --- cabal-install-1.22-1.22.6.0/tests/PackageTests/Freeze/Check.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/tests/PackageTests/Freeze/Check.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,114 +0,0 @@ -{-# LANGUAGE ScopedTypeVariables #-} - -module PackageTests.Freeze.Check - ( tests - ) where - -import PackageTests.PackageTester - -import Test.Framework as TF (Test) -import Test.Framework.Providers.HUnit (testCase) -import Test.HUnit (assertBool) - -import qualified Control.Exception.Extensible as E -import Data.List (intercalate, isInfixOf) -import System.Directory (doesFileExist, removeFile) -import System.FilePath (()) -import System.IO.Error (isDoesNotExistError) - -dir :: FilePath -dir = packageTestsDirectory "Freeze" - -tests :: TestsPaths -> [TF.Test] -tests paths = - [ testCase "runs without error" $ do - removeCabalConfig - result <- cabal_freeze paths dir [] - assertFreezeSucceeded result - - , testCase "freezes direct dependencies" $ do - removeCabalConfig - result <- cabal_freeze paths dir [] - assertFreezeSucceeded result - c <- readCabalConfig - assertBool ("should have frozen base\n" ++ c) $ - " base ==" `isInfixOf` (intercalate " " $ lines $ c) - - , testCase "freezes transitory dependencies" $ do - removeCabalConfig - result <- cabal_freeze paths dir [] - assertFreezeSucceeded result - c <- readCabalConfig - assertBool ("should have frozen ghc-prim\n" ++ c) $ - " ghc-prim ==" `isInfixOf` (intercalate " " $ lines $ c) - - , testCase "does not freeze packages which are not dependend upon" $ do - -- XXX Test this against a package installed in the sandbox but - -- not depended upon. 
- removeCabalConfig - result <- cabal_freeze paths dir [] - assertFreezeSucceeded result - c <- readCabalConfig - assertBool ("should not have frozen exceptions\n" ++ c) $ not $ - " exceptions ==" `isInfixOf` (intercalate " " $ lines $ c) - - , testCase "does not include a constraint for the package being frozen" $ do - removeCabalConfig - result <- cabal_freeze paths dir [] - assertFreezeSucceeded result - c <- readCabalConfig - assertBool ("should not have frozen self\n" ++ c) $ not $ - " my ==" `isInfixOf` (intercalate " " $ lines $ c) - - , testCase "--dry-run does not modify the cabal.config file" $ do - removeCabalConfig - result <- cabal_freeze paths dir ["--dry-run"] - assertFreezeSucceeded result - c <- doesFileExist $ dir "cabal.config" - assertBool "cabal.config file should not have been created" (not c) - - , testCase "--enable-tests freezes test dependencies" $ do - removeCabalConfig - result <- cabal_freeze paths dir ["--enable-tests"] - assertFreezeSucceeded result - c <- readCabalConfig - assertBool ("should have frozen test-framework\n" ++ c) $ - " test-framework ==" `isInfixOf` (intercalate " " $ lines $ c) - - , testCase "--disable-tests does not freeze test dependencies" $ do - removeCabalConfig - result <- cabal_freeze paths dir ["--disable-tests"] - assertFreezeSucceeded result - c <- readCabalConfig - assertBool ("should not have frozen test-framework\n" ++ c) $ not $ - " test-framework ==" `isInfixOf` (intercalate " " $ lines $ c) - - , testCase "--enable-benchmarks freezes benchmark dependencies" $ do - removeCabalConfig - result <- cabal_freeze paths dir ["--disable-benchmarks"] - assertFreezeSucceeded result - c <- readCabalConfig - assertBool ("should not have frozen criterion\n" ++ c) $ not $ - " criterion ==" `isInfixOf` (intercalate " " $ lines $ c) - - , testCase "--disable-benchmarks does not freeze benchmark dependencies" $ do - removeCabalConfig - result <- cabal_freeze paths dir ["--disable-benchmarks"] - assertFreezeSucceeded 
result - c <- readCabalConfig - assertBool ("should not have frozen criterion\n" ++ c) $ not $ - " criterion ==" `isInfixOf` (intercalate " " $ lines $ c) - ] - -removeCabalConfig :: IO () -removeCabalConfig = do - removeFile (dir "cabal.config") - `E.catch` \ (e :: IOError) -> - if isDoesNotExistError e - then return () - else E.throw e - - -readCabalConfig :: IO String -readCabalConfig = do - readFile $ dir "cabal.config" diff -Nru cabal-install-1.22-1.22.6.0/tests/PackageTests/Freeze/my.cabal cabal-install-1.22-1.22.9.0/tests/PackageTests/Freeze/my.cabal --- cabal-install-1.22-1.22.6.0/tests/PackageTests/Freeze/my.cabal 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/tests/PackageTests/Freeze/my.cabal 1970-01-01 00:00:00.000000000 +0000 @@ -1,21 +0,0 @@ -name: my -version: 0.1 -license: BSD3 -cabal-version: >= 1.20.0 -build-type: Simple - -library - exposed-modules: Foo - build-depends: base - -test-suite test-Foo - type: exitcode-stdio-1.0 - hs-source-dirs: tests - main-is: test-Foo.hs - build-depends: base, my, test-framework - -benchmark bench-Foo - type: exitcode-stdio-1.0 - hs-source-dirs: benchmarks - main-is: benchmark-Foo.hs - build-depends: base, my, criterion diff -Nru cabal-install-1.22-1.22.6.0/tests/PackageTests/MultipleSource/Check.hs cabal-install-1.22-1.22.9.0/tests/PackageTests/MultipleSource/Check.hs --- cabal-install-1.22-1.22.6.0/tests/PackageTests/MultipleSource/Check.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/tests/PackageTests/MultipleSource/Check.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,28 +0,0 @@ -module PackageTests.MultipleSource.Check - ( tests - ) where - - -import PackageTests.PackageTester - -import Test.Framework as TF (Test) -import Test.Framework.Providers.HUnit (testCase) - -import Control.Monad (void, when) -import System.Directory (doesDirectoryExist) -import System.FilePath (()) - -dir :: FilePath -dir = packageTestsDirectory "MultipleSource" - -tests :: TestsPaths -> 
[TF.Test] -tests paths = - [ testCase "finds second source of multiple source" $ do - sandboxExists <- doesDirectoryExist $ dir ".cabal-sandbox" - when sandboxExists $ - void $ cabal_sandbox paths dir ["delete"] - assertSandboxSucceeded =<< cabal_sandbox paths dir ["init"] - assertSandboxSucceeded =<< cabal_sandbox paths dir ["add-source", "p"] - assertSandboxSucceeded =<< cabal_sandbox paths dir ["add-source", "q"] - assertInstallSucceeded =<< cabal_install paths dir ["q"] - ] diff -Nru cabal-install-1.22-1.22.6.0/tests/PackageTests/PackageTester.hs cabal-install-1.22-1.22.9.0/tests/PackageTests/PackageTester.hs --- cabal-install-1.22-1.22.6.0/tests/PackageTests/PackageTester.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/tests/PackageTests/PackageTester.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,232 +0,0 @@ -{-# LANGUAGE ScopedTypeVariables #-} - --- TODO This module was originally based on the PackageTests.PackageTester --- module in Cabal, however it has a few differences. I suspect that as --- this module ages the two modules will diverge further. As such, I have --- not attempted to merge them into a single module nor to extract a common --- module from them. Refactor this module and/or Cabal's --- PackageTests.PackageTester to remove commonality. --- 2014-05-15 Ben Armston - --- | Routines for black-box testing cabal-install. --- --- Instead of driving the tests by making library calls into --- Distribution.Simple.* or Distribution.Client.* this module only every --- executes the `cabal-install` binary. --- --- You can set the following VERBOSE environment variable to control --- the verbosity of the output generated by this module. -module PackageTests.PackageTester - ( TestsPaths(..) - , Result(..) 
- - , packageTestsDirectory - , packageTestsConfigFile - - -- * Running cabal commands - , cabal_clean - , cabal_exec - , cabal_freeze - , cabal_install - , cabal_sandbox - , run - - -- * Test helpers - , assertCleanSucceeded - , assertExecFailed - , assertExecSucceeded - , assertFreezeSucceeded - , assertInstallSucceeded - , assertSandboxSucceeded - ) where - -import qualified Control.Exception.Extensible as E -import Control.Monad (when, unless) -import Data.Maybe (fromMaybe) -import System.Directory (canonicalizePath, doesFileExist) -import System.Environment (getEnv) -import System.Exit (ExitCode(ExitSuccess)) -import System.FilePath ( (<.>) ) -import System.IO (hClose, hGetChar, hIsEOF) -import System.IO.Error (isDoesNotExistError) -import System.Process (runProcess, waitForProcess) -import Test.HUnit (Assertion, assertFailure) - -import Distribution.Simple.BuildPaths (exeExtension) -import Distribution.Simple.Utils (printRawCommandAndArgs) -import Distribution.Compat.CreatePipe (createPipe) -import Distribution.ReadE (readEOrFail) -import Distribution.Verbosity (Verbosity, flagToVerbosity, normal) - -data Success = Failure - -- | ConfigureSuccess - -- | BuildSuccess - -- | TestSuccess - -- | BenchSuccess - | CleanSuccess - | ExecSuccess - | FreezeSuccess - | InstallSuccess - | SandboxSuccess - deriving (Eq, Show) - -data TestsPaths = TestsPaths - { cabalPath :: FilePath -- ^ absolute path to cabal executable. - , ghcPkgPath :: FilePath -- ^ absolute path to ghc-pkg executable. - , configPath :: FilePath -- ^ absolute path of the default config file - -- to use for tests (tests are free to use - -- a different one). 
- } - -data Result = Result - { successful :: Bool - , success :: Success - , outputText :: String - } deriving Show - -nullResult :: Result -nullResult = Result True Failure "" - ------------------------------------------------------------------------- --- * Config - -packageTestsDirectory :: FilePath -packageTestsDirectory = "PackageTests" - -packageTestsConfigFile :: FilePath -packageTestsConfigFile = "cabal-config" - ------------------------------------------------------------------------- --- * Running cabal commands - -recordRun :: (String, ExitCode, String) -> Success -> Result -> Result -recordRun (cmd, exitCode, exeOutput) thisSucc res = - res { successful = successful res && exitCode == ExitSuccess - , success = if exitCode == ExitSuccess then thisSucc - else success res - , outputText = - (if null $ outputText res then "" else outputText res ++ "\n") ++ - cmd ++ "\n" ++ exeOutput - } - --- | Run the clean command and return its result. -cabal_clean :: TestsPaths -> FilePath -> [String] -> IO Result -cabal_clean paths dir args = do - res <- cabal paths dir (["clean"] ++ args) - return $ recordRun res CleanSuccess nullResult - --- | Run the exec command and return its result. -cabal_exec :: TestsPaths -> FilePath -> [String] -> IO Result -cabal_exec paths dir args = do - res <- cabal paths dir (["exec"] ++ args) - return $ recordRun res ExecSuccess nullResult - --- | Run the freeze command and return its result. -cabal_freeze :: TestsPaths -> FilePath -> [String] -> IO Result -cabal_freeze paths dir args = do - res <- cabal paths dir (["freeze"] ++ args) - return $ recordRun res FreezeSuccess nullResult - --- | Run the install command and return its result. -cabal_install :: TestsPaths -> FilePath -> [String] -> IO Result -cabal_install paths dir args = do - res <- cabal paths dir (["install"] ++ args) - return $ recordRun res InstallSuccess nullResult - --- | Run the sandbox command and return its result. 
-cabal_sandbox :: TestsPaths -> FilePath -> [String] -> IO Result -cabal_sandbox paths dir args = do - res <- cabal paths dir (["sandbox"] ++ args) - return $ recordRun res SandboxSuccess nullResult - --- | Returns the command that was issued, the return code, and the output text. -cabal :: TestsPaths -> FilePath -> [String] -> IO (String, ExitCode, String) -cabal paths dir cabalArgs = do - run (Just dir) (cabalPath paths) args - where - args = configFileArg : cabalArgs - configFileArg = "--config-file=" ++ configPath paths - --- | Returns the command that was issued, the return code, and the output text -run :: Maybe FilePath -> String -> [String] -> IO (String, ExitCode, String) -run cwd path args = do - verbosity <- getVerbosity - -- path is relative to the current directory; canonicalizePath makes it - -- absolute, so that runProcess will find it even when changing directory. - path' <- do pathExists <- doesFileExist path - canonicalizePath (if pathExists then path else path <.> exeExtension) - printRawCommandAndArgs verbosity path' args - (readh, writeh) <- createPipe - pid <- runProcess path' args cwd Nothing Nothing (Just writeh) (Just writeh) - - -- fork off a thread to start consuming the output - out <- suckH [] readh - hClose readh - - -- wait for the program to terminate - exitcode <- waitForProcess pid - let fullCmd = unwords (path' : args) - return ("\"" ++ fullCmd ++ "\" in " ++ fromMaybe "" cwd, exitcode, out) - where - suckH output h = do - eof <- hIsEOF h - if eof - then return (reverse output) - else do - c <- hGetChar h - suckH (c:output) h - ------------------------------------------------------------------------- --- * Test helpers - -assertCleanSucceeded :: Result -> Assertion -assertCleanSucceeded result = unless (successful result) $ - assertFailure $ - "expected: \'cabal clean\' should succeed\n" ++ - " output: " ++ outputText result - -assertExecSucceeded :: Result -> Assertion -assertExecSucceeded result = unless (successful result) $ - 
assertFailure $ - "expected: \'cabal exec\' should succeed\n" ++ - " output: " ++ outputText result - -assertExecFailed :: Result -> Assertion -assertExecFailed result = when (successful result) $ - assertFailure $ - "expected: \'cabal exec\' should fail\n" ++ - " output: " ++ outputText result - -assertFreezeSucceeded :: Result -> Assertion -assertFreezeSucceeded result = unless (successful result) $ - assertFailure $ - "expected: \'cabal freeze\' should succeed\n" ++ - " output: " ++ outputText result - -assertInstallSucceeded :: Result -> Assertion -assertInstallSucceeded result = unless (successful result) $ - assertFailure $ - "expected: \'cabal install\' should succeed\n" ++ - " output: " ++ outputText result - -assertSandboxSucceeded :: Result -> Assertion -assertSandboxSucceeded result = unless (successful result) $ - assertFailure $ - "expected: \'cabal sandbox\' should succeed\n" ++ - " output: " ++ outputText result - ------------------------------------------------------------------------- --- Verbosity - -lookupEnv :: String -> IO (Maybe String) -lookupEnv name = - (fmap Just $ getEnv name) - `E.catch` \ (e :: IOError) -> - if isDoesNotExistError e - then return Nothing - else E.throw e - --- TODO: Convert to a "-v" flag instead. -getVerbosity :: IO Verbosity -getVerbosity = do - maybe normal (readEOrFail flagToVerbosity) `fmap` lookupEnv "VERBOSE" diff -Nru cabal-install-1.22-1.22.6.0/tests/PackageTests.hs cabal-install-1.22-1.22.9.0/tests/PackageTests.hs --- cabal-install-1.22-1.22.6.0/tests/PackageTests.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/tests/PackageTests.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,88 +0,0 @@ --- | Groups black-box tests of cabal-install and configures them to test --- the correct binary. --- --- This file should do nothing but import tests from other modules and run --- them with the path to the correct cabal-install binary. -module Main - where - --- Modules from Cabal. 
-import Distribution.Simple.Program.Builtin (ghcPkgProgram) -import Distribution.Simple.Program.Db - (defaultProgramDb, requireProgram, setProgramSearchPath) -import Distribution.Simple.Program.Find - (ProgramSearchPathEntry(ProgramSearchPathDir), defaultProgramSearchPath) -import Distribution.Simple.Program.Types - ( Program(..), simpleProgram, programPath) -import Distribution.Simple.Utils ( findProgramVersion ) -import Distribution.Verbosity (normal) - --- Third party modules. -import qualified Control.Exception.Extensible as E -import System.Directory - ( canonicalizePath, getCurrentDirectory, setCurrentDirectory - , removeFile, doesFileExist ) -import System.FilePath (()) -import Test.Framework (Test, defaultMain, testGroup) -import Control.Monad ( when ) - --- Module containing common test code. - -import PackageTests.PackageTester ( TestsPaths(..) - , packageTestsDirectory - , packageTestsConfigFile ) - --- Modules containing the tests. -import qualified PackageTests.Exec.Check -import qualified PackageTests.Freeze.Check -import qualified PackageTests.MultipleSource.Check - --- List of tests to run. Each test will be called with the path to the --- cabal binary to use. 
-tests :: PackageTests.PackageTester.TestsPaths -> [Test] -tests paths = - [ testGroup "Freeze" $ PackageTests.Freeze.Check.tests paths - , testGroup "Exec" $ PackageTests.Exec.Check.tests paths - , testGroup "MultipleSource" $ PackageTests.MultipleSource.Check.tests paths - ] - -cabalProgram :: Program -cabalProgram = (simpleProgram "cabal") { - programFindVersion = findProgramVersion "--numeric-version" id - } - -main :: IO () -main = do - buildDir <- canonicalizePath "dist/build/cabal" - let programSearchPath = ProgramSearchPathDir buildDir : defaultProgramSearchPath - (cabal, _) <- requireProgram normal cabalProgram - (setProgramSearchPath programSearchPath defaultProgramDb) - (ghcPkg, _) <- requireProgram normal ghcPkgProgram defaultProgramDb - canonicalConfigPath <- canonicalizePath $ "tests" packageTestsDirectory - - let testsPaths = TestsPaths { - cabalPath = programPath cabal, - ghcPkgPath = programPath ghcPkg, - configPath = canonicalConfigPath packageTestsConfigFile - } - - putStrLn $ "Using cabal: " ++ cabalPath testsPaths - putStrLn $ "Using ghc-pkg: " ++ ghcPkgPath testsPaths - - cwd <- getCurrentDirectory - let confFile = packageTestsDirectory "cabal-config" - removeConf = do - b <- doesFileExist confFile - when b $ removeFile confFile - let runTests = do - setCurrentDirectory "tests" - removeConf -- assert that there is no existing config file - -- (we want deterministic testing with the default - -- config values) - defaultMain $ tests testsPaths - runTests `E.finally` do - -- remove the default config file that got created by the tests - removeConf - -- Change back to the old working directory so that the tests can be - -- repeatedly run in `cabal repl` via `:main`. 
- setCurrentDirectory cwd diff -Nru cabal-install-1.22-1.22.6.0/tests/UnitTests/Distribution/Client/Dependency/Modular/PSQ.hs cabal-install-1.22-1.22.9.0/tests/UnitTests/Distribution/Client/Dependency/Modular/PSQ.hs --- cabal-install-1.22-1.22.6.0/tests/UnitTests/Distribution/Client/Dependency/Modular/PSQ.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/tests/UnitTests/Distribution/Client/Dependency/Modular/PSQ.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,22 +0,0 @@ -module UnitTests.Distribution.Client.Dependency.Modular.PSQ ( - tests - ) where - -import Distribution.Client.Dependency.Modular.PSQ - -import Test.Framework as TF (Test) -import Test.Framework.Providers.QuickCheck2 - -tests :: [TF.Test] -tests = [ testProperty "splitsAltImplementation" splitsTest - ] - --- | Original splits implementation -splits' :: PSQ k a -> PSQ k (a, PSQ k a) -splits' xs = - casePSQ xs - (PSQ []) - (\ k v ys -> cons k (v, ys) (fmap (\ (w, zs) -> (w, cons k v zs)) (splits' ys))) - -splitsTest :: [(Int, Int)] -> Bool -splitsTest psq = splits' (PSQ psq) == splits (PSQ psq) diff -Nru cabal-install-1.22-1.22.6.0/tests/UnitTests/Distribution/Client/Sandbox.hs cabal-install-1.22-1.22.9.0/tests/UnitTests/Distribution/Client/Sandbox.hs --- cabal-install-1.22-1.22.6.0/tests/UnitTests/Distribution/Client/Sandbox.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/tests/UnitTests/Distribution/Client/Sandbox.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,29 +0,0 @@ -module UnitTests.Distribution.Client.Sandbox ( - tests - ) where - -import Distribution.Client.Sandbox (withSandboxBinDirOnSearchPath) - -import Test.Framework as TF (Test) -import Test.Framework.Providers.HUnit (testCase) -import Test.HUnit (Assertion, assertBool, assertEqual) - -import System.FilePath (getSearchPath, ()) - -tests :: [TF.Test] -tests = [ testCase "sandboxBinDirOnSearchPath" sandboxBinDirOnSearchPathTest - , testCase "oldSearchPathRestored" oldSearchPathRestoreTest - ] - 
-sandboxBinDirOnSearchPathTest :: Assertion -sandboxBinDirOnSearchPathTest = - withSandboxBinDirOnSearchPath "foo" $ do - r <- getSearchPath - assertBool "'foo/bin' not on search path" $ ("foo" "bin") `elem` r - -oldSearchPathRestoreTest :: Assertion -oldSearchPathRestoreTest = do - r <- getSearchPath - withSandboxBinDirOnSearchPath "foo" $ return () - r' <- getSearchPath - assertEqual "Old search path wasn't restored" r r' diff -Nru cabal-install-1.22-1.22.6.0/tests/UnitTests/Distribution/Client/Targets.hs cabal-install-1.22-1.22.9.0/tests/UnitTests/Distribution/Client/Targets.hs --- cabal-install-1.22-1.22.6.0/tests/UnitTests/Distribution/Client/Targets.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/tests/UnitTests/Distribution/Client/Targets.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,59 +0,0 @@ -module UnitTests.Distribution.Client.Targets ( - tests - ) where - -import Distribution.Client.Targets (UserConstraint (..), readUserConstraint) -import Distribution.Compat.ReadP (ReadP, readP_to_S) -import Distribution.Package (PackageName (..)) -import Distribution.ParseUtils (parseCommaList) -import Distribution.Text (parse) - -import Test.Framework as TF (Test) -import Test.Framework.Providers.HUnit (testCase) -import Test.HUnit (Assertion, assertEqual) - -import Data.Char (isSpace) - -tests :: [TF.Test] -tests = [ testCase "readUserConstraint" readUserConstraintTest - , testCase "parseUserConstraint" parseUserConstraintTest - , testCase "readUserConstraints" readUserConstraintsTest - ] - -readUserConstraintTest :: Assertion -readUserConstraintTest = - assertEqual ("Couldn't read constraint: '" ++ constr ++ "'") expected actual - where - pkgName = "template-haskell" - constr = pkgName ++ " installed" - - expected = UserConstraintInstalled (PackageName pkgName) - actual = let (Right r) = readUserConstraint constr in r - -parseUserConstraintTest :: Assertion -parseUserConstraintTest = - assertEqual ("Couldn't parse constraint: '" ++ constr ++ 
"'") expected actual - where - pkgName = "template-haskell" - constr = pkgName ++ " installed" - - expected = [UserConstraintInstalled (PackageName pkgName)] - actual = [ x | (x, ys) <- readP_to_S parseUserConstraint constr - , all isSpace ys] - - parseUserConstraint :: ReadP r UserConstraint - parseUserConstraint = parse - -readUserConstraintsTest :: Assertion -readUserConstraintsTest = - assertEqual ("Couldn't read constraints: '" ++ constr ++ "'") expected actual - where - pkgName = "template-haskell" - constr = pkgName ++ " installed" - - expected = [[UserConstraintInstalled (PackageName pkgName)]] - actual = [ x | (x, ys) <- readP_to_S parseUserConstraints constr - , all isSpace ys] - - parseUserConstraints :: ReadP r [UserConstraint] - parseUserConstraints = parseCommaList parse diff -Nru cabal-install-1.22-1.22.6.0/tests/UnitTests/Distribution/Client/UserConfig.hs cabal-install-1.22-1.22.9.0/tests/UnitTests/Distribution/Client/UserConfig.hs --- cabal-install-1.22-1.22.6.0/tests/UnitTests/Distribution/Client/UserConfig.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/tests/UnitTests/Distribution/Client/UserConfig.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,104 +0,0 @@ -{-# LANGUAGE CPP #-} -module UnitTests.Distribution.Client.UserConfig - ( tests - ) where - -import Control.Exception (bracket) -import Data.List (sort, nub) -#if !MIN_VERSION_base(4,8,0) -import Data.Monoid -#endif -import System.Directory (getCurrentDirectory, removeDirectoryRecursive, createDirectoryIfMissing) -import System.FilePath (takeDirectory) - -import Test.Framework as TF (Test) -import Test.Framework.Providers.HUnit (testCase) -import Test.HUnit (Assertion, assertBool) - -import Distribution.Client.Compat.Environment (lookupEnv, setEnv) -import Distribution.Client.Config -import Distribution.Utils.NubList (fromNubList) -import Distribution.Client.Setup (GlobalFlags (..), InstallFlags (..)) -import Distribution.Simple.Setup (ConfigFlags (..), fromFlag) -import 
Distribution.Verbosity (silent) - -tests :: [TF.Test] -tests = [ testCase "nullDiffOnCreate" nullDiffOnCreateTest - , testCase "canDetectDifference" canDetectDifference - , testCase "canUpdateConfig" canUpdateConfig - , testCase "doubleUpdateConfig" doubleUpdateConfig - ] - -nullDiffOnCreateTest :: Assertion -nullDiffOnCreateTest = bracketTest . const $ do - -- Create a new default config file in our test directory. - _ <- loadConfig silent mempty mempty - -- Now we read it in and compare it against the default. - diff <- userConfigDiff mempty - assertBool (unlines $ "Following diff should be empty:" : diff) $ null diff - - -canDetectDifference :: Assertion -canDetectDifference = bracketTest . const $ do - -- Create a new default config file in our test directory. - _ <- loadConfig silent mempty mempty - cabalFile <- defaultConfigFile - appendFile cabalFile "verbose: 0\n" - diff <- userConfigDiff mempty - assertBool (unlines $ "Should detect a difference:" : diff) $ - diff == [ "- verbose: 1", "+ verbose: 0" ] - - -canUpdateConfig :: Assertion -canUpdateConfig = bracketTest . const $ do - cabalFile <- defaultConfigFile - createDirectoryIfMissing True $ takeDirectory cabalFile - -- Write a trivial cabal file. - writeFile cabalFile "tests: True\n" - -- Update the config file. - userConfigUpdate silent mempty - -- Load it again. - updated <- loadConfig silent mempty mempty - assertBool ("Field 'tests' should be True") $ - fromFlag (configTests $ savedConfigureFlags updated) - - -doubleUpdateConfig :: Assertion -doubleUpdateConfig = bracketTest . const $ do - -- Create a new default config file in our test directory. - _ <- loadConfig silent mempty mempty - -- Update it. - userConfigUpdate silent mempty - userConfigUpdate silent mempty - -- Load it again. - updated <- loadConfig silent mempty mempty - - assertBool ("Field 'remote-repo' doesn't contain duplicates") $ - listUnique (map show . fromNubList . 
globalRemoteRepos $ savedGlobalFlags updated) - assertBool ("Field 'extra-prog-path' doesn't contain duplicates") $ - listUnique (map show . fromNubList . configProgramPathExtra $ savedConfigureFlags updated) - assertBool ("Field 'build-summary' doesn't contain duplicates") $ - listUnique (map show . fromNubList . installSummaryFile $ savedInstallFlags updated) - - -listUnique :: Ord a => [a] -> Bool -listUnique xs = - let sorted = sort xs - in nub sorted == xs - - -bracketTest :: ((FilePath, FilePath) -> IO ()) -> Assertion -bracketTest = - bracket testSetup testTearDown - where - testSetup :: IO (FilePath, FilePath) - testSetup = do - Just oldHome <- lookupEnv "HOME" - testdir <- fmap (++ "/test-user-config") getCurrentDirectory - setEnv "HOME" testdir - return (oldHome, testdir) - - testTearDown :: (FilePath, FilePath) -> IO () - testTearDown (oldHome, testdir) = do - setEnv "HOME" oldHome - removeDirectoryRecursive testdir diff -Nru cabal-install-1.22-1.22.6.0/tests/UnitTests.hs cabal-install-1.22-1.22.9.0/tests/UnitTests.hs --- cabal-install-1.22-1.22.6.0/tests/UnitTests.hs 2015-06-17 08:11:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/tests/UnitTests.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ -module Main - where - -import Test.Framework - -import qualified UnitTests.Distribution.Client.Sandbox -import qualified UnitTests.Distribution.Client.UserConfig -import qualified UnitTests.Distribution.Client.Targets -import qualified UnitTests.Distribution.Client.Dependency.Modular.PSQ - -tests :: [Test] -tests = [ - testGroup "UnitTests.Distribution.Client.UserConfig" - UnitTests.Distribution.Client.UserConfig.tests - ,testGroup "Distribution.Client.Sandbox" - UnitTests.Distribution.Client.Sandbox.tests - ,testGroup "Distribution.Client.Targets" - UnitTests.Distribution.Client.Targets.tests - ,testGroup "UnitTests.Distribution.Client.Dependency.Modular.PSQ" - UnitTests.Distribution.Client.Dependency.Modular.PSQ.tests - ] - -main :: IO () -main = 
defaultMain tests Binary files /tmp/tmpVBg9ir/PzIulGlZus/cabal-install-1.22-1.22.6.0/text-1.2.0.3.tar.gz and /tmp/tmpVBg9ir/pYw8BlU4Qo/cabal-install-1.22-1.22.9.0/text-1.2.0.3.tar.gz differ diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar1=/changelog.md cabal-install-1.22-1.22.9.0/=unpacked-tar1=/changelog.md --- cabal-install-1.22-1.22.6.0/=unpacked-tar1=/changelog.md 2014-11-21 10:45:10.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar1=/changelog.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -# Changelog for [`old-locale` package](http://hackage.haskell.org/package/old-locale) - -## 1.0.0.7 *Nov 2014* - - * Decoupled from GHC distribution - -## 1.0.0.6 *Mar 2014* - - * Bundled with GHC 7.8.1 - - * Update Haddock comments - - * Update to Cabal 1.10 format - -## 1.0.0.5 *Sep 2012* - - * Bundled with GHC 7.6.1 - - * Un-deprecate `old-locale` package - -## 1.0.0.4 *Feb 2012* - - * Bundled with GHC 7.4.1 - - * Add support for SafeHaskell diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar1=/LICENSE cabal-install-1.22-1.22.9.0/=unpacked-tar1=/LICENSE --- cabal-install-1.22-1.22.6.0/=unpacked-tar1=/LICENSE 2014-11-21 10:45:10.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar1=/LICENSE 1970-01-01 00:00:00.000000000 +0000 @@ -1,63 +0,0 @@ -This library (libraries/base) is derived from code from two -sources: - - * Code from the GHC project which is largely (c) The University of - Glasgow, and distributable under a BSD-style license (see below), - - * Code from the Haskell 98 Report which is (c) Simon Peyton Jones - and freely redistributable (but see the full license for - restrictions). - -The full text of these licenses is reproduced below. Both of the -licenses are BSD-style or compatible. - ------------------------------------------------------------------------------ - -The Glasgow Haskell Compiler License - -Copyright 2004, The University Court of the University of Glasgow. -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -- Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. - -- Redistributions in binary form must reproduce the above copyright notice, -this list of conditions and the following disclaimer in the documentation -and/or other materials provided with the distribution. - -- Neither name of the University nor the names of its contributors may be -used to endorse or promote products derived from this software without -specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY COURT OF THE UNIVERSITY OF -GLASGOW AND THE CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, -INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND -FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -UNIVERSITY COURT OF THE UNIVERSITY OF GLASGOW OR THE CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -DAMAGE. - ------------------------------------------------------------------------------ - -Code derived from the document "Report on the Programming Language -Haskell 98", is distributed under the following license: - - Copyright (c) 2002 Simon Peyton Jones - - The authors intend this Report to belong to the entire Haskell - community, and so we grant permission to copy and distribute it for - any purpose, provided that it is reproduced in its entirety, - including this Notice. 
Modified versions of this Report may also be - copied and distributed for any purpose, provided that the modified - version is clearly presented as such, and that it does not claim to - be a definition of the Haskell 98 Language. - ------------------------------------------------------------------------------ diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar1=/old-locale.cabal cabal-install-1.22-1.22.9.0/=unpacked-tar1=/old-locale.cabal --- cabal-install-1.22-1.22.6.0/=unpacked-tar1=/old-locale.cabal 2014-11-21 10:45:10.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar1=/old-locale.cabal 1970-01-01 00:00:00.000000000 +0000 @@ -1,35 +0,0 @@ -name: old-locale -version: 1.0.0.7 --- NOTE: Don't forget to update ./changelog.md -license: BSD3 -license-file: LICENSE -maintainer: libraries@haskell.org -bug-reports: https://github.com/haskell/old-locale/issues -synopsis: locale library -category: System -build-type: Simple -Cabal-Version:>=1.10 -tested-with: GHC==7.8.3, GHC==7.8.2, GHC==7.8.1, GHC==7.6.3, GHC==7.6.2, GHC==7.6.1, GHC==7.4.2, GHC==7.4.1, GHC==7.2.2, GHC==7.2.1, GHC==7.0.4, GHC==7.0.3, GHC==7.0.2, GHC==7.0.1, GHC==6.12.3 -description: - This package provides the ability to adapt to - locale conventions such as date and time formats. 
- -extra-source-files: - changelog.md - -source-repository head - type: git - location: https://github.com/haskell/old-locale.git - -Library - default-language: Haskell98 - other-extensions: CPP - if impl(ghc>=7.2) - -- && base>=4.4.1 - other-extensions: Safe - - exposed-modules: - System.Locale - - build-depends: base >= 4.2 && < 4.9 - ghc-options: -Wall diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar1=/Setup.hs cabal-install-1.22-1.22.9.0/=unpacked-tar1=/Setup.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar1=/Setup.hs 2014-11-21 10:45:10.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar1=/Setup.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -module Main (main) where - -import Distribution.Simple - -main :: IO () -main = defaultMain diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar1=/System/Locale.hs cabal-install-1.22-1.22.9.0/=unpacked-tar1=/System/Locale.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar1=/System/Locale.hs 2014-11-21 10:45:10.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar1=/System/Locale.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,98 +0,0 @@ -{-# LANGUAGE CPP #-} -#if __GLASGOW_HASKELL__ >= 701 && MIN_VERSION_base(4,4,1) -{-# LANGUAGE Safe #-} -#endif ------------------------------------------------------------------------------ --- | --- Module : System.Locale --- Copyright : (c) The University of Glasgow 2001 --- License : BSD3 (see LICENSE file) --- --- Maintainer : libraries@haskell.org --- Stability : stable --- Portability : portable --- --- This module provides the ability to adapt to local conventions. --- --- At present, it supports only time and date information as used by --- @calendarTimeToString@ from the @System.Time@ module in the --- @old-time@ package. --- ------------------------------------------------------------------------------ - -module System.Locale ( - - TimeLocale(..) 
- - , defaultTimeLocale - - , iso8601DateFormat - , rfc822DateFormat - ) -where - -import Prelude - -data TimeLocale = TimeLocale { - -- |full and abbreviated week days - wDays :: [(String, String)], - -- |full and abbreviated months - months :: [(String, String)], - intervals :: [(String, String)], - -- |AM\/PM symbols - amPm :: (String, String), - -- |formatting strings - dateTimeFmt, dateFmt, - timeFmt, time12Fmt :: String - } deriving (Eq, Ord, Show) - -defaultTimeLocale :: TimeLocale -defaultTimeLocale = TimeLocale { - wDays = [("Sunday", "Sun"), ("Monday", "Mon"), - ("Tuesday", "Tue"), ("Wednesday", "Wed"), - ("Thursday", "Thu"), ("Friday", "Fri"), - ("Saturday", "Sat")], - - months = [("January", "Jan"), ("February", "Feb"), - ("March", "Mar"), ("April", "Apr"), - ("May", "May"), ("June", "Jun"), - ("July", "Jul"), ("August", "Aug"), - ("September", "Sep"), ("October", "Oct"), - ("November", "Nov"), ("December", "Dec")], - - intervals = [ ("year","years") - , ("month", "months") - , ("day","days") - , ("hour","hours") - , ("min","mins") - , ("sec","secs") - , ("usec","usecs") - ], - - amPm = ("AM", "PM"), - dateTimeFmt = "%a %b %e %H:%M:%S %Z %Y", - dateFmt = "%m/%d/%y", - timeFmt = "%H:%M:%S", - time12Fmt = "%I:%M:%S %p" - } - - -{- | Construct format string according to . - -The @Maybe String@ argument allows to supply an optional time specification. E.g.: - -@ -'iso8601DateFormat' Nothing == "%Y-%m-%d" -- i.e. @/YYYY-MM-DD/@ -'iso8601DateFormat' (Just "%H:%M:%S") == "%Y-%m-%dT%H:%M:%S" -- i.e. @/YYYY-MM-DD/T/HH:MM:SS/@ -@ --} - -iso8601DateFormat :: Maybe String -> String -iso8601DateFormat mTimeFmt = - "%Y-%m-%d" ++ case mTimeFmt of - Nothing -> "" - Just fmt -> 'T' : fmt - --- | Format string according to . 
-rfc822DateFormat :: String -rfc822DateFormat = "%a, %_d %b %Y %H:%M:%S %Z" diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar10=/CHANGES cabal-install-1.22-1.22.9.0/=unpacked-tar10=/CHANGES --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/CHANGES 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/CHANGES 1970-01-01 00:00:00.000000000 +0000 @@ -1,22 +0,0 @@ -3.1.7 - -- Fix a regression from 3.1.6 related to the reported position of error messages. - See bug #9 for details. -- Reset the current error position on success of 'lookAhead'. - -3.1.6 - -- Export 'Text' instances from Text.Parsec -- Make Text.Parsec exports more visible -- Re-arrange Text.Parsec exports -- Add functions 'crlf' and 'endOfLine' to Text.Parsec.Char for handling - input streams that do not have normalized line terminators. -- Fix off-by-one error in Token.charControl - -3.1.4 & 3.1.5 - -- Bump dependency on 'text' - -3.1.3 - -- Fix a regression introduced in 3.1.2 related to positions reported by error messages. diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar10=/LICENSE cabal-install-1.22-1.22.9.0/=unpacked-tar10=/LICENSE --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/LICENSE 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/LICENSE 1970-01-01 00:00:00.000000000 +0000 @@ -1,21 +0,0 @@ -Copyright 1999-2000, Daan Leijen; 2007, Paolo Martini. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. 
- -This software is provided by the copyright holders "as is" and any express or -implied warranties, including, but not limited to, the implied warranties of -merchantability and fitness for a particular purpose are disclaimed. In no -event shall the copyright holders be liable for any direct, indirect, -incidental, special, exemplary, or consequential damages (including, but not -limited to, procurement of substitute goods or services; loss of use, data, -or profits; or business interruption) however caused and on any theory of -liability, whether in contract, strict liability, or tort (including -negligence or otherwise) arising in any way out of the use of this software, -even if advised of the possibility of such damage. diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar10=/parsec.cabal cabal-install-1.22-1.22.9.0/=unpacked-tar10=/parsec.cabal --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/parsec.cabal 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/parsec.cabal 1970-01-01 00:00:00.000000000 +0000 @@ -1,83 +0,0 @@ -name: parsec -version: 3.1.7 -cabal-version: >= 1.8 -license: BSD3 -license-file: LICENSE -author: Daan Leijen , Paolo Martini -maintainer: Antoine Latter -homepage: http://www.cs.uu.nl/~daan/parsec.html -bug-reports: https://github.com/aslatter/parsec/issues -category: Parsing -synopsis: Monadic parser combinators -build-type: Simple -description: - Parsec is designed from scratch as an industrial-strength parser - library. It is simple, safe, well documented (on the package - homepage), has extensive libraries and good error messages, - and is also fast. It is defined as a monad transformer that can be - stacked on arbitrary monads, and it is also parametric in the - input stream type. 
-extra-source-files: CHANGES - -source-repository head - type: git - location: https://github.com/aslatter/parsec - -flag base4 - Description: Use base-4.* - Default: True - -library - exposed-modules: - Text.Parsec, - Text.Parsec.String, - Text.Parsec.ByteString, - Text.Parsec.ByteString.Lazy, - Text.Parsec.Text, - Text.Parsec.Text.Lazy, - Text.Parsec.Pos, - Text.Parsec.Error, - Text.Parsec.Prim, - Text.Parsec.Char, - Text.Parsec.Combinator, - Text.Parsec.Token, - Text.Parsec.Expr, - Text.Parsec.Language, - Text.Parsec.Perm, - Text.ParserCombinators.Parsec, - Text.ParserCombinators.Parsec.Char, - Text.ParserCombinators.Parsec.Combinator, - Text.ParserCombinators.Parsec.Error, - Text.ParserCombinators.Parsec.Expr, - Text.ParserCombinators.Parsec.Language, - Text.ParserCombinators.Parsec.Perm, - Text.ParserCombinators.Parsec.Pos, - Text.ParserCombinators.Parsec.Prim, - Text.ParserCombinators.Parsec.Token - if flag(base4) - build-depends: base >= 4 && < 5 - else - build-depends: base >= 3.0.3 && < 4 - cpp-options: -DBASE3 - - build-depends: mtl, bytestring, text >= 0.2 && < 1.3 - extensions: ExistentialQuantification, PolymorphicComponents, MultiParamTypeClasses, FlexibleInstances, FlexibleContexts, DeriveDataTypeable, CPP - ghc-options: -O2 - -Test-Suite tests - type: exitcode-stdio-1.0 - hs-source-dirs: test - main-is: Main.hs - other-modules: - Bugs, - Bugs.Bug2, - Bugs.Bug6, - Bugs.Bug9, - Util - build-depends: - base, - parsec, - HUnit == 1.2.*, - test-framework >= 0.6 && < 0.9, - test-framework-hunit >= 0.2 && < 0.4 - ghc-options: -Wall diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Setup.hs cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Setup.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Setup.hs 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Setup.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -module Main (main) where - -import Distribution.Simple - -main :: IO () -main = defaultMain diff -Nru 
cabal-install-1.22-1.22.6.0/=unpacked-tar10=/test/Bugs/Bug2.hs cabal-install-1.22-1.22.9.0/=unpacked-tar10=/test/Bugs/Bug2.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/test/Bugs/Bug2.hs 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/test/Bugs/Bug2.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,28 +0,0 @@ - -module Bugs.Bug2 - ( main - ) where - -import Test.HUnit hiding ( Test ) -import Test.Framework -import Test.Framework.Providers.HUnit - -import Text.Parsec -import Text.Parsec.String -import qualified Text.Parsec.Token as P -import Text.Parsec.Language (haskellDef) - -main :: Test -main = - testCase "Control Char Parsing (#2)" $ - parseString "\"test\\^Bstring\"" @?= "test\^Bstring" - - where - parseString :: String -> String - parseString input = - case parse parser "Example" input of - Left{} -> error "Parse failure" - Right str -> str - - parser :: Parser String - parser = P.stringLiteral $ P.makeTokenParser haskellDef \ No newline at end of file diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar10=/test/Bugs/Bug6.hs cabal-install-1.22-1.22.9.0/=unpacked-tar10=/test/Bugs/Bug6.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/test/Bugs/Bug6.hs 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/test/Bugs/Bug6.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ - -module Bugs.Bug6 - ( main - ) where - -import Test.HUnit hiding ( Test ) -import Test.Framework -import Test.Framework.Providers.HUnit - -import Text.Parsec -import Text.Parsec.String - -import Util - -main :: Test -main = - testCase "Look-ahead preserving error location (#6)" $ - parseErrors variable "return" @?= ["'return' is a reserved keyword"] - -variable :: Parser String -variable = do - x <- lookAhead (many1 letter) - if x == "return" - then fail "'return' is a reserved keyword" - else string x diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar10=/test/Bugs/Bug9.hs 
cabal-install-1.22-1.22.9.0/=unpacked-tar10=/test/Bugs/Bug9.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/test/Bugs/Bug9.hs 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/test/Bugs/Bug9.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,46 +0,0 @@ - -module Bugs.Bug9 ( main ) where - -import Control.Applicative ((<*), (<$>), (<$)) -import Text.Parsec -import Text.Parsec.Language (haskellStyle) -import Text.Parsec.String (Parser) -import Text.Parsec.Expr -import qualified Text.Parsec.Token as P - -import Test.HUnit hiding ( Test ) -import Test.Framework -import Test.Framework.Providers.HUnit - -import Util - -data Expr = Const Integer | Op Expr Expr - deriving Show - -main :: Test -main = - testCase "Tracing of current position in error message (#9)" - $ result @?= ["unexpected '>'","expecting operator or end of input"] - - where - result :: [String] - result = parseErrors parseTopLevel "4 >> 5" - --- Syntax analaysis - -parseTopLevel :: Parser Expr -parseTopLevel = parseExpr <* eof - -parseExpr :: Parser Expr -parseExpr = buildExpressionParser table (Const <$> integer) - where - table = [[ Infix (Op <$ reserved ">>>") AssocLeft ]] - - -- Lexical analysis - - lexer = P.makeTokenParser haskellStyle { P.reservedOpNames = [">>>"] } - - integer = P.integer lexer - reserved = P.reserved lexer - reservedOp = P.reservedOp lexer - diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar10=/test/Bugs.hs cabal-install-1.22-1.22.9.0/=unpacked-tar10=/test/Bugs.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/test/Bugs.hs 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/test/Bugs.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,16 +0,0 @@ - -module Bugs - ( bugs - ) where - -import Test.Framework - -import qualified Bugs.Bug2 -import qualified Bugs.Bug6 -import qualified Bugs.Bug9 - -bugs :: [Test] -bugs = [ Bugs.Bug2.main - , Bugs.Bug6.main - , Bugs.Bug9.main - ] diff -Nru 
cabal-install-1.22-1.22.6.0/=unpacked-tar10=/test/Main.hs cabal-install-1.22-1.22.9.0/=unpacked-tar10=/test/Main.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/test/Main.hs 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/test/Main.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,10 +0,0 @@ - -import Test.Framework - -import Bugs ( bugs ) - -main :: IO () -main = do - defaultMain - [ testGroup "Bugs" bugs - ] \ No newline at end of file diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar10=/test/Util.hs cabal-install-1.22-1.22.9.0/=unpacked-tar10=/test/Util.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/test/Util.hs 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/test/Util.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ - -module Util where - -import Text.Parsec -import Text.Parsec.String ( Parser ) - --- | Returns the error messages associated --- with a failed parse. -parseErrors :: Parser a -> String -> [String] -parseErrors p input = - case parse p "" input of - Left err -> - drop 1 $ lines $ show err - Right{} -> [] diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/Parsec/ByteString/Lazy.hs cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/Parsec/ByteString/Lazy.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/Parsec/ByteString/Lazy.hs 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/Parsec/ByteString/Lazy.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,39 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Text.Parsec.ByteString.Lazy --- Copyright : (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- Convinience definitions for working with lazy 'C.ByteString's. 
--- ------------------------------------------------------------------------------ - -module Text.Parsec.ByteString.Lazy - ( Parser, GenParser, parseFromFile - ) where - -import Text.Parsec.Error -import Text.Parsec.Prim - -import qualified Data.ByteString.Lazy.Char8 as C - -type Parser = Parsec C.ByteString () -type GenParser t st = Parsec C.ByteString st - --- | @parseFromFile p filePath@ runs a lazy bytestring parser @p@ on the --- input read from @filePath@ using 'ByteString.Lazy.Char8.readFile'. Returns either a 'ParseError' --- ('Left') or a value of type @a@ ('Right'). --- --- > main = do{ result <- parseFromFile numbers "digits.txt" --- > ; case result of --- > Left err -> print err --- > Right xs -> print (sum xs) --- > } -parseFromFile :: Parser a -> String -> IO (Either ParseError a) -parseFromFile p fname - = do input <- C.readFile fname - return (runP p () fname input) diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/Parsec/ByteString.hs cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/Parsec/ByteString.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/Parsec/ByteString.hs 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/Parsec/ByteString.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,40 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Text.Parsec.ByteString --- Copyright : (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- Convinience definitions for working with 'C.ByteString's. 
--- ------------------------------------------------------------------------------ - -module Text.Parsec.ByteString - ( Parser, GenParser, parseFromFile - ) where - -import Text.Parsec.Error -import Text.Parsec.Prim - -import qualified Data.ByteString.Char8 as C - -type Parser = Parsec C.ByteString () -type GenParser t st = Parsec C.ByteString st - --- | @parseFromFile p filePath@ runs a strict bytestring parser @p@ on the --- input read from @filePath@ using 'ByteString.Char8.readFile'. Returns either a 'ParseError' --- ('Left') or a value of type @a@ ('Right'). --- --- > main = do{ result <- parseFromFile numbers "digits.txt" --- > ; case result of --- > Left err -> print err --- > Right xs -> print (sum xs) --- > } - -parseFromFile :: Parser a -> String -> IO (Either ParseError a) -parseFromFile p fname - = do input <- C.readFile fname - return (runP p () fname input) diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/Parsec/Char.hs cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/Parsec/Char.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/Parsec/Char.hs 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/Parsec/Char.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,151 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Text.Parsec.Char --- Copyright : (c) Daan Leijen 1999-2001, (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- Commonly used character parsers. --- ------------------------------------------------------------------------------ - -{-# LANGUAGE FlexibleContexts #-} - -module Text.Parsec.Char where - -import Data.Char -import Text.Parsec.Pos -import Text.Parsec.Prim -import Control.Applicative ((*>)) - --- | @oneOf cs@ succeeds if the current character is in the supplied --- list of characters @cs@. 
Returns the parsed character. See also --- 'satisfy'. --- --- > vowel = oneOf "aeiou" - -oneOf :: (Stream s m Char) => [Char] -> ParsecT s u m Char -oneOf cs = satisfy (\c -> elem c cs) - --- | As the dual of 'oneOf', @noneOf cs@ succeeds if the current --- character /not/ in the supplied list of characters @cs@. Returns the --- parsed character. --- --- > consonant = noneOf "aeiou" - -noneOf :: (Stream s m Char) => [Char] -> ParsecT s u m Char -noneOf cs = satisfy (\c -> not (elem c cs)) - --- | Skips /zero/ or more white space characters. See also 'skipMany'. - -spaces :: (Stream s m Char) => ParsecT s u m () -spaces = skipMany space "white space" - --- | Parses a white space character (any character which satisfies 'isSpace') --- Returns the parsed character. - -space :: (Stream s m Char) => ParsecT s u m Char -space = satisfy isSpace "space" - --- | Parses a newline character (\'\\n\'). Returns a newline character. - -newline :: (Stream s m Char) => ParsecT s u m Char -newline = char '\n' "lf new-line" - --- | Parses a carriage return character (\'\\r\') followed by a newline character (\'\\n\'). --- Returns a newline character. - -crlf :: (Stream s m Char) => ParsecT s u m Char -crlf = char '\r' *> char '\n' "crlf new-line" - --- | Parses a CRLF (see 'crlf') or LF (see 'newline') end-of-line. --- Returns a newline character (\'\\n\'). --- --- > endOfLine = newline <|> crlf --- - -endOfLine :: (Stream s m Char) => ParsecT s u m Char -endOfLine = newline <|> crlf "new-line" - --- | Parses a tab character (\'\\t\'). Returns a tab character. - -tab :: (Stream s m Char) => ParsecT s u m Char -tab = char '\t' "tab" - --- | Parses an upper case letter (a character between \'A\' and \'Z\'). --- Returns the parsed character. - -upper :: (Stream s m Char) => ParsecT s u m Char -upper = satisfy isUpper "uppercase letter" - --- | Parses a lower case character (a character between \'a\' and \'z\'). --- Returns the parsed character. 
- -lower :: (Stream s m Char) => ParsecT s u m Char -lower = satisfy isLower "lowercase letter" - --- | Parses a letter or digit (a character between \'0\' and \'9\'). --- Returns the parsed character. - -alphaNum :: (Stream s m Char => ParsecT s u m Char) -alphaNum = satisfy isAlphaNum "letter or digit" - --- | Parses a letter (an upper case or lower case character). Returns the --- parsed character. - -letter :: (Stream s m Char) => ParsecT s u m Char -letter = satisfy isAlpha "letter" - --- | Parses a digit. Returns the parsed character. - -digit :: (Stream s m Char) => ParsecT s u m Char -digit = satisfy isDigit "digit" - --- | Parses a hexadecimal digit (a digit or a letter between \'a\' and --- \'f\' or \'A\' and \'F\'). Returns the parsed character. - -hexDigit :: (Stream s m Char) => ParsecT s u m Char -hexDigit = satisfy isHexDigit "hexadecimal digit" - --- | Parses an octal digit (a character between \'0\' and \'7\'). Returns --- the parsed character. - -octDigit :: (Stream s m Char) => ParsecT s u m Char -octDigit = satisfy isOctDigit "octal digit" - --- | @char c@ parses a single character @c@. Returns the parsed --- character (i.e. @c@). --- --- > semiColon = char ';' - -char :: (Stream s m Char) => Char -> ParsecT s u m Char -char c = satisfy (==c) show [c] - --- | This parser succeeds for any character. Returns the parsed character. - -anyChar :: (Stream s m Char) => ParsecT s u m Char -anyChar = satisfy (const True) - --- | The parser @satisfy f@ succeeds for any character for which the --- supplied function @f@ returns 'True'. Returns the character that is --- actually parsed. - --- > digit = satisfy isDigit --- > oneOf cs = satisfy (\c -> c `elem` cs) - -satisfy :: (Stream s m Char) => (Char -> Bool) -> ParsecT s u m Char -satisfy f = tokenPrim (\c -> show [c]) - (\pos c _cs -> updatePosChar pos c) - (\c -> if f c then Just c else Nothing) - --- | @string s@ parses a sequence of characters given by @s@. Returns --- the parsed string (i.e. @s@). 
--- --- > divOrMod = string "div" --- > <|> string "mod" - -string :: (Stream s m Char) => String -> ParsecT s u m String -string s = tokens show updatePosString s diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/Parsec/Combinator.hs cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/Parsec/Combinator.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/Parsec/Combinator.hs 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/Parsec/Combinator.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,277 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Text.Parsec.Combinator --- Copyright : (c) Daan Leijen 1999-2001, (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- Commonly used generic combinators --- ------------------------------------------------------------------------------ - -module Text.Parsec.Combinator - ( choice - , count - , between - , option, optionMaybe, optional - , skipMany1 - , many1 - , sepBy, sepBy1 - , endBy, endBy1 - , sepEndBy, sepEndBy1 - , chainl, chainl1 - , chainr, chainr1 - , eof, notFollowedBy - -- tricky combinators - , manyTill, lookAhead, anyToken - ) where - -import Control.Monad -import Text.Parsec.Prim - --- | @choice ps@ tries to apply the parsers in the list @ps@ in order, --- until one of them succeeds. Returns the value of the succeeding --- parser. - -choice :: (Stream s m t) => [ParsecT s u m a] -> ParsecT s u m a -choice ps = foldr (<|>) mzero ps - --- | @option x p@ tries to apply parser @p@. If @p@ fails without --- consuming input, it returns the value @x@, otherwise the value --- returned by @p@. 
--- --- > priority = option 0 (do{ d <- digit --- > ; return (digitToInt d) --- > }) - -option :: (Stream s m t) => a -> ParsecT s u m a -> ParsecT s u m a -option x p = p <|> return x - --- | @optionMaybe p@ tries to apply parser @p@. If @p@ fails without --- consuming input, it return 'Nothing', otherwise it returns --- 'Just' the value returned by @p@. - -optionMaybe :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m (Maybe a) -optionMaybe p = option Nothing (liftM Just p) - --- | @optional p@ tries to apply parser @p@. It will parse @p@ or nothing. --- It only fails if @p@ fails after consuming input. It discards the result --- of @p@. - -optional :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m () -optional p = do{ p; return ()} <|> return () - --- | @between open close p@ parses @open@, followed by @p@ and @close@. --- Returns the value returned by @p@. --- --- > braces = between (symbol "{") (symbol "}") - -between :: (Stream s m t) => ParsecT s u m open -> ParsecT s u m close - -> ParsecT s u m a -> ParsecT s u m a -between open close p - = do{ open; x <- p; close; return x } - --- | @skipMany1 p@ applies the parser @p@ /one/ or more times, skipping --- its result. - -skipMany1 :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m () -skipMany1 p = do{ p; skipMany p } -{- -skipMany p = scan - where - scan = do{ p; scan } <|> return () --} - --- | @many1 p@ applies the parser @p@ /one/ or more times. Returns a --- list of the returned values of @p@. --- --- > word = many1 letter - -many1 :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m [a] -many1 p = do{ x <- p; xs <- many p; return (x:xs) } -{- -many p = scan id - where - scan f = do{ x <- p - ; scan (\tail -> f (x:tail)) - } - <|> return (f []) --} - - --- | @sepBy p sep@ parses /zero/ or more occurrences of @p@, separated --- by @sep@. Returns a list of values returned by @p@. 
--- --- > commaSep p = p `sepBy` (symbol ",") - -sepBy :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m sep -> ParsecT s u m [a] -sepBy p sep = sepBy1 p sep <|> return [] - --- | @sepBy1 p sep@ parses /one/ or more occurrences of @p@, separated --- by @sep@. Returns a list of values returned by @p@. - -sepBy1 :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m sep -> ParsecT s u m [a] -sepBy1 p sep = do{ x <- p - ; xs <- many (sep >> p) - ; return (x:xs) - } - - --- | @sepEndBy1 p sep@ parses /one/ or more occurrences of @p@, --- separated and optionally ended by @sep@. Returns a list of values --- returned by @p@. - -sepEndBy1 :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m sep -> ParsecT s u m [a] -sepEndBy1 p sep = do{ x <- p - ; do{ sep - ; xs <- sepEndBy p sep - ; return (x:xs) - } - <|> return [x] - } - --- | @sepEndBy p sep@ parses /zero/ or more occurrences of @p@, --- separated and optionally ended by @sep@, ie. haskell style --- statements. Returns a list of values returned by @p@. --- --- > haskellStatements = haskellStatement `sepEndBy` semi - -sepEndBy :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m sep -> ParsecT s u m [a] -sepEndBy p sep = sepEndBy1 p sep <|> return [] - - --- | @endBy1 p sep@ parses /one/ or more occurrences of @p@, seperated --- and ended by @sep@. Returns a list of values returned by @p@. - -endBy1 :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m sep -> ParsecT s u m [a] -endBy1 p sep = many1 (do{ x <- p; sep; return x }) - --- | @endBy p sep@ parses /zero/ or more occurrences of @p@, seperated --- and ended by @sep@. Returns a list of values returned by @p@. --- --- > cStatements = cStatement `endBy` semi - -endBy :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m sep -> ParsecT s u m [a] -endBy p sep = many (do{ x <- p; sep; return x }) - --- | @count n p@ parses @n@ occurrences of @p@. If @n@ is smaller or --- equal to zero, the parser equals to @return []@. 
Returns a list of --- @n@ values returned by @p@. - -count :: (Stream s m t) => Int -> ParsecT s u m a -> ParsecT s u m [a] -count n p | n <= 0 = return [] - | otherwise = sequence (replicate n p) - --- | @chainr p op x@ parser /zero/ or more occurrences of @p@, --- separated by @op@ Returns a value obtained by a /right/ associative --- application of all functions returned by @op@ to the values returned --- by @p@. If there are no occurrences of @p@, the value @x@ is --- returned. - -chainr :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m (a -> a -> a) -> a -> ParsecT s u m a -chainr p op x = chainr1 p op <|> return x - --- | @chainl p op x@ parser /zero/ or more occurrences of @p@, --- separated by @op@. Returns a value obtained by a /left/ associative --- application of all functions returned by @op@ to the values returned --- by @p@. If there are zero occurrences of @p@, the value @x@ is --- returned. - -chainl :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m (a -> a -> a) -> a -> ParsecT s u m a -chainl p op x = chainl1 p op <|> return x - --- | @chainl1 p op x@ parser /one/ or more occurrences of @p@, --- separated by @op@ Returns a value obtained by a /left/ associative --- application of all functions returned by @op@ to the values returned --- by @p@. . This parser can for example be used to eliminate left --- recursion which typically occurs in expression grammars. 
--- --- > expr = term `chainl1` addop --- > term = factor `chainl1` mulop --- > factor = parens expr <|> integer --- > --- > mulop = do{ symbol "*"; return (*) } --- > <|> do{ symbol "/"; return (div) } --- > --- > addop = do{ symbol "+"; return (+) } --- > <|> do{ symbol "-"; return (-) } - -chainl1 :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m (a -> a -> a) -> ParsecT s u m a -chainl1 p op = do{ x <- p; rest x } - where - rest x = do{ f <- op - ; y <- p - ; rest (f x y) - } - <|> return x - --- | @chainr1 p op x@ parser /one/ or more occurrences of |p|, --- separated by @op@ Returns a value obtained by a /right/ associative --- application of all functions returned by @op@ to the values returned --- by @p@. - -chainr1 :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m (a -> a -> a) -> ParsecT s u m a -chainr1 p op = scan - where - scan = do{ x <- p; rest x } - - rest x = do{ f <- op - ; y <- scan - ; return (f x y) - } - <|> return x - ------------------------------------------------------------ --- Tricky combinators ------------------------------------------------------------ --- | The parser @anyToken@ accepts any kind of token. It is for example --- used to implement 'eof'. Returns the accepted token. - -anyToken :: (Stream s m t, Show t) => ParsecT s u m t -anyToken = tokenPrim show (\pos _tok _toks -> pos) Just - --- | This parser only succeeds at the end of the input. This is not a --- primitive parser but it is defined using 'notFollowedBy'. --- --- > eof = notFollowedBy anyToken "end of input" - -eof :: (Stream s m t, Show t) => ParsecT s u m () -eof = notFollowedBy anyToken "end of input" - --- | @notFollowedBy p@ only succeeds when parser @p@ fails. This parser --- does not consume any input. This parser can be used to implement the --- \'longest match\' rule. 
For example, when recognizing keywords (for --- example @let@), we want to make sure that a keyword is not followed --- by a legal identifier character, in which case the keyword is --- actually an identifier (for example @lets@). We can program this --- behaviour as follows: --- --- > keywordLet = try (do{ string "let" --- > ; notFollowedBy alphaNum --- > }) - -notFollowedBy :: (Stream s m t, Show a) => ParsecT s u m a -> ParsecT s u m () -notFollowedBy p = try (do{ c <- try p; unexpected (show c) } - <|> return () - ) - --- | @manyTill p end@ applies parser @p@ /zero/ or more times until --- parser @end@ succeeds. Returns the list of values returned by @p@. --- This parser can be used to scan comments: --- --- > simpleComment = do{ string "")) --- > } --- --- Note the overlapping parsers @anyChar@ and @string \"-->\"@, and --- therefore the use of the 'try' combinator. - -manyTill :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m end -> ParsecT s u m [a] -manyTill p end = scan - where - scan = do{ end; return [] } - <|> - do{ x <- p; xs <- scan; return (x:xs) } diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/Parsec/Error.hs cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/Parsec/Error.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/Parsec/Error.hs 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/Parsec/Error.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,205 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Text.Parsec.Error --- Copyright : (c) Daan Leijen 1999-2001, (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- Parse errors --- ------------------------------------------------------------------------------ - -module Text.Parsec.Error - ( Message ( SysUnExpect, UnExpect, Expect, Message ) - , messageString - , 
ParseError, errorPos, errorMessages, errorIsUnknown - , showErrorMessages - , newErrorMessage, newErrorUnknown - , addErrorMessage, setErrorPos, setErrorMessage - , mergeError - ) where - -import Data.List ( nub, sort ) - -import Text.Parsec.Pos - --- | This abstract data type represents parse error messages. There are --- four kinds of messages: --- --- > data Message = SysUnExpect String --- > | UnExpect String --- > | Expect String --- > | Message String --- --- The fine distinction between different kinds of parse errors allows --- the system to generate quite good error messages for the user. It --- also allows error messages that are formatted in different --- languages. Each kind of message is generated by different combinators: --- --- * A 'SysUnExpect' message is automatically generated by the --- 'Text.Parsec.Combinator.satisfy' combinator. The argument is the --- unexpected input. --- --- * A 'UnExpect' message is generated by the 'Text.Parsec.Prim.unexpected' --- combinator. The argument describes the --- unexpected item. --- --- * A 'Expect' message is generated by the 'Text.Parsec.Prim.' --- combinator. The argument describes the expected item. --- --- * A 'Message' message is generated by the 'fail' --- combinator. The argument is some general parser message. - -data Message = SysUnExpect !String -- @ library generated unexpect - | UnExpect !String -- @ unexpected something - | Expect !String -- @ expecting something - | Message !String -- @ raw message - -instance Enum Message where - fromEnum (SysUnExpect _) = 0 - fromEnum (UnExpect _) = 1 - fromEnum (Expect _) = 2 - fromEnum (Message _) = 3 - toEnum _ = error "toEnum is undefined for Message" - --- < Return 'True' only when 'compare' would return 'EQ'. - -instance Eq Message where - - m1 == m2 = fromEnum m1 == fromEnum m2 - --- < Compares two error messages without looking at their content. 
Only --- the constructors are compared where: --- --- > 'SysUnExpect' < 'UnExpect' < 'Expect' < 'Message' - -instance Ord Message where - compare msg1 msg2 = compare (fromEnum msg1) (fromEnum msg2) - --- | Extract the message string from an error message - -messageString :: Message -> String -messageString (SysUnExpect s) = s -messageString (UnExpect s) = s -messageString (Expect s) = s -messageString (Message s) = s - --- | The abstract data type @ParseError@ represents parse errors. It --- provides the source position ('SourcePos') of the error --- and a list of error messages ('Message'). A @ParseError@ --- can be returned by the function 'Text.Parsec.Prim.parse'. @ParseError@ is an --- instance of the 'Show' class. - -data ParseError = ParseError !SourcePos [Message] - --- | Extracts the source position from the parse error - -errorPos :: ParseError -> SourcePos -errorPos (ParseError pos _msgs) - = pos - --- | Extracts the list of error messages from the parse error - -errorMessages :: ParseError -> [Message] -errorMessages (ParseError _pos msgs) - = sort msgs - -errorIsUnknown :: ParseError -> Bool -errorIsUnknown (ParseError _pos msgs) - = null msgs - --- < Create parse errors - -newErrorUnknown :: SourcePos -> ParseError -newErrorUnknown pos - = ParseError pos [] - -newErrorMessage :: Message -> SourcePos -> ParseError -newErrorMessage msg pos - = ParseError pos [msg] - -addErrorMessage :: Message -> ParseError -> ParseError -addErrorMessage msg (ParseError pos msgs) - = ParseError pos (msg:msgs) - -setErrorPos :: SourcePos -> ParseError -> ParseError -setErrorPos pos (ParseError _ msgs) - = ParseError pos msgs - -setErrorMessage :: Message -> ParseError -> ParseError -setErrorMessage msg (ParseError pos msgs) - = ParseError pos (msg : filter (msg /=) msgs) - -mergeError :: ParseError -> ParseError -> ParseError -mergeError e1@(ParseError pos1 msgs1) e2@(ParseError pos2 msgs2) - -- prefer meaningful errors - | null msgs2 && not (null msgs1) = e1 - | null 
msgs1 && not (null msgs2) = e2 - | otherwise - = case pos1 `compare` pos2 of - -- select the longest match - EQ -> ParseError pos1 (msgs1 ++ msgs2) - GT -> e1 - LT -> e2 - -instance Show ParseError where - show err - = show (errorPos err) ++ ":" ++ - showErrorMessages "or" "unknown parse error" - "expecting" "unexpected" "end of input" - (errorMessages err) - --- Language independent show function - --- TODO --- < The standard function for showing error messages. Formats a list of --- error messages in English. This function is used in the |Show| --- instance of |ParseError <#ParseError>|. The resulting string will be --- formatted like: --- --- |unexpected /{The first UnExpect or a SysUnExpect message}/; --- expecting /{comma separated list of Expect messages}/; --- /{comma separated list of Message messages}/ - -showErrorMessages :: - String -> String -> String -> String -> String -> [Message] -> String -showErrorMessages msgOr msgUnknown msgExpecting msgUnExpected msgEndOfInput msgs - | null msgs = msgUnknown - | otherwise = concat $ map ("\n"++) $ clean $ - [showSysUnExpect,showUnExpect,showExpect,showMessages] - where - (sysUnExpect,msgs1) = span ((SysUnExpect "") ==) msgs - (unExpect,msgs2) = span ((UnExpect "") ==) msgs1 - (expect,messages) = span ((Expect "") ==) msgs2 - - showExpect = showMany msgExpecting expect - showUnExpect = showMany msgUnExpected unExpect - showSysUnExpect | not (null unExpect) || - null sysUnExpect = "" - | null firstMsg = msgUnExpected ++ " " ++ msgEndOfInput - | otherwise = msgUnExpected ++ " " ++ firstMsg - where - firstMsg = messageString (head sysUnExpect) - - showMessages = showMany "" messages - - -- helpers - showMany pre msgs = case clean (map messageString msgs) of - [] -> "" - ms | null pre -> commasOr ms - | otherwise -> pre ++ " " ++ commasOr ms - - commasOr [] = "" - commasOr [m] = m - commasOr ms = commaSep (init ms) ++ " " ++ msgOr ++ " " ++ last ms - - commaSep = seperate ", " . 
clean - - seperate _ [] = "" - seperate _ [m] = m - seperate sep (m:ms) = m ++ sep ++ seperate sep ms - - clean = nub . filter (not . null) diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/Parsec/Expr.hs cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/Parsec/Expr.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/Parsec/Expr.hs 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/Parsec/Expr.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,166 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Text.Parsec.Expr --- Copyright : (c) Daan Leijen 1999-2001, (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : non-portable --- --- A helper module to parse \"expressions\". --- Builds a parser given a table of operators and associativities. --- ------------------------------------------------------------------------------ - -module Text.Parsec.Expr - ( Assoc(..), Operator(..), OperatorTable - , buildExpressionParser - ) where - -import Text.Parsec.Prim -import Text.Parsec.Combinator - ------------------------------------------------------------ --- Assoc and OperatorTable ------------------------------------------------------------ - --- | This data type specifies the associativity of operators: left, right --- or none. - -data Assoc = AssocNone - | AssocLeft - | AssocRight - --- | This data type specifies operators that work on values of type @a@. --- An operator is either binary infix or unary prefix or postfix. A --- binary operator has also an associated associativity. - -data Operator s u m a = Infix (ParsecT s u m (a -> a -> a)) Assoc - | Prefix (ParsecT s u m (a -> a)) - | Postfix (ParsecT s u m (a -> a)) - --- | An @OperatorTable s u m a@ is a list of @Operator s u m a@ --- lists. The list is ordered in descending --- precedence. 
All operators in one list have the same precedence (but --- may have a different associativity). - -type OperatorTable s u m a = [[Operator s u m a]] - ------------------------------------------------------------ --- Convert an OperatorTable and basic term parser into --- a full fledged expression parser ------------------------------------------------------------ - --- | @buildExpressionParser table term@ builds an expression parser for --- terms @term@ with operators from @table@, taking the associativity --- and precedence specified in @table@ into account. Prefix and postfix --- operators of the same precedence can only occur once (i.e. @--2@ is --- not allowed if @-@ is prefix negate). Prefix and postfix operators --- of the same precedence associate to the left (i.e. if @++@ is --- postfix increment, than @-2++@ equals @-1@, not @-3@). --- --- The @buildExpressionParser@ takes care of all the complexity --- involved in building expression parser. Here is an example of an --- expression parser that handles prefix signs, postfix increment and --- basic arithmetic. 
--- --- > expr = buildExpressionParser table term --- > "expression" --- > --- > term = parens expr --- > <|> natural --- > "simple expression" --- > --- > table = [ [prefix "-" negate, prefix "+" id ] --- > , [postfix "++" (+1)] --- > , [binary "*" (*) AssocLeft, binary "/" (div) AssocLeft ] --- > , [binary "+" (+) AssocLeft, binary "-" (-) AssocLeft ] --- > ] --- > --- > binary name fun assoc = Infix (do{ reservedOp name; return fun }) assoc --- > prefix name fun = Prefix (do{ reservedOp name; return fun }) --- > postfix name fun = Postfix (do{ reservedOp name; return fun }) - -buildExpressionParser :: (Stream s m t) - => OperatorTable s u m a - -> ParsecT s u m a - -> ParsecT s u m a -buildExpressionParser operators simpleExpr - = foldl (makeParser) simpleExpr operators - where - makeParser term ops - = let (rassoc,lassoc,nassoc - ,prefix,postfix) = foldr splitOp ([],[],[],[],[]) ops - - rassocOp = choice rassoc - lassocOp = choice lassoc - nassocOp = choice nassoc - prefixOp = choice prefix "" - postfixOp = choice postfix "" - - ambigious assoc op= try $ - do{ op; fail ("ambiguous use of a " ++ assoc - ++ " associative operator") - } - - ambigiousRight = ambigious "right" rassocOp - ambigiousLeft = ambigious "left" lassocOp - ambigiousNon = ambigious "non" nassocOp - - termP = do{ pre <- prefixP - ; x <- term - ; post <- postfixP - ; return (post (pre x)) - } - - postfixP = postfixOp <|> return id - - prefixP = prefixOp <|> return id - - rassocP x = do{ f <- rassocOp - ; y <- do{ z <- termP; rassocP1 z } - ; return (f x y) - } - <|> ambigiousLeft - <|> ambigiousNon - -- <|> return x - - rassocP1 x = rassocP x <|> return x - - lassocP x = do{ f <- lassocOp - ; y <- termP - ; lassocP1 (f x y) - } - <|> ambigiousRight - <|> ambigiousNon - -- <|> return x - - lassocP1 x = lassocP x <|> return x - - nassocP x = do{ f <- nassocOp - ; y <- termP - ; ambigiousRight - <|> ambigiousLeft - <|> ambigiousNon - <|> return (f x y) - } - -- <|> return x - - in do{ x <- termP - 
; rassocP x <|> lassocP x <|> nassocP x <|> return x - "operator" - } - - - splitOp (Infix op assoc) (rassoc,lassoc,nassoc,prefix,postfix) - = case assoc of - AssocNone -> (rassoc,lassoc,op:nassoc,prefix,postfix) - AssocLeft -> (rassoc,op:lassoc,nassoc,prefix,postfix) - AssocRight -> (op:rassoc,lassoc,nassoc,prefix,postfix) - - splitOp (Prefix op) (rassoc,lassoc,nassoc,prefix,postfix) - = (rassoc,lassoc,nassoc,op:prefix,postfix) - - splitOp (Postfix op) (rassoc,lassoc,nassoc,prefix,postfix) - = (rassoc,lassoc,nassoc,prefix,op:postfix) diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/Parsec/Language.hs cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/Parsec/Language.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/Parsec/Language.hs 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/Parsec/Language.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,150 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Text.Parsec.Language --- Copyright : (c) Daan Leijen 1999-2001, (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : non-portable (uses non-portable module Text.Parsec.Token) --- --- A helper module that defines some language definitions that can be used --- to instantiate a token parser (see "Text.Parsec.Token"). --- ------------------------------------------------------------------------------ - -module Text.Parsec.Language - ( haskellDef, haskell - , mondrianDef, mondrian - , emptyDef - , haskellStyle - , javaStyle - , LanguageDef - , GenLanguageDef - ) where - -import Text.Parsec -import Text.Parsec.Token - ------------------------------------------------------------ --- Styles: haskellStyle, javaStyle ------------------------------------------------------------ - --- | This is a minimal token definition for Haskell style languages. 
It --- defines the style of comments, valid identifiers and case --- sensitivity. It does not define any reserved words or operators. - -haskellStyle :: LanguageDef st -haskellStyle = emptyDef - { commentStart = "{-" - , commentEnd = "-}" - , commentLine = "--" - , nestedComments = True - , identStart = letter - , identLetter = alphaNum <|> oneOf "_'" - , opStart = opLetter haskellStyle - , opLetter = oneOf ":!#$%&*+./<=>?@\\^|-~" - , reservedOpNames= [] - , reservedNames = [] - , caseSensitive = True - } - --- | This is a minimal token definition for Java style languages. It --- defines the style of comments, valid identifiers and case --- sensitivity. It does not define any reserved words or operators. - -javaStyle :: LanguageDef st -javaStyle = emptyDef - { commentStart = "/*" - , commentEnd = "*/" - , commentLine = "//" - , nestedComments = True - , identStart = letter - , identLetter = alphaNum <|> oneOf "_'" - , reservedNames = [] - , reservedOpNames= [] - , caseSensitive = False - } - ------------------------------------------------------------ --- minimal language definition --------------------------------------------------------- - --- TODO: This seems wrong --- < This is the most minimal token definition. It is recommended to use --- this definition as the basis for other definitions. @emptyDef@ has --- no reserved names or operators, is case sensitive and doesn't accept --- comments, identifiers or operators. 
- -emptyDef :: LanguageDef st -emptyDef = LanguageDef - { commentStart = "" - , commentEnd = "" - , commentLine = "" - , nestedComments = True - , identStart = letter <|> char '_' - , identLetter = alphaNum <|> oneOf "_'" - , opStart = opLetter emptyDef - , opLetter = oneOf ":!#$%&*+./<=>?@\\^|-~" - , reservedOpNames= [] - , reservedNames = [] - , caseSensitive = True - } - - - ------------------------------------------------------------ --- Haskell ------------------------------------------------------------ - --- | A lexer for the haskell language. - -haskell :: TokenParser st -haskell = makeTokenParser haskellDef - --- | The language definition for the Haskell language. - -haskellDef :: LanguageDef st -haskellDef = haskell98Def - { identLetter = identLetter haskell98Def <|> char '#' - , reservedNames = reservedNames haskell98Def ++ - ["foreign","import","export","primitive" - ,"_ccall_","_casm_" - ,"forall" - ] - } - --- | The language definition for the language Haskell98. - -haskell98Def :: LanguageDef st -haskell98Def = haskellStyle - { reservedOpNames= ["::","..","=","\\","|","<-","->","@","~","=>"] - , reservedNames = ["let","in","case","of","if","then","else", - "data","type", - "class","default","deriving","do","import", - "infix","infixl","infixr","instance","module", - "newtype","where", - "primitive" - -- "as","qualified","hiding" - ] - } - - ------------------------------------------------------------ --- Mondrian ------------------------------------------------------------ - --- | A lexer for the mondrian language. - -mondrian :: TokenParser st -mondrian = makeTokenParser mondrianDef - --- | The language definition for the language Mondrian. 
- -mondrianDef :: LanguageDef st -mondrianDef = javaStyle - { reservedNames = [ "case", "class", "default", "extends" - , "import", "in", "let", "new", "of", "package" - ] - , caseSensitive = True - } diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/Parsec/Perm.hs cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/Parsec/Perm.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/Parsec/Perm.hs 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/Parsec/Perm.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,181 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Text.Parsec.Perm --- Copyright : (c) Daan Leijen 1999-2001, (c) Paolo Martini 2007 --- License : BSD-style (see the file libraries/parsec/LICENSE) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : non-portable (uses existentially quantified data constructors) --- --- This module implements permutation parsers. The algorithm used --- is fairly complex since we push the type system to its limits :-) --- The algorithm is described in: --- --- /Parsing Permutation Phrases,/ --- by Arthur Baars, Andres Loh and Doaitse Swierstra. --- Published as a functional pearl at the Haskell Workshop 2001. 
--- ------------------------------------------------------------------------------ - -{-# LANGUAGE ExistentialQuantification #-} - -module Text.Parsec.Perm - ( PermParser - , StreamPermParser -- abstract - - , permute - , (<||>), (<$$>) - , (<|?>), (<$?>) - ) where - -import Text.Parsec - -import Control.Monad.Identity - -infixl 1 <||>, <|?> -infixl 2 <$$>, <$?> - - -{--------------------------------------------------------------- - test -- parse a permutation of - * an optional string of 'a's - * a required 'b' - * an optional 'c' ----------------------------------------------------------------} -{- -test input - = parse (do{ x <- ptest; eof; return x }) "" input - -ptest :: Parser (String,Char,Char) -ptest - = permute $ - (,,) <$?> ("",many1 (char 'a')) - <||> char 'b' - <|?> ('_',char 'c') --} - -{--------------------------------------------------------------- - Building a permutation parser ----------------------------------------------------------------} - --- | The expression @perm \<||> p@ adds parser @p@ to the permutation --- parser @perm@. The parser @p@ is not allowed to accept empty input - --- use the optional combinator ('<|?>') instead. Returns a --- new permutation parser that includes @p@. - -(<||>) :: (Stream s Identity tok) => StreamPermParser s st (a -> b) -> Parsec s st a -> StreamPermParser s st b -(<||>) perm p = add perm p - --- | The expression @f \<$$> p@ creates a fresh permutation parser --- consisting of parser @p@. The the final result of the permutation --- parser is the function @f@ applied to the return value of @p@. The --- parser @p@ is not allowed to accept empty input - use the optional --- combinator ('<$?>') instead. --- --- If the function @f@ takes more than one parameter, the type variable --- @b@ is instantiated to a functional type which combines nicely with --- the adds parser @p@ to the ('<||>') combinator. 
This --- results in stylized code where a permutation parser starts with a --- combining function @f@ followed by the parsers. The function @f@ --- gets its parameters in the order in which the parsers are specified, --- but actual input can be in any order. - -(<$$>) :: (Stream s Identity tok) => (a -> b) -> Parsec s st a -> StreamPermParser s st b -(<$$>) f p = newperm f <||> p - --- | The expression @perm \<||> (x,p)@ adds parser @p@ to the --- permutation parser @perm@. The parser @p@ is optional - if it can --- not be applied, the default value @x@ will be used instead. Returns --- a new permutation parser that includes the optional parser @p@. - -(<|?>) :: (Stream s Identity tok) => StreamPermParser s st (a -> b) -> (a, Parsec s st a) -> StreamPermParser s st b -(<|?>) perm (x,p) = addopt perm x p - --- | The expression @f \<$?> (x,p)@ creates a fresh permutation parser --- consisting of parser @p@. The the final result of the permutation --- parser is the function @f@ applied to the return value of @p@. The --- parser @p@ is optional - if it can not be applied, the default value --- @x@ will be used instead. - -(<$?>) :: (Stream s Identity tok) => (a -> b) -> (a, Parsec s st a) -> StreamPermParser s st b -(<$?>) f (x,p) = newperm f <|?> (x,p) - -{--------------------------------------------------------------- - The permutation tree ----------------------------------------------------------------} - --- | Provided for backwards compatibility. The tok type is ignored. - -type PermParser tok st a = StreamPermParser String st a - --- | The type @StreamPermParser s st a@ denotes a permutation parser that, --- when converted by the 'permute' function, parses --- @s@ streams with user state @st@ and returns a value of --- type @a@ on success. --- --- Normally, a permutation parser is first build with special operators --- like ('<||>') and than transformed into a normal parser --- using 'permute'. 
- -data StreamPermParser s st a = Perm (Maybe a) [StreamBranch s st a] - --- type Branch st a = StreamBranch String st a - -data StreamBranch s st a = forall b. Branch (StreamPermParser s st (b -> a)) (Parsec s st b) - --- | The parser @permute perm@ parses a permutation of parser described --- by @perm@. For example, suppose we want to parse a permutation of: --- an optional string of @a@'s, the character @b@ and an optional @c@. --- This can be described by: --- --- > test = permute (tuple <$?> ("",many1 (char 'a')) --- > <||> char 'b' --- > <|?> ('_',char 'c')) --- > where --- > tuple a b c = (a,b,c) - --- transform a permutation tree into a normal parser -permute :: (Stream s Identity tok) => StreamPermParser s st a -> Parsec s st a -permute (Perm def xs) - = choice (map branch xs ++ empty) - where - empty - = case def of - Nothing -> [] - Just x -> [return x] - - branch (Branch perm p) - = do{ x <- p - ; f <- permute perm - ; return (f x) - } - --- build permutation trees -newperm :: (Stream s Identity tok) => (a -> b) -> StreamPermParser s st (a -> b) -newperm f - = Perm (Just f) [] - -add :: (Stream s Identity tok) => StreamPermParser s st (a -> b) -> Parsec s st a -> StreamPermParser s st b -add perm@(Perm _mf fs) p - = Perm Nothing (first:map insert fs) - where - first = Branch perm p - insert (Branch perm' p') - = Branch (add (mapPerms flip perm') p) p' - -addopt :: (Stream s Identity tok) => StreamPermParser s st (a -> b) -> a -> Parsec s st a -> StreamPermParser s st b -addopt perm@(Perm mf fs) x p - = Perm (fmap ($ x) mf) (first:map insert fs) - where - first = Branch perm p - insert (Branch perm' p') - = Branch (addopt (mapPerms flip perm') x p) p' - - -mapPerms :: (Stream s Identity tok) => (a -> b) -> StreamPermParser s st a -> StreamPermParser s st b -mapPerms f (Perm x xs) - = Perm (fmap f x) (map mapBranch xs) - where - mapBranch (Branch perm p) - = Branch (mapPerms (f.) 
perm) p diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/Parsec/Pos.hs cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/Parsec/Pos.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/Parsec/Pos.hs 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/Parsec/Pos.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,130 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Text.Parsec.Pos --- Copyright : (c) Daan Leijen 1999-2001, (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- Textual source positions. --- ------------------------------------------------------------------------------ - -module Text.Parsec.Pos - ( SourceName, Line, Column - , SourcePos - , sourceLine, sourceColumn, sourceName - , incSourceLine, incSourceColumn - , setSourceLine, setSourceColumn, setSourceName - , newPos, initialPos - , updatePosChar, updatePosString - ) where - -#ifdef BASE3 -import Data.Generics -#else -import Data.Data (Data) -import Data.Typeable (Typeable) -#endif - --- < Source positions: a file name, a line and a column --- upper left is (1,1) - -type SourceName = String -type Line = Int -type Column = Int - --- | The abstract data type @SourcePos@ represents source positions. It --- contains the name of the source (i.e. file name), a line number and --- a column number. @SourcePos@ is an instance of the 'Show', 'Eq' and --- 'Ord' class. - -data SourcePos = SourcePos SourceName !Line !Column - deriving ( Eq, Ord, Data, Typeable) - --- | Create a new 'SourcePos' with the given source name, --- line number and column number. - -newPos :: SourceName -> Line -> Column -> SourcePos -newPos name line column - = SourcePos name line column - --- | Create a new 'SourcePos' with the given source name, --- and line number and column number set to 1, the upper left. 
- -initialPos :: SourceName -> SourcePos -initialPos name - = newPos name 1 1 - --- | Extracts the name of the source from a source position. - -sourceName :: SourcePos -> SourceName -sourceName (SourcePos name _line _column) = name - --- | Extracts the line number from a source position. - -sourceLine :: SourcePos -> Line -sourceLine (SourcePos _name line _column) = line - --- | Extracts the column number from a source position. - -sourceColumn :: SourcePos -> Column -sourceColumn (SourcePos _name _line column) = column - --- | Increments the line number of a source position. - -incSourceLine :: SourcePos -> Line -> SourcePos -incSourceLine (SourcePos name line column) n = SourcePos name (line+n) column - --- | Increments the column number of a source position. - -incSourceColumn :: SourcePos -> Column -> SourcePos -incSourceColumn (SourcePos name line column) n = SourcePos name line (column+n) - --- | Set the name of the source. - -setSourceName :: SourcePos -> SourceName -> SourcePos -setSourceName (SourcePos _name line column) n = SourcePos n line column - --- | Set the line number of a source position. - -setSourceLine :: SourcePos -> Line -> SourcePos -setSourceLine (SourcePos name _line column) n = SourcePos name n column - --- | Set the column number of a source position. - -setSourceColumn :: SourcePos -> Column -> SourcePos -setSourceColumn (SourcePos name line _column) n = SourcePos name line n - --- | The expression @updatePosString pos s@ updates the source position --- @pos@ by calling 'updatePosChar' on every character in @s@, ie. --- @foldl updatePosChar pos string@. - -updatePosString :: SourcePos -> String -> SourcePos -updatePosString pos string - = foldl updatePosChar pos string - --- | Update a source position given a character. If the character is a --- newline (\'\\n\') or carriage return (\'\\r\') the line number is --- incremented by 1. If the character is a tab (\'\t\') the column --- number is incremented to the nearest 8'th column, ie. 
@column + 8 - --- ((column-1) \`mod\` 8)@. In all other cases, the column is --- incremented by 1. - -updatePosChar :: SourcePos -> Char -> SourcePos -updatePosChar (SourcePos name line column) c - = case c of - '\n' -> SourcePos name (line+1) 1 - '\t' -> SourcePos name line (column + 8 - ((column-1) `mod` 8)) - _ -> SourcePos name line (column + 1) - -instance Show SourcePos where - show (SourcePos name line column) - | null name = showLineColumn - | otherwise = "\"" ++ name ++ "\" " ++ showLineColumn - where - showLineColumn = "(line " ++ show line ++ - ", column " ++ show column ++ - ")" diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/Parsec/Prim.hs cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/Parsec/Prim.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/Parsec/Prim.hs 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/Parsec/Prim.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,766 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Text.Parsec.Prim --- Copyright : (c) Daan Leijen 1999-2001, (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- The primitive parser combinators. --- ------------------------------------------------------------------------------ - -{-# LANGUAGE MultiParamTypeClasses, FunctionalDependencies, FlexibleContexts, - UndecidableInstances #-} -{-# OPTIONS_HADDOCK not-home #-} - -module Text.Parsec.Prim - ( unknownError - , sysUnExpectError - , unexpected - , ParsecT - , runParsecT - , mkPT - , Parsec - , Consumed(..) - , Reply(..) - , State(..) - , parsecMap - , parserReturn - , parserBind - , mergeErrorReply - , parserFail - , parserZero - , parserPlus - , () - , (<|>) - , label - , labels - , lookAhead - , Stream(..) 
- , tokens - , try - , token - , tokenPrim - , tokenPrimEx - , many - , skipMany - , manyAccum - , runPT - , runP - , runParserT - , runParser - , parse - , parseTest - , getPosition - , getInput - , setPosition - , setInput - , getParserState - , setParserState - , updateParserState - , getState - , putState - , modifyState - , setState - , updateState - ) where - - -import qualified Data.ByteString.Lazy.Char8 as CL -import qualified Data.ByteString.Char8 as C - -import qualified Data.Text as Text -import qualified Data.Text.Lazy as TextL - -import qualified Control.Applicative as Applicative ( Applicative(..), Alternative(..) ) -import Control.Monad() -import Control.Monad.Trans -import Control.Monad.Identity - -import Control.Monad.Reader.Class -import Control.Monad.State.Class -import Control.Monad.Cont.Class -import Control.Monad.Error.Class - -import Text.Parsec.Pos -import Text.Parsec.Error - -unknownError :: State s u -> ParseError -unknownError state = newErrorUnknown (statePos state) - -sysUnExpectError :: String -> SourcePos -> Reply s u a -sysUnExpectError msg pos = Error (newErrorMessage (SysUnExpect msg) pos) - --- | The parser @unexpected msg@ always fails with an unexpected error --- message @msg@ without consuming any input. --- --- The parsers 'fail', ('') and @unexpected@ are the three parsers --- used to generate error messages. Of these, only ('') is commonly --- used. For an example of the use of @unexpected@, see the definition --- of 'Text.Parsec.Combinator.notFollowedBy'. - -unexpected :: (Stream s m t) => String -> ParsecT s u m a -unexpected msg - = ParsecT $ \s _ _ _ eerr -> - eerr $ newErrorMessage (UnExpect msg) (statePos s) - --- | ParserT monad transformer and Parser type - --- | @ParsecT s u m a@ is a parser with stream type @s@, user state type @u@, --- underlying monad @m@ and return type @a@. Parsec is strict in the user state. 
--- If this is undesirable, simply used a data type like @data Box a = Box a@ and --- the state type @Box YourStateType@ to add a level of indirection. - -newtype ParsecT s u m a - = ParsecT {unParser :: forall b . - State s u - -> (a -> State s u -> ParseError -> m b) -- consumed ok - -> (ParseError -> m b) -- consumed err - -> (a -> State s u -> ParseError -> m b) -- empty ok - -> (ParseError -> m b) -- empty err - -> m b - } - --- | Low-level unpacking of the ParsecT type. To run your parser, please look to --- runPT, runP, runParserT, runParser and other such functions. -runParsecT :: Monad m => ParsecT s u m a -> State s u -> m (Consumed (m (Reply s u a))) -runParsecT p s = unParser p s cok cerr eok eerr - where cok a s' err = return . Consumed . return $ Ok a s' err - cerr err = return . Consumed . return $ Error err - eok a s' err = return . Empty . return $ Ok a s' err - eerr err = return . Empty . return $ Error err - --- | Low-level creation of the ParsecT type. You really shouldn't have to do this. 
-mkPT :: Monad m => (State s u -> m (Consumed (m (Reply s u a)))) -> ParsecT s u m a -mkPT k = ParsecT $ \s cok cerr eok eerr -> do - cons <- k s - case cons of - Consumed mrep -> do - rep <- mrep - case rep of - Ok x s' err -> cok x s' err - Error err -> cerr err - Empty mrep -> do - rep <- mrep - case rep of - Ok x s' err -> eok x s' err - Error err -> eerr err - -type Parsec s u = ParsecT s u Identity - -data Consumed a = Consumed a - | Empty !a - -data Reply s u a = Ok a !(State s u) ParseError - | Error ParseError - -data State s u = State { - stateInput :: s, - statePos :: !SourcePos, - stateUser :: !u - } - -instance Functor Consumed where - fmap f (Consumed x) = Consumed (f x) - fmap f (Empty x) = Empty (f x) - -instance Functor (Reply s u) where - fmap f (Ok x s e) = Ok (f x) s e - fmap _ (Error e) = Error e -- XXX - -instance Functor (ParsecT s u m) where - fmap f p = parsecMap f p - -parsecMap :: (a -> b) -> ParsecT s u m a -> ParsecT s u m b -parsecMap f p - = ParsecT $ \s cok cerr eok eerr -> - unParser p s (cok . f) cerr (eok . f) eerr - -instance Applicative.Applicative (ParsecT s u m) where - pure = return - (<*>) = ap -- TODO: Can this be optimized? - -instance Applicative.Alternative (ParsecT s u m) where - empty = mzero - (<|>) = mplus - -instance Monad (ParsecT s u m) where - return x = parserReturn x - p >>= f = parserBind p f - fail msg = parserFail msg - -instance (MonadIO m) => MonadIO (ParsecT s u m) where - liftIO = lift . liftIO - -instance (MonadReader r m) => MonadReader r (ParsecT s u m) where - ask = lift ask - local f p = mkPT $ \s -> local f (runParsecT p s) - --- I'm presuming the user might want a separate, non-backtracking --- state aside from the Parsec user state. -instance (MonadState s m) => MonadState s (ParsecT s' u m) where - get = lift get - put = lift . 
put - -instance (MonadCont m) => MonadCont (ParsecT s u m) where - callCC f = mkPT $ \s -> - callCC $ \c -> - runParsecT (f (\a -> mkPT $ \s' -> c (pack s' a))) s - - where pack s a= Empty $ return (Ok a s (unknownError s)) - -instance (MonadError e m) => MonadError e (ParsecT s u m) where - throwError = lift . throwError - p `catchError` h = mkPT $ \s -> - runParsecT p s `catchError` \e -> - runParsecT (h e) s - -parserReturn :: a -> ParsecT s u m a -parserReturn x - = ParsecT $ \s _ _ eok _ -> - eok x s (unknownError s) - -parserBind :: ParsecT s u m a -> (a -> ParsecT s u m b) -> ParsecT s u m b -{-# INLINE parserBind #-} -parserBind m k - = ParsecT $ \s cok cerr eok eerr -> - let - -- consumed-okay case for m - mcok x s err = - let - -- if (k x) consumes, those go straigt up - pcok = cok - pcerr = cerr - - -- if (k x) doesn't consume input, but is okay, - -- we still return in the consumed continuation - peok x s err' = cok x s (mergeError err err') - - -- if (k x) doesn't consume input, but errors, - -- we return the error in the 'consumed-error' - -- continuation - peerr err' = cerr (mergeError err err') - in unParser (k x) s pcok pcerr peok peerr - - -- empty-ok case for m - meok x s err = - let - -- in these cases, (k x) can return as empty - pcok = cok - peok x s err' = eok x s (mergeError err err') - pcerr = cerr - peerr err' = eerr (mergeError err err') - in unParser (k x) s pcok pcerr peok peerr - -- consumed-error case for m - mcerr = cerr - - -- empty-error case for m - meerr = eerr - - in unParser m s mcok mcerr meok meerr - - -mergeErrorReply :: ParseError -> Reply s u a -> Reply s u a -mergeErrorReply err1 reply -- XXX where to put it? 
- = case reply of - Ok x state err2 -> Ok x state (mergeError err1 err2) - Error err2 -> Error (mergeError err1 err2) - -parserFail :: String -> ParsecT s u m a -parserFail msg - = ParsecT $ \s _ _ _ eerr -> - eerr $ newErrorMessage (Message msg) (statePos s) - -instance MonadPlus (ParsecT s u m) where - mzero = parserZero - mplus p1 p2 = parserPlus p1 p2 - --- | @parserZero@ always fails without consuming any input. @parserZero@ is defined --- equal to the 'mzero' member of the 'MonadPlus' class and to the 'Control.Applicative.empty' member --- of the 'Control.Applicative.Applicative' class. - -parserZero :: ParsecT s u m a -parserZero - = ParsecT $ \s _ _ _ eerr -> - eerr $ unknownError s - -parserPlus :: ParsecT s u m a -> ParsecT s u m a -> ParsecT s u m a -{-# INLINE parserPlus #-} -parserPlus m n - = ParsecT $ \s cok cerr eok eerr -> - let - meerr err = - let - neok y s' err' = eok y s' (mergeError err err') - neerr err' = eerr $ mergeError err err' - in unParser n s cok cerr neok neerr - in unParser m s cok cerr eok meerr - -instance MonadTrans (ParsecT s u) where - lift amb = ParsecT $ \s _ _ eok _ -> do - a <- amb - eok a s $ unknownError s - -infix 0 -infixr 1 <|> - --- | The parser @p \ msg@ behaves as parser @p@, but whenever the --- parser @p@ fails /without consuming any input/, it replaces expect --- error messages with the expect error message @msg@. --- --- This is normally used at the end of a set alternatives where we want --- to return an error message in terms of a higher level construct --- rather than returning all possible characters. For example, if the --- @expr@ parser from the 'try' example would fail, the error --- message is: '...: expecting expression'. Without the @(\)@ --- combinator, the message would be like '...: expecting \"let\" or --- letter', which is less friendly. - -() :: (ParsecT s u m a) -> String -> (ParsecT s u m a) -p msg = label p msg - --- | This combinator implements choice. 
The parser @p \<|> q@ first --- applies @p@. If it succeeds, the value of @p@ is returned. If @p@ --- fails /without consuming any input/, parser @q@ is tried. This --- combinator is defined equal to the 'mplus' member of the 'MonadPlus' --- class and the ('Control.Applicative.<|>') member of 'Control.Applicative.Alternative'. --- --- The parser is called /predictive/ since @q@ is only tried when --- parser @p@ didn't consume any input (i.e.. the look ahead is 1). --- This non-backtracking behaviour allows for both an efficient --- implementation of the parser combinators and the generation of good --- error messages. - -(<|>) :: (ParsecT s u m a) -> (ParsecT s u m a) -> (ParsecT s u m a) -p1 <|> p2 = mplus p1 p2 - --- | A synonym for @@, but as a function instead of an operator. -label :: ParsecT s u m a -> String -> ParsecT s u m a -label p msg - = labels p [msg] - -labels :: ParsecT s u m a -> [String] -> ParsecT s u m a -labels p msgs = - ParsecT $ \s cok cerr eok eerr -> - let eok' x s' error = eok x s' $ if errorIsUnknown error - then error - else setExpectErrors error msgs - eerr' err = eerr $ setExpectErrors err msgs - - in unParser p s cok cerr eok' eerr' - - where - setExpectErrors err [] = setErrorMessage (Expect "") err - setExpectErrors err [msg] = setErrorMessage (Expect msg) err - setExpectErrors err (msg:msgs) - = foldr (\msg' err' -> addErrorMessage (Expect msg') err') - (setErrorMessage (Expect msg) err) msgs - --- TODO: There should be a stronger statement that can be made about this - --- | An instance of @Stream@ has stream type @s@, underlying monad @m@ and token type @t@ determined by the stream --- --- Some rough guidelines for a \"correct\" instance of Stream: --- --- * unfoldM uncons gives the [t] corresponding to the stream --- --- * A @Stream@ instance is responsible for maintaining the \"position within the stream\" in the stream state @s@. This is trivial unless you are using the monad in a non-trivial way. 
- -class (Monad m) => Stream s m t | s -> t where - uncons :: s -> m (Maybe (t,s)) - -instance (Monad m) => Stream [tok] m tok where - uncons [] = return $ Nothing - uncons (t:ts) = return $ Just (t,ts) - {-# INLINE uncons #-} - - -instance (Monad m) => Stream CL.ByteString m Char where - uncons = return . CL.uncons - -instance (Monad m) => Stream C.ByteString m Char where - uncons = return . C.uncons - -instance (Monad m) => Stream Text.Text m Char where - uncons = return . Text.uncons - {-# INLINE uncons #-} - -instance (Monad m) => Stream TextL.Text m Char where - uncons = return . TextL.uncons - {-# INLINE uncons #-} - - -tokens :: (Stream s m t, Eq t) - => ([t] -> String) -- Pretty print a list of tokens - -> (SourcePos -> [t] -> SourcePos) - -> [t] -- List of tokens to parse - -> ParsecT s u m [t] -{-# INLINE tokens #-} -tokens _ _ [] - = ParsecT $ \s _ _ eok _ -> - eok [] s $ unknownError s -tokens showTokens nextposs tts@(tok:toks) - = ParsecT $ \(State input pos u) cok cerr eok eerr -> - let - errEof = (setErrorMessage (Expect (showTokens tts)) - (newErrorMessage (SysUnExpect "") pos)) - - errExpect x = (setErrorMessage (Expect (showTokens tts)) - (newErrorMessage (SysUnExpect (showTokens [x])) pos)) - - walk [] rs = ok rs - walk (t:ts) rs = do - sr <- uncons rs - case sr of - Nothing -> cerr $ errEof - Just (x,xs) | t == x -> walk ts xs - | otherwise -> cerr $ errExpect x - - ok rs = let pos' = nextposs pos tts - s' = State rs pos' u - in cok tts s' (newErrorUnknown pos') - in do - sr <- uncons input - case sr of - Nothing -> eerr $ errEof - Just (x,xs) - | tok == x -> walk toks xs - | otherwise -> eerr $ errExpect x - --- | The parser @try p@ behaves like parser @p@, except that it --- pretends that it hasn't consumed any input when an error occurs. --- --- This combinator is used whenever arbitrary look ahead is needed. 
--- Since it pretends that it hasn't consumed any input when @p@ fails, --- the ('<|>') combinator will try its second alternative even when the --- first parser failed while consuming input. --- --- The @try@ combinator can for example be used to distinguish --- identifiers and reserved words. Both reserved words and identifiers --- are a sequence of letters. Whenever we expect a certain reserved --- word where we can also expect an identifier we have to use the @try@ --- combinator. Suppose we write: --- --- > expr = letExpr <|> identifier "expression" --- > --- > letExpr = do{ string "let"; ... } --- > identifier = many1 letter --- --- If the user writes \"lexical\", the parser fails with: @unexpected --- \'x\', expecting \'t\' in \"let\"@. Indeed, since the ('<|>') combinator --- only tries alternatives when the first alternative hasn't consumed --- input, the @identifier@ parser is never tried (because the prefix --- \"le\" of the @string \"let\"@ parser is already consumed). The --- right behaviour can be obtained by adding the @try@ combinator: --- --- > expr = letExpr <|> identifier "expression" --- > --- > letExpr = do{ try (string "let"); ... } --- > identifier = many1 letter - -try :: ParsecT s u m a -> ParsecT s u m a -try p = - ParsecT $ \s cok _ eok eerr -> - unParser p s cok eerr eok eerr - --- | @lookAhead p@ parses @p@ without consuming any input. --- --- If @p@ fails and consumes some input, so does @lookAhead@. Combine with 'try' --- if this is undesirable. - -lookAhead :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m a -lookAhead p = - ParsecT $ \s _ cerr eok eerr -> do - let eok' a _ _ = eok a s (newErrorUnknown (statePos s)) - unParser p s eok' cerr eok' eerr - --- | The parser @token showTok posFromTok testTok@ accepts a token @t@ --- with result @x@ when the function @testTok t@ returns @'Just' x@. The --- source position of the @t@ should be returned by @posFromTok t@ and --- the token can be shown using @showTok t@. 
--- --- This combinator is expressed in terms of 'tokenPrim'. --- It is used to accept user defined token streams. For example, --- suppose that we have a stream of basic tokens tupled with source --- positions. We can than define a parser that accepts single tokens as: --- --- > mytoken x --- > = token showTok posFromTok testTok --- > where --- > showTok (pos,t) = show t --- > posFromTok (pos,t) = pos --- > testTok (pos,t) = if x == t then Just t else Nothing - -token :: (Stream s Identity t) - => (t -> String) -- ^ Token pretty-printing function. - -> (t -> SourcePos) -- ^ Computes the position of a token. - -> (t -> Maybe a) -- ^ Matching function for the token to parse. - -> Parsec s u a -token showToken tokpos test = tokenPrim showToken nextpos test - where - nextpos _ tok ts = case runIdentity (uncons ts) of - Nothing -> tokpos tok - Just (tok',_) -> tokpos tok' - --- | The parser @tokenPrim showTok nextPos testTok@ accepts a token @t@ --- with result @x@ when the function @testTok t@ returns @'Just' x@. The --- token can be shown using @showTok t@. The position of the /next/ --- token should be returned when @nextPos@ is called with the current --- source position @pos@, the current token @t@ and the rest of the --- tokens @toks@, @nextPos pos t toks@. --- --- This is the most primitive combinator for accepting tokens. For --- example, the 'Text.Parsec.Char.char' parser could be implemented as: --- --- > char c --- > = tokenPrim showChar nextPos testChar --- > where --- > showChar x = "'" ++ x ++ "'" --- > testChar x = if x == c then Just x else Nothing --- > nextPos pos x xs = updatePosChar pos x - -tokenPrim :: (Stream s m t) - => (t -> String) -- ^ Token pretty-printing function. - -> (SourcePos -> t -> s -> SourcePos) -- ^ Next position calculating function. - -> (t -> Maybe a) -- ^ Matching function for the token to parse. 
- -> ParsecT s u m a -{-# INLINE tokenPrim #-} -tokenPrim showToken nextpos test = tokenPrimEx showToken nextpos Nothing test - -tokenPrimEx :: (Stream s m t) - => (t -> String) - -> (SourcePos -> t -> s -> SourcePos) - -> Maybe (SourcePos -> t -> s -> u -> u) - -> (t -> Maybe a) - -> ParsecT s u m a -{-# INLINE tokenPrimEx #-} -tokenPrimEx showToken nextpos Nothing test - = ParsecT $ \(State input pos user) cok cerr eok eerr -> do - r <- uncons input - case r of - Nothing -> eerr $ unexpectError "" pos - Just (c,cs) - -> case test c of - Just x -> let newpos = nextpos pos c cs - newstate = State cs newpos user - in seq newpos $ seq newstate $ - cok x newstate (newErrorUnknown newpos) - Nothing -> eerr $ unexpectError (showToken c) pos -tokenPrimEx showToken nextpos (Just nextState) test - = ParsecT $ \(State input pos user) cok cerr eok eerr -> do - r <- uncons input - case r of - Nothing -> eerr $ unexpectError "" pos - Just (c,cs) - -> case test c of - Just x -> let newpos = nextpos pos c cs - newUser = nextState pos c cs user - newstate = State cs newpos newUser - in seq newpos $ seq newstate $ - cok x newstate $ newErrorUnknown newpos - Nothing -> eerr $ unexpectError (showToken c) pos - -unexpectError msg pos = newErrorMessage (SysUnExpect msg) pos - - --- | @many p@ applies the parser @p@ /zero/ or more times. Returns a --- list of the returned values of @p@. --- --- > identifier = do{ c <- letter --- > ; cs <- many (alphaNum <|> char '_') --- > ; return (c:cs) --- > } - -many :: ParsecT s u m a -> ParsecT s u m [a] -many p - = do xs <- manyAccum (:) p - return (reverse xs) - --- | @skipMany p@ applies the parser @p@ /zero/ or more times, skipping --- its result. 
--- --- > spaces = skipMany space - -skipMany :: ParsecT s u m a -> ParsecT s u m () -skipMany p - = do manyAccum (\_ _ -> []) p - return () - -manyAccum :: (a -> [a] -> [a]) - -> ParsecT s u m a - -> ParsecT s u m [a] -manyAccum acc p = - ParsecT $ \s cok cerr eok eerr -> - let walk xs x s' err = - unParser p s' - (seq xs $ walk $ acc x xs) -- consumed-ok - cerr -- consumed-err - manyErr -- empty-ok - (\e -> cok (acc x xs) s' e) -- empty-err - in unParser p s (walk []) cerr manyErr (\e -> eok [] s e) - -manyErr = error "Text.ParserCombinators.Parsec.Prim.many: combinator 'many' is applied to a parser that accepts an empty string." - - --- < Running a parser: monadic (runPT) and pure (runP) - -runPT :: (Stream s m t) - => ParsecT s u m a -> u -> SourceName -> s -> m (Either ParseError a) -runPT p u name s - = do res <- runParsecT p (State s (initialPos name) u) - r <- parserReply res - case r of - Ok x _ _ -> return (Right x) - Error err -> return (Left err) - where - parserReply res - = case res of - Consumed r -> r - Empty r -> r - -runP :: (Stream s Identity t) - => Parsec s u a -> u -> SourceName -> s -> Either ParseError a -runP p u name s = runIdentity $ runPT p u name s - --- | The most general way to run a parser. @runParserT p state filePath --- input@ runs parser @p@ on the input list of tokens @input@, --- obtained from source @filePath@ with the initial user state @st@. --- The @filePath@ is only used in error messages and may be the empty --- string. Returns a computation in the underlying monad @m@ that return either a 'ParseError' ('Left') or a --- value of type @a@ ('Right'). - -runParserT :: (Stream s m t) - => ParsecT s u m a -> u -> SourceName -> s -> m (Either ParseError a) -runParserT = runPT - --- | The most general way to run a parser over the Identity monad. @runParser p state filePath --- input@ runs parser @p@ on the input list of tokens @input@, --- obtained from source @filePath@ with the initial user state @st@. 
--- The @filePath@ is only used in error messages and may be the empty --- string. Returns either a 'ParseError' ('Left') or a --- value of type @a@ ('Right'). --- --- > parseFromFile p fname --- > = do{ input <- readFile fname --- > ; return (runParser p () fname input) --- > } - -runParser :: (Stream s Identity t) - => Parsec s u a -> u -> SourceName -> s -> Either ParseError a -runParser = runP - --- | @parse p filePath input@ runs a parser @p@ over Identity without user --- state. The @filePath@ is only used in error messages and may be the --- empty string. Returns either a 'ParseError' ('Left') --- or a value of type @a@ ('Right'). --- --- > main = case (parse numbers "" "11, 2, 43") of --- > Left err -> print err --- > Right xs -> print (sum xs) --- > --- > numbers = commaSep integer - -parse :: (Stream s Identity t) - => Parsec s () a -> SourceName -> s -> Either ParseError a -parse p = runP p () - --- | The expression @parseTest p input@ applies a parser @p@ against --- input @input@ and prints the result to stdout. Used for testing --- parsers. - -parseTest :: (Stream s Identity t, Show a) - => Parsec s () a -> s -> IO () -parseTest p input - = case parse p "" input of - Left err -> do putStr "parse error at " - print err - Right x -> print x - --- < Parser state combinators - --- | Returns the current source position. See also 'SourcePos'. - -getPosition :: (Monad m) => ParsecT s u m SourcePos -getPosition = do state <- getParserState - return (statePos state) - --- | Returns the current input - -getInput :: (Monad m) => ParsecT s u m s -getInput = do state <- getParserState - return (stateInput state) - --- | @setPosition pos@ sets the current source position to @pos@. - -setPosition :: (Monad m) => SourcePos -> ParsecT s u m () -setPosition pos - = do updateParserState (\(State input _ user) -> State input pos user) - return () - --- | @setInput input@ continues parsing with @input@. 
The 'getInput' and --- @setInput@ functions can for example be used to deal with #include --- files. - -setInput :: (Monad m) => s -> ParsecT s u m () -setInput input - = do updateParserState (\(State _ pos user) -> State input pos user) - return () - --- | Returns the full parser state as a 'State' record. - -getParserState :: (Monad m) => ParsecT s u m (State s u) -getParserState = updateParserState id - --- | @setParserState st@ set the full parser state to @st@. - -setParserState :: (Monad m) => State s u -> ParsecT s u m (State s u) -setParserState st = updateParserState (const st) - --- | @updateParserState f@ applies function @f@ to the parser state. - -updateParserState :: (State s u -> State s u) -> ParsecT s u m (State s u) -updateParserState f = - ParsecT $ \s _ _ eok _ -> - let s' = f s - in eok s' s' $ unknownError s' - --- < User state combinators - --- | Returns the current user state. - -getState :: (Monad m) => ParsecT s u m u -getState = stateUser `liftM` getParserState - --- | @putState st@ set the user state to @st@. - -putState :: (Monad m) => u -> ParsecT s u m () -putState u = do updateParserState $ \s -> s { stateUser = u } - return () - --- | @updateState f@ applies function @f@ to the user state. Suppose --- that we want to count identifiers in a source, we could use the user --- state as: --- --- > expr = do{ x <- identifier --- > ; updateState (+1) --- > ; return (Id x) --- > } - -modifyState :: (Monad m) => (u -> u) -> ParsecT s u m () -modifyState f = do updateParserState $ \s -> s { stateUser = f (stateUser s) } - return () - --- XXX Compat - --- | An alias for putState for backwards compatibility. - -setState :: (Monad m) => u -> ParsecT s u m () -setState = putState - --- | An alias for modifyState for backwards compatibility. 
- -updateState :: (Monad m) => (u -> u) -> ParsecT s u m () -updateState = modifyState diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/Parsec/String.hs cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/Parsec/String.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/Parsec/String.hs 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/Parsec/String.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,37 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Text.Parsec.String --- Copyright : (c) Paolo Martini 2007 --- License : BSD-style (see the file libraries/parsec/LICENSE) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- Make Strings an instance of 'Stream' with 'Char' token type. --- ------------------------------------------------------------------------------ - -module Text.Parsec.String - ( Parser, GenParser, parseFromFile - ) where - -import Text.Parsec.Error -import Text.Parsec.Prim - -type Parser = Parsec String () -type GenParser tok st = Parsec [tok] st - --- | @parseFromFile p filePath@ runs a string parser @p@ on the --- input read from @filePath@ using 'Prelude.readFile'. Returns either a 'ParseError' --- ('Left') or a value of type @a@ ('Right'). 
--- --- > main = do{ result <- parseFromFile numbers "digits.txt" --- > ; case result of --- > Left err -> print err --- > Right xs -> print (sum xs) --- > } -parseFromFile :: Parser a -> String -> IO (Either ParseError a) -parseFromFile p fname - = do input <- readFile fname - return (runP p () fname input) diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/Parsec/Text/Lazy.hs cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/Parsec/Text/Lazy.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/Parsec/Text/Lazy.hs 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/Parsec/Text/Lazy.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Text.Parsec.String --- Copyright : (c) Antoine Latter 2011 --- License : BSD-style (see the file libraries/parsec/LICENSE) --- --- Maintainer : aslatter@gmail.com --- Stability : provisional --- Portability : portable --- --- Convinience definitions for working with lazy 'Text.Text'. 
--- ------------------------------------------------------------------------------ - -module Text.Parsec.Text.Lazy - ( Parser, GenParser - ) where - -import qualified Data.Text.Lazy as Text -import Text.Parsec.Error -import Text.Parsec.Prim - -type Parser = Parsec Text.Text () -type GenParser st = Parsec Text.Text st diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/Parsec/Text.hs cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/Parsec/Text.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/Parsec/Text.hs 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/Parsec/Text.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Text.Parsec.String --- Copyright : (c) Antoine Latter 2011 --- License : BSD-style (see the file libraries/parsec/LICENSE) --- --- Maintainer : aslatter@gmail.com --- Stability : provisional --- Portability : portable --- --- Convinience definitions for working with 'Text.Text'. 
--- ------------------------------------------------------------------------------ - -module Text.Parsec.Text - ( Parser, GenParser - ) where - -import qualified Data.Text as Text -import Text.Parsec.Error -import Text.Parsec.Prim - -type Parser = Parsec Text.Text () -type GenParser st = Parsec Text.Text st diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/Parsec/Token.hs cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/Parsec/Token.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/Parsec/Token.hs 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/Parsec/Token.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,722 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Text.Parsec.Token --- Copyright : (c) Daan Leijen 1999-2001, (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : non-portable (uses local universal quantification: PolymorphicComponents) --- --- A helper module to parse lexical elements (tokens). See 'makeTokenParser' --- for a description of how to use it. --- ------------------------------------------------------------------------------ - -{-# LANGUAGE PolymorphicComponents #-} -{-# OPTIONS_GHC -fno-warn-name-shadowing #-} - -module Text.Parsec.Token - ( LanguageDef - , GenLanguageDef (..) - , TokenParser - , GenTokenParser (..) 
- , makeTokenParser - ) where - -import Data.Char ( isAlpha, toLower, toUpper, isSpace, digitToInt ) -import Data.List ( nub, sort ) -import Control.Monad.Identity -import Text.Parsec.Prim -import Text.Parsec.Char -import Text.Parsec.Combinator - ------------------------------------------------------------ --- Language Definition ------------------------------------------------------------ - -type LanguageDef st = GenLanguageDef String st Identity - --- | The @GenLanguageDef@ type is a record that contains all parameterizable --- features of the "Text.Parsec.Token" module. The module "Text.Parsec.Language" --- contains some default definitions. - -data GenLanguageDef s u m - = LanguageDef { - - -- | Describes the start of a block comment. Use the empty string if the - -- language doesn't support block comments. For example \"\/*\". - - commentStart :: String, - - -- | Describes the end of a block comment. Use the empty string if the - -- language doesn't support block comments. For example \"*\/\". - - commentEnd :: String, - - -- | Describes the start of a line comment. Use the empty string if the - -- language doesn't support line comments. For example \"\/\/\". - - commentLine :: String, - - -- | Set to 'True' if the language supports nested block comments. - - nestedComments :: Bool, - - -- | This parser should accept any start characters of identifiers. For - -- example @letter \<|> char \"_\"@. - - identStart :: ParsecT s u m Char, - - -- | This parser should accept any legal tail characters of identifiers. - -- For example @alphaNum \<|> char \"_\"@. - - identLetter :: ParsecT s u m Char, - - -- | This parser should accept any start characters of operators. For - -- example @oneOf \":!#$%&*+.\/\<=>?\@\\\\^|-~\"@ - - opStart :: ParsecT s u m Char, - - -- | This parser should accept any legal tail characters of operators. 
- -- Note that this parser should even be defined if the language doesn't - -- support user-defined operators, or otherwise the 'reservedOp' - -- parser won't work correctly. - - opLetter :: ParsecT s u m Char, - - -- | The list of reserved identifiers. - - reservedNames :: [String], - - -- | The list of reserved operators. - - reservedOpNames:: [String], - - -- | Set to 'True' if the language is case sensitive. - - caseSensitive :: Bool - - } - ------------------------------------------------------------ --- A first class module: TokenParser ------------------------------------------------------------ - -type TokenParser st = GenTokenParser String st Identity - --- | The type of the record that holds lexical parsers that work on --- @s@ streams with state @u@ over a monad @m@. - -data GenTokenParser s u m - = TokenParser { - - -- | This lexeme parser parses a legal identifier. Returns the identifier - -- string. This parser will fail on identifiers that are reserved - -- words. Legal identifier (start) characters and reserved words are - -- defined in the 'LanguageDef' that is passed to - -- 'makeTokenParser'. An @identifier@ is treated as - -- a single token using 'try'. - - identifier :: ParsecT s u m String, - - -- | The lexeme parser @reserved name@ parses @symbol - -- name@, but it also checks that the @name@ is not a prefix of a - -- valid identifier. A @reserved@ word is treated as a single token - -- using 'try'. - - reserved :: String -> ParsecT s u m (), - - -- | This lexeme parser parses a legal operator. Returns the name of the - -- operator. This parser will fail on any operators that are reserved - -- operators. Legal operator (start) characters and reserved operators - -- are defined in the 'LanguageDef' that is passed to - -- 'makeTokenParser'. An @operator@ is treated as a - -- single token using 'try'. 
- - operator :: ParsecT s u m String, - - -- |The lexeme parser @reservedOp name@ parses @symbol - -- name@, but it also checks that the @name@ is not a prefix of a - -- valid operator. A @reservedOp@ is treated as a single token using - -- 'try'. - - reservedOp :: String -> ParsecT s u m (), - - - -- | This lexeme parser parses a single literal character. Returns the - -- literal character value. This parsers deals correctly with escape - -- sequences. The literal character is parsed according to the grammar - -- rules defined in the Haskell report (which matches most programming - -- languages quite closely). - - charLiteral :: ParsecT s u m Char, - - -- | This lexeme parser parses a literal string. Returns the literal - -- string value. This parsers deals correctly with escape sequences and - -- gaps. The literal string is parsed according to the grammar rules - -- defined in the Haskell report (which matches most programming - -- languages quite closely). - - stringLiteral :: ParsecT s u m String, - - -- | This lexeme parser parses a natural number (a positive whole - -- number). Returns the value of the number. The number can be - -- specified in 'decimal', 'hexadecimal' or - -- 'octal'. The number is parsed according to the grammar - -- rules in the Haskell report. - - natural :: ParsecT s u m Integer, - - -- | This lexeme parser parses an integer (a whole number). This parser - -- is like 'natural' except that it can be prefixed with - -- sign (i.e. \'-\' or \'+\'). Returns the value of the number. The - -- number can be specified in 'decimal', 'hexadecimal' - -- or 'octal'. The number is parsed according - -- to the grammar rules in the Haskell report. - - integer :: ParsecT s u m Integer, - - -- | This lexeme parser parses a floating point value. Returns the value - -- of the number. The number is parsed according to the grammar rules - -- defined in the Haskell report. 
- - float :: ParsecT s u m Double, - - -- | This lexeme parser parses either 'natural' or a 'float'. - -- Returns the value of the number. This parsers deals with - -- any overlap in the grammar rules for naturals and floats. The number - -- is parsed according to the grammar rules defined in the Haskell report. - - naturalOrFloat :: ParsecT s u m (Either Integer Double), - - -- | Parses a positive whole number in the decimal system. Returns the - -- value of the number. - - decimal :: ParsecT s u m Integer, - - -- | Parses a positive whole number in the hexadecimal system. The number - -- should be prefixed with \"0x\" or \"0X\". Returns the value of the - -- number. - - hexadecimal :: ParsecT s u m Integer, - - -- | Parses a positive whole number in the octal system. The number - -- should be prefixed with \"0o\" or \"0O\". Returns the value of the - -- number. - - octal :: ParsecT s u m Integer, - - -- | Lexeme parser @symbol s@ parses 'string' @s@ and skips - -- trailing white space. - - symbol :: String -> ParsecT s u m String, - - -- | @lexeme p@ first applies parser @p@ and than the 'whiteSpace' - -- parser, returning the value of @p@. Every lexical - -- token (lexeme) is defined using @lexeme@, this way every parse - -- starts at a point without white space. Parsers that use @lexeme@ are - -- called /lexeme/ parsers in this document. - -- - -- The only point where the 'whiteSpace' parser should be - -- called explicitly is the start of the main parser in order to skip - -- any leading white space. - -- - -- > mainParser = do{ whiteSpace - -- > ; ds <- many (lexeme digit) - -- > ; eof - -- > ; return (sum ds) - -- > } - - lexeme :: forall a. ParsecT s u m a -> ParsecT s u m a, - - -- | Parses any white space. White space consists of /zero/ or more - -- occurrences of a 'space', a line comment or a block (multi - -- line) comment. Block comments may be nested. 
How comments are - -- started and ended is defined in the 'LanguageDef' - -- that is passed to 'makeTokenParser'. - - whiteSpace :: ParsecT s u m (), - - -- | Lexeme parser @parens p@ parses @p@ enclosed in parenthesis, - -- returning the value of @p@. - - parens :: forall a. ParsecT s u m a -> ParsecT s u m a, - - -- | Lexeme parser @braces p@ parses @p@ enclosed in braces (\'{\' and - -- \'}\'), returning the value of @p@. - - braces :: forall a. ParsecT s u m a -> ParsecT s u m a, - - -- | Lexeme parser @angles p@ parses @p@ enclosed in angle brackets (\'\<\' - -- and \'>\'), returning the value of @p@. - - angles :: forall a. ParsecT s u m a -> ParsecT s u m a, - - -- | Lexeme parser @brackets p@ parses @p@ enclosed in brackets (\'[\' - -- and \']\'), returning the value of @p@. - - brackets :: forall a. ParsecT s u m a -> ParsecT s u m a, - - -- | DEPRECATED: Use 'brackets'. - - squares :: forall a. ParsecT s u m a -> ParsecT s u m a, - - -- | Lexeme parser |semi| parses the character \';\' and skips any - -- trailing white space. Returns the string \";\". - - semi :: ParsecT s u m String, - - -- | Lexeme parser @comma@ parses the character \',\' and skips any - -- trailing white space. Returns the string \",\". - - comma :: ParsecT s u m String, - - -- | Lexeme parser @colon@ parses the character \':\' and skips any - -- trailing white space. Returns the string \":\". - - colon :: ParsecT s u m String, - - -- | Lexeme parser @dot@ parses the character \'.\' and skips any - -- trailing white space. Returns the string \".\". - - dot :: ParsecT s u m String, - - -- | Lexeme parser @semiSep p@ parses /zero/ or more occurrences of @p@ - -- separated by 'semi'. Returns a list of values returned by - -- @p@. - - semiSep :: forall a . ParsecT s u m a -> ParsecT s u m [a], - - -- | Lexeme parser @semiSep1 p@ parses /one/ or more occurrences of @p@ - -- separated by 'semi'. Returns a list of values returned by @p@. - - semiSep1 :: forall a . 
ParsecT s u m a -> ParsecT s u m [a], - - -- | Lexeme parser @commaSep p@ parses /zero/ or more occurrences of - -- @p@ separated by 'comma'. Returns a list of values returned - -- by @p@. - - commaSep :: forall a . ParsecT s u m a -> ParsecT s u m [a], - - -- | Lexeme parser @commaSep1 p@ parses /one/ or more occurrences of - -- @p@ separated by 'comma'. Returns a list of values returned - -- by @p@. - - commaSep1 :: forall a . ParsecT s u m a -> ParsecT s u m [a] - } - ------------------------------------------------------------ --- Given a LanguageDef, create a token parser. ------------------------------------------------------------ - --- | The expression @makeTokenParser language@ creates a 'GenTokenParser' --- record that contains lexical parsers that are --- defined using the definitions in the @language@ record. --- --- The use of this function is quite stylized - one imports the --- appropiate language definition and selects the lexical parsers that --- are needed from the resulting 'GenTokenParser'. --- --- > module Main where --- > --- > import Text.Parsec --- > import qualified Text.Parsec.Token as P --- > import Text.Parsec.Language (haskellDef) --- > --- > -- The parser --- > ... --- > --- > expr = parens expr --- > <|> identifier --- > <|> ... --- > --- > --- > -- The lexer --- > lexer = P.makeTokenParser haskellDef --- > --- > parens = P.parens lexer --- > braces = P.braces lexer --- > identifier = P.identifier lexer --- > reserved = P.reserved lexer --- > ... 
- -makeTokenParser :: (Stream s m Char) - => GenLanguageDef s u m -> GenTokenParser s u m -makeTokenParser languageDef - = TokenParser{ identifier = identifier - , reserved = reserved - , operator = operator - , reservedOp = reservedOp - - , charLiteral = charLiteral - , stringLiteral = stringLiteral - , natural = natural - , integer = integer - , float = float - , naturalOrFloat = naturalOrFloat - , decimal = decimal - , hexadecimal = hexadecimal - , octal = octal - - , symbol = symbol - , lexeme = lexeme - , whiteSpace = whiteSpace - - , parens = parens - , braces = braces - , angles = angles - , brackets = brackets - , squares = brackets - , semi = semi - , comma = comma - , colon = colon - , dot = dot - , semiSep = semiSep - , semiSep1 = semiSep1 - , commaSep = commaSep - , commaSep1 = commaSep1 - } - where - - ----------------------------------------------------------- - -- Bracketing - ----------------------------------------------------------- - parens p = between (symbol "(") (symbol ")") p - braces p = between (symbol "{") (symbol "}") p - angles p = between (symbol "<") (symbol ">") p - brackets p = between (symbol "[") (symbol "]") p - - semi = symbol ";" - comma = symbol "," - dot = symbol "." 
- colon = symbol ":" - - commaSep p = sepBy p comma - semiSep p = sepBy p semi - - commaSep1 p = sepBy1 p comma - semiSep1 p = sepBy1 p semi - - - ----------------------------------------------------------- - -- Chars & Strings - ----------------------------------------------------------- - charLiteral = lexeme (between (char '\'') - (char '\'' "end of character") - characterChar ) - "character" - - characterChar = charLetter <|> charEscape - "literal character" - - charEscape = do{ char '\\'; escapeCode } - charLetter = satisfy (\c -> (c /= '\'') && (c /= '\\') && (c > '\026')) - - - - stringLiteral = lexeme ( - do{ str <- between (char '"') - (char '"' "end of string") - (many stringChar) - ; return (foldr (maybe id (:)) "" str) - } - "literal string") - - stringChar = do{ c <- stringLetter; return (Just c) } - <|> stringEscape - "string character" - - stringLetter = satisfy (\c -> (c /= '"') && (c /= '\\') && (c > '\026')) - - stringEscape = do{ char '\\' - ; do{ escapeGap ; return Nothing } - <|> do{ escapeEmpty; return Nothing } - <|> do{ esc <- escapeCode; return (Just esc) } - } - - escapeEmpty = char '&' - escapeGap = do{ many1 space - ; char '\\' "end of string gap" - } - - - - -- escape codes - escapeCode = charEsc <|> charNum <|> charAscii <|> charControl - "escape code" - - charControl = do{ char '^' - ; code <- upper - ; return (toEnum (fromEnum code - fromEnum 'A' + 1)) - } - - charNum = do{ code <- decimal - <|> do{ char 'o'; number 8 octDigit } - <|> do{ char 'x'; number 16 hexDigit } - ; return (toEnum (fromInteger code)) - } - - charEsc = choice (map parseEsc escMap) - where - parseEsc (c,code) = do{ char c; return code } - - charAscii = choice (map parseAscii asciiMap) - where - parseAscii (asc,code) = try (do{ string asc; return code }) - - - -- escape code tables - escMap = zip ("abfnrtv\\\"\'") ("\a\b\f\n\r\t\v\\\"\'") - asciiMap = zip (ascii3codes ++ ascii2codes) (ascii3 ++ ascii2) - - ascii2codes = 
["BS","HT","LF","VT","FF","CR","SO","SI","EM", - "FS","GS","RS","US","SP"] - ascii3codes = ["NUL","SOH","STX","ETX","EOT","ENQ","ACK","BEL", - "DLE","DC1","DC2","DC3","DC4","NAK","SYN","ETB", - "CAN","SUB","ESC","DEL"] - - ascii2 = ['\BS','\HT','\LF','\VT','\FF','\CR','\SO','\SI', - '\EM','\FS','\GS','\RS','\US','\SP'] - ascii3 = ['\NUL','\SOH','\STX','\ETX','\EOT','\ENQ','\ACK', - '\BEL','\DLE','\DC1','\DC2','\DC3','\DC4','\NAK', - '\SYN','\ETB','\CAN','\SUB','\ESC','\DEL'] - - - ----------------------------------------------------------- - -- Numbers - ----------------------------------------------------------- - naturalOrFloat = lexeme (natFloat) "number" - - float = lexeme floating "float" - integer = lexeme int "integer" - natural = lexeme nat "natural" - - - -- floats - floating = do{ n <- decimal - ; fractExponent n - } - - - natFloat = do{ char '0' - ; zeroNumFloat - } - <|> decimalFloat - - zeroNumFloat = do{ n <- hexadecimal <|> octal - ; return (Left n) - } - <|> decimalFloat - <|> fractFloat 0 - <|> return (Left 0) - - decimalFloat = do{ n <- decimal - ; option (Left n) - (fractFloat n) - } - - fractFloat n = do{ f <- fractExponent n - ; return (Right f) - } - - fractExponent n = do{ fract <- fraction - ; expo <- option 1.0 exponent' - ; return ((fromInteger n + fract)*expo) - } - <|> - do{ expo <- exponent' - ; return ((fromInteger n)*expo) - } - - fraction = do{ char '.' 
- ; digits <- many1 digit "fraction" - ; return (foldr op 0.0 digits) - } - "fraction" - where - op d f = (f + fromIntegral (digitToInt d))/10.0 - - exponent' = do{ oneOf "eE" - ; f <- sign - ; e <- decimal "exponent" - ; return (power (f e)) - } - "exponent" - where - power e | e < 0 = 1.0/power(-e) - | otherwise = fromInteger (10^e) - - - -- integers and naturals - int = do{ f <- lexeme sign - ; n <- nat - ; return (f n) - } - - sign = (char '-' >> return negate) - <|> (char '+' >> return id) - <|> return id - - nat = zeroNumber <|> decimal - - zeroNumber = do{ char '0' - ; hexadecimal <|> octal <|> decimal <|> return 0 - } - "" - - decimal = number 10 digit - hexadecimal = do{ oneOf "xX"; number 16 hexDigit } - octal = do{ oneOf "oO"; number 8 octDigit } - - number base baseDigit - = do{ digits <- many1 baseDigit - ; let n = foldl (\x d -> base*x + toInteger (digitToInt d)) 0 digits - ; seq n (return n) - } - - ----------------------------------------------------------- - -- Operators & reserved ops - ----------------------------------------------------------- - reservedOp name = - lexeme $ try $ - do{ string name - ; notFollowedBy (opLetter languageDef) ("end of " ++ show name) - } - - operator = - lexeme $ try $ - do{ name <- oper - ; if (isReservedOp name) - then unexpected ("reserved operator " ++ show name) - else return name - } - - oper = - do{ c <- (opStart languageDef) - ; cs <- many (opLetter languageDef) - ; return (c:cs) - } - "operator" - - isReservedOp name = - isReserved (sort (reservedOpNames languageDef)) name - - - ----------------------------------------------------------- - -- Identifiers & Reserved words - ----------------------------------------------------------- - reserved name = - lexeme $ try $ - do{ caseString name - ; notFollowedBy (identLetter languageDef) ("end of " ++ show name) - } - - caseString name - | caseSensitive languageDef = string name - | otherwise = do{ walk name; return name } - where - walk [] = return () - walk 
(c:cs) = do{ caseChar c msg; walk cs } - - caseChar c | isAlpha c = char (toLower c) <|> char (toUpper c) - | otherwise = char c - - msg = show name - - - identifier = - lexeme $ try $ - do{ name <- ident - ; if (isReservedName name) - then unexpected ("reserved word " ++ show name) - else return name - } - - - ident - = do{ c <- identStart languageDef - ; cs <- many (identLetter languageDef) - ; return (c:cs) - } - "identifier" - - isReservedName name - = isReserved theReservedNames caseName - where - caseName | caseSensitive languageDef = name - | otherwise = map toLower name - - - isReserved names name - = scan names - where - scan [] = False - scan (r:rs) = case (compare r name) of - LT -> scan rs - EQ -> True - GT -> False - - theReservedNames - | caseSensitive languageDef = sort reserved - | otherwise = sort . map (map toLower) $ reserved - where - reserved = reservedNames languageDef - - - - ----------------------------------------------------------- - -- White space & symbols - ----------------------------------------------------------- - symbol name - = lexeme (string name) - - lexeme p - = do{ x <- p; whiteSpace; return x } - - - --whiteSpace - whiteSpace - | noLine && noMulti = skipMany (simpleSpace "") - | noLine = skipMany (simpleSpace <|> multiLineComment "") - | noMulti = skipMany (simpleSpace <|> oneLineComment "") - | otherwise = skipMany (simpleSpace <|> oneLineComment <|> multiLineComment "") - where - noLine = null (commentLine languageDef) - noMulti = null (commentStart languageDef) - - - simpleSpace = - skipMany1 (satisfy isSpace) - - oneLineComment = - do{ try (string (commentLine languageDef)) - ; skipMany (satisfy (/= '\n')) - ; return () - } - - multiLineComment = - do { try (string (commentStart languageDef)) - ; inComment - } - - inComment - | nestedComments languageDef = inCommentMulti - | otherwise = inCommentSingle - - inCommentMulti - = do{ try (string (commentEnd languageDef)) ; return () } - <|> do{ multiLineComment ; 
inCommentMulti } - <|> do{ skipMany1 (noneOf startEnd) ; inCommentMulti } - <|> do{ oneOf startEnd ; inCommentMulti } - "end of comment" - where - startEnd = nub (commentEnd languageDef ++ commentStart languageDef) - - inCommentSingle - = do{ try (string (commentEnd languageDef)); return () } - <|> do{ skipMany1 (noneOf startEnd) ; inCommentSingle } - <|> do{ oneOf startEnd ; inCommentSingle } - "end of comment" - where - startEnd = nub (commentEnd languageDef ++ commentStart languageDef) diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/Parsec.hs cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/Parsec.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/Parsec.hs 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/Parsec.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,119 +0,0 @@ -{-| -Module : Text.Parsec -Copyright : (c) Daan Leijen 1999-2001, (c) Paolo Martini 2007 -License : BSD-style (see the LICENSE file) - -Maintainer : aslatter@gmail.com -Stability : provisional -Portability : portable - -This module includes everything you need to get started writing a -parser. - -By default this module is set up to parse character data. If you'd like -to parse the result of your own tokenizer you should start with the following -imports: - -@ - import Text.Parsec.Prim - import Text.Parsec.Combinator -@ - -Then you can implement your own version of 'satisfy' on top of the 'tokenPrim' -primitive. 
- --} - -module Text.Parsec - ( -- * Parsers - ParsecT - , Parsec - , token - , tokens - , runParserT - , runParser - , parse - , parseTest - , getPosition - , getInput - , getState - , putState - , modifyState - -- * Combinators - , (<|>) - , () - , label - , labels - , try - , unexpected - , choice - , many - , many1 - , skipMany - , skipMany1 - , count - , between - , option - , optionMaybe - , optional - , sepBy - , sepBy1 - , endBy - , endBy1 - , sepEndBy - , sepEndBy1 - , chainl - , chainl1 - , chainr - , chainr1 - , eof - , notFollowedBy - , manyTill - , lookAhead - , anyToken - -- * Character Parsing - , module Text.Parsec.Char - -- * Error messages - , ParseError - , errorPos - -- * Position - , SourcePos - , SourceName, Line, Column - , sourceName, sourceLine, sourceColumn - , incSourceLine, incSourceColumn - , setSourceLine, setSourceColumn, setSourceName - -- * Low-level operations - , manyAccum - , tokenPrim - , tokenPrimEx - , runPT - , unknownError - , sysUnExpectError - , mergeErrorReply - , getParserState - , setParserState - , updateParserState - , Stream - , runParsecT - , mkPT - , Consumed - , Reply - , State - , setPosition - , setInput - -- * Other stuff - , setState - , updateState - , parsecMap - , parserReturn - , parserBind - , parserFail - , parserZero - , parserPlus - ) where - -import Text.Parsec.Pos -import Text.Parsec.Error -import Text.Parsec.Prim -import Text.Parsec.Char -import Text.Parsec.Combinator diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Char.hs cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Char.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Char.hs 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Char.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,40 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : 
Text.ParserCombinators.Parsec.Char --- Copyright : (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- Parsec compatibility module --- ------------------------------------------------------------------------------ - -module Text.ParserCombinators.Parsec.Char - ( CharParser, - spaces, - space, - newline, - tab, - upper, - lower, - alphaNum, - letter, - digit, - hexDigit, - octDigit, - char, - string, - anyChar, - oneOf, - noneOf, - satisfy - ) where - - -import Text.Parsec.Char -import Text.Parsec.String - -type CharParser st = GenParser Char st diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Combinator.hs cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Combinator.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Combinator.hs 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Combinator.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,42 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Text.ParserCombinators.Parsec.Combinator --- Copyright : (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- Parsec compatibility module --- ------------------------------------------------------------------------------ - -module Text.ParserCombinators.Parsec.Combinator - ( choice, - count, - between, - option, - optionMaybe, - optional, - skipMany1, - many1, - sepBy, - sepBy1, - endBy, - endBy1, - sepEndBy, - sepEndBy1, - chainl, - chainl1, - chainr, - chainr1, - eof, - notFollowedBy, - manyTill, - lookAhead, - anyToken - ) where - - -import Text.Parsec.Combinator diff -Nru 
cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Error.hs cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Error.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Error.hs 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Error.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,40 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Text.ParserCombinators.Parsec.Error --- Copyright : (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- Parsec compatibility module --- ------------------------------------------------------------------------------ - -module Text.ParserCombinators.Parsec.Error - ( Message (SysUnExpect,UnExpect,Expect,Message), - messageString, - messageCompare, - messageEq, - ParseError, - errorPos, - errorMessages, - errorIsUnknown, - showErrorMessages, - newErrorMessage, - newErrorUnknown, - addErrorMessage, - setErrorPos, - setErrorMessage, - mergeError - ) where - -import Text.Parsec.Error - - -messageCompare :: Message -> Message -> Ordering -messageCompare = compare - -messageEq :: Message -> Message -> Bool -messageEq = (==) diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Expr.hs cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Expr.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Expr.hs 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Expr.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,42 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Text.ParserCombinators.Parsec.Expr --- Copyright : (c) Paolo Martini 2007 --- 
License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- Parsec compatibility module --- ------------------------------------------------------------------------------ - -module Text.ParserCombinators.Parsec.Expr - ( Assoc (AssocNone,AssocLeft,AssocRight), - Operator(..), - OperatorTable, - buildExpressionParser - ) where - -import Text.Parsec.Expr(Assoc(..)) -import qualified Text.Parsec.Expr as N -import Text.ParserCombinators.Parsec(GenParser) - -import Control.Monad.Identity - -data Operator tok st a = Infix (GenParser tok st (a -> a -> a)) Assoc - | Prefix (GenParser tok st (a -> a)) - | Postfix (GenParser tok st (a -> a)) - -type OperatorTable tok st a = [[Operator tok st a]] - -convert :: Operator tok st a -> N.Operator [tok] st Identity a -convert (Infix p a) = N.Infix p a -convert (Prefix p) = N.Prefix p -convert (Postfix p) = N.Postfix p - -buildExpressionParser :: OperatorTable tok st a - -> GenParser tok st a - -> GenParser tok st a -buildExpressionParser = N.buildExpressionParser . 
map (map convert) diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Language.hs cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Language.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Language.hs 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Language.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,28 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Text.ParserCombinators.Parsec.Language --- Copyright : (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- Parsec compatibility module --- ------------------------------------------------------------------------------ - -module Text.ParserCombinators.Parsec.Language - ( haskellDef, - haskell, - mondrianDef, - mondrian, - emptyDef, - haskellStyle, - javaStyle, - LanguageDef, - GenLanguageDef(..), - ) where - -import Text.Parsec.Token -import Text.Parsec.Language diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Perm.hs cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Perm.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Perm.hs 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Perm.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Text.ParserCombinators.Parsec.Perm --- Copyright : (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- Parsec compatibility module --- 
------------------------------------------------------------------------------ - -module Text.ParserCombinators.Parsec.Perm - ( PermParser, - permute, - (<||>), - (<$$>), - (<|?>), - (<$?>) - ) where - -import Text.Parsec.Perm diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Pos.hs cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Pos.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Pos.hs 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Pos.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,35 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Text.ParserCombinators.Parsec.Pos --- Copyright : (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- Parsec compatibility module --- ------------------------------------------------------------------------------ - -module Text.ParserCombinators.Parsec.Pos - ( SourceName, - Line, - Column, - SourcePos, - sourceLine, - sourceColumn, - sourceName, - incSourceLine, - incSourceColumn, - setSourceLine, - setSourceColumn, - setSourceName, - newPos, - initialPos, - updatePosChar, - updatePosString - ) where - - -import Text.Parsec.Pos diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Prim.hs cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Prim.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Prim.hs 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Prim.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,65 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Text.ParserCombinators.Parsec.Prim --- 
Copyright : (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- Parsec compatibility module --- ------------------------------------------------------------------------------ - -module Text.ParserCombinators.Parsec.Prim - ( (), - (<|>), - Parser, - GenParser, - runParser, - parse, - parseFromFile, - parseTest, - token, - tokens, - tokenPrim, - tokenPrimEx, - try, - label, - labels, - unexpected, - pzero, - many, - skipMany, - getState, - setState, - updateState, - getPosition, - setPosition, - getInput, - setInput, - State(..), - getParserState, - setParserState - ) where - -import Text.Parsec.Prim hiding (runParser, try) -import qualified Text.Parsec.Prim as N -- 'N' for 'New' -import Text.Parsec.String - -import Text.Parsec.Error -import Text.Parsec.Pos - -pzero :: GenParser tok st a -pzero = parserZero - -runParser :: GenParser tok st a - -> st - -> SourceName - -> [tok] - -> Either ParseError a -runParser = N.runParser - -try :: GenParser tok st a -> GenParser tok st a -try = N.try diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Token.hs cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Token.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Token.hs 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/ParserCombinators/Parsec/Token.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,23 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Text.ParserCombinators.Parsec.Token --- Copyright : (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- Parsec compatibility module --- 
------------------------------------------------------------------------------ - -module Text.ParserCombinators.Parsec.Token - ( LanguageDef, - GenLanguageDef(..), - TokenParser, - GenTokenParser(..), - makeTokenParser - ) where - -import Text.Parsec.Token diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/ParserCombinators/Parsec.hs cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/ParserCombinators/Parsec.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar10=/Text/ParserCombinators/Parsec.hs 2014-09-25 01:54:02.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar10=/Text/ParserCombinators/Parsec.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,41 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Text.ParserCombinators.Parsec --- Copyright : (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- Parsec compatibility module --- ------------------------------------------------------------------------------ - -module Text.ParserCombinators.Parsec - ( -- complete modules - module Text.ParserCombinators.Parsec.Prim - , module Text.ParserCombinators.Parsec.Combinator - , module Text.ParserCombinators.Parsec.Char - - -- module Text.ParserCombinators.Parsec.Error - , ParseError - , errorPos - - -- module Text.ParserCombinators.Parsec.Pos - , SourcePos - , SourceName, Line, Column - , sourceName, sourceLine, sourceColumn - , incSourceLine, incSourceColumn - , setSourceLine, setSourceColumn, setSourceName - - ) where - -import Text.Parsec.String() - -import Text.ParserCombinators.Parsec.Prim -import Text.ParserCombinators.Parsec.Combinator -import Text.ParserCombinators.Parsec.Char - -import Text.ParserCombinators.Parsec.Error -import Text.ParserCombinators.Parsec.Pos diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar11=/aclocal.m4 
cabal-install-1.22-1.22.9.0/=unpacked-tar11=/aclocal.m4 --- cabal-install-1.22-1.22.6.0/=unpacked-tar11=/aclocal.m4 2014-11-21 10:44:17.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar11=/aclocal.m4 1970-01-01 00:00:00.000000000 +0000 @@ -1,20 +0,0 @@ -# FP_DECL_ALTZONE -# --------------- -# Defines HAVE_DECL_ALTZONE to 1 if declared, 0 otherwise. -# -# Used by base package. -AC_DEFUN([FP_DECL_ALTZONE], -[AC_REQUIRE([AC_HEADER_TIME])dnl -AC_CHECK_HEADERS([sys/time.h]) -AC_CHECK_DECLS([altzone], [], [],[#if TIME_WITH_SYS_TIME -# include -# include -#else -# if HAVE_SYS_TIME_H -# include -# else -# include -# endif -#endif]) -])# FP_DECL_ALTZONE - diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar11=/cbits/timeUtils.c cabal-install-1.22-1.22.9.0/=unpacked-tar11=/cbits/timeUtils.c --- cabal-install-1.22-1.22.6.0/=unpacked-tar11=/cbits/timeUtils.c 2014-11-21 10:44:17.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar11=/cbits/timeUtils.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,38 +0,0 @@ -#if defined(__GLASGOW_HASKELL__) || defined(__HUGS__) -/* - * (c) The University of Glasgow 2002 - * - * Time Runtime Support - */ -#include "HsTime.h" - -#if HAVE_GETTIMEOFDAY -int __hscore_gettimeofday(struct timeval *tp, void *tzp) -{ - return gettimeofday(tp, tzp); -} -#endif - -#if HAVE_GMTIME_R -struct tm *__hscore_gmtime_r(const time_t *clock, struct tm *result) -{ - return gmtime_r(clock, result); -} -#endif - -#if HAVE_LOCALTIME_R -struct tm *__hscore_localtime_r(const time_t *clock, struct tm *result) -{ - return localtime_r(clock, result); -} -#endif - -#if defined(_MSC_VER) || defined(__MINGW32__) || defined(_WIN32) /* to the end */ - -long *__hscore_timezone( void ) -{ return &_timezone; } - -char **__hscore_tzname( void ) -{ return _tzname; } -#endif -#endif diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar11=/changelog.md cabal-install-1.22-1.22.9.0/=unpacked-tar11=/changelog.md --- cabal-install-1.22-1.22.6.0/=unpacked-tar11=/changelog.md 
2014-11-21 10:44:17.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar11=/changelog.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,23 +0,0 @@ -# Changelog for [`old-time` package](http://hackage.haskell.org/package/old-time) - -## 1.1.0.3 *Nov 2014* - - * Decoupled from GHC distribution - -## 1.1.0.2 *Mar 2014* - - * Bundled with GHC 7.8.1 - - * Supports `base-4.7.0.0` - - * Remove NHC98-specific code - - * Update to Cabal 1.10 format - -## 1.1.0.1 *Sep 2012* - - * Bundled with GHC 7.6.1 - - * Don't include deprecated `` on FreeBSD - - * Fix `gettimeofday(2)` call on Win64 diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar11=/config.guess cabal-install-1.22-1.22.9.0/=unpacked-tar11=/config.guess --- cabal-install-1.22-1.22.6.0/=unpacked-tar11=/config.guess 2014-11-21 10:44:17.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar11=/config.guess 1970-01-01 00:00:00.000000000 +0000 @@ -1,1420 +0,0 @@ -#! /bin/sh -# Attempt to guess a canonical system name. -# Copyright 1992-2014 Free Software Foundation, Inc. - -timestamp='2014-03-23' - -# This file is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, see . -# -# As a special exception to the GNU General Public License, if you -# distribute this file as part of a program that contains a -# configuration script generated by Autoconf, you may include it under -# the same distribution terms that you use for the rest of that -# program. 
This Exception is an additional permission under section 7 -# of the GNU General Public License, version 3 ("GPLv3"). -# -# Originally written by Per Bothner. -# -# You can get the latest version of this script from: -# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD -# -# Please send patches with a ChangeLog entry to config-patches@gnu.org. - - -me=`echo "$0" | sed -e 's,.*/,,'` - -usage="\ -Usage: $0 [OPTION] - -Output the configuration name of the system \`$me' is run on. - -Operation modes: - -h, --help print this help, then exit - -t, --time-stamp print date of last modification, then exit - -v, --version print version number, then exit - -Report bugs and patches to ." - -version="\ -GNU config.guess ($timestamp) - -Originally written by Per Bothner. -Copyright 1992-2014 Free Software Foundation, Inc. - -This is free software; see the source for copying conditions. There is NO -warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." - -help=" -Try \`$me --help' for more information." - -# Parse command line -while test $# -gt 0 ; do - case $1 in - --time-stamp | --time* | -t ) - echo "$timestamp" ; exit ;; - --version | -v ) - echo "$version" ; exit ;; - --help | --h* | -h ) - echo "$usage"; exit ;; - -- ) # Stop option processing - shift; break ;; - - ) # Use stdin as input. - break ;; - -* ) - echo "$me: invalid option $1$help" >&2 - exit 1 ;; - * ) - break ;; - esac -done - -if test $# != 0; then - echo "$me: too many arguments$help" >&2 - exit 1 -fi - -trap 'exit 1' 1 2 15 - -# CC_FOR_BUILD -- compiler used by this script. Note that the use of a -# compiler to aid in system detection is discouraged as it requires -# temporary files to be created and, as you can see below, it is a -# headache to deal with in a portable fashion. - -# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still -# use `HOST_CC' if defined, but it is deprecated. 
- -# Portable tmp directory creation inspired by the Autoconf team. - -set_cc_for_build=' -trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; -trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; -: ${TMPDIR=/tmp} ; - { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || - { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || - { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || - { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; -dummy=$tmp/dummy ; -tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; -case $CC_FOR_BUILD,$HOST_CC,$CC in - ,,) echo "int x;" > $dummy.c ; - for c in cc gcc c89 c99 ; do - if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then - CC_FOR_BUILD="$c"; break ; - fi ; - done ; - if test x"$CC_FOR_BUILD" = x ; then - CC_FOR_BUILD=no_compiler_found ; - fi - ;; - ,,*) CC_FOR_BUILD=$CC ;; - ,*,*) CC_FOR_BUILD=$HOST_CC ;; -esac ; set_cc_for_build= ;' - -# This is needed to find uname on a Pyramid OSx when run in the BSD universe. -# (ghazi@noc.rutgers.edu 1994-08-24) -if (test -f /.attbin/uname) >/dev/null 2>&1 ; then - PATH=$PATH:/.attbin ; export PATH -fi - -UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown -UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown -UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown -UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown - -case "${UNAME_SYSTEM}" in -Linux|GNU|GNU/*) - # If the system lacks a compiler, then just pick glibc. - # We could probably try harder. 
- LIBC=gnu - - eval $set_cc_for_build - cat <<-EOF > $dummy.c - #include - #if defined(__UCLIBC__) - LIBC=uclibc - #elif defined(__dietlibc__) - LIBC=dietlibc - #else - LIBC=gnu - #endif - EOF - eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC' | sed 's, ,,g'` - ;; -esac - -# Note: order is significant - the case branches are not exclusive. - -case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in - *:NetBSD:*:*) - # NetBSD (nbsd) targets should (where applicable) match one or - # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, - # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently - # switched to ELF, *-*-netbsd* would select the old - # object file format. This provides both forward - # compatibility and a consistent mechanism for selecting the - # object file format. - # - # Note: NetBSD doesn't particularly care about the vendor - # portion of the name. We always set it to "unknown". - sysctl="sysctl -n hw.machine_arch" - UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \ - /usr/sbin/$sysctl 2>/dev/null || echo unknown)` - case "${UNAME_MACHINE_ARCH}" in - armeb) machine=armeb-unknown ;; - arm*) machine=arm-unknown ;; - sh3el) machine=shl-unknown ;; - sh3eb) machine=sh-unknown ;; - sh5el) machine=sh5le-unknown ;; - *) machine=${UNAME_MACHINE_ARCH}-unknown ;; - esac - # The Operating System including object format, if it has switched - # to ELF recently, or will in the future. - case "${UNAME_MACHINE_ARCH}" in - arm*|i386|m68k|ns32k|sh3*|sparc|vax) - eval $set_cc_for_build - if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ - | grep -q __ELF__ - then - # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). - # Return netbsd for either. FIX? - os=netbsd - else - os=netbsdelf - fi - ;; - *) - os=netbsd - ;; - esac - # The OS release - # Debian GNU/NetBSD machines have a different userland, and - # thus, need a distinct triplet. 
However, they do not need - # kernel version information, so it can be replaced with a - # suitable tag, in the style of linux-gnu. - case "${UNAME_VERSION}" in - Debian*) - release='-gnu' - ;; - *) - release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'` - ;; - esac - # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: - # contains redundant information, the shorter form: - # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. - echo "${machine}-${os}${release}" - exit ;; - *:Bitrig:*:*) - UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'` - echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE} - exit ;; - *:OpenBSD:*:*) - UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` - echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE} - exit ;; - *:ekkoBSD:*:*) - echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} - exit ;; - *:SolidBSD:*:*) - echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE} - exit ;; - macppc:MirBSD:*:*) - echo powerpc-unknown-mirbsd${UNAME_RELEASE} - exit ;; - *:MirBSD:*:*) - echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} - exit ;; - alpha:OSF1:*:*) - case $UNAME_RELEASE in - *4.0) - UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` - ;; - *5.*) - UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` - ;; - esac - # According to Compaq, /usr/sbin/psrinfo has been available on - # OSF/1 and Tru64 systems produced since 1995. I hope that - # covers most systems running today. This code pipes the CPU - # types through head -n 1, so we only detect the type of CPU 0. 
- ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` - case "$ALPHA_CPU_TYPE" in - "EV4 (21064)") - UNAME_MACHINE="alpha" ;; - "EV4.5 (21064)") - UNAME_MACHINE="alpha" ;; - "LCA4 (21066/21068)") - UNAME_MACHINE="alpha" ;; - "EV5 (21164)") - UNAME_MACHINE="alphaev5" ;; - "EV5.6 (21164A)") - UNAME_MACHINE="alphaev56" ;; - "EV5.6 (21164PC)") - UNAME_MACHINE="alphapca56" ;; - "EV5.7 (21164PC)") - UNAME_MACHINE="alphapca57" ;; - "EV6 (21264)") - UNAME_MACHINE="alphaev6" ;; - "EV6.7 (21264A)") - UNAME_MACHINE="alphaev67" ;; - "EV6.8CB (21264C)") - UNAME_MACHINE="alphaev68" ;; - "EV6.8AL (21264B)") - UNAME_MACHINE="alphaev68" ;; - "EV6.8CX (21264D)") - UNAME_MACHINE="alphaev68" ;; - "EV6.9A (21264/EV69A)") - UNAME_MACHINE="alphaev69" ;; - "EV7 (21364)") - UNAME_MACHINE="alphaev7" ;; - "EV7.9 (21364A)") - UNAME_MACHINE="alphaev79" ;; - esac - # A Pn.n version is a patched version. - # A Vn.n version is a released version. - # A Tn.n version is a released field test version. - # A Xn.n version is an unreleased experimental baselevel. - # 1.2 uses "1.2" for uname -r. - echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` - # Reset EXIT trap before exiting to avoid spurious non-zero exit code. - exitcode=$? - trap '' 0 - exit $exitcode ;; - Alpha\ *:Windows_NT*:*) - # How do we know it's Interix rather than the generic POSIX subsystem? - # Should we change UNAME_MACHINE based on the output of uname instead - # of the specific Alpha model? 
- echo alpha-pc-interix - exit ;; - 21064:Windows_NT:50:3) - echo alpha-dec-winnt3.5 - exit ;; - Amiga*:UNIX_System_V:4.0:*) - echo m68k-unknown-sysv4 - exit ;; - *:[Aa]miga[Oo][Ss]:*:*) - echo ${UNAME_MACHINE}-unknown-amigaos - exit ;; - *:[Mm]orph[Oo][Ss]:*:*) - echo ${UNAME_MACHINE}-unknown-morphos - exit ;; - *:OS/390:*:*) - echo i370-ibm-openedition - exit ;; - *:z/VM:*:*) - echo s390-ibm-zvmoe - exit ;; - *:OS400:*:*) - echo powerpc-ibm-os400 - exit ;; - arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) - echo arm-acorn-riscix${UNAME_RELEASE} - exit ;; - arm*:riscos:*:*|arm*:RISCOS:*:*) - echo arm-unknown-riscos - exit ;; - SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) - echo hppa1.1-hitachi-hiuxmpp - exit ;; - Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) - # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. - if test "`(/bin/universe) 2>/dev/null`" = att ; then - echo pyramid-pyramid-sysv3 - else - echo pyramid-pyramid-bsd - fi - exit ;; - NILE*:*:*:dcosx) - echo pyramid-pyramid-svr4 - exit ;; - DRS?6000:unix:4.0:6*) - echo sparc-icl-nx6 - exit ;; - DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) - case `/usr/bin/uname -p` in - sparc) echo sparc-icl-nx7; exit ;; - esac ;; - s390x:SunOS:*:*) - echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - sun4H:SunOS:5.*:*) - echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) - echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) - echo i386-pc-auroraux${UNAME_RELEASE} - exit ;; - i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) - eval $set_cc_for_build - SUN_ARCH="i386" - # If there is a compiler, see if it is configured for 64-bit objects. - # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. - # This test works for both compilers. 
- if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then - if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ - (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ - grep IS_64BIT_ARCH >/dev/null - then - SUN_ARCH="x86_64" - fi - fi - echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - sun4*:SunOS:6*:*) - # According to config.sub, this is the proper way to canonicalize - # SunOS6. Hard to guess exactly what SunOS6 will be like, but - # it's likely to be more like Solaris than SunOS4. - echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - sun4*:SunOS:*:*) - case "`/usr/bin/arch -k`" in - Series*|S4*) - UNAME_RELEASE=`uname -v` - ;; - esac - # Japanese Language versions have a version number like `4.1.3-JL'. - echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` - exit ;; - sun3*:SunOS:*:*) - echo m68k-sun-sunos${UNAME_RELEASE} - exit ;; - sun*:*:4.2BSD:*) - UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` - test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3 - case "`/bin/arch`" in - sun3) - echo m68k-sun-sunos${UNAME_RELEASE} - ;; - sun4) - echo sparc-sun-sunos${UNAME_RELEASE} - ;; - esac - exit ;; - aushp:SunOS:*:*) - echo sparc-auspex-sunos${UNAME_RELEASE} - exit ;; - # The situation for MiNT is a little confusing. The machine name - # can be virtually everything (everything which is not - # "atarist" or "atariste" at least should have a processor - # > m68000). The system name ranges from "MiNT" over "FreeMiNT" - # to the lowercase version "mint" (or "freemint"). Finally - # the system name "TOS" denotes a system which is actually not - # MiNT. But MiNT is downward compatible to TOS, so this should - # be no problem. 
- atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) - echo m68k-atari-mint${UNAME_RELEASE} - exit ;; - atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) - echo m68k-atari-mint${UNAME_RELEASE} - exit ;; - *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) - echo m68k-atari-mint${UNAME_RELEASE} - exit ;; - milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) - echo m68k-milan-mint${UNAME_RELEASE} - exit ;; - hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) - echo m68k-hades-mint${UNAME_RELEASE} - exit ;; - *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) - echo m68k-unknown-mint${UNAME_RELEASE} - exit ;; - m68k:machten:*:*) - echo m68k-apple-machten${UNAME_RELEASE} - exit ;; - powerpc:machten:*:*) - echo powerpc-apple-machten${UNAME_RELEASE} - exit ;; - RISC*:Mach:*:*) - echo mips-dec-mach_bsd4.3 - exit ;; - RISC*:ULTRIX:*:*) - echo mips-dec-ultrix${UNAME_RELEASE} - exit ;; - VAX*:ULTRIX*:*:*) - echo vax-dec-ultrix${UNAME_RELEASE} - exit ;; - 2020:CLIX:*:* | 2430:CLIX:*:*) - echo clipper-intergraph-clix${UNAME_RELEASE} - exit ;; - mips:*:*:UMIPS | mips:*:*:RISCos) - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c -#ifdef __cplusplus -#include /* for printf() prototype */ - int main (int argc, char *argv[]) { -#else - int main (argc, argv) int argc; char *argv[]; { -#endif - #if defined (host_mips) && defined (MIPSEB) - #if defined (SYSTYPE_SYSV) - printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); - #endif - #if defined (SYSTYPE_SVR4) - printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); - #endif - #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) - printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); - #endif - #endif - exit (-1); - } -EOF - $CC_FOR_BUILD -o $dummy $dummy.c && - dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` && - SYSTEM_NAME=`$dummy $dummyarg` && - { echo "$SYSTEM_NAME"; exit; } - echo mips-mips-riscos${UNAME_RELEASE} - exit ;; - Motorola:PowerMAX_OS:*:*) - echo 
powerpc-motorola-powermax - exit ;; - Motorola:*:4.3:PL8-*) - echo powerpc-harris-powermax - exit ;; - Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) - echo powerpc-harris-powermax - exit ;; - Night_Hawk:Power_UNIX:*:*) - echo powerpc-harris-powerunix - exit ;; - m88k:CX/UX:7*:*) - echo m88k-harris-cxux7 - exit ;; - m88k:*:4*:R4*) - echo m88k-motorola-sysv4 - exit ;; - m88k:*:3*:R3*) - echo m88k-motorola-sysv3 - exit ;; - AViiON:dgux:*:*) - # DG/UX returns AViiON for all architectures - UNAME_PROCESSOR=`/usr/bin/uname -p` - if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ] - then - if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ - [ ${TARGET_BINARY_INTERFACE}x = x ] - then - echo m88k-dg-dgux${UNAME_RELEASE} - else - echo m88k-dg-dguxbcs${UNAME_RELEASE} - fi - else - echo i586-dg-dgux${UNAME_RELEASE} - fi - exit ;; - M88*:DolphinOS:*:*) # DolphinOS (SVR3) - echo m88k-dolphin-sysv3 - exit ;; - M88*:*:R3*:*) - # Delta 88k system running SVR3 - echo m88k-motorola-sysv3 - exit ;; - XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) - echo m88k-tektronix-sysv3 - exit ;; - Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) - echo m68k-tektronix-bsd - exit ;; - *:IRIX*:*:*) - echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` - exit ;; - ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. 
- echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id - exit ;; # Note that: echo "'`uname -s`'" gives 'AIX ' - i*86:AIX:*:*) - echo i386-ibm-aix - exit ;; - ia64:AIX:*:*) - if [ -x /usr/bin/oslevel ] ; then - IBM_REV=`/usr/bin/oslevel` - else - IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} - fi - echo ${UNAME_MACHINE}-ibm-aix${IBM_REV} - exit ;; - *:AIX:2:3) - if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c - #include - - main() - { - if (!__power_pc()) - exit(1); - puts("powerpc-ibm-aix3.2.5"); - exit(0); - } -EOF - if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` - then - echo "$SYSTEM_NAME" - else - echo rs6000-ibm-aix3.2.5 - fi - elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then - echo rs6000-ibm-aix3.2.4 - else - echo rs6000-ibm-aix3.2 - fi - exit ;; - *:AIX:*:[4567]) - IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` - if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then - IBM_ARCH=rs6000 - else - IBM_ARCH=powerpc - fi - if [ -x /usr/bin/oslevel ] ; then - IBM_REV=`/usr/bin/oslevel` - else - IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} - fi - echo ${IBM_ARCH}-ibm-aix${IBM_REV} - exit ;; - *:AIX:*:*) - echo rs6000-ibm-aix - exit ;; - ibmrt:4.4BSD:*|romp-ibm:BSD:*) - echo romp-ibm-bsd4.4 - exit ;; - ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and - echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to - exit ;; # report: romp-ibm BSD 4.3 - *:BOSX:*:*) - echo rs6000-bull-bosx - exit ;; - DPX/2?00:B.O.S.:*:*) - echo m68k-bull-sysv3 - exit ;; - 9000/[34]??:4.3bsd:1.*:*) - echo m68k-hp-bsd - exit ;; - hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) - echo m68k-hp-bsd4.4 - exit ;; - 9000/[34678]??:HP-UX:*:*) - HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` - case "${UNAME_MACHINE}" in - 9000/31? ) HP_ARCH=m68000 ;; - 9000/[34]?? 
) HP_ARCH=m68k ;; - 9000/[678][0-9][0-9]) - if [ -x /usr/bin/getconf ]; then - sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` - sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` - case "${sc_cpu_version}" in - 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0 - 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1 - 532) # CPU_PA_RISC2_0 - case "${sc_kernel_bits}" in - 32) HP_ARCH="hppa2.0n" ;; - 64) HP_ARCH="hppa2.0w" ;; - '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20 - esac ;; - esac - fi - if [ "${HP_ARCH}" = "" ]; then - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c - - #define _HPUX_SOURCE - #include - #include - - int main () - { - #if defined(_SC_KERNEL_BITS) - long bits = sysconf(_SC_KERNEL_BITS); - #endif - long cpu = sysconf (_SC_CPU_VERSION); - - switch (cpu) - { - case CPU_PA_RISC1_0: puts ("hppa1.0"); break; - case CPU_PA_RISC1_1: puts ("hppa1.1"); break; - case CPU_PA_RISC2_0: - #if defined(_SC_KERNEL_BITS) - switch (bits) - { - case 64: puts ("hppa2.0w"); break; - case 32: puts ("hppa2.0n"); break; - default: puts ("hppa2.0"); break; - } break; - #else /* !defined(_SC_KERNEL_BITS) */ - puts ("hppa2.0"); break; - #endif - default: puts ("hppa1.0"); break; - } - exit (0); - } -EOF - (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` - test -z "$HP_ARCH" && HP_ARCH=hppa - fi ;; - esac - if [ ${HP_ARCH} = "hppa2.0w" ] - then - eval $set_cc_for_build - - # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating - # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler - # generating 64-bit code. 
GNU and HP use different nomenclature: - # - # $ CC_FOR_BUILD=cc ./config.guess - # => hppa2.0w-hp-hpux11.23 - # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess - # => hppa64-hp-hpux11.23 - - if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | - grep -q __LP64__ - then - HP_ARCH="hppa2.0w" - else - HP_ARCH="hppa64" - fi - fi - echo ${HP_ARCH}-hp-hpux${HPUX_REV} - exit ;; - ia64:HP-UX:*:*) - HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` - echo ia64-hp-hpux${HPUX_REV} - exit ;; - 3050*:HI-UX:*:*) - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c - #include - int - main () - { - long cpu = sysconf (_SC_CPU_VERSION); - /* The order matters, because CPU_IS_HP_MC68K erroneously returns - true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct - results, however. */ - if (CPU_IS_PA_RISC (cpu)) - { - switch (cpu) - { - case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; - case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; - case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; - default: puts ("hppa-hitachi-hiuxwe2"); break; - } - } - else if (CPU_IS_HP_MC68K (cpu)) - puts ("m68k-hitachi-hiuxwe2"); - else puts ("unknown-hitachi-hiuxwe2"); - exit (0); - } -EOF - $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` && - { echo "$SYSTEM_NAME"; exit; } - echo unknown-hitachi-hiuxwe2 - exit ;; - 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) - echo hppa1.1-hp-bsd - exit ;; - 9000/8??:4.3bsd:*:*) - echo hppa1.0-hp-bsd - exit ;; - *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) - echo hppa1.0-hp-mpeix - exit ;; - hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) - echo hppa1.1-hp-osf - exit ;; - hp8??:OSF1:*:*) - echo hppa1.0-hp-osf - exit ;; - i*86:OSF1:*:*) - if [ -x /usr/sbin/sysversion ] ; then - echo ${UNAME_MACHINE}-unknown-osf1mk - else - echo ${UNAME_MACHINE}-unknown-osf1 - fi - exit ;; - parisc*:Lites*:*:*) - echo hppa1.1-hp-lites - exit ;; - C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) - echo c1-convex-bsd - exit ;; - C2*:ConvexOS:*:* | 
convex:ConvexOS:C2*:*) - if getsysinfo -f scalar_acc - then echo c32-convex-bsd - else echo c2-convex-bsd - fi - exit ;; - C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) - echo c34-convex-bsd - exit ;; - C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) - echo c38-convex-bsd - exit ;; - C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) - echo c4-convex-bsd - exit ;; - CRAY*Y-MP:*:*:*) - echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' - exit ;; - CRAY*[A-Z]90:*:*:*) - echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ - | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ - -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ - -e 's/\.[^.]*$/.X/' - exit ;; - CRAY*TS:*:*:*) - echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' - exit ;; - CRAY*T3E:*:*:*) - echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' - exit ;; - CRAY*SV1:*:*:*) - echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' - exit ;; - *:UNICOS/mp:*:*) - echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' - exit ;; - F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) - FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` - FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` - FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` - echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" - exit ;; - 5000:UNIX_System_V:4.*:*) - FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` - FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'` - echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" - exit ;; - i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) - echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} - exit ;; - sparc*:BSD/OS:*:*) - echo sparc-unknown-bsdi${UNAME_RELEASE} - exit ;; - *:BSD/OS:*:*) - echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} 
- exit ;; - *:FreeBSD:*:*) - UNAME_PROCESSOR=`/usr/bin/uname -p` - case ${UNAME_PROCESSOR} in - amd64) - echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; - *) - echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; - esac - exit ;; - i*:CYGWIN*:*) - echo ${UNAME_MACHINE}-pc-cygwin - exit ;; - *:MINGW64*:*) - echo ${UNAME_MACHINE}-pc-mingw64 - exit ;; - *:MINGW*:*) - echo ${UNAME_MACHINE}-pc-mingw32 - exit ;; - *:MSYS*:*) - echo ${UNAME_MACHINE}-pc-msys - exit ;; - i*:windows32*:*) - # uname -m includes "-pc" on this system. - echo ${UNAME_MACHINE}-mingw32 - exit ;; - i*:PW*:*) - echo ${UNAME_MACHINE}-pc-pw32 - exit ;; - *:Interix*:*) - case ${UNAME_MACHINE} in - x86) - echo i586-pc-interix${UNAME_RELEASE} - exit ;; - authenticamd | genuineintel | EM64T) - echo x86_64-unknown-interix${UNAME_RELEASE} - exit ;; - IA64) - echo ia64-unknown-interix${UNAME_RELEASE} - exit ;; - esac ;; - [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) - echo i${UNAME_MACHINE}-pc-mks - exit ;; - 8664:Windows_NT:*) - echo x86_64-pc-mks - exit ;; - i*:Windows_NT*:* | Pentium*:Windows_NT*:*) - # How do we know it's Interix rather than the generic POSIX subsystem? - # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we - # UNAME_MACHINE based on the output of uname instead of i386? 
- echo i586-pc-interix - exit ;; - i*:UWIN*:*) - echo ${UNAME_MACHINE}-pc-uwin - exit ;; - amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) - echo x86_64-unknown-cygwin - exit ;; - p*:CYGWIN*:*) - echo powerpcle-unknown-cygwin - exit ;; - prep*:SunOS:5.*:*) - echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - *:GNU:*:*) - # the GNU system - echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-${LIBC}`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` - exit ;; - *:GNU/*:*:*) - # other systems with GNU libc and userland - echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC} - exit ;; - i*86:Minix:*:*) - echo ${UNAME_MACHINE}-pc-minix - exit ;; - aarch64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - aarch64_be:Linux:*:*) - UNAME_MACHINE=aarch64_be - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - alpha:Linux:*:*) - case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in - EV5) UNAME_MACHINE=alphaev5 ;; - EV56) UNAME_MACHINE=alphaev56 ;; - PCA56) UNAME_MACHINE=alphapca56 ;; - PCA57) UNAME_MACHINE=alphapca56 ;; - EV6) UNAME_MACHINE=alphaev6 ;; - EV67) UNAME_MACHINE=alphaev67 ;; - EV68*) UNAME_MACHINE=alphaev68 ;; - esac - objdump --private-headers /bin/sh | grep -q ld.so.1 - if test "$?" 
= 0 ; then LIBC="gnulibc1" ; fi - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - arc:Linux:*:* | arceb:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - arm*:Linux:*:*) - eval $set_cc_for_build - if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ - | grep -q __ARM_EABI__ - then - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - else - if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ - | grep -q __ARM_PCS_VFP - then - echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabi - else - echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabihf - fi - fi - exit ;; - avr32*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - cris:Linux:*:*) - echo ${UNAME_MACHINE}-axis-linux-${LIBC} - exit ;; - crisv32:Linux:*:*) - echo ${UNAME_MACHINE}-axis-linux-${LIBC} - exit ;; - frv:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - hexagon:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - i*86:Linux:*:*) - echo ${UNAME_MACHINE}-pc-linux-${LIBC} - exit ;; - ia64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - m32r*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - m68*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - mips:Linux:*:* | mips64:Linux:*:*) - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c - #undef CPU - #undef ${UNAME_MACHINE} - #undef ${UNAME_MACHINE}el - #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) - CPU=${UNAME_MACHINE}el - #else - #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) - CPU=${UNAME_MACHINE} - #else - CPU= - #endif - #endif -EOF - eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'` - test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; } - ;; - openrisc*:Linux:*:*) - echo or1k-unknown-linux-${LIBC} - exit ;; - or32:Linux:*:* | or1k*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - padre:Linux:*:*) - echo 
sparc-unknown-linux-${LIBC} - exit ;; - parisc64:Linux:*:* | hppa64:Linux:*:*) - echo hppa64-unknown-linux-${LIBC} - exit ;; - parisc:Linux:*:* | hppa:Linux:*:*) - # Look for CPU level - case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in - PA7*) echo hppa1.1-unknown-linux-${LIBC} ;; - PA8*) echo hppa2.0-unknown-linux-${LIBC} ;; - *) echo hppa-unknown-linux-${LIBC} ;; - esac - exit ;; - ppc64:Linux:*:*) - echo powerpc64-unknown-linux-${LIBC} - exit ;; - ppc:Linux:*:*) - echo powerpc-unknown-linux-${LIBC} - exit ;; - ppc64le:Linux:*:*) - echo powerpc64le-unknown-linux-${LIBC} - exit ;; - ppcle:Linux:*:*) - echo powerpcle-unknown-linux-${LIBC} - exit ;; - s390:Linux:*:* | s390x:Linux:*:*) - echo ${UNAME_MACHINE}-ibm-linux-${LIBC} - exit ;; - sh64*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - sh*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - sparc:Linux:*:* | sparc64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - tile*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - vax:Linux:*:*) - echo ${UNAME_MACHINE}-dec-linux-${LIBC} - exit ;; - x86_64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - xtensa*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - i*86:DYNIX/ptx:4*:*) - # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. - # earlier versions are messed up and put the nodename in both - # sysname and nodename. - echo i386-sequent-sysv4 - exit ;; - i*86:UNIX_SV:4.2MP:2.*) - # Unixware is an offshoot of SVR4, but it has its own version - # number series starting with 2... - # I am not positive that other SVR4 systems won't match this, - # I just have to hope. -- rms. - # Use sysv4.2uw... so that sysv4* matches it. - echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION} - exit ;; - i*86:OS/2:*:*) - # If we were able to find `uname', then EMX Unix compatibility - # is probably installed. 
- echo ${UNAME_MACHINE}-pc-os2-emx - exit ;; - i*86:XTS-300:*:STOP) - echo ${UNAME_MACHINE}-unknown-stop - exit ;; - i*86:atheos:*:*) - echo ${UNAME_MACHINE}-unknown-atheos - exit ;; - i*86:syllable:*:*) - echo ${UNAME_MACHINE}-pc-syllable - exit ;; - i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) - echo i386-unknown-lynxos${UNAME_RELEASE} - exit ;; - i*86:*DOS:*:*) - echo ${UNAME_MACHINE}-pc-msdosdjgpp - exit ;; - i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*) - UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'` - if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then - echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL} - else - echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL} - fi - exit ;; - i*86:*:5:[678]*) - # UnixWare 7.x, OpenUNIX and OpenServer 6. - case `/bin/uname -X | grep "^Machine"` in - *486*) UNAME_MACHINE=i486 ;; - *Pentium) UNAME_MACHINE=i586 ;; - *Pent*|*Celeron) UNAME_MACHINE=i686 ;; - esac - echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} - exit ;; - i*86:*:3.2:*) - if test -f /usr/options/cb.name; then - UNAME_REL=`sed -n 's/.*Version //p' /dev/null >/dev/null ; then - UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` - (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 - (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ - && UNAME_MACHINE=i586 - (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ - && UNAME_MACHINE=i686 - (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ - && UNAME_MACHINE=i686 - echo ${UNAME_MACHINE}-pc-sco$UNAME_REL - else - echo ${UNAME_MACHINE}-pc-sysv32 - fi - exit ;; - pc:*:*:*) - # Left here for compatibility: - # uname -m prints for DJGPP always 'pc', but it prints nothing about - # the processor, so we play safe by assuming i586. - # Note: whatever this is, it MUST be the same as what config.sub - # prints for the "djgpp" host, or else GDB configury will decide that - # this is a cross-build. 
- echo i586-pc-msdosdjgpp - exit ;; - Intel:Mach:3*:*) - echo i386-pc-mach3 - exit ;; - paragon:*:*:*) - echo i860-intel-osf1 - exit ;; - i860:*:4.*:*) # i860-SVR4 - if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then - echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4 - else # Add other i860-SVR4 vendors below as they are discovered. - echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 - fi - exit ;; - mini*:CTIX:SYS*5:*) - # "miniframe" - echo m68010-convergent-sysv - exit ;; - mc68k:UNIX:SYSTEM5:3.51m) - echo m68k-convergent-sysv - exit ;; - M680?0:D-NIX:5.3:*) - echo m68k-diab-dnix - exit ;; - M68*:*:R3V[5678]*:*) - test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; - 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) - OS_REL='' - test -r /etc/.relid \ - && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` - /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ - && { echo i486-ncr-sysv4.3${OS_REL}; exit; } - /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ - && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; - 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) - /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ - && { echo i486-ncr-sysv4; exit; } ;; - NCR*:*:4.2:* | MPRAS*:*:4.2:*) - OS_REL='.3' - test -r /etc/.relid \ - && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` - /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ - && { echo i486-ncr-sysv4.3${OS_REL}; exit; } - /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ - && { echo i586-ncr-sysv4.3${OS_REL}; exit; } - /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ - && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; - m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) - echo m68k-unknown-lynxos${UNAME_RELEASE} - exit ;; - mc68030:UNIX_System_V:4.*:*) - echo m68k-atari-sysv4 - exit ;; - 
TSUNAMI:LynxOS:2.*:*) - echo sparc-unknown-lynxos${UNAME_RELEASE} - exit ;; - rs6000:LynxOS:2.*:*) - echo rs6000-unknown-lynxos${UNAME_RELEASE} - exit ;; - PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) - echo powerpc-unknown-lynxos${UNAME_RELEASE} - exit ;; - SM[BE]S:UNIX_SV:*:*) - echo mips-dde-sysv${UNAME_RELEASE} - exit ;; - RM*:ReliantUNIX-*:*:*) - echo mips-sni-sysv4 - exit ;; - RM*:SINIX-*:*:*) - echo mips-sni-sysv4 - exit ;; - *:SINIX-*:*:*) - if uname -p 2>/dev/null >/dev/null ; then - UNAME_MACHINE=`(uname -p) 2>/dev/null` - echo ${UNAME_MACHINE}-sni-sysv4 - else - echo ns32k-sni-sysv - fi - exit ;; - PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort - # says - echo i586-unisys-sysv4 - exit ;; - *:UNIX_System_V:4*:FTX*) - # From Gerald Hewes . - # How about differentiating between stratus architectures? -djm - echo hppa1.1-stratus-sysv4 - exit ;; - *:*:*:FTX*) - # From seanf@swdc.stratus.com. - echo i860-stratus-sysv4 - exit ;; - i*86:VOS:*:*) - # From Paul.Green@stratus.com. - echo ${UNAME_MACHINE}-stratus-vos - exit ;; - *:VOS:*:*) - # From Paul.Green@stratus.com. - echo hppa1.1-stratus-vos - exit ;; - mc68*:A/UX:*:*) - echo m68k-apple-aux${UNAME_RELEASE} - exit ;; - news*:NEWS-OS:6*:*) - echo mips-sony-newsos6 - exit ;; - R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) - if [ -d /usr/nec ]; then - echo mips-nec-sysv${UNAME_RELEASE} - else - echo mips-unknown-sysv${UNAME_RELEASE} - fi - exit ;; - BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. - echo powerpc-be-beos - exit ;; - BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. - echo powerpc-apple-beos - exit ;; - BePC:BeOS:*:*) # BeOS running on Intel PC compatible. - echo i586-pc-beos - exit ;; - BePC:Haiku:*:*) # Haiku running on Intel PC compatible. 
- echo i586-pc-haiku - exit ;; - x86_64:Haiku:*:*) - echo x86_64-unknown-haiku - exit ;; - SX-4:SUPER-UX:*:*) - echo sx4-nec-superux${UNAME_RELEASE} - exit ;; - SX-5:SUPER-UX:*:*) - echo sx5-nec-superux${UNAME_RELEASE} - exit ;; - SX-6:SUPER-UX:*:*) - echo sx6-nec-superux${UNAME_RELEASE} - exit ;; - SX-7:SUPER-UX:*:*) - echo sx7-nec-superux${UNAME_RELEASE} - exit ;; - SX-8:SUPER-UX:*:*) - echo sx8-nec-superux${UNAME_RELEASE} - exit ;; - SX-8R:SUPER-UX:*:*) - echo sx8r-nec-superux${UNAME_RELEASE} - exit ;; - Power*:Rhapsody:*:*) - echo powerpc-apple-rhapsody${UNAME_RELEASE} - exit ;; - *:Rhapsody:*:*) - echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} - exit ;; - *:Darwin:*:*) - UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown - eval $set_cc_for_build - if test "$UNAME_PROCESSOR" = unknown ; then - UNAME_PROCESSOR=powerpc - fi - if test `echo "$UNAME_RELEASE" | sed -e 's/\..*//'` -le 10 ; then - if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then - if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ - (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ - grep IS_64BIT_ARCH >/dev/null - then - case $UNAME_PROCESSOR in - i386) UNAME_PROCESSOR=x86_64 ;; - powerpc) UNAME_PROCESSOR=powerpc64 ;; - esac - fi - fi - elif test "$UNAME_PROCESSOR" = i386 ; then - # Avoid executing cc on OS X 10.9, as it ships with a stub - # that puts up a graphical alert prompting to install - # developer tools. Any system running Mac OS X 10.7 or - # later (Darwin 11 and later) is required to have a 64-bit - # processor. This is not true of the ARM version of Darwin - # that Apple uses in portable devices. 
- UNAME_PROCESSOR=x86_64 - fi - echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} - exit ;; - *:procnto*:*:* | *:QNX:[0123456789]*:*) - UNAME_PROCESSOR=`uname -p` - if test "$UNAME_PROCESSOR" = "x86"; then - UNAME_PROCESSOR=i386 - UNAME_MACHINE=pc - fi - echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE} - exit ;; - *:QNX:*:4*) - echo i386-pc-qnx - exit ;; - NEO-?:NONSTOP_KERNEL:*:*) - echo neo-tandem-nsk${UNAME_RELEASE} - exit ;; - NSE-*:NONSTOP_KERNEL:*:*) - echo nse-tandem-nsk${UNAME_RELEASE} - exit ;; - NSR-?:NONSTOP_KERNEL:*:*) - echo nsr-tandem-nsk${UNAME_RELEASE} - exit ;; - *:NonStop-UX:*:*) - echo mips-compaq-nonstopux - exit ;; - BS2000:POSIX*:*:*) - echo bs2000-siemens-sysv - exit ;; - DS/*:UNIX_System_V:*:*) - echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE} - exit ;; - *:Plan9:*:*) - # "uname -m" is not consistent, so use $cputype instead. 386 - # is converted to i386 for consistency with other x86 - # operating systems. - if test "$cputype" = "386"; then - UNAME_MACHINE=i386 - else - UNAME_MACHINE="$cputype" - fi - echo ${UNAME_MACHINE}-unknown-plan9 - exit ;; - *:TOPS-10:*:*) - echo pdp10-unknown-tops10 - exit ;; - *:TENEX:*:*) - echo pdp10-unknown-tenex - exit ;; - KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) - echo pdp10-dec-tops20 - exit ;; - XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) - echo pdp10-xkl-tops20 - exit ;; - *:TOPS-20:*:*) - echo pdp10-unknown-tops20 - exit ;; - *:ITS:*:*) - echo pdp10-unknown-its - exit ;; - SEI:*:*:SEIUX) - echo mips-sei-seiux${UNAME_RELEASE} - exit ;; - *:DragonFly:*:*) - echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` - exit ;; - *:*VMS:*:*) - UNAME_MACHINE=`(uname -p) 2>/dev/null` - case "${UNAME_MACHINE}" in - A*) echo alpha-dec-vms ; exit ;; - I*) echo ia64-dec-vms ; exit ;; - V*) echo vax-dec-vms ; exit ;; - esac ;; - *:XENIX:*:SysV) - echo i386-pc-xenix - exit ;; - i*86:skyos:*:*) - echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 
's/ .*$//' - exit ;; - i*86:rdos:*:*) - echo ${UNAME_MACHINE}-pc-rdos - exit ;; - i*86:AROS:*:*) - echo ${UNAME_MACHINE}-pc-aros - exit ;; - x86_64:VMkernel:*:*) - echo ${UNAME_MACHINE}-unknown-esx - exit ;; -esac - -cat >&2 < in order to provide the needed -information to handle your system. - -config.guess timestamp = $timestamp - -uname -m = `(uname -m) 2>/dev/null || echo unknown` -uname -r = `(uname -r) 2>/dev/null || echo unknown` -uname -s = `(uname -s) 2>/dev/null || echo unknown` -uname -v = `(uname -v) 2>/dev/null || echo unknown` - -/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` -/bin/uname -X = `(/bin/uname -X) 2>/dev/null` - -hostinfo = `(hostinfo) 2>/dev/null` -/bin/universe = `(/bin/universe) 2>/dev/null` -/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` -/bin/arch = `(/bin/arch) 2>/dev/null` -/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` -/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` - -UNAME_MACHINE = ${UNAME_MACHINE} -UNAME_RELEASE = ${UNAME_RELEASE} -UNAME_SYSTEM = ${UNAME_SYSTEM} -UNAME_VERSION = ${UNAME_VERSION} -EOF - -exit 1 - -# Local variables: -# eval: (add-hook 'write-file-hooks 'time-stamp) -# time-stamp-start: "timestamp='" -# time-stamp-format: "%:y-%02m-%02d" -# time-stamp-end: "'" -# End: diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar11=/config.sub cabal-install-1.22-1.22.9.0/=unpacked-tar11=/config.sub --- cabal-install-1.22-1.22.6.0/=unpacked-tar11=/config.sub 2014-11-21 10:44:17.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar11=/config.sub 1970-01-01 00:00:00.000000000 +0000 @@ -1,1794 +0,0 @@ -#! /bin/sh -# Configuration validation subroutine script. -# Copyright 1992-2014 Free Software Foundation, Inc. - -timestamp='2014-05-01' - -# This file is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. 
-# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, see . -# -# As a special exception to the GNU General Public License, if you -# distribute this file as part of a program that contains a -# configuration script generated by Autoconf, you may include it under -# the same distribution terms that you use for the rest of that -# program. This Exception is an additional permission under section 7 -# of the GNU General Public License, version 3 ("GPLv3"). - - -# Please send patches with a ChangeLog entry to config-patches@gnu.org. -# -# Configuration subroutine to validate and canonicalize a configuration type. -# Supply the specified configuration type as an argument. -# If it is invalid, we print an error message on stderr and exit with code 1. -# Otherwise, we print the canonical config type on stdout and succeed. - -# You can get the latest version of this script from: -# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD - -# This file is supposed to be the same for all GNU packages -# and recognize all the CPU types, system types and aliases -# that are meaningful with *any* GNU software. -# Each package is responsible for reporting which valid configurations -# it does not support. The user should be able to distinguish -# a failure to support a valid configuration from a meaningless -# configuration. - -# The goal of this file is to map all the various variations of a given -# machine specification into a single specification in the form: -# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM -# or in some cases, the newer four-part form: -# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM -# It is wrong to echo any other type of specification. 
- -me=`echo "$0" | sed -e 's,.*/,,'` - -usage="\ -Usage: $0 [OPTION] CPU-MFR-OPSYS - $0 [OPTION] ALIAS - -Canonicalize a configuration name. - -Operation modes: - -h, --help print this help, then exit - -t, --time-stamp print date of last modification, then exit - -v, --version print version number, then exit - -Report bugs and patches to ." - -version="\ -GNU config.sub ($timestamp) - -Copyright 1992-2014 Free Software Foundation, Inc. - -This is free software; see the source for copying conditions. There is NO -warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." - -help=" -Try \`$me --help' for more information." - -# Parse command line -while test $# -gt 0 ; do - case $1 in - --time-stamp | --time* | -t ) - echo "$timestamp" ; exit ;; - --version | -v ) - echo "$version" ; exit ;; - --help | --h* | -h ) - echo "$usage"; exit ;; - -- ) # Stop option processing - shift; break ;; - - ) # Use stdin as input. - break ;; - -* ) - echo "$me: invalid option $1$help" - exit 1 ;; - - *local*) - # First pass through any local machine types. - echo $1 - exit ;; - - * ) - break ;; - esac -done - -case $# in - 0) echo "$me: missing argument$help" >&2 - exit 1;; - 1) ;; - *) echo "$me: too many arguments$help" >&2 - exit 1;; -esac - -# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). -# Here we must recognize all the valid KERNEL-OS combinations. 
-maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` -case $maybe_os in - nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \ - linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \ - knetbsd*-gnu* | netbsd*-gnu* | \ - kopensolaris*-gnu* | \ - storm-chaos* | os2-emx* | rtmk-nova*) - os=-$maybe_os - basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` - ;; - android-linux) - os=-linux-android - basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown - ;; - *) - basic_machine=`echo $1 | sed 's/-[^-]*$//'` - if [ $basic_machine != $1 ] - then os=`echo $1 | sed 's/.*-/-/'` - else os=; fi - ;; -esac - -### Let's recognize common machines as not being operating systems so -### that things like config.sub decstation-3100 work. We also -### recognize some manufacturers as not being operating systems, so we -### can provide default operating systems below. -case $os in - -sun*os*) - # Prevent following clause from handling this invalid input. 
- ;; - -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ - -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \ - -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \ - -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ - -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ - -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ - -apple | -axis | -knuth | -cray | -microblaze*) - os= - basic_machine=$1 - ;; - -bluegene*) - os=-cnk - ;; - -sim | -cisco | -oki | -wec | -winbond) - os= - basic_machine=$1 - ;; - -scout) - ;; - -wrs) - os=-vxworks - basic_machine=$1 - ;; - -chorusos*) - os=-chorusos - basic_machine=$1 - ;; - -chorusrdb) - os=-chorusrdb - basic_machine=$1 - ;; - -hiux*) - os=-hiuxwe2 - ;; - -sco6) - os=-sco5v6 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco5) - os=-sco3.2v5 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco4) - os=-sco3.2v4 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco3.2.[4-9]*) - os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco3.2v[4-9]*) - # Don't forget version if it is 3.2v4 or newer. - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco5v6*) - # Don't forget version if it is 3.2v4 or newer. 
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco*) - os=-sco3.2v2 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -udk*) - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -isc) - os=-isc2.2 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -clix*) - basic_machine=clipper-intergraph - ;; - -isc*) - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -lynx*178) - os=-lynxos178 - ;; - -lynx*5) - os=-lynxos5 - ;; - -lynx*) - os=-lynxos - ;; - -ptx*) - basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'` - ;; - -windowsnt*) - os=`echo $os | sed -e 's/windowsnt/winnt/'` - ;; - -psos*) - os=-psos - ;; - -mint | -mint[0-9]*) - basic_machine=m68k-atari - os=-mint - ;; -esac - -# Decode aliases for certain CPU-COMPANY combinations. -case $basic_machine in - # Recognize the basic CPU types without company name. - # Some are omitted here because they have special meanings below. - 1750a | 580 \ - | a29k \ - | aarch64 | aarch64_be \ - | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ - | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ - | am33_2.0 \ - | arc | arceb \ - | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \ - | avr | avr32 \ - | be32 | be64 \ - | bfin \ - | c4x | c8051 | clipper \ - | d10v | d30v | dlx | dsp16xx \ - | epiphany \ - | fido | fr30 | frv \ - | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ - | hexagon \ - | i370 | i860 | i960 | ia64 \ - | ip2k | iq2000 \ - | k1om \ - | le32 | le64 \ - | lm32 \ - | m32c | m32r | m32rle | m68000 | m68k | m88k \ - | maxq | mb | microblaze | microblazeel | mcore | mep | metag \ - | mips | mipsbe | mipseb | mipsel | mipsle \ - | mips16 \ - | mips64 | mips64el \ - | mips64octeon | mips64octeonel \ - | mips64orion | mips64orionel \ - | mips64r5900 | mips64r5900el \ - | mips64vr | mips64vrel \ - | mips64vr4100 | mips64vr4100el \ - | mips64vr4300 | mips64vr4300el \ - | mips64vr5000 | 
mips64vr5000el \ - | mips64vr5900 | mips64vr5900el \ - | mipsisa32 | mipsisa32el \ - | mipsisa32r2 | mipsisa32r2el \ - | mipsisa32r6 | mipsisa32r6el \ - | mipsisa64 | mipsisa64el \ - | mipsisa64r2 | mipsisa64r2el \ - | mipsisa64r6 | mipsisa64r6el \ - | mipsisa64sb1 | mipsisa64sb1el \ - | mipsisa64sr71k | mipsisa64sr71kel \ - | mipsr5900 | mipsr5900el \ - | mipstx39 | mipstx39el \ - | mn10200 | mn10300 \ - | moxie \ - | mt \ - | msp430 \ - | nds32 | nds32le | nds32be \ - | nios | nios2 | nios2eb | nios2el \ - | ns16k | ns32k \ - | open8 | or1k | or1knd | or32 \ - | pdp10 | pdp11 | pj | pjl \ - | powerpc | powerpc64 | powerpc64le | powerpcle \ - | pyramid \ - | rl78 | rx \ - | score \ - | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ - | sh64 | sh64le \ - | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \ - | sparcv8 | sparcv9 | sparcv9b | sparcv9v \ - | spu \ - | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \ - | ubicom32 \ - | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \ - | we32k \ - | x86 | xc16x | xstormy16 | xtensa \ - | z8k | z80) - basic_machine=$basic_machine-unknown - ;; - c54x) - basic_machine=tic54x-unknown - ;; - c55x) - basic_machine=tic55x-unknown - ;; - c6x) - basic_machine=tic6x-unknown - ;; - m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip) - basic_machine=$basic_machine-unknown - os=-none - ;; - m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k) - ;; - ms1) - basic_machine=mt-unknown - ;; - - strongarm | thumb | xscale) - basic_machine=arm-unknown - ;; - xgate) - basic_machine=$basic_machine-unknown - os=-none - ;; - xscaleeb) - basic_machine=armeb-unknown - ;; - - xscaleel) - basic_machine=armel-unknown - ;; - - # We use `pc' rather than `unknown' - # because (1) that's what they normally are, and - # (2) the word "unknown" tends to confuse beginning users. 
- i*86 | x86_64) - basic_machine=$basic_machine-pc - ;; - # Object if more than one company name word. - *-*-*) - echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 - exit 1 - ;; - # Recognize the basic CPU types with company name. - 580-* \ - | a29k-* \ - | aarch64-* | aarch64_be-* \ - | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ - | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ - | alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \ - | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ - | avr-* | avr32-* \ - | be32-* | be64-* \ - | bfin-* | bs2000-* \ - | c[123]* | c30-* | [cjt]90-* | c4x-* \ - | c8051-* | clipper-* | craynv-* | cydra-* \ - | d10v-* | d30v-* | dlx-* \ - | elxsi-* \ - | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ - | h8300-* | h8500-* \ - | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ - | hexagon-* \ - | i*86-* | i860-* | i960-* | ia64-* \ - | ip2k-* | iq2000-* \ - | k1om-* \ - | le32-* | le64-* \ - | lm32-* \ - | m32c-* | m32r-* | m32rle-* \ - | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ - | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \ - | microblaze-* | microblazeel-* \ - | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ - | mips16-* \ - | mips64-* | mips64el-* \ - | mips64octeon-* | mips64octeonel-* \ - | mips64orion-* | mips64orionel-* \ - | mips64r5900-* | mips64r5900el-* \ - | mips64vr-* | mips64vrel-* \ - | mips64vr4100-* | mips64vr4100el-* \ - | mips64vr4300-* | mips64vr4300el-* \ - | mips64vr5000-* | mips64vr5000el-* \ - | mips64vr5900-* | mips64vr5900el-* \ - | mipsisa32-* | mipsisa32el-* \ - | mipsisa32r2-* | mipsisa32r2el-* \ - | mipsisa32r6-* | mipsisa32r6el-* \ - | mipsisa64-* | mipsisa64el-* \ - | mipsisa64r2-* | mipsisa64r2el-* \ - | mipsisa64r6-* | mipsisa64r6el-* \ - | mipsisa64sb1-* | mipsisa64sb1el-* \ - | mipsisa64sr71k-* | mipsisa64sr71kel-* \ - | mipsr5900-* | mipsr5900el-* \ - | mipstx39-* | 
mipstx39el-* \ - | mmix-* \ - | mt-* \ - | msp430-* \ - | nds32-* | nds32le-* | nds32be-* \ - | nios-* | nios2-* | nios2eb-* | nios2el-* \ - | none-* | np1-* | ns16k-* | ns32k-* \ - | open8-* \ - | or1k*-* \ - | orion-* \ - | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ - | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \ - | pyramid-* \ - | rl78-* | romp-* | rs6000-* | rx-* \ - | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \ - | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ - | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \ - | sparclite-* \ - | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx?-* \ - | tahoe-* \ - | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ - | tile*-* \ - | tron-* \ - | ubicom32-* \ - | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \ - | vax-* \ - | we32k-* \ - | x86-* | x86_64-* | xc16x-* | xps100-* \ - | xstormy16-* | xtensa*-* \ - | ymp-* \ - | z8k-* | z80-*) - ;; - # Recognize the basic CPU types without company name, with glob match. - xtensa*) - basic_machine=$basic_machine-unknown - ;; - # Recognize the various machine names and aliases which stand - # for a CPU type and a company and sometimes even an OS. 
- 386bsd) - basic_machine=i386-unknown - os=-bsd - ;; - 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) - basic_machine=m68000-att - ;; - 3b*) - basic_machine=we32k-att - ;; - a29khif) - basic_machine=a29k-amd - os=-udi - ;; - abacus) - basic_machine=abacus-unknown - ;; - adobe68k) - basic_machine=m68010-adobe - os=-scout - ;; - alliant | fx80) - basic_machine=fx80-alliant - ;; - altos | altos3068) - basic_machine=m68k-altos - ;; - am29k) - basic_machine=a29k-none - os=-bsd - ;; - amd64) - basic_machine=x86_64-pc - ;; - amd64-*) - basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - amdahl) - basic_machine=580-amdahl - os=-sysv - ;; - amiga | amiga-*) - basic_machine=m68k-unknown - ;; - amigaos | amigados) - basic_machine=m68k-unknown - os=-amigaos - ;; - amigaunix | amix) - basic_machine=m68k-unknown - os=-sysv4 - ;; - apollo68) - basic_machine=m68k-apollo - os=-sysv - ;; - apollo68bsd) - basic_machine=m68k-apollo - os=-bsd - ;; - aros) - basic_machine=i386-pc - os=-aros - ;; - aux) - basic_machine=m68k-apple - os=-aux - ;; - balance) - basic_machine=ns32k-sequent - os=-dynix - ;; - blackfin) - basic_machine=bfin-unknown - os=-linux - ;; - blackfin-*) - basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'` - os=-linux - ;; - bluegene*) - basic_machine=powerpc-ibm - os=-cnk - ;; - c54x-*) - basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - c55x-*) - basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - c6x-*) - basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - c90) - basic_machine=c90-cray - os=-unicos - ;; - cegcc) - basic_machine=arm-unknown - os=-cegcc - ;; - convex-c1) - basic_machine=c1-convex - os=-bsd - ;; - convex-c2) - basic_machine=c2-convex - os=-bsd - ;; - convex-c32) - basic_machine=c32-convex - os=-bsd - ;; - convex-c34) - basic_machine=c34-convex - os=-bsd - ;; - convex-c38) - basic_machine=c38-convex - os=-bsd - ;; - cray | j90) - basic_machine=j90-cray - 
os=-unicos - ;; - craynv) - basic_machine=craynv-cray - os=-unicosmp - ;; - cr16 | cr16-*) - basic_machine=cr16-unknown - os=-elf - ;; - crds | unos) - basic_machine=m68k-crds - ;; - crisv32 | crisv32-* | etraxfs*) - basic_machine=crisv32-axis - ;; - cris | cris-* | etrax*) - basic_machine=cris-axis - ;; - crx) - basic_machine=crx-unknown - os=-elf - ;; - da30 | da30-*) - basic_machine=m68k-da30 - ;; - decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn) - basic_machine=mips-dec - ;; - decsystem10* | dec10*) - basic_machine=pdp10-dec - os=-tops10 - ;; - decsystem20* | dec20*) - basic_machine=pdp10-dec - os=-tops20 - ;; - delta | 3300 | motorola-3300 | motorola-delta \ - | 3300-motorola | delta-motorola) - basic_machine=m68k-motorola - ;; - delta88) - basic_machine=m88k-motorola - os=-sysv3 - ;; - dicos) - basic_machine=i686-pc - os=-dicos - ;; - djgpp) - basic_machine=i586-pc - os=-msdosdjgpp - ;; - dpx20 | dpx20-*) - basic_machine=rs6000-bull - os=-bosx - ;; - dpx2* | dpx2*-bull) - basic_machine=m68k-bull - os=-sysv3 - ;; - ebmon29k) - basic_machine=a29k-amd - os=-ebmon - ;; - elxsi) - basic_machine=elxsi-elxsi - os=-bsd - ;; - encore | umax | mmax) - basic_machine=ns32k-encore - ;; - es1800 | OSE68k | ose68k | ose | OSE) - basic_machine=m68k-ericsson - os=-ose - ;; - fx2800) - basic_machine=i860-alliant - ;; - genix) - basic_machine=ns32k-ns - ;; - gmicro) - basic_machine=tron-gmicro - os=-sysv - ;; - go32) - basic_machine=i386-pc - os=-go32 - ;; - h3050r* | hiux*) - basic_machine=hppa1.1-hitachi - os=-hiuxwe2 - ;; - h8300hms) - basic_machine=h8300-hitachi - os=-hms - ;; - h8300xray) - basic_machine=h8300-hitachi - os=-xray - ;; - h8500hms) - basic_machine=h8500-hitachi - os=-hms - ;; - harris) - basic_machine=m88k-harris - os=-sysv3 - ;; - hp300-*) - basic_machine=m68k-hp - ;; - hp300bsd) - basic_machine=m68k-hp - os=-bsd - ;; - hp300hpux) - basic_machine=m68k-hp - os=-hpux - ;; - hp3k9[0-9][0-9] | hp9[0-9][0-9]) - basic_machine=hppa1.0-hp - 
;; - hp9k2[0-9][0-9] | hp9k31[0-9]) - basic_machine=m68000-hp - ;; - hp9k3[2-9][0-9]) - basic_machine=m68k-hp - ;; - hp9k6[0-9][0-9] | hp6[0-9][0-9]) - basic_machine=hppa1.0-hp - ;; - hp9k7[0-79][0-9] | hp7[0-79][0-9]) - basic_machine=hppa1.1-hp - ;; - hp9k78[0-9] | hp78[0-9]) - # FIXME: really hppa2.0-hp - basic_machine=hppa1.1-hp - ;; - hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) - # FIXME: really hppa2.0-hp - basic_machine=hppa1.1-hp - ;; - hp9k8[0-9][13679] | hp8[0-9][13679]) - basic_machine=hppa1.1-hp - ;; - hp9k8[0-9][0-9] | hp8[0-9][0-9]) - basic_machine=hppa1.0-hp - ;; - hppa-next) - os=-nextstep3 - ;; - hppaosf) - basic_machine=hppa1.1-hp - os=-osf - ;; - hppro) - basic_machine=hppa1.1-hp - os=-proelf - ;; - i370-ibm* | ibm*) - basic_machine=i370-ibm - ;; - i*86v32) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-sysv32 - ;; - i*86v4*) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-sysv4 - ;; - i*86v) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-sysv - ;; - i*86sol2) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-solaris2 - ;; - i386mach) - basic_machine=i386-mach - os=-mach - ;; - i386-vsta | vsta) - basic_machine=i386-unknown - os=-vsta - ;; - iris | iris4d) - basic_machine=mips-sgi - case $os in - -irix*) - ;; - *) - os=-irix4 - ;; - esac - ;; - isi68 | isi) - basic_machine=m68k-isi - os=-sysv - ;; - m68knommu) - basic_machine=m68k-unknown - os=-linux - ;; - m68knommu-*) - basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'` - os=-linux - ;; - m88k-omron*) - basic_machine=m88k-omron - ;; - magnum | m3230) - basic_machine=mips-mips - os=-sysv - ;; - merlin) - basic_machine=ns32k-utek - os=-sysv - ;; - microblaze*) - basic_machine=microblaze-xilinx - ;; - mingw64) - basic_machine=x86_64-pc - os=-mingw64 - ;; - mingw32) - basic_machine=i686-pc - os=-mingw32 - ;; - mingw32ce) - basic_machine=arm-unknown - os=-mingw32ce - ;; - miniframe) - 
basic_machine=m68000-convergent - ;; - *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) - basic_machine=m68k-atari - os=-mint - ;; - mips3*-*) - basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'` - ;; - mips3*) - basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown - ;; - monitor) - basic_machine=m68k-rom68k - os=-coff - ;; - morphos) - basic_machine=powerpc-unknown - os=-morphos - ;; - msdos) - basic_machine=i386-pc - os=-msdos - ;; - ms1-*) - basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'` - ;; - msys) - basic_machine=i686-pc - os=-msys - ;; - mvs) - basic_machine=i370-ibm - os=-mvs - ;; - nacl) - basic_machine=le32-unknown - os=-nacl - ;; - ncr3000) - basic_machine=i486-ncr - os=-sysv4 - ;; - netbsd386) - basic_machine=i386-unknown - os=-netbsd - ;; - netwinder) - basic_machine=armv4l-rebel - os=-linux - ;; - news | news700 | news800 | news900) - basic_machine=m68k-sony - os=-newsos - ;; - news1000) - basic_machine=m68030-sony - os=-newsos - ;; - news-3600 | risc-news) - basic_machine=mips-sony - os=-newsos - ;; - necv70) - basic_machine=v70-nec - os=-sysv - ;; - next | m*-next ) - basic_machine=m68k-next - case $os in - -nextstep* ) - ;; - -ns2*) - os=-nextstep2 - ;; - *) - os=-nextstep3 - ;; - esac - ;; - nh3000) - basic_machine=m68k-harris - os=-cxux - ;; - nh[45]000) - basic_machine=m88k-harris - os=-cxux - ;; - nindy960) - basic_machine=i960-intel - os=-nindy - ;; - mon960) - basic_machine=i960-intel - os=-mon960 - ;; - nonstopux) - basic_machine=mips-compaq - os=-nonstopux - ;; - np1) - basic_machine=np1-gould - ;; - neo-tandem) - basic_machine=neo-tandem - ;; - nse-tandem) - basic_machine=nse-tandem - ;; - nsr-tandem) - basic_machine=nsr-tandem - ;; - op50n-* | op60c-*) - basic_machine=hppa1.1-oki - os=-proelf - ;; - openrisc | openrisc-*) - basic_machine=or32-unknown - ;; - os400) - basic_machine=powerpc-ibm - os=-os400 - ;; - OSE68000 | ose68000) - basic_machine=m68000-ericsson - os=-ose - ;; - os68k) - 
basic_machine=m68k-none - os=-os68k - ;; - pa-hitachi) - basic_machine=hppa1.1-hitachi - os=-hiuxwe2 - ;; - paragon) - basic_machine=i860-intel - os=-osf - ;; - parisc) - basic_machine=hppa-unknown - os=-linux - ;; - parisc-*) - basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'` - os=-linux - ;; - pbd) - basic_machine=sparc-tti - ;; - pbb) - basic_machine=m68k-tti - ;; - pc532 | pc532-*) - basic_machine=ns32k-pc532 - ;; - pc98) - basic_machine=i386-pc - ;; - pc98-*) - basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pentium | p5 | k5 | k6 | nexgen | viac3) - basic_machine=i586-pc - ;; - pentiumpro | p6 | 6x86 | athlon | athlon_*) - basic_machine=i686-pc - ;; - pentiumii | pentium2 | pentiumiii | pentium3) - basic_machine=i686-pc - ;; - pentium4) - basic_machine=i786-pc - ;; - pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) - basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pentiumpro-* | p6-* | 6x86-* | athlon-*) - basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) - basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pentium4-*) - basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pn) - basic_machine=pn-gould - ;; - power) basic_machine=power-ibm - ;; - ppc | ppcbe) basic_machine=powerpc-unknown - ;; - ppc-* | ppcbe-*) - basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - ppcle | powerpclittle | ppc-le | powerpc-little) - basic_machine=powerpcle-unknown - ;; - ppcle-* | powerpclittle-*) - basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - ppc64) basic_machine=powerpc64-unknown - ;; - ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - ppc64le | powerpc64little | ppc64-le | powerpc64-little) - basic_machine=powerpc64le-unknown - ;; - ppc64le-* | powerpc64little-*) - basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - 
ps2) - basic_machine=i386-ibm - ;; - pw32) - basic_machine=i586-unknown - os=-pw32 - ;; - rdos | rdos64) - basic_machine=x86_64-pc - os=-rdos - ;; - rdos32) - basic_machine=i386-pc - os=-rdos - ;; - rom68k) - basic_machine=m68k-rom68k - os=-coff - ;; - rm[46]00) - basic_machine=mips-siemens - ;; - rtpc | rtpc-*) - basic_machine=romp-ibm - ;; - s390 | s390-*) - basic_machine=s390-ibm - ;; - s390x | s390x-*) - basic_machine=s390x-ibm - ;; - sa29200) - basic_machine=a29k-amd - os=-udi - ;; - sb1) - basic_machine=mipsisa64sb1-unknown - ;; - sb1el) - basic_machine=mipsisa64sb1el-unknown - ;; - sde) - basic_machine=mipsisa32-sde - os=-elf - ;; - sei) - basic_machine=mips-sei - os=-seiux - ;; - sequent) - basic_machine=i386-sequent - ;; - sh) - basic_machine=sh-hitachi - os=-hms - ;; - sh5el) - basic_machine=sh5le-unknown - ;; - sh64) - basic_machine=sh64-unknown - ;; - sparclite-wrs | simso-wrs) - basic_machine=sparclite-wrs - os=-vxworks - ;; - sps7) - basic_machine=m68k-bull - os=-sysv2 - ;; - spur) - basic_machine=spur-unknown - ;; - st2000) - basic_machine=m68k-tandem - ;; - stratus) - basic_machine=i860-stratus - os=-sysv4 - ;; - strongarm-* | thumb-*) - basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - sun2) - basic_machine=m68000-sun - ;; - sun2os3) - basic_machine=m68000-sun - os=-sunos3 - ;; - sun2os4) - basic_machine=m68000-sun - os=-sunos4 - ;; - sun3os3) - basic_machine=m68k-sun - os=-sunos3 - ;; - sun3os4) - basic_machine=m68k-sun - os=-sunos4 - ;; - sun4os3) - basic_machine=sparc-sun - os=-sunos3 - ;; - sun4os4) - basic_machine=sparc-sun - os=-sunos4 - ;; - sun4sol2) - basic_machine=sparc-sun - os=-solaris2 - ;; - sun3 | sun3-*) - basic_machine=m68k-sun - ;; - sun4) - basic_machine=sparc-sun - ;; - sun386 | sun386i | roadrunner) - basic_machine=i386-sun - ;; - sv1) - basic_machine=sv1-cray - os=-unicos - ;; - symmetry) - basic_machine=i386-sequent - os=-dynix - ;; - t3e) - basic_machine=alphaev5-cray - os=-unicos - ;; - t90) - 
basic_machine=t90-cray - os=-unicos - ;; - tile*) - basic_machine=$basic_machine-unknown - os=-linux-gnu - ;; - tx39) - basic_machine=mipstx39-unknown - ;; - tx39el) - basic_machine=mipstx39el-unknown - ;; - toad1) - basic_machine=pdp10-xkl - os=-tops20 - ;; - tower | tower-32) - basic_machine=m68k-ncr - ;; - tpf) - basic_machine=s390x-ibm - os=-tpf - ;; - udi29k) - basic_machine=a29k-amd - os=-udi - ;; - ultra3) - basic_machine=a29k-nyu - os=-sym1 - ;; - v810 | necv810) - basic_machine=v810-nec - os=-none - ;; - vaxv) - basic_machine=vax-dec - os=-sysv - ;; - vms) - basic_machine=vax-dec - os=-vms - ;; - vpp*|vx|vx-*) - basic_machine=f301-fujitsu - ;; - vxworks960) - basic_machine=i960-wrs - os=-vxworks - ;; - vxworks68) - basic_machine=m68k-wrs - os=-vxworks - ;; - vxworks29k) - basic_machine=a29k-wrs - os=-vxworks - ;; - w65*) - basic_machine=w65-wdc - os=-none - ;; - w89k-*) - basic_machine=hppa1.1-winbond - os=-proelf - ;; - xbox) - basic_machine=i686-pc - os=-mingw32 - ;; - xps | xps100) - basic_machine=xps100-honeywell - ;; - xscale-* | xscalee[bl]-*) - basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'` - ;; - ymp) - basic_machine=ymp-cray - os=-unicos - ;; - z8k-*-coff) - basic_machine=z8k-unknown - os=-sim - ;; - z80-*-coff) - basic_machine=z80-unknown - os=-sim - ;; - none) - basic_machine=none-none - os=-none - ;; - -# Here we handle the default manufacturer of certain CPU types. It is in -# some cases the only manufacturer, in others, it is the most popular. 
- w89k) - basic_machine=hppa1.1-winbond - ;; - op50n) - basic_machine=hppa1.1-oki - ;; - op60c) - basic_machine=hppa1.1-oki - ;; - romp) - basic_machine=romp-ibm - ;; - mmix) - basic_machine=mmix-knuth - ;; - rs6000) - basic_machine=rs6000-ibm - ;; - vax) - basic_machine=vax-dec - ;; - pdp10) - # there are many clones, so DEC is not a safe bet - basic_machine=pdp10-unknown - ;; - pdp11) - basic_machine=pdp11-dec - ;; - we32k) - basic_machine=we32k-att - ;; - sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele) - basic_machine=sh-unknown - ;; - sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v) - basic_machine=sparc-sun - ;; - cydra) - basic_machine=cydra-cydrome - ;; - orion) - basic_machine=orion-highlevel - ;; - orion105) - basic_machine=clipper-highlevel - ;; - mac | mpw | mac-mpw) - basic_machine=m68k-apple - ;; - pmac | pmac-mpw) - basic_machine=powerpc-apple - ;; - *-unknown) - # Make sure to match an already-canonicalized machine name. - ;; - *) - echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 - exit 1 - ;; -esac - -# Here we canonicalize certain aliases for manufacturers. -case $basic_machine in - *-digital*) - basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'` - ;; - *-commodore*) - basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'` - ;; - *) - ;; -esac - -# Decode manufacturer-specific aliases for certain operating systems. - -if [ x"$os" != x"" ] -then -case $os in - # First match some system type aliases - # that might get confused with valid system types. - # -solaris* is a basic system type, with this one exception. - -auroraux) - os=-auroraux - ;; - -solaris1 | -solaris1.*) - os=`echo $os | sed -e 's|solaris1|sunos4|'` - ;; - -solaris) - os=-solaris2 - ;; - -svr4*) - os=-sysv4 - ;; - -unixware*) - os=-sysv4.2uw - ;; - -gnu/linux*) - os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` - ;; - # First accept the basic system types. - # The portable systems comes first. 
- # Each alternative MUST END IN A *, to match a version number. - # -sysv* is not here because it comes later, after sysvr4. - -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ - | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\ - | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \ - | -sym* | -kopensolaris* | -plan9* \ - | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ - | -aos* | -aros* \ - | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ - | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ - | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \ - | -bitrig* | -openbsd* | -solidbsd* \ - | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ - | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ - | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ - | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ - | -chorusos* | -chorusrdb* | -cegcc* \ - | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ - | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \ - | -linux-newlib* | -linux-musl* | -linux-uclibc* \ - | -uxpv* | -beos* | -mpeix* | -udk* \ - | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ - | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ - | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ - | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ - | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ - | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \ - | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* | -tirtos*) - # Remember, each alternative MUST END IN *, to match a version number. 
- ;; - -qnx*) - case $basic_machine in - x86-* | i*86-*) - ;; - *) - os=-nto$os - ;; - esac - ;; - -nto-qnx*) - ;; - -nto*) - os=`echo $os | sed -e 's|nto|nto-qnx|'` - ;; - -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \ - | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \ - | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) - ;; - -mac*) - os=`echo $os | sed -e 's|mac|macos|'` - ;; - -linux-dietlibc) - os=-linux-dietlibc - ;; - -linux*) - os=`echo $os | sed -e 's|linux|linux-gnu|'` - ;; - -sunos5*) - os=`echo $os | sed -e 's|sunos5|solaris2|'` - ;; - -sunos6*) - os=`echo $os | sed -e 's|sunos6|solaris3|'` - ;; - -opened*) - os=-openedition - ;; - -os400*) - os=-os400 - ;; - -wince*) - os=-wince - ;; - -osfrose*) - os=-osfrose - ;; - -osf*) - os=-osf - ;; - -utek*) - os=-bsd - ;; - -dynix*) - os=-bsd - ;; - -acis*) - os=-aos - ;; - -atheos*) - os=-atheos - ;; - -syllable*) - os=-syllable - ;; - -386bsd) - os=-bsd - ;; - -ctix* | -uts*) - os=-sysv - ;; - -nova*) - os=-rtmk-nova - ;; - -ns2 ) - os=-nextstep2 - ;; - -nsk*) - os=-nsk - ;; - # Preserve the version number of sinix5. - -sinix5.*) - os=`echo $os | sed -e 's|sinix|sysv|'` - ;; - -sinix*) - os=-sysv4 - ;; - -tpf*) - os=-tpf - ;; - -triton*) - os=-sysv3 - ;; - -oss*) - os=-sysv3 - ;; - -svr4) - os=-sysv4 - ;; - -svr3) - os=-sysv3 - ;; - -sysvr4) - os=-sysv4 - ;; - # This must come after -sysvr4. - -sysv*) - ;; - -ose*) - os=-ose - ;; - -es1800*) - os=-ose - ;; - -xenix) - os=-xenix - ;; - -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) - os=-mint - ;; - -aros*) - os=-aros - ;; - -zvmoe) - os=-zvmoe - ;; - -dicos*) - os=-dicos - ;; - -nacl*) - ;; - -none) - ;; - *) - # Get rid of the `-' at the beginning of $os. - os=`echo $os | sed 's/[^-]*-//'` - echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2 - exit 1 - ;; -esac -else - -# Here we handle the default operating systems that come with various machines. 
-# The value should be what the vendor currently ships out the door with their -# machine or put another way, the most popular os provided with the machine. - -# Note that if you're going to try to match "-MANUFACTURER" here (say, -# "-sun"), then you have to tell the case statement up towards the top -# that MANUFACTURER isn't an operating system. Otherwise, code above -# will signal an error saying that MANUFACTURER isn't an operating -# system, and we'll never get to this point. - -case $basic_machine in - score-*) - os=-elf - ;; - spu-*) - os=-elf - ;; - *-acorn) - os=-riscix1.2 - ;; - arm*-rebel) - os=-linux - ;; - arm*-semi) - os=-aout - ;; - c4x-* | tic4x-*) - os=-coff - ;; - c8051-*) - os=-elf - ;; - hexagon-*) - os=-elf - ;; - tic54x-*) - os=-coff - ;; - tic55x-*) - os=-coff - ;; - tic6x-*) - os=-coff - ;; - # This must come before the *-dec entry. - pdp10-*) - os=-tops20 - ;; - pdp11-*) - os=-none - ;; - *-dec | vax-*) - os=-ultrix4.2 - ;; - m68*-apollo) - os=-domain - ;; - i386-sun) - os=-sunos4.0.2 - ;; - m68000-sun) - os=-sunos3 - ;; - m68*-cisco) - os=-aout - ;; - mep-*) - os=-elf - ;; - mips*-cisco) - os=-elf - ;; - mips*-*) - os=-elf - ;; - or32-*) - os=-coff - ;; - *-tti) # must be before sparc entry or we get the wrong os. 
- os=-sysv3 - ;; - sparc-* | *-sun) - os=-sunos4.1.1 - ;; - *-be) - os=-beos - ;; - *-haiku) - os=-haiku - ;; - *-ibm) - os=-aix - ;; - *-knuth) - os=-mmixware - ;; - *-wec) - os=-proelf - ;; - *-winbond) - os=-proelf - ;; - *-oki) - os=-proelf - ;; - *-hp) - os=-hpux - ;; - *-hitachi) - os=-hiux - ;; - i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) - os=-sysv - ;; - *-cbm) - os=-amigaos - ;; - *-dg) - os=-dgux - ;; - *-dolphin) - os=-sysv3 - ;; - m68k-ccur) - os=-rtu - ;; - m88k-omron*) - os=-luna - ;; - *-next ) - os=-nextstep - ;; - *-sequent) - os=-ptx - ;; - *-crds) - os=-unos - ;; - *-ns) - os=-genix - ;; - i370-*) - os=-mvs - ;; - *-next) - os=-nextstep3 - ;; - *-gould) - os=-sysv - ;; - *-highlevel) - os=-bsd - ;; - *-encore) - os=-bsd - ;; - *-sgi) - os=-irix - ;; - *-siemens) - os=-sysv4 - ;; - *-masscomp) - os=-rtu - ;; - f30[01]-fujitsu | f700-fujitsu) - os=-uxpv - ;; - *-rom68k) - os=-coff - ;; - *-*bug) - os=-coff - ;; - *-apple) - os=-macos - ;; - *-atari*) - os=-mint - ;; - *) - os=-none - ;; -esac -fi - -# Here we handle the case where we know the os, and the CPU type, but not the -# manufacturer. We pick the logical manufacturer. 
-vendor=unknown -case $basic_machine in - *-unknown) - case $os in - -riscix*) - vendor=acorn - ;; - -sunos*) - vendor=sun - ;; - -cnk*|-aix*) - vendor=ibm - ;; - -beos*) - vendor=be - ;; - -hpux*) - vendor=hp - ;; - -mpeix*) - vendor=hp - ;; - -hiux*) - vendor=hitachi - ;; - -unos*) - vendor=crds - ;; - -dgux*) - vendor=dg - ;; - -luna*) - vendor=omron - ;; - -genix*) - vendor=ns - ;; - -mvs* | -opened*) - vendor=ibm - ;; - -os400*) - vendor=ibm - ;; - -ptx*) - vendor=sequent - ;; - -tpf*) - vendor=ibm - ;; - -vxsim* | -vxworks* | -windiss*) - vendor=wrs - ;; - -aux*) - vendor=apple - ;; - -hms*) - vendor=hitachi - ;; - -mpw* | -macos*) - vendor=apple - ;; - -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) - vendor=atari - ;; - -vos*) - vendor=stratus - ;; - esac - basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"` - ;; -esac - -echo $basic_machine$os -exit - -# Local variables: -# eval: (add-hook 'write-file-hooks 'time-stamp) -# time-stamp-start: "timestamp='" -# time-stamp-format: "%:y-%02m-%02d" -# time-stamp-end: "'" -# End: diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar11=/configure cabal-install-1.22-1.22.9.0/=unpacked-tar11=/configure --- cabal-install-1.22-1.22.6.0/=unpacked-tar11=/configure 2014-11-21 10:44:17.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar11=/configure 1970-01-01 00:00:00.000000000 +0000 @@ -1,4613 +0,0 @@ -#! /bin/sh -# Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.69 for Haskell old-time package 1.0. -# -# Report bugs to . -# -# -# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. -# -# -# This configure script is free software; the Free Software Foundation -# gives unlimited permission to copy, distribute and modify it. -## -------------------- ## -## M4sh Initialization. 
## -## -------------------- ## - -# Be more Bourne compatible -DUALCASE=1; export DUALCASE # for MKS sh -if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : - emulate sh - NULLCMD=: - # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which - # is contrary to our usage. Disable this feature. - alias -g '${1+"$@"}'='"$@"' - setopt NO_GLOB_SUBST -else - case `(set -o) 2>/dev/null` in #( - *posix*) : - set -o posix ;; #( - *) : - ;; -esac -fi - - -as_nl=' -' -export as_nl -# Printing a long string crashes Solaris 7 /usr/bin/printf. -as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo -# Prefer a ksh shell builtin over an external printf program on Solaris, -# but without wasting forks for bash or zsh. -if test -z "$BASH_VERSION$ZSH_VERSION" \ - && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='print -r --' - as_echo_n='print -rn --' -elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='printf %s\n' - as_echo_n='printf %s' -else - if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then - as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' - as_echo_n='/usr/ucb/echo -n' - else - as_echo_body='eval expr "X$1" : "X\\(.*\\)"' - as_echo_n_body='eval - arg=$1; - case $arg in #( - *"$as_nl"*) - expr "X$arg" : "X\\(.*\\)$as_nl"; - arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; - esac; - expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" - ' - export as_echo_n_body - as_echo_n='sh -c $as_echo_n_body as_echo' - fi - export as_echo_body - as_echo='sh -c $as_echo_body as_echo' -fi - -# The user is always right. 
-if test "${PATH_SEPARATOR+set}" != set; then - PATH_SEPARATOR=: - (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { - (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || - PATH_SEPARATOR=';' - } -fi - - -# IFS -# We need space, tab and new line, in precisely that order. Quoting is -# there to prevent editors from complaining about space-tab. -# (If _AS_PATH_WALK were called with IFS unset, it would disable word -# splitting by setting IFS to empty value.) -IFS=" "" $as_nl" - -# Find who we are. Look in the path if we contain no directory separator. -as_myself= -case $0 in #(( - *[\\/]* ) as_myself=$0 ;; - *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break - done -IFS=$as_save_IFS - - ;; -esac -# We did not find ourselves, most probably we were run as `sh COMMAND' -# in which case we are not to be found in the path. -if test "x$as_myself" = x; then - as_myself=$0 -fi -if test ! -f "$as_myself"; then - $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 - exit 1 -fi - -# Unset variables that we do not need and which cause bugs (e.g. in -# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" -# suppresses any "Segmentation fault" message there. '((' could -# trigger a bug in pdksh 5.2.14. -for as_var in BASH_ENV ENV MAIL MAILPATH -do eval test x\${$as_var+set} = xset \ - && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : -done -PS1='$ ' -PS2='> ' -PS4='+ ' - -# NLS nuisances. -LC_ALL=C -export LC_ALL -LANGUAGE=C -export LANGUAGE - -# CDPATH. -(unset CDPATH) >/dev/null 2>&1 && unset CDPATH - -# Use a proper internal environment variable to ensure we don't fall - # into an infinite loop, continuously re-executing ourselves. 
- if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then - _as_can_reexec=no; export _as_can_reexec; - # We cannot yet assume a decent shell, so we have to provide a -# neutralization value for shells without unset; and this also -# works around shells that cannot unset nonexistent variables. -# Preserve -v and -x to the replacement shell. -BASH_ENV=/dev/null -ENV=/dev/null -(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV -case $- in # (((( - *v*x* | *x*v* ) as_opts=-vx ;; - *v* ) as_opts=-v ;; - *x* ) as_opts=-x ;; - * ) as_opts= ;; -esac -exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} -# Admittedly, this is quite paranoid, since all the known shells bail -# out after a failed `exec'. -$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 -as_fn_exit 255 - fi - # We don't want this to propagate to other subprocesses. - { _as_can_reexec=; unset _as_can_reexec;} -if test "x$CONFIG_SHELL" = x; then - as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : - emulate sh - NULLCMD=: - # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which - # is contrary to our usage. Disable this feature. - alias -g '\${1+\"\$@\"}'='\"\$@\"' - setopt NO_GLOB_SUBST -else - case \`(set -o) 2>/dev/null\` in #( - *posix*) : - set -o posix ;; #( - *) : - ;; -esac -fi -" - as_required="as_fn_return () { (exit \$1); } -as_fn_success () { as_fn_return 0; } -as_fn_failure () { as_fn_return 1; } -as_fn_ret_success () { return 0; } -as_fn_ret_failure () { return 1; } - -exitcode=0 -as_fn_success || { exitcode=1; echo as_fn_success failed.; } -as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } -as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } -as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } -if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : - -else - exitcode=1; echo positional parameters were not saved. 
-fi -test x\$exitcode = x0 || exit 1 -test -x / || exit 1" - as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO - as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO - eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && - test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 -test \$(( 1 + 1 )) = 2 || exit 1" - if (eval "$as_required") 2>/dev/null; then : - as_have_required=yes -else - as_have_required=no -fi - if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : - -else - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -as_found=false -for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - as_found=: - case $as_dir in #( - /*) - for as_base in sh bash ksh sh5; do - # Try only shells that exist, to save several forks. - as_shell=$as_dir/$as_base - if { test -f "$as_shell" || test -f "$as_shell.exe"; } && - { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : - CONFIG_SHELL=$as_shell as_have_required=yes - if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : - break 2 -fi -fi - done;; - esac - as_found=false -done -$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && - { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : - CONFIG_SHELL=$SHELL as_have_required=yes -fi; } -IFS=$as_save_IFS - - - if test "x$CONFIG_SHELL" != x; then : - export CONFIG_SHELL - # We cannot yet assume a decent shell, so we have to provide a -# neutralization value for shells without unset; and this also -# works around shells that cannot unset nonexistent variables. -# Preserve -v and -x to the replacement shell. 
-BASH_ENV=/dev/null -ENV=/dev/null -(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV -case $- in # (((( - *v*x* | *x*v* ) as_opts=-vx ;; - *v* ) as_opts=-v ;; - *x* ) as_opts=-x ;; - * ) as_opts= ;; -esac -exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} -# Admittedly, this is quite paranoid, since all the known shells bail -# out after a failed `exec'. -$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 -exit 255 -fi - - if test x$as_have_required = xno; then : - $as_echo "$0: This script requires a shell more modern than all" - $as_echo "$0: the shells that I found on your system." - if test x${ZSH_VERSION+set} = xset ; then - $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" - $as_echo "$0: be upgraded to zsh 4.3.4 or later." - else - $as_echo "$0: Please tell bug-autoconf@gnu.org and -$0: libraries@haskell.org about your system, including any -$0: error possibly output before this message. Then install -$0: a modern shell, or manually run the script under such a -$0: shell if you do have one." - fi - exit 1 -fi -fi -fi -SHELL=${CONFIG_SHELL-/bin/sh} -export SHELL -# Unset more variables known to interfere with behavior of common tools. -CLICOLOR_FORCE= GREP_OPTIONS= -unset CLICOLOR_FORCE GREP_OPTIONS - -## --------------------- ## -## M4sh Shell Functions. ## -## --------------------- ## -# as_fn_unset VAR -# --------------- -# Portably unset VAR. -as_fn_unset () -{ - { eval $1=; unset $1;} -} -as_unset=as_fn_unset - -# as_fn_set_status STATUS -# ----------------------- -# Set $? to STATUS, without forking. -as_fn_set_status () -{ - return $1 -} # as_fn_set_status - -# as_fn_exit STATUS -# ----------------- -# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. -as_fn_exit () -{ - set +e - as_fn_set_status $1 - exit $1 -} # as_fn_exit - -# as_fn_mkdir_p -# ------------- -# Create "$as_dir" as a directory, including parents if necessary. 
-as_fn_mkdir_p () -{ - - case $as_dir in #( - -*) as_dir=./$as_dir;; - esac - test -d "$as_dir" || eval $as_mkdir_p || { - as_dirs= - while :; do - case $as_dir in #( - *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( - *) as_qdir=$as_dir;; - esac - as_dirs="'$as_qdir' $as_dirs" - as_dir=`$as_dirname -- "$as_dir" || -$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_dir" : 'X\(//\)[^/]' \| \ - X"$as_dir" : 'X\(//\)$' \| \ - X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$as_dir" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - test -d "$as_dir" && break - done - test -z "$as_dirs" || eval "mkdir $as_dirs" - } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" - - -} # as_fn_mkdir_p - -# as_fn_executable_p FILE -# ----------------------- -# Test if FILE is an executable regular file. -as_fn_executable_p () -{ - test -f "$1" && test -x "$1" -} # as_fn_executable_p -# as_fn_append VAR VALUE -# ---------------------- -# Append the text in VALUE to the end of the definition contained in VAR. Take -# advantage of any shell optimizations that allow amortized linear growth over -# repeated appends, instead of the typical quadratic growth present in naive -# implementations. -if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : - eval 'as_fn_append () - { - eval $1+=\$2 - }' -else - as_fn_append () - { - eval $1=\$$1\$2 - } -fi # as_fn_append - -# as_fn_arith ARG... -# ------------------ -# Perform arithmetic evaluation on the ARGs, and store the result in the -# global $as_val. Take advantage of shells that can avoid forks. The arguments -# must be portable across $(()) and expr. -if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : - eval 'as_fn_arith () - { - as_val=$(( $* )) - }' -else - as_fn_arith () - { - as_val=`expr "$@" || test $? 
-eq 1` - } -fi # as_fn_arith - - -# as_fn_error STATUS ERROR [LINENO LOG_FD] -# ---------------------------------------- -# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are -# provided, also output the error to LOG_FD, referencing LINENO. Then exit the -# script with STATUS, using 1 if that was 0. -as_fn_error () -{ - as_status=$1; test $as_status -eq 0 && as_status=1 - if test "$4"; then - as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 - fi - $as_echo "$as_me: error: $2" >&2 - as_fn_exit $as_status -} # as_fn_error - -if expr a : '\(a\)' >/dev/null 2>&1 && - test "X`expr 00001 : '.*\(...\)'`" = X001; then - as_expr=expr -else - as_expr=false -fi - -if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then - as_basename=basename -else - as_basename=false -fi - -if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then - as_dirname=dirname -else - as_dirname=false -fi - -as_me=`$as_basename -- "$0" || -$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ - X"$0" : 'X\(//\)$' \| \ - X"$0" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X/"$0" | - sed '/^.*\/\([^/][^/]*\)\/*$/{ - s//\1/ - q - } - /^X\/\(\/\/\)$/{ - s//\1/ - q - } - /^X\/\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - -# Avoid depending upon Character Ranges. -as_cr_letters='abcdefghijklmnopqrstuvwxyz' -as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' -as_cr_Letters=$as_cr_letters$as_cr_LETTERS -as_cr_digits='0123456789' -as_cr_alnum=$as_cr_Letters$as_cr_digits - - - as_lineno_1=$LINENO as_lineno_1a=$LINENO - as_lineno_2=$LINENO as_lineno_2a=$LINENO - eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && - test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { - # Blame Lee E. McMahon (1931-1989) for sed's syntax. 
:-) - sed -n ' - p - /[$]LINENO/= - ' <$as_myself | - sed ' - s/[$]LINENO.*/&-/ - t lineno - b - :lineno - N - :loop - s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ - t loop - s/-\n.*// - ' >$as_me.lineno && - chmod +x "$as_me.lineno" || - { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } - - # If we had to re-execute with $CONFIG_SHELL, we're ensured to have - # already done that, so ensure we don't try to do so again and fall - # in an infinite loop. This has already happened in practice. - _as_can_reexec=no; export _as_can_reexec - # Don't try to exec as it changes $[0], causing all sort of problems - # (the dirname of $[0] is not the place where we might find the - # original and so on. Autoconf is especially sensitive to this). - . "./$as_me.lineno" - # Exit status is that of the last command. - exit -} - -ECHO_C= ECHO_N= ECHO_T= -case `echo -n x` in #((((( --n*) - case `echo 'xy\c'` in - *c*) ECHO_T=' ';; # ECHO_T is single tab character. - xy) ECHO_C='\c';; - *) echo `echo ksh88 bug on AIX 6.1` > /dev/null - ECHO_T=' ';; - esac;; -*) - ECHO_N='-n';; -esac - -rm -f conf$$ conf$$.exe conf$$.file -if test -d conf$$.dir; then - rm -f conf$$.dir/conf$$.file -else - rm -f conf$$.dir - mkdir conf$$.dir 2>/dev/null -fi -if (echo >conf$$.file) 2>/dev/null; then - if ln -s conf$$.file conf$$ 2>/dev/null; then - as_ln_s='ln -s' - # ... but there are two gotchas: - # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. - # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. - # In both cases, we have to default to `cp -pR'. - ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || - as_ln_s='cp -pR' - elif ln conf$$.file conf$$ 2>/dev/null; then - as_ln_s=ln - else - as_ln_s='cp -pR' - fi -else - as_ln_s='cp -pR' -fi -rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file -rmdir conf$$.dir 2>/dev/null - -if mkdir -p . 
2>/dev/null; then - as_mkdir_p='mkdir -p "$as_dir"' -else - test -d ./-p && rmdir ./-p - as_mkdir_p=false -fi - -as_test_x='test -x' -as_executable_p=as_fn_executable_p - -# Sed expression to map a string onto a valid CPP name. -as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" - -# Sed expression to map a string onto a valid variable name. -as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" - - -test -n "$DJDIR" || exec 7<&0 &1 - -# Name of the host. -# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, -# so uname gets run too. -ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` - -# -# Initializations. -# -ac_default_prefix=/usr/local -ac_clean_files= -ac_config_libobj_dir=. -LIBOBJS= -cross_compiling=no -subdirs= -MFLAGS= -MAKEFLAGS= - -# Identity of this package. -PACKAGE_NAME='Haskell old-time package' -PACKAGE_TARNAME='old-time' -PACKAGE_VERSION='1.0' -PACKAGE_STRING='Haskell old-time package 1.0' -PACKAGE_BUGREPORT='libraries@haskell.org' -PACKAGE_URL='' - -ac_unique_file="include/HsTime.h" -# Factoring default headers for most tests. 
-ac_includes_default="\ -#include -#ifdef HAVE_SYS_TYPES_H -# include -#endif -#ifdef HAVE_SYS_STAT_H -# include -#endif -#ifdef STDC_HEADERS -# include -# include -#else -# ifdef HAVE_STDLIB_H -# include -# endif -#endif -#ifdef HAVE_STRING_H -# if !defined STDC_HEADERS && defined HAVE_MEMORY_H -# include -# endif -# include -#endif -#ifdef HAVE_STRINGS_H -# include -#endif -#ifdef HAVE_INTTYPES_H -# include -#endif -#ifdef HAVE_STDINT_H -# include -#endif -#ifdef HAVE_UNISTD_H -# include -#endif" - -ac_subst_vars='LTLIBOBJS -LIBOBJS -EGREP -GREP -CPP -OBJEXT -EXEEXT -ac_ct_CC -CPPFLAGS -LDFLAGS -CFLAGS -CC -target_alias -host_alias -build_alias -LIBS -ECHO_T -ECHO_N -ECHO_C -DEFS -mandir -localedir -libdir -psdir -pdfdir -dvidir -htmldir -infodir -docdir -oldincludedir -includedir -localstatedir -sharedstatedir -sysconfdir -datadir -datarootdir -libexecdir -sbindir -bindir -program_transform_name -prefix -exec_prefix -PACKAGE_URL -PACKAGE_BUGREPORT -PACKAGE_STRING -PACKAGE_VERSION -PACKAGE_TARNAME -PACKAGE_NAME -PATH_SEPARATOR -SHELL' -ac_subst_files='' -ac_user_opts=' -enable_option_checking -with_cc -' - ac_precious_vars='build_alias -host_alias -target_alias -CC -CFLAGS -LDFLAGS -LIBS -CPPFLAGS -CPP' - - -# Initialize some variables set by options. -ac_init_help= -ac_init_version=false -ac_unrecognized_opts= -ac_unrecognized_sep= -# The variables have the same names as the options, with -# dashes changed to underlines. -cache_file=/dev/null -exec_prefix=NONE -no_create= -no_recursion= -prefix=NONE -program_prefix=NONE -program_suffix=NONE -program_transform_name=s,x,x, -silent= -site= -srcdir= -verbose= -x_includes=NONE -x_libraries=NONE - -# Installation directory options. -# These are left unexpanded so users can "make install exec_prefix=/foo" -# and all the variables that are supposed to be based on exec_prefix -# by default will actually change. -# Use braces instead of parens because sh, perl, etc. also accept them. 
-# (The list follows the same order as the GNU Coding Standards.) -bindir='${exec_prefix}/bin' -sbindir='${exec_prefix}/sbin' -libexecdir='${exec_prefix}/libexec' -datarootdir='${prefix}/share' -datadir='${datarootdir}' -sysconfdir='${prefix}/etc' -sharedstatedir='${prefix}/com' -localstatedir='${prefix}/var' -includedir='${prefix}/include' -oldincludedir='/usr/include' -docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' -infodir='${datarootdir}/info' -htmldir='${docdir}' -dvidir='${docdir}' -pdfdir='${docdir}' -psdir='${docdir}' -libdir='${exec_prefix}/lib' -localedir='${datarootdir}/locale' -mandir='${datarootdir}/man' - -ac_prev= -ac_dashdash= -for ac_option -do - # If the previous option needs an argument, assign it. - if test -n "$ac_prev"; then - eval $ac_prev=\$ac_option - ac_prev= - continue - fi - - case $ac_option in - *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; - *=) ac_optarg= ;; - *) ac_optarg=yes ;; - esac - - # Accept the important Cygnus configure options, so we can diagnose typos. 
- - case $ac_dashdash$ac_option in - --) - ac_dashdash=yes ;; - - -bindir | --bindir | --bindi | --bind | --bin | --bi) - ac_prev=bindir ;; - -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) - bindir=$ac_optarg ;; - - -build | --build | --buil | --bui | --bu) - ac_prev=build_alias ;; - -build=* | --build=* | --buil=* | --bui=* | --bu=*) - build_alias=$ac_optarg ;; - - -cache-file | --cache-file | --cache-fil | --cache-fi \ - | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) - ac_prev=cache_file ;; - -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ - | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) - cache_file=$ac_optarg ;; - - --config-cache | -C) - cache_file=config.cache ;; - - -datadir | --datadir | --datadi | --datad) - ac_prev=datadir ;; - -datadir=* | --datadir=* | --datadi=* | --datad=*) - datadir=$ac_optarg ;; - - -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ - | --dataroo | --dataro | --datar) - ac_prev=datarootdir ;; - -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ - | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) - datarootdir=$ac_optarg ;; - - -disable-* | --disable-*) - ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? 
"invalid feature name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"enable_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval enable_$ac_useropt=no ;; - - -docdir | --docdir | --docdi | --doc | --do) - ac_prev=docdir ;; - -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) - docdir=$ac_optarg ;; - - -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) - ac_prev=dvidir ;; - -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) - dvidir=$ac_optarg ;; - - -enable-* | --enable-*) - ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? "invalid feature name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"enable_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval enable_$ac_useropt=\$ac_optarg ;; - - -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ - | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ - | --exec | --exe | --ex) - ac_prev=exec_prefix ;; - -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ - | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ - | --exec=* | --exe=* | --ex=*) - exec_prefix=$ac_optarg ;; - - -gas | --gas | --ga | --g) - # Obsolete; use --with-gas. 
- with_gas=yes ;; - - -help | --help | --hel | --he | -h) - ac_init_help=long ;; - -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) - ac_init_help=recursive ;; - -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) - ac_init_help=short ;; - - -host | --host | --hos | --ho) - ac_prev=host_alias ;; - -host=* | --host=* | --hos=* | --ho=*) - host_alias=$ac_optarg ;; - - -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) - ac_prev=htmldir ;; - -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ - | --ht=*) - htmldir=$ac_optarg ;; - - -includedir | --includedir | --includedi | --included | --include \ - | --includ | --inclu | --incl | --inc) - ac_prev=includedir ;; - -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ - | --includ=* | --inclu=* | --incl=* | --inc=*) - includedir=$ac_optarg ;; - - -infodir | --infodir | --infodi | --infod | --info | --inf) - ac_prev=infodir ;; - -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) - infodir=$ac_optarg ;; - - -libdir | --libdir | --libdi | --libd) - ac_prev=libdir ;; - -libdir=* | --libdir=* | --libdi=* | --libd=*) - libdir=$ac_optarg ;; - - -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ - | --libexe | --libex | --libe) - ac_prev=libexecdir ;; - -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ - | --libexe=* | --libex=* | --libe=*) - libexecdir=$ac_optarg ;; - - -localedir | --localedir | --localedi | --localed | --locale) - ac_prev=localedir ;; - -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) - localedir=$ac_optarg ;; - - -localstatedir | --localstatedir | --localstatedi | --localstated \ - | --localstate | --localstat | --localsta | --localst | --locals) - ac_prev=localstatedir ;; - -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ - | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) - localstatedir=$ac_optarg ;; - - 
-mandir | --mandir | --mandi | --mand | --man | --ma | --m) - ac_prev=mandir ;; - -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) - mandir=$ac_optarg ;; - - -nfp | --nfp | --nf) - # Obsolete; use --without-fp. - with_fp=no ;; - - -no-create | --no-create | --no-creat | --no-crea | --no-cre \ - | --no-cr | --no-c | -n) - no_create=yes ;; - - -no-recursion | --no-recursion | --no-recursio | --no-recursi \ - | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) - no_recursion=yes ;; - - -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ - | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ - | --oldin | --oldi | --old | --ol | --o) - ac_prev=oldincludedir ;; - -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ - | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ - | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) - oldincludedir=$ac_optarg ;; - - -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) - ac_prev=prefix ;; - -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) - prefix=$ac_optarg ;; - - -program-prefix | --program-prefix | --program-prefi | --program-pref \ - | --program-pre | --program-pr | --program-p) - ac_prev=program_prefix ;; - -program-prefix=* | --program-prefix=* | --program-prefi=* \ - | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) - program_prefix=$ac_optarg ;; - - -program-suffix | --program-suffix | --program-suffi | --program-suff \ - | --program-suf | --program-su | --program-s) - ac_prev=program_suffix ;; - -program-suffix=* | --program-suffix=* | --program-suffi=* \ - | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) - program_suffix=$ac_optarg ;; - - -program-transform-name | --program-transform-name \ - | --program-transform-nam | --program-transform-na \ - | --program-transform-n | --program-transform- \ - | --program-transform | 
--program-transfor \ - | --program-transfo | --program-transf \ - | --program-trans | --program-tran \ - | --progr-tra | --program-tr | --program-t) - ac_prev=program_transform_name ;; - -program-transform-name=* | --program-transform-name=* \ - | --program-transform-nam=* | --program-transform-na=* \ - | --program-transform-n=* | --program-transform-=* \ - | --program-transform=* | --program-transfor=* \ - | --program-transfo=* | --program-transf=* \ - | --program-trans=* | --program-tran=* \ - | --progr-tra=* | --program-tr=* | --program-t=*) - program_transform_name=$ac_optarg ;; - - -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) - ac_prev=pdfdir ;; - -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) - pdfdir=$ac_optarg ;; - - -psdir | --psdir | --psdi | --psd | --ps) - ac_prev=psdir ;; - -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) - psdir=$ac_optarg ;; - - -q | -quiet | --quiet | --quie | --qui | --qu | --q \ - | -silent | --silent | --silen | --sile | --sil) - silent=yes ;; - - -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) - ac_prev=sbindir ;; - -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ - | --sbi=* | --sb=*) - sbindir=$ac_optarg ;; - - -sharedstatedir | --sharedstatedir | --sharedstatedi \ - | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ - | --sharedst | --shareds | --shared | --share | --shar \ - | --sha | --sh) - ac_prev=sharedstatedir ;; - -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ - | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ - | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ - | --sha=* | --sh=*) - sharedstatedir=$ac_optarg ;; - - -site | --site | --sit) - ac_prev=site ;; - -site=* | --site=* | --sit=*) - site=$ac_optarg ;; - - -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) - ac_prev=srcdir ;; - -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) - srcdir=$ac_optarg ;; - - 
-sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ - | --syscon | --sysco | --sysc | --sys | --sy) - ac_prev=sysconfdir ;; - -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ - | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) - sysconfdir=$ac_optarg ;; - - -target | --target | --targe | --targ | --tar | --ta | --t) - ac_prev=target_alias ;; - -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) - target_alias=$ac_optarg ;; - - -v | -verbose | --verbose | --verbos | --verbo | --verb) - verbose=yes ;; - - -version | --version | --versio | --versi | --vers | -V) - ac_init_version=: ;; - - -with-* | --with-*) - ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? "invalid package name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"with_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval with_$ac_useropt=\$ac_optarg ;; - - -without-* | --without-*) - ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? "invalid package name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"with_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval with_$ac_useropt=no ;; - - --x) - # Obsolete; use --with-x. 
- with_x=yes ;; - - -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ - | --x-incl | --x-inc | --x-in | --x-i) - ac_prev=x_includes ;; - -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ - | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) - x_includes=$ac_optarg ;; - - -x-libraries | --x-libraries | --x-librarie | --x-librari \ - | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) - ac_prev=x_libraries ;; - -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ - | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) - x_libraries=$ac_optarg ;; - - -*) as_fn_error $? "unrecognized option: \`$ac_option' -Try \`$0 --help' for more information" - ;; - - *=*) - ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` - # Reject names that are not valid shell variable names. - case $ac_envvar in #( - '' | [0-9]* | *[!_$as_cr_alnum]* ) - as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; - esac - eval $ac_envvar=\$ac_optarg - export $ac_envvar ;; - - *) - # FIXME: should be removed in autoconf 3.0. - $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 - expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && - $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 - : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" - ;; - - esac -done - -if test -n "$ac_prev"; then - ac_option=--`echo $ac_prev | sed 's/_/-/g'` - as_fn_error $? "missing argument to $ac_option" -fi - -if test -n "$ac_unrecognized_opts"; then - case $enable_option_checking in - no) ;; - fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; - *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; - esac -fi - -# Check all directory arguments for consistency. 
-for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ - datadir sysconfdir sharedstatedir localstatedir includedir \ - oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ - libdir localedir mandir -do - eval ac_val=\$$ac_var - # Remove trailing slashes. - case $ac_val in - */ ) - ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` - eval $ac_var=\$ac_val;; - esac - # Be sure to have absolute directory names. - case $ac_val in - [\\/$]* | ?:[\\/]* ) continue;; - NONE | '' ) case $ac_var in *prefix ) continue;; esac;; - esac - as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" -done - -# There might be people who depend on the old broken behavior: `$host' -# used to hold the argument of --host etc. -# FIXME: To remove some day. -build=$build_alias -host=$host_alias -target=$target_alias - -# FIXME: To remove some day. -if test "x$host_alias" != x; then - if test "x$build_alias" = x; then - cross_compiling=maybe - elif test "x$build_alias" != "x$host_alias"; then - cross_compiling=yes - fi -fi - -ac_tool_prefix= -test -n "$host_alias" && ac_tool_prefix=$host_alias- - -test "$silent" = yes && exec 6>/dev/null - - -ac_pwd=`pwd` && test -n "$ac_pwd" && -ac_ls_di=`ls -di .` && -ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || - as_fn_error $? "working directory cannot be determined" -test "X$ac_ls_di" = "X$ac_pwd_ls_di" || - as_fn_error $? "pwd does not report name of working directory" - - -# Find the source files, if location was not specified. -if test -z "$srcdir"; then - ac_srcdir_defaulted=yes - # Try the directory containing this script, then the parent directory. - ac_confdir=`$as_dirname -- "$as_myself" || -$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_myself" : 'X\(//\)[^/]' \| \ - X"$as_myself" : 'X\(//\)$' \| \ - X"$as_myself" : 'X\(/\)' \| . 
2>/dev/null || -$as_echo X"$as_myself" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - srcdir=$ac_confdir - if test ! -r "$srcdir/$ac_unique_file"; then - srcdir=.. - fi -else - ac_srcdir_defaulted=no -fi -if test ! -r "$srcdir/$ac_unique_file"; then - test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." - as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir" -fi -ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" -ac_abs_confdir=`( - cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" - pwd)` -# When building in place, set srcdir=. -if test "$ac_abs_confdir" = "$ac_pwd"; then - srcdir=. -fi -# Remove unnecessary trailing slashes from srcdir. -# Double slashes in file names in object file debugging info -# mess up M-x gdb in Emacs. -case $srcdir in -*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; -esac -for ac_var in $ac_precious_vars; do - eval ac_env_${ac_var}_set=\${${ac_var}+set} - eval ac_env_${ac_var}_value=\$${ac_var} - eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} - eval ac_cv_env_${ac_var}_value=\$${ac_var} -done - -# -# Report the --help message. -# -if test "$ac_init_help" = "long"; then - # Omit some internal or obsolete options to make the list less imposing. - # This message is too long to be a string in the A/UX 3.1 sh. - cat <<_ACEOF -\`configure' configures Haskell old-time package 1.0 to adapt to many kinds of systems. - -Usage: $0 [OPTION]... [VAR=VALUE]... - -To assign environment variables (e.g., CC, CFLAGS...), specify them as -VAR=VALUE. See below for descriptions of some of the useful variables. - -Defaults for the options are specified in brackets. 
- -Configuration: - -h, --help display this help and exit - --help=short display options specific to this package - --help=recursive display the short help of all the included packages - -V, --version display version information and exit - -q, --quiet, --silent do not print \`checking ...' messages - --cache-file=FILE cache test results in FILE [disabled] - -C, --config-cache alias for \`--cache-file=config.cache' - -n, --no-create do not create output files - --srcdir=DIR find the sources in DIR [configure dir or \`..'] - -Installation directories: - --prefix=PREFIX install architecture-independent files in PREFIX - [$ac_default_prefix] - --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX - [PREFIX] - -By default, \`make install' will install all the files in -\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify -an installation prefix other than \`$ac_default_prefix' using \`--prefix', -for instance \`--prefix=\$HOME'. - -For better control, use the options below. 
- -Fine tuning of the installation directories: - --bindir=DIR user executables [EPREFIX/bin] - --sbindir=DIR system admin executables [EPREFIX/sbin] - --libexecdir=DIR program executables [EPREFIX/libexec] - --sysconfdir=DIR read-only single-machine data [PREFIX/etc] - --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] - --localstatedir=DIR modifiable single-machine data [PREFIX/var] - --libdir=DIR object code libraries [EPREFIX/lib] - --includedir=DIR C header files [PREFIX/include] - --oldincludedir=DIR C header files for non-gcc [/usr/include] - --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] - --datadir=DIR read-only architecture-independent data [DATAROOTDIR] - --infodir=DIR info documentation [DATAROOTDIR/info] - --localedir=DIR locale-dependent data [DATAROOTDIR/locale] - --mandir=DIR man documentation [DATAROOTDIR/man] - --docdir=DIR documentation root [DATAROOTDIR/doc/old-time] - --htmldir=DIR html documentation [DOCDIR] - --dvidir=DIR dvi documentation [DOCDIR] - --pdfdir=DIR pdf documentation [DOCDIR] - --psdir=DIR ps documentation [DOCDIR] -_ACEOF - - cat <<\_ACEOF -_ACEOF -fi - -if test -n "$ac_init_help"; then - case $ac_init_help in - short | recursive ) echo "Configuration of Haskell old-time package 1.0:";; - esac - cat <<\_ACEOF - -Optional Packages: - --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] - --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) -C compiler - -Some influential environment variables: - CC C compiler command - CFLAGS C compiler flags - LDFLAGS linker flags, e.g. -L if you have libraries in a - nonstandard directory - LIBS libraries to pass to the linker, e.g. -l - CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I if - you have headers in a nonstandard directory - CPP C preprocessor - -Use these variables to override the choices made by `configure' or to help -it to find libraries and programs with nonstandard names/locations. - -Report bugs to . 
-_ACEOF -ac_status=$? -fi - -if test "$ac_init_help" = "recursive"; then - # If there are subdirs, report their specific --help. - for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue - test -d "$ac_dir" || - { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || - continue - ac_builddir=. - -case "$ac_dir" in -.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; -*) - ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` - # A ".." for each directory in $ac_dir_suffix. - ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` - case $ac_top_builddir_sub in - "") ac_top_builddir_sub=. ac_top_build_prefix= ;; - *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; - esac ;; -esac -ac_abs_top_builddir=$ac_pwd -ac_abs_builddir=$ac_pwd$ac_dir_suffix -# for backward compatibility: -ac_top_builddir=$ac_top_build_prefix - -case $srcdir in - .) # We are building in place. - ac_srcdir=. - ac_top_srcdir=$ac_top_builddir_sub - ac_abs_top_srcdir=$ac_pwd ;; - [\\/]* | ?:[\\/]* ) # Absolute name. - ac_srcdir=$srcdir$ac_dir_suffix; - ac_top_srcdir=$srcdir - ac_abs_top_srcdir=$srcdir ;; - *) # Relative name. - ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix - ac_top_srcdir=$ac_top_build_prefix$srcdir - ac_abs_top_srcdir=$ac_pwd/$srcdir ;; -esac -ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix - - cd "$ac_dir" || { ac_status=$?; continue; } - # Check for guested configure. - if test -f "$ac_srcdir/configure.gnu"; then - echo && - $SHELL "$ac_srcdir/configure.gnu" --help=recursive - elif test -f "$ac_srcdir/configure"; then - echo && - $SHELL "$ac_srcdir/configure" --help=recursive - else - $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 - fi || ac_status=$? 
- cd "$ac_pwd" || { ac_status=$?; break; } - done -fi - -test -n "$ac_init_help" && exit $ac_status -if $ac_init_version; then - cat <<\_ACEOF -Haskell old-time package configure 1.0 -generated by GNU Autoconf 2.69 - -Copyright (C) 2012 Free Software Foundation, Inc. -This configure script is free software; the Free Software Foundation -gives unlimited permission to copy, distribute and modify it. -_ACEOF - exit -fi - -## ------------------------ ## -## Autoconf initialization. ## -## ------------------------ ## - -# ac_fn_c_try_compile LINENO -# -------------------------- -# Try to compile conftest.$ac_ext, and return whether this succeeded. -ac_fn_c_try_compile () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - rm -f conftest.$ac_objext - if { { ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compile") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest.$ac_objext; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_try_compile - -# ac_fn_c_try_cpp LINENO -# ---------------------- -# Try to preprocess conftest.$ac_ext, and return whether this succeeded. 
-ac_fn_c_try_cpp () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if { { ac_try="$ac_cpp conftest.$ac_ext" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } > conftest.i && { - test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || - test ! -s conftest.err - }; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_try_cpp - -# ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES -# ------------------------------------------------------- -# Tests whether HEADER exists, giving a warning if it cannot be compiled using -# the include files in INCLUDES and setting the cache variable VAR -# accordingly. -ac_fn_c_check_header_mongrel () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if eval \${$3+:} false; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } -else - # Is the header compilable? -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 -$as_echo_n "checking $2 usability... " >&6; } -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -$4 -#include <$2> -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_header_compiler=yes -else - ac_header_compiler=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 -$as_echo "$ac_header_compiler" >&6; } - -# Is the header present? -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 -$as_echo_n "checking $2 presence... " >&6; } -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include <$2> -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - ac_header_preproc=yes -else - ac_header_preproc=no -fi -rm -f conftest.err conftest.i conftest.$ac_ext -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 -$as_echo "$ac_header_preproc" >&6; } - -# So? What about this header? -case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #(( - yes:no: ) - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 -$as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 -$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} - ;; - no:yes:* ) - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 -$as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5 -$as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" 
>&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 -$as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 -$as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 -$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} -( $as_echo "## ------------------------------------ ## -## Report this to libraries@haskell.org ## -## ------------------------------------ ##" - ) | sed "s/^/$as_me: WARNING: /" >&2 - ;; -esac - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -else - eval "$3=\$ac_header_compiler" -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } -fi - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - -} # ac_fn_c_check_header_mongrel - -# ac_fn_c_try_run LINENO -# ---------------------- -# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes -# that executables *can* be run. -ac_fn_c_try_run () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 - test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' - { { case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_try") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; }; then : - ac_retval=0 -else - $as_echo "$as_me: program exited with status $ac_status" >&5 - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=$ac_status -fi - rm -rf conftest.dSYM conftest_ipa8_conftest.oo - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_try_run - -# ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES -# ------------------------------------------------------- -# Tests whether HEADER exists and can be compiled using the include files in -# INCLUDES, setting the cache variable VAR accordingly. -ac_fn_c_check_header_compile () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -#include <$2> -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - eval "$3=yes" -else - eval "$3=no" -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - -} # ac_fn_c_check_header_compile - -# ac_fn_c_try_link LINENO -# ----------------------- -# Try to link conftest.$ac_ext, and return whether this succeeded. 
-ac_fn_c_try_link () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - rm -f conftest.$ac_objext conftest$ac_exeext - if { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest$ac_exeext && { - test "$cross_compiling" = yes || - test -x conftest$ac_exeext - }; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information - # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would - # interfere with the next link command; also delete a directory that is - # left behind by Apple's compiler. We do this before executing the actions. - rm -rf conftest.dSYM conftest_ipa8_conftest.oo - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_try_link - -# ac_fn_c_check_func LINENO FUNC VAR -# ---------------------------------- -# Tests whether FUNC exists, setting the cache variable VAR accordingly -ac_fn_c_check_func () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -/* Define $2 to an innocuous variant, in case declares $2. 
- For example, HP-UX 11i declares gettimeofday. */ -#define $2 innocuous_$2 - -/* System header to define __stub macros and hopefully few prototypes, - which can conflict with char $2 (); below. - Prefer to if __STDC__ is defined, since - exists even on freestanding compilers. */ - -#ifdef __STDC__ -# include -#else -# include -#endif - -#undef $2 - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char $2 (); -/* The GNU C library defines this for functions which it implements - to always fail with ENOSYS. Some functions are actually named - something starting with __ and the normal name is an alias. */ -#if defined __stub_$2 || defined __stub___$2 -choke me -#endif - -int -main () -{ -return $2 (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - eval "$3=yes" -else - eval "$3=no" -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - -} # ac_fn_c_check_func - -# ac_fn_c_check_member LINENO AGGR MEMBER VAR INCLUDES -# ---------------------------------------------------- -# Tries to find if the field MEMBER exists in type AGGR, after including -# INCLUDES, setting cache variable VAR accordingly. -ac_fn_c_check_member () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2.$3" >&5 -$as_echo_n "checking for $2.$3... " >&6; } -if eval \${$4+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -$5 -int -main () -{ -static $2 ac_aggr; -if (ac_aggr.$3) -return 0; - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - eval "$4=yes" -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$5 -int -main () -{ -static $2 ac_aggr; -if (sizeof ac_aggr.$3) -return 0; - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - eval "$4=yes" -else - eval "$4=no" -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -eval ac_res=\$$4 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - -} # ac_fn_c_check_member - -# ac_fn_c_check_decl LINENO SYMBOL VAR INCLUDES -# --------------------------------------------- -# Tests whether SYMBOL is declared in INCLUDES, setting cache variable VAR -# accordingly. -ac_fn_c_check_decl () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - as_decl_name=`echo $2|sed 's/ *(.*//'` - as_decl_use=`echo $2|sed -e 's/(/((/' -e 's/)/) 0&/' -e 's/,/) 0& (/g'` - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $as_decl_name is declared" >&5 -$as_echo_n "checking whether $as_decl_name is declared... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -$4 -int -main () -{ -#ifndef $as_decl_name -#ifdef __cplusplus - (void) $as_decl_use; -#else - (void) $as_decl_name; -#endif -#endif - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - eval "$3=yes" -else - eval "$3=no" -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - -} # ac_fn_c_check_decl -cat >config.log <<_ACEOF -This file contains any messages produced by compilers while -running configure, to aid debugging if configure makes a mistake. - -It was created by Haskell old-time package $as_me 1.0, which was -generated by GNU Autoconf 2.69. Invocation command line was - - $ $0 $@ - -_ACEOF -exec 5>>config.log -{ -cat <<_ASUNAME -## --------- ## -## Platform. ## -## --------- ## - -hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` -uname -m = `(uname -m) 2>/dev/null || echo unknown` -uname -r = `(uname -r) 2>/dev/null || echo unknown` -uname -s = `(uname -s) 2>/dev/null || echo unknown` -uname -v = `(uname -v) 2>/dev/null || echo unknown` - -/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` -/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` - -/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` -/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` -/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` -/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` -/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` -/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` -/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` - -_ASUNAME - -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- $as_echo "PATH: $as_dir" - done -IFS=$as_save_IFS - -} >&5 - -cat >&5 <<_ACEOF - - -## ----------- ## -## Core tests. ## -## ----------- ## - -_ACEOF - - -# Keep a trace of the command line. -# Strip out --no-create and --no-recursion so they do not pile up. -# Strip out --silent because we don't want to record it for future runs. -# Also quote any args containing shell meta-characters. -# Make two passes to allow for proper duplicate-argument suppression. -ac_configure_args= -ac_configure_args0= -ac_configure_args1= -ac_must_keep_next=false -for ac_pass in 1 2 -do - for ac_arg - do - case $ac_arg in - -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; - -q | -quiet | --quiet | --quie | --qui | --qu | --q \ - | -silent | --silent | --silen | --sile | --sil) - continue ;; - *\'*) - ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; - esac - case $ac_pass in - 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; - 2) - as_fn_append ac_configure_args1 " '$ac_arg'" - if test $ac_must_keep_next = true; then - ac_must_keep_next=false # Got value, back to normal. - else - case $ac_arg in - *=* | --config-cache | -C | -disable-* | --disable-* \ - | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ - | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ - | -with-* | --with-* | -without-* | --without-* | --x) - case "$ac_configure_args0 " in - "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; - esac - ;; - -* ) ac_must_keep_next=true ;; - esac - fi - as_fn_append ac_configure_args " '$ac_arg'" - ;; - esac - done -done -{ ac_configure_args0=; unset ac_configure_args0;} -{ ac_configure_args1=; unset ac_configure_args1;} - -# When interrupted or exit'd, cleanup temporary files, and complete -# config.log. We remove comments because anyway the quotes in there -# would cause problems or look ugly. -# WARNING: Use '\'' to represent an apostrophe within the trap. -# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. 
-trap 'exit_status=$? - # Save into config.log some information that might help in debugging. - { - echo - - $as_echo "## ---------------- ## -## Cache variables. ## -## ---------------- ##" - echo - # The following way of writing the cache mishandles newlines in values, -( - for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do - eval ac_val=\$$ac_var - case $ac_val in #( - *${as_nl}*) - case $ac_var in #( - *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 -$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; - esac - case $ac_var in #( - _ | IFS | as_nl) ;; #( - BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( - *) { eval $ac_var=; unset $ac_var;} ;; - esac ;; - esac - done - (set) 2>&1 | - case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( - *${as_nl}ac_space=\ *) - sed -n \ - "s/'\''/'\''\\\\'\'''\''/g; - s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" - ;; #( - *) - sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" - ;; - esac | - sort -) - echo - - $as_echo "## ----------------- ## -## Output variables. ## -## ----------------- ##" - echo - for ac_var in $ac_subst_vars - do - eval ac_val=\$$ac_var - case $ac_val in - *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; - esac - $as_echo "$ac_var='\''$ac_val'\''" - done | sort - echo - - if test -n "$ac_subst_files"; then - $as_echo "## ------------------- ## -## File substitutions. ## -## ------------------- ##" - echo - for ac_var in $ac_subst_files - do - eval ac_val=\$$ac_var - case $ac_val in - *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; - esac - $as_echo "$ac_var='\''$ac_val'\''" - done | sort - echo - fi - - if test -s confdefs.h; then - $as_echo "## ----------- ## -## confdefs.h. 
## -## ----------- ##" - echo - cat confdefs.h - echo - fi - test "$ac_signal" != 0 && - $as_echo "$as_me: caught signal $ac_signal" - $as_echo "$as_me: exit $exit_status" - } >&5 - rm -f core *.core core.conftest.* && - rm -f -r conftest* confdefs* conf$$* $ac_clean_files && - exit $exit_status -' 0 -for ac_signal in 1 2 13 15; do - trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal -done -ac_signal=0 - -# confdefs.h avoids OS command line length limits that DEFS can exceed. -rm -f -r conftest* confdefs.h - -$as_echo "/* confdefs.h */" > confdefs.h - -# Predefined preprocessor variables. - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_NAME "$PACKAGE_NAME" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_TARNAME "$PACKAGE_TARNAME" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_VERSION "$PACKAGE_VERSION" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_STRING "$PACKAGE_STRING" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_URL "$PACKAGE_URL" -_ACEOF - - -# Let the site file select an alternate cache file if it wants to. -# Prefer an explicitly selected file to automatically selected ones. -ac_site_file1=NONE -ac_site_file2=NONE -if test -n "$CONFIG_SITE"; then - # We do not want a PATH search for config.site. 
- case $CONFIG_SITE in #(( - -*) ac_site_file1=./$CONFIG_SITE;; - */*) ac_site_file1=$CONFIG_SITE;; - *) ac_site_file1=./$CONFIG_SITE;; - esac -elif test "x$prefix" != xNONE; then - ac_site_file1=$prefix/share/config.site - ac_site_file2=$prefix/etc/config.site -else - ac_site_file1=$ac_default_prefix/share/config.site - ac_site_file2=$ac_default_prefix/etc/config.site -fi -for ac_site_file in "$ac_site_file1" "$ac_site_file2" -do - test "x$ac_site_file" = xNONE && continue - if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 -$as_echo "$as_me: loading site script $ac_site_file" >&6;} - sed 's/^/| /' "$ac_site_file" >&5 - . "$ac_site_file" \ - || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "failed to load site script $ac_site_file -See \`config.log' for more details" "$LINENO" 5; } - fi -done - -if test -r "$cache_file"; then - # Some versions of bash will fail to source /dev/null (special files - # actually), so we avoid doing that. DJGPP emulates it as a regular file. - if test /dev/null != "$cache_file" && test -f "$cache_file"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 -$as_echo "$as_me: loading cache $cache_file" >&6;} - case $cache_file in - [\\/]* | ?:[\\/]* ) . "$cache_file";; - *) . "./$cache_file";; - esac - fi -else - { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 -$as_echo "$as_me: creating cache $cache_file" >&6;} - >$cache_file -fi - -# Check that the precious variables saved in the cache have kept the same -# value. 
-ac_cache_corrupted=false -for ac_var in $ac_precious_vars; do - eval ac_old_set=\$ac_cv_env_${ac_var}_set - eval ac_new_set=\$ac_env_${ac_var}_set - eval ac_old_val=\$ac_cv_env_${ac_var}_value - eval ac_new_val=\$ac_env_${ac_var}_value - case $ac_old_set,$ac_new_set in - set,) - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 -$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} - ac_cache_corrupted=: ;; - ,set) - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 -$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} - ac_cache_corrupted=: ;; - ,);; - *) - if test "x$ac_old_val" != "x$ac_new_val"; then - # differences in whitespace do not lead to failure. - ac_old_val_w=`echo x $ac_old_val` - ac_new_val_w=`echo x $ac_new_val` - if test "$ac_old_val_w" != "$ac_new_val_w"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 -$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} - ac_cache_corrupted=: - else - { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 -$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} - eval $ac_var=\$ac_old_val - fi - { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 -$as_echo "$as_me: former value: \`$ac_old_val'" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 -$as_echo "$as_me: current value: \`$ac_new_val'" >&2;} - fi;; - esac - # Pass precious variables to config.status. - if test "$ac_new_set" = set; then - case $ac_new_val in - *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; - *) ac_arg=$ac_var=$ac_new_val ;; - esac - case " $ac_configure_args " in - *" '$ac_arg' "*) ;; # Avoid dups. 
Use of quotes ensures accuracy. - *) as_fn_append ac_configure_args " '$ac_arg'" ;; - esac - fi -done -if $ac_cache_corrupted; then - { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 -$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} - as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 -fi -## -------------------- ## -## Main body of script. ## -## -------------------- ## - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - - -# Safety check: Ensure that we are in the correct source directory. - - -ac_config_headers="$ac_config_headers include/HsTimeConfig.h" - - - -# Check whether --with-cc was given. -if test "${with_cc+set}" = set; then : - withval=$with_cc; CC=$withval -fi - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu -if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. -set dummy ${ac_tool_prefix}gcc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_CC="${ac_tool_prefix}gcc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_CC"; then - ac_ct_CC=$CC - # Extract the first word of "gcc", so it can be a program name with args. -set dummy gcc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_CC"; then - ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_CC="gcc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_CC=$ac_cv_prog_ac_ct_CC -if test -n "$ac_ct_CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 -$as_echo "$ac_ct_CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_CC" = x; then - CC="" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - CC=$ac_ct_CC - fi -else - CC="$ac_cv_prog_CC" -fi - -if test -z "$CC"; then - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. -set dummy ${ac_tool_prefix}cc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_CC="${ac_tool_prefix}cc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - fi -fi -if test -z "$CC"; then - # Extract the first word of "cc", so it can be a program name with args. -set dummy cc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else - ac_prog_rejected=no -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then - ac_prog_rejected=yes - continue - fi - ac_cv_prog_CC="cc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -if test $ac_prog_rejected = yes; then - # We found a bogon in the path, so make sure we never use it. - set dummy $ac_cv_prog_CC - shift - if test $# != 0; then - # We chose a different compiler from the bogus one. - # However, it has the same basename, so the bogon will be chosen - # first if we set CC to just the basename; use the full file name. 
- shift - ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" - fi -fi -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$CC"; then - if test -n "$ac_tool_prefix"; then - for ac_prog in cl.exe - do - # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. -set dummy $ac_tool_prefix$ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_CC="$ac_tool_prefix$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$CC" && break - done -fi -if test -z "$CC"; then - ac_ct_CC=$CC - for ac_prog in cl.exe -do - # Extract the first word of "$ac_prog", so it can be a program name with args. -set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_CC"; then - ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. 
-else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_CC="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_CC=$ac_cv_prog_ac_ct_CC -if test -n "$ac_ct_CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 -$as_echo "$ac_ct_CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$ac_ct_CC" && break -done - - if test "x$ac_ct_CC" = x; then - CC="" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - CC=$ac_ct_CC - fi -fi - -fi - - -test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "no acceptable C compiler found in \$PATH -See \`config.log' for more details" "$LINENO" 5; } - -# Provide some information about the compiler. -$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 -set X $ac_compile -ac_compiler=$2 -for ac_option in --version -v -V -qversion; do - { { ac_try="$ac_compiler $ac_option >&5" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compiler $ac_option >&5") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - sed '10a\ -... rest of stderr output deleted ... 
- 10q' conftest.err >conftest.er1 - cat conftest.er1 >&5 - fi - rm -f conftest.er1 conftest.err - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } -done - -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -ac_clean_files_save=$ac_clean_files -ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" -# Try to create an executable without -o first, disregard a.out. -# It will help us diagnose broken compilers, and finding out an intuition -# of exeext. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 -$as_echo_n "checking whether the C compiler works... " >&6; } -ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` - -# The possible output files: -ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" - -ac_rmfiles= -for ac_file in $ac_files -do - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; - * ) ac_rmfiles="$ac_rmfiles $ac_file";; - esac -done -rm -f $ac_rmfiles - -if { { ac_try="$ac_link_default" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link_default") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then : - # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. -# So ignore a value of `no', otherwise this would lead to `EXEEXT = no' -# in a Makefile. We should not override ac_cv_exeext if it was cached, -# so that the user can short-circuit this test for compilers unknown to -# Autoconf. 
-for ac_file in $ac_files '' -do - test -f "$ac_file" || continue - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) - ;; - [ab].out ) - # We found the default executable, but exeext='' is most - # certainly right. - break;; - *.* ) - if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; - then :; else - ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` - fi - # We set ac_cv_exeext here because the later test for it is not - # safe: cross compilers may not add the suffix if given an `-o' - # argument, so we may need to know it at that point already. - # Even if this section looks crufty: it has the advantage of - # actually working. - break;; - * ) - break;; - esac -done -test "$ac_cv_exeext" = no && ac_cv_exeext= - -else - ac_file='' -fi -if test -z "$ac_file"; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -$as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - -{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error 77 "C compiler cannot create executables -See \`config.log' for more details" "$LINENO" 5; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 -$as_echo_n "checking for C compiler default output file name... " >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 -$as_echo "$ac_file" >&6; } -ac_exeext=$ac_cv_exeext - -rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out -ac_clean_files=$ac_clean_files_save -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 -$as_echo_n "checking for suffix of executables... 
" >&6; } -if { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then : - # If both `conftest.exe' and `conftest' are `present' (well, observable) -# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will -# work properly (i.e., refer to `conftest.exe'), while it won't with -# `rm'. -for ac_file in conftest.exe conftest conftest.*; do - test -f "$ac_file" || continue - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; - *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` - break;; - * ) break;; - esac -done -else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "cannot compute suffix of executables: cannot compile and link -See \`config.log' for more details" "$LINENO" 5; } -fi -rm -f conftest conftest$ac_cv_exeext -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 -$as_echo "$ac_cv_exeext" >&6; } - -rm -f conftest.$ac_ext -EXEEXT=$ac_cv_exeext -ac_exeext=$EXEEXT -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -int -main () -{ -FILE *f = fopen ("conftest.out", "w"); - return ferror (f) || fclose (f) != 0; - - ; - return 0; -} -_ACEOF -ac_clean_files="$ac_clean_files conftest.out" -# Check that the compiler produces executables we can run. If not, either -# the compiler is broken, or we cross compile. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 -$as_echo_n "checking whether we are cross compiling... 
" >&6; } -if test "$cross_compiling" != yes; then - { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } - if { ac_try='./conftest$ac_cv_exeext' - { { case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_try") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; }; then - cross_compiling=no - else - if test "$cross_compiling" = maybe; then - cross_compiling=yes - else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "cannot run C compiled programs. -If you meant to cross compile, use \`--host'. -See \`config.log' for more details" "$LINENO" 5; } - fi - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 -$as_echo "$cross_compiling" >&6; } - -rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out -ac_clean_files=$ac_clean_files_save -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 -$as_echo_n "checking for suffix of object files... " >&6; } -if ${ac_cv_objext+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -rm -f conftest.o conftest.obj -if { { ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compile") 2>&5 - ac_status=$? 
- $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then : - for ac_file in conftest.o conftest.obj conftest.*; do - test -f "$ac_file" || continue; - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; - *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` - break;; - esac -done -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - -{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "cannot compute suffix of object files: cannot compile -See \`config.log' for more details" "$LINENO" 5; } -fi -rm -f conftest.$ac_cv_objext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 -$as_echo "$ac_cv_objext" >&6; } -OBJEXT=$ac_cv_objext -ac_objext=$OBJEXT -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 -$as_echo_n "checking whether we are using the GNU C compiler... " >&6; } -if ${ac_cv_c_compiler_gnu+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ -#ifndef __GNUC__ - choke me -#endif - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_compiler_gnu=yes -else - ac_compiler_gnu=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -ac_cv_c_compiler_gnu=$ac_compiler_gnu - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 -$as_echo "$ac_cv_c_compiler_gnu" >&6; } -if test $ac_compiler_gnu = yes; then - GCC=yes -else - GCC= -fi -ac_test_CFLAGS=${CFLAGS+set} -ac_save_CFLAGS=$CFLAGS -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 -$as_echo_n "checking whether $CC accepts -g... 
" >&6; } -if ${ac_cv_prog_cc_g+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_save_c_werror_flag=$ac_c_werror_flag - ac_c_werror_flag=yes - ac_cv_prog_cc_g=no - CFLAGS="-g" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_prog_cc_g=yes -else - CFLAGS="" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - -else - ac_c_werror_flag=$ac_save_c_werror_flag - CFLAGS="-g" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_prog_cc_g=yes -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - ac_c_werror_flag=$ac_save_c_werror_flag -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 -$as_echo "$ac_cv_prog_cc_g" >&6; } -if test "$ac_test_CFLAGS" = set; then - CFLAGS=$ac_save_CFLAGS -elif test $ac_cv_prog_cc_g = yes; then - if test "$GCC" = yes; then - CFLAGS="-g -O2" - else - CFLAGS="-g" - fi -else - if test "$GCC" = yes; then - CFLAGS="-O2" - else - CFLAGS= - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 -$as_echo_n "checking for $CC option to accept ISO C89... " >&6; } -if ${ac_cv_prog_cc_c89+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_cv_prog_cc_c89=no -ac_save_CC=$CC -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -#include -struct stat; -/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. 
*/ -struct buf { int x; }; -FILE * (*rcsopen) (struct buf *, struct stat *, int); -static char *e (p, i) - char **p; - int i; -{ - return p[i]; -} -static char *f (char * (*g) (char **, int), char **p, ...) -{ - char *s; - va_list v; - va_start (v,p); - s = g (p, va_arg (v,int)); - va_end (v); - return s; -} - -/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has - function prototypes and stuff, but not '\xHH' hex character constants. - These don't provoke an error unfortunately, instead are silently treated - as 'x'. The following induces an error, until -std is added to get - proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an - array size at least. It's necessary to write '\x00'==0 to get something - that's true only with -std. */ -int osf4_cc_array ['\x00' == 0 ? 1 : -1]; - -/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters - inside strings and character constants. */ -#define FOO(x) 'x' -int xlc6_cc_array[FOO(a) == 'x' ? 
1 : -1]; - -int test (int i, double x); -struct s1 {int (*f) (int a);}; -struct s2 {int (*f) (double a);}; -int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); -int argc; -char **argv; -int -main () -{ -return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; - ; - return 0; -} -_ACEOF -for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ - -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" -do - CC="$ac_save_CC $ac_arg" - if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_prog_cc_c89=$ac_arg -fi -rm -f core conftest.err conftest.$ac_objext - test "x$ac_cv_prog_cc_c89" != "xno" && break -done -rm -f conftest.$ac_ext -CC=$ac_save_CC - -fi -# AC_CACHE_VAL -case "x$ac_cv_prog_cc_c89" in - x) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 -$as_echo "none needed" >&6; } ;; - xno) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 -$as_echo "unsupported" >&6; } ;; - *) - CC="$CC $ac_cv_prog_cc_c89" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 -$as_echo "$ac_cv_prog_cc_c89" >&6; } ;; -esac -if test "x$ac_cv_prog_cc_c89" != xno; then : - -fi - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - -# check for specific header (.h) files that we are interested in - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 -$as_echo_n "checking how to run the C preprocessor... " >&6; } -# On Suns, sometimes $CPP names a directory. 
-if test -n "$CPP" && test -d "$CPP"; then - CPP= -fi -if test -z "$CPP"; then - if ${ac_cv_prog_CPP+:} false; then : - $as_echo_n "(cached) " >&6 -else - # Double quotes because CPP needs to be expanded - for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" - do - ac_preproc_ok=false -for ac_c_preproc_warn_flag in '' yes -do - # Use a header file that comes with gcc, so configuring glibc - # with a fresh cross-compiler works. - # Prefer to if __STDC__ is defined, since - # exists even on freestanding compilers. - # On the NeXT, cc -E runs the code through the compiler's parser, - # not just through cpp. "Syntax error" is here to catch this case. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#ifdef __STDC__ -# include -#else -# include -#endif - Syntax error -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - -else - # Broken: fails on valid input. -continue -fi -rm -f conftest.err conftest.i conftest.$ac_ext - - # OK, works on sane cases. Now check whether nonexistent headers - # can be detected and how. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - # Broken: success on invalid input. -continue -else - # Passes both tests. -ac_preproc_ok=: -break -fi -rm -f conftest.err conftest.i conftest.$ac_ext - -done -# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. -rm -f conftest.i conftest.err conftest.$ac_ext -if $ac_preproc_ok; then : - break -fi - - done - ac_cv_prog_CPP=$CPP - -fi - CPP=$ac_cv_prog_CPP -else - ac_cv_prog_CPP=$CPP -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 -$as_echo "$CPP" >&6; } -ac_preproc_ok=false -for ac_c_preproc_warn_flag in '' yes -do - # Use a header file that comes with gcc, so configuring glibc - # with a fresh cross-compiler works. - # Prefer to if __STDC__ is defined, since - # exists even on freestanding compilers. 
- # On the NeXT, cc -E runs the code through the compiler's parser, - # not just through cpp. "Syntax error" is here to catch this case. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#ifdef __STDC__ -# include -#else -# include -#endif - Syntax error -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - -else - # Broken: fails on valid input. -continue -fi -rm -f conftest.err conftest.i conftest.$ac_ext - - # OK, works on sane cases. Now check whether nonexistent headers - # can be detected and how. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - # Broken: success on invalid input. -continue -else - # Passes both tests. -ac_preproc_ok=: -break -fi -rm -f conftest.err conftest.i conftest.$ac_ext - -done -# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. -rm -f conftest.i conftest.err conftest.$ac_ext -if $ac_preproc_ok; then : - -else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "C preprocessor \"$CPP\" fails sanity check -See \`config.log' for more details" "$LINENO" 5; } -fi - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 -$as_echo_n "checking for grep that handles long lines and -e... " >&6; } -if ${ac_cv_path_GREP+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -z "$GREP"; then - ac_path_GREP_found=false - # Loop through the user's path and test for each of PROGNAME-LIST - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_prog in grep ggrep; do - for ac_exec_ext in '' $ac_executable_extensions; do - ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" - as_fn_executable_p "$ac_path_GREP" || continue -# Check for GNU ac_path_GREP and select it if it is found. - # Check for GNU $ac_path_GREP -case `"$ac_path_GREP" --version 2>&1` in -*GNU*) - ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; -*) - ac_count=0 - $as_echo_n 0123456789 >"conftest.in" - while : - do - cat "conftest.in" "conftest.in" >"conftest.tmp" - mv "conftest.tmp" "conftest.in" - cp "conftest.in" "conftest.nl" - $as_echo 'GREP' >> "conftest.nl" - "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break - diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break - as_fn_arith $ac_count + 1 && ac_count=$as_val - if test $ac_count -gt ${ac_path_GREP_max-0}; then - # Best one so far, save it but keep looking for a better one - ac_cv_path_GREP="$ac_path_GREP" - ac_path_GREP_max=$ac_count - fi - # 10*(2^10) chars as input seems more than enough - test $ac_count -gt 10 && break - done - rm -f conftest.in conftest.tmp conftest.nl conftest.out;; -esac - - $ac_path_GREP_found && break 3 - done - done - done -IFS=$as_save_IFS - if test -z "$ac_cv_path_GREP"; then - as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 - fi -else - ac_cv_path_GREP=$GREP -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 -$as_echo "$ac_cv_path_GREP" >&6; } - GREP="$ac_cv_path_GREP" - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 -$as_echo_n "checking for egrep... 
" >&6; } -if ${ac_cv_path_EGREP+:} false; then : - $as_echo_n "(cached) " >&6 -else - if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 - then ac_cv_path_EGREP="$GREP -E" - else - if test -z "$EGREP"; then - ac_path_EGREP_found=false - # Loop through the user's path and test for each of PROGNAME-LIST - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_prog in egrep; do - for ac_exec_ext in '' $ac_executable_extensions; do - ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" - as_fn_executable_p "$ac_path_EGREP" || continue -# Check for GNU ac_path_EGREP and select it if it is found. - # Check for GNU $ac_path_EGREP -case `"$ac_path_EGREP" --version 2>&1` in -*GNU*) - ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; -*) - ac_count=0 - $as_echo_n 0123456789 >"conftest.in" - while : - do - cat "conftest.in" "conftest.in" >"conftest.tmp" - mv "conftest.tmp" "conftest.in" - cp "conftest.in" "conftest.nl" - $as_echo 'EGREP' >> "conftest.nl" - "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break - diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break - as_fn_arith $ac_count + 1 && ac_count=$as_val - if test $ac_count -gt ${ac_path_EGREP_max-0}; then - # Best one so far, save it but keep looking for a better one - ac_cv_path_EGREP="$ac_path_EGREP" - ac_path_EGREP_max=$ac_count - fi - # 10*(2^10) chars as input seems more than enough - test $ac_count -gt 10 && break - done - rm -f conftest.in conftest.tmp conftest.nl conftest.out;; -esac - - $ac_path_EGREP_found && break 3 - done - done - done -IFS=$as_save_IFS - if test -z "$ac_cv_path_EGREP"; then - as_fn_error $? 
"no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 - fi -else - ac_cv_path_EGREP=$EGREP -fi - - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 -$as_echo "$ac_cv_path_EGREP" >&6; } - EGREP="$ac_cv_path_EGREP" - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 -$as_echo_n "checking for ANSI C header files... " >&6; } -if ${ac_cv_header_stdc+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -#include -#include -#include - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_header_stdc=yes -else - ac_cv_header_stdc=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - -if test $ac_cv_header_stdc = yes; then - # SunOS 4.x string.h does not declare mem*, contrary to ANSI. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - -_ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "memchr" >/dev/null 2>&1; then : - -else - ac_cv_header_stdc=no -fi -rm -f conftest* - -fi - -if test $ac_cv_header_stdc = yes; then - # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - -_ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "free" >/dev/null 2>&1; then : - -else - ac_cv_header_stdc=no -fi -rm -f conftest* - -fi - -if test $ac_cv_header_stdc = yes; then - # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. - if test "$cross_compiling" = yes; then : - : -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -#include -#if ((' ' & 0x0FF) == 0x020) -# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') -# define TOUPPER(c) (ISLOWER(c) ? 
'A' + ((c) - 'a') : (c)) -#else -# define ISLOWER(c) \ - (('a' <= (c) && (c) <= 'i') \ - || ('j' <= (c) && (c) <= 'r') \ - || ('s' <= (c) && (c) <= 'z')) -# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) -#endif - -#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) -int -main () -{ - int i; - for (i = 0; i < 256; i++) - if (XOR (islower (i), ISLOWER (i)) - || toupper (i) != TOUPPER (i)) - return 2; - return 0; -} -_ACEOF -if ac_fn_c_try_run "$LINENO"; then : - -else - ac_cv_header_stdc=no -fi -rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ - conftest.$ac_objext conftest.beam conftest.$ac_ext -fi - -fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 -$as_echo "$ac_cv_header_stdc" >&6; } -if test $ac_cv_header_stdc = yes; then - -$as_echo "#define STDC_HEADERS 1" >>confdefs.h - -fi - -# On IRIX 5.3, sys/types and inttypes.h are conflicting. -for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ - inttypes.h stdint.h unistd.h -do : - as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` -ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default -" -if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 -_ACEOF - -fi - -done - - -for ac_header in sys/time.h sys/timeb.h time.h -do : - as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` -ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" -if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 -_ACEOF - -fi - -done - - -for ac_func in ftime gmtime_r localtime_r gettimeofday -do : - as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` -ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" -if eval test \"x\$"$as_ac_var"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo 
"HAVE_$ac_func" | $as_tr_cpp` 1 -_ACEOF - -fi -done - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether struct tm is in sys/time.h or time.h" >&5 -$as_echo_n "checking whether struct tm is in sys/time.h or time.h... " >&6; } -if ${ac_cv_struct_tm+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -#include - -int -main () -{ -struct tm tm; - int *p = &tm.tm_sec; - return !p; - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_struct_tm=time.h -else - ac_cv_struct_tm=sys/time.h -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_struct_tm" >&5 -$as_echo "$ac_cv_struct_tm" >&6; } -if test $ac_cv_struct_tm = sys/time.h; then - -$as_echo "#define TM_IN_SYS_TIME 1" >>confdefs.h - -fi - -ac_fn_c_check_member "$LINENO" "struct tm" "tm_zone" "ac_cv_member_struct_tm_tm_zone" "#include -#include <$ac_cv_struct_tm> - -" -if test "x$ac_cv_member_struct_tm_tm_zone" = xyes; then : - -cat >>confdefs.h <<_ACEOF -#define HAVE_STRUCT_TM_TM_ZONE 1 -_ACEOF - - -fi - -if test "$ac_cv_member_struct_tm_tm_zone" = yes; then - -$as_echo "#define HAVE_TM_ZONE 1" >>confdefs.h - -else - ac_fn_c_check_decl "$LINENO" "tzname" "ac_cv_have_decl_tzname" "#include -" -if test "x$ac_cv_have_decl_tzname" = xyes; then : - ac_have_decl=1 -else - ac_have_decl=0 -fi - -cat >>confdefs.h <<_ACEOF -#define HAVE_DECL_TZNAME $ac_have_decl -_ACEOF - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for tzname" >&5 -$as_echo_n "checking for tzname... " >&6; } -if ${ac_cv_var_tzname+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -#include -#if !HAVE_DECL_TZNAME -extern char *tzname[]; -#endif - -int -main () -{ -return tzname[0][0]; - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_var_tzname=yes -else - ac_cv_var_tzname=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_var_tzname" >&5 -$as_echo "$ac_cv_var_tzname" >&6; } - if test $ac_cv_var_tzname = yes; then - -$as_echo "#define HAVE_TZNAME 1" >>confdefs.h - - fi -fi - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether time.h and sys/time.h may both be included" >&5 -$as_echo_n "checking whether time.h and sys/time.h may both be included... " >&6; } -if ${ac_cv_header_time+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -#include -#include - -int -main () -{ -if ((struct tm *) 0) -return 0; - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_header_time=yes -else - ac_cv_header_time=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_time" >&5 -$as_echo "$ac_cv_header_time" >&6; } -if test $ac_cv_header_time = yes; then - -$as_echo "#define TIME_WITH_SYS_TIME 1" >>confdefs.h - -fi - -for ac_header in sys/time.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "sys/time.h" "ac_cv_header_sys_time_h" "$ac_includes_default" -if test "x$ac_cv_header_sys_time_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_SYS_TIME_H 1 -_ACEOF - -fi - -done - -ac_fn_c_check_decl "$LINENO" "altzone" "ac_cv_have_decl_altzone" "#if TIME_WITH_SYS_TIME -# include -# include -#else -# if HAVE_SYS_TIME_H -# include -# else -# include -# endif -#endif -" -if test "x$ac_cv_have_decl_altzone" = xyes; then : - ac_have_decl=1 -else - ac_have_decl=0 -fi - -cat >>confdefs.h <<_ACEOF -#define HAVE_DECL_ALTZONE $ac_have_decl -_ACEOF - - - 
-cat >confcache <<\_ACEOF -# This file is a shell script that caches the results of configure -# tests run on this system so they can be shared between configure -# scripts and configure runs, see configure's option --config-cache. -# It is not useful on other systems. If it contains results you don't -# want to keep, you may remove or edit it. -# -# config.status only pays attention to the cache file if you give it -# the --recheck option to rerun configure. -# -# `ac_cv_env_foo' variables (set or unset) will be overridden when -# loading this file, other *unset* `ac_cv_foo' will be assigned the -# following values. - -_ACEOF - -# The following way of writing the cache mishandles newlines in values, -# but we know of no workaround that is simple, portable, and efficient. -# So, we kill variables containing newlines. -# Ultrix sh set writes to stderr and can't be redirected directly, -# and sets the high bit in the cache file unless we assign to the vars. -( - for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do - eval ac_val=\$$ac_var - case $ac_val in #( - *${as_nl}*) - case $ac_var in #( - *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 -$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; - esac - case $ac_var in #( - _ | IFS | as_nl) ;; #( - BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( - *) { eval $ac_var=; unset $ac_var;} ;; - esac ;; - esac - done - - (set) 2>&1 | - case $as_nl`(ac_space=' '; set) 2>&1` in #( - *${as_nl}ac_space=\ *) - # `set' does not quote correctly, so add quotes: double-quote - # substitution turns \\\\ into \\, and sed turns \\ into \. - sed -n \ - "s/'/'\\\\''/g; - s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" - ;; #( - *) - # `set' quotes correctly as required by POSIX, so do not add quotes. 
- sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" - ;; - esac | - sort -) | - sed ' - /^ac_cv_env_/b end - t clear - :clear - s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ - t end - s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ - :end' >>confcache -if diff "$cache_file" confcache >/dev/null 2>&1; then :; else - if test -w "$cache_file"; then - if test "x$cache_file" != "x/dev/null"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 -$as_echo "$as_me: updating cache $cache_file" >&6;} - if test ! -f "$cache_file" || test -h "$cache_file"; then - cat confcache >"$cache_file" - else - case $cache_file in #( - */* | ?:*) - mv -f confcache "$cache_file"$$ && - mv -f "$cache_file"$$ "$cache_file" ;; #( - *) - mv -f confcache "$cache_file" ;; - esac - fi - fi - else - { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 -$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} - fi -fi -rm -f confcache - -test "x$prefix" = xNONE && prefix=$ac_default_prefix -# Let make expand exec_prefix. -test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' - -DEFS=-DHAVE_CONFIG_H - -ac_libobjs= -ac_ltlibobjs= -U= -for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue - # 1. Remove the extension, and $U if already installed. - ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' - ac_i=`$as_echo "$ac_i" | sed "$ac_script"` - # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR - # will be set to the directory where LIBOBJS objects are built. 
- as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" - as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' -done -LIBOBJS=$ac_libobjs - -LTLIBOBJS=$ac_ltlibobjs - - - -: "${CONFIG_STATUS=./config.status}" -ac_write_fail=0 -ac_clean_files_save=$ac_clean_files -ac_clean_files="$ac_clean_files $CONFIG_STATUS" -{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 -$as_echo "$as_me: creating $CONFIG_STATUS" >&6;} -as_write_fail=0 -cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 -#! $SHELL -# Generated by $as_me. -# Run this file to recreate the current configuration. -# Compiler output produced by configure, useful for debugging -# configure, is in config.log if it exists. - -debug=false -ac_cs_recheck=false -ac_cs_silent=false - -SHELL=\${CONFIG_SHELL-$SHELL} -export SHELL -_ASEOF -cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 -## -------------------- ## -## M4sh Initialization. ## -## -------------------- ## - -# Be more Bourne compatible -DUALCASE=1; export DUALCASE # for MKS sh -if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : - emulate sh - NULLCMD=: - # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which - # is contrary to our usage. Disable this feature. - alias -g '${1+"$@"}'='"$@"' - setopt NO_GLOB_SUBST -else - case `(set -o) 2>/dev/null` in #( - *posix*) : - set -o posix ;; #( - *) : - ;; -esac -fi - - -as_nl=' -' -export as_nl -# Printing a long string crashes Solaris 7 /usr/bin/printf. -as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo -# Prefer a ksh shell builtin over an external printf program on Solaris, -# but without wasting forks for bash or zsh. 
-if test -z "$BASH_VERSION$ZSH_VERSION" \ - && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='print -r --' - as_echo_n='print -rn --' -elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='printf %s\n' - as_echo_n='printf %s' -else - if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then - as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' - as_echo_n='/usr/ucb/echo -n' - else - as_echo_body='eval expr "X$1" : "X\\(.*\\)"' - as_echo_n_body='eval - arg=$1; - case $arg in #( - *"$as_nl"*) - expr "X$arg" : "X\\(.*\\)$as_nl"; - arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; - esac; - expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" - ' - export as_echo_n_body - as_echo_n='sh -c $as_echo_n_body as_echo' - fi - export as_echo_body - as_echo='sh -c $as_echo_body as_echo' -fi - -# The user is always right. -if test "${PATH_SEPARATOR+set}" != set; then - PATH_SEPARATOR=: - (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { - (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || - PATH_SEPARATOR=';' - } -fi - - -# IFS -# We need space, tab and new line, in precisely that order. Quoting is -# there to prevent editors from complaining about space-tab. -# (If _AS_PATH_WALK were called with IFS unset, it would disable word -# splitting by setting IFS to empty value.) -IFS=" "" $as_nl" - -# Find who we are. Look in the path if we contain no directory separator. -as_myself= -case $0 in #(( - *[\\/]* ) as_myself=$0 ;; - *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break - done -IFS=$as_save_IFS - - ;; -esac -# We did not find ourselves, most probably we were run as `sh COMMAND' -# in which case we are not to be found in the path. -if test "x$as_myself" = x; then - as_myself=$0 -fi -if test ! 
-f "$as_myself"; then - $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 - exit 1 -fi - -# Unset variables that we do not need and which cause bugs (e.g. in -# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" -# suppresses any "Segmentation fault" message there. '((' could -# trigger a bug in pdksh 5.2.14. -for as_var in BASH_ENV ENV MAIL MAILPATH -do eval test x\${$as_var+set} = xset \ - && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : -done -PS1='$ ' -PS2='> ' -PS4='+ ' - -# NLS nuisances. -LC_ALL=C -export LC_ALL -LANGUAGE=C -export LANGUAGE - -# CDPATH. -(unset CDPATH) >/dev/null 2>&1 && unset CDPATH - - -# as_fn_error STATUS ERROR [LINENO LOG_FD] -# ---------------------------------------- -# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are -# provided, also output the error to LOG_FD, referencing LINENO. Then exit the -# script with STATUS, using 1 if that was 0. -as_fn_error () -{ - as_status=$1; test $as_status -eq 0 && as_status=1 - if test "$4"; then - as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 - fi - $as_echo "$as_me: error: $2" >&2 - as_fn_exit $as_status -} # as_fn_error - - -# as_fn_set_status STATUS -# ----------------------- -# Set $? to STATUS, without forking. -as_fn_set_status () -{ - return $1 -} # as_fn_set_status - -# as_fn_exit STATUS -# ----------------- -# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. -as_fn_exit () -{ - set +e - as_fn_set_status $1 - exit $1 -} # as_fn_exit - -# as_fn_unset VAR -# --------------- -# Portably unset VAR. -as_fn_unset () -{ - { eval $1=; unset $1;} -} -as_unset=as_fn_unset -# as_fn_append VAR VALUE -# ---------------------- -# Append the text in VALUE to the end of the definition contained in VAR. 
Take -# advantage of any shell optimizations that allow amortized linear growth over -# repeated appends, instead of the typical quadratic growth present in naive -# implementations. -if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : - eval 'as_fn_append () - { - eval $1+=\$2 - }' -else - as_fn_append () - { - eval $1=\$$1\$2 - } -fi # as_fn_append - -# as_fn_arith ARG... -# ------------------ -# Perform arithmetic evaluation on the ARGs, and store the result in the -# global $as_val. Take advantage of shells that can avoid forks. The arguments -# must be portable across $(()) and expr. -if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : - eval 'as_fn_arith () - { - as_val=$(( $* )) - }' -else - as_fn_arith () - { - as_val=`expr "$@" || test $? -eq 1` - } -fi # as_fn_arith - - -if expr a : '\(a\)' >/dev/null 2>&1 && - test "X`expr 00001 : '.*\(...\)'`" = X001; then - as_expr=expr -else - as_expr=false -fi - -if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then - as_basename=basename -else - as_basename=false -fi - -if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then - as_dirname=dirname -else - as_dirname=false -fi - -as_me=`$as_basename -- "$0" || -$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ - X"$0" : 'X\(//\)$' \| \ - X"$0" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X/"$0" | - sed '/^.*\/\([^/][^/]*\)\/*$/{ - s//\1/ - q - } - /^X\/\(\/\/\)$/{ - s//\1/ - q - } - /^X\/\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - -# Avoid depending upon Character Ranges. -as_cr_letters='abcdefghijklmnopqrstuvwxyz' -as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' -as_cr_Letters=$as_cr_letters$as_cr_LETTERS -as_cr_digits='0123456789' -as_cr_alnum=$as_cr_Letters$as_cr_digits - -ECHO_C= ECHO_N= ECHO_T= -case `echo -n x` in #((((( --n*) - case `echo 'xy\c'` in - *c*) ECHO_T=' ';; # ECHO_T is single tab character. 
- xy) ECHO_C='\c';; - *) echo `echo ksh88 bug on AIX 6.1` > /dev/null - ECHO_T=' ';; - esac;; -*) - ECHO_N='-n';; -esac - -rm -f conf$$ conf$$.exe conf$$.file -if test -d conf$$.dir; then - rm -f conf$$.dir/conf$$.file -else - rm -f conf$$.dir - mkdir conf$$.dir 2>/dev/null -fi -if (echo >conf$$.file) 2>/dev/null; then - if ln -s conf$$.file conf$$ 2>/dev/null; then - as_ln_s='ln -s' - # ... but there are two gotchas: - # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. - # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. - # In both cases, we have to default to `cp -pR'. - ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || - as_ln_s='cp -pR' - elif ln conf$$.file conf$$ 2>/dev/null; then - as_ln_s=ln - else - as_ln_s='cp -pR' - fi -else - as_ln_s='cp -pR' -fi -rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file -rmdir conf$$.dir 2>/dev/null - - -# as_fn_mkdir_p -# ------------- -# Create "$as_dir" as a directory, including parents if necessary. -as_fn_mkdir_p () -{ - - case $as_dir in #( - -*) as_dir=./$as_dir;; - esac - test -d "$as_dir" || eval $as_mkdir_p || { - as_dirs= - while :; do - case $as_dir in #( - *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( - *) as_qdir=$as_dir;; - esac - as_dirs="'$as_qdir' $as_dirs" - as_dir=`$as_dirname -- "$as_dir" || -$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_dir" : 'X\(//\)[^/]' \| \ - X"$as_dir" : 'X\(//\)$' \| \ - X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$as_dir" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - test -d "$as_dir" && break - done - test -z "$as_dirs" || eval "mkdir $as_dirs" - } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" - - -} # as_fn_mkdir_p -if mkdir -p . 
2>/dev/null; then - as_mkdir_p='mkdir -p "$as_dir"' -else - test -d ./-p && rmdir ./-p - as_mkdir_p=false -fi - - -# as_fn_executable_p FILE -# ----------------------- -# Test if FILE is an executable regular file. -as_fn_executable_p () -{ - test -f "$1" && test -x "$1" -} # as_fn_executable_p -as_test_x='test -x' -as_executable_p=as_fn_executable_p - -# Sed expression to map a string onto a valid CPP name. -as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" - -# Sed expression to map a string onto a valid variable name. -as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" - - -exec 6>&1 -## ----------------------------------- ## -## Main body of $CONFIG_STATUS script. ## -## ----------------------------------- ## -_ASEOF -test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -# Save the log message, to keep $0 and so on meaningful, and to -# report actual input values of CONFIG_FILES etc. instead of their -# values after options handling. -ac_log=" -This file was extended by Haskell old-time package $as_me 1.0, which was -generated by GNU Autoconf 2.69. Invocation command line was - - CONFIG_FILES = $CONFIG_FILES - CONFIG_HEADERS = $CONFIG_HEADERS - CONFIG_LINKS = $CONFIG_LINKS - CONFIG_COMMANDS = $CONFIG_COMMANDS - $ $0 $@ - -on `(hostname || uname -n) 2>/dev/null | sed 1q` -" - -_ACEOF - - -case $ac_config_headers in *" -"*) set x $ac_config_headers; shift; ac_config_headers=$*;; -esac - - -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -# Files that config.status was made for. -config_headers="$ac_config_headers" - -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -ac_cs_usage="\ -\`$as_me' instantiates files and other configuration actions -from templates according to the current configuration. Unless the files -and actions are specified as TAGs, all are instantiated by default. - -Usage: $0 [OPTION]... [TAG]... 
- - -h, --help print this help, then exit - -V, --version print version number and configuration settings, then exit - --config print configuration, then exit - -q, --quiet, --silent - do not print progress messages - -d, --debug don't remove temporary files - --recheck update $as_me by reconfiguring in the same conditions - --header=FILE[:TEMPLATE] - instantiate the configuration header FILE - -Configuration headers: -$config_headers - -Report bugs to ." - -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" -ac_cs_version="\\ -Haskell old-time package config.status 1.0 -configured by $0, generated by GNU Autoconf 2.69, - with options \\"\$ac_cs_config\\" - -Copyright (C) 2012 Free Software Foundation, Inc. -This config.status script is free software; the Free Software Foundation -gives unlimited permission to copy, distribute and modify it." - -ac_pwd='$ac_pwd' -srcdir='$srcdir' -test -n "\$AWK" || AWK=awk -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -# The default lists apply if the user does not specify any file. -ac_need_defaults=: -while test $# != 0 -do - case $1 in - --*=?*) - ac_option=`expr "X$1" : 'X\([^=]*\)='` - ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` - ac_shift=: - ;; - --*=) - ac_option=`expr "X$1" : 'X\([^=]*\)='` - ac_optarg= - ac_shift=: - ;; - *) - ac_option=$1 - ac_optarg=$2 - ac_shift=shift - ;; - esac - - case $ac_option in - # Handling of the options. 
- -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) - ac_cs_recheck=: ;; - --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) - $as_echo "$ac_cs_version"; exit ;; - --config | --confi | --conf | --con | --co | --c ) - $as_echo "$ac_cs_config"; exit ;; - --debug | --debu | --deb | --de | --d | -d ) - debug=: ;; - --header | --heade | --head | --hea ) - $ac_shift - case $ac_optarg in - *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; - esac - as_fn_append CONFIG_HEADERS " '$ac_optarg'" - ac_need_defaults=false;; - --he | --h) - # Conflict between --help and --header - as_fn_error $? "ambiguous option: \`$1' -Try \`$0 --help' for more information.";; - --help | --hel | -h ) - $as_echo "$ac_cs_usage"; exit ;; - -q | -quiet | --quiet | --quie | --qui | --qu | --q \ - | -silent | --silent | --silen | --sile | --sil | --si | --s) - ac_cs_silent=: ;; - - # This is an error. - -*) as_fn_error $? "unrecognized option: \`$1' -Try \`$0 --help' for more information." ;; - - *) as_fn_append ac_config_targets " $1" - ac_need_defaults=false ;; - - esac - shift -done - -ac_configure_extra_args= - -if $ac_cs_silent; then - exec 6>/dev/null - ac_configure_extra_args="$ac_configure_extra_args --silent" -fi - -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -if \$ac_cs_recheck; then - set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion - shift - \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 - CONFIG_SHELL='$SHELL' - export CONFIG_SHELL - exec "\$@" -fi - -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -exec 5>>config.log -{ - echo - sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX -## Running $as_me. ## -_ASBOX - $as_echo "$ac_log" -} >&5 - -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 - -# Handling of arguments. 
-for ac_config_target in $ac_config_targets -do - case $ac_config_target in - "include/HsTimeConfig.h") CONFIG_HEADERS="$CONFIG_HEADERS include/HsTimeConfig.h" ;; - - *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; - esac -done - - -# If the user did not use the arguments to specify the items to instantiate, -# then the envvar interface is used. Set only those that are not. -# We use the long form for the default assignment because of an extremely -# bizarre bug on SunOS 4.1.3. -if $ac_need_defaults; then - test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers -fi - -# Have a temporary directory for convenience. Make it in the build tree -# simply because there is no reason against having it here, and in addition, -# creating and moving files from /tmp can sometimes cause problems. -# Hook for its removal unless debugging. -# Note that there is a small window in which the directory will not be cleaned: -# after its creation but before its name has been assigned to `$tmp'. -$debug || -{ - tmp= ac_tmp= - trap 'exit_status=$? - : "${ac_tmp:=$tmp}" - { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status -' 0 - trap 'as_fn_exit 1' 1 2 13 15 -} -# Create a (secure) tmp directory for tmp files. - -{ - tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && - test -d "$tmp" -} || -{ - tmp=./conf$$-$RANDOM - (umask 077 && mkdir "$tmp") -} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 -ac_tmp=$tmp - -# Set up the scripts for CONFIG_HEADERS section. -# No need to generate them if there are no CONFIG_HEADERS. -# This happens for instance with `./config.status Makefile'. -if test -n "$CONFIG_HEADERS"; then -cat >"$ac_tmp/defines.awk" <<\_ACAWK || -BEGIN { -_ACEOF - -# Transform confdefs.h into an awk script `defines.awk', embedded as -# here-document in config.status, that substitutes the proper values into -# config.h.in to produce config.h. 
- -# Create a delimiter string that does not exist in confdefs.h, to ease -# handling of long lines. -ac_delim='%!_!# ' -for ac_last_try in false false :; do - ac_tt=`sed -n "/$ac_delim/p" confdefs.h` - if test -z "$ac_tt"; then - break - elif $ac_last_try; then - as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5 - else - ac_delim="$ac_delim!$ac_delim _$ac_delim!! " - fi -done - -# For the awk script, D is an array of macro values keyed by name, -# likewise P contains macro parameters if any. Preserve backslash -# newline sequences. - -ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* -sed -n ' -s/.\{148\}/&'"$ac_delim"'/g -t rset -:rset -s/^[ ]*#[ ]*define[ ][ ]*/ / -t def -d -:def -s/\\$// -t bsnl -s/["\\]/\\&/g -s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ -D["\1"]=" \3"/p -s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p -d -:bsnl -s/["\\]/\\&/g -s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ -D["\1"]=" \3\\\\\\n"\\/p -t cont -s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p -t cont -d -:cont -n -s/.\{148\}/&'"$ac_delim"'/g -t clear -:clear -s/\\$// -t bsnlc -s/["\\]/\\&/g; s/^/"/; s/$/"/p -d -:bsnlc -s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p -b cont -' >$CONFIG_STATUS || ac_write_fail=1 - -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 - for (key in D) D_is_set[key] = 1 - FS = "" -} -/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { - line = \$ 0 - split(line, arg, " ") - if (arg[1] == "#") { - defundef = arg[2] - mac1 = arg[3] - } else { - defundef = substr(arg[1], 2) - mac1 = arg[2] - } - split(mac1, mac2, "(") #) - macro = mac2[1] - prefix = substr(line, 1, index(line, defundef) - 1) - if (D_is_set[macro]) { - # Preserve the white space surrounding the "#". - print prefix "define", macro P[macro] D[macro] - next - } else { - # Replace #undef with comments. 
This is necessary, for example, - # in the case of _POSIX_SOURCE, which is predefined and required - # on some systems where configure will not decide to define it. - if (defundef == "undef") { - print "/*", prefix defundef, macro, "*/" - next - } - } -} -{ print } -_ACAWK -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 - as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 -fi # test -n "$CONFIG_HEADERS" - - -eval set X " :H $CONFIG_HEADERS " -shift -for ac_tag -do - case $ac_tag in - :[FHLC]) ac_mode=$ac_tag; continue;; - esac - case $ac_mode$ac_tag in - :[FHL]*:*);; - :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;; - :[FH]-) ac_tag=-:-;; - :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; - esac - ac_save_IFS=$IFS - IFS=: - set x $ac_tag - IFS=$ac_save_IFS - shift - ac_file=$1 - shift - - case $ac_mode in - :L) ac_source=$1;; - :[FH]) - ac_file_inputs= - for ac_f - do - case $ac_f in - -) ac_f="$ac_tmp/stdin";; - *) # Look for the file first in the build tree, then in the source tree - # (if the path is not absolute). The absolute path cannot be DOS-style, - # because $ac_f cannot contain `:'. - test -f "$ac_f" || - case $ac_f in - [\\/$]*) false;; - *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; - esac || - as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; - esac - case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac - as_fn_append ac_file_inputs " '$ac_f'" - done - - # Let's still pretend it is `configure' which instantiates (i.e., don't - # use $as_me), people would be surprised to read: - # /* config.h. Generated by config.status. */ - configure_input='Generated from '` - $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' - `' by configure.' - if test x"$ac_file" != x-; then - configure_input="$ac_file. 
$configure_input" - { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 -$as_echo "$as_me: creating $ac_file" >&6;} - fi - # Neutralize special characters interpreted by sed in replacement strings. - case $configure_input in #( - *\&* | *\|* | *\\* ) - ac_sed_conf_input=`$as_echo "$configure_input" | - sed 's/[\\\\&|]/\\\\&/g'`;; #( - *) ac_sed_conf_input=$configure_input;; - esac - - case $ac_tag in - *:-:* | *:-) cat >"$ac_tmp/stdin" \ - || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; - esac - ;; - esac - - ac_dir=`$as_dirname -- "$ac_file" || -$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$ac_file" : 'X\(//\)[^/]' \| \ - X"$ac_file" : 'X\(//\)$' \| \ - X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$ac_file" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - as_dir="$ac_dir"; as_fn_mkdir_p - ac_builddir=. - -case "$ac_dir" in -.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; -*) - ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` - # A ".." for each directory in $ac_dir_suffix. - ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` - case $ac_top_builddir_sub in - "") ac_top_builddir_sub=. ac_top_build_prefix= ;; - *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; - esac ;; -esac -ac_abs_top_builddir=$ac_pwd -ac_abs_builddir=$ac_pwd$ac_dir_suffix -# for backward compatibility: -ac_top_builddir=$ac_top_build_prefix - -case $srcdir in - .) # We are building in place. - ac_srcdir=. - ac_top_srcdir=$ac_top_builddir_sub - ac_abs_top_srcdir=$ac_pwd ;; - [\\/]* | ?:[\\/]* ) # Absolute name. - ac_srcdir=$srcdir$ac_dir_suffix; - ac_top_srcdir=$srcdir - ac_abs_top_srcdir=$srcdir ;; - *) # Relative name. 
- ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix - ac_top_srcdir=$ac_top_build_prefix$srcdir - ac_abs_top_srcdir=$ac_pwd/$srcdir ;; -esac -ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix - - - case $ac_mode in - - :H) - # - # CONFIG_HEADER - # - if test x"$ac_file" != x-; then - { - $as_echo "/* $configure_input */" \ - && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" - } >"$ac_tmp/config.h" \ - || as_fn_error $? "could not create $ac_file" "$LINENO" 5 - if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then - { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 -$as_echo "$as_me: $ac_file is unchanged" >&6;} - else - rm -f "$ac_file" - mv "$ac_tmp/config.h" "$ac_file" \ - || as_fn_error $? "could not create $ac_file" "$LINENO" 5 - fi - else - $as_echo "/* $configure_input */" \ - && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ - || as_fn_error $? "could not create -" "$LINENO" 5 - fi - ;; - - - esac - -done # for ac_tag - - -as_fn_exit 0 -_ACEOF -ac_clean_files=$ac_clean_files_save - -test $ac_write_fail = 0 || - as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 - - -# configure is writing to config.log, and then calls config.status. -# config.status does its own redirection, appending to config.log. -# Unfortunately, on DOS this fails, as config.log is still kept open -# by configure, so config.status won't be able to write to it; its -# output is simply discarded. So we exec the FD to /dev/null, -# effectively closing config.log, so it can be properly (re)opened and -# appended to by config.status. When coming back to configure, we -# need to make the FD available again. -if test "$no_create" != yes; then - ac_cs_success=: - ac_config_status_args= - test "$silent" = yes && - ac_config_status_args="$ac_config_status_args --quiet" - exec 5>/dev/null - $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false - exec 5>>config.log - # Use ||, not &&, to avoid exiting from the if with $? 
= 1, which - # would make configure fail if this is the last instruction. - $ac_cs_success || as_fn_exit 1 -fi -if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 -$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} -fi - diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar11=/configure.ac cabal-install-1.22-1.22.9.0/=unpacked-tar11=/configure.ac --- cabal-install-1.22-1.22.6.0/=unpacked-tar11=/configure.ac 2014-11-21 10:44:17.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar11=/configure.ac 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ -AC_INIT([Haskell old-time package], [1.0], [libraries@haskell.org], [old-time]) - -# Safety check: Ensure that we are in the correct source directory. -AC_CONFIG_SRCDIR([include/HsTime.h]) - -AC_CONFIG_HEADERS([include/HsTimeConfig.h]) - -AC_ARG_WITH([cc], - [C compiler], - [CC=$withval]) -AC_PROG_CC() - -# check for specific header (.h) files that we are interested in -AC_CHECK_HEADERS([sys/time.h sys/timeb.h time.h]) - -AC_CHECK_FUNCS([ftime gmtime_r localtime_r gettimeofday]) - -dnl ** how do we get a timezone name, and UTC offset ? -AC_STRUCT_TIMEZONE - -dnl ** do we have altzone? -FP_DECL_ALTZONE - -AC_OUTPUT diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar11=/include/HsTimeConfig.h.in cabal-install-1.22-1.22.9.0/=unpacked-tar11=/include/HsTimeConfig.h.in --- cabal-install-1.22-1.22.6.0/=unpacked-tar11=/include/HsTimeConfig.h.in 2014-11-21 10:44:17.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar11=/include/HsTimeConfig.h.in 1970-01-01 00:00:00.000000000 +0000 @@ -1,95 +0,0 @@ -/* include/HsTimeConfig.h.in. Generated from configure.ac by autoheader. */ - -/* Define to 1 if you have the declaration of `altzone', and to 0 if you - don't. 
*/ -#undef HAVE_DECL_ALTZONE - -/* Define to 1 if you have the declaration of `tzname', and to 0 if you don't. - */ -#undef HAVE_DECL_TZNAME - -/* Define to 1 if you have the `ftime' function. */ -#undef HAVE_FTIME - -/* Define to 1 if you have the `gettimeofday' function. */ -#undef HAVE_GETTIMEOFDAY - -/* Define to 1 if you have the `gmtime_r' function. */ -#undef HAVE_GMTIME_R - -/* Define to 1 if you have the header file. */ -#undef HAVE_INTTYPES_H - -/* Define to 1 if you have the `localtime_r' function. */ -#undef HAVE_LOCALTIME_R - -/* Define to 1 if you have the header file. */ -#undef HAVE_MEMORY_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_STDINT_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_STDLIB_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_STRINGS_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_STRING_H - -/* Define to 1 if `tm_zone' is a member of `struct tm'. */ -#undef HAVE_STRUCT_TM_TM_ZONE - -/* Define to 1 if you have the header file. */ -#undef HAVE_SYS_STAT_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_SYS_TIMEB_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_SYS_TIME_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_SYS_TYPES_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_TIME_H - -/* Define to 1 if your `struct tm' has `tm_zone'. Deprecated, use - `HAVE_STRUCT_TM_TM_ZONE' instead. */ -#undef HAVE_TM_ZONE - -/* Define to 1 if you don't have `tm_zone' but do have the external array - `tzname'. */ -#undef HAVE_TZNAME - -/* Define to 1 if you have the header file. */ -#undef HAVE_UNISTD_H - -/* Define to the address where bug reports for this package should be sent. */ -#undef PACKAGE_BUGREPORT - -/* Define to the full name of this package. */ -#undef PACKAGE_NAME - -/* Define to the full name and version of this package. 
*/ -#undef PACKAGE_STRING - -/* Define to the one symbol short name of this package. */ -#undef PACKAGE_TARNAME - -/* Define to the home page for this package. */ -#undef PACKAGE_URL - -/* Define to the version of this package. */ -#undef PACKAGE_VERSION - -/* Define to 1 if you have the ANSI C header files. */ -#undef STDC_HEADERS - -/* Define to 1 if you can safely include both and . */ -#undef TIME_WITH_SYS_TIME - -/* Define to 1 if your declares `struct tm'. */ -#undef TM_IN_SYS_TIME diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar11=/include/HsTime.h cabal-install-1.22-1.22.9.0/=unpacked-tar11=/include/HsTime.h --- cabal-install-1.22-1.22.6.0/=unpacked-tar11=/include/HsTime.h 2014-11-21 10:44:17.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar11=/include/HsTime.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,49 +0,0 @@ -/* - * (c) The University of Glasgow 2002 - * - * Time Runtime Support - */ -#ifndef __TIMEUTILS_H__ -#define __TIMEUTILS_H__ - -#include "HsTimeConfig.h" -// Otherwise these clash with similar definitions from other packages: -#undef PACKAGE_BUGREPORT -#undef PACKAGE_NAME -#undef PACKAGE_STRING -#undef PACKAGE_TARNAME -#undef PACKAGE_VERSION - -#if HAVE_GETTIMEOFDAY -# if HAVE_SYS_TIME_H -# include -# endif -#elif HAVE_GETCLOCK -# if HAVE_SYS_TIMERS_H -# define POSIX_4D9 1 -# include -# endif -#endif -#if HAVE_TIME_H -#include -#endif -#if HAVE_SYS_TIMEB_H && !defined(__FreeBSD__) -#include -#endif - -extern long *__hscore_timezone( void ); -extern char **__hscore_tzname( void ); - -#if HAVE_GETTIMEOFDAY -extern int __hscore_gettimeofday(struct timeval *tp, void *tzp); -#endif - -#if HAVE_GMTIME_R -extern struct tm *__hscore_gmtime_r(const time_t *clock, struct tm *result); -#endif - -#if HAVE_LOCALTIME_R -extern struct tm *__hscore_localtime_r(const time_t *clock, struct tm *result); -#endif - -#endif /* __TIMEUTILS_H__ */ diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar11=/install-sh 
cabal-install-1.22-1.22.9.0/=unpacked-tar11=/install-sh --- cabal-install-1.22-1.22.6.0/=unpacked-tar11=/install-sh 2014-11-21 10:44:17.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar11=/install-sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,527 +0,0 @@ -#!/bin/sh -# install - install a program, script, or datafile - -scriptversion=2011-11-20.07; # UTC - -# This originates from X11R5 (mit/util/scripts/install.sh), which was -# later released in X11R6 (xc/config/util/install.sh) with the -# following copyright and license. -# -# Copyright (C) 1994 X Consortium -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to -# deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -# sell copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN -# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC- -# TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -# -# Except as contained in this notice, the name of the X Consortium shall not -# be used in advertising or otherwise to promote the sale, use or other deal- -# ings in this Software without prior written authorization from the X Consor- -# tium. -# -# -# FSF changes to this file are in the public domain. 
-# -# Calling this script install-sh is preferred over install.sh, to prevent -# 'make' implicit rules from creating a file called install from it -# when there is no Makefile. -# -# This script is compatible with the BSD install script, but was written -# from scratch. - -nl=' -' -IFS=" "" $nl" - -# set DOITPROG to echo to test this script - -# Don't use :- since 4.3BSD and earlier shells don't like it. -doit=${DOITPROG-} -if test -z "$doit"; then - doit_exec=exec -else - doit_exec=$doit -fi - -# Put in absolute file names if you don't have them in your path; -# or use environment vars. - -chgrpprog=${CHGRPPROG-chgrp} -chmodprog=${CHMODPROG-chmod} -chownprog=${CHOWNPROG-chown} -cmpprog=${CMPPROG-cmp} -cpprog=${CPPROG-cp} -mkdirprog=${MKDIRPROG-mkdir} -mvprog=${MVPROG-mv} -rmprog=${RMPROG-rm} -stripprog=${STRIPPROG-strip} - -posix_glob='?' -initialize_posix_glob=' - test "$posix_glob" != "?" || { - if (set -f) 2>/dev/null; then - posix_glob= - else - posix_glob=: - fi - } -' - -posix_mkdir= - -# Desired mode of installed file. -mode=0755 - -chgrpcmd= -chmodcmd=$chmodprog -chowncmd= -mvcmd=$mvprog -rmcmd="$rmprog -f" -stripcmd= - -src= -dst= -dir_arg= -dst_arg= - -copy_on_change=false -no_target_directory= - -usage="\ -Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE - or: $0 [OPTION]... SRCFILES... DIRECTORY - or: $0 [OPTION]... -t DIRECTORY SRCFILES... - or: $0 [OPTION]... -d DIRECTORIES... - -In the 1st form, copy SRCFILE to DSTFILE. -In the 2nd and 3rd, copy all SRCFILES to DIRECTORY. -In the 4th, create DIRECTORIES. - -Options: - --help display this help and exit. - --version display version info and exit. - - -c (ignored) - -C install only if different (preserve the last data modification time) - -d create directories instead of installing files. - -g GROUP $chgrpprog installed files to GROUP. - -m MODE $chmodprog installed files to MODE. - -o USER $chownprog installed files to USER. - -s $stripprog installed files. - -t DIRECTORY install into DIRECTORY. 
- -T report an error if DSTFILE is a directory. - -Environment variables override the default commands: - CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG - RMPROG STRIPPROG -" - -while test $# -ne 0; do - case $1 in - -c) ;; - - -C) copy_on_change=true;; - - -d) dir_arg=true;; - - -g) chgrpcmd="$chgrpprog $2" - shift;; - - --help) echo "$usage"; exit $?;; - - -m) mode=$2 - case $mode in - *' '* | *' '* | *' -'* | *'*'* | *'?'* | *'['*) - echo "$0: invalid mode: $mode" >&2 - exit 1;; - esac - shift;; - - -o) chowncmd="$chownprog $2" - shift;; - - -s) stripcmd=$stripprog;; - - -t) dst_arg=$2 - # Protect names problematic for 'test' and other utilities. - case $dst_arg in - -* | [=\(\)!]) dst_arg=./$dst_arg;; - esac - shift;; - - -T) no_target_directory=true;; - - --version) echo "$0 $scriptversion"; exit $?;; - - --) shift - break;; - - -*) echo "$0: invalid option: $1" >&2 - exit 1;; - - *) break;; - esac - shift -done - -if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then - # When -d is used, all remaining arguments are directories to create. - # When -t is used, the destination is already specified. - # Otherwise, the last argument is the destination. Remove it from $@. - for arg - do - if test -n "$dst_arg"; then - # $@ is not empty: it contains at least $arg. - set fnord "$@" "$dst_arg" - shift # fnord - fi - shift # arg - dst_arg=$arg - # Protect names problematic for 'test' and other utilities. - case $dst_arg in - -* | [=\(\)!]) dst_arg=./$dst_arg;; - esac - done -fi - -if test $# -eq 0; then - if test -z "$dir_arg"; then - echo "$0: no input file specified." >&2 - exit 1 - fi - # It's OK to call 'install-sh -d' without argument. - # This can happen when creating conditional directories. 
- exit 0 -fi - -if test -z "$dir_arg"; then - do_exit='(exit $ret); exit $ret' - trap "ret=129; $do_exit" 1 - trap "ret=130; $do_exit" 2 - trap "ret=141; $do_exit" 13 - trap "ret=143; $do_exit" 15 - - # Set umask so as not to create temps with too-generous modes. - # However, 'strip' requires both read and write access to temps. - case $mode in - # Optimize common cases. - *644) cp_umask=133;; - *755) cp_umask=22;; - - *[0-7]) - if test -z "$stripcmd"; then - u_plus_rw= - else - u_plus_rw='% 200' - fi - cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;; - *) - if test -z "$stripcmd"; then - u_plus_rw= - else - u_plus_rw=,u+rw - fi - cp_umask=$mode$u_plus_rw;; - esac -fi - -for src -do - # Protect names problematic for 'test' and other utilities. - case $src in - -* | [=\(\)!]) src=./$src;; - esac - - if test -n "$dir_arg"; then - dst=$src - dstdir=$dst - test -d "$dstdir" - dstdir_status=$? - else - - # Waiting for this to be detected by the "$cpprog $src $dsttmp" command - # might cause directories to be created, which would be especially bad - # if $src (and thus $dsttmp) contains '*'. - if test ! -f "$src" && test ! -d "$src"; then - echo "$0: $src does not exist." >&2 - exit 1 - fi - - if test -z "$dst_arg"; then - echo "$0: no destination specified." >&2 - exit 1 - fi - dst=$dst_arg - - # If destination is a directory, append the input filename; won't work - # if double slashes aren't ignored. - if test -d "$dst"; then - if test -n "$no_target_directory"; then - echo "$0: $dst_arg: Is a directory" >&2 - exit 1 - fi - dstdir=$dst - dst=$dstdir/`basename "$src"` - dstdir_status=0 - else - # Prefer dirname, but fall back on a substitute if dirname fails. - dstdir=` - (dirname "$dst") 2>/dev/null || - expr X"$dst" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$dst" : 'X\(//\)[^/]' \| \ - X"$dst" : 'X\(//\)$' \| \ - X"$dst" : 'X\(/\)' \| . 
2>/dev/null || - echo X"$dst" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q' - ` - - test -d "$dstdir" - dstdir_status=$? - fi - fi - - obsolete_mkdir_used=false - - if test $dstdir_status != 0; then - case $posix_mkdir in - '') - # Create intermediate dirs using mode 755 as modified by the umask. - # This is like FreeBSD 'install' as of 1997-10-28. - umask=`umask` - case $stripcmd.$umask in - # Optimize common cases. - *[2367][2367]) mkdir_umask=$umask;; - .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;; - - *[0-7]) - mkdir_umask=`expr $umask + 22 \ - - $umask % 100 % 40 + $umask % 20 \ - - $umask % 10 % 4 + $umask % 2 - `;; - *) mkdir_umask=$umask,go-w;; - esac - - # With -d, create the new directory with the user-specified mode. - # Otherwise, rely on $mkdir_umask. - if test -n "$dir_arg"; then - mkdir_mode=-m$mode - else - mkdir_mode= - fi - - posix_mkdir=false - case $umask in - *[123567][0-7][0-7]) - # POSIX mkdir -p sets u+wx bits regardless of umask, which - # is incompatible with FreeBSD 'install' when (umask & 300) != 0. - ;; - *) - tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$ - trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0 - - if (umask $mkdir_umask && - exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1 - then - if test -z "$dir_arg" || { - # Check for POSIX incompatibilities with -m. - # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or - # other-writable bit of parent directory when it shouldn't. - # FreeBSD 6.1 mkdir -m -p sets mode of existing directory. 
- ls_ld_tmpdir=`ls -ld "$tmpdir"` - case $ls_ld_tmpdir in - d????-?r-*) different_mode=700;; - d????-?--*) different_mode=755;; - *) false;; - esac && - $mkdirprog -m$different_mode -p -- "$tmpdir" && { - ls_ld_tmpdir_1=`ls -ld "$tmpdir"` - test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1" - } - } - then posix_mkdir=: - fi - rmdir "$tmpdir/d" "$tmpdir" - else - # Remove any dirs left behind by ancient mkdir implementations. - rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null - fi - trap '' 0;; - esac;; - esac - - if - $posix_mkdir && ( - umask $mkdir_umask && - $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir" - ) - then : - else - - # The umask is ridiculous, or mkdir does not conform to POSIX, - # or it failed possibly due to a race condition. Create the - # directory the slow way, step by step, checking for races as we go. - - case $dstdir in - /*) prefix='/';; - [-=\(\)!]*) prefix='./';; - *) prefix='';; - esac - - eval "$initialize_posix_glob" - - oIFS=$IFS - IFS=/ - $posix_glob set -f - set fnord $dstdir - shift - $posix_glob set +f - IFS=$oIFS - - prefixes= - - for d - do - test X"$d" = X && continue - - prefix=$prefix$d - if test -d "$prefix"; then - prefixes= - else - if $posix_mkdir; then - (umask=$mkdir_umask && - $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break - # Don't fail if two instances are running concurrently. - test -d "$prefix" || exit 1 - else - case $prefix in - *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;; - *) qprefix=$prefix;; - esac - prefixes="$prefixes '$qprefix'" - fi - fi - prefix=$prefix/ - done - - if test -n "$prefixes"; then - # Don't fail if two instances are running concurrently. 
- (umask $mkdir_umask && - eval "\$doit_exec \$mkdirprog $prefixes") || - test -d "$dstdir" || exit 1 - obsolete_mkdir_used=true - fi - fi - fi - - if test -n "$dir_arg"; then - { test -z "$chowncmd" || $doit $chowncmd "$dst"; } && - { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } && - { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false || - test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1 - else - - # Make a couple of temp file names in the proper directory. - dsttmp=$dstdir/_inst.$$_ - rmtmp=$dstdir/_rm.$$_ - - # Trap to clean up those temp files at exit. - trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0 - - # Copy the file name to the temp name. - (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") && - - # and set any options; do chmod last to preserve setuid bits. - # - # If any of these fail, we abort the whole thing. If we want to - # ignore errors from any of these, just make sure not to ignore - # errors from the above "$doit $cpprog $src $dsttmp" command. - # - { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } && - { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } && - { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } && - { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } && - - # If -C, don't bother to copy if it wouldn't change the file. - if $copy_on_change && - old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` && - new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` && - - eval "$initialize_posix_glob" && - $posix_glob set -f && - set X $old && old=:$2:$4:$5:$6 && - set X $new && new=:$2:$4:$5:$6 && - $posix_glob set +f && - - test "$old" = "$new" && - $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1 - then - rm -f "$dsttmp" - else - # Rename the file to the real destination. - $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null || - - # The rename failed, perhaps because mv can't rename something else - # to itself, or perhaps because mv is so ancient that it does not - # support -f. 
- { - # Now remove or move aside any old file at destination location. - # We try this two ways since rm can't unlink itself on some - # systems and the destination file might be busy for other - # reasons. In this case, the final cleanup might fail but the new - # file should still install successfully. - { - test ! -f "$dst" || - $doit $rmcmd -f "$dst" 2>/dev/null || - { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null && - { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; } - } || - { echo "$0: cannot unlink or rename $dst" >&2 - (exit 1); exit 1 - } - } && - - # Now rename the file to the real destination. - $doit $mvcmd "$dsttmp" "$dst" - } - fi || exit 1 - - trap '' 0 - fi -done - -# Local variables: -# eval: (add-hook 'write-file-hooks 'time-stamp) -# time-stamp-start: "scriptversion=" -# time-stamp-format: "%:y-%02m-%02d.%02H" -# time-stamp-time-zone: "UTC" -# time-stamp-end: "; # UTC" -# End: diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar11=/LICENSE cabal-install-1.22-1.22.9.0/=unpacked-tar11=/LICENSE --- cabal-install-1.22-1.22.6.0/=unpacked-tar11=/LICENSE 2014-11-21 10:44:17.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar11=/LICENSE 1970-01-01 00:00:00.000000000 +0000 @@ -1,63 +0,0 @@ -This library (libraries/base) is derived from code from two -sources: - - * Code from the GHC project which is largely (c) The University of - Glasgow, and distributable under a BSD-style license (see below), - - * Code from the Haskell 98 Report which is (c) Simon Peyton Jones - and freely redistributable (but see the full license for - restrictions). - -The full text of these licenses is reproduced below. Both of the -licenses are BSD-style or compatible. - ------------------------------------------------------------------------------ - -The Glasgow Haskell Compiler License - -Copyright 2004, The University Court of the University of Glasgow. -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -- Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. - -- Redistributions in binary form must reproduce the above copyright notice, -this list of conditions and the following disclaimer in the documentation -and/or other materials provided with the distribution. - -- Neither name of the University nor the names of its contributors may be -used to endorse or promote products derived from this software without -specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY COURT OF THE UNIVERSITY OF -GLASGOW AND THE CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, -INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND -FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -UNIVERSITY COURT OF THE UNIVERSITY OF GLASGOW OR THE CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -DAMAGE. - ------------------------------------------------------------------------------ - -Code derived from the document "Report on the Programming Language -Haskell 98", is distributed under the following license: - - Copyright (c) 2002 Simon Peyton Jones - - The authors intend this Report to belong to the entire Haskell - community, and so we grant permission to copy and distribute it for - any purpose, provided that it is reproduced in its entirety, - including this Notice. 
Modified versions of this Report may also be - copied and distributed for any purpose, provided that the modified - version is clearly presented as such, and that it does not claim to - be a definition of the Haskell 98 Language. - ------------------------------------------------------------------------------ diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar11=/old-time.buildinfo cabal-install-1.22-1.22.9.0/=unpacked-tar11=/old-time.buildinfo --- cabal-install-1.22-1.22.6.0/=unpacked-tar11=/old-time.buildinfo 2014-11-21 10:44:17.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar11=/old-time.buildinfo 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -install-includes: HsTimeConfig.h diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar11=/old-time.cabal cabal-install-1.22-1.22.9.0/=unpacked-tar11=/old-time.cabal --- cabal-install-1.22-1.22.6.0/=unpacked-tar11=/old-time.cabal 2014-11-21 10:44:17.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar11=/old-time.cabal 1970-01-01 00:00:00.000000000 +0000 @@ -1,59 +0,0 @@ -name: old-time -version: 1.1.0.3 --- NOTE: Don't forget to update ./changelog.md -license: BSD3 -license-file: LICENSE -maintainer: libraries@haskell.org -bug-reports: https://github.com/haskell/old-time/issues -synopsis: Time library -category: System -build-type: Configure -cabal-Version: >=1.10 -description: - This package provides the old time library. - . - For new projects, the newer - - is recommended. 
- -extra-source-files: - aclocal.m4 - changelog.md - config.guess - config.sub - configure - configure.ac - include/HsTimeConfig.h.in - install-sh - old-time.buildinfo - -extra-tmp-files: - autom4te.cache - config.log - config.status - include/HsTimeConfig.h - -source-repository head - type: git - location: https://github.com/haskell/old-time.git - -Library - default-language: Haskell2010 - other-extensions: Trustworthy - - exposed-modules: - System.Time - - c-sources: - cbits/timeUtils.c - - include-dirs: include - includes: HsTime.h - install-includes: - HsTime.h - - build-depends: - base >= 4.7 && < 4.9, - old-locale == 1.0.* - - ghc-options: -Wall diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar11=/Setup.hs cabal-install-1.22-1.22.9.0/=unpacked-tar11=/Setup.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar11=/Setup.hs 2014-11-21 10:44:17.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar11=/Setup.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -module Main (main) where - -import Distribution.Simple - -main :: IO () -main = defaultMainWithHooks autoconfUserHooks diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar11=/System/Time.hsc cabal-install-1.22-1.22.9.0/=unpacked-tar11=/System/Time.hsc --- cabal-install-1.22-1.22.6.0/=unpacked-tar11=/System/Time.hsc 2014-11-21 10:44:17.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar11=/System/Time.hsc 1970-01-01 00:00:00.000000000 +0000 @@ -1,765 +0,0 @@ -#if __GLASGOW_HASKELL__ >= 701 -{-# LANGUAGE Trustworthy #-} -#endif - ------------------------------------------------------------------------------ --- | --- Module : System.Time --- Copyright : (c) The University of Glasgow 2001 --- License : BSD-style (see the file libraries/old-time/LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : provisional --- Portability : portable --- --- The standard time library from Haskell 98. This library is --- deprecated, please look at @Data.Time@ in the @time@ package --- instead. 
--- --- "System.Time" provides functionality for clock times, including --- timezone information (i.e, the functionality of \"@time.h@\", --- adapted to the Haskell environment). It follows RFC 1129 in its --- use of Coordinated Universal Time (UTC). --- ------------------------------------------------------------------------------ - -{- -Haskell 98 Time of Day Library ------------------------------- - -2000/06/17 : -RESTRICTIONS: - * min./max. time diff currently is restricted to - [minBound::Int, maxBound::Int] - - * surely other restrictions wrt. min/max bounds - - -NOTES: - * printing times - - `showTime' (used in `instance Show ClockTime') always prints time - converted to the local timezone (even if it is taken from - `(toClockTime . toUTCTime)'), whereas `calendarTimeToString' - honors the tzone & tz fields and prints UTC or whatever timezone - is stored inside CalendarTime. - - Maybe `showTime' should be changed to use UTC, since it would - better correspond to the actual representation of `ClockTime' - (can be done by replacing localtime(3) by gmtime(3)). - - -BUGS: - * add proper handling of microsecs, currently, they're mostly - ignored - - * `formatFOO' case of `%s' is currently broken... - - -TODO: - * check for unusual date cases, like 1970/1/1 00:00h, and conversions - between different timezone's etc. - - * check, what needs to be in the IO monad, the current situation - seems to be a bit inconsistent to me - - * check whether `isDst = -1' works as expected on other arch's - (Solaris anyone?) - - * add functions to parse strings to `CalendarTime' (some day...) - - * implement padding capabilities ("%_", "%-") in `formatFOO' - - * add rfc822 timezone (+0200 is CEST) representation ("%z") in `formatFOO' --} - -module System.Time - ( - -- * Clock times - - ClockTime(..) -- non-standard, lib. report gives this as abstract - -- instance Eq, Ord - -- instance Show (non-standard) - - , getClockTime - - -- * Time differences - - , TimeDiff(..) 
- , noTimeDiff -- non-standard (but useful when constructing TimeDiff vals.) - , diffClockTimes - , addToClockTime - - , normalizeTimeDiff -- non-standard - , timeDiffToString -- non-standard - , formatTimeDiff -- non-standard - - -- * Calendar times - - , CalendarTime(..) - , Month(..) - , Day(..) - , toCalendarTime - , toUTCTime - , toClockTime - , calendarTimeToString - , formatCalendarTime - - ) where - -#ifdef __GLASGOW_HASKELL__ -#include "HsTime.h" -#endif - -import Prelude - -import Data.Ix -import System.Locale -import Foreign -import System.IO.Unsafe (unsafePerformIO) - -#ifdef __HUGS__ -import Hugs.Time ( getClockTimePrim, toCalTimePrim, toClockTimePrim ) -#else -import Foreign.C -#endif - --- One way to partition and give name to chunks of a year and a week: - --- | A month of the year. - -data Month - = January | February | March | April - | May | June | July | August - | September | October | November | December - deriving (Eq, Ord, Enum, Bounded, Ix, Read, Show) - --- | A day of the week. - -data Day - = Sunday | Monday | Tuesday | Wednesday - | Thursday | Friday | Saturday - deriving (Eq, Ord, Enum, Bounded, Ix, Read, Show) - --- | A representation of the internal clock time. --- Clock times may be compared, converted to strings, or converted to an --- external calendar time 'CalendarTime' for I\/O or other manipulations. - -data ClockTime = TOD Integer Integer - -- ^ Construct a clock time. The arguments are a number - -- of seconds since 00:00:00 (UTC) on 1 January 1970, - -- and an additional number of picoseconds. - -- - -- In Haskell 98, the 'ClockTime' type is abstract. - deriving (Eq, Ord) - --- When a ClockTime is shown, it is converted to a CalendarTime in the current --- timezone and then printed. FIXME: This is arguably wrong, since we can't --- get the current timezone without being in the IO monad. 
- -instance Show ClockTime where - showsPrec _ t = showString (calendarTimeToString - (unsafePerformIO (toCalendarTime t))) - -{- -The numeric fields have the following ranges. - -\begin{verbatim} -Value Range Comments ------ ----- -------- - -year -maxInt .. maxInt [Pre-Gregorian dates are inaccurate] -day 1 .. 31 -hour 0 .. 23 -min 0 .. 59 -sec 0 .. 61 [Allows for two leap seconds] -picosec 0 .. (10^12)-1 [This could be over-precise?] -yday 0 .. 365 [364 in non-Leap years] -tz -43200 .. 50400 [Variation from UTC in seconds] -\end{verbatim} --} - --- | 'CalendarTime' is a user-readable and manipulable --- representation of the internal 'ClockTime' type. - -data CalendarTime - = CalendarTime { - ctYear :: Int -- ^ Year (pre-Gregorian dates are inaccurate) - , ctMonth :: Month -- ^ Month of the year - , ctDay :: Int -- ^ Day of the month (1 to 31) - , ctHour :: Int -- ^ Hour of the day (0 to 23) - , ctMin :: Int -- ^ Minutes (0 to 59) - , ctSec :: Int -- ^ Seconds (0 to 61, allowing for up to - -- two leap seconds) - , ctPicosec :: Integer -- ^ Picoseconds - , ctWDay :: Day -- ^ Day of the week - , ctYDay :: Int -- ^ Day of the year - -- (0 to 364, or 365 in leap years) - , ctTZName :: String -- ^ Name of the time zone - , ctTZ :: Int -- ^ Variation from UTC in seconds - , ctIsDST :: Bool -- ^ 'True' if Daylight Savings Time would - -- be in effect, and 'False' otherwise - } - deriving (Eq,Ord,Read,Show) - --- | records the difference between two clock times in a user-readable way. - -data TimeDiff - = TimeDiff { - tdYear :: Int, - tdMonth :: Int, - tdDay :: Int, - tdHour :: Int, - tdMin :: Int, - tdSec :: Int, - tdPicosec :: Integer -- not standard - } - deriving (Eq,Ord,Read,Show) - --- | null time difference. - -noTimeDiff :: TimeDiff -noTimeDiff = TimeDiff 0 0 0 0 0 0 0 - --- ----------------------------------------------------------------------------- --- | returns the current time in its internal representation. 
- -realToInteger :: Real a => a -> Integer -realToInteger ct = round (realToFrac ct :: Double) - -- CTime, CClock, CUShort etc are in Real but not Fractional, - -- so we must convert to Double before we can round it - -getClockTime :: IO ClockTime -#ifdef __HUGS__ -getClockTime = do - (sec,usec) <- getClockTimePrim - return (TOD (fromIntegral sec) ((fromIntegral usec) * 1000000)) - -#elif HAVE_GETTIMEOFDAY - -# if defined(mingw32_HOST_OS) -type Timeval_tv_sec = CLong -type Timeval_tv_usec = CLong -# else -type Timeval_tv_sec = CTime -type Timeval_tv_usec = CSUSeconds -# endif - -getClockTime = do - allocaBytes (#const sizeof(struct timeval)) $ \ p_timeval -> do - throwErrnoIfMinus1_ "getClockTime" $ gettimeofday p_timeval nullPtr - sec <- (#peek struct timeval,tv_sec) p_timeval :: IO Timeval_tv_sec - usec <- (#peek struct timeval,tv_usec) p_timeval :: IO Timeval_tv_usec - return (TOD (realToInteger sec) ((realToInteger usec) * 1000000)) - -#elif HAVE_FTIME -getClockTime = do - allocaBytes (#const sizeof(struct timeb)) $ \ p_timeb -> do - ftime p_timeb - sec <- (#peek struct timeb,time) p_timeb :: IO CTime - msec <- (#peek struct timeb,millitm) p_timeb :: IO CUShort - return (TOD (realToInteger sec) (fromIntegral msec * 1000000000)) - -#else /* use POSIX time() */ -getClockTime = do - secs <- time nullPtr -- can't fail, according to POSIX - return (TOD (realToInteger secs) 0) - -#endif - --- ----------------------------------------------------------------------------- --- | @'addToClockTime' d t@ adds a time difference @d@ and a --- clock time @t@ to yield a new clock time. The difference @d@ --- may be either positive or negative. 
- -addToClockTime :: TimeDiff -> ClockTime -> ClockTime -addToClockTime (TimeDiff year mon day hour minute sec psec) - (TOD c_sec c_psec) = - let - sec_diff = toInteger sec + - 60 * toInteger minute + - 3600 * toInteger hour + - 24 * 3600 * toInteger day - (d_sec, d_psec) = (c_psec + psec) `quotRem` 1000000000000 - cal = toUTCTime (TOD (c_sec + sec_diff + d_sec) d_psec) - new_mon = fromEnum (ctMonth cal) + r_mon - month' = fst tmp - yr_diff = snd tmp - tmp - | new_mon < 0 = (toEnum (12 + new_mon), (-1)) - | new_mon > 11 = (toEnum (new_mon `mod` 12), 1) - | otherwise = (toEnum new_mon, 0) - - (r_yr, r_mon) = mon `quotRem` 12 - - year' = ctYear cal + year + r_yr + yr_diff - in - toClockTime cal{ctMonth=month', ctYear=year'} - --- | @'diffClockTimes' t1 t2@ returns the difference between two clock --- times @t1@ and @t2@ as a 'TimeDiff'. - -diffClockTimes :: ClockTime -> ClockTime -> TimeDiff --- diffClockTimes is meant to be the dual to `addToClockTime'. --- If you want to have the TimeDiff properly splitted, use --- `normalizeTimeDiff' on this function's result --- --- CAVEAT: see comment of normalizeTimeDiff -diffClockTimes (TOD sa pa) (TOD sb pb) = - noTimeDiff{ tdSec = fromIntegral (sa - sb) - -- FIXME: can handle just 68 years... - , tdPicosec = pa - pb - } - - --- | converts a time difference to normal form. - -normalizeTimeDiff :: TimeDiff -> TimeDiff --- FIXME: handle psecs properly --- FIXME: ?should be called by formatTimeDiff automagically? --- --- when applied to something coming out of `diffClockTimes', you loose --- the duality to `addToClockTime', since a year does not always have --- 365 days, etc. 
--- --- apply this function as late as possible to prevent those "rounding" --- errors -normalizeTimeDiff td = - let - rest0 = toInteger (tdSec td) - + 60 * (toInteger (tdMin td) - + 60 * (toInteger (tdHour td) - + 24 * (toInteger (tdDay td) - + 30 * toInteger (tdMonth td) - + 365 * toInteger (tdYear td)))) - - (diffYears, rest1) = rest0 `quotRem` (365 * 24 * 3600) - (diffMonths, rest2) = rest1 `quotRem` (30 * 24 * 3600) - (diffDays, rest3) = rest2 `quotRem` (24 * 3600) - (diffHours, rest4) = rest3 `quotRem` 3600 - (diffMins, diffSecs) = rest4 `quotRem` 60 - in - td{ tdYear = fromInteger diffYears - , tdMonth = fromInteger diffMonths - , tdDay = fromInteger diffDays - , tdHour = fromInteger diffHours - , tdMin = fromInteger diffMins - , tdSec = fromInteger diffSecs - } - -#ifndef __HUGS__ --- ----------------------------------------------------------------------------- --- How do we deal with timezones on this architecture? - --- The POSIX way to do it is through the global variable tzname[]. --- But that's crap, so we do it The BSD Way if we can: namely use the --- tm_zone and tm_gmtoff fields of struct tm, if they're available. - -zone :: Ptr CTm -> IO (Ptr CChar) -gmtoff :: Ptr CTm -> IO CLong -#if HAVE_TM_ZONE -zone x = (#peek struct tm,tm_zone) x -gmtoff x = (#peek struct tm,tm_gmtoff) x - -#else /* ! HAVE_TM_ZONE */ -# if HAVE_TZNAME || defined(_WIN32) -# if cygwin32_HOST_OS -# define tzname _tzname -# endif -# ifndef mingw32_HOST_OS -foreign import ccall unsafe "time.h &tzname" tzname :: Ptr CString -# else -foreign import ccall unsafe "__hscore_timezone" timezone :: Ptr CLong -foreign import ccall unsafe "__hscore_tzname" tzname :: Ptr CString -# endif -zone x = do - dst <- (#peek struct tm,tm_isdst) x - if dst then peekElemOff tzname 1 else peekElemOff tzname 0 -# else /* ! HAVE_TZNAME */ --- We're in trouble. If you should end up here, please report this as a bug. -# error "Don't know how to get at timezone name on your OS." -# endif /* ! 
HAVE_TZNAME */ - --- Get the offset in secs from UTC, if (struct tm) doesn't supply it. */ -# if HAVE_DECL_ALTZONE -foreign import ccall "&altzone" altzone :: Ptr CTime -foreign import ccall "&timezone" timezone :: Ptr CTime -gmtoff x = do - dst <- (#peek struct tm,tm_isdst) x - tz <- if dst then peek altzone else peek timezone - return (-fromIntegral (realToInteger tz)) -# else /* ! HAVE_DECL_ALTZONE */ - -#if !defined(mingw32_HOST_OS) -foreign import ccall "time.h &timezone" timezone :: Ptr CLong -#endif - --- Assume that DST offset is 1 hour ... -gmtoff x = do - dst <- (#peek struct tm,tm_isdst) x - tz <- peek timezone - -- According to the documentation for tzset(), - -- http://www.opengroup.org/onlinepubs/007908799/xsh/tzset.html - -- timezone offsets are > 0 west of the Prime Meridian. - -- - -- This module assumes the interpretation of tm_gmtoff, i.e., offsets - -- are > 0 East of the Prime Meridian, so flip the sign. - return (- (if dst then tz - 3600 else tz)) -# endif /* ! HAVE_DECL_ALTZONE */ -#endif /* ! HAVE_TM_ZONE */ -#endif /* ! __HUGS__ */ - --- ----------------------------------------------------------------------------- --- | converts an internal clock time to a local time, modified by the --- timezone and daylight savings time settings in force at the time --- of conversion. Because of this dependence on the local environment, --- 'toCalendarTime' is in the 'IO' monad. - -toCalendarTime :: ClockTime -> IO CalendarTime -#ifdef __HUGS__ -toCalendarTime = toCalTime False -#elif HAVE_LOCALTIME_R -toCalendarTime = clockToCalendarTime_reentrant (_throwAwayReturnPointer localtime_r) False -#else -toCalendarTime = clockToCalendarTime_static localtime False -#endif - --- | converts an internal clock time into a 'CalendarTime' in standard --- UTC format. - -toUTCTime :: ClockTime -> CalendarTime -#ifdef __HUGS__ -toUTCTime = unsafePerformIO . toCalTime True -#elif HAVE_GMTIME_R -toUTCTime = unsafePerformIO . 
clockToCalendarTime_reentrant (_throwAwayReturnPointer gmtime_r) True -#else -toUTCTime = unsafePerformIO . clockToCalendarTime_static gmtime True -#endif - -#ifdef __HUGS__ -toCalTime :: Bool -> ClockTime -> IO CalendarTime -toCalTime toUTC (TOD s psecs) - | (s > fromIntegral (maxBound :: Int)) || - (s < fromIntegral (minBound :: Int)) - = error ((if toUTC then "toUTCTime: " else "toCalendarTime: ") ++ - "clock secs out of range") - | otherwise = do - (sec,min,hour,mday,mon,year,wday,yday,isdst,zone,off) <- - toCalTimePrim (if toUTC then 1 else 0) (fromIntegral s) - return (CalendarTime{ ctYear=1900+year - , ctMonth=toEnum mon - , ctDay=mday - , ctHour=hour - , ctMin=min - , ctSec=sec - , ctPicosec=psecs - , ctWDay=toEnum wday - , ctYDay=yday - , ctTZName=(if toUTC then "UTC" else zone) - , ctTZ=(if toUTC then 0 else off) - , ctIsDST=not toUTC && (isdst/=0) - }) -#else /* ! __HUGS__ */ -_throwAwayReturnPointer :: (Ptr CTime -> Ptr CTm -> IO (Ptr CTm)) - -> (Ptr CTime -> Ptr CTm -> IO ( )) -_throwAwayReturnPointer fun x y = fun x y >> return () - -#if !HAVE_LOCALTIME_R || !HAVE_GMTIME_R -clockToCalendarTime_static :: (Ptr CTime -> IO (Ptr CTm)) -> Bool -> ClockTime - -> IO CalendarTime -clockToCalendarTime_static fun is_utc (TOD secs psec) = do - with (fromIntegral secs :: CTime) $ \ p_timer -> do - p_tm <- fun p_timer -- can't fail, according to POSIX - clockToCalendarTime_aux is_utc p_tm psec -#endif - -#if HAVE_LOCALTIME_R || HAVE_GMTIME_R -clockToCalendarTime_reentrant :: (Ptr CTime -> Ptr CTm -> IO ()) -> Bool -> ClockTime - -> IO CalendarTime -clockToCalendarTime_reentrant fun is_utc (TOD secs psec) = do - with (fromIntegral secs :: CTime) $ \ p_timer -> do - allocaBytes (#const sizeof(struct tm)) $ \ p_tm -> do - fun p_timer p_tm - clockToCalendarTime_aux is_utc p_tm psec -#endif - -clockToCalendarTime_aux :: Bool -> Ptr CTm -> Integer -> IO CalendarTime -clockToCalendarTime_aux is_utc p_tm psec = do - sec <- (#peek struct tm,tm_sec ) p_tm :: IO CInt - 
minute <- (#peek struct tm,tm_min ) p_tm :: IO CInt - hour <- (#peek struct tm,tm_hour ) p_tm :: IO CInt - mday <- (#peek struct tm,tm_mday ) p_tm :: IO CInt - mon <- (#peek struct tm,tm_mon ) p_tm :: IO CInt - year <- (#peek struct tm,tm_year ) p_tm :: IO CInt - wday <- (#peek struct tm,tm_wday ) p_tm :: IO CInt - yday <- (#peek struct tm,tm_yday ) p_tm :: IO CInt - isdst <- (#peek struct tm,tm_isdst) p_tm :: IO CInt - zone' <- zone p_tm - tz <- gmtoff p_tm - - tzname' <- peekCString zone' - - let month | mon >= 0 && mon <= 11 = toEnum (fromIntegral mon) - | otherwise = error ("toCalendarTime: illegal month value: " ++ show mon) - - return (CalendarTime - (1900 + fromIntegral year) - month - (fromIntegral mday) - (fromIntegral hour) - (fromIntegral minute) - (fromIntegral sec) - psec - (toEnum (fromIntegral wday)) - (fromIntegral yday) - (if is_utc then "UTC" else tzname') - (if is_utc then 0 else fromIntegral tz) - (if is_utc then False else isdst /= 0)) -#endif /* ! __HUGS__ */ - --- | converts a 'CalendarTime' into the corresponding internal --- 'ClockTime', ignoring the contents of the 'ctWDay', 'ctYDay', --- 'ctTZName' and 'ctIsDST' fields. - -toClockTime :: CalendarTime -> ClockTime -#ifdef __HUGS__ -toClockTime (CalendarTime yr mon mday hour min sec psec - _wday _yday _tzname tz _isdst) = - unsafePerformIO $ do - s <- toClockTimePrim (yr-1900) (fromEnum mon) mday hour min sec tz - return (TOD (fromIntegral s) psec) -#else /* ! __HUGS__ */ -toClockTime (CalendarTime year mon mday hour minute sec psec - _wday _yday _tzname tz _isdst) = - - -- `isDst' causes the date to be wrong by one hour... - -- FIXME: check, whether this works on other arch's than Linux, too... - -- - -- so we set it to (-1) (means `unknown') and let `mktime' determine - -- the real value... 
- let isDst = -1 :: CInt in -- if _isdst then (1::Int) else 0 - - if psec < 0 || psec > 999999999999 then - error "Time.toClockTime: picoseconds out of range" - else if tz < -43200 || tz > 50400 then - error "Time.toClockTime: timezone offset out of range" - else - unsafePerformIO $ do - allocaBytes (#const sizeof(struct tm)) $ \ p_tm -> do - (#poke struct tm,tm_sec ) p_tm (fromIntegral sec :: CInt) - (#poke struct tm,tm_min ) p_tm (fromIntegral minute :: CInt) - (#poke struct tm,tm_hour ) p_tm (fromIntegral hour :: CInt) - (#poke struct tm,tm_mday ) p_tm (fromIntegral mday :: CInt) - (#poke struct tm,tm_mon ) p_tm (fromIntegral (fromEnum mon) :: CInt) - (#poke struct tm,tm_year ) p_tm (fromIntegral year - 1900 :: CInt) - (#poke struct tm,tm_isdst) p_tm isDst - t <- throwIf (== -1) (\_ -> "Time.toClockTime: invalid input") - (mktime p_tm) - -- - -- mktime expects its argument to be in the local timezone, but - -- toUTCTime makes UTC-encoded CalendarTime's ... - -- - -- Since there is no any_tz_struct_tm-to-time_t conversion - -- function, we have to fake one... :-) If not in all, it works in - -- most cases (before, it was the other way round...) - -- - -- Luckily, mktime tells us, what it *thinks* the timezone is, so, - -- to compensate, we add the timezone difference to mktime's - -- result. - -- - gmtoffset <- gmtoff p_tm - let res = realToInteger t - fromIntegral tz + fromIntegral gmtoffset - return (TOD res psec) -#endif /* ! __HUGS__ */ - --- ----------------------------------------------------------------------------- --- Converting time values to strings. - --- | formats calendar times using local conventions. - -calendarTimeToString :: CalendarTime -> String -calendarTimeToString = formatCalendarTime defaultTimeLocale "%c" - --- | formats calendar times using local conventions and a formatting string. --- The formatting string is that understood by the ISO C @strftime()@ --- function. 
- -formatCalendarTime :: TimeLocale -> String -> CalendarTime -> String -formatCalendarTime l fmt cal@(CalendarTime year mon day hour minute sec _ - wday yday tzname' _ _) = - doFmt fmt - where doFmt ('%':'-':cs) = doFmt ('%':cs) -- padding not implemented - doFmt ('%':'_':cs) = doFmt ('%':cs) -- padding not implemented - doFmt ('%':c:cs) = decode c ++ doFmt cs - doFmt (c:cs) = c : doFmt cs - doFmt "" = "" - - decode 'A' = fst (wDays l !! fromEnum wday) -- day of the week, full name - decode 'a' = snd (wDays l !! fromEnum wday) -- day of the week, abbrev. - decode 'B' = fst (months l !! fromEnum mon) -- month, full name - decode 'b' = snd (months l !! fromEnum mon) -- month, abbrev - decode 'h' = snd (months l !! fromEnum mon) -- ditto - decode 'C' = show2 (year `quot` 100) -- century - decode 'c' = doFmt (dateTimeFmt l) -- locale's data and time format. - decode 'D' = doFmt "%m/%d/%y" - decode 'd' = show2 day -- day of the month - decode 'e' = show2' day -- ditto, padded - decode 'H' = show2 hour -- hours, 24-hour clock, padded - decode 'I' = show2 (to12 hour) -- hours, 12-hour clock - decode 'j' = show3 (yday + 1) -- day of the year - decode 'k' = show2' hour -- hours, 24-hour clock, no padding - decode 'l' = show2' (to12 hour) -- hours, 12-hour clock, no padding - decode 'M' = show2 minute -- minutes - decode 'm' = show2 (fromEnum mon+1) -- numeric month - decode 'n' = "\n" - decode 'p' = (if hour < 12 then fst else snd) (amPm l) -- am or pm - decode 'R' = doFmt "%H:%M" - decode 'r' = doFmt (time12Fmt l) - decode 'T' = doFmt "%H:%M:%S" - decode 't' = "\t" - decode 'S' = show2 sec -- seconds - decode 's' = let TOD esecs _ = toClockTime cal in show esecs - -- number of secs since Epoch. - decode 'U' = show2 ((yday + 7 - fromEnum wday) `div` 7) -- week number, starting on Sunday. - decode 'u' = show (let n = fromEnum wday in -- numeric day of the week (1=Monday, 7=Sunday) - if n == 0 then 7 else n) - decode 'V' = -- week number (as per ISO-8601.) 
- let (week, days) = -- [yep, I've always wanted to be able to display that too.] - (yday + 7 - if fromEnum wday > 0 then - fromEnum wday - 1 else 6) `divMod` 7 - in show2 (if days >= 4 then - week+1 - else if week == 0 then 53 else week) - - decode 'W' = -- week number, weeks starting on monday - show2 ((yday + 7 - if fromEnum wday > 0 then - fromEnum wday - 1 else 6) `div` 7) - decode 'w' = show (fromEnum wday) -- numeric day of the week, weeks starting on Sunday. - decode 'X' = doFmt (timeFmt l) -- locale's preferred way of printing time. - decode 'x' = doFmt (dateFmt l) -- locale's preferred way of printing dates. - decode 'Y' = show year -- year, including century. - decode 'y' = show2 (year `rem` 100) -- year, within century. - decode 'Z' = tzname' -- timezone name - decode '%' = "%" - decode c = [c] - - -show2, show2', show3 :: Int -> String -show2 x - | x' < 10 = '0': show x' - | otherwise = show x' - where x' = x `rem` 100 - -show2' x - | x' < 10 = ' ': show x' - | otherwise = show x' - where x' = x `rem` 100 - -show3 x = show (x `quot` 100) ++ show2 (x `rem` 100) - -to12 :: Int -> Int -to12 h = let h' = h `mod` 12 in if h' == 0 then 12 else h' - --- Useful extensions for formatting TimeDiffs. - --- | formats time differences using local conventions. - -timeDiffToString :: TimeDiff -> String -timeDiffToString = formatTimeDiff defaultTimeLocale "%c" - --- | formats time differences using local conventions and a formatting string. --- The formatting string is that understood by the ISO C @strftime()@ --- function. - -formatTimeDiff :: TimeLocale -> String -> TimeDiff -> String -formatTimeDiff l fmt (TimeDiff year month day hour minute sec _) - = doFmt fmt - where - doFmt "" = "" - doFmt ('%':'-':cs) = doFmt ('%':cs) -- padding not implemented - doFmt ('%':'_':cs) = doFmt ('%':cs) -- padding not implemented - doFmt ('%':c:cs) = decode c ++ doFmt cs - doFmt (c:cs) = c : doFmt cs - - decode spec = - case spec of - 'B' -> fst (months l !! 
fromEnum month) - 'b' -> snd (months l !! fromEnum month) - 'h' -> snd (months l !! fromEnum month) - 'c' -> defaultTimeDiffFmt - 'C' -> show2 (year `quot` 100) - 'D' -> doFmt "%m/%d/%y" - 'd' -> show2 day - 'e' -> show2' day - 'H' -> show2 hour - 'I' -> show2 (to12 hour) - 'k' -> show2' hour - 'l' -> show2' (to12 hour) - 'M' -> show2 minute - 'm' -> show2 (fromEnum month + 1) - 'n' -> "\n" - 'p' -> (if hour < 12 then fst else snd) (amPm l) - 'R' -> doFmt "%H:%M" - 'r' -> doFmt (time12Fmt l) - 'T' -> doFmt "%H:%M:%S" - 't' -> "\t" - 'S' -> show2 sec - 's' -> show2 sec -- Implementation-dependent, sez the lib doc.. - 'X' -> doFmt (timeFmt l) - 'x' -> doFmt (dateFmt l) - 'Y' -> show year - 'y' -> show2 (year `rem` 100) - '%' -> "%" - c -> [c] - - defaultTimeDiffFmt = - foldr (\ (v,s) rest -> - (if v /= 0 - then show v ++ ' ':(addS v s) - ++ if null rest then "" else ", " - else "") ++ rest - ) - "" - (zip [year, month, day, hour, minute, sec] (intervals l)) - - addS v s = if abs v == 1 then fst s else snd s - -#ifndef __HUGS__ --- ----------------------------------------------------------------------------- --- Foreign time interface (POSIX) - -type CTm = () -- struct tm - -#if HAVE_LOCALTIME_R -foreign import ccall unsafe "HsTime.h __hscore_localtime_r" - localtime_r :: Ptr CTime -> Ptr CTm -> IO (Ptr CTm) -#else -foreign import ccall unsafe "time.h localtime" - localtime :: Ptr CTime -> IO (Ptr CTm) -#endif -#if HAVE_GMTIME_R -foreign import ccall unsafe "HsTime.h __hscore_gmtime_r" - gmtime_r :: Ptr CTime -> Ptr CTm -> IO (Ptr CTm) -#else -foreign import ccall unsafe "time.h gmtime" - gmtime :: Ptr CTime -> IO (Ptr CTm) -#endif -foreign import ccall unsafe "time.h mktime" - mktime :: Ptr CTm -> IO CTime - -#if HAVE_GETTIMEOFDAY -type CTimeVal = () -type CTimeZone = () -foreign import ccall unsafe "HsTime.h __hscore_gettimeofday" - gettimeofday :: Ptr CTimeVal -> Ptr CTimeZone -> IO CInt -#elif HAVE_FTIME -type CTimeB = () -#ifndef mingw32_HOST_OS -foreign import 
ccall unsafe "time.h ftime" ftime :: Ptr CTimeB -> IO CInt -#else -foreign import ccall unsafe "time.h ftime" ftime :: Ptr CTimeB -> IO () -#endif -#else -foreign import ccall unsafe "time.h time" time :: Ptr CTime -> IO CTime -#endif -#endif /* ! __HUGS__ */ diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar2=/cbits/ancilData.c cabal-install-1.22-1.22.9.0/=unpacked-tar2=/cbits/ancilData.c --- cabal-install-1.22-1.22.6.0/=unpacked-tar2=/cbits/ancilData.c 2014-09-02 18:14:23.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar2=/cbits/ancilData.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,132 +0,0 @@ -/* - * Copyright(c), 2002 The GHC Team. - */ - -#ifdef aix_HOST_OS -#define _LINUX_SOURCE_COMPAT -// Required to get CMSG_SPACE/CMSG_LEN macros. See #265. -// Alternative is to #define COMPAT_43 and use the -// HAVE_STRUCT_MSGHDR_MSG_ACCRIGHTS code instead, but that means -// fiddling with the configure script too. -#endif - -#include "HsNet.h" -#include - -#if HAVE_STRUCT_MSGHDR_MSG_CONTROL || HAVE_STRUCT_MSGHDR_MSG_ACCRIGHTS /* until end */ - -/* - * Support for transmitting file descriptors. - * - * - */ - - -/* - * sendmsg() and recvmsg() wrappers for transmitting - * ancillary socket data. - * - * Doesn't provide the full generality of either, specifically: - * - * - no support for scattered read/writes. - * - only possible to send one ancillary chunk of data at a time. 
- */ - -int -sendFd(int sock, - int outfd) -{ - struct msghdr msg = {0}; - struct iovec iov[1]; - char buf[2]; -#if HAVE_STRUCT_MSGHDR_MSG_ACCRIGHTS - msg.msg_accrights = (void*)&outfd; - msg.msg_accrightslen = sizeof(int); -#else - struct cmsghdr *cmsg; - char ancBuffer[CMSG_SPACE(sizeof(int))]; - char* dPtr; - - msg.msg_control = ancBuffer; - msg.msg_controllen = sizeof(ancBuffer); - - cmsg = CMSG_FIRSTHDR(&msg); - cmsg->cmsg_level = SOL_SOCKET; - cmsg->cmsg_type = SCM_RIGHTS; - cmsg->cmsg_len = CMSG_LEN(sizeof(int)); - dPtr = (char*)CMSG_DATA(cmsg); - - *(int*)dPtr = outfd; - msg.msg_controllen = cmsg->cmsg_len; -#endif - - buf[0] = 0; buf[1] = '\0'; - iov[0].iov_base = buf; - iov[0].iov_len = 2; - - msg.msg_iov = iov; - msg.msg_iovlen = 1; - - return sendmsg(sock,&msg,0); -} - -int -recvFd(int sock) -{ - struct msghdr msg = {0}; - char duffBuf[10]; - int rc; - int len = sizeof(int); - struct iovec iov[1]; -#if HAVE_STRUCT_MSGHDR_MSG_CONTROL - struct cmsghdr *cmsg = NULL; - struct cmsghdr *cptr; -#else - int* fdBuffer; -#endif - int fd; - - iov[0].iov_base = duffBuf; - iov[0].iov_len = sizeof(duffBuf); - msg.msg_iov = iov; - msg.msg_iovlen = 1; - -#if HAVE_STRUCT_MSGHDR_MSG_CONTROL - cmsg = (struct cmsghdr*)malloc(CMSG_SPACE(len)); - if (cmsg==NULL) { - return -1; - } - - msg.msg_control = (void *)cmsg; - msg.msg_controllen = CMSG_LEN(len); -#else - fdBuffer = (int*)malloc(len); - if (fdBuffer) { - msg.msg_accrights = (void *)fdBuffer; - } else { - return -1; - } - msg.msg_accrightslen = len; -#endif - - if ((rc = recvmsg(sock,&msg,0)) < 0) { -#if HAVE_STRUCT_MSGHDR_MSG_CONTROL - free(cmsg); -#else - free(fdBuffer); -#endif - return rc; - } - -#if HAVE_STRUCT_MSGHDR_MSG_CONTROL - cptr = (struct cmsghdr*)CMSG_FIRSTHDR(&msg); - fd = *(int*)CMSG_DATA(cptr); - free(cmsg); -#else - fd = *(int*)fdBuffer; - free(fdBuffer); -#endif - return fd; -} - -#endif diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar2=/cbits/asyncAccept.c 
cabal-install-1.22-1.22.9.0/=unpacked-tar2=/cbits/asyncAccept.c --- cabal-install-1.22-1.22.6.0/=unpacked-tar2=/cbits/asyncAccept.c 2014-09-02 18:14:23.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar2=/cbits/asyncAccept.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,72 +0,0 @@ -/* - * (c) sof, 2003. - */ - -#include "HsNet.h" -#include "HsFFI.h" - -#if defined(HAVE_WINSOCK2_H) && !defined(__CYGWIN__) - -/* all the way to the end */ - -/* - * To support non-blocking accept()s with WinSock, we use the asyncDoProc# - * primop, which lets a Haskell thread call an external routine without - * blocking the progress of other threads. - * - * As can readily be seen, this is a low-level mechanism. - * - */ - -typedef struct AcceptData { - int fdSock; - int newSock; - void* sockAddr; - int size; -} AcceptData; - -/* - * Fill in parameter block that's passed along when the RTS invokes the - * accept()-calling proc below (acceptDoProc()) - */ -void* -newAcceptParams(int sock, - int sz, - void* sockaddr) -{ - AcceptData* data = (AcceptData*)malloc(sizeof(AcceptData)); - if (!data) return NULL; - data->fdSock = sock; - data->newSock = 0; - data->sockAddr = sockaddr; - data->size = sz; - - return data; -} - -/* Accessors for return code and accept()'s socket result. 
*/ - -int -acceptNewSock(void* d) -{ - return (((AcceptData*)d)->newSock); -} - -/* Routine invoked by an RTS worker thread */ -int -acceptDoProc(void* param) -{ - SOCKET s; - - AcceptData* data = (AcceptData*)param; - s = accept( data->fdSock, - data->sockAddr, - &data->size); - data->newSock = s; - if ( s == INVALID_SOCKET ) { - return GetLastError(); - } else { - return 0; - } -} -#endif diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar2=/cbits/HsNet.c cabal-install-1.22-1.22.9.0/=unpacked-tar2=/cbits/HsNet.c --- cabal-install-1.22-1.22.6.0/=unpacked-tar2=/cbits/HsNet.c 2014-09-02 18:14:23.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar2=/cbits/HsNet.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,8 +0,0 @@ -/* ----------------------------------------------------------------------------- - * (c) The University of Glasgow 2002 - * - * static versions of the inline functions from HsNet.h - * -------------------------------------------------------------------------- */ - -#define INLINE -#include "HsNet.h" diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar2=/cbits/initWinSock.c cabal-install-1.22-1.22.9.0/=unpacked-tar2=/cbits/initWinSock.c --- cabal-install-1.22-1.22.6.0/=unpacked-tar2=/cbits/initWinSock.c 2014-09-02 18:14:23.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar2=/cbits/initWinSock.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,52 +0,0 @@ -#include "HsNet.h" -#include "HsFFI.h" - -#if defined(HAVE_WINSOCK2_H) && !defined(__CYGWIN__) - -static int winsock_inited = 0; -static int winsock_uninited = 0; - -/* Initialising WinSock... 
*/ -int -initWinSock () -{ - WORD wVersionRequested; - WSADATA wsaData; - int err; - - if (!winsock_inited) { - wVersionRequested = MAKEWORD( 2, 2 ); - - err = WSAStartup ( wVersionRequested, &wsaData ); - - if ( err != 0 ) { - return err; - } - - if ( LOBYTE( wsaData.wVersion ) != 2 || - HIBYTE( wsaData.wVersion ) != 2 ) { - WSACleanup(); - return (-1); - } - - winsock_inited = 1; - } - return 0; -} - -static void -shutdownHandler(void) -{ - WSACleanup(); -} - -void -shutdownWinSock() -{ - if (!winsock_uninited) { - atexit(shutdownHandler); - winsock_uninited = 1; - } -} - -#endif diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar2=/cbits/winSockErr.c cabal-install-1.22-1.22.9.0/=unpacked-tar2=/cbits/winSockErr.c --- cabal-install-1.22-1.22.6.0/=unpacked-tar2=/cbits/winSockErr.c 2014-09-02 18:14:23.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar2=/cbits/winSockErr.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,76 +0,0 @@ -#include "HsNet.h" -#include "HsFFI.h" - -#if defined(HAVE_WINSOCK2_H) && !defined(__CYGWIN__) -#include - -/* to the end */ - -const char* -getWSErrorDescr(int err) -{ - static char otherErrMsg[256]; - - switch (err) { - case WSAEINTR: return "Interrupted function call (WSAEINTR)"; - case WSAEBADF: return "bad socket descriptor (WSAEBADF)"; - case WSAEACCES: return "Permission denied (WSAEACCESS)"; - case WSAEFAULT: return "Bad address (WSAEFAULT)"; - case WSAEINVAL: return "Invalid argument (WSAEINVAL)"; - case WSAEMFILE: return "Too many open files (WSAEMFILE)"; - case WSAEWOULDBLOCK: return "Resource temporarily unavailable (WSAEWOULDBLOCK)"; - case WSAEINPROGRESS: return "Operation now in progress (WSAEINPROGRESS)"; - case WSAEALREADY: return "Operation already in progress (WSAEALREADY)"; - case WSAENOTSOCK: return "Socket operation on non-socket (WSAENOTSOCK)"; - case WSAEDESTADDRREQ: return "Destination address required (WSAEDESTADDRREQ)"; - case WSAEMSGSIZE: return "Message too long (WSAEMSGSIZE)"; - case WSAEPROTOTYPE: return 
"Protocol wrong type for socket (WSAEPROTOTYPE)"; - case WSAENOPROTOOPT: return "Bad protocol option (WSAENOPROTOOPT)"; - case WSAEPROTONOSUPPORT: return "Protocol not supported (WSAEPROTONOSUPPORT)"; - case WSAESOCKTNOSUPPORT: return "Socket type not supported (WSAESOCKTNOSUPPORT)"; - case WSAEOPNOTSUPP: return "Operation not supported (WSAEOPNOTSUPP)"; - case WSAEPFNOSUPPORT: return "Protocol family not supported (WSAEPFNOSUPPORT)"; - case WSAEAFNOSUPPORT: return "Address family not supported by protocol family (WSAEAFNOSUPPORT)"; - case WSAEADDRINUSE: return "Address already in use (WSAEADDRINUSE)"; - case WSAEADDRNOTAVAIL: return "Cannot assign requested address (WSAEADDRNOTAVAIL)"; - case WSAENETDOWN: return "Network is down (WSAENETDOWN)"; - case WSAENETUNREACH: return "Network is unreachable (WSAENETUNREACH)"; - case WSAENETRESET: return "Network dropped connection on reset (WSAENETRESET)"; - case WSAECONNABORTED: return "Software caused connection abort (WSAECONNABORTED)"; - case WSAECONNRESET: return "Connection reset by peer (WSAECONNRESET)"; - case WSAENOBUFS: return "No buffer space available (WSAENOBUFS)"; - case WSAEISCONN: return "Socket is already connected (WSAEISCONN)"; - case WSAENOTCONN: return "Socket is not connected (WSAENOTCONN)"; - case WSAESHUTDOWN: return "Cannot send after socket shutdown (WSAESHUTDOWN)"; - case WSAETOOMANYREFS: return "Too many references (WSAETOOMANYREFS)"; - case WSAETIMEDOUT: return "Connection timed out (WSAETIMEDOUT)"; - case WSAECONNREFUSED: return "Connection refused (WSAECONNREFUSED)"; - case WSAELOOP: return "Too many levels of symbolic links (WSAELOOP)"; - case WSAENAMETOOLONG: return "Filename too long (WSAENAMETOOLONG)"; - case WSAEHOSTDOWN: return "Host is down (WSAEHOSTDOWN)"; - case WSAEHOSTUNREACH: return "Host is unreachable (WSAEHOSTUNREACH)"; - case WSAENOTEMPTY: return "Resource not empty (WSAENOTEMPTY)"; - case WSAEPROCLIM: return "Too many processes (WSAEPROCLIM)"; - case WSAEUSERS: return "Too 
many users (WSAEUSERS)"; - case WSAEDQUOT: return "Disk quota exceeded (WSAEDQUOT)"; - case WSAESTALE: return "Stale NFS file handle (WSAESTALE)"; - case WSAEREMOTE: return "Too many levels of remote in path (WSAEREMOTE)"; - case WSAEDISCON: return "Graceful shutdown in progress (WSAEDISCON)"; - case WSASYSNOTREADY: return "Network subsystem is unavailable (WSASYSNOTREADY)"; - case WSAVERNOTSUPPORTED: return "Winsock.dll version out of range (WSAVERNOTSUPPORTED)"; - case WSANOTINITIALISED: return "Successful WSAStartup not yet performed (WSANOTINITIALISED)"; -#ifdef WSATYPE_NOT_FOUND - case WSATYPE_NOT_FOUND: return "Class type not found (WSATYPE_NOT_FOUND)"; -#endif - case WSAHOST_NOT_FOUND: return "Host not found (WSAHOST_NOT_FOUND)"; - case WSATRY_AGAIN: return "Nonauthoritative host not found (WSATRY_AGAIN)"; - case WSANO_RECOVERY: return "This is a nonrecoverable error (WSANO_RECOVERY)"; - case WSANO_DATA: return "Valid name, no data record of requested type (WSANO_DATA)"; - default: - sprintf(otherErrMsg, "Unknown WinSock error: %u", err); - return otherErrMsg; - } -} - -#endif - diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar2=/config.guess cabal-install-1.22-1.22.9.0/=unpacked-tar2=/config.guess --- cabal-install-1.22-1.22.6.0/=unpacked-tar2=/config.guess 2014-09-02 18:14:23.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar2=/config.guess 1970-01-01 00:00:00.000000000 +0000 @@ -1,1420 +0,0 @@ -#! /bin/sh -# Attempt to guess a canonical system name. -# Copyright 1992-2014 Free Software Foundation, Inc. - -timestamp='2014-03-23' - -# This file is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. 
-# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, see . -# -# As a special exception to the GNU General Public License, if you -# distribute this file as part of a program that contains a -# configuration script generated by Autoconf, you may include it under -# the same distribution terms that you use for the rest of that -# program. This Exception is an additional permission under section 7 -# of the GNU General Public License, version 3 ("GPLv3"). -# -# Originally written by Per Bothner. -# -# You can get the latest version of this script from: -# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD -# -# Please send patches with a ChangeLog entry to config-patches@gnu.org. - - -me=`echo "$0" | sed -e 's,.*/,,'` - -usage="\ -Usage: $0 [OPTION] - -Output the configuration name of the system \`$me' is run on. - -Operation modes: - -h, --help print this help, then exit - -t, --time-stamp print date of last modification, then exit - -v, --version print version number, then exit - -Report bugs and patches to ." - -version="\ -GNU config.guess ($timestamp) - -Originally written by Per Bothner. -Copyright 1992-2014 Free Software Foundation, Inc. - -This is free software; see the source for copying conditions. There is NO -warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." - -help=" -Try \`$me --help' for more information." 
- -# Parse command line -while test $# -gt 0 ; do - case $1 in - --time-stamp | --time* | -t ) - echo "$timestamp" ; exit ;; - --version | -v ) - echo "$version" ; exit ;; - --help | --h* | -h ) - echo "$usage"; exit ;; - -- ) # Stop option processing - shift; break ;; - - ) # Use stdin as input. - break ;; - -* ) - echo "$me: invalid option $1$help" >&2 - exit 1 ;; - * ) - break ;; - esac -done - -if test $# != 0; then - echo "$me: too many arguments$help" >&2 - exit 1 -fi - -trap 'exit 1' 1 2 15 - -# CC_FOR_BUILD -- compiler used by this script. Note that the use of a -# compiler to aid in system detection is discouraged as it requires -# temporary files to be created and, as you can see below, it is a -# headache to deal with in a portable fashion. - -# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still -# use `HOST_CC' if defined, but it is deprecated. - -# Portable tmp directory creation inspired by the Autoconf team. - -set_cc_for_build=' -trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; -trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; -: ${TMPDIR=/tmp} ; - { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || - { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || - { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || - { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; -dummy=$tmp/dummy ; -tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; -case $CC_FOR_BUILD,$HOST_CC,$CC in - ,,) echo "int x;" > $dummy.c ; - for c in cc gcc c89 c99 ; do - if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then - CC_FOR_BUILD="$c"; break ; - fi ; - done ; - if test x"$CC_FOR_BUILD" = x ; then - CC_FOR_BUILD=no_compiler_found ; - fi - ;; - ,,*) CC_FOR_BUILD=$CC ;; - ,*,*) CC_FOR_BUILD=$HOST_CC ;; -esac ; 
set_cc_for_build= ;' - -# This is needed to find uname on a Pyramid OSx when run in the BSD universe. -# (ghazi@noc.rutgers.edu 1994-08-24) -if (test -f /.attbin/uname) >/dev/null 2>&1 ; then - PATH=$PATH:/.attbin ; export PATH -fi - -UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown -UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown -UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown -UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown - -case "${UNAME_SYSTEM}" in -Linux|GNU|GNU/*) - # If the system lacks a compiler, then just pick glibc. - # We could probably try harder. - LIBC=gnu - - eval $set_cc_for_build - cat <<-EOF > $dummy.c - #include - #if defined(__UCLIBC__) - LIBC=uclibc - #elif defined(__dietlibc__) - LIBC=dietlibc - #else - LIBC=gnu - #endif - EOF - eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC' | sed 's, ,,g'` - ;; -esac - -# Note: order is significant - the case branches are not exclusive. - -case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in - *:NetBSD:*:*) - # NetBSD (nbsd) targets should (where applicable) match one or - # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, - # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently - # switched to ELF, *-*-netbsd* would select the old - # object file format. This provides both forward - # compatibility and a consistent mechanism for selecting the - # object file format. - # - # Note: NetBSD doesn't particularly care about the vendor - # portion of the name. We always set it to "unknown". 
- sysctl="sysctl -n hw.machine_arch" - UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \ - /usr/sbin/$sysctl 2>/dev/null || echo unknown)` - case "${UNAME_MACHINE_ARCH}" in - armeb) machine=armeb-unknown ;; - arm*) machine=arm-unknown ;; - sh3el) machine=shl-unknown ;; - sh3eb) machine=sh-unknown ;; - sh5el) machine=sh5le-unknown ;; - *) machine=${UNAME_MACHINE_ARCH}-unknown ;; - esac - # The Operating System including object format, if it has switched - # to ELF recently, or will in the future. - case "${UNAME_MACHINE_ARCH}" in - arm*|i386|m68k|ns32k|sh3*|sparc|vax) - eval $set_cc_for_build - if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ - | grep -q __ELF__ - then - # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). - # Return netbsd for either. FIX? - os=netbsd - else - os=netbsdelf - fi - ;; - *) - os=netbsd - ;; - esac - # The OS release - # Debian GNU/NetBSD machines have a different userland, and - # thus, need a distinct triplet. However, they do not need - # kernel version information, so it can be replaced with a - # suitable tag, in the style of linux-gnu. - case "${UNAME_VERSION}" in - Debian*) - release='-gnu' - ;; - *) - release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'` - ;; - esac - # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: - # contains redundant information, the shorter form: - # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. 
- echo "${machine}-${os}${release}" - exit ;; - *:Bitrig:*:*) - UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'` - echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE} - exit ;; - *:OpenBSD:*:*) - UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` - echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE} - exit ;; - *:ekkoBSD:*:*) - echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} - exit ;; - *:SolidBSD:*:*) - echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE} - exit ;; - macppc:MirBSD:*:*) - echo powerpc-unknown-mirbsd${UNAME_RELEASE} - exit ;; - *:MirBSD:*:*) - echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} - exit ;; - alpha:OSF1:*:*) - case $UNAME_RELEASE in - *4.0) - UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` - ;; - *5.*) - UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` - ;; - esac - # According to Compaq, /usr/sbin/psrinfo has been available on - # OSF/1 and Tru64 systems produced since 1995. I hope that - # covers most systems running today. This code pipes the CPU - # types through head -n 1, so we only detect the type of CPU 0. 
- ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` - case "$ALPHA_CPU_TYPE" in - "EV4 (21064)") - UNAME_MACHINE="alpha" ;; - "EV4.5 (21064)") - UNAME_MACHINE="alpha" ;; - "LCA4 (21066/21068)") - UNAME_MACHINE="alpha" ;; - "EV5 (21164)") - UNAME_MACHINE="alphaev5" ;; - "EV5.6 (21164A)") - UNAME_MACHINE="alphaev56" ;; - "EV5.6 (21164PC)") - UNAME_MACHINE="alphapca56" ;; - "EV5.7 (21164PC)") - UNAME_MACHINE="alphapca57" ;; - "EV6 (21264)") - UNAME_MACHINE="alphaev6" ;; - "EV6.7 (21264A)") - UNAME_MACHINE="alphaev67" ;; - "EV6.8CB (21264C)") - UNAME_MACHINE="alphaev68" ;; - "EV6.8AL (21264B)") - UNAME_MACHINE="alphaev68" ;; - "EV6.8CX (21264D)") - UNAME_MACHINE="alphaev68" ;; - "EV6.9A (21264/EV69A)") - UNAME_MACHINE="alphaev69" ;; - "EV7 (21364)") - UNAME_MACHINE="alphaev7" ;; - "EV7.9 (21364A)") - UNAME_MACHINE="alphaev79" ;; - esac - # A Pn.n version is a patched version. - # A Vn.n version is a released version. - # A Tn.n version is a released field test version. - # A Xn.n version is an unreleased experimental baselevel. - # 1.2 uses "1.2" for uname -r. - echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` - # Reset EXIT trap before exiting to avoid spurious non-zero exit code. - exitcode=$? - trap '' 0 - exit $exitcode ;; - Alpha\ *:Windows_NT*:*) - # How do we know it's Interix rather than the generic POSIX subsystem? - # Should we change UNAME_MACHINE based on the output of uname instead - # of the specific Alpha model? 
- echo alpha-pc-interix - exit ;; - 21064:Windows_NT:50:3) - echo alpha-dec-winnt3.5 - exit ;; - Amiga*:UNIX_System_V:4.0:*) - echo m68k-unknown-sysv4 - exit ;; - *:[Aa]miga[Oo][Ss]:*:*) - echo ${UNAME_MACHINE}-unknown-amigaos - exit ;; - *:[Mm]orph[Oo][Ss]:*:*) - echo ${UNAME_MACHINE}-unknown-morphos - exit ;; - *:OS/390:*:*) - echo i370-ibm-openedition - exit ;; - *:z/VM:*:*) - echo s390-ibm-zvmoe - exit ;; - *:OS400:*:*) - echo powerpc-ibm-os400 - exit ;; - arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) - echo arm-acorn-riscix${UNAME_RELEASE} - exit ;; - arm*:riscos:*:*|arm*:RISCOS:*:*) - echo arm-unknown-riscos - exit ;; - SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) - echo hppa1.1-hitachi-hiuxmpp - exit ;; - Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) - # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. - if test "`(/bin/universe) 2>/dev/null`" = att ; then - echo pyramid-pyramid-sysv3 - else - echo pyramid-pyramid-bsd - fi - exit ;; - NILE*:*:*:dcosx) - echo pyramid-pyramid-svr4 - exit ;; - DRS?6000:unix:4.0:6*) - echo sparc-icl-nx6 - exit ;; - DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) - case `/usr/bin/uname -p` in - sparc) echo sparc-icl-nx7; exit ;; - esac ;; - s390x:SunOS:*:*) - echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - sun4H:SunOS:5.*:*) - echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) - echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) - echo i386-pc-auroraux${UNAME_RELEASE} - exit ;; - i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) - eval $set_cc_for_build - SUN_ARCH="i386" - # If there is a compiler, see if it is configured for 64-bit objects. - # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. - # This test works for both compilers. 
- if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then - if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ - (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ - grep IS_64BIT_ARCH >/dev/null - then - SUN_ARCH="x86_64" - fi - fi - echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - sun4*:SunOS:6*:*) - # According to config.sub, this is the proper way to canonicalize - # SunOS6. Hard to guess exactly what SunOS6 will be like, but - # it's likely to be more like Solaris than SunOS4. - echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - sun4*:SunOS:*:*) - case "`/usr/bin/arch -k`" in - Series*|S4*) - UNAME_RELEASE=`uname -v` - ;; - esac - # Japanese Language versions have a version number like `4.1.3-JL'. - echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` - exit ;; - sun3*:SunOS:*:*) - echo m68k-sun-sunos${UNAME_RELEASE} - exit ;; - sun*:*:4.2BSD:*) - UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` - test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3 - case "`/bin/arch`" in - sun3) - echo m68k-sun-sunos${UNAME_RELEASE} - ;; - sun4) - echo sparc-sun-sunos${UNAME_RELEASE} - ;; - esac - exit ;; - aushp:SunOS:*:*) - echo sparc-auspex-sunos${UNAME_RELEASE} - exit ;; - # The situation for MiNT is a little confusing. The machine name - # can be virtually everything (everything which is not - # "atarist" or "atariste" at least should have a processor - # > m68000). The system name ranges from "MiNT" over "FreeMiNT" - # to the lowercase version "mint" (or "freemint"). Finally - # the system name "TOS" denotes a system which is actually not - # MiNT. But MiNT is downward compatible to TOS, so this should - # be no problem. 
- atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) - echo m68k-atari-mint${UNAME_RELEASE} - exit ;; - atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) - echo m68k-atari-mint${UNAME_RELEASE} - exit ;; - *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) - echo m68k-atari-mint${UNAME_RELEASE} - exit ;; - milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) - echo m68k-milan-mint${UNAME_RELEASE} - exit ;; - hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) - echo m68k-hades-mint${UNAME_RELEASE} - exit ;; - *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) - echo m68k-unknown-mint${UNAME_RELEASE} - exit ;; - m68k:machten:*:*) - echo m68k-apple-machten${UNAME_RELEASE} - exit ;; - powerpc:machten:*:*) - echo powerpc-apple-machten${UNAME_RELEASE} - exit ;; - RISC*:Mach:*:*) - echo mips-dec-mach_bsd4.3 - exit ;; - RISC*:ULTRIX:*:*) - echo mips-dec-ultrix${UNAME_RELEASE} - exit ;; - VAX*:ULTRIX*:*:*) - echo vax-dec-ultrix${UNAME_RELEASE} - exit ;; - 2020:CLIX:*:* | 2430:CLIX:*:*) - echo clipper-intergraph-clix${UNAME_RELEASE} - exit ;; - mips:*:*:UMIPS | mips:*:*:RISCos) - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c -#ifdef __cplusplus -#include /* for printf() prototype */ - int main (int argc, char *argv[]) { -#else - int main (argc, argv) int argc; char *argv[]; { -#endif - #if defined (host_mips) && defined (MIPSEB) - #if defined (SYSTYPE_SYSV) - printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); - #endif - #if defined (SYSTYPE_SVR4) - printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); - #endif - #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) - printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); - #endif - #endif - exit (-1); - } -EOF - $CC_FOR_BUILD -o $dummy $dummy.c && - dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` && - SYSTEM_NAME=`$dummy $dummyarg` && - { echo "$SYSTEM_NAME"; exit; } - echo mips-mips-riscos${UNAME_RELEASE} - exit ;; - Motorola:PowerMAX_OS:*:*) - echo 
powerpc-motorola-powermax - exit ;; - Motorola:*:4.3:PL8-*) - echo powerpc-harris-powermax - exit ;; - Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) - echo powerpc-harris-powermax - exit ;; - Night_Hawk:Power_UNIX:*:*) - echo powerpc-harris-powerunix - exit ;; - m88k:CX/UX:7*:*) - echo m88k-harris-cxux7 - exit ;; - m88k:*:4*:R4*) - echo m88k-motorola-sysv4 - exit ;; - m88k:*:3*:R3*) - echo m88k-motorola-sysv3 - exit ;; - AViiON:dgux:*:*) - # DG/UX returns AViiON for all architectures - UNAME_PROCESSOR=`/usr/bin/uname -p` - if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ] - then - if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ - [ ${TARGET_BINARY_INTERFACE}x = x ] - then - echo m88k-dg-dgux${UNAME_RELEASE} - else - echo m88k-dg-dguxbcs${UNAME_RELEASE} - fi - else - echo i586-dg-dgux${UNAME_RELEASE} - fi - exit ;; - M88*:DolphinOS:*:*) # DolphinOS (SVR3) - echo m88k-dolphin-sysv3 - exit ;; - M88*:*:R3*:*) - # Delta 88k system running SVR3 - echo m88k-motorola-sysv3 - exit ;; - XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) - echo m88k-tektronix-sysv3 - exit ;; - Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) - echo m68k-tektronix-bsd - exit ;; - *:IRIX*:*:*) - echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` - exit ;; - ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. 
- echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id - exit ;; # Note that: echo "'`uname -s`'" gives 'AIX ' - i*86:AIX:*:*) - echo i386-ibm-aix - exit ;; - ia64:AIX:*:*) - if [ -x /usr/bin/oslevel ] ; then - IBM_REV=`/usr/bin/oslevel` - else - IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} - fi - echo ${UNAME_MACHINE}-ibm-aix${IBM_REV} - exit ;; - *:AIX:2:3) - if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c - #include - - main() - { - if (!__power_pc()) - exit(1); - puts("powerpc-ibm-aix3.2.5"); - exit(0); - } -EOF - if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` - then - echo "$SYSTEM_NAME" - else - echo rs6000-ibm-aix3.2.5 - fi - elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then - echo rs6000-ibm-aix3.2.4 - else - echo rs6000-ibm-aix3.2 - fi - exit ;; - *:AIX:*:[4567]) - IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` - if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then - IBM_ARCH=rs6000 - else - IBM_ARCH=powerpc - fi - if [ -x /usr/bin/oslevel ] ; then - IBM_REV=`/usr/bin/oslevel` - else - IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} - fi - echo ${IBM_ARCH}-ibm-aix${IBM_REV} - exit ;; - *:AIX:*:*) - echo rs6000-ibm-aix - exit ;; - ibmrt:4.4BSD:*|romp-ibm:BSD:*) - echo romp-ibm-bsd4.4 - exit ;; - ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and - echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to - exit ;; # report: romp-ibm BSD 4.3 - *:BOSX:*:*) - echo rs6000-bull-bosx - exit ;; - DPX/2?00:B.O.S.:*:*) - echo m68k-bull-sysv3 - exit ;; - 9000/[34]??:4.3bsd:1.*:*) - echo m68k-hp-bsd - exit ;; - hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) - echo m68k-hp-bsd4.4 - exit ;; - 9000/[34678]??:HP-UX:*:*) - HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` - case "${UNAME_MACHINE}" in - 9000/31? ) HP_ARCH=m68000 ;; - 9000/[34]?? 
) HP_ARCH=m68k ;; - 9000/[678][0-9][0-9]) - if [ -x /usr/bin/getconf ]; then - sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` - sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` - case "${sc_cpu_version}" in - 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0 - 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1 - 532) # CPU_PA_RISC2_0 - case "${sc_kernel_bits}" in - 32) HP_ARCH="hppa2.0n" ;; - 64) HP_ARCH="hppa2.0w" ;; - '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20 - esac ;; - esac - fi - if [ "${HP_ARCH}" = "" ]; then - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c - - #define _HPUX_SOURCE - #include - #include - - int main () - { - #if defined(_SC_KERNEL_BITS) - long bits = sysconf(_SC_KERNEL_BITS); - #endif - long cpu = sysconf (_SC_CPU_VERSION); - - switch (cpu) - { - case CPU_PA_RISC1_0: puts ("hppa1.0"); break; - case CPU_PA_RISC1_1: puts ("hppa1.1"); break; - case CPU_PA_RISC2_0: - #if defined(_SC_KERNEL_BITS) - switch (bits) - { - case 64: puts ("hppa2.0w"); break; - case 32: puts ("hppa2.0n"); break; - default: puts ("hppa2.0"); break; - } break; - #else /* !defined(_SC_KERNEL_BITS) */ - puts ("hppa2.0"); break; - #endif - default: puts ("hppa1.0"); break; - } - exit (0); - } -EOF - (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` - test -z "$HP_ARCH" && HP_ARCH=hppa - fi ;; - esac - if [ ${HP_ARCH} = "hppa2.0w" ] - then - eval $set_cc_for_build - - # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating - # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler - # generating 64-bit code. 
GNU and HP use different nomenclature: - # - # $ CC_FOR_BUILD=cc ./config.guess - # => hppa2.0w-hp-hpux11.23 - # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess - # => hppa64-hp-hpux11.23 - - if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | - grep -q __LP64__ - then - HP_ARCH="hppa2.0w" - else - HP_ARCH="hppa64" - fi - fi - echo ${HP_ARCH}-hp-hpux${HPUX_REV} - exit ;; - ia64:HP-UX:*:*) - HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` - echo ia64-hp-hpux${HPUX_REV} - exit ;; - 3050*:HI-UX:*:*) - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c - #include - int - main () - { - long cpu = sysconf (_SC_CPU_VERSION); - /* The order matters, because CPU_IS_HP_MC68K erroneously returns - true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct - results, however. */ - if (CPU_IS_PA_RISC (cpu)) - { - switch (cpu) - { - case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; - case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; - case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; - default: puts ("hppa-hitachi-hiuxwe2"); break; - } - } - else if (CPU_IS_HP_MC68K (cpu)) - puts ("m68k-hitachi-hiuxwe2"); - else puts ("unknown-hitachi-hiuxwe2"); - exit (0); - } -EOF - $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` && - { echo "$SYSTEM_NAME"; exit; } - echo unknown-hitachi-hiuxwe2 - exit ;; - 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) - echo hppa1.1-hp-bsd - exit ;; - 9000/8??:4.3bsd:*:*) - echo hppa1.0-hp-bsd - exit ;; - *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) - echo hppa1.0-hp-mpeix - exit ;; - hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) - echo hppa1.1-hp-osf - exit ;; - hp8??:OSF1:*:*) - echo hppa1.0-hp-osf - exit ;; - i*86:OSF1:*:*) - if [ -x /usr/sbin/sysversion ] ; then - echo ${UNAME_MACHINE}-unknown-osf1mk - else - echo ${UNAME_MACHINE}-unknown-osf1 - fi - exit ;; - parisc*:Lites*:*:*) - echo hppa1.1-hp-lites - exit ;; - C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) - echo c1-convex-bsd - exit ;; - C2*:ConvexOS:*:* | 
convex:ConvexOS:C2*:*) - if getsysinfo -f scalar_acc - then echo c32-convex-bsd - else echo c2-convex-bsd - fi - exit ;; - C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) - echo c34-convex-bsd - exit ;; - C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) - echo c38-convex-bsd - exit ;; - C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) - echo c4-convex-bsd - exit ;; - CRAY*Y-MP:*:*:*) - echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' - exit ;; - CRAY*[A-Z]90:*:*:*) - echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ - | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ - -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ - -e 's/\.[^.]*$/.X/' - exit ;; - CRAY*TS:*:*:*) - echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' - exit ;; - CRAY*T3E:*:*:*) - echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' - exit ;; - CRAY*SV1:*:*:*) - echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' - exit ;; - *:UNICOS/mp:*:*) - echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' - exit ;; - F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) - FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` - FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` - FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` - echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" - exit ;; - 5000:UNIX_System_V:4.*:*) - FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` - FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'` - echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" - exit ;; - i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) - echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} - exit ;; - sparc*:BSD/OS:*:*) - echo sparc-unknown-bsdi${UNAME_RELEASE} - exit ;; - *:BSD/OS:*:*) - echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} 
- exit ;; - *:FreeBSD:*:*) - UNAME_PROCESSOR=`/usr/bin/uname -p` - case ${UNAME_PROCESSOR} in - amd64) - echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; - *) - echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; - esac - exit ;; - i*:CYGWIN*:*) - echo ${UNAME_MACHINE}-pc-cygwin - exit ;; - *:MINGW64*:*) - echo ${UNAME_MACHINE}-pc-mingw64 - exit ;; - *:MINGW*:*) - echo ${UNAME_MACHINE}-pc-mingw32 - exit ;; - *:MSYS*:*) - echo ${UNAME_MACHINE}-pc-msys - exit ;; - i*:windows32*:*) - # uname -m includes "-pc" on this system. - echo ${UNAME_MACHINE}-mingw32 - exit ;; - i*:PW*:*) - echo ${UNAME_MACHINE}-pc-pw32 - exit ;; - *:Interix*:*) - case ${UNAME_MACHINE} in - x86) - echo i586-pc-interix${UNAME_RELEASE} - exit ;; - authenticamd | genuineintel | EM64T) - echo x86_64-unknown-interix${UNAME_RELEASE} - exit ;; - IA64) - echo ia64-unknown-interix${UNAME_RELEASE} - exit ;; - esac ;; - [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) - echo i${UNAME_MACHINE}-pc-mks - exit ;; - 8664:Windows_NT:*) - echo x86_64-pc-mks - exit ;; - i*:Windows_NT*:* | Pentium*:Windows_NT*:*) - # How do we know it's Interix rather than the generic POSIX subsystem? - # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we - # UNAME_MACHINE based on the output of uname instead of i386? 
- echo i586-pc-interix - exit ;; - i*:UWIN*:*) - echo ${UNAME_MACHINE}-pc-uwin - exit ;; - amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) - echo x86_64-unknown-cygwin - exit ;; - p*:CYGWIN*:*) - echo powerpcle-unknown-cygwin - exit ;; - prep*:SunOS:5.*:*) - echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - *:GNU:*:*) - # the GNU system - echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-${LIBC}`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` - exit ;; - *:GNU/*:*:*) - # other systems with GNU libc and userland - echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC} - exit ;; - i*86:Minix:*:*) - echo ${UNAME_MACHINE}-pc-minix - exit ;; - aarch64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - aarch64_be:Linux:*:*) - UNAME_MACHINE=aarch64_be - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - alpha:Linux:*:*) - case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in - EV5) UNAME_MACHINE=alphaev5 ;; - EV56) UNAME_MACHINE=alphaev56 ;; - PCA56) UNAME_MACHINE=alphapca56 ;; - PCA57) UNAME_MACHINE=alphapca56 ;; - EV6) UNAME_MACHINE=alphaev6 ;; - EV67) UNAME_MACHINE=alphaev67 ;; - EV68*) UNAME_MACHINE=alphaev68 ;; - esac - objdump --private-headers /bin/sh | grep -q ld.so.1 - if test "$?" 
= 0 ; then LIBC="gnulibc1" ; fi - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - arc:Linux:*:* | arceb:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - arm*:Linux:*:*) - eval $set_cc_for_build - if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ - | grep -q __ARM_EABI__ - then - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - else - if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ - | grep -q __ARM_PCS_VFP - then - echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabi - else - echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabihf - fi - fi - exit ;; - avr32*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - cris:Linux:*:*) - echo ${UNAME_MACHINE}-axis-linux-${LIBC} - exit ;; - crisv32:Linux:*:*) - echo ${UNAME_MACHINE}-axis-linux-${LIBC} - exit ;; - frv:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - hexagon:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - i*86:Linux:*:*) - echo ${UNAME_MACHINE}-pc-linux-${LIBC} - exit ;; - ia64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - m32r*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - m68*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - mips:Linux:*:* | mips64:Linux:*:*) - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c - #undef CPU - #undef ${UNAME_MACHINE} - #undef ${UNAME_MACHINE}el - #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) - CPU=${UNAME_MACHINE}el - #else - #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) - CPU=${UNAME_MACHINE} - #else - CPU= - #endif - #endif -EOF - eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'` - test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; } - ;; - openrisc*:Linux:*:*) - echo or1k-unknown-linux-${LIBC} - exit ;; - or32:Linux:*:* | or1k*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - padre:Linux:*:*) - echo 
sparc-unknown-linux-${LIBC} - exit ;; - parisc64:Linux:*:* | hppa64:Linux:*:*) - echo hppa64-unknown-linux-${LIBC} - exit ;; - parisc:Linux:*:* | hppa:Linux:*:*) - # Look for CPU level - case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in - PA7*) echo hppa1.1-unknown-linux-${LIBC} ;; - PA8*) echo hppa2.0-unknown-linux-${LIBC} ;; - *) echo hppa-unknown-linux-${LIBC} ;; - esac - exit ;; - ppc64:Linux:*:*) - echo powerpc64-unknown-linux-${LIBC} - exit ;; - ppc:Linux:*:*) - echo powerpc-unknown-linux-${LIBC} - exit ;; - ppc64le:Linux:*:*) - echo powerpc64le-unknown-linux-${LIBC} - exit ;; - ppcle:Linux:*:*) - echo powerpcle-unknown-linux-${LIBC} - exit ;; - s390:Linux:*:* | s390x:Linux:*:*) - echo ${UNAME_MACHINE}-ibm-linux-${LIBC} - exit ;; - sh64*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - sh*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - sparc:Linux:*:* | sparc64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - tile*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - vax:Linux:*:*) - echo ${UNAME_MACHINE}-dec-linux-${LIBC} - exit ;; - x86_64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - xtensa*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - i*86:DYNIX/ptx:4*:*) - # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. - # earlier versions are messed up and put the nodename in both - # sysname and nodename. - echo i386-sequent-sysv4 - exit ;; - i*86:UNIX_SV:4.2MP:2.*) - # Unixware is an offshoot of SVR4, but it has its own version - # number series starting with 2... - # I am not positive that other SVR4 systems won't match this, - # I just have to hope. -- rms. - # Use sysv4.2uw... so that sysv4* matches it. - echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION} - exit ;; - i*86:OS/2:*:*) - # If we were able to find `uname', then EMX Unix compatibility - # is probably installed. 
- echo ${UNAME_MACHINE}-pc-os2-emx - exit ;; - i*86:XTS-300:*:STOP) - echo ${UNAME_MACHINE}-unknown-stop - exit ;; - i*86:atheos:*:*) - echo ${UNAME_MACHINE}-unknown-atheos - exit ;; - i*86:syllable:*:*) - echo ${UNAME_MACHINE}-pc-syllable - exit ;; - i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) - echo i386-unknown-lynxos${UNAME_RELEASE} - exit ;; - i*86:*DOS:*:*) - echo ${UNAME_MACHINE}-pc-msdosdjgpp - exit ;; - i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*) - UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'` - if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then - echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL} - else - echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL} - fi - exit ;; - i*86:*:5:[678]*) - # UnixWare 7.x, OpenUNIX and OpenServer 6. - case `/bin/uname -X | grep "^Machine"` in - *486*) UNAME_MACHINE=i486 ;; - *Pentium) UNAME_MACHINE=i586 ;; - *Pent*|*Celeron) UNAME_MACHINE=i686 ;; - esac - echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} - exit ;; - i*86:*:3.2:*) - if test -f /usr/options/cb.name; then - UNAME_REL=`sed -n 's/.*Version //p' /dev/null >/dev/null ; then - UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` - (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 - (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ - && UNAME_MACHINE=i586 - (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ - && UNAME_MACHINE=i686 - (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ - && UNAME_MACHINE=i686 - echo ${UNAME_MACHINE}-pc-sco$UNAME_REL - else - echo ${UNAME_MACHINE}-pc-sysv32 - fi - exit ;; - pc:*:*:*) - # Left here for compatibility: - # uname -m prints for DJGPP always 'pc', but it prints nothing about - # the processor, so we play safe by assuming i586. - # Note: whatever this is, it MUST be the same as what config.sub - # prints for the "djgpp" host, or else GDB configury will decide that - # this is a cross-build. 
- echo i586-pc-msdosdjgpp - exit ;; - Intel:Mach:3*:*) - echo i386-pc-mach3 - exit ;; - paragon:*:*:*) - echo i860-intel-osf1 - exit ;; - i860:*:4.*:*) # i860-SVR4 - if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then - echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4 - else # Add other i860-SVR4 vendors below as they are discovered. - echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 - fi - exit ;; - mini*:CTIX:SYS*5:*) - # "miniframe" - echo m68010-convergent-sysv - exit ;; - mc68k:UNIX:SYSTEM5:3.51m) - echo m68k-convergent-sysv - exit ;; - M680?0:D-NIX:5.3:*) - echo m68k-diab-dnix - exit ;; - M68*:*:R3V[5678]*:*) - test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; - 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) - OS_REL='' - test -r /etc/.relid \ - && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` - /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ - && { echo i486-ncr-sysv4.3${OS_REL}; exit; } - /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ - && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; - 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) - /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ - && { echo i486-ncr-sysv4; exit; } ;; - NCR*:*:4.2:* | MPRAS*:*:4.2:*) - OS_REL='.3' - test -r /etc/.relid \ - && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` - /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ - && { echo i486-ncr-sysv4.3${OS_REL}; exit; } - /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ - && { echo i586-ncr-sysv4.3${OS_REL}; exit; } - /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ - && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; - m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) - echo m68k-unknown-lynxos${UNAME_RELEASE} - exit ;; - mc68030:UNIX_System_V:4.*:*) - echo m68k-atari-sysv4 - exit ;; - 
TSUNAMI:LynxOS:2.*:*) - echo sparc-unknown-lynxos${UNAME_RELEASE} - exit ;; - rs6000:LynxOS:2.*:*) - echo rs6000-unknown-lynxos${UNAME_RELEASE} - exit ;; - PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) - echo powerpc-unknown-lynxos${UNAME_RELEASE} - exit ;; - SM[BE]S:UNIX_SV:*:*) - echo mips-dde-sysv${UNAME_RELEASE} - exit ;; - RM*:ReliantUNIX-*:*:*) - echo mips-sni-sysv4 - exit ;; - RM*:SINIX-*:*:*) - echo mips-sni-sysv4 - exit ;; - *:SINIX-*:*:*) - if uname -p 2>/dev/null >/dev/null ; then - UNAME_MACHINE=`(uname -p) 2>/dev/null` - echo ${UNAME_MACHINE}-sni-sysv4 - else - echo ns32k-sni-sysv - fi - exit ;; - PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort - # says - echo i586-unisys-sysv4 - exit ;; - *:UNIX_System_V:4*:FTX*) - # From Gerald Hewes . - # How about differentiating between stratus architectures? -djm - echo hppa1.1-stratus-sysv4 - exit ;; - *:*:*:FTX*) - # From seanf@swdc.stratus.com. - echo i860-stratus-sysv4 - exit ;; - i*86:VOS:*:*) - # From Paul.Green@stratus.com. - echo ${UNAME_MACHINE}-stratus-vos - exit ;; - *:VOS:*:*) - # From Paul.Green@stratus.com. - echo hppa1.1-stratus-vos - exit ;; - mc68*:A/UX:*:*) - echo m68k-apple-aux${UNAME_RELEASE} - exit ;; - news*:NEWS-OS:6*:*) - echo mips-sony-newsos6 - exit ;; - R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) - if [ -d /usr/nec ]; then - echo mips-nec-sysv${UNAME_RELEASE} - else - echo mips-unknown-sysv${UNAME_RELEASE} - fi - exit ;; - BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. - echo powerpc-be-beos - exit ;; - BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. - echo powerpc-apple-beos - exit ;; - BePC:BeOS:*:*) # BeOS running on Intel PC compatible. - echo i586-pc-beos - exit ;; - BePC:Haiku:*:*) # Haiku running on Intel PC compatible. 
- echo i586-pc-haiku - exit ;; - x86_64:Haiku:*:*) - echo x86_64-unknown-haiku - exit ;; - SX-4:SUPER-UX:*:*) - echo sx4-nec-superux${UNAME_RELEASE} - exit ;; - SX-5:SUPER-UX:*:*) - echo sx5-nec-superux${UNAME_RELEASE} - exit ;; - SX-6:SUPER-UX:*:*) - echo sx6-nec-superux${UNAME_RELEASE} - exit ;; - SX-7:SUPER-UX:*:*) - echo sx7-nec-superux${UNAME_RELEASE} - exit ;; - SX-8:SUPER-UX:*:*) - echo sx8-nec-superux${UNAME_RELEASE} - exit ;; - SX-8R:SUPER-UX:*:*) - echo sx8r-nec-superux${UNAME_RELEASE} - exit ;; - Power*:Rhapsody:*:*) - echo powerpc-apple-rhapsody${UNAME_RELEASE} - exit ;; - *:Rhapsody:*:*) - echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} - exit ;; - *:Darwin:*:*) - UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown - eval $set_cc_for_build - if test "$UNAME_PROCESSOR" = unknown ; then - UNAME_PROCESSOR=powerpc - fi - if test `echo "$UNAME_RELEASE" | sed -e 's/\..*//'` -le 10 ; then - if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then - if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ - (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ - grep IS_64BIT_ARCH >/dev/null - then - case $UNAME_PROCESSOR in - i386) UNAME_PROCESSOR=x86_64 ;; - powerpc) UNAME_PROCESSOR=powerpc64 ;; - esac - fi - fi - elif test "$UNAME_PROCESSOR" = i386 ; then - # Avoid executing cc on OS X 10.9, as it ships with a stub - # that puts up a graphical alert prompting to install - # developer tools. Any system running Mac OS X 10.7 or - # later (Darwin 11 and later) is required to have a 64-bit - # processor. This is not true of the ARM version of Darwin - # that Apple uses in portable devices. 
- UNAME_PROCESSOR=x86_64 - fi - echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} - exit ;; - *:procnto*:*:* | *:QNX:[0123456789]*:*) - UNAME_PROCESSOR=`uname -p` - if test "$UNAME_PROCESSOR" = "x86"; then - UNAME_PROCESSOR=i386 - UNAME_MACHINE=pc - fi - echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE} - exit ;; - *:QNX:*:4*) - echo i386-pc-qnx - exit ;; - NEO-?:NONSTOP_KERNEL:*:*) - echo neo-tandem-nsk${UNAME_RELEASE} - exit ;; - NSE-*:NONSTOP_KERNEL:*:*) - echo nse-tandem-nsk${UNAME_RELEASE} - exit ;; - NSR-?:NONSTOP_KERNEL:*:*) - echo nsr-tandem-nsk${UNAME_RELEASE} - exit ;; - *:NonStop-UX:*:*) - echo mips-compaq-nonstopux - exit ;; - BS2000:POSIX*:*:*) - echo bs2000-siemens-sysv - exit ;; - DS/*:UNIX_System_V:*:*) - echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE} - exit ;; - *:Plan9:*:*) - # "uname -m" is not consistent, so use $cputype instead. 386 - # is converted to i386 for consistency with other x86 - # operating systems. - if test "$cputype" = "386"; then - UNAME_MACHINE=i386 - else - UNAME_MACHINE="$cputype" - fi - echo ${UNAME_MACHINE}-unknown-plan9 - exit ;; - *:TOPS-10:*:*) - echo pdp10-unknown-tops10 - exit ;; - *:TENEX:*:*) - echo pdp10-unknown-tenex - exit ;; - KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) - echo pdp10-dec-tops20 - exit ;; - XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) - echo pdp10-xkl-tops20 - exit ;; - *:TOPS-20:*:*) - echo pdp10-unknown-tops20 - exit ;; - *:ITS:*:*) - echo pdp10-unknown-its - exit ;; - SEI:*:*:SEIUX) - echo mips-sei-seiux${UNAME_RELEASE} - exit ;; - *:DragonFly:*:*) - echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` - exit ;; - *:*VMS:*:*) - UNAME_MACHINE=`(uname -p) 2>/dev/null` - case "${UNAME_MACHINE}" in - A*) echo alpha-dec-vms ; exit ;; - I*) echo ia64-dec-vms ; exit ;; - V*) echo vax-dec-vms ; exit ;; - esac ;; - *:XENIX:*:SysV) - echo i386-pc-xenix - exit ;; - i*86:skyos:*:*) - echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 
's/ .*$//' - exit ;; - i*86:rdos:*:*) - echo ${UNAME_MACHINE}-pc-rdos - exit ;; - i*86:AROS:*:*) - echo ${UNAME_MACHINE}-pc-aros - exit ;; - x86_64:VMkernel:*:*) - echo ${UNAME_MACHINE}-unknown-esx - exit ;; -esac - -cat >&2 < in order to provide the needed -information to handle your system. - -config.guess timestamp = $timestamp - -uname -m = `(uname -m) 2>/dev/null || echo unknown` -uname -r = `(uname -r) 2>/dev/null || echo unknown` -uname -s = `(uname -s) 2>/dev/null || echo unknown` -uname -v = `(uname -v) 2>/dev/null || echo unknown` - -/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` -/bin/uname -X = `(/bin/uname -X) 2>/dev/null` - -hostinfo = `(hostinfo) 2>/dev/null` -/bin/universe = `(/bin/universe) 2>/dev/null` -/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` -/bin/arch = `(/bin/arch) 2>/dev/null` -/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` -/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` - -UNAME_MACHINE = ${UNAME_MACHINE} -UNAME_RELEASE = ${UNAME_RELEASE} -UNAME_SYSTEM = ${UNAME_SYSTEM} -UNAME_VERSION = ${UNAME_VERSION} -EOF - -exit 1 - -# Local variables: -# eval: (add-hook 'write-file-hooks 'time-stamp) -# time-stamp-start: "timestamp='" -# time-stamp-format: "%:y-%02m-%02d" -# time-stamp-end: "'" -# End: diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar2=/config.sub cabal-install-1.22-1.22.9.0/=unpacked-tar2=/config.sub --- cabal-install-1.22-1.22.6.0/=unpacked-tar2=/config.sub 2014-09-02 18:14:23.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar2=/config.sub 1970-01-01 00:00:00.000000000 +0000 @@ -1,1790 +0,0 @@ -#! /bin/sh -# Configuration validation subroutine script. -# Copyright 1992-2014 Free Software Foundation, Inc. - -timestamp='2014-04-03' - -# This file is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. 
-# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, see . -# -# As a special exception to the GNU General Public License, if you -# distribute this file as part of a program that contains a -# configuration script generated by Autoconf, you may include it under -# the same distribution terms that you use for the rest of that -# program. This Exception is an additional permission under section 7 -# of the GNU General Public License, version 3 ("GPLv3"). - - -# Please send patches with a ChangeLog entry to config-patches@gnu.org. -# -# Configuration subroutine to validate and canonicalize a configuration type. -# Supply the specified configuration type as an argument. -# If it is invalid, we print an error message on stderr and exit with code 1. -# Otherwise, we print the canonical config type on stdout and succeed. - -# You can get the latest version of this script from: -# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD - -# This file is supposed to be the same for all GNU packages -# and recognize all the CPU types, system types and aliases -# that are meaningful with *any* GNU software. -# Each package is responsible for reporting which valid configurations -# it does not support. The user should be able to distinguish -# a failure to support a valid configuration from a meaningless -# configuration. - -# The goal of this file is to map all the various variations of a given -# machine specification into a single specification in the form: -# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM -# or in some cases, the newer four-part form: -# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM -# It is wrong to echo any other type of specification. 
- -me=`echo "$0" | sed -e 's,.*/,,'` - -usage="\ -Usage: $0 [OPTION] CPU-MFR-OPSYS - $0 [OPTION] ALIAS - -Canonicalize a configuration name. - -Operation modes: - -h, --help print this help, then exit - -t, --time-stamp print date of last modification, then exit - -v, --version print version number, then exit - -Report bugs and patches to ." - -version="\ -GNU config.sub ($timestamp) - -Copyright 1992-2014 Free Software Foundation, Inc. - -This is free software; see the source for copying conditions. There is NO -warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." - -help=" -Try \`$me --help' for more information." - -# Parse command line -while test $# -gt 0 ; do - case $1 in - --time-stamp | --time* | -t ) - echo "$timestamp" ; exit ;; - --version | -v ) - echo "$version" ; exit ;; - --help | --h* | -h ) - echo "$usage"; exit ;; - -- ) # Stop option processing - shift; break ;; - - ) # Use stdin as input. - break ;; - -* ) - echo "$me: invalid option $1$help" - exit 1 ;; - - *local*) - # First pass through any local machine types. - echo $1 - exit ;; - - * ) - break ;; - esac -done - -case $# in - 0) echo "$me: missing argument$help" >&2 - exit 1;; - 1) ;; - *) echo "$me: too many arguments$help" >&2 - exit 1;; -esac - -# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). -# Here we must recognize all the valid KERNEL-OS combinations. 
-maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` -case $maybe_os in - nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \ - linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \ - knetbsd*-gnu* | netbsd*-gnu* | \ - kopensolaris*-gnu* | \ - storm-chaos* | os2-emx* | rtmk-nova*) - os=-$maybe_os - basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` - ;; - android-linux) - os=-linux-android - basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown - ;; - *) - basic_machine=`echo $1 | sed 's/-[^-]*$//'` - if [ $basic_machine != $1 ] - then os=`echo $1 | sed 's/.*-/-/'` - else os=; fi - ;; -esac - -### Let's recognize common machines as not being operating systems so -### that things like config.sub decstation-3100 work. We also -### recognize some manufacturers as not being operating systems, so we -### can provide default operating systems below. -case $os in - -sun*os*) - # Prevent following clause from handling this invalid input. 
- ;; - -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ - -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \ - -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \ - -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ - -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ - -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ - -apple | -axis | -knuth | -cray | -microblaze*) - os= - basic_machine=$1 - ;; - -bluegene*) - os=-cnk - ;; - -sim | -cisco | -oki | -wec | -winbond) - os= - basic_machine=$1 - ;; - -scout) - ;; - -wrs) - os=-vxworks - basic_machine=$1 - ;; - -chorusos*) - os=-chorusos - basic_machine=$1 - ;; - -chorusrdb) - os=-chorusrdb - basic_machine=$1 - ;; - -hiux*) - os=-hiuxwe2 - ;; - -sco6) - os=-sco5v6 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco5) - os=-sco3.2v5 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco4) - os=-sco3.2v4 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco3.2.[4-9]*) - os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco3.2v[4-9]*) - # Don't forget version if it is 3.2v4 or newer. - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco5v6*) - # Don't forget version if it is 3.2v4 or newer. 
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco*) - os=-sco3.2v2 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -udk*) - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -isc) - os=-isc2.2 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -clix*) - basic_machine=clipper-intergraph - ;; - -isc*) - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -lynx*178) - os=-lynxos178 - ;; - -lynx*5) - os=-lynxos5 - ;; - -lynx*) - os=-lynxos - ;; - -ptx*) - basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'` - ;; - -windowsnt*) - os=`echo $os | sed -e 's/windowsnt/winnt/'` - ;; - -psos*) - os=-psos - ;; - -mint | -mint[0-9]*) - basic_machine=m68k-atari - os=-mint - ;; -esac - -# Decode aliases for certain CPU-COMPANY combinations. -case $basic_machine in - # Recognize the basic CPU types without company name. - # Some are omitted here because they have special meanings below. - 1750a | 580 \ - | a29k \ - | aarch64 | aarch64_be \ - | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ - | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ - | am33_2.0 \ - | arc | arceb \ - | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \ - | avr | avr32 \ - | be32 | be64 \ - | bfin \ - | c4x | c8051 | clipper \ - | d10v | d30v | dlx | dsp16xx \ - | epiphany \ - | fido | fr30 | frv \ - | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ - | hexagon \ - | i370 | i860 | i960 | ia64 \ - | ip2k | iq2000 \ - | k1om \ - | le32 | le64 \ - | lm32 \ - | m32c | m32r | m32rle | m68000 | m68k | m88k \ - | maxq | mb | microblaze | microblazeel | mcore | mep | metag \ - | mips | mipsbe | mipseb | mipsel | mipsle \ - | mips16 \ - | mips64 | mips64el \ - | mips64octeon | mips64octeonel \ - | mips64orion | mips64orionel \ - | mips64r5900 | mips64r5900el \ - | mips64vr | mips64vrel \ - | mips64vr4100 | mips64vr4100el \ - | mips64vr4300 | mips64vr4300el \ - | mips64vr5000 | 
mips64vr5000el \ - | mips64vr5900 | mips64vr5900el \ - | mipsisa32 | mipsisa32el \ - | mipsisa32r2 | mipsisa32r2el \ - | mipsisa64 | mipsisa64el \ - | mipsisa64r2 | mipsisa64r2el \ - | mipsisa64sb1 | mipsisa64sb1el \ - | mipsisa64sr71k | mipsisa64sr71kel \ - | mipsr5900 | mipsr5900el \ - | mipstx39 | mipstx39el \ - | mn10200 | mn10300 \ - | moxie \ - | mt \ - | msp430 \ - | nds32 | nds32le | nds32be \ - | nios | nios2 | nios2eb | nios2el \ - | ns16k | ns32k \ - | open8 | or1k | or1knd | or32 \ - | pdp10 | pdp11 | pj | pjl \ - | powerpc | powerpc64 | powerpc64le | powerpcle \ - | pyramid \ - | rl78 | rx \ - | score \ - | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ - | sh64 | sh64le \ - | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \ - | sparcv8 | sparcv9 | sparcv9b | sparcv9v \ - | spu \ - | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \ - | ubicom32 \ - | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \ - | we32k \ - | x86 | xc16x | xstormy16 | xtensa \ - | z8k | z80) - basic_machine=$basic_machine-unknown - ;; - c54x) - basic_machine=tic54x-unknown - ;; - c55x) - basic_machine=tic55x-unknown - ;; - c6x) - basic_machine=tic6x-unknown - ;; - m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip) - basic_machine=$basic_machine-unknown - os=-none - ;; - m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k) - ;; - ms1) - basic_machine=mt-unknown - ;; - - strongarm | thumb | xscale) - basic_machine=arm-unknown - ;; - xgate) - basic_machine=$basic_machine-unknown - os=-none - ;; - xscaleeb) - basic_machine=armeb-unknown - ;; - - xscaleel) - basic_machine=armel-unknown - ;; - - # We use `pc' rather than `unknown' - # because (1) that's what they normally are, and - # (2) the word "unknown" tends to confuse beginning users. - i*86 | x86_64) - basic_machine=$basic_machine-pc - ;; - # Object if more than one company name word. 
- *-*-*) - echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 - exit 1 - ;; - # Recognize the basic CPU types with company name. - 580-* \ - | a29k-* \ - | aarch64-* | aarch64_be-* \ - | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ - | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ - | alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \ - | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ - | avr-* | avr32-* \ - | be32-* | be64-* \ - | bfin-* | bs2000-* \ - | c[123]* | c30-* | [cjt]90-* | c4x-* \ - | c8051-* | clipper-* | craynv-* | cydra-* \ - | d10v-* | d30v-* | dlx-* \ - | elxsi-* \ - | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ - | h8300-* | h8500-* \ - | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ - | hexagon-* \ - | i*86-* | i860-* | i960-* | ia64-* \ - | ip2k-* | iq2000-* \ - | k1om-* \ - | le32-* | le64-* \ - | lm32-* \ - | m32c-* | m32r-* | m32rle-* \ - | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ - | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \ - | microblaze-* | microblazeel-* \ - | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ - | mips16-* \ - | mips64-* | mips64el-* \ - | mips64octeon-* | mips64octeonel-* \ - | mips64orion-* | mips64orionel-* \ - | mips64r5900-* | mips64r5900el-* \ - | mips64vr-* | mips64vrel-* \ - | mips64vr4100-* | mips64vr4100el-* \ - | mips64vr4300-* | mips64vr4300el-* \ - | mips64vr5000-* | mips64vr5000el-* \ - | mips64vr5900-* | mips64vr5900el-* \ - | mipsisa32-* | mipsisa32el-* \ - | mipsisa32r2-* | mipsisa32r2el-* \ - | mipsisa64-* | mipsisa64el-* \ - | mipsisa64r2-* | mipsisa64r2el-* \ - | mipsisa64sb1-* | mipsisa64sb1el-* \ - | mipsisa64sr71k-* | mipsisa64sr71kel-* \ - | mipsr5900-* | mipsr5900el-* \ - | mipstx39-* | mipstx39el-* \ - | mmix-* \ - | mt-* \ - | msp430-* \ - | nds32-* | nds32le-* | nds32be-* \ - | nios-* | nios2-* | nios2eb-* | nios2el-* \ - | none-* | np1-* | ns16k-* | ns32k-* \ - | 
open8-* \ - | or1k*-* \ - | orion-* \ - | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ - | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \ - | pyramid-* \ - | rl78-* | romp-* | rs6000-* | rx-* \ - | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \ - | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ - | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \ - | sparclite-* \ - | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx?-* \ - | tahoe-* \ - | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ - | tile*-* \ - | tron-* \ - | ubicom32-* \ - | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \ - | vax-* \ - | we32k-* \ - | x86-* | x86_64-* | xc16x-* | xps100-* \ - | xstormy16-* | xtensa*-* \ - | ymp-* \ - | z8k-* | z80-*) - ;; - # Recognize the basic CPU types without company name, with glob match. - xtensa*) - basic_machine=$basic_machine-unknown - ;; - # Recognize the various machine names and aliases which stand - # for a CPU type and a company and sometimes even an OS. 
- 386bsd) - basic_machine=i386-unknown - os=-bsd - ;; - 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) - basic_machine=m68000-att - ;; - 3b*) - basic_machine=we32k-att - ;; - a29khif) - basic_machine=a29k-amd - os=-udi - ;; - abacus) - basic_machine=abacus-unknown - ;; - adobe68k) - basic_machine=m68010-adobe - os=-scout - ;; - alliant | fx80) - basic_machine=fx80-alliant - ;; - altos | altos3068) - basic_machine=m68k-altos - ;; - am29k) - basic_machine=a29k-none - os=-bsd - ;; - amd64) - basic_machine=x86_64-pc - ;; - amd64-*) - basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - amdahl) - basic_machine=580-amdahl - os=-sysv - ;; - amiga | amiga-*) - basic_machine=m68k-unknown - ;; - amigaos | amigados) - basic_machine=m68k-unknown - os=-amigaos - ;; - amigaunix | amix) - basic_machine=m68k-unknown - os=-sysv4 - ;; - apollo68) - basic_machine=m68k-apollo - os=-sysv - ;; - apollo68bsd) - basic_machine=m68k-apollo - os=-bsd - ;; - aros) - basic_machine=i386-pc - os=-aros - ;; - aux) - basic_machine=m68k-apple - os=-aux - ;; - balance) - basic_machine=ns32k-sequent - os=-dynix - ;; - blackfin) - basic_machine=bfin-unknown - os=-linux - ;; - blackfin-*) - basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'` - os=-linux - ;; - bluegene*) - basic_machine=powerpc-ibm - os=-cnk - ;; - c54x-*) - basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - c55x-*) - basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - c6x-*) - basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - c90) - basic_machine=c90-cray - os=-unicos - ;; - cegcc) - basic_machine=arm-unknown - os=-cegcc - ;; - convex-c1) - basic_machine=c1-convex - os=-bsd - ;; - convex-c2) - basic_machine=c2-convex - os=-bsd - ;; - convex-c32) - basic_machine=c32-convex - os=-bsd - ;; - convex-c34) - basic_machine=c34-convex - os=-bsd - ;; - convex-c38) - basic_machine=c38-convex - os=-bsd - ;; - cray | j90) - basic_machine=j90-cray - 
os=-unicos - ;; - craynv) - basic_machine=craynv-cray - os=-unicosmp - ;; - cr16 | cr16-*) - basic_machine=cr16-unknown - os=-elf - ;; - crds | unos) - basic_machine=m68k-crds - ;; - crisv32 | crisv32-* | etraxfs*) - basic_machine=crisv32-axis - ;; - cris | cris-* | etrax*) - basic_machine=cris-axis - ;; - crx) - basic_machine=crx-unknown - os=-elf - ;; - da30 | da30-*) - basic_machine=m68k-da30 - ;; - decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn) - basic_machine=mips-dec - ;; - decsystem10* | dec10*) - basic_machine=pdp10-dec - os=-tops10 - ;; - decsystem20* | dec20*) - basic_machine=pdp10-dec - os=-tops20 - ;; - delta | 3300 | motorola-3300 | motorola-delta \ - | 3300-motorola | delta-motorola) - basic_machine=m68k-motorola - ;; - delta88) - basic_machine=m88k-motorola - os=-sysv3 - ;; - dicos) - basic_machine=i686-pc - os=-dicos - ;; - djgpp) - basic_machine=i586-pc - os=-msdosdjgpp - ;; - dpx20 | dpx20-*) - basic_machine=rs6000-bull - os=-bosx - ;; - dpx2* | dpx2*-bull) - basic_machine=m68k-bull - os=-sysv3 - ;; - ebmon29k) - basic_machine=a29k-amd - os=-ebmon - ;; - elxsi) - basic_machine=elxsi-elxsi - os=-bsd - ;; - encore | umax | mmax) - basic_machine=ns32k-encore - ;; - es1800 | OSE68k | ose68k | ose | OSE) - basic_machine=m68k-ericsson - os=-ose - ;; - fx2800) - basic_machine=i860-alliant - ;; - genix) - basic_machine=ns32k-ns - ;; - gmicro) - basic_machine=tron-gmicro - os=-sysv - ;; - go32) - basic_machine=i386-pc - os=-go32 - ;; - h3050r* | hiux*) - basic_machine=hppa1.1-hitachi - os=-hiuxwe2 - ;; - h8300hms) - basic_machine=h8300-hitachi - os=-hms - ;; - h8300xray) - basic_machine=h8300-hitachi - os=-xray - ;; - h8500hms) - basic_machine=h8500-hitachi - os=-hms - ;; - harris) - basic_machine=m88k-harris - os=-sysv3 - ;; - hp300-*) - basic_machine=m68k-hp - ;; - hp300bsd) - basic_machine=m68k-hp - os=-bsd - ;; - hp300hpux) - basic_machine=m68k-hp - os=-hpux - ;; - hp3k9[0-9][0-9] | hp9[0-9][0-9]) - basic_machine=hppa1.0-hp - 
;; - hp9k2[0-9][0-9] | hp9k31[0-9]) - basic_machine=m68000-hp - ;; - hp9k3[2-9][0-9]) - basic_machine=m68k-hp - ;; - hp9k6[0-9][0-9] | hp6[0-9][0-9]) - basic_machine=hppa1.0-hp - ;; - hp9k7[0-79][0-9] | hp7[0-79][0-9]) - basic_machine=hppa1.1-hp - ;; - hp9k78[0-9] | hp78[0-9]) - # FIXME: really hppa2.0-hp - basic_machine=hppa1.1-hp - ;; - hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) - # FIXME: really hppa2.0-hp - basic_machine=hppa1.1-hp - ;; - hp9k8[0-9][13679] | hp8[0-9][13679]) - basic_machine=hppa1.1-hp - ;; - hp9k8[0-9][0-9] | hp8[0-9][0-9]) - basic_machine=hppa1.0-hp - ;; - hppa-next) - os=-nextstep3 - ;; - hppaosf) - basic_machine=hppa1.1-hp - os=-osf - ;; - hppro) - basic_machine=hppa1.1-hp - os=-proelf - ;; - i370-ibm* | ibm*) - basic_machine=i370-ibm - ;; - i*86v32) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-sysv32 - ;; - i*86v4*) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-sysv4 - ;; - i*86v) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-sysv - ;; - i*86sol2) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-solaris2 - ;; - i386mach) - basic_machine=i386-mach - os=-mach - ;; - i386-vsta | vsta) - basic_machine=i386-unknown - os=-vsta - ;; - iris | iris4d) - basic_machine=mips-sgi - case $os in - -irix*) - ;; - *) - os=-irix4 - ;; - esac - ;; - isi68 | isi) - basic_machine=m68k-isi - os=-sysv - ;; - m68knommu) - basic_machine=m68k-unknown - os=-linux - ;; - m68knommu-*) - basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'` - os=-linux - ;; - m88k-omron*) - basic_machine=m88k-omron - ;; - magnum | m3230) - basic_machine=mips-mips - os=-sysv - ;; - merlin) - basic_machine=ns32k-utek - os=-sysv - ;; - microblaze*) - basic_machine=microblaze-xilinx - ;; - mingw64) - basic_machine=x86_64-pc - os=-mingw64 - ;; - mingw32) - basic_machine=i686-pc - os=-mingw32 - ;; - mingw32ce) - basic_machine=arm-unknown - os=-mingw32ce - ;; - miniframe) - 
basic_machine=m68000-convergent - ;; - *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) - basic_machine=m68k-atari - os=-mint - ;; - mips3*-*) - basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'` - ;; - mips3*) - basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown - ;; - monitor) - basic_machine=m68k-rom68k - os=-coff - ;; - morphos) - basic_machine=powerpc-unknown - os=-morphos - ;; - msdos) - basic_machine=i386-pc - os=-msdos - ;; - ms1-*) - basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'` - ;; - msys) - basic_machine=i686-pc - os=-msys - ;; - mvs) - basic_machine=i370-ibm - os=-mvs - ;; - nacl) - basic_machine=le32-unknown - os=-nacl - ;; - ncr3000) - basic_machine=i486-ncr - os=-sysv4 - ;; - netbsd386) - basic_machine=i386-unknown - os=-netbsd - ;; - netwinder) - basic_machine=armv4l-rebel - os=-linux - ;; - news | news700 | news800 | news900) - basic_machine=m68k-sony - os=-newsos - ;; - news1000) - basic_machine=m68030-sony - os=-newsos - ;; - news-3600 | risc-news) - basic_machine=mips-sony - os=-newsos - ;; - necv70) - basic_machine=v70-nec - os=-sysv - ;; - next | m*-next ) - basic_machine=m68k-next - case $os in - -nextstep* ) - ;; - -ns2*) - os=-nextstep2 - ;; - *) - os=-nextstep3 - ;; - esac - ;; - nh3000) - basic_machine=m68k-harris - os=-cxux - ;; - nh[45]000) - basic_machine=m88k-harris - os=-cxux - ;; - nindy960) - basic_machine=i960-intel - os=-nindy - ;; - mon960) - basic_machine=i960-intel - os=-mon960 - ;; - nonstopux) - basic_machine=mips-compaq - os=-nonstopux - ;; - np1) - basic_machine=np1-gould - ;; - neo-tandem) - basic_machine=neo-tandem - ;; - nse-tandem) - basic_machine=nse-tandem - ;; - nsr-tandem) - basic_machine=nsr-tandem - ;; - op50n-* | op60c-*) - basic_machine=hppa1.1-oki - os=-proelf - ;; - openrisc | openrisc-*) - basic_machine=or32-unknown - ;; - os400) - basic_machine=powerpc-ibm - os=-os400 - ;; - OSE68000 | ose68000) - basic_machine=m68000-ericsson - os=-ose - ;; - os68k) - 
basic_machine=m68k-none - os=-os68k - ;; - pa-hitachi) - basic_machine=hppa1.1-hitachi - os=-hiuxwe2 - ;; - paragon) - basic_machine=i860-intel - os=-osf - ;; - parisc) - basic_machine=hppa-unknown - os=-linux - ;; - parisc-*) - basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'` - os=-linux - ;; - pbd) - basic_machine=sparc-tti - ;; - pbb) - basic_machine=m68k-tti - ;; - pc532 | pc532-*) - basic_machine=ns32k-pc532 - ;; - pc98) - basic_machine=i386-pc - ;; - pc98-*) - basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pentium | p5 | k5 | k6 | nexgen | viac3) - basic_machine=i586-pc - ;; - pentiumpro | p6 | 6x86 | athlon | athlon_*) - basic_machine=i686-pc - ;; - pentiumii | pentium2 | pentiumiii | pentium3) - basic_machine=i686-pc - ;; - pentium4) - basic_machine=i786-pc - ;; - pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) - basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pentiumpro-* | p6-* | 6x86-* | athlon-*) - basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) - basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pentium4-*) - basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pn) - basic_machine=pn-gould - ;; - power) basic_machine=power-ibm - ;; - ppc | ppcbe) basic_machine=powerpc-unknown - ;; - ppc-* | ppcbe-*) - basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - ppcle | powerpclittle | ppc-le | powerpc-little) - basic_machine=powerpcle-unknown - ;; - ppcle-* | powerpclittle-*) - basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - ppc64) basic_machine=powerpc64-unknown - ;; - ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - ppc64le | powerpc64little | ppc64-le | powerpc64-little) - basic_machine=powerpc64le-unknown - ;; - ppc64le-* | powerpc64little-*) - basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - 
ps2) - basic_machine=i386-ibm - ;; - pw32) - basic_machine=i586-unknown - os=-pw32 - ;; - rdos | rdos64) - basic_machine=x86_64-pc - os=-rdos - ;; - rdos32) - basic_machine=i386-pc - os=-rdos - ;; - rom68k) - basic_machine=m68k-rom68k - os=-coff - ;; - rm[46]00) - basic_machine=mips-siemens - ;; - rtpc | rtpc-*) - basic_machine=romp-ibm - ;; - s390 | s390-*) - basic_machine=s390-ibm - ;; - s390x | s390x-*) - basic_machine=s390x-ibm - ;; - sa29200) - basic_machine=a29k-amd - os=-udi - ;; - sb1) - basic_machine=mipsisa64sb1-unknown - ;; - sb1el) - basic_machine=mipsisa64sb1el-unknown - ;; - sde) - basic_machine=mipsisa32-sde - os=-elf - ;; - sei) - basic_machine=mips-sei - os=-seiux - ;; - sequent) - basic_machine=i386-sequent - ;; - sh) - basic_machine=sh-hitachi - os=-hms - ;; - sh5el) - basic_machine=sh5le-unknown - ;; - sh64) - basic_machine=sh64-unknown - ;; - sparclite-wrs | simso-wrs) - basic_machine=sparclite-wrs - os=-vxworks - ;; - sps7) - basic_machine=m68k-bull - os=-sysv2 - ;; - spur) - basic_machine=spur-unknown - ;; - st2000) - basic_machine=m68k-tandem - ;; - stratus) - basic_machine=i860-stratus - os=-sysv4 - ;; - strongarm-* | thumb-*) - basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - sun2) - basic_machine=m68000-sun - ;; - sun2os3) - basic_machine=m68000-sun - os=-sunos3 - ;; - sun2os4) - basic_machine=m68000-sun - os=-sunos4 - ;; - sun3os3) - basic_machine=m68k-sun - os=-sunos3 - ;; - sun3os4) - basic_machine=m68k-sun - os=-sunos4 - ;; - sun4os3) - basic_machine=sparc-sun - os=-sunos3 - ;; - sun4os4) - basic_machine=sparc-sun - os=-sunos4 - ;; - sun4sol2) - basic_machine=sparc-sun - os=-solaris2 - ;; - sun3 | sun3-*) - basic_machine=m68k-sun - ;; - sun4) - basic_machine=sparc-sun - ;; - sun386 | sun386i | roadrunner) - basic_machine=i386-sun - ;; - sv1) - basic_machine=sv1-cray - os=-unicos - ;; - symmetry) - basic_machine=i386-sequent - os=-dynix - ;; - t3e) - basic_machine=alphaev5-cray - os=-unicos - ;; - t90) - 
basic_machine=t90-cray - os=-unicos - ;; - tile*) - basic_machine=$basic_machine-unknown - os=-linux-gnu - ;; - tx39) - basic_machine=mipstx39-unknown - ;; - tx39el) - basic_machine=mipstx39el-unknown - ;; - toad1) - basic_machine=pdp10-xkl - os=-tops20 - ;; - tower | tower-32) - basic_machine=m68k-ncr - ;; - tpf) - basic_machine=s390x-ibm - os=-tpf - ;; - udi29k) - basic_machine=a29k-amd - os=-udi - ;; - ultra3) - basic_machine=a29k-nyu - os=-sym1 - ;; - v810 | necv810) - basic_machine=v810-nec - os=-none - ;; - vaxv) - basic_machine=vax-dec - os=-sysv - ;; - vms) - basic_machine=vax-dec - os=-vms - ;; - vpp*|vx|vx-*) - basic_machine=f301-fujitsu - ;; - vxworks960) - basic_machine=i960-wrs - os=-vxworks - ;; - vxworks68) - basic_machine=m68k-wrs - os=-vxworks - ;; - vxworks29k) - basic_machine=a29k-wrs - os=-vxworks - ;; - w65*) - basic_machine=w65-wdc - os=-none - ;; - w89k-*) - basic_machine=hppa1.1-winbond - os=-proelf - ;; - xbox) - basic_machine=i686-pc - os=-mingw32 - ;; - xps | xps100) - basic_machine=xps100-honeywell - ;; - xscale-* | xscalee[bl]-*) - basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'` - ;; - ymp) - basic_machine=ymp-cray - os=-unicos - ;; - z8k-*-coff) - basic_machine=z8k-unknown - os=-sim - ;; - z80-*-coff) - basic_machine=z80-unknown - os=-sim - ;; - none) - basic_machine=none-none - os=-none - ;; - -# Here we handle the default manufacturer of certain CPU types. It is in -# some cases the only manufacturer, in others, it is the most popular. 
- w89k) - basic_machine=hppa1.1-winbond - ;; - op50n) - basic_machine=hppa1.1-oki - ;; - op60c) - basic_machine=hppa1.1-oki - ;; - romp) - basic_machine=romp-ibm - ;; - mmix) - basic_machine=mmix-knuth - ;; - rs6000) - basic_machine=rs6000-ibm - ;; - vax) - basic_machine=vax-dec - ;; - pdp10) - # there are many clones, so DEC is not a safe bet - basic_machine=pdp10-unknown - ;; - pdp11) - basic_machine=pdp11-dec - ;; - we32k) - basic_machine=we32k-att - ;; - sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele) - basic_machine=sh-unknown - ;; - sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v) - basic_machine=sparc-sun - ;; - cydra) - basic_machine=cydra-cydrome - ;; - orion) - basic_machine=orion-highlevel - ;; - orion105) - basic_machine=clipper-highlevel - ;; - mac | mpw | mac-mpw) - basic_machine=m68k-apple - ;; - pmac | pmac-mpw) - basic_machine=powerpc-apple - ;; - *-unknown) - # Make sure to match an already-canonicalized machine name. - ;; - *) - echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 - exit 1 - ;; -esac - -# Here we canonicalize certain aliases for manufacturers. -case $basic_machine in - *-digital*) - basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'` - ;; - *-commodore*) - basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'` - ;; - *) - ;; -esac - -# Decode manufacturer-specific aliases for certain operating systems. - -if [ x"$os" != x"" ] -then -case $os in - # First match some system type aliases - # that might get confused with valid system types. - # -solaris* is a basic system type, with this one exception. - -auroraux) - os=-auroraux - ;; - -solaris1 | -solaris1.*) - os=`echo $os | sed -e 's|solaris1|sunos4|'` - ;; - -solaris) - os=-solaris2 - ;; - -svr4*) - os=-sysv4 - ;; - -unixware*) - os=-sysv4.2uw - ;; - -gnu/linux*) - os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` - ;; - # First accept the basic system types. - # The portable systems comes first. 
- # Each alternative MUST END IN A *, to match a version number. - # -sysv* is not here because it comes later, after sysvr4. - -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ - | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\ - | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \ - | -sym* | -kopensolaris* | -plan9* \ - | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ - | -aos* | -aros* \ - | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ - | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ - | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \ - | -bitrig* | -openbsd* | -solidbsd* \ - | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ - | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ - | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ - | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ - | -chorusos* | -chorusrdb* | -cegcc* \ - | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ - | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \ - | -linux-newlib* | -linux-musl* | -linux-uclibc* \ - | -uxpv* | -beos* | -mpeix* | -udk* \ - | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ - | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ - | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ - | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ - | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ - | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \ - | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* | -tirtos*) - # Remember, each alternative MUST END IN *, to match a version number. 
- ;; - -qnx*) - case $basic_machine in - x86-* | i*86-*) - ;; - *) - os=-nto$os - ;; - esac - ;; - -nto-qnx*) - ;; - -nto*) - os=`echo $os | sed -e 's|nto|nto-qnx|'` - ;; - -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \ - | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \ - | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) - ;; - -mac*) - os=`echo $os | sed -e 's|mac|macos|'` - ;; - -linux-dietlibc) - os=-linux-dietlibc - ;; - -linux*) - os=`echo $os | sed -e 's|linux|linux-gnu|'` - ;; - -sunos5*) - os=`echo $os | sed -e 's|sunos5|solaris2|'` - ;; - -sunos6*) - os=`echo $os | sed -e 's|sunos6|solaris3|'` - ;; - -opened*) - os=-openedition - ;; - -os400*) - os=-os400 - ;; - -wince*) - os=-wince - ;; - -osfrose*) - os=-osfrose - ;; - -osf*) - os=-osf - ;; - -utek*) - os=-bsd - ;; - -dynix*) - os=-bsd - ;; - -acis*) - os=-aos - ;; - -atheos*) - os=-atheos - ;; - -syllable*) - os=-syllable - ;; - -386bsd) - os=-bsd - ;; - -ctix* | -uts*) - os=-sysv - ;; - -nova*) - os=-rtmk-nova - ;; - -ns2 ) - os=-nextstep2 - ;; - -nsk*) - os=-nsk - ;; - # Preserve the version number of sinix5. - -sinix5.*) - os=`echo $os | sed -e 's|sinix|sysv|'` - ;; - -sinix*) - os=-sysv4 - ;; - -tpf*) - os=-tpf - ;; - -triton*) - os=-sysv3 - ;; - -oss*) - os=-sysv3 - ;; - -svr4) - os=-sysv4 - ;; - -svr3) - os=-sysv3 - ;; - -sysvr4) - os=-sysv4 - ;; - # This must come after -sysvr4. - -sysv*) - ;; - -ose*) - os=-ose - ;; - -es1800*) - os=-ose - ;; - -xenix) - os=-xenix - ;; - -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) - os=-mint - ;; - -aros*) - os=-aros - ;; - -zvmoe) - os=-zvmoe - ;; - -dicos*) - os=-dicos - ;; - -nacl*) - ;; - -none) - ;; - *) - # Get rid of the `-' at the beginning of $os. - os=`echo $os | sed 's/[^-]*-//'` - echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2 - exit 1 - ;; -esac -else - -# Here we handle the default operating systems that come with various machines. 
-# The value should be what the vendor currently ships out the door with their -# machine or put another way, the most popular os provided with the machine. - -# Note that if you're going to try to match "-MANUFACTURER" here (say, -# "-sun"), then you have to tell the case statement up towards the top -# that MANUFACTURER isn't an operating system. Otherwise, code above -# will signal an error saying that MANUFACTURER isn't an operating -# system, and we'll never get to this point. - -case $basic_machine in - score-*) - os=-elf - ;; - spu-*) - os=-elf - ;; - *-acorn) - os=-riscix1.2 - ;; - arm*-rebel) - os=-linux - ;; - arm*-semi) - os=-aout - ;; - c4x-* | tic4x-*) - os=-coff - ;; - c8051-*) - os=-elf - ;; - hexagon-*) - os=-elf - ;; - tic54x-*) - os=-coff - ;; - tic55x-*) - os=-coff - ;; - tic6x-*) - os=-coff - ;; - # This must come before the *-dec entry. - pdp10-*) - os=-tops20 - ;; - pdp11-*) - os=-none - ;; - *-dec | vax-*) - os=-ultrix4.2 - ;; - m68*-apollo) - os=-domain - ;; - i386-sun) - os=-sunos4.0.2 - ;; - m68000-sun) - os=-sunos3 - ;; - m68*-cisco) - os=-aout - ;; - mep-*) - os=-elf - ;; - mips*-cisco) - os=-elf - ;; - mips*-*) - os=-elf - ;; - or32-*) - os=-coff - ;; - *-tti) # must be before sparc entry or we get the wrong os. 
- os=-sysv3 - ;; - sparc-* | *-sun) - os=-sunos4.1.1 - ;; - *-be) - os=-beos - ;; - *-haiku) - os=-haiku - ;; - *-ibm) - os=-aix - ;; - *-knuth) - os=-mmixware - ;; - *-wec) - os=-proelf - ;; - *-winbond) - os=-proelf - ;; - *-oki) - os=-proelf - ;; - *-hp) - os=-hpux - ;; - *-hitachi) - os=-hiux - ;; - i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) - os=-sysv - ;; - *-cbm) - os=-amigaos - ;; - *-dg) - os=-dgux - ;; - *-dolphin) - os=-sysv3 - ;; - m68k-ccur) - os=-rtu - ;; - m88k-omron*) - os=-luna - ;; - *-next ) - os=-nextstep - ;; - *-sequent) - os=-ptx - ;; - *-crds) - os=-unos - ;; - *-ns) - os=-genix - ;; - i370-*) - os=-mvs - ;; - *-next) - os=-nextstep3 - ;; - *-gould) - os=-sysv - ;; - *-highlevel) - os=-bsd - ;; - *-encore) - os=-bsd - ;; - *-sgi) - os=-irix - ;; - *-siemens) - os=-sysv4 - ;; - *-masscomp) - os=-rtu - ;; - f30[01]-fujitsu | f700-fujitsu) - os=-uxpv - ;; - *-rom68k) - os=-coff - ;; - *-*bug) - os=-coff - ;; - *-apple) - os=-macos - ;; - *-atari*) - os=-mint - ;; - *) - os=-none - ;; -esac -fi - -# Here we handle the case where we know the os, and the CPU type, but not the -# manufacturer. We pick the logical manufacturer. 
-vendor=unknown -case $basic_machine in - *-unknown) - case $os in - -riscix*) - vendor=acorn - ;; - -sunos*) - vendor=sun - ;; - -cnk*|-aix*) - vendor=ibm - ;; - -beos*) - vendor=be - ;; - -hpux*) - vendor=hp - ;; - -mpeix*) - vendor=hp - ;; - -hiux*) - vendor=hitachi - ;; - -unos*) - vendor=crds - ;; - -dgux*) - vendor=dg - ;; - -luna*) - vendor=omron - ;; - -genix*) - vendor=ns - ;; - -mvs* | -opened*) - vendor=ibm - ;; - -os400*) - vendor=ibm - ;; - -ptx*) - vendor=sequent - ;; - -tpf*) - vendor=ibm - ;; - -vxsim* | -vxworks* | -windiss*) - vendor=wrs - ;; - -aux*) - vendor=apple - ;; - -hms*) - vendor=hitachi - ;; - -mpw* | -macos*) - vendor=apple - ;; - -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) - vendor=atari - ;; - -vos*) - vendor=stratus - ;; - esac - basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"` - ;; -esac - -echo $basic_machine$os -exit - -# Local variables: -# eval: (add-hook 'write-file-hooks 'time-stamp) -# time-stamp-start: "timestamp='" -# time-stamp-format: "%:y-%02m-%02d" -# time-stamp-end: "'" -# End: diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar2=/configure cabal-install-1.22-1.22.9.0/=unpacked-tar2=/configure --- cabal-install-1.22-1.22.6.0/=unpacked-tar2=/configure 2014-09-02 18:14:23.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar2=/configure 1970-01-01 00:00:00.000000000 +0000 @@ -1,5365 +0,0 @@ -#! /bin/sh -# Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.69 for Haskell network package 2.3.0.14. -# -# Report bugs to . -# -# -# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. -# -# -# This configure script is free software; the Free Software Foundation -# gives unlimited permission to copy, distribute and modify it. -## -------------------- ## -## M4sh Initialization. 
## -## -------------------- ## - -# Be more Bourne compatible -DUALCASE=1; export DUALCASE # for MKS sh -if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : - emulate sh - NULLCMD=: - # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which - # is contrary to our usage. Disable this feature. - alias -g '${1+"$@"}'='"$@"' - setopt NO_GLOB_SUBST -else - case `(set -o) 2>/dev/null` in #( - *posix*) : - set -o posix ;; #( - *) : - ;; -esac -fi - - -as_nl=' -' -export as_nl -# Printing a long string crashes Solaris 7 /usr/bin/printf. -as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo -# Prefer a ksh shell builtin over an external printf program on Solaris, -# but without wasting forks for bash or zsh. -if test -z "$BASH_VERSION$ZSH_VERSION" \ - && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='print -r --' - as_echo_n='print -rn --' -elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='printf %s\n' - as_echo_n='printf %s' -else - if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then - as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' - as_echo_n='/usr/ucb/echo -n' - else - as_echo_body='eval expr "X$1" : "X\\(.*\\)"' - as_echo_n_body='eval - arg=$1; - case $arg in #( - *"$as_nl"*) - expr "X$arg" : "X\\(.*\\)$as_nl"; - arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; - esac; - expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" - ' - export as_echo_n_body - as_echo_n='sh -c $as_echo_n_body as_echo' - fi - export as_echo_body - as_echo='sh -c $as_echo_body as_echo' -fi - -# The user is always right. 
-if test "${PATH_SEPARATOR+set}" != set; then - PATH_SEPARATOR=: - (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { - (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || - PATH_SEPARATOR=';' - } -fi - - -# IFS -# We need space, tab and new line, in precisely that order. Quoting is -# there to prevent editors from complaining about space-tab. -# (If _AS_PATH_WALK were called with IFS unset, it would disable word -# splitting by setting IFS to empty value.) -IFS=" "" $as_nl" - -# Find who we are. Look in the path if we contain no directory separator. -as_myself= -case $0 in #(( - *[\\/]* ) as_myself=$0 ;; - *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break - done -IFS=$as_save_IFS - - ;; -esac -# We did not find ourselves, most probably we were run as `sh COMMAND' -# in which case we are not to be found in the path. -if test "x$as_myself" = x; then - as_myself=$0 -fi -if test ! -f "$as_myself"; then - $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 - exit 1 -fi - -# Unset variables that we do not need and which cause bugs (e.g. in -# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" -# suppresses any "Segmentation fault" message there. '((' could -# trigger a bug in pdksh 5.2.14. -for as_var in BASH_ENV ENV MAIL MAILPATH -do eval test x\${$as_var+set} = xset \ - && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : -done -PS1='$ ' -PS2='> ' -PS4='+ ' - -# NLS nuisances. -LC_ALL=C -export LC_ALL -LANGUAGE=C -export LANGUAGE - -# CDPATH. -(unset CDPATH) >/dev/null 2>&1 && unset CDPATH - -# Use a proper internal environment variable to ensure we don't fall - # into an infinite loop, continuously re-executing ourselves. 
- if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then - _as_can_reexec=no; export _as_can_reexec; - # We cannot yet assume a decent shell, so we have to provide a -# neutralization value for shells without unset; and this also -# works around shells that cannot unset nonexistent variables. -# Preserve -v and -x to the replacement shell. -BASH_ENV=/dev/null -ENV=/dev/null -(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV -case $- in # (((( - *v*x* | *x*v* ) as_opts=-vx ;; - *v* ) as_opts=-v ;; - *x* ) as_opts=-x ;; - * ) as_opts= ;; -esac -exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} -# Admittedly, this is quite paranoid, since all the known shells bail -# out after a failed `exec'. -$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 -as_fn_exit 255 - fi - # We don't want this to propagate to other subprocesses. - { _as_can_reexec=; unset _as_can_reexec;} -if test "x$CONFIG_SHELL" = x; then - as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : - emulate sh - NULLCMD=: - # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which - # is contrary to our usage. Disable this feature. - alias -g '\${1+\"\$@\"}'='\"\$@\"' - setopt NO_GLOB_SUBST -else - case \`(set -o) 2>/dev/null\` in #( - *posix*) : - set -o posix ;; #( - *) : - ;; -esac -fi -" - as_required="as_fn_return () { (exit \$1); } -as_fn_success () { as_fn_return 0; } -as_fn_failure () { as_fn_return 1; } -as_fn_ret_success () { return 0; } -as_fn_ret_failure () { return 1; } - -exitcode=0 -as_fn_success || { exitcode=1; echo as_fn_success failed.; } -as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } -as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } -as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } -if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : - -else - exitcode=1; echo positional parameters were not saved. 
-fi -test x\$exitcode = x0 || exit 1 -test -x / || exit 1" - as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO - as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO - eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && - test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 -test \$(( 1 + 1 )) = 2 || exit 1" - if (eval "$as_required") 2>/dev/null; then : - as_have_required=yes -else - as_have_required=no -fi - if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : - -else - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -as_found=false -for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - as_found=: - case $as_dir in #( - /*) - for as_base in sh bash ksh sh5; do - # Try only shells that exist, to save several forks. - as_shell=$as_dir/$as_base - if { test -f "$as_shell" || test -f "$as_shell.exe"; } && - { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : - CONFIG_SHELL=$as_shell as_have_required=yes - if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : - break 2 -fi -fi - done;; - esac - as_found=false -done -$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && - { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : - CONFIG_SHELL=$SHELL as_have_required=yes -fi; } -IFS=$as_save_IFS - - - if test "x$CONFIG_SHELL" != x; then : - export CONFIG_SHELL - # We cannot yet assume a decent shell, so we have to provide a -# neutralization value for shells without unset; and this also -# works around shells that cannot unset nonexistent variables. -# Preserve -v and -x to the replacement shell. 
-BASH_ENV=/dev/null -ENV=/dev/null -(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV -case $- in # (((( - *v*x* | *x*v* ) as_opts=-vx ;; - *v* ) as_opts=-v ;; - *x* ) as_opts=-x ;; - * ) as_opts= ;; -esac -exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} -# Admittedly, this is quite paranoid, since all the known shells bail -# out after a failed `exec'. -$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 -exit 255 -fi - - if test x$as_have_required = xno; then : - $as_echo "$0: This script requires a shell more modern than all" - $as_echo "$0: the shells that I found on your system." - if test x${ZSH_VERSION+set} = xset ; then - $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" - $as_echo "$0: be upgraded to zsh 4.3.4 or later." - else - $as_echo "$0: Please tell bug-autoconf@gnu.org and -$0: libraries@haskell.org about your system, including any -$0: error possibly output before this message. Then install -$0: a modern shell, or manually run the script under such a -$0: shell if you do have one." - fi - exit 1 -fi -fi -fi -SHELL=${CONFIG_SHELL-/bin/sh} -export SHELL -# Unset more variables known to interfere with behavior of common tools. -CLICOLOR_FORCE= GREP_OPTIONS= -unset CLICOLOR_FORCE GREP_OPTIONS - -## --------------------- ## -## M4sh Shell Functions. ## -## --------------------- ## -# as_fn_unset VAR -# --------------- -# Portably unset VAR. -as_fn_unset () -{ - { eval $1=; unset $1;} -} -as_unset=as_fn_unset - -# as_fn_set_status STATUS -# ----------------------- -# Set $? to STATUS, without forking. -as_fn_set_status () -{ - return $1 -} # as_fn_set_status - -# as_fn_exit STATUS -# ----------------- -# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. -as_fn_exit () -{ - set +e - as_fn_set_status $1 - exit $1 -} # as_fn_exit - -# as_fn_mkdir_p -# ------------- -# Create "$as_dir" as a directory, including parents if necessary. 
-as_fn_mkdir_p () -{ - - case $as_dir in #( - -*) as_dir=./$as_dir;; - esac - test -d "$as_dir" || eval $as_mkdir_p || { - as_dirs= - while :; do - case $as_dir in #( - *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( - *) as_qdir=$as_dir;; - esac - as_dirs="'$as_qdir' $as_dirs" - as_dir=`$as_dirname -- "$as_dir" || -$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_dir" : 'X\(//\)[^/]' \| \ - X"$as_dir" : 'X\(//\)$' \| \ - X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$as_dir" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - test -d "$as_dir" && break - done - test -z "$as_dirs" || eval "mkdir $as_dirs" - } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" - - -} # as_fn_mkdir_p - -# as_fn_executable_p FILE -# ----------------------- -# Test if FILE is an executable regular file. -as_fn_executable_p () -{ - test -f "$1" && test -x "$1" -} # as_fn_executable_p -# as_fn_append VAR VALUE -# ---------------------- -# Append the text in VALUE to the end of the definition contained in VAR. Take -# advantage of any shell optimizations that allow amortized linear growth over -# repeated appends, instead of the typical quadratic growth present in naive -# implementations. -if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : - eval 'as_fn_append () - { - eval $1+=\$2 - }' -else - as_fn_append () - { - eval $1=\$$1\$2 - } -fi # as_fn_append - -# as_fn_arith ARG... -# ------------------ -# Perform arithmetic evaluation on the ARGs, and store the result in the -# global $as_val. Take advantage of shells that can avoid forks. The arguments -# must be portable across $(()) and expr. -if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : - eval 'as_fn_arith () - { - as_val=$(( $* )) - }' -else - as_fn_arith () - { - as_val=`expr "$@" || test $? 
-eq 1` - } -fi # as_fn_arith - - -# as_fn_error STATUS ERROR [LINENO LOG_FD] -# ---------------------------------------- -# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are -# provided, also output the error to LOG_FD, referencing LINENO. Then exit the -# script with STATUS, using 1 if that was 0. -as_fn_error () -{ - as_status=$1; test $as_status -eq 0 && as_status=1 - if test "$4"; then - as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 - fi - $as_echo "$as_me: error: $2" >&2 - as_fn_exit $as_status -} # as_fn_error - -if expr a : '\(a\)' >/dev/null 2>&1 && - test "X`expr 00001 : '.*\(...\)'`" = X001; then - as_expr=expr -else - as_expr=false -fi - -if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then - as_basename=basename -else - as_basename=false -fi - -if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then - as_dirname=dirname -else - as_dirname=false -fi - -as_me=`$as_basename -- "$0" || -$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ - X"$0" : 'X\(//\)$' \| \ - X"$0" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X/"$0" | - sed '/^.*\/\([^/][^/]*\)\/*$/{ - s//\1/ - q - } - /^X\/\(\/\/\)$/{ - s//\1/ - q - } - /^X\/\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - -# Avoid depending upon Character Ranges. -as_cr_letters='abcdefghijklmnopqrstuvwxyz' -as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' -as_cr_Letters=$as_cr_letters$as_cr_LETTERS -as_cr_digits='0123456789' -as_cr_alnum=$as_cr_Letters$as_cr_digits - - - as_lineno_1=$LINENO as_lineno_1a=$LINENO - as_lineno_2=$LINENO as_lineno_2a=$LINENO - eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && - test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { - # Blame Lee E. McMahon (1931-1989) for sed's syntax. 
:-) - sed -n ' - p - /[$]LINENO/= - ' <$as_myself | - sed ' - s/[$]LINENO.*/&-/ - t lineno - b - :lineno - N - :loop - s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ - t loop - s/-\n.*// - ' >$as_me.lineno && - chmod +x "$as_me.lineno" || - { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } - - # If we had to re-execute with $CONFIG_SHELL, we're ensured to have - # already done that, so ensure we don't try to do so again and fall - # in an infinite loop. This has already happened in practice. - _as_can_reexec=no; export _as_can_reexec - # Don't try to exec as it changes $[0], causing all sort of problems - # (the dirname of $[0] is not the place where we might find the - # original and so on. Autoconf is especially sensitive to this). - . "./$as_me.lineno" - # Exit status is that of the last command. - exit -} - -ECHO_C= ECHO_N= ECHO_T= -case `echo -n x` in #((((( --n*) - case `echo 'xy\c'` in - *c*) ECHO_T=' ';; # ECHO_T is single tab character. - xy) ECHO_C='\c';; - *) echo `echo ksh88 bug on AIX 6.1` > /dev/null - ECHO_T=' ';; - esac;; -*) - ECHO_N='-n';; -esac - -rm -f conf$$ conf$$.exe conf$$.file -if test -d conf$$.dir; then - rm -f conf$$.dir/conf$$.file -else - rm -f conf$$.dir - mkdir conf$$.dir 2>/dev/null -fi -if (echo >conf$$.file) 2>/dev/null; then - if ln -s conf$$.file conf$$ 2>/dev/null; then - as_ln_s='ln -s' - # ... but there are two gotchas: - # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. - # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. - # In both cases, we have to default to `cp -pR'. - ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || - as_ln_s='cp -pR' - elif ln conf$$.file conf$$ 2>/dev/null; then - as_ln_s=ln - else - as_ln_s='cp -pR' - fi -else - as_ln_s='cp -pR' -fi -rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file -rmdir conf$$.dir 2>/dev/null - -if mkdir -p . 
2>/dev/null; then - as_mkdir_p='mkdir -p "$as_dir"' -else - test -d ./-p && rmdir ./-p - as_mkdir_p=false -fi - -as_test_x='test -x' -as_executable_p=as_fn_executable_p - -# Sed expression to map a string onto a valid CPP name. -as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" - -# Sed expression to map a string onto a valid variable name. -as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" - - -test -n "$DJDIR" || exec 7<&0 &1 - -# Name of the host. -# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, -# so uname gets run too. -ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` - -# -# Initializations. -# -ac_default_prefix=/usr/local -ac_clean_files= -ac_config_libobj_dir=. -LIBOBJS= -cross_compiling=no -subdirs= -MFLAGS= -MAKEFLAGS= - -# Identity of this package. -PACKAGE_NAME='Haskell network package' -PACKAGE_TARNAME='network' -PACKAGE_VERSION='2.3.0.14' -PACKAGE_STRING='Haskell network package 2.3.0.14' -PACKAGE_BUGREPORT='libraries@haskell.org' -PACKAGE_URL='' - -ac_unique_file="include/HsNet.h" -# Factoring default headers for most tests. 
-ac_includes_default="\ -#include -#ifdef HAVE_SYS_TYPES_H -# include -#endif -#ifdef HAVE_SYS_STAT_H -# include -#endif -#ifdef STDC_HEADERS -# include -# include -#else -# ifdef HAVE_STDLIB_H -# include -# endif -#endif -#ifdef HAVE_STRING_H -# if !defined STDC_HEADERS && defined HAVE_MEMORY_H -# include -# endif -# include -#endif -#ifdef HAVE_STRINGS_H -# include -#endif -#ifdef HAVE_INTTYPES_H -# include -#endif -#ifdef HAVE_STDINT_H -# include -#endif -#ifdef HAVE_UNISTD_H -# include -#endif" - -ac_subst_vars='LTLIBOBJS -LIBOBJS -EXTRA_SRCS -EXTRA_LIBS -EXTRA_CPPFLAGS -CALLCONV -EGREP -GREP -CPP -OBJEXT -EXEEXT -ac_ct_CC -CPPFLAGS -LDFLAGS -CFLAGS -CC -host_os -host_vendor -host_cpu -host -build_os -build_vendor -build_cpu -build -target_alias -host_alias -build_alias -LIBS -ECHO_T -ECHO_N -ECHO_C -DEFS -mandir -localedir -libdir -psdir -pdfdir -dvidir -htmldir -infodir -docdir -oldincludedir -includedir -localstatedir -sharedstatedir -sysconfdir -datadir -datarootdir -libexecdir -sbindir -bindir -program_transform_name -prefix -exec_prefix -PACKAGE_URL -PACKAGE_BUGREPORT -PACKAGE_STRING -PACKAGE_VERSION -PACKAGE_TARNAME -PACKAGE_NAME -PATH_SEPARATOR -SHELL' -ac_subst_files='' -ac_user_opts=' -enable_option_checking -with_cc -' - ac_precious_vars='build_alias -host_alias -target_alias -CC -CFLAGS -LDFLAGS -LIBS -CPPFLAGS -CPP' - - -# Initialize some variables set by options. -ac_init_help= -ac_init_version=false -ac_unrecognized_opts= -ac_unrecognized_sep= -# The variables have the same names as the options, with -# dashes changed to underlines. -cache_file=/dev/null -exec_prefix=NONE -no_create= -no_recursion= -prefix=NONE -program_prefix=NONE -program_suffix=NONE -program_transform_name=s,x,x, -silent= -site= -srcdir= -verbose= -x_includes=NONE -x_libraries=NONE - -# Installation directory options. 
-# These are left unexpanded so users can "make install exec_prefix=/foo" -# and all the variables that are supposed to be based on exec_prefix -# by default will actually change. -# Use braces instead of parens because sh, perl, etc. also accept them. -# (The list follows the same order as the GNU Coding Standards.) -bindir='${exec_prefix}/bin' -sbindir='${exec_prefix}/sbin' -libexecdir='${exec_prefix}/libexec' -datarootdir='${prefix}/share' -datadir='${datarootdir}' -sysconfdir='${prefix}/etc' -sharedstatedir='${prefix}/com' -localstatedir='${prefix}/var' -includedir='${prefix}/include' -oldincludedir='/usr/include' -docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' -infodir='${datarootdir}/info' -htmldir='${docdir}' -dvidir='${docdir}' -pdfdir='${docdir}' -psdir='${docdir}' -libdir='${exec_prefix}/lib' -localedir='${datarootdir}/locale' -mandir='${datarootdir}/man' - -ac_prev= -ac_dashdash= -for ac_option -do - # If the previous option needs an argument, assign it. - if test -n "$ac_prev"; then - eval $ac_prev=\$ac_option - ac_prev= - continue - fi - - case $ac_option in - *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; - *=) ac_optarg= ;; - *) ac_optarg=yes ;; - esac - - # Accept the important Cygnus configure options, so we can diagnose typos. 
- - case $ac_dashdash$ac_option in - --) - ac_dashdash=yes ;; - - -bindir | --bindir | --bindi | --bind | --bin | --bi) - ac_prev=bindir ;; - -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) - bindir=$ac_optarg ;; - - -build | --build | --buil | --bui | --bu) - ac_prev=build_alias ;; - -build=* | --build=* | --buil=* | --bui=* | --bu=*) - build_alias=$ac_optarg ;; - - -cache-file | --cache-file | --cache-fil | --cache-fi \ - | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) - ac_prev=cache_file ;; - -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ - | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) - cache_file=$ac_optarg ;; - - --config-cache | -C) - cache_file=config.cache ;; - - -datadir | --datadir | --datadi | --datad) - ac_prev=datadir ;; - -datadir=* | --datadir=* | --datadi=* | --datad=*) - datadir=$ac_optarg ;; - - -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ - | --dataroo | --dataro | --datar) - ac_prev=datarootdir ;; - -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ - | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) - datarootdir=$ac_optarg ;; - - -disable-* | --disable-*) - ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? 
"invalid feature name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"enable_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval enable_$ac_useropt=no ;; - - -docdir | --docdir | --docdi | --doc | --do) - ac_prev=docdir ;; - -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) - docdir=$ac_optarg ;; - - -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) - ac_prev=dvidir ;; - -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) - dvidir=$ac_optarg ;; - - -enable-* | --enable-*) - ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? "invalid feature name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"enable_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval enable_$ac_useropt=\$ac_optarg ;; - - -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ - | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ - | --exec | --exe | --ex) - ac_prev=exec_prefix ;; - -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ - | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ - | --exec=* | --exe=* | --ex=*) - exec_prefix=$ac_optarg ;; - - -gas | --gas | --ga | --g) - # Obsolete; use --with-gas. 
- with_gas=yes ;; - - -help | --help | --hel | --he | -h) - ac_init_help=long ;; - -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) - ac_init_help=recursive ;; - -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) - ac_init_help=short ;; - - -host | --host | --hos | --ho) - ac_prev=host_alias ;; - -host=* | --host=* | --hos=* | --ho=*) - host_alias=$ac_optarg ;; - - -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) - ac_prev=htmldir ;; - -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ - | --ht=*) - htmldir=$ac_optarg ;; - - -includedir | --includedir | --includedi | --included | --include \ - | --includ | --inclu | --incl | --inc) - ac_prev=includedir ;; - -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ - | --includ=* | --inclu=* | --incl=* | --inc=*) - includedir=$ac_optarg ;; - - -infodir | --infodir | --infodi | --infod | --info | --inf) - ac_prev=infodir ;; - -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) - infodir=$ac_optarg ;; - - -libdir | --libdir | --libdi | --libd) - ac_prev=libdir ;; - -libdir=* | --libdir=* | --libdi=* | --libd=*) - libdir=$ac_optarg ;; - - -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ - | --libexe | --libex | --libe) - ac_prev=libexecdir ;; - -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ - | --libexe=* | --libex=* | --libe=*) - libexecdir=$ac_optarg ;; - - -localedir | --localedir | --localedi | --localed | --locale) - ac_prev=localedir ;; - -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) - localedir=$ac_optarg ;; - - -localstatedir | --localstatedir | --localstatedi | --localstated \ - | --localstate | --localstat | --localsta | --localst | --locals) - ac_prev=localstatedir ;; - -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ - | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) - localstatedir=$ac_optarg ;; - - 
-mandir | --mandir | --mandi | --mand | --man | --ma | --m) - ac_prev=mandir ;; - -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) - mandir=$ac_optarg ;; - - -nfp | --nfp | --nf) - # Obsolete; use --without-fp. - with_fp=no ;; - - -no-create | --no-create | --no-creat | --no-crea | --no-cre \ - | --no-cr | --no-c | -n) - no_create=yes ;; - - -no-recursion | --no-recursion | --no-recursio | --no-recursi \ - | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) - no_recursion=yes ;; - - -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ - | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ - | --oldin | --oldi | --old | --ol | --o) - ac_prev=oldincludedir ;; - -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ - | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ - | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) - oldincludedir=$ac_optarg ;; - - -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) - ac_prev=prefix ;; - -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) - prefix=$ac_optarg ;; - - -program-prefix | --program-prefix | --program-prefi | --program-pref \ - | --program-pre | --program-pr | --program-p) - ac_prev=program_prefix ;; - -program-prefix=* | --program-prefix=* | --program-prefi=* \ - | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) - program_prefix=$ac_optarg ;; - - -program-suffix | --program-suffix | --program-suffi | --program-suff \ - | --program-suf | --program-su | --program-s) - ac_prev=program_suffix ;; - -program-suffix=* | --program-suffix=* | --program-suffi=* \ - | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) - program_suffix=$ac_optarg ;; - - -program-transform-name | --program-transform-name \ - | --program-transform-nam | --program-transform-na \ - | --program-transform-n | --program-transform- \ - | --program-transform | 
--program-transfor \ - | --program-transfo | --program-transf \ - | --program-trans | --program-tran \ - | --progr-tra | --program-tr | --program-t) - ac_prev=program_transform_name ;; - -program-transform-name=* | --program-transform-name=* \ - | --program-transform-nam=* | --program-transform-na=* \ - | --program-transform-n=* | --program-transform-=* \ - | --program-transform=* | --program-transfor=* \ - | --program-transfo=* | --program-transf=* \ - | --program-trans=* | --program-tran=* \ - | --progr-tra=* | --program-tr=* | --program-t=*) - program_transform_name=$ac_optarg ;; - - -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) - ac_prev=pdfdir ;; - -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) - pdfdir=$ac_optarg ;; - - -psdir | --psdir | --psdi | --psd | --ps) - ac_prev=psdir ;; - -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) - psdir=$ac_optarg ;; - - -q | -quiet | --quiet | --quie | --qui | --qu | --q \ - | -silent | --silent | --silen | --sile | --sil) - silent=yes ;; - - -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) - ac_prev=sbindir ;; - -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ - | --sbi=* | --sb=*) - sbindir=$ac_optarg ;; - - -sharedstatedir | --sharedstatedir | --sharedstatedi \ - | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ - | --sharedst | --shareds | --shared | --share | --shar \ - | --sha | --sh) - ac_prev=sharedstatedir ;; - -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ - | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ - | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ - | --sha=* | --sh=*) - sharedstatedir=$ac_optarg ;; - - -site | --site | --sit) - ac_prev=site ;; - -site=* | --site=* | --sit=*) - site=$ac_optarg ;; - - -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) - ac_prev=srcdir ;; - -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) - srcdir=$ac_optarg ;; - - 
-sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ - | --syscon | --sysco | --sysc | --sys | --sy) - ac_prev=sysconfdir ;; - -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ - | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) - sysconfdir=$ac_optarg ;; - - -target | --target | --targe | --targ | --tar | --ta | --t) - ac_prev=target_alias ;; - -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) - target_alias=$ac_optarg ;; - - -v | -verbose | --verbose | --verbos | --verbo | --verb) - verbose=yes ;; - - -version | --version | --versio | --versi | --vers | -V) - ac_init_version=: ;; - - -with-* | --with-*) - ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? "invalid package name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"with_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval with_$ac_useropt=\$ac_optarg ;; - - -without-* | --without-*) - ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? "invalid package name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"with_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval with_$ac_useropt=no ;; - - --x) - # Obsolete; use --with-x. 
- with_x=yes ;; - - -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ - | --x-incl | --x-inc | --x-in | --x-i) - ac_prev=x_includes ;; - -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ - | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) - x_includes=$ac_optarg ;; - - -x-libraries | --x-libraries | --x-librarie | --x-librari \ - | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) - ac_prev=x_libraries ;; - -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ - | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) - x_libraries=$ac_optarg ;; - - -*) as_fn_error $? "unrecognized option: \`$ac_option' -Try \`$0 --help' for more information" - ;; - - *=*) - ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` - # Reject names that are not valid shell variable names. - case $ac_envvar in #( - '' | [0-9]* | *[!_$as_cr_alnum]* ) - as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; - esac - eval $ac_envvar=\$ac_optarg - export $ac_envvar ;; - - *) - # FIXME: should be removed in autoconf 3.0. - $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 - expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && - $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 - : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" - ;; - - esac -done - -if test -n "$ac_prev"; then - ac_option=--`echo $ac_prev | sed 's/_/-/g'` - as_fn_error $? "missing argument to $ac_option" -fi - -if test -n "$ac_unrecognized_opts"; then - case $enable_option_checking in - no) ;; - fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; - *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; - esac -fi - -# Check all directory arguments for consistency. 
-for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ - datadir sysconfdir sharedstatedir localstatedir includedir \ - oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ - libdir localedir mandir -do - eval ac_val=\$$ac_var - # Remove trailing slashes. - case $ac_val in - */ ) - ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` - eval $ac_var=\$ac_val;; - esac - # Be sure to have absolute directory names. - case $ac_val in - [\\/$]* | ?:[\\/]* ) continue;; - NONE | '' ) case $ac_var in *prefix ) continue;; esac;; - esac - as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" -done - -# There might be people who depend on the old broken behavior: `$host' -# used to hold the argument of --host etc. -# FIXME: To remove some day. -build=$build_alias -host=$host_alias -target=$target_alias - -# FIXME: To remove some day. -if test "x$host_alias" != x; then - if test "x$build_alias" = x; then - cross_compiling=maybe - elif test "x$build_alias" != "x$host_alias"; then - cross_compiling=yes - fi -fi - -ac_tool_prefix= -test -n "$host_alias" && ac_tool_prefix=$host_alias- - -test "$silent" = yes && exec 6>/dev/null - - -ac_pwd=`pwd` && test -n "$ac_pwd" && -ac_ls_di=`ls -di .` && -ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || - as_fn_error $? "working directory cannot be determined" -test "X$ac_ls_di" = "X$ac_pwd_ls_di" || - as_fn_error $? "pwd does not report name of working directory" - - -# Find the source files, if location was not specified. -if test -z "$srcdir"; then - ac_srcdir_defaulted=yes - # Try the directory containing this script, then the parent directory. - ac_confdir=`$as_dirname -- "$as_myself" || -$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_myself" : 'X\(//\)[^/]' \| \ - X"$as_myself" : 'X\(//\)$' \| \ - X"$as_myself" : 'X\(/\)' \| . 
2>/dev/null || -$as_echo X"$as_myself" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - srcdir=$ac_confdir - if test ! -r "$srcdir/$ac_unique_file"; then - srcdir=.. - fi -else - ac_srcdir_defaulted=no -fi -if test ! -r "$srcdir/$ac_unique_file"; then - test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." - as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir" -fi -ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" -ac_abs_confdir=`( - cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" - pwd)` -# When building in place, set srcdir=. -if test "$ac_abs_confdir" = "$ac_pwd"; then - srcdir=. -fi -# Remove unnecessary trailing slashes from srcdir. -# Double slashes in file names in object file debugging info -# mess up M-x gdb in Emacs. -case $srcdir in -*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; -esac -for ac_var in $ac_precious_vars; do - eval ac_env_${ac_var}_set=\${${ac_var}+set} - eval ac_env_${ac_var}_value=\$${ac_var} - eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} - eval ac_cv_env_${ac_var}_value=\$${ac_var} -done - -# -# Report the --help message. -# -if test "$ac_init_help" = "long"; then - # Omit some internal or obsolete options to make the list less imposing. - # This message is too long to be a string in the A/UX 3.1 sh. - cat <<_ACEOF -\`configure' configures Haskell network package 2.3.0.14 to adapt to many kinds of systems. - -Usage: $0 [OPTION]... [VAR=VALUE]... - -To assign environment variables (e.g., CC, CFLAGS...), specify them as -VAR=VALUE. See below for descriptions of some of the useful variables. - -Defaults for the options are specified in brackets. 
- -Configuration: - -h, --help display this help and exit - --help=short display options specific to this package - --help=recursive display the short help of all the included packages - -V, --version display version information and exit - -q, --quiet, --silent do not print \`checking ...' messages - --cache-file=FILE cache test results in FILE [disabled] - -C, --config-cache alias for \`--cache-file=config.cache' - -n, --no-create do not create output files - --srcdir=DIR find the sources in DIR [configure dir or \`..'] - -Installation directories: - --prefix=PREFIX install architecture-independent files in PREFIX - [$ac_default_prefix] - --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX - [PREFIX] - -By default, \`make install' will install all the files in -\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify -an installation prefix other than \`$ac_default_prefix' using \`--prefix', -for instance \`--prefix=\$HOME'. - -For better control, use the options below. 
- -Fine tuning of the installation directories: - --bindir=DIR user executables [EPREFIX/bin] - --sbindir=DIR system admin executables [EPREFIX/sbin] - --libexecdir=DIR program executables [EPREFIX/libexec] - --sysconfdir=DIR read-only single-machine data [PREFIX/etc] - --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] - --localstatedir=DIR modifiable single-machine data [PREFIX/var] - --libdir=DIR object code libraries [EPREFIX/lib] - --includedir=DIR C header files [PREFIX/include] - --oldincludedir=DIR C header files for non-gcc [/usr/include] - --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] - --datadir=DIR read-only architecture-independent data [DATAROOTDIR] - --infodir=DIR info documentation [DATAROOTDIR/info] - --localedir=DIR locale-dependent data [DATAROOTDIR/locale] - --mandir=DIR man documentation [DATAROOTDIR/man] - --docdir=DIR documentation root [DATAROOTDIR/doc/network] - --htmldir=DIR html documentation [DOCDIR] - --dvidir=DIR dvi documentation [DOCDIR] - --pdfdir=DIR pdf documentation [DOCDIR] - --psdir=DIR ps documentation [DOCDIR] -_ACEOF - - cat <<\_ACEOF - -System types: - --build=BUILD configure for building on BUILD [guessed] - --host=HOST cross-compile to build programs to run on HOST [BUILD] -_ACEOF -fi - -if test -n "$ac_init_help"; then - case $ac_init_help in - short | recursive ) echo "Configuration of Haskell network package 2.3.0.14:";; - esac - cat <<\_ACEOF - -Optional Packages: - --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] - --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) -C compiler - -Some influential environment variables: - CC C compiler command - CFLAGS C compiler flags - LDFLAGS linker flags, e.g. -L if you have libraries in a - nonstandard directory - LIBS libraries to pass to the linker, e.g. -l - CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. 
-I if - you have headers in a nonstandard directory - CPP C preprocessor - -Use these variables to override the choices made by `configure' or to help -it to find libraries and programs with nonstandard names/locations. - -Report bugs to . -_ACEOF -ac_status=$? -fi - -if test "$ac_init_help" = "recursive"; then - # If there are subdirs, report their specific --help. - for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue - test -d "$ac_dir" || - { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || - continue - ac_builddir=. - -case "$ac_dir" in -.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; -*) - ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` - # A ".." for each directory in $ac_dir_suffix. - ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` - case $ac_top_builddir_sub in - "") ac_top_builddir_sub=. ac_top_build_prefix= ;; - *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; - esac ;; -esac -ac_abs_top_builddir=$ac_pwd -ac_abs_builddir=$ac_pwd$ac_dir_suffix -# for backward compatibility: -ac_top_builddir=$ac_top_build_prefix - -case $srcdir in - .) # We are building in place. - ac_srcdir=. - ac_top_srcdir=$ac_top_builddir_sub - ac_abs_top_srcdir=$ac_pwd ;; - [\\/]* | ?:[\\/]* ) # Absolute name. - ac_srcdir=$srcdir$ac_dir_suffix; - ac_top_srcdir=$srcdir - ac_abs_top_srcdir=$srcdir ;; - *) # Relative name. - ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix - ac_top_srcdir=$ac_top_build_prefix$srcdir - ac_abs_top_srcdir=$ac_pwd/$srcdir ;; -esac -ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix - - cd "$ac_dir" || { ac_status=$?; continue; } - # Check for guested configure. 
- if test -f "$ac_srcdir/configure.gnu"; then - echo && - $SHELL "$ac_srcdir/configure.gnu" --help=recursive - elif test -f "$ac_srcdir/configure"; then - echo && - $SHELL "$ac_srcdir/configure" --help=recursive - else - $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 - fi || ac_status=$? - cd "$ac_pwd" || { ac_status=$?; break; } - done -fi - -test -n "$ac_init_help" && exit $ac_status -if $ac_init_version; then - cat <<\_ACEOF -Haskell network package configure 2.3.0.14 -generated by GNU Autoconf 2.69 - -Copyright (C) 2012 Free Software Foundation, Inc. -This configure script is free software; the Free Software Foundation -gives unlimited permission to copy, distribute and modify it. -_ACEOF - exit -fi - -## ------------------------ ## -## Autoconf initialization. ## -## ------------------------ ## - -# ac_fn_c_try_compile LINENO -# -------------------------- -# Try to compile conftest.$ac_ext, and return whether this succeeded. -ac_fn_c_try_compile () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - rm -f conftest.$ac_objext - if { { ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compile") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && { - test -z "$ac_c_werror_flag" || - test ! 
-s conftest.err - } && test -s conftest.$ac_objext; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_try_compile - -# ac_fn_c_try_cpp LINENO -# ---------------------- -# Try to preprocess conftest.$ac_ext, and return whether this succeeded. -ac_fn_c_try_cpp () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if { { ac_try="$ac_cpp conftest.$ac_ext" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } > conftest.i && { - test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || - test ! -s conftest.err - }; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_try_cpp - -# ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES -# ------------------------------------------------------- -# Tests whether HEADER exists, giving a warning if it cannot be compiled using -# the include files in INCLUDES and setting the cache variable VAR -# accordingly. -ac_fn_c_check_header_mongrel () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if eval \${$3+:} false; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... 
" >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } -else - # Is the header compilable? -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 -$as_echo_n "checking $2 usability... " >&6; } -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -#include <$2> -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_header_compiler=yes -else - ac_header_compiler=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 -$as_echo "$ac_header_compiler" >&6; } - -# Is the header present? -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 -$as_echo_n "checking $2 presence... " >&6; } -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include <$2> -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - ac_header_preproc=yes -else - ac_header_preproc=no -fi -rm -f conftest.err conftest.i conftest.$ac_ext -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 -$as_echo "$ac_header_preproc" >&6; } - -# So? What about this header? -case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #(( - yes:no: ) - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 -$as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 -$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} - ;; - no:yes:* ) - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 -$as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" 
>&5 -$as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 -$as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 -$as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 -$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} -( $as_echo "## ------------------------------------ ## -## Report this to libraries@haskell.org ## -## ------------------------------------ ##" - ) | sed "s/^/$as_me: WARNING: /" >&2 - ;; -esac - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -else - eval "$3=\$ac_header_compiler" -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } -fi - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - -} # ac_fn_c_check_header_mongrel - -# ac_fn_c_try_run LINENO -# ---------------------- -# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes -# that executables *can* be run. -ac_fn_c_try_run () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 - test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' - { { case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_try") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; }; then : - ac_retval=0 -else - $as_echo "$as_me: program exited with status $ac_status" >&5 - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=$ac_status -fi - rm -rf conftest.dSYM conftest_ipa8_conftest.oo - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_try_run - -# ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES -# ------------------------------------------------------- -# Tests whether HEADER exists and can be compiled using the include files in -# INCLUDES, setting the cache variable VAR accordingly. -ac_fn_c_check_header_compile () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -#include <$2> -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - eval "$3=yes" -else - eval "$3=no" -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - -} # ac_fn_c_check_header_compile - -# ac_fn_c_try_link LINENO -# ----------------------- -# Try to link conftest.$ac_ext, and return whether this succeeded. 
-ac_fn_c_try_link () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - rm -f conftest.$ac_objext conftest$ac_exeext - if { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest$ac_exeext && { - test "$cross_compiling" = yes || - test -x conftest$ac_exeext - }; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information - # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would - # interfere with the next link command; also delete a directory that is - # left behind by Apple's compiler. We do this before executing the actions. - rm -rf conftest.dSYM conftest_ipa8_conftest.oo - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_try_link - -# ac_fn_c_check_func LINENO FUNC VAR -# ---------------------------------- -# Tests whether FUNC exists, setting the cache variable VAR accordingly -ac_fn_c_check_func () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -/* Define $2 to an innocuous variant, in case declares $2. 
- For example, HP-UX 11i declares gettimeofday. */ -#define $2 innocuous_$2 - -/* System header to define __stub macros and hopefully few prototypes, - which can conflict with char $2 (); below. - Prefer to if __STDC__ is defined, since - exists even on freestanding compilers. */ - -#ifdef __STDC__ -# include -#else -# include -#endif - -#undef $2 - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char $2 (); -/* The GNU C library defines this for functions which it implements - to always fail with ENOSYS. Some functions are actually named - something starting with __ and the normal name is an alias. */ -#if defined __stub_$2 || defined __stub___$2 -choke me -#endif - -int -main () -{ -return $2 (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - eval "$3=yes" -else - eval "$3=no" -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - -} # ac_fn_c_check_func - -# ac_fn_c_check_member LINENO AGGR MEMBER VAR INCLUDES -# ---------------------------------------------------- -# Tries to find if the field MEMBER exists in type AGGR, after including -# INCLUDES, setting cache variable VAR accordingly. -ac_fn_c_check_member () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2.$3" >&5 -$as_echo_n "checking for $2.$3... " >&6; } -if eval \${$4+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -$5 -int -main () -{ -static $2 ac_aggr; -if (ac_aggr.$3) -return 0; - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - eval "$4=yes" -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$5 -int -main () -{ -static $2 ac_aggr; -if (sizeof ac_aggr.$3) -return 0; - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - eval "$4=yes" -else - eval "$4=no" -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -eval ac_res=\$$4 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - -} # ac_fn_c_check_member - -# ac_fn_c_check_decl LINENO SYMBOL VAR INCLUDES -# --------------------------------------------- -# Tests whether SYMBOL is declared in INCLUDES, setting cache variable VAR -# accordingly. -ac_fn_c_check_decl () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - as_decl_name=`echo $2|sed 's/ *(.*//'` - as_decl_use=`echo $2|sed -e 's/(/((/' -e 's/)/) 0&/' -e 's/,/) 0& (/g'` - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $as_decl_name is declared" >&5 -$as_echo_n "checking whether $as_decl_name is declared... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -$4 -int -main () -{ -#ifndef $as_decl_name -#ifdef __cplusplus - (void) $as_decl_use; -#else - (void) $as_decl_name; -#endif -#endif - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - eval "$3=yes" -else - eval "$3=no" -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - -} # ac_fn_c_check_decl -cat >config.log <<_ACEOF -This file contains any messages produced by compilers while -running configure, to aid debugging if configure makes a mistake. - -It was created by Haskell network package $as_me 2.3.0.14, which was -generated by GNU Autoconf 2.69. Invocation command line was - - $ $0 $@ - -_ACEOF -exec 5>>config.log -{ -cat <<_ASUNAME -## --------- ## -## Platform. ## -## --------- ## - -hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` -uname -m = `(uname -m) 2>/dev/null || echo unknown` -uname -r = `(uname -r) 2>/dev/null || echo unknown` -uname -s = `(uname -s) 2>/dev/null || echo unknown` -uname -v = `(uname -v) 2>/dev/null || echo unknown` - -/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` -/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` - -/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` -/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` -/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` -/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` -/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` -/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` -/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` - -_ASUNAME - -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- $as_echo "PATH: $as_dir" - done -IFS=$as_save_IFS - -} >&5 - -cat >&5 <<_ACEOF - - -## ----------- ## -## Core tests. ## -## ----------- ## - -_ACEOF - - -# Keep a trace of the command line. -# Strip out --no-create and --no-recursion so they do not pile up. -# Strip out --silent because we don't want to record it for future runs. -# Also quote any args containing shell meta-characters. -# Make two passes to allow for proper duplicate-argument suppression. -ac_configure_args= -ac_configure_args0= -ac_configure_args1= -ac_must_keep_next=false -for ac_pass in 1 2 -do - for ac_arg - do - case $ac_arg in - -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; - -q | -quiet | --quiet | --quie | --qui | --qu | --q \ - | -silent | --silent | --silen | --sile | --sil) - continue ;; - *\'*) - ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; - esac - case $ac_pass in - 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; - 2) - as_fn_append ac_configure_args1 " '$ac_arg'" - if test $ac_must_keep_next = true; then - ac_must_keep_next=false # Got value, back to normal. - else - case $ac_arg in - *=* | --config-cache | -C | -disable-* | --disable-* \ - | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ - | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ - | -with-* | --with-* | -without-* | --without-* | --x) - case "$ac_configure_args0 " in - "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; - esac - ;; - -* ) ac_must_keep_next=true ;; - esac - fi - as_fn_append ac_configure_args " '$ac_arg'" - ;; - esac - done -done -{ ac_configure_args0=; unset ac_configure_args0;} -{ ac_configure_args1=; unset ac_configure_args1;} - -# When interrupted or exit'd, cleanup temporary files, and complete -# config.log. We remove comments because anyway the quotes in there -# would cause problems or look ugly. -# WARNING: Use '\'' to represent an apostrophe within the trap. -# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. 
-trap 'exit_status=$? - # Save into config.log some information that might help in debugging. - { - echo - - $as_echo "## ---------------- ## -## Cache variables. ## -## ---------------- ##" - echo - # The following way of writing the cache mishandles newlines in values, -( - for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do - eval ac_val=\$$ac_var - case $ac_val in #( - *${as_nl}*) - case $ac_var in #( - *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 -$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; - esac - case $ac_var in #( - _ | IFS | as_nl) ;; #( - BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( - *) { eval $ac_var=; unset $ac_var;} ;; - esac ;; - esac - done - (set) 2>&1 | - case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( - *${as_nl}ac_space=\ *) - sed -n \ - "s/'\''/'\''\\\\'\'''\''/g; - s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" - ;; #( - *) - sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" - ;; - esac | - sort -) - echo - - $as_echo "## ----------------- ## -## Output variables. ## -## ----------------- ##" - echo - for ac_var in $ac_subst_vars - do - eval ac_val=\$$ac_var - case $ac_val in - *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; - esac - $as_echo "$ac_var='\''$ac_val'\''" - done | sort - echo - - if test -n "$ac_subst_files"; then - $as_echo "## ------------------- ## -## File substitutions. ## -## ------------------- ##" - echo - for ac_var in $ac_subst_files - do - eval ac_val=\$$ac_var - case $ac_val in - *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; - esac - $as_echo "$ac_var='\''$ac_val'\''" - done | sort - echo - fi - - if test -s confdefs.h; then - $as_echo "## ----------- ## -## confdefs.h. 
## -## ----------- ##" - echo - cat confdefs.h - echo - fi - test "$ac_signal" != 0 && - $as_echo "$as_me: caught signal $ac_signal" - $as_echo "$as_me: exit $exit_status" - } >&5 - rm -f core *.core core.conftest.* && - rm -f -r conftest* confdefs* conf$$* $ac_clean_files && - exit $exit_status -' 0 -for ac_signal in 1 2 13 15; do - trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal -done -ac_signal=0 - -# confdefs.h avoids OS command line length limits that DEFS can exceed. -rm -f -r conftest* confdefs.h - -$as_echo "/* confdefs.h */" > confdefs.h - -# Predefined preprocessor variables. - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_NAME "$PACKAGE_NAME" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_TARNAME "$PACKAGE_TARNAME" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_VERSION "$PACKAGE_VERSION" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_STRING "$PACKAGE_STRING" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_URL "$PACKAGE_URL" -_ACEOF - - -# Let the site file select an alternate cache file if it wants to. -# Prefer an explicitly selected file to automatically selected ones. -ac_site_file1=NONE -ac_site_file2=NONE -if test -n "$CONFIG_SITE"; then - # We do not want a PATH search for config.site. 
- case $CONFIG_SITE in #(( - -*) ac_site_file1=./$CONFIG_SITE;; - */*) ac_site_file1=$CONFIG_SITE;; - *) ac_site_file1=./$CONFIG_SITE;; - esac -elif test "x$prefix" != xNONE; then - ac_site_file1=$prefix/share/config.site - ac_site_file2=$prefix/etc/config.site -else - ac_site_file1=$ac_default_prefix/share/config.site - ac_site_file2=$ac_default_prefix/etc/config.site -fi -for ac_site_file in "$ac_site_file1" "$ac_site_file2" -do - test "x$ac_site_file" = xNONE && continue - if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 -$as_echo "$as_me: loading site script $ac_site_file" >&6;} - sed 's/^/| /' "$ac_site_file" >&5 - . "$ac_site_file" \ - || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "failed to load site script $ac_site_file -See \`config.log' for more details" "$LINENO" 5; } - fi -done - -if test -r "$cache_file"; then - # Some versions of bash will fail to source /dev/null (special files - # actually), so we avoid doing that. DJGPP emulates it as a regular file. - if test /dev/null != "$cache_file" && test -f "$cache_file"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 -$as_echo "$as_me: loading cache $cache_file" >&6;} - case $cache_file in - [\\/]* | ?:[\\/]* ) . "$cache_file";; - *) . "./$cache_file";; - esac - fi -else - { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 -$as_echo "$as_me: creating cache $cache_file" >&6;} - >$cache_file -fi - -# Check that the precious variables saved in the cache have kept the same -# value. 
-ac_cache_corrupted=false -for ac_var in $ac_precious_vars; do - eval ac_old_set=\$ac_cv_env_${ac_var}_set - eval ac_new_set=\$ac_env_${ac_var}_set - eval ac_old_val=\$ac_cv_env_${ac_var}_value - eval ac_new_val=\$ac_env_${ac_var}_value - case $ac_old_set,$ac_new_set in - set,) - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 -$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} - ac_cache_corrupted=: ;; - ,set) - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 -$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} - ac_cache_corrupted=: ;; - ,);; - *) - if test "x$ac_old_val" != "x$ac_new_val"; then - # differences in whitespace do not lead to failure. - ac_old_val_w=`echo x $ac_old_val` - ac_new_val_w=`echo x $ac_new_val` - if test "$ac_old_val_w" != "$ac_new_val_w"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 -$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} - ac_cache_corrupted=: - else - { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 -$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} - eval $ac_var=\$ac_old_val - fi - { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 -$as_echo "$as_me: former value: \`$ac_old_val'" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 -$as_echo "$as_me: current value: \`$ac_new_val'" >&2;} - fi;; - esac - # Pass precious variables to config.status. - if test "$ac_new_set" = set; then - case $ac_new_val in - *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; - *) ac_arg=$ac_var=$ac_new_val ;; - esac - case " $ac_configure_args " in - *" '$ac_arg' "*) ;; # Avoid dups. 
Use of quotes ensures accuracy. - *) as_fn_append ac_configure_args " '$ac_arg'" ;; - esac - fi -done -if $ac_cache_corrupted; then - { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 -$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} - as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 -fi -## -------------------- ## -## Main body of script. ## -## -------------------- ## - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - - -ac_includes_default="$ac_includes_default -#ifdef HAVE_SYS_SOCKET_H -# include -#endif -#ifdef HAVE_NETINET_IN_H -# include -#endif -#ifdef HAVE_NETDB_H -# include -#endif -#ifdef HAVE_WINSOCK2_H -# include -#endif -#ifdef HAVE_WS2TCPIP_H -# include -// fix for MingW not defining IPV6_V6ONLY -# define IPV6_V6ONLY 27 -#endif" - -# Safety check: Ensure that we are in the correct source directory. - - -ac_config_headers="$ac_config_headers include/HsNetworkConfig.h" - - -ac_aux_dir= -for ac_dir in "$srcdir" "$srcdir/.." "$srcdir/../.."; do - if test -f "$ac_dir/install-sh"; then - ac_aux_dir=$ac_dir - ac_install_sh="$ac_aux_dir/install-sh -c" - break - elif test -f "$ac_dir/install.sh"; then - ac_aux_dir=$ac_dir - ac_install_sh="$ac_aux_dir/install.sh -c" - break - elif test -f "$ac_dir/shtool"; then - ac_aux_dir=$ac_dir - ac_install_sh="$ac_aux_dir/shtool install -c" - break - fi -done -if test -z "$ac_aux_dir"; then - as_fn_error $? 
"cannot find install-sh, install.sh, or shtool in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" "$LINENO" 5 -fi - -# These three variables are undocumented and unsupported, -# and are intended to be withdrawn in a future Autoconf release. -# They can cause serious problems if a builder's source tree is in a directory -# whose full name contains unusual characters. -ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. -ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. -ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. - - -# Make sure we can run config.sub. -$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || - as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5 - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5 -$as_echo_n "checking build system type... " >&6; } -if ${ac_cv_build+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_build_alias=$build_alias -test "x$ac_build_alias" = x && - ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` -test "x$ac_build_alias" = x && - as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5 -ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || - as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5 - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5 -$as_echo "$ac_cv_build" >&6; } -case $ac_cv_build in -*-*-*) ;; -*) as_fn_error $? "invalid value of canonical build" "$LINENO" 5;; -esac -build=$ac_cv_build -ac_save_IFS=$IFS; IFS='-' -set x $ac_cv_build -shift -build_cpu=$1 -build_vendor=$2 -shift; shift -# Remember, the first character of IFS is used to create $*, -# except with old shells: -build_os=$* -IFS=$ac_save_IFS -case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5 -$as_echo_n "checking host system type... 
" >&6; } -if ${ac_cv_host+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test "x$host_alias" = x; then - ac_cv_host=$ac_cv_build -else - ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || - as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5 -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5 -$as_echo "$ac_cv_host" >&6; } -case $ac_cv_host in -*-*-*) ;; -*) as_fn_error $? "invalid value of canonical host" "$LINENO" 5;; -esac -host=$ac_cv_host -ac_save_IFS=$IFS; IFS='-' -set x $ac_cv_host -shift -host_cpu=$1 -host_vendor=$2 -shift; shift -# Remember, the first character of IFS is used to create $*, -# except with old shells: -host_os=$* -IFS=$ac_save_IFS -case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac - - - - -# Check whether --with-cc was given. -if test "${with_cc+set}" = set; then : - withval=$with_cc; CC=$withval -fi - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu -if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. -set dummy ${ac_tool_prefix}gcc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_CC="${ac_tool_prefix}gcc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_CC"; then - ac_ct_CC=$CC - # Extract the first word of "gcc", so it can be a program name with args. -set dummy gcc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_CC"; then - ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_CC="gcc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_CC=$ac_cv_prog_ac_ct_CC -if test -n "$ac_ct_CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 -$as_echo "$ac_ct_CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_CC" = x; then - CC="" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - CC=$ac_ct_CC - fi -else - CC="$ac_cv_prog_CC" -fi - -if test -z "$CC"; then - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. -set dummy ${ac_tool_prefix}cc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_CC="${ac_tool_prefix}cc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - fi -fi -if test -z "$CC"; then - # Extract the first word of "cc", so it can be a program name with args. -set dummy cc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else - ac_prog_rejected=no -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then - ac_prog_rejected=yes - continue - fi - ac_cv_prog_CC="cc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -if test $ac_prog_rejected = yes; then - # We found a bogon in the path, so make sure we never use it. - set dummy $ac_cv_prog_CC - shift - if test $# != 0; then - # We chose a different compiler from the bogus one. - # However, it has the same basename, so the bogon will be chosen - # first if we set CC to just the basename; use the full file name. 
- shift - ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" - fi -fi -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$CC"; then - if test -n "$ac_tool_prefix"; then - for ac_prog in cl.exe - do - # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. -set dummy $ac_tool_prefix$ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_CC="$ac_tool_prefix$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$CC" && break - done -fi -if test -z "$CC"; then - ac_ct_CC=$CC - for ac_prog in cl.exe -do - # Extract the first word of "$ac_prog", so it can be a program name with args. -set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_CC"; then - ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. 
-else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_CC="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_CC=$ac_cv_prog_ac_ct_CC -if test -n "$ac_ct_CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 -$as_echo "$ac_ct_CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$ac_ct_CC" && break -done - - if test "x$ac_ct_CC" = x; then - CC="" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - CC=$ac_ct_CC - fi -fi - -fi - - -test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "no acceptable C compiler found in \$PATH -See \`config.log' for more details" "$LINENO" 5; } - -# Provide some information about the compiler. -$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 -set X $ac_compile -ac_compiler=$2 -for ac_option in --version -v -V -qversion; do - { { ac_try="$ac_compiler $ac_option >&5" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compiler $ac_option >&5") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - sed '10a\ -... rest of stderr output deleted ... 
- 10q' conftest.err >conftest.er1 - cat conftest.er1 >&5 - fi - rm -f conftest.er1 conftest.err - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } -done - -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -ac_clean_files_save=$ac_clean_files -ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" -# Try to create an executable without -o first, disregard a.out. -# It will help us diagnose broken compilers, and finding out an intuition -# of exeext. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 -$as_echo_n "checking whether the C compiler works... " >&6; } -ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` - -# The possible output files: -ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" - -ac_rmfiles= -for ac_file in $ac_files -do - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; - * ) ac_rmfiles="$ac_rmfiles $ac_file";; - esac -done -rm -f $ac_rmfiles - -if { { ac_try="$ac_link_default" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link_default") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then : - # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. -# So ignore a value of `no', otherwise this would lead to `EXEEXT = no' -# in a Makefile. We should not override ac_cv_exeext if it was cached, -# so that the user can short-circuit this test for compilers unknown to -# Autoconf. 
-for ac_file in $ac_files '' -do - test -f "$ac_file" || continue - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) - ;; - [ab].out ) - # We found the default executable, but exeext='' is most - # certainly right. - break;; - *.* ) - if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; - then :; else - ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` - fi - # We set ac_cv_exeext here because the later test for it is not - # safe: cross compilers may not add the suffix if given an `-o' - # argument, so we may need to know it at that point already. - # Even if this section looks crufty: it has the advantage of - # actually working. - break;; - * ) - break;; - esac -done -test "$ac_cv_exeext" = no && ac_cv_exeext= - -else - ac_file='' -fi -if test -z "$ac_file"; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -$as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - -{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error 77 "C compiler cannot create executables -See \`config.log' for more details" "$LINENO" 5; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 -$as_echo_n "checking for C compiler default output file name... " >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 -$as_echo "$ac_file" >&6; } -ac_exeext=$ac_cv_exeext - -rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out -ac_clean_files=$ac_clean_files_save -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 -$as_echo_n "checking for suffix of executables... 
" >&6; } -if { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then : - # If both `conftest.exe' and `conftest' are `present' (well, observable) -# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will -# work properly (i.e., refer to `conftest.exe'), while it won't with -# `rm'. -for ac_file in conftest.exe conftest conftest.*; do - test -f "$ac_file" || continue - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; - *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` - break;; - * ) break;; - esac -done -else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "cannot compute suffix of executables: cannot compile and link -See \`config.log' for more details" "$LINENO" 5; } -fi -rm -f conftest conftest$ac_cv_exeext -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 -$as_echo "$ac_cv_exeext" >&6; } - -rm -f conftest.$ac_ext -EXEEXT=$ac_cv_exeext -ac_exeext=$EXEEXT -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -int -main () -{ -FILE *f = fopen ("conftest.out", "w"); - return ferror (f) || fclose (f) != 0; - - ; - return 0; -} -_ACEOF -ac_clean_files="$ac_clean_files conftest.out" -# Check that the compiler produces executables we can run. If not, either -# the compiler is broken, or we cross compile. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 -$as_echo_n "checking whether we are cross compiling... 
" >&6; } -if test "$cross_compiling" != yes; then - { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } - if { ac_try='./conftest$ac_cv_exeext' - { { case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_try") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; }; then - cross_compiling=no - else - if test "$cross_compiling" = maybe; then - cross_compiling=yes - else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "cannot run C compiled programs. -If you meant to cross compile, use \`--host'. -See \`config.log' for more details" "$LINENO" 5; } - fi - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 -$as_echo "$cross_compiling" >&6; } - -rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out -ac_clean_files=$ac_clean_files_save -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 -$as_echo_n "checking for suffix of object files... " >&6; } -if ${ac_cv_objext+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -rm -f conftest.o conftest.obj -if { { ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compile") 2>&5 - ac_status=$? 
- $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then : - for ac_file in conftest.o conftest.obj conftest.*; do - test -f "$ac_file" || continue; - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; - *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` - break;; - esac -done -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - -{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "cannot compute suffix of object files: cannot compile -See \`config.log' for more details" "$LINENO" 5; } -fi -rm -f conftest.$ac_cv_objext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 -$as_echo "$ac_cv_objext" >&6; } -OBJEXT=$ac_cv_objext -ac_objext=$OBJEXT -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 -$as_echo_n "checking whether we are using the GNU C compiler... " >&6; } -if ${ac_cv_c_compiler_gnu+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ -#ifndef __GNUC__ - choke me -#endif - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_compiler_gnu=yes -else - ac_compiler_gnu=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -ac_cv_c_compiler_gnu=$ac_compiler_gnu - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 -$as_echo "$ac_cv_c_compiler_gnu" >&6; } -if test $ac_compiler_gnu = yes; then - GCC=yes -else - GCC= -fi -ac_test_CFLAGS=${CFLAGS+set} -ac_save_CFLAGS=$CFLAGS -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 -$as_echo_n "checking whether $CC accepts -g... 
" >&6; } -if ${ac_cv_prog_cc_g+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_save_c_werror_flag=$ac_c_werror_flag - ac_c_werror_flag=yes - ac_cv_prog_cc_g=no - CFLAGS="-g" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_prog_cc_g=yes -else - CFLAGS="" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - -else - ac_c_werror_flag=$ac_save_c_werror_flag - CFLAGS="-g" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_prog_cc_g=yes -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - ac_c_werror_flag=$ac_save_c_werror_flag -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 -$as_echo "$ac_cv_prog_cc_g" >&6; } -if test "$ac_test_CFLAGS" = set; then - CFLAGS=$ac_save_CFLAGS -elif test $ac_cv_prog_cc_g = yes; then - if test "$GCC" = yes; then - CFLAGS="-g -O2" - else - CFLAGS="-g" - fi -else - if test "$GCC" = yes; then - CFLAGS="-O2" - else - CFLAGS= - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 -$as_echo_n "checking for $CC option to accept ISO C89... " >&6; } -if ${ac_cv_prog_cc_c89+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_cv_prog_cc_c89=no -ac_save_CC=$CC -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -#include -struct stat; -/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. 
*/ -struct buf { int x; }; -FILE * (*rcsopen) (struct buf *, struct stat *, int); -static char *e (p, i) - char **p; - int i; -{ - return p[i]; -} -static char *f (char * (*g) (char **, int), char **p, ...) -{ - char *s; - va_list v; - va_start (v,p); - s = g (p, va_arg (v,int)); - va_end (v); - return s; -} - -/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has - function prototypes and stuff, but not '\xHH' hex character constants. - These don't provoke an error unfortunately, instead are silently treated - as 'x'. The following induces an error, until -std is added to get - proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an - array size at least. It's necessary to write '\x00'==0 to get something - that's true only with -std. */ -int osf4_cc_array ['\x00' == 0 ? 1 : -1]; - -/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters - inside strings and character constants. */ -#define FOO(x) 'x' -int xlc6_cc_array[FOO(a) == 'x' ? 
1 : -1]; - -int test (int i, double x); -struct s1 {int (*f) (int a);}; -struct s2 {int (*f) (double a);}; -int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); -int argc; -char **argv; -int -main () -{ -return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; - ; - return 0; -} -_ACEOF -for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ - -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" -do - CC="$ac_save_CC $ac_arg" - if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_prog_cc_c89=$ac_arg -fi -rm -f core conftest.err conftest.$ac_objext - test "x$ac_cv_prog_cc_c89" != "xno" && break -done -rm -f conftest.$ac_ext -CC=$ac_save_CC - -fi -# AC_CACHE_VAL -case "x$ac_cv_prog_cc_c89" in - x) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 -$as_echo "none needed" >&6; } ;; - xno) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 -$as_echo "unsupported" >&6; } ;; - *) - CC="$CC $ac_cv_prog_cc_c89" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 -$as_echo "$ac_cv_prog_cc_c89" >&6; } ;; -esac -if test "x$ac_cv_prog_cc_c89" != xno; then : - -fi - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for an ANSI C-conforming const" >&5 -$as_echo_n "checking for an ANSI C-conforming const... " >&6; } -if ${ac_cv_c_const+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - -#ifndef __cplusplus - /* Ultrix mips cc rejects this sort of thing. */ - typedef int charset[2]; - const charset cs = { 0, 0 }; - /* SunOS 4.1.1 cc rejects this. */ - char const *const *pcpcc; - char **ppc; - /* NEC SVR4.0.2 mips cc rejects this. 
*/ - struct point {int x, y;}; - static struct point const zero = {0,0}; - /* AIX XL C 1.02.0.0 rejects this. - It does not let you subtract one const X* pointer from another in - an arm of an if-expression whose if-part is not a constant - expression */ - const char *g = "string"; - pcpcc = &g + (g ? g-g : 0); - /* HPUX 7.0 cc rejects these. */ - ++pcpcc; - ppc = (char**) pcpcc; - pcpcc = (char const *const *) ppc; - { /* SCO 3.2v4 cc rejects this sort of thing. */ - char tx; - char *t = &tx; - char const *s = 0 ? (char *) 0 : (char const *) 0; - - *t++ = 0; - if (s) return 0; - } - { /* Someone thinks the Sun supposedly-ANSI compiler will reject this. */ - int x[] = {25, 17}; - const int *foo = &x[0]; - ++foo; - } - { /* Sun SC1.0 ANSI compiler rejects this -- but not the above. */ - typedef const int *iptr; - iptr p = 0; - ++p; - } - { /* AIX XL C 1.02.0.0 rejects this sort of thing, saying - "k.c", line 2.27: 1506-025 (S) Operand must be a modifiable lvalue. */ - struct s { int j; const int *ap[3]; } bx; - struct s *b = &bx; b->j = 5; - } - { /* ULTRIX-32 V3.1 (Rev 9) vcc rejects this */ - const int foo = 10; - if (!foo) return 0; - } - return !cs[0] && !zero.x; -#endif - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_c_const=yes -else - ac_cv_c_const=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_const" >&5 -$as_echo "$ac_cv_c_const" >&6; } -if test $ac_cv_c_const = no; then - -$as_echo "#define const /**/" >>confdefs.h - -fi - - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 -$as_echo_n "checking how to run the C preprocessor... " >&6; } -# On Suns, sometimes $CPP names a directory. 
-if test -n "$CPP" && test -d "$CPP"; then - CPP= -fi -if test -z "$CPP"; then - if ${ac_cv_prog_CPP+:} false; then : - $as_echo_n "(cached) " >&6 -else - # Double quotes because CPP needs to be expanded - for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" - do - ac_preproc_ok=false -for ac_c_preproc_warn_flag in '' yes -do - # Use a header file that comes with gcc, so configuring glibc - # with a fresh cross-compiler works. - # Prefer to if __STDC__ is defined, since - # exists even on freestanding compilers. - # On the NeXT, cc -E runs the code through the compiler's parser, - # not just through cpp. "Syntax error" is here to catch this case. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#ifdef __STDC__ -# include -#else -# include -#endif - Syntax error -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - -else - # Broken: fails on valid input. -continue -fi -rm -f conftest.err conftest.i conftest.$ac_ext - - # OK, works on sane cases. Now check whether nonexistent headers - # can be detected and how. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - # Broken: success on invalid input. -continue -else - # Passes both tests. -ac_preproc_ok=: -break -fi -rm -f conftest.err conftest.i conftest.$ac_ext - -done -# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. -rm -f conftest.i conftest.err conftest.$ac_ext -if $ac_preproc_ok; then : - break -fi - - done - ac_cv_prog_CPP=$CPP - -fi - CPP=$ac_cv_prog_CPP -else - ac_cv_prog_CPP=$CPP -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 -$as_echo "$CPP" >&6; } -ac_preproc_ok=false -for ac_c_preproc_warn_flag in '' yes -do - # Use a header file that comes with gcc, so configuring glibc - # with a fresh cross-compiler works. - # Prefer to if __STDC__ is defined, since - # exists even on freestanding compilers. 
- # On the NeXT, cc -E runs the code through the compiler's parser, - # not just through cpp. "Syntax error" is here to catch this case. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#ifdef __STDC__ -# include -#else -# include -#endif - Syntax error -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - -else - # Broken: fails on valid input. -continue -fi -rm -f conftest.err conftest.i conftest.$ac_ext - - # OK, works on sane cases. Now check whether nonexistent headers - # can be detected and how. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - # Broken: success on invalid input. -continue -else - # Passes both tests. -ac_preproc_ok=: -break -fi -rm -f conftest.err conftest.i conftest.$ac_ext - -done -# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. -rm -f conftest.i conftest.err conftest.$ac_ext -if $ac_preproc_ok; then : - -else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "C preprocessor \"$CPP\" fails sanity check -See \`config.log' for more details" "$LINENO" 5; } -fi - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 -$as_echo_n "checking for grep that handles long lines and -e... " >&6; } -if ${ac_cv_path_GREP+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -z "$GREP"; then - ac_path_GREP_found=false - # Loop through the user's path and test for each of PROGNAME-LIST - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_prog in grep ggrep; do - for ac_exec_ext in '' $ac_executable_extensions; do - ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" - as_fn_executable_p "$ac_path_GREP" || continue -# Check for GNU ac_path_GREP and select it if it is found. - # Check for GNU $ac_path_GREP -case `"$ac_path_GREP" --version 2>&1` in -*GNU*) - ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; -*) - ac_count=0 - $as_echo_n 0123456789 >"conftest.in" - while : - do - cat "conftest.in" "conftest.in" >"conftest.tmp" - mv "conftest.tmp" "conftest.in" - cp "conftest.in" "conftest.nl" - $as_echo 'GREP' >> "conftest.nl" - "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break - diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break - as_fn_arith $ac_count + 1 && ac_count=$as_val - if test $ac_count -gt ${ac_path_GREP_max-0}; then - # Best one so far, save it but keep looking for a better one - ac_cv_path_GREP="$ac_path_GREP" - ac_path_GREP_max=$ac_count - fi - # 10*(2^10) chars as input seems more than enough - test $ac_count -gt 10 && break - done - rm -f conftest.in conftest.tmp conftest.nl conftest.out;; -esac - - $ac_path_GREP_found && break 3 - done - done - done -IFS=$as_save_IFS - if test -z "$ac_cv_path_GREP"; then - as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 - fi -else - ac_cv_path_GREP=$GREP -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 -$as_echo "$ac_cv_path_GREP" >&6; } - GREP="$ac_cv_path_GREP" - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 -$as_echo_n "checking for egrep... 
" >&6; } -if ${ac_cv_path_EGREP+:} false; then : - $as_echo_n "(cached) " >&6 -else - if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 - then ac_cv_path_EGREP="$GREP -E" - else - if test -z "$EGREP"; then - ac_path_EGREP_found=false - # Loop through the user's path and test for each of PROGNAME-LIST - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_prog in egrep; do - for ac_exec_ext in '' $ac_executable_extensions; do - ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" - as_fn_executable_p "$ac_path_EGREP" || continue -# Check for GNU ac_path_EGREP and select it if it is found. - # Check for GNU $ac_path_EGREP -case `"$ac_path_EGREP" --version 2>&1` in -*GNU*) - ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; -*) - ac_count=0 - $as_echo_n 0123456789 >"conftest.in" - while : - do - cat "conftest.in" "conftest.in" >"conftest.tmp" - mv "conftest.tmp" "conftest.in" - cp "conftest.in" "conftest.nl" - $as_echo 'EGREP' >> "conftest.nl" - "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break - diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break - as_fn_arith $ac_count + 1 && ac_count=$as_val - if test $ac_count -gt ${ac_path_EGREP_max-0}; then - # Best one so far, save it but keep looking for a better one - ac_cv_path_EGREP="$ac_path_EGREP" - ac_path_EGREP_max=$ac_count - fi - # 10*(2^10) chars as input seems more than enough - test $ac_count -gt 10 && break - done - rm -f conftest.in conftest.tmp conftest.nl conftest.out;; -esac - - $ac_path_EGREP_found && break 3 - done - done - done -IFS=$as_save_IFS - if test -z "$ac_cv_path_EGREP"; then - as_fn_error $? 
"no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 - fi -else - ac_cv_path_EGREP=$EGREP -fi - - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 -$as_echo "$ac_cv_path_EGREP" >&6; } - EGREP="$ac_cv_path_EGREP" - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 -$as_echo_n "checking for ANSI C header files... " >&6; } -if ${ac_cv_header_stdc+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -#include -#include -#include - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_header_stdc=yes -else - ac_cv_header_stdc=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - -if test $ac_cv_header_stdc = yes; then - # SunOS 4.x string.h does not declare mem*, contrary to ANSI. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - -_ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "memchr" >/dev/null 2>&1; then : - -else - ac_cv_header_stdc=no -fi -rm -f conftest* - -fi - -if test $ac_cv_header_stdc = yes; then - # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - -_ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "free" >/dev/null 2>&1; then : - -else - ac_cv_header_stdc=no -fi -rm -f conftest* - -fi - -if test $ac_cv_header_stdc = yes; then - # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. - if test "$cross_compiling" = yes; then : - : -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -#include -#if ((' ' & 0x0FF) == 0x020) -# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') -# define TOUPPER(c) (ISLOWER(c) ? 
'A' + ((c) - 'a') : (c)) -#else -# define ISLOWER(c) \ - (('a' <= (c) && (c) <= 'i') \ - || ('j' <= (c) && (c) <= 'r') \ - || ('s' <= (c) && (c) <= 'z')) -# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) -#endif - -#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) -int -main () -{ - int i; - for (i = 0; i < 256; i++) - if (XOR (islower (i), ISLOWER (i)) - || toupper (i) != TOUPPER (i)) - return 2; - return 0; -} -_ACEOF -if ac_fn_c_try_run "$LINENO"; then : - -else - ac_cv_header_stdc=no -fi -rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ - conftest.$ac_objext conftest.beam conftest.$ac_ext -fi - -fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 -$as_echo "$ac_cv_header_stdc" >&6; } -if test $ac_cv_header_stdc = yes; then - -$as_echo "#define STDC_HEADERS 1" >>confdefs.h - -fi - -# On IRIX 5.3, sys/types and inttypes.h are conflicting. -for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ - inttypes.h stdint.h unistd.h -do : - as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` -ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default -" -if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 -_ACEOF - -fi - -done - - -for ac_header in fcntl.h limits.h stdlib.h sys/types.h unistd.h winsock2.h ws2tcpip.h -do : - as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` -ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" -if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 -_ACEOF - -fi - -done - -for ac_header in arpa/inet.h netdb.h netinet/in.h netinet/tcp.h sys/socket.h sys/uio.h sys/un.h -do : - as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` -ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" 
"$ac_includes_default" -if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 -_ACEOF - -fi - -done - - -for ac_func in readlink symlink -do : - as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` -ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" -if eval test \"x\$"$as_ac_var"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 -_ACEOF - -fi -done - - -ac_fn_c_check_member "$LINENO" "struct msghdr" "msg_control" "ac_cv_member_struct_msghdr_msg_control" "#if HAVE_SYS_TYPES_H -# include -#endif -#if HAVE_SYS_SOCKET_H -# include -#endif -#if HAVE_SYS_UIO_H -# include -#endif -" -if test "x$ac_cv_member_struct_msghdr_msg_control" = xyes; then : - -cat >>confdefs.h <<_ACEOF -#define HAVE_STRUCT_MSGHDR_MSG_CONTROL 1 -_ACEOF - - -fi -ac_fn_c_check_member "$LINENO" "struct msghdr" "msg_accrights" "ac_cv_member_struct_msghdr_msg_accrights" "#if HAVE_SYS_TYPES_H -# include -#endif -#if HAVE_SYS_SOCKET_H -# include -#endif -#if HAVE_SYS_UIO_H -# include -#endif -" -if test "x$ac_cv_member_struct_msghdr_msg_accrights" = xyes; then : - -cat >>confdefs.h <<_ACEOF -#define HAVE_STRUCT_MSGHDR_MSG_ACCRIGHTS 1 -_ACEOF - - -fi - - -ac_fn_c_check_member "$LINENO" "struct sockaddr" "sa_len" "ac_cv_member_struct_sockaddr_sa_len" "#if HAVE_SYS_TYPES_H -# include -#endif -#if HAVE_SYS_SOCKET_H -# include -#endif -" -if test "x$ac_cv_member_struct_sockaddr_sa_len" = xyes; then : - -cat >>confdefs.h <<_ACEOF -#define HAVE_STRUCT_SOCKADDR_SA_LEN 1 -_ACEOF - - -fi - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for in_addr_t in netinet/in.h" >&5 -$as_echo_n "checking for in_addr_t in netinet/in.h... " >&6; } -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -#include - -_ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "in_addr_t" >/dev/null 2>&1; then : - -$as_echo "#define HAVE_IN_ADDR_T 1" >>confdefs.h - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi -rm -f conftest* - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for SO_PEERCRED and struct ucred in sys/socket.h" >&5 -$as_echo_n "checking for SO_PEERCRED and struct ucred in sys/socket.h... " >&6; } -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -#include -#ifndef SO_PEERCRED -# error no SO_PEERCRED -#endif -struct ucred u; -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_ucred=yes -else - ac_cv_ucred=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -if test "x$ac_cv_ucred" = xno; then - old_CFLAGS="$CFLAGS" - CFLAGS="-D_GNU_SOURCE $CFLAGS" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -#include -#ifndef SO_PEERCRED -# error no SO_PEERCRED -#endif -struct ucred u; -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_ucred=yes -else - ac_cv_ucred=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - if test "x$ac_cv_ucred" = xyes; then - EXTRA_CPPFLAGS=-D_GNU_SOURCE - fi -else - old_CFLAGS="$CFLAGS" -fi -if test "x$ac_cv_ucred" = xno; then - CFLAGS="$old_CFLAGS" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -else - -$as_echo "#define HAVE_STRUCT_UCRED 1" >>confdefs.h - - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for getpeereid in unistd.h" >&5 -$as_echo_n "checking for getpeereid in unistd.h... 
" >&6; } -ac_fn_c_check_func "$LINENO" "getpeereid" "ac_cv_func_getpeereid" -if test "x$ac_cv_func_getpeereid" = xyes; then : - -$as_echo "#define HAVE_GETPEEREID 1" >>confdefs.h - -fi - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for _head_libws2_32_a in -lws2_32" >&5 -$as_echo_n "checking for _head_libws2_32_a in -lws2_32... " >&6; } -if ${ac_cv_lib_ws2_32__head_libws2_32_a+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lws2_32 $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char _head_libws2_32_a (); -int -main () -{ -return _head_libws2_32_a (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_ws2_32__head_libws2_32_a=yes -else - ac_cv_lib_ws2_32__head_libws2_32_a=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ws2_32__head_libws2_32_a" >&5 -$as_echo "$ac_cv_lib_ws2_32__head_libws2_32_a" >&6; } -if test "x$ac_cv_lib_ws2_32__head_libws2_32_a" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_LIBWS2_32 1 -_ACEOF - - LIBS="-lws2_32 $LIBS" - -fi - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for getaddrinfo" >&5 -$as_echo_n "checking for getaddrinfo... " >&6; } -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -$ac_includes_default -int testme(){ getaddrinfo; } -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - -$as_echo "#define HAVE_GETADDRINFO 1" >>confdefs.h - ac_have_getaddrinfo=yes; { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - -if test "x$ac_have_getaddrinfo" = x; then - old_CFLAGS="$CFLAGS" - if test "z$ac_cv_lib_ws2_32__head_libws2_32_a" = zyes; then - CFLAGS="-DWINVER=0x0501 $CFLAGS" - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for getaddrinfo if WINVER is 0x0501" >&5 -$as_echo_n "checking for getaddrinfo if WINVER is 0x0501... " >&6; } - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$ac_includes_default - int testme(){ getaddrinfo; } -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - -$as_echo "#define HAVE_GETADDRINFO 1" >>confdefs.h - -$as_echo "#define NEED_WINVER_XP 1" >>confdefs.h - EXTRA_CPPFLAGS="-DWINVER=0x0501 $EXTRA_CPPFLAGS"; { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - CFLAGS="$old_CFLAGS"; { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - fi -fi - -for ac_func in gai_strerror -do : - ac_fn_c_check_func "$LINENO" "gai_strerror" "ac_cv_func_gai_strerror" -if test "x$ac_cv_func_gai_strerror" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_GAI_STRERROR 1 -_ACEOF - -fi -done - - -ac_fn_c_check_decl "$LINENO" "AI_ADDRCONFIG" "ac_cv_have_decl_AI_ADDRCONFIG" "$ac_includes_default" -if test "x$ac_cv_have_decl_AI_ADDRCONFIG" = xyes; then : - ac_have_decl=1 -else - ac_have_decl=0 -fi - -cat >>confdefs.h <<_ACEOF -#define HAVE_DECL_AI_ADDRCONFIG $ac_have_decl -_ACEOF -ac_fn_c_check_decl "$LINENO" "AI_ALL" 
"ac_cv_have_decl_AI_ALL" "$ac_includes_default" -if test "x$ac_cv_have_decl_AI_ALL" = xyes; then : - ac_have_decl=1 -else - ac_have_decl=0 -fi - -cat >>confdefs.h <<_ACEOF -#define HAVE_DECL_AI_ALL $ac_have_decl -_ACEOF -ac_fn_c_check_decl "$LINENO" "AI_NUMERICSERV" "ac_cv_have_decl_AI_NUMERICSERV" "$ac_includes_default" -if test "x$ac_cv_have_decl_AI_NUMERICSERV" = xyes; then : - ac_have_decl=1 -else - ac_have_decl=0 -fi - -cat >>confdefs.h <<_ACEOF -#define HAVE_DECL_AI_NUMERICSERV $ac_have_decl -_ACEOF -ac_fn_c_check_decl "$LINENO" "AI_V4MAPPED" "ac_cv_have_decl_AI_V4MAPPED" "$ac_includes_default" -if test "x$ac_cv_have_decl_AI_V4MAPPED" = xyes; then : - ac_have_decl=1 -else - ac_have_decl=0 -fi - -cat >>confdefs.h <<_ACEOF -#define HAVE_DECL_AI_V4MAPPED $ac_have_decl -_ACEOF - - -ac_fn_c_check_decl "$LINENO" "IPV6_V6ONLY" "ac_cv_have_decl_IPV6_V6ONLY" "$ac_includes_default" -if test "x$ac_cv_have_decl_IPV6_V6ONLY" = xyes; then : - ac_have_decl=1 -else - ac_have_decl=0 -fi - -cat >>confdefs.h <<_ACEOF -#define HAVE_DECL_IPV6_V6ONLY $ac_have_decl -_ACEOF - - -ac_fn_c_check_decl "$LINENO" "IPPROTO_IP" "ac_cv_have_decl_IPPROTO_IP" "$ac_includes_default" -if test "x$ac_cv_have_decl_IPPROTO_IP" = xyes; then : - ac_have_decl=1 -else - ac_have_decl=0 -fi - -cat >>confdefs.h <<_ACEOF -#define HAVE_DECL_IPPROTO_IP $ac_have_decl -_ACEOF -ac_fn_c_check_decl "$LINENO" "IPPROTO_TCP" "ac_cv_have_decl_IPPROTO_TCP" "$ac_includes_default" -if test "x$ac_cv_have_decl_IPPROTO_TCP" = xyes; then : - ac_have_decl=1 -else - ac_have_decl=0 -fi - -cat >>confdefs.h <<_ACEOF -#define HAVE_DECL_IPPROTO_TCP $ac_have_decl -_ACEOF -ac_fn_c_check_decl "$LINENO" "IPPROTO_IPV6" "ac_cv_have_decl_IPPROTO_IPV6" "$ac_includes_default" -if test "x$ac_cv_have_decl_IPPROTO_IPV6" = xyes; then : - ac_have_decl=1 -else - ac_have_decl=0 -fi - -cat >>confdefs.h <<_ACEOF -#define HAVE_DECL_IPPROTO_IPV6 $ac_have_decl -_ACEOF - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sendfile in 
sys/sendfile.h" >&5 -$as_echo_n "checking for sendfile in sys/sendfile.h... " >&6; } -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - -_ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "sendfile" >/dev/null 2>&1; then : - -$as_echo "#define HAVE_LINUX_SENDFILE 1" >>confdefs.h - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi -rm -f conftest* - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sendfile in sys/socket.h" >&5 -$as_echo_n "checking for sendfile in sys/socket.h... " >&6; } -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - -_ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "sendfile" >/dev/null 2>&1; then : - -$as_echo "#define HAVE_BSD_SENDFILE 1" >>confdefs.h - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi -rm -f conftest* - - -for ac_func in gethostent -do : - ac_fn_c_check_func "$LINENO" "gethostent" "ac_cv_func_gethostent" -if test "x$ac_cv_func_gethostent" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_GETHOSTENT 1 -_ACEOF - -fi -done - - -for ac_func in accept4 -do : - ac_fn_c_check_func "$LINENO" "accept4" "ac_cv_func_accept4" -if test "x$ac_cv_func_accept4" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_ACCEPT4 1 -_ACEOF - -fi -done - - -case "$host" in -*-mingw*) - EXTRA_SRCS="cbits/initWinSock.c, cbits/winSockErr.c, cbits/asyncAccept.c" - EXTRA_LIBS=ws2_32 - CALLCONV=stdcall ;; -*-solaris2*) - EXTRA_SRCS="cbits/ancilData.c" - EXTRA_LIBS="nsl, socket" - CALLCONV=ccall ;; -*) - EXTRA_SRCS="cbits/ancilData.c" - EXTRA_LIBS= - CALLCONV=ccall ;; -esac - - - - - -ac_config_files="$ac_config_files network.buildinfo" - - -cat >confcache <<\_ACEOF -# This file is a shell script that caches the results 
of configure -# tests run on this system so they can be shared between configure -# scripts and configure runs, see configure's option --config-cache. -# It is not useful on other systems. If it contains results you don't -# want to keep, you may remove or edit it. -# -# config.status only pays attention to the cache file if you give it -# the --recheck option to rerun configure. -# -# `ac_cv_env_foo' variables (set or unset) will be overridden when -# loading this file, other *unset* `ac_cv_foo' will be assigned the -# following values. - -_ACEOF - -# The following way of writing the cache mishandles newlines in values, -# but we know of no workaround that is simple, portable, and efficient. -# So, we kill variables containing newlines. -# Ultrix sh set writes to stderr and can't be redirected directly, -# and sets the high bit in the cache file unless we assign to the vars. -( - for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do - eval ac_val=\$$ac_var - case $ac_val in #( - *${as_nl}*) - case $ac_var in #( - *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 -$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; - esac - case $ac_var in #( - _ | IFS | as_nl) ;; #( - BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( - *) { eval $ac_var=; unset $ac_var;} ;; - esac ;; - esac - done - - (set) 2>&1 | - case $as_nl`(ac_space=' '; set) 2>&1` in #( - *${as_nl}ac_space=\ *) - # `set' does not quote correctly, so add quotes: double-quote - # substitution turns \\\\ into \\, and sed turns \\ into \. - sed -n \ - "s/'/'\\\\''/g; - s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" - ;; #( - *) - # `set' quotes correctly as required by POSIX, so do not add quotes. 
- sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" - ;; - esac | - sort -) | - sed ' - /^ac_cv_env_/b end - t clear - :clear - s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ - t end - s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ - :end' >>confcache -if diff "$cache_file" confcache >/dev/null 2>&1; then :; else - if test -w "$cache_file"; then - if test "x$cache_file" != "x/dev/null"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 -$as_echo "$as_me: updating cache $cache_file" >&6;} - if test ! -f "$cache_file" || test -h "$cache_file"; then - cat confcache >"$cache_file" - else - case $cache_file in #( - */* | ?:*) - mv -f confcache "$cache_file"$$ && - mv -f "$cache_file"$$ "$cache_file" ;; #( - *) - mv -f confcache "$cache_file" ;; - esac - fi - fi - else - { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 -$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} - fi -fi -rm -f confcache - -test "x$prefix" = xNONE && prefix=$ac_default_prefix -# Let make expand exec_prefix. -test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' - -DEFS=-DHAVE_CONFIG_H - -ac_libobjs= -ac_ltlibobjs= -U= -for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue - # 1. Remove the extension, and $U if already installed. - ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' - ac_i=`$as_echo "$ac_i" | sed "$ac_script"` - # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR - # will be set to the directory where LIBOBJS objects are built. 
- as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" - as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' -done -LIBOBJS=$ac_libobjs - -LTLIBOBJS=$ac_ltlibobjs - - - -: "${CONFIG_STATUS=./config.status}" -ac_write_fail=0 -ac_clean_files_save=$ac_clean_files -ac_clean_files="$ac_clean_files $CONFIG_STATUS" -{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 -$as_echo "$as_me: creating $CONFIG_STATUS" >&6;} -as_write_fail=0 -cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 -#! $SHELL -# Generated by $as_me. -# Run this file to recreate the current configuration. -# Compiler output produced by configure, useful for debugging -# configure, is in config.log if it exists. - -debug=false -ac_cs_recheck=false -ac_cs_silent=false - -SHELL=\${CONFIG_SHELL-$SHELL} -export SHELL -_ASEOF -cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 -## -------------------- ## -## M4sh Initialization. ## -## -------------------- ## - -# Be more Bourne compatible -DUALCASE=1; export DUALCASE # for MKS sh -if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : - emulate sh - NULLCMD=: - # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which - # is contrary to our usage. Disable this feature. - alias -g '${1+"$@"}'='"$@"' - setopt NO_GLOB_SUBST -else - case `(set -o) 2>/dev/null` in #( - *posix*) : - set -o posix ;; #( - *) : - ;; -esac -fi - - -as_nl=' -' -export as_nl -# Printing a long string crashes Solaris 7 /usr/bin/printf. -as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo -# Prefer a ksh shell builtin over an external printf program on Solaris, -# but without wasting forks for bash or zsh. 
-if test -z "$BASH_VERSION$ZSH_VERSION" \ - && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='print -r --' - as_echo_n='print -rn --' -elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='printf %s\n' - as_echo_n='printf %s' -else - if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then - as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' - as_echo_n='/usr/ucb/echo -n' - else - as_echo_body='eval expr "X$1" : "X\\(.*\\)"' - as_echo_n_body='eval - arg=$1; - case $arg in #( - *"$as_nl"*) - expr "X$arg" : "X\\(.*\\)$as_nl"; - arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; - esac; - expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" - ' - export as_echo_n_body - as_echo_n='sh -c $as_echo_n_body as_echo' - fi - export as_echo_body - as_echo='sh -c $as_echo_body as_echo' -fi - -# The user is always right. -if test "${PATH_SEPARATOR+set}" != set; then - PATH_SEPARATOR=: - (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { - (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || - PATH_SEPARATOR=';' - } -fi - - -# IFS -# We need space, tab and new line, in precisely that order. Quoting is -# there to prevent editors from complaining about space-tab. -# (If _AS_PATH_WALK were called with IFS unset, it would disable word -# splitting by setting IFS to empty value.) -IFS=" "" $as_nl" - -# Find who we are. Look in the path if we contain no directory separator. -as_myself= -case $0 in #(( - *[\\/]* ) as_myself=$0 ;; - *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break - done -IFS=$as_save_IFS - - ;; -esac -# We did not find ourselves, most probably we were run as `sh COMMAND' -# in which case we are not to be found in the path. -if test "x$as_myself" = x; then - as_myself=$0 -fi -if test ! 
-f "$as_myself"; then - $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 - exit 1 -fi - -# Unset variables that we do not need and which cause bugs (e.g. in -# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" -# suppresses any "Segmentation fault" message there. '((' could -# trigger a bug in pdksh 5.2.14. -for as_var in BASH_ENV ENV MAIL MAILPATH -do eval test x\${$as_var+set} = xset \ - && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : -done -PS1='$ ' -PS2='> ' -PS4='+ ' - -# NLS nuisances. -LC_ALL=C -export LC_ALL -LANGUAGE=C -export LANGUAGE - -# CDPATH. -(unset CDPATH) >/dev/null 2>&1 && unset CDPATH - - -# as_fn_error STATUS ERROR [LINENO LOG_FD] -# ---------------------------------------- -# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are -# provided, also output the error to LOG_FD, referencing LINENO. Then exit the -# script with STATUS, using 1 if that was 0. -as_fn_error () -{ - as_status=$1; test $as_status -eq 0 && as_status=1 - if test "$4"; then - as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 - fi - $as_echo "$as_me: error: $2" >&2 - as_fn_exit $as_status -} # as_fn_error - - -# as_fn_set_status STATUS -# ----------------------- -# Set $? to STATUS, without forking. -as_fn_set_status () -{ - return $1 -} # as_fn_set_status - -# as_fn_exit STATUS -# ----------------- -# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. -as_fn_exit () -{ - set +e - as_fn_set_status $1 - exit $1 -} # as_fn_exit - -# as_fn_unset VAR -# --------------- -# Portably unset VAR. -as_fn_unset () -{ - { eval $1=; unset $1;} -} -as_unset=as_fn_unset -# as_fn_append VAR VALUE -# ---------------------- -# Append the text in VALUE to the end of the definition contained in VAR. 
Take -# advantage of any shell optimizations that allow amortized linear growth over -# repeated appends, instead of the typical quadratic growth present in naive -# implementations. -if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : - eval 'as_fn_append () - { - eval $1+=\$2 - }' -else - as_fn_append () - { - eval $1=\$$1\$2 - } -fi # as_fn_append - -# as_fn_arith ARG... -# ------------------ -# Perform arithmetic evaluation on the ARGs, and store the result in the -# global $as_val. Take advantage of shells that can avoid forks. The arguments -# must be portable across $(()) and expr. -if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : - eval 'as_fn_arith () - { - as_val=$(( $* )) - }' -else - as_fn_arith () - { - as_val=`expr "$@" || test $? -eq 1` - } -fi # as_fn_arith - - -if expr a : '\(a\)' >/dev/null 2>&1 && - test "X`expr 00001 : '.*\(...\)'`" = X001; then - as_expr=expr -else - as_expr=false -fi - -if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then - as_basename=basename -else - as_basename=false -fi - -if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then - as_dirname=dirname -else - as_dirname=false -fi - -as_me=`$as_basename -- "$0" || -$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ - X"$0" : 'X\(//\)$' \| \ - X"$0" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X/"$0" | - sed '/^.*\/\([^/][^/]*\)\/*$/{ - s//\1/ - q - } - /^X\/\(\/\/\)$/{ - s//\1/ - q - } - /^X\/\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - -# Avoid depending upon Character Ranges. -as_cr_letters='abcdefghijklmnopqrstuvwxyz' -as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' -as_cr_Letters=$as_cr_letters$as_cr_LETTERS -as_cr_digits='0123456789' -as_cr_alnum=$as_cr_Letters$as_cr_digits - -ECHO_C= ECHO_N= ECHO_T= -case `echo -n x` in #((((( --n*) - case `echo 'xy\c'` in - *c*) ECHO_T=' ';; # ECHO_T is single tab character. 
- xy) ECHO_C='\c';; - *) echo `echo ksh88 bug on AIX 6.1` > /dev/null - ECHO_T=' ';; - esac;; -*) - ECHO_N='-n';; -esac - -rm -f conf$$ conf$$.exe conf$$.file -if test -d conf$$.dir; then - rm -f conf$$.dir/conf$$.file -else - rm -f conf$$.dir - mkdir conf$$.dir 2>/dev/null -fi -if (echo >conf$$.file) 2>/dev/null; then - if ln -s conf$$.file conf$$ 2>/dev/null; then - as_ln_s='ln -s' - # ... but there are two gotchas: - # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. - # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. - # In both cases, we have to default to `cp -pR'. - ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || - as_ln_s='cp -pR' - elif ln conf$$.file conf$$ 2>/dev/null; then - as_ln_s=ln - else - as_ln_s='cp -pR' - fi -else - as_ln_s='cp -pR' -fi -rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file -rmdir conf$$.dir 2>/dev/null - - -# as_fn_mkdir_p -# ------------- -# Create "$as_dir" as a directory, including parents if necessary. -as_fn_mkdir_p () -{ - - case $as_dir in #( - -*) as_dir=./$as_dir;; - esac - test -d "$as_dir" || eval $as_mkdir_p || { - as_dirs= - while :; do - case $as_dir in #( - *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( - *) as_qdir=$as_dir;; - esac - as_dirs="'$as_qdir' $as_dirs" - as_dir=`$as_dirname -- "$as_dir" || -$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_dir" : 'X\(//\)[^/]' \| \ - X"$as_dir" : 'X\(//\)$' \| \ - X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$as_dir" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - test -d "$as_dir" && break - done - test -z "$as_dirs" || eval "mkdir $as_dirs" - } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" - - -} # as_fn_mkdir_p -if mkdir -p . 
2>/dev/null; then - as_mkdir_p='mkdir -p "$as_dir"' -else - test -d ./-p && rmdir ./-p - as_mkdir_p=false -fi - - -# as_fn_executable_p FILE -# ----------------------- -# Test if FILE is an executable regular file. -as_fn_executable_p () -{ - test -f "$1" && test -x "$1" -} # as_fn_executable_p -as_test_x='test -x' -as_executable_p=as_fn_executable_p - -# Sed expression to map a string onto a valid CPP name. -as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" - -# Sed expression to map a string onto a valid variable name. -as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" - - -exec 6>&1 -## ----------------------------------- ## -## Main body of $CONFIG_STATUS script. ## -## ----------------------------------- ## -_ASEOF -test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -# Save the log message, to keep $0 and so on meaningful, and to -# report actual input values of CONFIG_FILES etc. instead of their -# values after options handling. -ac_log=" -This file was extended by Haskell network package $as_me 2.3.0.14, which was -generated by GNU Autoconf 2.69. Invocation command line was - - CONFIG_FILES = $CONFIG_FILES - CONFIG_HEADERS = $CONFIG_HEADERS - CONFIG_LINKS = $CONFIG_LINKS - CONFIG_COMMANDS = $CONFIG_COMMANDS - $ $0 $@ - -on `(hostname || uname -n) 2>/dev/null | sed 1q` -" - -_ACEOF - -case $ac_config_files in *" -"*) set x $ac_config_files; shift; ac_config_files=$*;; -esac - -case $ac_config_headers in *" -"*) set x $ac_config_headers; shift; ac_config_headers=$*;; -esac - - -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -# Files that config.status was made for. -config_files="$ac_config_files" -config_headers="$ac_config_headers" - -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -ac_cs_usage="\ -\`$as_me' instantiates files and other configuration actions -from templates according to the current configuration. 
Unless the files -and actions are specified as TAGs, all are instantiated by default. - -Usage: $0 [OPTION]... [TAG]... - - -h, --help print this help, then exit - -V, --version print version number and configuration settings, then exit - --config print configuration, then exit - -q, --quiet, --silent - do not print progress messages - -d, --debug don't remove temporary files - --recheck update $as_me by reconfiguring in the same conditions - --file=FILE[:TEMPLATE] - instantiate the configuration file FILE - --header=FILE[:TEMPLATE] - instantiate the configuration header FILE - -Configuration files: -$config_files - -Configuration headers: -$config_headers - -Report bugs to ." - -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" -ac_cs_version="\\ -Haskell network package config.status 2.3.0.14 -configured by $0, generated by GNU Autoconf 2.69, - with options \\"\$ac_cs_config\\" - -Copyright (C) 2012 Free Software Foundation, Inc. -This config.status script is free software; the Free Software Foundation -gives unlimited permission to copy, distribute and modify it." - -ac_pwd='$ac_pwd' -srcdir='$srcdir' -test -n "\$AWK" || AWK=awk -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -# The default lists apply if the user does not specify any file. -ac_need_defaults=: -while test $# != 0 -do - case $1 in - --*=?*) - ac_option=`expr "X$1" : 'X\([^=]*\)='` - ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` - ac_shift=: - ;; - --*=) - ac_option=`expr "X$1" : 'X\([^=]*\)='` - ac_optarg= - ac_shift=: - ;; - *) - ac_option=$1 - ac_optarg=$2 - ac_shift=shift - ;; - esac - - case $ac_option in - # Handling of the options. 
- -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) - ac_cs_recheck=: ;; - --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) - $as_echo "$ac_cs_version"; exit ;; - --config | --confi | --conf | --con | --co | --c ) - $as_echo "$ac_cs_config"; exit ;; - --debug | --debu | --deb | --de | --d | -d ) - debug=: ;; - --file | --fil | --fi | --f ) - $ac_shift - case $ac_optarg in - *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; - '') as_fn_error $? "missing file argument" ;; - esac - as_fn_append CONFIG_FILES " '$ac_optarg'" - ac_need_defaults=false;; - --header | --heade | --head | --hea ) - $ac_shift - case $ac_optarg in - *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; - esac - as_fn_append CONFIG_HEADERS " '$ac_optarg'" - ac_need_defaults=false;; - --he | --h) - # Conflict between --help and --header - as_fn_error $? "ambiguous option: \`$1' -Try \`$0 --help' for more information.";; - --help | --hel | -h ) - $as_echo "$ac_cs_usage"; exit ;; - -q | -quiet | --quiet | --quie | --qui | --qu | --q \ - | -silent | --silent | --silen | --sile | --sil | --si | --s) - ac_cs_silent=: ;; - - # This is an error. - -*) as_fn_error $? "unrecognized option: \`$1' -Try \`$0 --help' for more information." ;; - - *) as_fn_append ac_config_targets " $1" - ac_need_defaults=false ;; - - esac - shift -done - -ac_configure_extra_args= - -if $ac_cs_silent; then - exec 6>/dev/null - ac_configure_extra_args="$ac_configure_extra_args --silent" -fi - -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -if \$ac_cs_recheck; then - set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion - shift - \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 - CONFIG_SHELL='$SHELL' - export CONFIG_SHELL - exec "\$@" -fi - -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -exec 5>>config.log -{ - echo - sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX -## Running $as_me. 
## -_ASBOX - $as_echo "$ac_log" -} >&5 - -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 - -# Handling of arguments. -for ac_config_target in $ac_config_targets -do - case $ac_config_target in - "include/HsNetworkConfig.h") CONFIG_HEADERS="$CONFIG_HEADERS include/HsNetworkConfig.h" ;; - "network.buildinfo") CONFIG_FILES="$CONFIG_FILES network.buildinfo" ;; - - *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; - esac -done - - -# If the user did not use the arguments to specify the items to instantiate, -# then the envvar interface is used. Set only those that are not. -# We use the long form for the default assignment because of an extremely -# bizarre bug on SunOS 4.1.3. -if $ac_need_defaults; then - test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files - test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers -fi - -# Have a temporary directory for convenience. Make it in the build tree -# simply because there is no reason against having it here, and in addition, -# creating and moving files from /tmp can sometimes cause problems. -# Hook for its removal unless debugging. -# Note that there is a small window in which the directory will not be cleaned: -# after its creation but before its name has been assigned to `$tmp'. -$debug || -{ - tmp= ac_tmp= - trap 'exit_status=$? - : "${ac_tmp:=$tmp}" - { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status -' 0 - trap 'as_fn_exit 1' 1 2 13 15 -} -# Create a (secure) tmp directory for tmp files. - -{ - tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && - test -d "$tmp" -} || -{ - tmp=./conf$$-$RANDOM - (umask 077 && mkdir "$tmp") -} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 -ac_tmp=$tmp - -# Set up the scripts for CONFIG_FILES section. -# No need to generate them if there are no CONFIG_FILES. -# This happens for instance with `./config.status config.h'. 
-if test -n "$CONFIG_FILES"; then - - -ac_cr=`echo X | tr X '\015'` -# On cygwin, bash can eat \r inside `` if the user requested igncr. -# But we know of no other shell where ac_cr would be empty at this -# point, so we can use a bashism as a fallback. -if test "x$ac_cr" = x; then - eval ac_cr=\$\'\\r\' -fi -ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` -if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then - ac_cs_awk_cr='\\r' -else - ac_cs_awk_cr=$ac_cr -fi - -echo 'BEGIN {' >"$ac_tmp/subs1.awk" && -_ACEOF - - -{ - echo "cat >conf$$subs.awk <<_ACEOF" && - echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && - echo "_ACEOF" -} >conf$$subs.sh || - as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 -ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` -ac_delim='%!_!# ' -for ac_last_try in false false false false false :; do - . ./conf$$subs.sh || - as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 - - ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` - if test $ac_delim_n = $ac_delim_num; then - break - elif $ac_last_try; then - as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 - else - ac_delim="$ac_delim!$ac_delim _$ac_delim!! 
" - fi -done -rm -f conf$$subs.sh - -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && -_ACEOF -sed -n ' -h -s/^/S["/; s/!.*/"]=/ -p -g -s/^[^!]*!// -:repl -t repl -s/'"$ac_delim"'$// -t delim -:nl -h -s/\(.\{148\}\)..*/\1/ -t more1 -s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ -p -n -b repl -:more1 -s/["\\]/\\&/g; s/^/"/; s/$/"\\/ -p -g -s/.\{148\}// -t nl -:delim -h -s/\(.\{148\}\)..*/\1/ -t more2 -s/["\\]/\\&/g; s/^/"/; s/$/"/ -p -b -:more2 -s/["\\]/\\&/g; s/^/"/; s/$/"\\/ -p -g -s/.\{148\}// -t delim -' >$CONFIG_STATUS || ac_write_fail=1 -rm -f conf$$subs.awk -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -_ACAWK -cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && - for (key in S) S_is_set[key] = 1 - FS = "" - -} -{ - line = $ 0 - nfields = split(line, field, "@") - substed = 0 - len = length(field[1]) - for (i = 2; i < nfields; i++) { - key = field[i] - keylen = length(key) - if (S_is_set[key]) { - value = S[key] - line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) - len += length(value) + length(field[++i]) - substed = 1 - } else - len += 1 + keylen - } - - print line -} - -_ACAWK -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then - sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" -else - cat -fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ - || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 -_ACEOF - -# VPATH may cause trouble with some makes, so we remove sole $(srcdir), -# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and -# trailing colons and then remove the whole line if VPATH becomes empty -# (actually we leave an empty line to preserve line numbers). 
-if test "x$srcdir" = x.; then - ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ -h -s/// -s/^/:/ -s/[ ]*$/:/ -s/:\$(srcdir):/:/g -s/:\${srcdir}:/:/g -s/:@srcdir@:/:/g -s/^:*// -s/:*$// -x -s/\(=[ ]*\).*/\1/ -G -s/\n// -s/^[^=]*=[ ]*$// -}' -fi - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -fi # test -n "$CONFIG_FILES" - -# Set up the scripts for CONFIG_HEADERS section. -# No need to generate them if there are no CONFIG_HEADERS. -# This happens for instance with `./config.status Makefile'. -if test -n "$CONFIG_HEADERS"; then -cat >"$ac_tmp/defines.awk" <<\_ACAWK || -BEGIN { -_ACEOF - -# Transform confdefs.h into an awk script `defines.awk', embedded as -# here-document in config.status, that substitutes the proper values into -# config.h.in to produce config.h. - -# Create a delimiter string that does not exist in confdefs.h, to ease -# handling of long lines. -ac_delim='%!_!# ' -for ac_last_try in false false :; do - ac_tt=`sed -n "/$ac_delim/p" confdefs.h` - if test -z "$ac_tt"; then - break - elif $ac_last_try; then - as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5 - else - ac_delim="$ac_delim!$ac_delim _$ac_delim!! " - fi -done - -# For the awk script, D is an array of macro values keyed by name, -# likewise P contains macro parameters if any. Preserve backslash -# newline sequences. 
- -ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* -sed -n ' -s/.\{148\}/&'"$ac_delim"'/g -t rset -:rset -s/^[ ]*#[ ]*define[ ][ ]*/ / -t def -d -:def -s/\\$// -t bsnl -s/["\\]/\\&/g -s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ -D["\1"]=" \3"/p -s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p -d -:bsnl -s/["\\]/\\&/g -s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ -D["\1"]=" \3\\\\\\n"\\/p -t cont -s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p -t cont -d -:cont -n -s/.\{148\}/&'"$ac_delim"'/g -t clear -:clear -s/\\$// -t bsnlc -s/["\\]/\\&/g; s/^/"/; s/$/"/p -d -:bsnlc -s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p -b cont -' >$CONFIG_STATUS || ac_write_fail=1 - -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 - for (key in D) D_is_set[key] = 1 - FS = "" -} -/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { - line = \$ 0 - split(line, arg, " ") - if (arg[1] == "#") { - defundef = arg[2] - mac1 = arg[3] - } else { - defundef = substr(arg[1], 2) - mac1 = arg[2] - } - split(mac1, mac2, "(") #) - macro = mac2[1] - prefix = substr(line, 1, index(line, defundef) - 1) - if (D_is_set[macro]) { - # Preserve the white space surrounding the "#". - print prefix "define", macro P[macro] D[macro] - next - } else { - # Replace #undef with comments. This is necessary, for example, - # in the case of _POSIX_SOURCE, which is predefined and required - # on some systems where configure will not decide to define it. - if (defundef == "undef") { - print "/*", prefix defundef, macro, "*/" - next - } - } -} -{ print } -_ACAWK -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 - as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 -fi # test -n "$CONFIG_HEADERS" - - -eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS " -shift -for ac_tag -do - case $ac_tag in - :[FHLC]) ac_mode=$ac_tag; continue;; - esac - case $ac_mode$ac_tag in - :[FHL]*:*);; - :L* | :C*:*) as_fn_error $? 
"invalid tag \`$ac_tag'" "$LINENO" 5;; - :[FH]-) ac_tag=-:-;; - :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; - esac - ac_save_IFS=$IFS - IFS=: - set x $ac_tag - IFS=$ac_save_IFS - shift - ac_file=$1 - shift - - case $ac_mode in - :L) ac_source=$1;; - :[FH]) - ac_file_inputs= - for ac_f - do - case $ac_f in - -) ac_f="$ac_tmp/stdin";; - *) # Look for the file first in the build tree, then in the source tree - # (if the path is not absolute). The absolute path cannot be DOS-style, - # because $ac_f cannot contain `:'. - test -f "$ac_f" || - case $ac_f in - [\\/$]*) false;; - *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; - esac || - as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; - esac - case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac - as_fn_append ac_file_inputs " '$ac_f'" - done - - # Let's still pretend it is `configure' which instantiates (i.e., don't - # use $as_me), people would be surprised to read: - # /* config.h. Generated by config.status. */ - configure_input='Generated from '` - $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' - `' by configure.' - if test x"$ac_file" != x-; then - configure_input="$ac_file. $configure_input" - { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 -$as_echo "$as_me: creating $ac_file" >&6;} - fi - # Neutralize special characters interpreted by sed in replacement strings. - case $configure_input in #( - *\&* | *\|* | *\\* ) - ac_sed_conf_input=`$as_echo "$configure_input" | - sed 's/[\\\\&|]/\\\\&/g'`;; #( - *) ac_sed_conf_input=$configure_input;; - esac - - case $ac_tag in - *:-:* | *:-) cat >"$ac_tmp/stdin" \ - || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; - esac - ;; - esac - - ac_dir=`$as_dirname -- "$ac_file" || -$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$ac_file" : 'X\(//\)[^/]' \| \ - X"$ac_file" : 'X\(//\)$' \| \ - X"$ac_file" : 'X\(/\)' \| . 
2>/dev/null || -$as_echo X"$ac_file" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - as_dir="$ac_dir"; as_fn_mkdir_p - ac_builddir=. - -case "$ac_dir" in -.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; -*) - ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` - # A ".." for each directory in $ac_dir_suffix. - ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` - case $ac_top_builddir_sub in - "") ac_top_builddir_sub=. ac_top_build_prefix= ;; - *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; - esac ;; -esac -ac_abs_top_builddir=$ac_pwd -ac_abs_builddir=$ac_pwd$ac_dir_suffix -# for backward compatibility: -ac_top_builddir=$ac_top_build_prefix - -case $srcdir in - .) # We are building in place. - ac_srcdir=. - ac_top_srcdir=$ac_top_builddir_sub - ac_abs_top_srcdir=$ac_pwd ;; - [\\/]* | ?:[\\/]* ) # Absolute name. - ac_srcdir=$srcdir$ac_dir_suffix; - ac_top_srcdir=$srcdir - ac_abs_top_srcdir=$srcdir ;; - *) # Relative name. - ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix - ac_top_srcdir=$ac_top_build_prefix$srcdir - ac_abs_top_srcdir=$ac_pwd/$srcdir ;; -esac -ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix - - - case $ac_mode in - :F) - # - # CONFIG_FILE - # - -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -# If the template does not know about datarootdir, expand it. -# FIXME: This hack should be removed a few years after 2.60. 
-ac_datarootdir_hack=; ac_datarootdir_seen= -ac_sed_dataroot=' -/datarootdir/ { - p - q -} -/@datadir@/p -/@docdir@/p -/@infodir@/p -/@localedir@/p -/@mandir@/p' -case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in -*datarootdir*) ac_datarootdir_seen=yes;; -*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 -$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 - ac_datarootdir_hack=' - s&@datadir@&$datadir&g - s&@docdir@&$docdir&g - s&@infodir@&$infodir&g - s&@localedir@&$localedir&g - s&@mandir@&$mandir&g - s&\\\${datarootdir}&$datarootdir&g' ;; -esac -_ACEOF - -# Neutralize VPATH when `$srcdir' = `.'. -# Shell code in configure.ac might set extrasub. -# FIXME: do we really want to maintain this feature? -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -ac_sed_extra="$ac_vpsub -$extrasub -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -:t -/@[a-zA-Z_][a-zA-Z_0-9]*@/!b -s|@configure_input@|$ac_sed_conf_input|;t t -s&@top_builddir@&$ac_top_builddir_sub&;t t -s&@top_build_prefix@&$ac_top_build_prefix&;t t -s&@srcdir@&$ac_srcdir&;t t -s&@abs_srcdir@&$ac_abs_srcdir&;t t -s&@top_srcdir@&$ac_top_srcdir&;t t -s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t -s&@builddir@&$ac_builddir&;t t -s&@abs_builddir@&$ac_abs_builddir&;t t -s&@abs_top_builddir@&$ac_abs_top_builddir&;t t -$ac_datarootdir_hack -" -eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ - >$ac_tmp/out || as_fn_error $? 
"could not create $ac_file" "$LINENO" 5 - -test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && - { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && - { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ - "$ac_tmp/out"`; test -z "$ac_out"; } && - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' -which seems to be undefined. Please make sure it is defined" >&5 -$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' -which seems to be undefined. Please make sure it is defined" >&2;} - - rm -f "$ac_tmp/stdin" - case $ac_file in - -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; - *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; - esac \ - || as_fn_error $? "could not create $ac_file" "$LINENO" 5 - ;; - :H) - # - # CONFIG_HEADER - # - if test x"$ac_file" != x-; then - { - $as_echo "/* $configure_input */" \ - && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" - } >"$ac_tmp/config.h" \ - || as_fn_error $? "could not create $ac_file" "$LINENO" 5 - if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then - { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 -$as_echo "$as_me: $ac_file is unchanged" >&6;} - else - rm -f "$ac_file" - mv "$ac_tmp/config.h" "$ac_file" \ - || as_fn_error $? "could not create $ac_file" "$LINENO" 5 - fi - else - $as_echo "/* $configure_input */" \ - && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ - || as_fn_error $? "could not create -" "$LINENO" 5 - fi - ;; - - - esac - -done # for ac_tag - - -as_fn_exit 0 -_ACEOF -ac_clean_files=$ac_clean_files_save - -test $ac_write_fail = 0 || - as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 - - -# configure is writing to config.log, and then calls config.status. -# config.status does its own redirection, appending to config.log. 
-# Unfortunately, on DOS this fails, as config.log is still kept open -# by configure, so config.status won't be able to write to it; its -# output is simply discarded. So we exec the FD to /dev/null, -# effectively closing config.log, so it can be properly (re)opened and -# appended to by config.status. When coming back to configure, we -# need to make the FD available again. -if test "$no_create" != yes; then - ac_cs_success=: - ac_config_status_args= - test "$silent" = yes && - ac_config_status_args="$ac_config_status_args --quiet" - exec 5>/dev/null - $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false - exec 5>>config.log - # Use ||, not &&, to avoid exiting from the if with $? = 1, which - # would make configure fail if this is the last instruction. - $ac_cs_success || as_fn_exit 1 -fi -if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 -$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} -fi - diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar2=/configure.ac cabal-install-1.22-1.22.9.0/=unpacked-tar2=/configure.ac --- cabal-install-1.22-1.22.6.0/=unpacked-tar2=/configure.ac 2014-09-02 18:14:23.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar2=/configure.ac 1970-01-01 00:00:00.000000000 +0000 @@ -1,192 +0,0 @@ -AC_INIT([Haskell network package], [2.3.0.14], [libraries@haskell.org], [network]) - -ac_includes_default="$ac_includes_default -#ifdef HAVE_SYS_SOCKET_H -# include -#endif -#ifdef HAVE_NETINET_IN_H -# include -#endif -#ifdef HAVE_NETDB_H -# include -#endif -#ifdef HAVE_WINSOCK2_H -# include -#endif -#ifdef HAVE_WS2TCPIP_H -# include -// fix for MingW not defining IPV6_V6ONLY -# define IPV6_V6ONLY 27 -#endif" - -# Safety check: Ensure that we are in the correct source directory. 
-AC_CONFIG_SRCDIR([include/HsNet.h]) - -AC_CONFIG_HEADERS([include/HsNetworkConfig.h]) - -AC_CANONICAL_HOST - -AC_ARG_WITH([cc], - [C compiler], - [CC=$withval]) -AC_PROG_CC() - -AC_C_CONST - -dnl ** check for specific header (.h) files that we are interested in -AC_CHECK_HEADERS([fcntl.h limits.h stdlib.h sys/types.h unistd.h winsock2.h ws2tcpip.h]) -AC_CHECK_HEADERS([arpa/inet.h netdb.h netinet/in.h netinet/tcp.h sys/socket.h sys/uio.h sys/un.h]) - -AC_CHECK_FUNCS([readlink symlink]) - -dnl ** check what fields struct msghdr contains -AC_CHECK_MEMBERS([struct msghdr.msg_control, struct msghdr.msg_accrights], [], [], [#if HAVE_SYS_TYPES_H -# include -#endif -#if HAVE_SYS_SOCKET_H -# include -#endif -#if HAVE_SYS_UIO_H -# include -#endif]) - -dnl ** check if struct sockaddr contains sa_len -AC_CHECK_MEMBERS([struct sockaddr.sa_len], [], [], [#if HAVE_SYS_TYPES_H -# include -#endif -#if HAVE_SYS_SOCKET_H -# include -#endif]) - -dnl -------------------------------------------------- -dnl * test for in_addr_t -dnl -------------------------------------------------- -AC_MSG_CHECKING(for in_addr_t in netinet/in.h) -AC_EGREP_HEADER(in_addr_t, netinet/in.h, - [ AC_DEFINE([HAVE_IN_ADDR_T], [1], [Define to 1 if in_addr_t is available.]) AC_MSG_RESULT(yes) ], - AC_MSG_RESULT(no)) - -dnl -------------------------------------------------- -dnl * test for SO_PEERCRED and struct ucred -dnl -------------------------------------------------- -AC_MSG_CHECKING(for SO_PEERCRED and struct ucred in sys/socket.h) -AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include -#include -#ifndef SO_PEERCRED -# error no SO_PEERCRED -#endif -struct ucred u;]])],ac_cv_ucred=yes,ac_cv_ucred=no) -if test "x$ac_cv_ucred" = xno; then - old_CFLAGS="$CFLAGS" - CFLAGS="-D_GNU_SOURCE $CFLAGS" - AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include -#include -#ifndef SO_PEERCRED -# error no SO_PEERCRED -#endif -struct ucred u;]])],ac_cv_ucred=yes,ac_cv_ucred=no) - if test "x$ac_cv_ucred" = xyes; then - 
EXTRA_CPPFLAGS=-D_GNU_SOURCE - fi -else - old_CFLAGS="$CFLAGS" -fi -if test "x$ac_cv_ucred" = xno; then - CFLAGS="$old_CFLAGS" - AC_MSG_RESULT(no) -else - AC_DEFINE([HAVE_STRUCT_UCRED], [1], [Define to 1 if you have both SO_PEERCRED and struct ucred.]) - AC_MSG_RESULT(yes) -fi - -dnl -------------------------------------------------- -dnl * test for GETPEEREID(3) -dnl -------------------------------------------------- -AC_MSG_CHECKING(for getpeereid in unistd.h) -AC_CHECK_FUNC( getpeereid, AC_DEFINE([HAVE_GETPEEREID], [1], [Define to 1 if you have getpeereid.] )) - -dnl -------------------------------------------------- -dnl * check for Windows networking libraries -dnl -------------------------------------------------- -AC_CHECK_LIB(ws2_32, _head_libws2_32_a) - -dnl -------------------------------------------------- -dnl * test for getaddrinfo as proxy for IPv6 support -dnl -------------------------------------------------- -AC_MSG_CHECKING(for getaddrinfo) -dnl Can't use AC_CHECK_FUNC here, because it doesn't do the right -dnl thing on Windows. -AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[$ac_includes_default -int testme(){ getaddrinfo; }]])],[AC_DEFINE([HAVE_GETADDRINFO], [1], [Define to 1 if you have the `getaddrinfo' function.]) ac_have_getaddrinfo=yes; AC_MSG_RESULT(yes)],[AC_MSG_RESULT(no)]) - -dnl Under mingw, we may need to set WINVER to 0x0501 to expose getaddrinfo. 
-if test "x$ac_have_getaddrinfo" = x; then - old_CFLAGS="$CFLAGS" - if test "z$ac_cv_lib_ws2_32__head_libws2_32_a" = zyes; then - CFLAGS="-DWINVER=0x0501 $CFLAGS" - AC_MSG_CHECKING(for getaddrinfo if WINVER is 0x0501) - AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[$ac_includes_default - int testme(){ getaddrinfo; }]])],[AC_DEFINE([HAVE_GETADDRINFO], [1], [Define to 1 if you have the `getaddrinfo' function.]) AC_DEFINE([NEED_WINVER_XP], [1], [Define to 1 if the `getaddrinfo' function needs WINVER set.]) EXTRA_CPPFLAGS="-DWINVER=0x0501 $EXTRA_CPPFLAGS"; AC_MSG_RESULT(yes)],[CFLAGS="$old_CFLAGS"; AC_MSG_RESULT(no)]) - fi -fi - -dnl Missing under mingw, sigh. -AC_CHECK_FUNCS(gai_strerror) - -dnl ------------------------------------------------------- -dnl * test for AI_* flags that not all implementations have -dnl ------------------------------------------------------- -AC_CHECK_DECLS([AI_ADDRCONFIG, AI_ALL, AI_NUMERICSERV, AI_V4MAPPED]) - -dnl ------------------------------------------------------- -dnl * test for IPV6_V6ONLY flags that not all implementations have -dnl ------------------------------------------------------- -AC_CHECK_DECLS([IPV6_V6ONLY]) - -dnl ------------------------------------------------------- -dnl * test for IPPROTO_* macros/constants -dnl ------------------------------------------------------- -AC_CHECK_DECLS([IPPROTO_IP, IPPROTO_TCP, IPPROTO_IPV6]) - -dnl -------------------------------------------------- -dnl * test for Linux sendfile(2) -dnl -------------------------------------------------- -AC_MSG_CHECKING(for sendfile in sys/sendfile.h) -AC_EGREP_HEADER(sendfile, sys/sendfile.h, - [ AC_DEFINE([HAVE_LINUX_SENDFILE], [1], [Define to 1 if you have a Linux sendfile(2) implementation.]) AC_MSG_RESULT(yes) ], - AC_MSG_RESULT(no)) - -dnl -------------------------------------------------- -dnl * test for BSD sendfile(2) -dnl -------------------------------------------------- -AC_MSG_CHECKING(for sendfile in sys/socket.h) -AC_EGREP_HEADER(sendfile, 
sys/socket.h, - [ AC_DEFINE([HAVE_BSD_SENDFILE], [1], [Define to 1 if you have a BSDish sendfile(2) implementation.]) AC_MSG_RESULT(yes) ], - AC_MSG_RESULT(no)) - -AC_CHECK_FUNCS(gethostent) - -AC_CHECK_FUNCS(accept4) - -case "$host" in -*-mingw*) - EXTRA_SRCS="cbits/initWinSock.c, cbits/winSockErr.c, cbits/asyncAccept.c" - EXTRA_LIBS=ws2_32 - CALLCONV=stdcall ;; -*-solaris2*) - EXTRA_SRCS="cbits/ancilData.c" - EXTRA_LIBS="nsl, socket" - CALLCONV=ccall ;; -*) - EXTRA_SRCS="cbits/ancilData.c" - EXTRA_LIBS= - CALLCONV=ccall ;; -esac -AC_SUBST([CALLCONV]) -AC_SUBST([EXTRA_CPPFLAGS]) -AC_SUBST([EXTRA_LIBS]) -AC_SUBST([EXTRA_SRCS]) - -AC_CONFIG_FILES([network.buildinfo]) - -AC_OUTPUT diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar2=/examples/EchoClient.hs cabal-install-1.22-1.22.9.0/=unpacked-tar2=/examples/EchoClient.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar2=/examples/EchoClient.hs 2014-09-02 18:14:23.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar2=/examples/EchoClient.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,18 +0,0 @@ --- Echo client program -module Main where - -import Network.Socket hiding (recv) -import Network.Socket.ByteString (recv, sendAll) -import qualified Data.ByteString.Char8 as C - -main :: IO () -main = withSocketsDo $ - do addrinfos <- getAddrInfo Nothing (Just "") (Just "3000") - let serveraddr = head addrinfos - sock <- socket (addrFamily serveraddr) Stream defaultProtocol - connect sock (addrAddress serveraddr) - sendAll sock $ C.pack "Hello, world!" 
- msg <- recv sock 1024 - sClose sock - putStr "Received " - C.putStrLn msg diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar2=/examples/EchoServer.hs cabal-install-1.22-1.22.9.0/=unpacked-tar2=/examples/EchoServer.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar2=/examples/EchoServer.hs 2014-09-02 18:14:23.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar2=/examples/EchoServer.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ --- Echo server program -module Main where - -import Control.Monad (unless) -import Network.Socket hiding (recv) -import qualified Data.ByteString as S -import Network.Socket.ByteString (recv, sendAll) - -main :: IO () -main = withSocketsDo $ - do addrinfos <- getAddrInfo - (Just (defaultHints {addrFlags = [AI_PASSIVE]})) - Nothing (Just "3000") - let serveraddr = head addrinfos - sock <- socket (addrFamily serveraddr) Stream defaultProtocol - bindSocket sock (addrAddress serveraddr) - listen sock 1 - (conn, _) <- accept sock - talk conn - sClose conn - sClose sock - - where - talk :: Socket -> IO () - talk conn = - do msg <- recv conn 1024 - unless (S.null msg) $ sendAll conn msg >> talk conn diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar2=/include/HsNet.h cabal-install-1.22-1.22.9.0/=unpacked-tar2=/include/HsNet.h --- cabal-install-1.22-1.22.6.0/=unpacked-tar2=/include/HsNet.h 2014-09-02 18:14:23.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar2=/include/HsNet.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,180 +0,0 @@ -/* ----------------------------------------------------------------------------- - * - * Definitions for package `net' which are visible in Haskell land. - * - * ---------------------------------------------------------------------------*/ - -#ifndef HSNET_H -#define HSNET_H - -#include "HsNetworkConfig.h" - -#ifdef NEED_WINVER -# define WINVER 0x0501 -#endif - -/* ultra-evil... 
*/ -#undef PACKAGE_BUGREPORT -#undef PACKAGE_NAME -#undef PACKAGE_STRING -#undef PACKAGE_TARNAME -#undef PACKAGE_VERSION - -#ifndef INLINE -# if defined(_MSC_VER) -# define INLINE extern __inline -# elif defined(__GNUC_GNU_INLINE__) -# define INLINE extern inline -# else -# define INLINE inline -# endif -#endif - -#ifdef HAVE_GETADDRINFO -# define IPV6_SOCKET_SUPPORT 1 -#else -# undef IPV6_SOCKET_SUPPORT -#endif - -#if defined(HAVE_WINSOCK2_H) && !defined(__CYGWIN__) -#include -# ifdef HAVE_WS2TCPIP_H -# include -// fix for MingW not defining IPV6_V6ONLY -# define IPV6_V6ONLY 27 -# endif - -extern void shutdownWinSock(); -extern int initWinSock (); -extern const char* getWSErrorDescr(int err); -extern void* newAcceptParams(int sock, - int sz, - void* sockaddr); -extern int acceptNewSock(void* d); -extern int acceptDoProc(void* param); - -#else - -#ifdef HAVE_LIMITS_H -# include -#endif -#ifdef HAVE_STDLIB_H -# include -#endif -#ifdef HAVE_UNISTD_H -#include -#endif -#ifdef HAVE_SYS_TYPES_H -# include -#endif -#ifdef HAVE_FCNTL_H -# include -#endif -#ifdef HAVE_SYS_UIO_H -# include -#endif -#ifdef HAVE_SYS_SOCKET_H -# include -#endif -#ifdef HAVE_NETINET_TCP_H -# include -#endif -#ifdef HAVE_NETINET_IN_H -# include -#endif -#ifdef HAVE_SYS_UN_H -# include -#endif -#ifdef HAVE_ARPA_INET_H -# include -#endif -#ifdef HAVE_NETDB_H -#include -#endif - -#ifdef HAVE_BSD_SENDFILE -#include -#endif -#ifdef HAVE_LINUX_SENDFILE -#if !defined(__USE_FILE_OFFSET64) -#include -#endif -#endif - -extern int -sendFd(int sock, int outfd); - -extern int -recvFd(int sock); - -#endif /* HAVE_WINSOCK2_H && !__CYGWIN */ - -INLINE char * -my_inet_ntoa( -#if defined(HAVE_WINSOCK2_H) - u_long addr -#elif defined(HAVE_IN_ADDR_T) - in_addr_t addr -#elif defined(HAVE_INTTYPES_H) - u_int32_t addr -#else - unsigned long addr -#endif - ) -{ - struct in_addr a; - a.s_addr = addr; - return inet_ntoa(a); -} - -#ifdef HAVE_GETADDRINFO -INLINE int -hsnet_getnameinfo(const struct sockaddr* a,socklen_t b, 
char* c, -# if defined(HAVE_WINSOCK2_H) && !defined(__CYGWIN__) - DWORD d, char* e, DWORD f, int g) -# else - socklen_t d, char* e, socklen_t f, int g) -# endif -{ - return getnameinfo(a,b,c,d,e,f,g); -} - -INLINE int -hsnet_getaddrinfo(const char *hostname, const char *servname, - const struct addrinfo *hints, struct addrinfo **res) -{ - return getaddrinfo(hostname, servname, hints, res); -} - -INLINE void -hsnet_freeaddrinfo(struct addrinfo *ai) -{ - freeaddrinfo(ai); -} -#endif - -#if defined(HAVE_WINSOCK2_H) && !defined(cygwin32_HOST_OS) -# define WITH_WINSOCK 1 -#endif - -#if !defined(mingw32_HOST_OS) && !defined(_WIN32) -# define DOMAIN_SOCKET_SUPPORT 1 -#endif - -#if !defined(CALLCONV) -# if defined(WITH_WINSOCK) -# define CALLCONV stdcall -# else -# define CALLCONV ccall -# endif -#endif - -#if !defined(IOV_MAX) -# define IOV_MAX 1024 -#endif - -#if !defined(SOCK_NONBLOCK) // Missing define in Bionic libc (Android) -# define SOCK_NONBLOCK O_NONBLOCK -#endif - -#endif /* HSNET_H */ diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar2=/include/HsNetworkConfig.h cabal-install-1.22-1.22.9.0/=unpacked-tar2=/include/HsNetworkConfig.h --- cabal-install-1.22-1.22.6.0/=unpacked-tar2=/include/HsNetworkConfig.h 2014-09-02 18:14:23.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar2=/include/HsNetworkConfig.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,166 +0,0 @@ -/* include/HsNetworkConfig.h. Generated from HsNetworkConfig.h.in by configure. */ -/* include/HsNetworkConfig.h.in. Generated from configure.ac by autoheader. */ - -/* Define to 1 if you have the `accept4' function. */ -/* #undef HAVE_ACCEPT4 */ - -/* Define to 1 if you have the header file. */ -#define HAVE_ARPA_INET_H 1 - -/* Define to 1 if you have a BSDish sendfile(2) implementation. */ -#define HAVE_BSD_SENDFILE 1 - -/* Define to 1 if you have the declaration of `AI_ADDRCONFIG', and to 0 if you - don't. 
*/ -#define HAVE_DECL_AI_ADDRCONFIG 1 - -/* Define to 1 if you have the declaration of `AI_ALL', and to 0 if you don't. - */ -#define HAVE_DECL_AI_ALL 1 - -/* Define to 1 if you have the declaration of `AI_NUMERICSERV', and to 0 if - you don't. */ -#define HAVE_DECL_AI_NUMERICSERV 1 - -/* Define to 1 if you have the declaration of `AI_V4MAPPED', and to 0 if you - don't. */ -#define HAVE_DECL_AI_V4MAPPED 1 - -/* Define to 1 if you have the declaration of `IPPROTO_IP', and to 0 if you - don't. */ -#define HAVE_DECL_IPPROTO_IP 1 - -/* Define to 1 if you have the declaration of `IPPROTO_IPV6', and to 0 if you - don't. */ -#define HAVE_DECL_IPPROTO_IPV6 1 - -/* Define to 1 if you have the declaration of `IPPROTO_TCP', and to 0 if you - don't. */ -#define HAVE_DECL_IPPROTO_TCP 1 - -/* Define to 1 if you have the declaration of `IPV6_V6ONLY', and to 0 if you - don't. */ -#define HAVE_DECL_IPV6_V6ONLY 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_FCNTL_H 1 - -/* Define to 1 if you have the `gai_strerror' function. */ -#define HAVE_GAI_STRERROR 1 - -/* Define to 1 if you have the `getaddrinfo' function. */ -#define HAVE_GETADDRINFO 1 - -/* Define to 1 if you have the `gethostent' function. */ -#define HAVE_GETHOSTENT 1 - -/* Define to 1 if you have getpeereid. */ -#define HAVE_GETPEEREID 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_INTTYPES_H 1 - -/* Define to 1 if in_addr_t is available. */ -#define HAVE_IN_ADDR_T 1 - -/* Define to 1 if you have the `ws2_32' library (-lws2_32). */ -/* #undef HAVE_LIBWS2_32 */ - -/* Define to 1 if you have the header file. */ -#define HAVE_LIMITS_H 1 - -/* Define to 1 if you have a Linux sendfile(2) implementation. */ -/* #undef HAVE_LINUX_SENDFILE */ - -/* Define to 1 if you have the header file. */ -#define HAVE_MEMORY_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_NETDB_H 1 - -/* Define to 1 if you have the header file. 
*/ -#define HAVE_NETINET_IN_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_NETINET_TCP_H 1 - -/* Define to 1 if you have the `readlink' function. */ -#define HAVE_READLINK 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STDINT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STDLIB_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STRINGS_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STRING_H 1 - -/* Define to 1 if `msg_accrights' is a member of `struct msghdr'. */ -/* #undef HAVE_STRUCT_MSGHDR_MSG_ACCRIGHTS */ - -/* Define to 1 if `msg_control' is a member of `struct msghdr'. */ -#define HAVE_STRUCT_MSGHDR_MSG_CONTROL 1 - -/* Define to 1 if `sa_len' is a member of `struct sockaddr'. */ -#define HAVE_STRUCT_SOCKADDR_SA_LEN 1 - -/* Define to 1 if you have both SO_PEERCRED and struct ucred. */ -/* #undef HAVE_STRUCT_UCRED */ - -/* Define to 1 if you have the `symlink' function. */ -#define HAVE_SYMLINK 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_SOCKET_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_STAT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_TYPES_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_UIO_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_UN_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_UNISTD_H 1 - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_WINSOCK2_H */ - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_WS2TCPIP_H */ - -/* Define to 1 if the `getaddrinfo' function needs WINVER set. */ -/* #undef NEED_WINVER_XP */ - -/* Define to the address where bug reports for this package should be sent. */ -#define PACKAGE_BUGREPORT "libraries@haskell.org" - -/* Define to the full name of this package. 
*/ -#define PACKAGE_NAME "Haskell network package" - -/* Define to the full name and version of this package. */ -#define PACKAGE_STRING "Haskell network package 2.3.0.14" - -/* Define to the one symbol short name of this package. */ -#define PACKAGE_TARNAME "network" - -/* Define to the home page for this package. */ -#define PACKAGE_URL "" - -/* Define to the version of this package. */ -#define PACKAGE_VERSION "2.3.0.14" - -/* Define to 1 if you have the ANSI C header files. */ -#define STDC_HEADERS 1 - -/* Define to empty if `const' does not conform to ANSI C. */ -/* #undef const */ diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar2=/include/HsNetworkConfig.h.in cabal-install-1.22-1.22.9.0/=unpacked-tar2=/include/HsNetworkConfig.h.in --- cabal-install-1.22-1.22.6.0/=unpacked-tar2=/include/HsNetworkConfig.h.in 2014-09-02 18:14:23.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar2=/include/HsNetworkConfig.h.in 1970-01-01 00:00:00.000000000 +0000 @@ -1,165 +0,0 @@ -/* include/HsNetworkConfig.h.in. Generated from configure.ac by autoheader. */ - -/* Define to 1 if you have the `accept4' function. */ -#undef HAVE_ACCEPT4 - -/* Define to 1 if you have the header file. */ -#undef HAVE_ARPA_INET_H - -/* Define to 1 if you have a BSDish sendfile(2) implementation. */ -#undef HAVE_BSD_SENDFILE - -/* Define to 1 if you have the declaration of `AI_ADDRCONFIG', and to 0 if you - don't. */ -#undef HAVE_DECL_AI_ADDRCONFIG - -/* Define to 1 if you have the declaration of `AI_ALL', and to 0 if you don't. - */ -#undef HAVE_DECL_AI_ALL - -/* Define to 1 if you have the declaration of `AI_NUMERICSERV', and to 0 if - you don't. */ -#undef HAVE_DECL_AI_NUMERICSERV - -/* Define to 1 if you have the declaration of `AI_V4MAPPED', and to 0 if you - don't. */ -#undef HAVE_DECL_AI_V4MAPPED - -/* Define to 1 if you have the declaration of `IPPROTO_IP', and to 0 if you - don't. 
*/ -#undef HAVE_DECL_IPPROTO_IP - -/* Define to 1 if you have the declaration of `IPPROTO_IPV6', and to 0 if you - don't. */ -#undef HAVE_DECL_IPPROTO_IPV6 - -/* Define to 1 if you have the declaration of `IPPROTO_TCP', and to 0 if you - don't. */ -#undef HAVE_DECL_IPPROTO_TCP - -/* Define to 1 if you have the declaration of `IPV6_V6ONLY', and to 0 if you - don't. */ -#undef HAVE_DECL_IPV6_V6ONLY - -/* Define to 1 if you have the header file. */ -#undef HAVE_FCNTL_H - -/* Define to 1 if you have the `gai_strerror' function. */ -#undef HAVE_GAI_STRERROR - -/* Define to 1 if you have the `getaddrinfo' function. */ -#undef HAVE_GETADDRINFO - -/* Define to 1 if you have the `gethostent' function. */ -#undef HAVE_GETHOSTENT - -/* Define to 1 if you have getpeereid. */ -#undef HAVE_GETPEEREID - -/* Define to 1 if you have the header file. */ -#undef HAVE_INTTYPES_H - -/* Define to 1 if in_addr_t is available. */ -#undef HAVE_IN_ADDR_T - -/* Define to 1 if you have the `ws2_32' library (-lws2_32). */ -#undef HAVE_LIBWS2_32 - -/* Define to 1 if you have the header file. */ -#undef HAVE_LIMITS_H - -/* Define to 1 if you have a Linux sendfile(2) implementation. */ -#undef HAVE_LINUX_SENDFILE - -/* Define to 1 if you have the header file. */ -#undef HAVE_MEMORY_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_NETDB_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_NETINET_IN_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_NETINET_TCP_H - -/* Define to 1 if you have the `readlink' function. */ -#undef HAVE_READLINK - -/* Define to 1 if you have the header file. */ -#undef HAVE_STDINT_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_STDLIB_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_STRINGS_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_STRING_H - -/* Define to 1 if `msg_accrights' is a member of `struct msghdr'. 
*/ -#undef HAVE_STRUCT_MSGHDR_MSG_ACCRIGHTS - -/* Define to 1 if `msg_control' is a member of `struct msghdr'. */ -#undef HAVE_STRUCT_MSGHDR_MSG_CONTROL - -/* Define to 1 if `sa_len' is a member of `struct sockaddr'. */ -#undef HAVE_STRUCT_SOCKADDR_SA_LEN - -/* Define to 1 if you have both SO_PEERCRED and struct ucred. */ -#undef HAVE_STRUCT_UCRED - -/* Define to 1 if you have the `symlink' function. */ -#undef HAVE_SYMLINK - -/* Define to 1 if you have the header file. */ -#undef HAVE_SYS_SOCKET_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_SYS_STAT_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_SYS_TYPES_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_SYS_UIO_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_SYS_UN_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_UNISTD_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_WINSOCK2_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_WS2TCPIP_H - -/* Define to 1 if the `getaddrinfo' function needs WINVER set. */ -#undef NEED_WINVER_XP - -/* Define to the address where bug reports for this package should be sent. */ -#undef PACKAGE_BUGREPORT - -/* Define to the full name of this package. */ -#undef PACKAGE_NAME - -/* Define to the full name and version of this package. */ -#undef PACKAGE_STRING - -/* Define to the one symbol short name of this package. */ -#undef PACKAGE_TARNAME - -/* Define to the home page for this package. */ -#undef PACKAGE_URL - -/* Define to the version of this package. */ -#undef PACKAGE_VERSION - -/* Define to 1 if you have the ANSI C header files. */ -#undef STDC_HEADERS - -/* Define to empty if `const' does not conform to ANSI C. 
*/ -#undef const diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar2=/install-sh cabal-install-1.22-1.22.9.0/=unpacked-tar2=/install-sh --- cabal-install-1.22-1.22.6.0/=unpacked-tar2=/install-sh 2014-09-02 18:14:23.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar2=/install-sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,295 +0,0 @@ -#!/bin/sh -# install - install a program, script, or datafile - -scriptversion=2003-09-24.23 - -# This originates from X11R5 (mit/util/scripts/install.sh), which was -# later released in X11R6 (xc/config/util/install.sh) with the -# following copyright and license. -# -# Copyright (C) 1994 X Consortium -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to -# deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -# sell copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN -# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC- -# TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -# -# Except as contained in this notice, the name of the X Consortium shall not -# be used in advertising or otherwise to promote the sale, use or other deal- -# ings in this Software without prior written authorization from the X Consor- -# tium. 
-# -# -# FSF changes to this file are in the public domain. -# -# Calling this script install-sh is preferred over install.sh, to prevent -# `make' implicit rules from creating a file called install from it -# when there is no Makefile. -# -# This script is compatible with the BSD install script, but was written -# from scratch. It can only install one file at a time, a restriction -# shared with many OS's install programs. - -# set DOITPROG to echo to test this script - -# Don't use :- since 4.3BSD and earlier shells don't like it. -doit="${DOITPROG-}" - -# put in absolute paths if you don't have them in your path; or use env. vars. - -mvprog="${MVPROG-mv}" -cpprog="${CPPROG-cp}" -chmodprog="${CHMODPROG-chmod}" -chownprog="${CHOWNPROG-chown}" -chgrpprog="${CHGRPPROG-chgrp}" -stripprog="${STRIPPROG-strip}" -rmprog="${RMPROG-rm}" -mkdirprog="${MKDIRPROG-mkdir}" - -transformbasename= -transform_arg= -instcmd="$mvprog" -chmodcmd="$chmodprog 0755" -chowncmd= -chgrpcmd= -stripcmd= -rmcmd="$rmprog -f" -mvcmd="$mvprog" -src= -dst= -dir_arg= - -usage="Usage: $0 [OPTION]... SRCFILE DSTFILE - or: $0 -d DIR1 DIR2... - -In the first form, install SRCFILE to DSTFILE, removing SRCFILE by default. -In the second, create the directory path DIR. - -Options: --b=TRANSFORMBASENAME --c copy source (using $cpprog) instead of moving (using $mvprog). --d create directories instead of installing files. --g GROUP $chgrp installed files to GROUP. --m MODE $chmod installed files to MODE. --o USER $chown installed files to USER. --s strip installed files (using $stripprog). --t=TRANSFORM ---help display this help and exit. ---version display version info and exit. 
- -Environment variables override the default commands: - CHGRPPROG CHMODPROG CHOWNPROG CPPROG MKDIRPROG MVPROG RMPROG STRIPPROG -" - -while test -n "$1"; do - case $1 in - -b=*) transformbasename=`echo $1 | sed 's/-b=//'` - shift - continue;; - - -c) instcmd=$cpprog - shift - continue;; - - -d) dir_arg=true - shift - continue;; - - -g) chgrpcmd="$chgrpprog $2" - shift - shift - continue;; - - --help) echo "$usage"; exit 0;; - - -m) chmodcmd="$chmodprog $2" - shift - shift - continue;; - - -o) chowncmd="$chownprog $2" - shift - shift - continue;; - - -s) stripcmd=$stripprog - shift - continue;; - - -t=*) transformarg=`echo $1 | sed 's/-t=//'` - shift - continue;; - - --version) echo "$0 $scriptversion"; exit 0;; - - *) if test -z "$src"; then - src=$1 - else - # this colon is to work around a 386BSD /bin/sh bug - : - dst=$1 - fi - shift - continue;; - esac -done - -if test -z "$src"; then - echo "$0: no input file specified." >&2 - exit 1 -fi - -# Protect names starting with `-'. -case $src in - -*) src=./$src ;; -esac - -if test -n "$dir_arg"; then - dst=$src - src= - - if test -d "$dst"; then - instcmd=: - chmodcmd= - else - instcmd=$mkdirprog - fi -else - # Waiting for this to be detected by the "$instcmd $src $dsttmp" command - # might cause directories to be created, which would be especially bad - # if $src (and thus $dsttmp) contains '*'. - if test ! -f "$src" && test ! -d "$src"; then - echo "$0: $src does not exist." >&2 - exit 1 - fi - - if test -z "$dst"; then - echo "$0: no destination specified." >&2 - exit 1 - fi - - # Protect names starting with `-'. - case $dst in - -*) dst=./$dst ;; - esac - - # If destination is a directory, append the input filename; won't work - # if double slashes aren't ignored. - if test -d "$dst"; then - dst=$dst/`basename "$src"` - fi -fi - -# This sed command emulates the dirname command. -dstdir=`echo "$dst" | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'` - -# Make sure that the destination directory exists. 
- -# Skip lots of stat calls in the usual case. -if test ! -d "$dstdir"; then - defaultIFS=' - ' - IFS="${IFS-$defaultIFS}" - - oIFS=$IFS - # Some sh's can't handle IFS=/ for some reason. - IFS='%' - set - `echo "$dstdir" | sed -e 's@/@%@g' -e 's@^%@/@'` - IFS=$oIFS - - pathcomp= - - while test $# -ne 0 ; do - pathcomp=$pathcomp$1 - shift - test -d "$pathcomp" || $mkdirprog "$pathcomp" - pathcomp=$pathcomp/ - done -fi - -if test -n "$dir_arg"; then - $doit $instcmd "$dst" \ - && { test -z "$chowncmd" || $doit $chowncmd "$dst"; } \ - && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } \ - && { test -z "$stripcmd" || $doit $stripcmd "$dst"; } \ - && { test -z "$chmodcmd" || $doit $chmodcmd "$dst"; } - -else - # If we're going to rename the final executable, determine the name now. - if test -z "$transformarg"; then - dstfile=`basename "$dst"` - else - dstfile=`basename "$dst" $transformbasename \ - | sed $transformarg`$transformbasename - fi - - # don't allow the sed command to completely eliminate the filename. - test -z "$dstfile" && dstfile=`basename "$dst"` - - # Make a couple of temp file names in the proper directory. - dsttmp=$dstdir/_inst.$$_ - rmtmp=$dstdir/_rm.$$_ - - # Trap to clean up those temp files at exit. - trap 'status=$?; rm -f "$dsttmp" "$rmtmp" && exit $status' 0 - trap '(exit $?); exit' 1 2 13 15 - - # Move or copy the file name to the temp name - $doit $instcmd "$src" "$dsttmp" && - - # and set any options; do chmod last to preserve setuid bits. - # - # If any of these fail, we abort the whole thing. If we want to - # ignore errors from any of these, just make sure not to ignore - # errors from the above "$doit $instcmd $src $dsttmp" command. 
- # - { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } \ - && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } \ - && { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } \ - && { test -z "$chmodcmd" || $doit $chmodcmd "$dsttmp"; } && - - # Now remove or move aside any old file at destination location. We - # try this two ways since rm can't unlink itself on some systems and - # the destination file might be busy for other reasons. In this case, - # the final cleanup might fail but the new file should still install - # successfully. - { - if test -f "$dstdir/$dstfile"; then - $doit $rmcmd -f "$dstdir/$dstfile" 2>/dev/null \ - || $doit $mvcmd -f "$dstdir/$dstfile" "$rmtmp" 2>/dev/null \ - || { - echo "$0: cannot unlink or rename $dstdir/$dstfile" >&2 - (exit 1); exit - } - else - : - fi - } && - - # Now rename the file to the real destination. - $doit $mvcmd "$dsttmp" "$dstdir/$dstfile" -fi && - -# The final little trick to "correctly" pass the exit status to the exit trap. -{ - (exit 0); exit -} - -# Local variables: -# eval: (add-hook 'write-file-hooks 'time-stamp) -# time-stamp-start: "scriptversion=" -# time-stamp-format: "%:y-%02m-%02d.%02H" -# time-stamp-end: "$" -# End: diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar2=/LICENSE cabal-install-1.22-1.22.9.0/=unpacked-tar2=/LICENSE --- cabal-install-1.22-1.22.6.0/=unpacked-tar2=/LICENSE 2014-09-02 18:14:23.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar2=/LICENSE 1970-01-01 00:00:00.000000000 +0000 @@ -1,29 +0,0 @@ -Copyright (c) 2002-2010, The University Court of the University of Glasgow. -Copyright (c) 2007-2010, Johan Tibell - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -- Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. 
- -- Redistributions in binary form must reproduce the above copyright notice, -this list of conditions and the following disclaimer in the documentation -and/or other materials provided with the distribution. - -- Neither name of the University nor the names of its contributors may be -used to endorse or promote products derived from this software without -specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY COURT OF THE UNIVERSITY OF -GLASGOW AND THE CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, -INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND -FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -UNIVERSITY COURT OF THE UNIVERSITY OF GLASGOW OR THE CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -DAMAGE. 
diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar2=/Network/BSD.hsc cabal-install-1.22-1.22.9.0/=unpacked-tar2=/Network/BSD.hsc --- cabal-install-1.22-1.22.6.0/=unpacked-tar2=/Network/BSD.hsc 2014-09-02 18:14:23.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar2=/Network/BSD.hsc 1970-01-01 00:00:00.000000000 +0000 @@ -1,572 +0,0 @@ -{-# LANGUAGE CPP, ForeignFunctionInterface #-} ------------------------------------------------------------------------------ --- | --- Module : Network.BSD --- Copyright : (c) The University of Glasgow 2001 --- License : BSD-style (see the file libraries/network/LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable --- --- The "Network.BSD" module defines Haskell bindings to network --- programming functionality provided by BSD Unix derivatives. --- ------------------------------------------------------------------------------ - -#include "HsNet.h" - -module Network.BSD - ( - -- * Host names - HostName - , getHostName - - , HostEntry(..) - , getHostByName - , getHostByAddr - , hostAddress - -#if defined(HAVE_GETHOSTENT) && !defined(cygwin32_HOST_OS) && !defined(mingw32_HOST_OS) && !defined(_WIN32) - , getHostEntries - - -- ** Low level functionality - , setHostEntry - , getHostEntry - , endHostEntry -#endif - - -- * Service names - , ServiceEntry(..) - , ServiceName - , getServiceByName - , getServiceByPort - , getServicePortNumber - -#if !defined(cygwin32_HOST_OS) && !defined(mingw32_HOST_OS) && !defined(_WIN32) - , getServiceEntries - - -- ** Low level functionality - , getServiceEntry - , setServiceEntry - , endServiceEntry -#endif - - -- * Protocol names - , ProtocolName - , ProtocolNumber - , ProtocolEntry(..) 
- , getProtocolByName - , getProtocolByNumber - , getProtocolNumber - , defaultProtocol - -#if !defined(cygwin32_HOST_OS) && !defined(mingw32_HOST_OS) && !defined(_WIN32) - , getProtocolEntries - -- ** Low level functionality - , setProtocolEntry - , getProtocolEntry - , endProtocolEntry -#endif - - -- * Port numbers - , PortNumber - - -- * Network names - , NetworkName - , NetworkAddr - , NetworkEntry(..) - -#if !defined(cygwin32_HOST_OS) && !defined(mingw32_HOST_OS) && !defined(_WIN32) - , getNetworkByName - , getNetworkByAddr - , getNetworkEntries - -- ** Low level functionality - , setNetworkEntry - , getNetworkEntry - , endNetworkEntry -#endif - ) where - -import Network.Socket - -import Control.Concurrent (MVar, newMVar, withMVar) -import qualified Control.Exception as E -import Foreign.C.String (CString, peekCString, withCString) -#if defined(HAVE_WINSOCK2_H) && !defined(cygwin32_HOST_OS) -import Foreign.C.Types ( CShort ) -#endif -import Foreign.C.Types ( CInt(..), CULong(..), CSize(..) ) -import Foreign.Ptr (Ptr, nullPtr) -import Foreign.Storable (Storable(..)) -import Foreign.Marshal.Array (allocaArray0, peekArray0) -import Foreign.Marshal.Utils (with, fromBool) -import Data.Typeable -import System.IO.Error (ioeSetErrorString, mkIOError) -import System.IO.Unsafe (unsafePerformIO) - -import GHC.IO.Exception - -import Control.Monad (liftM) - -import Network.Socket.Internal (throwSocketErrorIfMinus1_) - --- --------------------------------------------------------------------------- --- Basic Types - -type ProtocolName = String - --- --------------------------------------------------------------------------- --- Service Database Access - --- Calling getServiceByName for a given service and protocol returns --- the systems service entry. This should be used to find the port --- numbers for standard protocols such as SMTP and FTP. The remaining --- three functions should be used for browsing the service database --- sequentially. 
- --- Calling setServiceEntry with True indicates that the service --- database should be left open between calls to getServiceEntry. To --- close the database a call to endServiceEntry is required. This --- database file is usually stored in the file /etc/services. - -data ServiceEntry = - ServiceEntry { - serviceName :: ServiceName, -- Official Name - serviceAliases :: [ServiceName], -- aliases - servicePort :: PortNumber, -- Port Number ( network byte order ) - serviceProtocol :: ProtocolName -- Protocol - } deriving (Show, Typeable) - -instance Storable ServiceEntry where - sizeOf _ = #const sizeof(struct servent) - alignment _ = alignment (undefined :: CInt) -- ??? - - peek p = do - s_name <- (#peek struct servent, s_name) p >>= peekCString - s_aliases <- (#peek struct servent, s_aliases) p - >>= peekArray0 nullPtr - >>= mapM peekCString - s_port <- (#peek struct servent, s_port) p - s_proto <- (#peek struct servent, s_proto) p >>= peekCString - return (ServiceEntry { - serviceName = s_name, - serviceAliases = s_aliases, -#if defined(HAVE_WINSOCK2_H) && !defined(cygwin32_HOST_OS) - servicePort = PortNum (fromIntegral (s_port :: CShort)), -#else - -- s_port is already in network byte order, but it - -- might be the wrong size. - servicePort = PortNum (fromIntegral (s_port :: CInt)), -#endif - serviceProtocol = s_proto - }) - - poke _p = error "Storable.poke(BSD.ServiceEntry) not implemented" - - --- | Get service by name. 
-getServiceByName :: ServiceName -- Service Name - -> ProtocolName -- Protocol Name - -> IO ServiceEntry -- Service Entry -getServiceByName name proto = withLock $ do - withCString name $ \ cstr_name -> do - withCString proto $ \ cstr_proto -> do - throwNoSuchThingIfNull "getServiceByName" "no such service entry" - $ (trySysCall (c_getservbyname cstr_name cstr_proto)) - >>= peek - -foreign import CALLCONV unsafe "getservbyname" - c_getservbyname :: CString -> CString -> IO (Ptr ServiceEntry) - --- | Get the service given a 'PortNumber' and 'ProtocolName'. -getServiceByPort :: PortNumber -> ProtocolName -> IO ServiceEntry -getServiceByPort (PortNum port) proto = withLock $ do - withCString proto $ \ cstr_proto -> do - throwNoSuchThingIfNull "getServiceByPort" "no such service entry" - $ (trySysCall (c_getservbyport (fromIntegral port) cstr_proto)) - >>= peek - -foreign import CALLCONV unsafe "getservbyport" - c_getservbyport :: CInt -> CString -> IO (Ptr ServiceEntry) - --- | Get the 'PortNumber' corresponding to the 'ServiceName'. 
-getServicePortNumber :: ServiceName -> IO PortNumber -getServicePortNumber name = do - (ServiceEntry _ _ port _) <- getServiceByName name "tcp" - return port - -#if !defined(cygwin32_HOST_OS) && !defined(mingw32_HOST_OS) && !defined(_WIN32) -getServiceEntry :: IO ServiceEntry -getServiceEntry = withLock $ do - throwNoSuchThingIfNull "getServiceEntry" "no such service entry" - $ trySysCall c_getservent - >>= peek - -foreign import ccall unsafe "getservent" c_getservent :: IO (Ptr ServiceEntry) - -setServiceEntry :: Bool -> IO () -setServiceEntry flg = withLock $ trySysCall $ c_setservent (fromBool flg) - -foreign import ccall unsafe "setservent" c_setservent :: CInt -> IO () - -endServiceEntry :: IO () -endServiceEntry = withLock $ trySysCall $ c_endservent - -foreign import ccall unsafe "endservent" c_endservent :: IO () - -getServiceEntries :: Bool -> IO [ServiceEntry] -getServiceEntries stayOpen = do - setServiceEntry stayOpen - getEntries (getServiceEntry) (endServiceEntry) -#endif - --- --------------------------------------------------------------------------- --- Protocol Entries - --- The following relate directly to the corresponding UNIX C --- calls for returning the protocol entries. The protocol entry is --- represented by the Haskell type ProtocolEntry. - --- As for setServiceEntry above, calling setProtocolEntry. --- determines whether or not the protocol database file, usually --- @/etc/protocols@, is to be kept open between calls of --- getProtocolEntry. Similarly, - -data ProtocolEntry = - ProtocolEntry { - protoName :: ProtocolName, -- Official Name - protoAliases :: [ProtocolName], -- aliases - protoNumber :: ProtocolNumber -- Protocol Number - } deriving (Read, Show, Typeable) - -instance Storable ProtocolEntry where - sizeOf _ = #const sizeof(struct protoent) - alignment _ = alignment (undefined :: CInt) -- ??? 
- - peek p = do - p_name <- (#peek struct protoent, p_name) p >>= peekCString - p_aliases <- (#peek struct protoent, p_aliases) p - >>= peekArray0 nullPtr - >>= mapM peekCString -#if defined(HAVE_WINSOCK2_H) && !defined(cygwin32_HOST_OS) - -- With WinSock, the protocol number is only a short; - -- hoist it in as such, but represent it on the Haskell side - -- as a CInt. - p_proto_short <- (#peek struct protoent, p_proto) p - let p_proto = fromIntegral (p_proto_short :: CShort) -#else - p_proto <- (#peek struct protoent, p_proto) p -#endif - return (ProtocolEntry { - protoName = p_name, - protoAliases = p_aliases, - protoNumber = p_proto - }) - - poke _p = error "Storable.poke(BSD.ProtocolEntry) not implemented" - -getProtocolByName :: ProtocolName -> IO ProtocolEntry -getProtocolByName name = withLock $ do - withCString name $ \ name_cstr -> do - throwNoSuchThingIfNull "getProtocolByName" ("no such protocol name: " ++ name) - $ (trySysCall.c_getprotobyname) name_cstr - >>= peek - -foreign import CALLCONV unsafe "getprotobyname" - c_getprotobyname :: CString -> IO (Ptr ProtocolEntry) - - -getProtocolByNumber :: ProtocolNumber -> IO ProtocolEntry -getProtocolByNumber num = withLock $ do - throwNoSuchThingIfNull "getProtocolByNumber" ("no such protocol number: " ++ show num) - $ (trySysCall.c_getprotobynumber) (fromIntegral num) - >>= peek - -foreign import CALLCONV unsafe "getprotobynumber" - c_getprotobynumber :: CInt -> IO (Ptr ProtocolEntry) - - -getProtocolNumber :: ProtocolName -> IO ProtocolNumber -getProtocolNumber proto = do - (ProtocolEntry _ _ num) <- getProtocolByName proto - return num - -#if !defined(cygwin32_HOST_OS) && !defined(mingw32_HOST_OS) && !defined(_WIN32) -getProtocolEntry :: IO ProtocolEntry -- Next Protocol Entry from DB -getProtocolEntry = withLock $ do - ent <- throwNoSuchThingIfNull "getProtocolEntry" "no such protocol entry" - $ trySysCall c_getprotoent - peek ent - -foreign import ccall unsafe "getprotoent" c_getprotoent :: IO (Ptr 
ProtocolEntry) - -setProtocolEntry :: Bool -> IO () -- Keep DB Open ? -setProtocolEntry flg = withLock $ trySysCall $ c_setprotoent (fromBool flg) - -foreign import ccall unsafe "setprotoent" c_setprotoent :: CInt -> IO () - -endProtocolEntry :: IO () -endProtocolEntry = withLock $ trySysCall $ c_endprotoent - -foreign import ccall unsafe "endprotoent" c_endprotoent :: IO () - -getProtocolEntries :: Bool -> IO [ProtocolEntry] -getProtocolEntries stayOpen = withLock $ do - setProtocolEntry stayOpen - getEntries (getProtocolEntry) (endProtocolEntry) -#endif - --- --------------------------------------------------------------------------- --- Host lookups - -data HostEntry = - HostEntry { - hostName :: HostName, -- Official Name - hostAliases :: [HostName], -- aliases - hostFamily :: Family, -- Host Type (currently AF_INET) - hostAddresses :: [HostAddress] -- Set of Network Addresses (in network byte order) - } deriving (Read, Show, Typeable) - -instance Storable HostEntry where - sizeOf _ = #const sizeof(struct hostent) - alignment _ = alignment (undefined :: CInt) -- ??? 
- - peek p = do - h_name <- (#peek struct hostent, h_name) p >>= peekCString - h_aliases <- (#peek struct hostent, h_aliases) p - >>= peekArray0 nullPtr - >>= mapM peekCString - h_addrtype <- (#peek struct hostent, h_addrtype) p - -- h_length <- (#peek struct hostent, h_length) p - h_addr_list <- (#peek struct hostent, h_addr_list) p - >>= peekArray0 nullPtr - >>= mapM peek - return (HostEntry { - hostName = h_name, - hostAliases = h_aliases, -#if defined(HAVE_WINSOCK2_H) && !defined(cygwin32_HOST_OS) - hostFamily = unpackFamily (fromIntegral (h_addrtype :: CShort)), -#else - hostFamily = unpackFamily h_addrtype, -#endif - hostAddresses = h_addr_list - }) - - poke _p = error "Storable.poke(BSD.ServiceEntry) not implemented" - - --- convenience function: -hostAddress :: HostEntry -> HostAddress -hostAddress (HostEntry nm _ _ ls) = - case ls of - [] -> error ("BSD.hostAddress: empty network address list for " ++ nm) - (x:_) -> x - --- getHostByName must use the same lock as the *hostent functions --- may cause problems if called concurrently. - --- | Resolve a 'HostName' to IPv4 address. -getHostByName :: HostName -> IO HostEntry -getHostByName name = withLock $ do - withCString name $ \ name_cstr -> do - ent <- throwNoSuchThingIfNull "getHostByName" "no such host entry" - $ trySysCall $ c_gethostbyname name_cstr - peek ent - -foreign import CALLCONV safe "gethostbyname" - c_gethostbyname :: CString -> IO (Ptr HostEntry) - - --- The locking of gethostbyaddr is similar to gethostbyname. --- | Get a 'HostEntry' corresponding to the given address and family. --- Note that only IPv4 is currently supported. 
-getHostByAddr :: Family -> HostAddress -> IO HostEntry -getHostByAddr family addr = do - with addr $ \ ptr_addr -> withLock $ do - throwNoSuchThingIfNull "getHostByAddr" "no such host entry" - $ trySysCall $ c_gethostbyaddr ptr_addr (fromIntegral (sizeOf addr)) (packFamily family) - >>= peek - -foreign import CALLCONV safe "gethostbyaddr" - c_gethostbyaddr :: Ptr HostAddress -> CInt -> CInt -> IO (Ptr HostEntry) - -#if defined(HAVE_GETHOSTENT) && !defined(cygwin32_HOST_OS) && !defined(mingw32_HOST_OS) && !defined(_WIN32) -getHostEntry :: IO HostEntry -getHostEntry = withLock $ do - throwNoSuchThingIfNull "getHostEntry" "unable to retrieve host entry" - $ trySysCall $ c_gethostent - >>= peek - -foreign import ccall unsafe "gethostent" c_gethostent :: IO (Ptr HostEntry) - -setHostEntry :: Bool -> IO () -setHostEntry flg = withLock $ trySysCall $ c_sethostent (fromBool flg) - -foreign import ccall unsafe "sethostent" c_sethostent :: CInt -> IO () - -endHostEntry :: IO () -endHostEntry = withLock $ c_endhostent - -foreign import ccall unsafe "endhostent" c_endhostent :: IO () - -getHostEntries :: Bool -> IO [HostEntry] -getHostEntries stayOpen = do - setHostEntry stayOpen - getEntries (getHostEntry) (endHostEntry) -#endif - --- --------------------------------------------------------------------------- --- Accessing network information - --- Same set of access functions as for accessing host,protocol and --- service system info, this time for the types of networks supported. - --- network addresses are represented in host byte order. -type NetworkAddr = CULong - -type NetworkName = String - -data NetworkEntry = - NetworkEntry { - networkName :: NetworkName, -- official name - networkAliases :: [NetworkName], -- aliases - networkFamily :: Family, -- type - networkAddress :: NetworkAddr - } deriving (Read, Show, Typeable) - -instance Storable NetworkEntry where - sizeOf _ = #const sizeof(struct hostent) - alignment _ = alignment (undefined :: CInt) -- ??? 
- - peek p = do - n_name <- (#peek struct netent, n_name) p >>= peekCString - n_aliases <- (#peek struct netent, n_aliases) p - >>= peekArray0 nullPtr - >>= mapM peekCString - n_addrtype <- (#peek struct netent, n_addrtype) p - n_net <- (#peek struct netent, n_net) p - return (NetworkEntry { - networkName = n_name, - networkAliases = n_aliases, - networkFamily = unpackFamily (fromIntegral - (n_addrtype :: CInt)), - networkAddress = n_net - }) - - poke _p = error "Storable.poke(BSD.NetEntry) not implemented" - - -#if !defined(cygwin32_HOST_OS) && !defined(mingw32_HOST_OS) && !defined(_WIN32) -getNetworkByName :: NetworkName -> IO NetworkEntry -getNetworkByName name = withLock $ do - withCString name $ \ name_cstr -> do - throwNoSuchThingIfNull "getNetworkByName" "no such network entry" - $ trySysCall $ c_getnetbyname name_cstr - >>= peek - -foreign import ccall unsafe "getnetbyname" - c_getnetbyname :: CString -> IO (Ptr NetworkEntry) - -getNetworkByAddr :: NetworkAddr -> Family -> IO NetworkEntry -getNetworkByAddr addr family = withLock $ do - throwNoSuchThingIfNull "getNetworkByAddr" "no such network entry" - $ trySysCall $ c_getnetbyaddr addr (packFamily family) - >>= peek - -foreign import ccall unsafe "getnetbyaddr" - c_getnetbyaddr :: NetworkAddr -> CInt -> IO (Ptr NetworkEntry) - -getNetworkEntry :: IO NetworkEntry -getNetworkEntry = withLock $ do - throwNoSuchThingIfNull "getNetworkEntry" "no more network entries" - $ trySysCall $ c_getnetent - >>= peek - -foreign import ccall unsafe "getnetent" c_getnetent :: IO (Ptr NetworkEntry) - --- | Open the network name database. The parameter specifies --- whether a connection is maintained open between various --- networkEntry calls -setNetworkEntry :: Bool -> IO () -setNetworkEntry flg = withLock $ trySysCall $ c_setnetent (fromBool flg) - -foreign import ccall unsafe "setnetent" c_setnetent :: CInt -> IO () - --- | Close the connection to the network name database. 
-endNetworkEntry :: IO () -endNetworkEntry = withLock $ trySysCall $ c_endnetent - -foreign import ccall unsafe "endnetent" c_endnetent :: IO () - --- | Get the list of network entries. -getNetworkEntries :: Bool -> IO [NetworkEntry] -getNetworkEntries stayOpen = do - setNetworkEntry stayOpen - getEntries (getNetworkEntry) (endNetworkEntry) -#endif - --- Mutex for name service lockdown - -{-# NOINLINE lock #-} -lock :: MVar () -lock = unsafePerformIO $ newMVar () - -withLock :: IO a -> IO a -withLock act = withMVar lock (\_ -> act) - --- --------------------------------------------------------------------------- --- Miscellaneous Functions - --- | Calling getHostName returns the standard host name for the current --- processor, as set at boot time. - -getHostName :: IO HostName -getHostName = do - let size = 256 - allocaArray0 size $ \ cstr -> do - throwSocketErrorIfMinus1_ "getHostName" $ c_gethostname cstr (fromIntegral size) - peekCString cstr - -foreign import CALLCONV unsafe "gethostname" - c_gethostname :: CString -> CSize -> IO CInt - --- Helper function used by the exported functions that provides a --- Haskellised view of the enumerator functions: - -getEntries :: IO a -- read - -> IO () -- at end - -> IO [a] -getEntries getOne atEnd = loop - where - loop = do - vv <- E.catch (liftM Just getOne) - (\ e -> let _types = e :: IOException in return Nothing) - case vv of - Nothing -> return [] - Just v -> loop >>= \ vs -> atEnd >> return (v:vs) - - --- --------------------------------------------------------------------------- --- Winsock only: --- The BSD API networking calls made locally return NULL upon failure. --- That failure may very well be due to WinSock not being initialised, --- so if NULL is seen try init'ing and repeat the call. 
-#if !defined(mingw32_HOST_OS) && !defined(_WIN32) -trySysCall :: IO a -> IO a -trySysCall act = act -#else -trySysCall :: IO (Ptr a) -> IO (Ptr a) -trySysCall act = do - ptr <- act - if (ptr == nullPtr) - then withSocketsDo act - else return ptr -#endif - -throwNoSuchThingIfNull :: String -> String -> IO (Ptr a) -> IO (Ptr a) -throwNoSuchThingIfNull loc desc act = do - ptr <- act - if (ptr == nullPtr) - then ioError (ioeSetErrorString (mkIOError NoSuchThing loc Nothing Nothing) desc) - else return ptr diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar2=/Network/Socket/ByteString/Internal.hs cabal-install-1.22-1.22.9.0/=unpacked-tar2=/Network/Socket/ByteString/Internal.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar2=/Network/Socket/ByteString/Internal.hs 2014-09-02 18:14:23.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar2=/Network/Socket/ByteString/Internal.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,45 +0,0 @@ -{-# LANGUAGE CPP, ForeignFunctionInterface #-} - --- | --- Module : Network.Socket.ByteString.Internal --- Copyright : (c) Johan Tibell 2007-2010 --- License : BSD-style --- --- Maintainer : johan.tibell@gmail.com --- Stability : stable --- Portability : portable --- -module Network.Socket.ByteString.Internal - ( - mkInvalidRecvArgError -#if !defined(mingw32_HOST_OS) - , c_writev - , c_sendmsg -#endif - ) where - -import System.IO.Error (ioeSetErrorString, mkIOError) - -#if !defined(mingw32_HOST_OS) -import Foreign.C.Types (CInt(..)) -import System.Posix.Types (CSsize(..)) -import Foreign.Ptr (Ptr) - -import Network.Socket.ByteString.IOVec (IOVec) -import Network.Socket.ByteString.MsgHdr (MsgHdr) -#endif - -import GHC.IO.Exception (IOErrorType(..)) - -mkInvalidRecvArgError :: String -> IOError -mkInvalidRecvArgError loc = ioeSetErrorString (mkIOError - InvalidArgument - loc Nothing Nothing) "non-positive length" - -#if !defined(mingw32_HOST_OS) -foreign import ccall unsafe "writev" - c_writev :: CInt -> Ptr IOVec -> CInt -> IO CSsize - 
-foreign import ccall unsafe "sendmsg" - c_sendmsg :: CInt -> Ptr MsgHdr -> CInt -> IO CSsize -#endif diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar2=/Network/Socket/ByteString/IOVec.hsc cabal-install-1.22-1.22.9.0/=unpacked-tar2=/Network/Socket/ByteString/IOVec.hsc --- cabal-install-1.22-1.22.6.0/=unpacked-tar2=/Network/Socket/ByteString/IOVec.hsc 2014-09-02 18:14:23.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar2=/Network/Socket/ByteString/IOVec.hsc 1970-01-01 00:00:00.000000000 +0000 @@ -1,31 +0,0 @@ -{-# OPTIONS_GHC -funbox-strict-fields #-} - --- | Support module for the POSIX writev system call. -module Network.Socket.ByteString.IOVec - ( IOVec(..) - ) where - -import Foreign.C.Types (CChar, CInt, CSize) -import Foreign.Ptr (Ptr) -import Foreign.Storable (Storable(..)) - -#include -#include - -data IOVec = IOVec - { iovBase :: !(Ptr CChar) - , iovLen :: !CSize - } - -instance Storable IOVec where - sizeOf _ = (#const sizeof(struct iovec)) - alignment _ = alignment (undefined :: CInt) - - peek p = do - base <- (#peek struct iovec, iov_base) p - len <- (#peek struct iovec, iov_len) p - return $ IOVec base len - - poke p iov = do - (#poke struct iovec, iov_base) p (iovBase iov) - (#poke struct iovec, iov_len) p (iovLen iov) diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar2=/Network/Socket/ByteString/Lazy/Posix.hs cabal-install-1.22-1.22.9.0/=unpacked-tar2=/Network/Socket/ByteString/Lazy/Posix.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar2=/Network/Socket/ByteString/Lazy/Posix.hs 2014-09-02 18:14:23.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar2=/Network/Socket/ByteString/Lazy/Posix.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,56 +0,0 @@ -{-# LANGUAGE BangPatterns #-} -module Network.Socket.ByteString.Lazy.Posix - ( - -- * Send data to a socket - send - , sendAll - ) where - -import Control.Monad (liftM) -import Control.Monad (unless) -import qualified Data.ByteString.Lazy as L -import Data.ByteString.Lazy.Internal 
(ByteString(..)) -import Data.ByteString.Unsafe (unsafeUseAsCStringLen) -import Data.Int (Int64) -import Foreign.Marshal.Array (allocaArray) -import Foreign.Ptr (plusPtr) -import Foreign.Storable (Storable(..)) - -import Network.Socket (Socket(..)) -import Network.Socket.ByteString.IOVec (IOVec(IOVec)) -import Network.Socket.ByteString.Internal (c_writev) -import Network.Socket.Internal - --- ----------------------------------------------------------------------------- --- Sending - -send :: Socket -- ^ Connected socket - -> ByteString -- ^ Data to send - -> IO Int64 -- ^ Number of bytes sent -send sock@(MkSocket fd _ _ _ _) s = do - let cs = take maxNumChunks (L.toChunks s) - len = length cs - liftM fromIntegral . allocaArray len $ \ptr -> - withPokes cs ptr $ \niovs -> - throwSocketErrorWaitWrite sock "writev" $ - c_writev (fromIntegral fd) ptr niovs - where - withPokes ss p f = loop ss p 0 0 - where loop (c:cs) q k !niovs - | k < maxNumBytes = - unsafeUseAsCStringLen c $ \(ptr,len) -> do - poke q $ IOVec ptr (fromIntegral len) - loop cs (q `plusPtr` sizeOf (undefined :: IOVec)) - (k + fromIntegral len) (niovs + 1) - | otherwise = f niovs - loop _ _ _ niovs = f niovs - maxNumBytes = 4194304 :: Int -- maximum number of bytes to transmit in one system call - maxNumChunks = 1024 :: Int -- maximum number of chunks to transmit in one system call - -sendAll :: Socket -- ^ Connected socket - -> ByteString -- ^ Data to send - -> IO () -sendAll sock bs = do - sent <- send sock bs - let bs' = L.drop sent bs - unless (L.null bs') $ sendAll sock bs' diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar2=/Network/Socket/ByteString/Lazy/Windows.hs cabal-install-1.22-1.22.9.0/=unpacked-tar2=/Network/Socket/ByteString/Lazy/Windows.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar2=/Network/Socket/ByteString/Lazy/Windows.hs 2014-09-02 18:14:23.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar2=/Network/Socket/ByteString/Lazy/Windows.hs 1970-01-01 00:00:00.000000000 +0000 
@@ -1,36 +0,0 @@ -{-# LANGUAGE BangPatterns #-} -module Network.Socket.ByteString.Lazy.Windows - ( - -- * Send data to a socket - send - , sendAll - ) where - -import Control.Applicative ((<$>)) -import Control.Monad (unless) -import qualified Data.ByteString as S -import qualified Data.ByteString.Lazy as L -import Data.Int (Int64) - -import Network.Socket (Socket(..)) -import qualified Network.Socket.ByteString as Socket - --- ----------------------------------------------------------------------------- --- Sending - -send :: Socket -- ^ Connected socket - -> L.ByteString -- ^ Data to send - -> IO Int64 -- ^ Number of bytes sent -send sock s = do - fromIntegral <$> case L.toChunks s of - -- TODO: Consider doing nothing if the string is empty. - [] -> Socket.send sock S.empty - (x:_) -> Socket.send sock x - -sendAll :: Socket -- ^ Connected socket - -> L.ByteString -- ^ Data to send - -> IO () -sendAll sock bs = do - sent <- send sock bs - let bs' = L.drop sent bs - unless (L.null bs') $ sendAll sock bs' diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar2=/Network/Socket/ByteString/Lazy.hs cabal-install-1.22-1.22.9.0/=unpacked-tar2=/Network/Socket/ByteString/Lazy.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar2=/Network/Socket/ByteString/Lazy.hs 2014-09-02 18:14:23.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar2=/Network/Socket/ByteString/Lazy.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,149 +0,0 @@ -{-# LANGUAGE BangPatterns, CPP, ForeignFunctionInterface #-} - --- | --- Module : Network.Socket.ByteString.Lazy --- Copyright : (c) Bryan O'Sullivan 2009 --- License : BSD-style --- --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : POSIX, GHC --- --- This module provides access to the BSD /socket/ interface. This --- module is generally more efficient than the 'String' based network --- functions in 'Network.Socket'. For detailed documentation, consult --- your favorite POSIX socket reference. 
All functions communicate --- failures by converting the error number to 'System.IO.IOError'. --- --- This module is made to be imported with 'Network.Socket' like so: --- --- > import Network.Socket hiding (send, sendTo, recv, recvFrom) --- > import Network.Socket.ByteString.Lazy --- > import Prelude hiding (getContents) --- -module Network.Socket.ByteString.Lazy - ( -#if !defined(mingw32_HOST_OS) - -- * Send data to a socket - send - , sendAll - , -#endif - - -- * Receive data from a socket - getContents - , recv - ) where - -import Control.Monad (liftM) -import Data.ByteString.Lazy.Internal (ByteString(..), defaultChunkSize) -import Data.Int (Int64) -import Network.Socket (Socket(..), ShutdownCmd(..), shutdown) -import Prelude hiding (getContents) -import System.IO.Unsafe (unsafeInterleaveIO) - -import qualified Data.ByteString as S -import qualified Network.Socket.ByteString as N - -#if !defined(mingw32_HOST_OS) -import Control.Monad (unless) -import Data.ByteString.Unsafe (unsafeUseAsCStringLen) -import Foreign.Marshal.Array (allocaArray) -import Foreign.Ptr (plusPtr) -import Foreign.Storable (Storable(..)) -import Network.Socket.ByteString.IOVec (IOVec(IOVec)) -import Network.Socket.ByteString.Internal (c_writev) -import Network.Socket.Internal - -import qualified Data.ByteString.Lazy as L - -import GHC.Conc (threadWaitWrite) -#endif - -#if !defined(mingw32_HOST_OS) --- ----------------------------------------------------------------------------- --- Sending - --- | Send data to the socket. The socket must be in a connected state. --- Returns the number of bytes sent. Applications are responsible for --- ensuring that all data has been sent. --- --- Because a lazily generated 'ByteString' may be arbitrarily long, --- this function caps the amount it will attempt to send at 4MB. This --- number is large (so it should not penalize performance on fast --- networks), but not outrageously so (to avoid demanding lazily --- computed data unnecessarily early). 
Before being sent, the lazy --- 'ByteString' will be converted to a list of strict 'ByteString's --- with 'L.toChunks'; at most 1024 chunks will be sent. /Unix only/. -send :: Socket -- ^ Connected socket - -> ByteString -- ^ Data to send - -> IO Int64 -- ^ Number of bytes sent -send sock@(MkSocket fd _ _ _ _) s = do - let cs = take maxNumChunks (L.toChunks s) - len = length cs - liftM fromIntegral . allocaArray len $ \ptr -> - withPokes cs ptr $ \niovs -> - throwSocketErrorWaitWrite sock "writev" $ - c_writev (fromIntegral fd) ptr niovs - where - withPokes ss p f = loop ss p 0 0 - where loop (c:cs) q k !niovs - | k < maxNumBytes = - unsafeUseAsCStringLen c $ \(ptr,len) -> do - poke q $ IOVec ptr (fromIntegral len) - loop cs (q `plusPtr` sizeOf (undefined :: IOVec)) - (k + fromIntegral len) (niovs + 1) - | otherwise = f niovs - loop _ _ _ niovs = f niovs - maxNumBytes = 4194304 :: Int -- maximum number of bytes to transmit in one system call - maxNumChunks = 1024 :: Int -- maximum number of chunks to transmit in one system call - --- | Send data to the socket. The socket must be in a connected --- state. This function continues to send data until either all data --- has been sent or an error occurs. If there is an error, an --- exception is raised, and there is no way to determine how much data --- was sent. /Unix only/. -sendAll :: Socket -- ^ Connected socket - -> ByteString -- ^ Data to send - -> IO () -sendAll sock bs = do - sent <- send sock bs - let bs' = L.drop sent bs - unless (L.null bs') $ sendAll sock bs' -#endif - --- ----------------------------------------------------------------------------- --- Receiving - --- | Receive data from the socket. The socket must be in a connected --- state. Data is received on demand, in chunks; each chunk will be --- sized to reflect the amount of data received by individual 'recv' --- calls. --- --- All remaining data from the socket is consumed. 
When there is no --- more data to be received, the receiving side of the socket is shut --- down. If there is an error and an exception is thrown, the socket --- is not shut down. -getContents :: Socket -- ^ Connected socket - -> IO ByteString -- ^ Data received -getContents sock = loop where - loop = unsafeInterleaveIO $ do - s <- N.recv sock defaultChunkSize - if S.null s - then shutdown sock ShutdownReceive >> return Empty - else Chunk s `liftM` loop - --- | Receive data from the socket. The socket must be in a connected --- state. This function may return fewer bytes than specified. If --- the received data is longer than the specified length, it may be --- discarded depending on the type of socket. This function may block --- until a message arrives. --- --- If there is no more data to be received, returns an empty 'ByteString'. -recv :: Socket -- ^ Connected socket - -> Int64 -- ^ Maximum number of bytes to receive - -> IO ByteString -- ^ Data received -recv sock nbytes = chunk `liftM` N.recv sock (fromIntegral nbytes) where - chunk k - | S.null k = Empty - | otherwise = Chunk k Empty diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar2=/Network/Socket/ByteString/MsgHdr.hsc cabal-install-1.22-1.22.9.0/=unpacked-tar2=/Network/Socket/ByteString/MsgHdr.hsc --- cabal-install-1.22-1.22.6.0/=unpacked-tar2=/Network/Socket/ByteString/MsgHdr.hsc 2014-09-02 18:14:23.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar2=/Network/Socket/ByteString/MsgHdr.hsc 1970-01-01 00:00:00.000000000 +0000 @@ -1,48 +0,0 @@ -{-# LANGUAGE CPP #-} -{-# OPTIONS_GHC -funbox-strict-fields #-} - --- | Support module for the POSIX 'sendmsg' system call. -module Network.Socket.ByteString.MsgHdr - ( MsgHdr(..) 
- ) where - -#include -#include - -import Foreign.C.Types (CInt, CSize, CUInt) -import Foreign.Ptr (Ptr) -import Foreign.Storable (Storable(..)) -import Network.Socket (SockAddr) -import Network.Socket.Internal (zeroMemory) - -import Network.Socket.ByteString.IOVec (IOVec) - --- We don't use msg_control, msg_controllen, and msg_flags as these --- don't exist on OpenSolaris. -data MsgHdr = MsgHdr - { msgName :: !(Ptr SockAddr) - , msgNameLen :: !CUInt - , msgIov :: !(Ptr IOVec) - , msgIovLen :: !CSize - } - -instance Storable MsgHdr where - sizeOf _ = (#const sizeof(struct msghdr)) - alignment _ = alignment (undefined :: CInt) - - peek p = do - name <- (#peek struct msghdr, msg_name) p - nameLen <- (#peek struct msghdr, msg_namelen) p - iov <- (#peek struct msghdr, msg_iov) p - iovLen <- (#peek struct msghdr, msg_iovlen) p - return $ MsgHdr name nameLen iov iovLen - - poke p mh = do - -- We need to zero the msg_control, msg_controllen, and msg_flags - -- fields, but they only exist on some platforms (e.g. not on - -- Solaris). Instead of using CPP, we zero the entire struct. 
- zeroMemory p (#const sizeof(struct msghdr)) - (#poke struct msghdr, msg_name) p (msgName mh) - (#poke struct msghdr, msg_namelen) p (msgNameLen mh) - (#poke struct msghdr, msg_iov) p (msgIov mh) - (#poke struct msghdr, msg_iovlen) p (msgIovLen mh) diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar2=/Network/Socket/ByteString.hsc cabal-install-1.22-1.22.9.0/=unpacked-tar2=/Network/Socket/ByteString.hsc --- cabal-install-1.22-1.22.6.0/=unpacked-tar2=/Network/Socket/ByteString.hsc 2014-09-02 18:14:23.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar2=/Network/Socket/ByteString.hsc 1970-01-01 00:00:00.000000000 +0000 @@ -1,351 +0,0 @@ -{-# LANGUAGE CPP, ForeignFunctionInterface #-} - -#include "HsNet.h" - --- | --- Module : Network.Socket.ByteString --- Copyright : (c) Johan Tibell 2007-2010 --- License : BSD-style --- --- Maintainer : johan.tibell@gmail.com --- Stability : stable --- Portability : portable --- --- This module provides access to the BSD /socket/ interface. This --- module is generally more efficient than the 'String' based network --- functions in 'Network.Socket'. For detailed documentation, consult --- your favorite POSIX socket reference. All functions communicate --- failures by converting the error number to 'System.IO.IOError'. 
--- --- This module is made to be imported with 'Network.Socket' like so: --- --- > import Network.Socket hiding (send, sendTo, recv, recvFrom) --- > import Network.Socket.ByteString --- -module Network.Socket.ByteString - ( - -- * Send data to a socket - send - , sendAll - , sendTo - , sendAllTo - - -- ** Vectored I/O - -- $vectored - , sendMany - , sendManyTo - - -- * Receive data from a socket - , recv - , recvFrom - - -- * Example - -- $example - ) where - -import Control.Monad (liftM, when) -import Data.ByteString (ByteString) -import Data.ByteString.Internal (createAndTrim) -import Data.ByteString.Unsafe (unsafeUseAsCStringLen) -import Data.Word (Word8) -import Foreign.C.Types (CInt(..)) -import Foreign.Marshal.Alloc (allocaBytes) -import Foreign.Ptr (Ptr, castPtr) -import Network.Socket (SockAddr, Socket(..), sendBufTo, recvBufFrom) - -import qualified Data.ByteString as B - -import Network.Socket.ByteString.Internal -import Network.Socket.Internal -import Network.Socket.Types - -#if !defined(mingw32_HOST_OS) -import Control.Monad (zipWithM_) -import Foreign.C.Types (CChar) -import Foreign.C.Types (CSize(..)) -import Foreign.Marshal.Array (allocaArray) -import Foreign.Marshal.Utils (with) -import Foreign.Ptr (plusPtr) -import Foreign.Storable (Storable(..)) - -import Network.Socket.ByteString.IOVec (IOVec(..)) -import Network.Socket.ByteString.MsgHdr (MsgHdr(..)) - -#else -import GHC.IO.FD -#endif - -#if !defined(mingw32_HOST_OS) -foreign import CALLCONV unsafe "send" - c_send :: CInt -> Ptr a -> CSize -> CInt -> IO CInt -foreign import CALLCONV unsafe "recv" - c_recv :: CInt -> Ptr CChar -> CSize -> CInt -> IO CInt -#endif - --- ---------------------------------------------------------------------------- --- Sending - --- | Send data to the socket. The socket must be connected to a --- remote socket. Returns the number of bytes sent. Applications are --- responsible for ensuring that all data has been sent. 
-send :: Socket -- ^ Connected socket - -> ByteString -- ^ Data to send - -> IO Int -- ^ Number of bytes sent -send sock@(MkSocket s _ _ _ _) xs = - unsafeUseAsCStringLen xs $ \(str, len) -> - liftM fromIntegral $ -#if defined(mingw32_HOST_OS) - writeRawBufferPtr "Network.Socket.ByteString.send" - (FD s 1) (castPtr str) 0 (fromIntegral len) -#else - throwSocketErrorWaitWrite sock "send" $ - c_send s str (fromIntegral len) 0 -#endif - --- | Send data to the socket. The socket must be connected to a --- remote socket. Unlike 'send', this function continues to send data --- until either all data has been sent or an error occurs. On error, --- an exception is raised, and there is no way to determine how much --- data, if any, was successfully sent. -sendAll :: Socket -- ^ Connected socket - -> ByteString -- ^ Data to send - -> IO () -sendAll sock bs = do - sent <- send sock bs - when (sent < B.length bs) $ sendAll sock (B.drop sent bs) - --- | Send data to the socket. The recipient can be specified --- explicitly, so the socket need not be in a connected state. --- Returns the number of bytes sent. Applications are responsible for --- ensuring that all data has been sent. -sendTo :: Socket -- ^ Socket - -> ByteString -- ^ Data to send - -> SockAddr -- ^ Recipient address - -> IO Int -- ^ Number of bytes sent -sendTo sock xs addr = - unsafeUseAsCStringLen xs $ \(str, len) -> sendBufTo sock str len addr - --- | Send data to the socket. The recipient can be specified --- explicitly, so the socket need not be in a connected state. Unlike --- 'sendTo', this function continues to send data until either all --- data has been sent or an error occurs. On error, an exception is --- raised, and there is no way to determine how much data, if any, was --- successfully sent. 
-sendAllTo :: Socket -- ^ Socket - -> ByteString -- ^ Data to send - -> SockAddr -- ^ Recipient address - -> IO () -sendAllTo sock xs addr = do - sent <- sendTo sock xs addr - when (sent < B.length xs) $ sendAllTo sock (B.drop sent xs) addr - --- ---------------------------------------------------------------------------- --- ** Vectored I/O - --- $vectored --- --- Vectored I\/O, also known as scatter\/gather I\/O, allows multiple --- data segments to be sent using a single system call, without first --- concatenating the segments. For example, given a list of --- @ByteString@s, @xs@, --- --- > sendMany sock xs --- --- is equivalent to --- --- > sendAll sock (concat xs) --- --- but potentially more efficient. --- --- Vectored I\/O are often useful when implementing network protocols --- that, for example, group data into segments consisting of one or --- more fixed-length headers followed by a variable-length body. - --- | Send data to the socket. The socket must be in a connected --- state. The data is sent as if the parts have been concatenated. --- This function continues to send data until either all data has been --- sent or an error occurs. On error, an exception is raised, and --- there is no way to determine how much data, if any, was --- successfully sent. -sendMany :: Socket -- ^ Connected socket - -> [ByteString] -- ^ Data to send - -> IO () -#if !defined(mingw32_HOST_OS) -sendMany sock@(MkSocket fd _ _ _ _) cs = do - sent <- sendManyInner - when (sent < totalLength cs) $ sendMany sock (remainingChunks sent cs) - where - sendManyInner = - liftM fromIntegral . withIOVec cs $ \(iovsPtr, iovsLen) -> - throwSocketErrorWaitWrite sock "writev" $ - c_writev (fromIntegral fd) iovsPtr - (fromIntegral (min iovsLen (#const IOV_MAX))) -#else -sendMany sock = sendAll sock . B.concat -#endif - --- | Send data to the socket. The recipient can be specified --- explicitly, so the socket need not be in a connected state. 
The --- data is sent as if the parts have been concatenated. This function --- continues to send data until either all data has been sent or an --- error occurs. On error, an exception is raised, and there is no --- way to determine how much data, if any, was successfully sent. -sendManyTo :: Socket -- ^ Socket - -> [ByteString] -- ^ Data to send - -> SockAddr -- ^ Recipient address - -> IO () -#if !defined(mingw32_HOST_OS) -sendManyTo sock@(MkSocket fd _ _ _ _) cs addr = do - sent <- liftM fromIntegral sendManyToInner - when (sent < totalLength cs) $ sendManyTo sock (remainingChunks sent cs) addr - where - sendManyToInner = - withSockAddr addr $ \addrPtr addrSize -> - withIOVec cs $ \(iovsPtr, iovsLen) -> do - let msgHdr = MsgHdr - addrPtr (fromIntegral addrSize) - iovsPtr (fromIntegral iovsLen) - with msgHdr $ \msgHdrPtr -> - throwSocketErrorWaitWrite sock "sendmsg" $ - c_sendmsg (fromIntegral fd) msgHdrPtr 0 -#else -sendManyTo sock cs = sendAllTo sock (B.concat cs) -#endif - --- ---------------------------------------------------------------------------- --- Receiving - --- | Receive data from the socket. The socket must be in a connected --- state. This function may return fewer bytes than specified. If --- the message is longer than the specified length, it may be --- discarded depending on the type of socket. This function may block --- until a message arrives. --- --- Considering hardware and network realities, the maximum number of bytes to --- receive should be a small power of 2, e.g., 4096. --- --- For TCP sockets, a zero length return value means the peer has --- closed its half side of the connection. 
-recv :: Socket -- ^ Connected socket - -> Int -- ^ Maximum number of bytes to receive - -> IO ByteString -- ^ Data received -recv sock nbytes - | nbytes < 0 = ioError (mkInvalidRecvArgError "Network.Socket.ByteString.recv") - | otherwise = createAndTrim nbytes $ recvInner sock nbytes - -recvInner :: Socket -> Int -> Ptr Word8 -> IO Int -recvInner sock nbytes ptr = - fmap fromIntegral $ -#if defined(mingw32_HOST_OS) - readRawBufferPtr "Network.Socket.ByteString.recv" (FD s 1) ptr 0 (fromIntegral nbytes) -#else - throwSocketErrorWaitRead sock "recv" $ - c_recv s (castPtr ptr) (fromIntegral nbytes) 0 -#endif - where - s = sockFd sock - --- | Receive data from the socket. The socket need not be in a --- connected state. Returns @(bytes, address)@ where @bytes@ is a --- 'ByteString' representing the data received and @address@ is a --- 'SockAddr' representing the address of the sending socket. -recvFrom :: Socket -- ^ Socket - -> Int -- ^ Maximum number of bytes to receive - -> IO (ByteString, SockAddr) -- ^ Data received and sender address -recvFrom sock nbytes = - allocaBytes nbytes $ \ptr -> do - (len, sockaddr) <- recvBufFrom sock ptr nbytes - str <- B.packCStringLen (ptr, len) - return (str, sockaddr) - --- ---------------------------------------------------------------------------- --- Not exported - -#if !defined(mingw32_HOST_OS) --- | Suppose we try to transmit a list of chunks @cs@ via a gathering write --- operation and find that @n@ bytes were sent. Then @remainingChunks n cs@ is --- list of chunks remaining to be sent. -remainingChunks :: Int -> [ByteString] -> [ByteString] -remainingChunks _ [] = [] -remainingChunks i (x:xs) - | i < len = B.drop i x : xs - | otherwise = let i' = i - len in i' `seq` remainingChunks i' xs - where - len = B.length x - --- | @totalLength cs@ is the sum of the lengths of the chunks in the list @cs@. -totalLength :: [ByteString] -> Int -totalLength = sum . 
map B.length - --- | @withIOVec cs f@ executes the computation @f@, passing as argument a pair --- consisting of a pointer to a temporarily allocated array of pointers to --- 'IOVec' made from @cs@ and the number of pointers (@length cs@). --- /Unix only/. -withIOVec :: [ByteString] -> ((Ptr IOVec, Int) -> IO a) -> IO a -withIOVec cs f = - allocaArray csLen $ \aPtr -> do - zipWithM_ pokeIov (ptrs aPtr) cs - f (aPtr, csLen) - where - csLen = length cs - ptrs = iterate (`plusPtr` sizeOf (undefined :: IOVec)) - pokeIov ptr s = - unsafeUseAsCStringLen s $ \(sPtr, sLen) -> - poke ptr $ IOVec sPtr (fromIntegral sLen) -#endif - --- --------------------------------------------------------------------- --- Example - --- $example --- --- Here are two minimal example programs using the TCP/IP protocol: a --- server that echoes all data that it receives back (servicing only --- one client) and a client using it. --- --- > -- Echo server program --- > module Main where --- > --- > import Control.Monad (unless) --- > import Network.Socket hiding (recv) --- > import qualified Data.ByteString as S --- > import Network.Socket.ByteString (recv, sendAll) --- > --- > main :: IO () --- > main = withSocketsDo $ --- > do addrinfos <- getAddrInfo --- > (Just (defaultHints {addrFlags = [AI_PASSIVE]})) --- > Nothing (Just "3000") --- > let serveraddr = head addrinfos --- > sock <- socket (addrFamily serveraddr) Stream defaultProtocol --- > bindSocket sock (addrAddress serveraddr) --- > listen sock 1 --- > (conn, _) <- accept sock --- > talk conn --- > sClose conn --- > sClose sock --- > --- > where --- > talk :: Socket -> IO () --- > talk conn = --- > do msg <- recv conn 1024 --- > unless (S.null msg) $ sendAll conn msg >> talk conn --- --- > -- Echo client program --- > module Main where --- > --- > import Network.Socket hiding (recv) --- > import Network.Socket.ByteString (recv, sendAll) --- > import qualified Data.ByteString.Char8 as C --- > --- > main :: IO () --- > main = withSocketsDo 
$ --- > do addrinfos <- getAddrInfo Nothing (Just "") (Just "3000") --- > let serveraddr = head addrinfos --- > sock <- socket (addrFamily serveraddr) Stream defaultProtocol --- > connect sock (addrAddress serveraddr) --- > sendAll sock $ C.pack "Hello, world!" --- > msg <- recv sock 1024 --- > sClose sock --- > putStr "Received " --- > C.putStrLn msg diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar2=/Network/Socket/Internal.hsc cabal-install-1.22-1.22.9.0/=unpacked-tar2=/Network/Socket/Internal.hsc --- cabal-install-1.22-1.22.6.0/=unpacked-tar2=/Network/Socket/Internal.hsc 2014-09-02 18:14:23.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar2=/Network/Socket/Internal.hsc 1970-01-01 00:00:00.000000000 +0000 @@ -1,261 +0,0 @@ -{-# LANGUAGE CPP #-} -{-# LANGUAGE ForeignFunctionInterface #-} -{-# OPTIONS_GHC -fno-warn-orphans #-} ------------------------------------------------------------------------------ --- | --- Module : Network.Socket.Internal --- Copyright : (c) The University of Glasgow 2001 --- License : BSD-style (see the file libraries/network/LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : provisional --- Portability : portable --- --- A module containing semi-public 'Network.Socket' internals. --- Modules which extend the 'Network.Socket' module will need to use --- this module while ideally most users will be able to make do with --- the public interface. --- ------------------------------------------------------------------------------ - -#include "HsNet.h" - -module Network.Socket.Internal - ( - -- * Socket addresses - HostAddress -#if defined(IPV6_SOCKET_SUPPORT) - , HostAddress6 - , FlowInfo - , ScopeID -#endif - , PortNumber(..) - , SockAddr(..) - - , peekSockAddr - , pokeSockAddr - , sizeOfSockAddr - , sizeOfSockAddrByFamily - , withSockAddr - , withNewSockAddr - - -- * Protocol families - , Family(..) 
- - -- * Socket error functions -#if defined(HAVE_WINSOCK2_H) && !defined(cygwin32_HOST_OS) - , c_getLastError -#endif - , throwSocketError - , throwSocketErrorCode - - -- * Guards for socket operations that may fail - , throwSocketErrorIfMinus1_ - , throwSocketErrorIfMinus1Retry - , throwSocketErrorIfMinus1Retry_ - , throwSocketErrorIfMinus1RetryMayBlock - - -- ** Guards that wait and retry if the operation would block - -- | These guards are based on 'throwSocketErrorIfMinus1RetryMayBlock'. - -- They wait for socket readiness if the action fails with @EWOULDBLOCK@ - -- or similar. - , throwSocketErrorWaitRead - , throwSocketErrorWaitWrite - - -- * Initialization - , withSocketsDo - - -- * Low-level helpers - , zeroMemory - ) where - -import Foreign.C.Error (throwErrno, throwErrnoIfMinus1Retry, - throwErrnoIfMinus1RetryMayBlock, throwErrnoIfMinus1_, - Errno(..), errnoToIOError) -import Foreign.C.String (peekCString) -import Foreign.C.Types (CInt(..)) -import Foreign.Ptr (Ptr) -import GHC.Conc (threadWaitRead, threadWaitWrite) - -#if defined(HAVE_WINSOCK2_H) && !defined(cygwin32_HOST_OS) -import Control.Exception ( finally ) -# if __GLASGOW_HASKELL__ >= 707 -import GHC.IO.Exception ( IOErrorType(..) ) -# else -import GHC.IOBase ( IOErrorType(..) ) -# endif -import Foreign.C.Types ( CChar ) -import System.IO.Error ( ioeSetErrorString, mkIOError ) -#endif - -import Network.Socket.Types - --- --------------------------------------------------------------------- --- Guards for socket operations that may fail - --- | Throw an 'IOError' corresponding to the current socket error. -throwSocketError :: String -- ^ textual description of the error location - -> IO a - --- | Like 'throwSocketError', but the error code is supplied as an argument. --- --- On Windows, do not use errno. Use a system error code instead. -throwSocketErrorCode :: String -> CInt -> IO a - --- | Throw an 'IOError' corresponding to the current socket error if --- the IO action returns a result of @-1@. 
Discards the result of the --- IO action after error handling. -throwSocketErrorIfMinus1_ - :: (Eq a, Num a) - => String -- ^ textual description of the location - -> IO a -- ^ the 'IO' operation to be executed - -> IO () - -{-# SPECIALIZE throwSocketErrorIfMinus1_ :: String -> IO CInt -> IO () #-} - --- | Throw an 'IOError' corresponding to the current socket error if --- the IO action returns a result of @-1@, but retries in case of an --- interrupted operation. -throwSocketErrorIfMinus1Retry - :: (Eq a, Num a) - => String -- ^ textual description of the location - -> IO a -- ^ the 'IO' operation to be executed - -> IO a - -{-# SPECIALIZE throwSocketErrorIfMinus1Retry :: String -> IO CInt -> IO CInt #-} - --- | Throw an 'IOError' corresponding to the current socket error if --- the IO action returns a result of @-1@, but retries in case of an --- interrupted operation. Discards the result of the IO action after --- error handling. -throwSocketErrorIfMinus1Retry_ - :: (Eq a, Num a) - => String -- ^ textual description of the location - -> IO a -- ^ the 'IO' operation to be executed - -> IO () -throwSocketErrorIfMinus1Retry_ loc m = - throwSocketErrorIfMinus1Retry loc m >> return () -{-# SPECIALIZE throwSocketErrorIfMinus1Retry_ :: String -> IO CInt -> IO () #-} - --- | Throw an 'IOError' corresponding to the current socket error if --- the IO action returns a result of @-1@, but retries in case of an --- interrupted operation. Checks for operations that would block and --- executes an alternative action before retrying in that case. 
-throwSocketErrorIfMinus1RetryMayBlock - :: (Eq a, Num a) - => String -- ^ textual description of the location - -> IO b -- ^ action to execute before retrying if an - -- immediate retry would block - -> IO a -- ^ the 'IO' operation to be executed - -> IO a - -{-# SPECIALIZE throwSocketErrorIfMinus1RetryMayBlock - :: String -> IO b -> IO CInt -> IO CInt #-} - -#if (!defined(HAVE_WINSOCK2_H) || defined(cygwin32_HOST_OS)) - -throwSocketErrorIfMinus1RetryMayBlock name on_block act = - throwErrnoIfMinus1RetryMayBlock name act on_block - -throwSocketErrorIfMinus1Retry = throwErrnoIfMinus1Retry - -throwSocketErrorIfMinus1_ = throwErrnoIfMinus1_ - -throwSocketError = throwErrno - -throwSocketErrorCode loc errno = - ioError (errnoToIOError loc (Errno errno) Nothing Nothing) - -#else - -throwSocketErrorIfMinus1RetryMayBlock name _ act - = throwSocketErrorIfMinus1Retry name act - -throwSocketErrorIfMinus1_ name act = do - throwSocketErrorIfMinus1Retry name act - return () - -# if defined(HAVE_WINSOCK2_H) && !defined(cygwin32_HOST_OS) -throwSocketErrorIfMinus1Retry name act = do - r <- act - if (r == -1) - then do - rc <- c_getLastError - case rc of - #{const WSANOTINITIALISED} -> do - withSocketsDo (return ()) - r <- act - if (r == -1) - then throwSocketError name - else return r - _ -> throwSocketError name - else return r - -throwSocketErrorCode name rc = do - pstr <- c_getWSError rc - str <- peekCString pstr - ioError (ioeSetErrorString (mkIOError OtherError name Nothing Nothing) str) - -throwSocketError name = - c_getLastError >>= throwSocketErrorCode name - -foreign import CALLCONV unsafe "WSAGetLastError" - c_getLastError :: IO CInt - -foreign import ccall unsafe "getWSErrorDescr" - c_getWSError :: CInt -> IO (Ptr CChar) - - -# else -throwSocketErrorIfMinus1Retry = throwErrnoIfMinus1Retry -throwSocketError = throwErrno -throwSocketErrorCode loc errno = - ioError (errnoToIOError loc (Errno errno) Nothing Nothing) -# endif -#endif - --- | Like 
'throwSocketErrorIfMinus1Retry', but if the action fails with --- @EWOULDBLOCK@ or similar, wait for the socket to be read-ready, --- and try again. -throwSocketErrorWaitRead :: (Eq a, Num a) => Socket -> String -> IO a -> IO a -throwSocketErrorWaitRead sock name io = - throwSocketErrorIfMinus1RetryMayBlock name - (threadWaitRead $ fromIntegral $ sockFd sock) - io - --- | Like 'throwSocketErrorIfMinus1Retry', but if the action fails with --- @EWOULDBLOCK@ or similar, wait for the socket to be write-ready, --- and try again. -throwSocketErrorWaitWrite :: (Eq a, Num a) => Socket -> String -> IO a -> IO a -throwSocketErrorWaitWrite sock name io = - throwSocketErrorIfMinus1RetryMayBlock name - (threadWaitWrite $ fromIntegral $ sockFd sock) - io - --- --------------------------------------------------------------------------- --- WinSock support - -{-| On Windows operating systems, the networking subsystem has to be -initialised using 'withSocketsDo' before any networking operations can -be used. eg. - -> main = withSocketsDo $ do {...} - -Although this is only strictly necessary on Windows platforms, it is -harmless on other platforms, so for portability it is good practice to -use it all the time. 
--} -withSocketsDo :: IO a -> IO a -#if !defined(WITH_WINSOCK) -withSocketsDo x = x -#else -withSocketsDo act = do - x <- initWinSock - if x /= 0 - then ioError (userError "Failed to initialise WinSock") - else act `finally` shutdownWinSock - -foreign import ccall unsafe "initWinSock" initWinSock :: IO Int -foreign import ccall unsafe "shutdownWinSock" shutdownWinSock :: IO () - -#endif diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar2=/Network/Socket/Types.hsc cabal-install-1.22-1.22.9.0/=unpacked-tar2=/Network/Socket/Types.hsc --- cabal-install-1.22-1.22.6.0/=unpacked-tar2=/Network/Socket/Types.hsc 2014-09-02 18:14:23.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar2=/Network/Socket/Types.hsc 1970-01-01 00:00:00.000000000 +0000 @@ -1,977 +0,0 @@ -{-# LANGUAGE DeriveDataTypeable #-} -{-# LANGUAGE FlexibleInstances #-} -{-# LANGUAGE ForeignFunctionInterface #-} - -#include "HsNet.h" - -module Network.Socket.Types - ( - -- * Socket - Socket(..) - , sockFd - , sockFamily - , sockType - , sockProtocol - , sockStatus - , SocketStatus(..) - - -- * Socket types - , SocketType(..) - , isSupportedSocketType - , packSocketType - , packSocketType' - , packSocketTypeOrThrow - , unpackSocketType - , unpackSocketType' - - -- * Family - , Family(..) - , isSupportedFamily - , packFamily - , unpackFamily - - -- * Socket addresses - , SockAddr(..) - , HostAddress -#if defined(IPV6_SOCKET_SUPPORT) - , HostAddress6 - , FlowInfo - , ScopeID -#endif - , peekSockAddr - , pokeSockAddr - , sizeOfSockAddr - , sizeOfSockAddrByFamily - , withSockAddr - , withNewSockAddr - - -- * Unsorted - , ProtocolNumber - , PortNumber(..) 
- - -- * Low-level helpers - , zeroMemory - ) where - -import Control.Concurrent.MVar -import Control.Monad -import Data.Bits -import Data.Maybe -import Data.Ratio -import Data.Typeable -import Data.Word -import Foreign.C -import Foreign.Marshal.Alloc -import Foreign.Marshal.Array -import Foreign.Ptr -import Foreign.Storable - -data Socket - = MkSocket - CInt -- File Descriptor - Family - SocketType - ProtocolNumber -- Protocol Number - (MVar SocketStatus) -- Status Flag - deriving Typeable - -sockFd :: Socket -> CInt -sockFd (MkSocket n _ _ _ _) = n - -sockFamily :: Socket -> Family -sockFamily (MkSocket _ f _ _ _) = f - -sockType :: Socket -> SocketType -sockType (MkSocket _ _ t _ _) = t - -sockProtocol :: Socket -> ProtocolNumber -sockProtocol (MkSocket _ _ _ p _) = p - -sockStatus :: Socket -> MVar SocketStatus -sockStatus (MkSocket _ _ _ _ s) = s - -instance Eq Socket where - (MkSocket _ _ _ _ m1) == (MkSocket _ _ _ _ m2) = m1 == m2 - -instance Show Socket where - showsPrec _n (MkSocket fd _ _ _ _) = - showString "" - -type ProtocolNumber = CInt - --- | The status of the socket as /determined by this library/, not --- necessarily reflecting the state of the connection itself. --- --- For example, the 'Closed' status is applied when the 'close' --- function is called. -data SocketStatus - -- Returned Status Function called - = NotConnected -- ^ Newly created, unconnected socket - | Bound -- ^ Bound, via 'bind' - | Listening -- ^ Listening, via 'listen' - | Connected -- ^ Connected or accepted, via 'connect' or 'accept' - | ConvertedToHandle -- ^ Is now a 'Handle' (via 'socketToHandle'), don't touch - | Closed -- ^ Closed was closed by 'close' - deriving (Eq, Show, Typeable) - ------------------------------------------------------------------------------ --- Socket types - --- There are a few possible ways to do this. The first is convert the --- structs used in the C library into an equivalent Haskell type. 
An --- other possible implementation is to keep all the internals in the C --- code and use an Int## and a status flag. The second method is used --- here since a lot of the C structures are not required to be --- manipulated. - --- Originally the status was non-mutable so we had to return a new --- socket each time we changed the status. This version now uses --- mutable variables to avoid the need to do this. The result is a --- cleaner interface and better security since the application --- programmer now can't circumvent the status information to perform --- invalid operations on sockets. - --- | Socket Types. --- --- The existence of a constructor does not necessarily imply that that --- socket type is supported on your system: see 'isSupportedSocketType'. -data SocketType - = NoSocketType -- ^ 0, used in getAddrInfo hints, for example - | Stream -- ^ SOCK_STREAM - | Datagram -- ^ SOCK_DGRAM - | Raw -- ^ SOCK_RAW - | RDM -- ^ SOCK_RDM - | SeqPacket -- ^ SOCK_SEQPACKET - deriving (Eq, Ord, Read, Show, Typeable) - --- | Does the SOCK_ constant corresponding to the given SocketType exist on --- this system? -isSupportedSocketType :: SocketType -> Bool -isSupportedSocketType = isJust . packSocketType' - --- | Find the SOCK_ constant corresponding to the SocketType value. 
-packSocketType' :: SocketType -> Maybe CInt -packSocketType' stype = case Just stype of - -- the Just above is to disable GHC's overlapping pattern - -- detection: see comments for packSocketOption - Just NoSocketType -> Just 0 -#ifdef SOCK_STREAM - Just Stream -> Just #const SOCK_STREAM -#endif -#ifdef SOCK_DGRAM - Just Datagram -> Just #const SOCK_DGRAM -#endif -#ifdef SOCK_RAW - Just Raw -> Just #const SOCK_RAW -#endif -#ifdef SOCK_RDM - Just RDM -> Just #const SOCK_RDM -#endif -#ifdef SOCK_SEQPACKET - Just SeqPacket -> Just #const SOCK_SEQPACKET -#endif - _ -> Nothing - -packSocketType :: SocketType -> CInt -packSocketType stype = fromMaybe (error errMsg) (packSocketType' stype) - where - errMsg = concat ["Network.Socket.packSocketType: ", - "socket type ", show stype, " unsupported on this system"] - --- | Try packSocketType' on the SocketType, if it fails throw an error with --- message starting "Network.Socket." ++ the String parameter -packSocketTypeOrThrow :: String -> SocketType -> IO CInt -packSocketTypeOrThrow caller stype = maybe err return (packSocketType' stype) - where - err = ioError . userError . concat $ ["Network.Socket.", caller, ": ", - "socket type ", show stype, " unsupported on this system"] - - -unpackSocketType:: CInt -> Maybe SocketType -unpackSocketType t = case t of - 0 -> Just NoSocketType -#ifdef SOCK_STREAM - (#const SOCK_STREAM) -> Just Stream -#endif -#ifdef SOCK_DGRAM - (#const SOCK_DGRAM) -> Just Datagram -#endif -#ifdef SOCK_RAW - (#const SOCK_RAW) -> Just Raw -#endif -#ifdef SOCK_RDM - (#const SOCK_RDM) -> Just RDM -#endif -#ifdef SOCK_SEQPACKET - (#const SOCK_SEQPACKET) -> Just SeqPacket -#endif - _ -> Nothing - --- | Try unpackSocketType on the CInt, if it fails throw an error with --- message starting "Network.Socket." ++ the String parameter -unpackSocketType' :: String -> CInt -> IO SocketType -unpackSocketType' caller ty = maybe err return (unpackSocketType ty) - where - err = ioError . userError . 
concat $ ["Network.Socket.", caller, ": ", - "socket type ", show ty, " unsupported on this system"] - ------------------------------------------------------------------------- --- Protocol Families. - --- | Address families. --- --- A constructor being present here does not mean it is supported by the --- operating system: see 'isSupportedFamily'. -data Family - = AF_UNSPEC -- unspecified - | AF_UNIX -- local to host (pipes, portals - | AF_INET -- internetwork: UDP, TCP, etc - | AF_INET6 -- Internet Protocol version 6 - | AF_IMPLINK -- arpanet imp addresses - | AF_PUP -- pup protocols: e.g. BSP - | AF_CHAOS -- mit CHAOS protocols - | AF_NS -- XEROX NS protocols - | AF_NBS -- nbs protocols - | AF_ECMA -- european computer manufacturers - | AF_DATAKIT -- datakit protocols - | AF_CCITT -- CCITT protocols, X.25 etc - | AF_SNA -- IBM SNA - | AF_DECnet -- DECnet - | AF_DLI -- Direct data link interface - | AF_LAT -- LAT - | AF_HYLINK -- NSC Hyperchannel - | AF_APPLETALK -- Apple Talk - | AF_ROUTE -- Internal Routing Protocol - | AF_NETBIOS -- NetBios-style addresses - | AF_NIT -- Network Interface Tap - | AF_802 -- IEEE 802.2, also ISO 8802 - | AF_ISO -- ISO protocols - | AF_OSI -- umbrella of all families used by OSI - | AF_NETMAN -- DNA Network Management - | AF_X25 -- CCITT X.25 - | AF_AX25 - | AF_OSINET -- AFI - | AF_GOSSIP -- US Government OSI - | AF_IPX -- Novell Internet Protocol - | Pseudo_AF_XTP -- eXpress Transfer Protocol (no AF) - | AF_CTF -- Common Trace Facility - | AF_WAN -- Wide Area Network protocols - | AF_SDL -- SGI Data Link for DLPI - | AF_NETWARE - | AF_NDD - | AF_INTF -- Debugging use only - | AF_COIP -- connection-oriented IP, aka ST II - | AF_CNT -- Computer Network Technology - | Pseudo_AF_RTIP -- Help Identify RTIP packets - | Pseudo_AF_PIP -- Help Identify PIP packets - | AF_SIP -- Simple Internet Protocol - | AF_ISDN -- Integrated Services Digital Network - | Pseudo_AF_KEY -- Internal key-management function - | AF_NATM -- native ATM access 
- | AF_ARP -- (rev.) addr. res. prot. (RFC 826) - | Pseudo_AF_HDRCMPLT -- Used by BPF to not rewrite hdrs in iface output - | AF_ENCAP - | AF_LINK -- Link layer interface - | AF_RAW -- Link layer interface - | AF_RIF -- raw interface - | AF_NETROM -- Amateur radio NetROM - | AF_BRIDGE -- multiprotocol bridge - | AF_ATMPVC -- ATM PVCs - | AF_ROSE -- Amateur Radio X.25 PLP - | AF_NETBEUI -- 802.2LLC - | AF_SECURITY -- Security callback pseudo AF - | AF_PACKET -- Packet family - | AF_ASH -- Ash - | AF_ECONET -- Acorn Econet - | AF_ATMSVC -- ATM SVCs - | AF_IRDA -- IRDA sockets - | AF_PPPOX -- PPPoX sockets - | AF_WANPIPE -- Wanpipe API sockets - | AF_BLUETOOTH -- bluetooth sockets - deriving (Eq, Ord, Read, Show) - -packFamily :: Family -> CInt -packFamily f = case packFamily' f of - Just fam -> fam - Nothing -> error $ - "Network.Socket.packFamily: unsupported address family: " ++ - show f - --- | Does the AF_ constant corresponding to the given family exist on this --- system? -isSupportedFamily :: Family -> Bool -isSupportedFamily = isJust . 
packFamily' - -packFamily' :: Family -> Maybe CInt -packFamily' f = case Just f of - -- the Just above is to disable GHC's overlapping pattern - -- detection: see comments for packSocketOption - Just AF_UNSPEC -> Just #const AF_UNSPEC -#ifdef AF_UNIX - Just AF_UNIX -> Just #const AF_UNIX -#endif -#ifdef AF_INET - Just AF_INET -> Just #const AF_INET -#endif -#ifdef AF_INET6 - Just AF_INET6 -> Just #const AF_INET6 -#endif -#ifdef AF_IMPLINK - Just AF_IMPLINK -> Just #const AF_IMPLINK -#endif -#ifdef AF_PUP - Just AF_PUP -> Just #const AF_PUP -#endif -#ifdef AF_CHAOS - Just AF_CHAOS -> Just #const AF_CHAOS -#endif -#ifdef AF_NS - Just AF_NS -> Just #const AF_NS -#endif -#ifdef AF_NBS - Just AF_NBS -> Just #const AF_NBS -#endif -#ifdef AF_ECMA - Just AF_ECMA -> Just #const AF_ECMA -#endif -#ifdef AF_DATAKIT - Just AF_DATAKIT -> Just #const AF_DATAKIT -#endif -#ifdef AF_CCITT - Just AF_CCITT -> Just #const AF_CCITT -#endif -#ifdef AF_SNA - Just AF_SNA -> Just #const AF_SNA -#endif -#ifdef AF_DECnet - Just AF_DECnet -> Just #const AF_DECnet -#endif -#ifdef AF_DLI - Just AF_DLI -> Just #const AF_DLI -#endif -#ifdef AF_LAT - Just AF_LAT -> Just #const AF_LAT -#endif -#ifdef AF_HYLINK - Just AF_HYLINK -> Just #const AF_HYLINK -#endif -#ifdef AF_APPLETALK - Just AF_APPLETALK -> Just #const AF_APPLETALK -#endif -#ifdef AF_ROUTE - Just AF_ROUTE -> Just #const AF_ROUTE -#endif -#ifdef AF_NETBIOS - Just AF_NETBIOS -> Just #const AF_NETBIOS -#endif -#ifdef AF_NIT - Just AF_NIT -> Just #const AF_NIT -#endif -#ifdef AF_802 - Just AF_802 -> Just #const AF_802 -#endif -#ifdef AF_ISO - Just AF_ISO -> Just #const AF_ISO -#endif -#ifdef AF_OSI - Just AF_OSI -> Just #const AF_OSI -#endif -#ifdef AF_NETMAN - Just AF_NETMAN -> Just #const AF_NETMAN -#endif -#ifdef AF_X25 - Just AF_X25 -> Just #const AF_X25 -#endif -#ifdef AF_AX25 - Just AF_AX25 -> Just #const AF_AX25 -#endif -#ifdef AF_OSINET - Just AF_OSINET -> Just #const AF_OSINET -#endif -#ifdef AF_GOSSIP - Just AF_GOSSIP -> Just 
#const AF_GOSSIP -#endif -#ifdef AF_IPX - Just AF_IPX -> Just #const AF_IPX -#endif -#ifdef Pseudo_AF_XTP - Just Pseudo_AF_XTP -> Just #const Pseudo_AF_XTP -#endif -#ifdef AF_CTF - Just AF_CTF -> Just #const AF_CTF -#endif -#ifdef AF_WAN - Just AF_WAN -> Just #const AF_WAN -#endif -#ifdef AF_SDL - Just AF_SDL -> Just #const AF_SDL -#endif -#ifdef AF_NETWARE - Just AF_NETWARE -> Just #const AF_NETWARE -#endif -#ifdef AF_NDD - Just AF_NDD -> Just #const AF_NDD -#endif -#ifdef AF_INTF - Just AF_INTF -> Just #const AF_INTF -#endif -#ifdef AF_COIP - Just AF_COIP -> Just #const AF_COIP -#endif -#ifdef AF_CNT - Just AF_CNT -> Just #const AF_CNT -#endif -#ifdef Pseudo_AF_RTIP - Just Pseudo_AF_RTIP -> Just #const Pseudo_AF_RTIP -#endif -#ifdef Pseudo_AF_PIP - Just Pseudo_AF_PIP -> Just #const Pseudo_AF_PIP -#endif -#ifdef AF_SIP - Just AF_SIP -> Just #const AF_SIP -#endif -#ifdef AF_ISDN - Just AF_ISDN -> Just #const AF_ISDN -#endif -#ifdef Pseudo_AF_KEY - Just Pseudo_AF_KEY -> Just #const Pseudo_AF_KEY -#endif -#ifdef AF_NATM - Just AF_NATM -> Just #const AF_NATM -#endif -#ifdef AF_ARP - Just AF_ARP -> Just #const AF_ARP -#endif -#ifdef Pseudo_AF_HDRCMPLT - Just Pseudo_AF_HDRCMPLT -> Just #const Pseudo_AF_HDRCMPLT -#endif -#ifdef AF_ENCAP - Just AF_ENCAP -> Just #const AF_ENCAP -#endif -#ifdef AF_LINK - Just AF_LINK -> Just #const AF_LINK -#endif -#ifdef AF_RAW - Just AF_RAW -> Just #const AF_RAW -#endif -#ifdef AF_RIF - Just AF_RIF -> Just #const AF_RIF -#endif -#ifdef AF_NETROM - Just AF_NETROM -> Just #const AF_NETROM -#endif -#ifdef AF_BRIDGE - Just AF_BRIDGE -> Just #const AF_BRIDGE -#endif -#ifdef AF_ATMPVC - Just AF_ATMPVC -> Just #const AF_ATMPVC -#endif -#ifdef AF_ROSE - Just AF_ROSE -> Just #const AF_ROSE -#endif -#ifdef AF_NETBEUI - Just AF_NETBEUI -> Just #const AF_NETBEUI -#endif -#ifdef AF_SECURITY - Just AF_SECURITY -> Just #const AF_SECURITY -#endif -#ifdef AF_PACKET - Just AF_PACKET -> Just #const AF_PACKET -#endif -#ifdef AF_ASH - Just AF_ASH -> Just 
#const AF_ASH -#endif -#ifdef AF_ECONET - Just AF_ECONET -> Just #const AF_ECONET -#endif -#ifdef AF_ATMSVC - Just AF_ATMSVC -> Just #const AF_ATMSVC -#endif -#ifdef AF_IRDA - Just AF_IRDA -> Just #const AF_IRDA -#endif -#ifdef AF_PPPOX - Just AF_PPPOX -> Just #const AF_PPPOX -#endif -#ifdef AF_WANPIPE - Just AF_WANPIPE -> Just #const AF_WANPIPE -#endif -#ifdef AF_BLUETOOTH - Just AF_BLUETOOTH -> Just #const AF_BLUETOOTH -#endif - _ -> Nothing - ---------- ---------- - -unpackFamily :: CInt -> Family -unpackFamily f = case f of - (#const AF_UNSPEC) -> AF_UNSPEC -#ifdef AF_UNIX - (#const AF_UNIX) -> AF_UNIX -#endif -#ifdef AF_INET - (#const AF_INET) -> AF_INET -#endif -#ifdef AF_INET6 - (#const AF_INET6) -> AF_INET6 -#endif -#ifdef AF_IMPLINK - (#const AF_IMPLINK) -> AF_IMPLINK -#endif -#ifdef AF_PUP - (#const AF_PUP) -> AF_PUP -#endif -#ifdef AF_CHAOS - (#const AF_CHAOS) -> AF_CHAOS -#endif -#ifdef AF_NS - (#const AF_NS) -> AF_NS -#endif -#ifdef AF_NBS - (#const AF_NBS) -> AF_NBS -#endif -#ifdef AF_ECMA - (#const AF_ECMA) -> AF_ECMA -#endif -#ifdef AF_DATAKIT - (#const AF_DATAKIT) -> AF_DATAKIT -#endif -#ifdef AF_CCITT - (#const AF_CCITT) -> AF_CCITT -#endif -#ifdef AF_SNA - (#const AF_SNA) -> AF_SNA -#endif -#ifdef AF_DECnet - (#const AF_DECnet) -> AF_DECnet -#endif -#ifdef AF_DLI - (#const AF_DLI) -> AF_DLI -#endif -#ifdef AF_LAT - (#const AF_LAT) -> AF_LAT -#endif -#ifdef AF_HYLINK - (#const AF_HYLINK) -> AF_HYLINK -#endif -#ifdef AF_APPLETALK - (#const AF_APPLETALK) -> AF_APPLETALK -#endif -#ifdef AF_ROUTE - (#const AF_ROUTE) -> AF_ROUTE -#endif -#ifdef AF_NETBIOS - (#const AF_NETBIOS) -> AF_NETBIOS -#endif -#ifdef AF_NIT - (#const AF_NIT) -> AF_NIT -#endif -#ifdef AF_802 - (#const AF_802) -> AF_802 -#endif -#ifdef AF_ISO - (#const AF_ISO) -> AF_ISO -#endif -#ifdef AF_OSI -# if (!defined(AF_ISO)) || (defined(AF_ISO) && (AF_ISO != AF_OSI)) - (#const AF_OSI) -> AF_OSI -# endif -#endif -#ifdef AF_NETMAN - (#const AF_NETMAN) -> AF_NETMAN -#endif -#ifdef AF_X25 - 
(#const AF_X25) -> AF_X25 -#endif -#ifdef AF_AX25 - (#const AF_AX25) -> AF_AX25 -#endif -#ifdef AF_OSINET - (#const AF_OSINET) -> AF_OSINET -#endif -#ifdef AF_GOSSIP - (#const AF_GOSSIP) -> AF_GOSSIP -#endif -#if defined(AF_IPX) && (!defined(AF_NS) || AF_NS != AF_IPX) - (#const AF_IPX) -> AF_IPX -#endif -#ifdef Pseudo_AF_XTP - (#const Pseudo_AF_XTP) -> Pseudo_AF_XTP -#endif -#ifdef AF_CTF - (#const AF_CTF) -> AF_CTF -#endif -#ifdef AF_WAN - (#const AF_WAN) -> AF_WAN -#endif -#ifdef AF_SDL - (#const AF_SDL) -> AF_SDL -#endif -#ifdef AF_NETWARE - (#const AF_NETWARE) -> AF_NETWARE -#endif -#ifdef AF_NDD - (#const AF_NDD) -> AF_NDD -#endif -#ifdef AF_INTF - (#const AF_INTF) -> AF_INTF -#endif -#ifdef AF_COIP - (#const AF_COIP) -> AF_COIP -#endif -#ifdef AF_CNT - (#const AF_CNT) -> AF_CNT -#endif -#ifdef Pseudo_AF_RTIP - (#const Pseudo_AF_RTIP) -> Pseudo_AF_RTIP -#endif -#ifdef Pseudo_AF_PIP - (#const Pseudo_AF_PIP) -> Pseudo_AF_PIP -#endif -#ifdef AF_SIP - (#const AF_SIP) -> AF_SIP -#endif -#ifdef AF_ISDN - (#const AF_ISDN) -> AF_ISDN -#endif -#ifdef Pseudo_AF_KEY - (#const Pseudo_AF_KEY) -> Pseudo_AF_KEY -#endif -#ifdef AF_NATM - (#const AF_NATM) -> AF_NATM -#endif -#ifdef AF_ARP - (#const AF_ARP) -> AF_ARP -#endif -#ifdef Pseudo_AF_HDRCMPLT - (#const Pseudo_AF_HDRCMPLT) -> Pseudo_AF_HDRCMPLT -#endif -#ifdef AF_ENCAP - (#const AF_ENCAP) -> AF_ENCAP -#endif -#ifdef AF_LINK - (#const AF_LINK) -> AF_LINK -#endif -#ifdef AF_RAW - (#const AF_RAW) -> AF_RAW -#endif -#ifdef AF_RIF - (#const AF_RIF) -> AF_RIF -#endif -#ifdef AF_NETROM - (#const AF_NETROM) -> AF_NETROM -#endif -#ifdef AF_BRIDGE - (#const AF_BRIDGE) -> AF_BRIDGE -#endif -#ifdef AF_ATMPVC - (#const AF_ATMPVC) -> AF_ATMPVC -#endif -#ifdef AF_ROSE - (#const AF_ROSE) -> AF_ROSE -#endif -#ifdef AF_NETBEUI - (#const AF_NETBEUI) -> AF_NETBEUI -#endif -#ifdef AF_SECURITY - (#const AF_SECURITY) -> AF_SECURITY -#endif -#ifdef AF_PACKET - (#const AF_PACKET) -> AF_PACKET -#endif -#ifdef AF_ASH - (#const AF_ASH) -> AF_ASH 
-#endif -#ifdef AF_ECONET - (#const AF_ECONET) -> AF_ECONET -#endif -#ifdef AF_ATMSVC - (#const AF_ATMSVC) -> AF_ATMSVC -#endif -#ifdef AF_IRDA - (#const AF_IRDA) -> AF_IRDA -#endif -#ifdef AF_PPPOX - (#const AF_PPPOX) -> AF_PPPOX -#endif -#ifdef AF_WANPIPE - (#const AF_WANPIPE) -> AF_WANPIPE -#endif -#ifdef AF_BLUETOOTH - (#const AF_BLUETOOTH) -> AF_BLUETOOTH -#endif - unknown -> error ("Network.Socket.unpackFamily: unknown address " ++ - "family " ++ show unknown) - ------------------------------------------------------------------------- --- Port Numbers - -newtype PortNumber = PortNum Word16 deriving (Eq, Ord, Typeable) --- newtyped to prevent accidental use of sane-looking --- port numbers that haven't actually been converted to --- network-byte-order first. - -instance Show PortNumber where - showsPrec p pn = showsPrec p (portNumberToInt pn) - -intToPortNumber :: Int -> PortNumber -intToPortNumber v = PortNum (htons (fromIntegral v)) - -portNumberToInt :: PortNumber -> Int -portNumberToInt (PortNum po) = fromIntegral (ntohs po) - -foreign import CALLCONV unsafe "ntohs" ntohs :: Word16 -> Word16 -foreign import CALLCONV unsafe "htons" htons :: Word16 -> Word16 ---foreign import CALLCONV unsafe "ntohl" ntohl :: Word32 -> Word32 - -instance Enum PortNumber where - toEnum = intToPortNumber - fromEnum = portNumberToInt - -instance Num PortNumber where - fromInteger i = intToPortNumber (fromInteger i) - -- for completeness. 
- (+) x y = intToPortNumber (portNumberToInt x + portNumberToInt y) - (-) x y = intToPortNumber (portNumberToInt x - portNumberToInt y) - negate x = intToPortNumber (-portNumberToInt x) - (*) x y = intToPortNumber (portNumberToInt x * portNumberToInt y) - abs n = intToPortNumber (abs (portNumberToInt n)) - signum n = intToPortNumber (signum (portNumberToInt n)) - -instance Real PortNumber where - toRational x = toInteger x % 1 - -instance Integral PortNumber where - quotRem a b = let (c,d) = quotRem (portNumberToInt a) (portNumberToInt b) in - (intToPortNumber c, intToPortNumber d) - toInteger a = toInteger (portNumberToInt a) - -instance Storable PortNumber where - sizeOf _ = sizeOf (undefined :: Word16) - alignment _ = alignment (undefined :: Word16) - poke p (PortNum po) = poke (castPtr p) po - peek p = PortNum `liftM` peek (castPtr p) - ------------------------------------------------------------------------- --- Socket addresses - --- The scheme used for addressing sockets is somewhat quirky. The --- calls in the BSD socket API that need to know the socket address --- all operate in terms of struct sockaddr, a `virtual' type of --- socket address. - --- The Internet family of sockets are addressed as struct sockaddr_in, --- so when calling functions that operate on struct sockaddr, we have --- to type cast the Internet socket address into a struct sockaddr. --- Instances of the structure for different families might *not* be --- the same size. Same casting is required of other families of --- sockets such as Xerox NS. Similarly for Unix domain sockets. - --- To represent these socket addresses in Haskell-land, we do what BSD --- didn't do, and use a union/algebraic type for the different --- families. Currently only Unix domain sockets and the Internet --- families are supported. 
- -#if defined(IPV6_SOCKET_SUPPORT) -type FlowInfo = Word32 -type ScopeID = Word32 -#endif - -data SockAddr -- C Names - = SockAddrInet - PortNumber -- sin_port (network byte order) - HostAddress -- sin_addr (ditto) -#if defined(IPV6_SOCKET_SUPPORT) - | SockAddrInet6 - PortNumber -- sin6_port (network byte order) - FlowInfo -- sin6_flowinfo (ditto) - HostAddress6 -- sin6_addr (ditto) - ScopeID -- sin6_scope_id (ditto) -#endif -#if defined(DOMAIN_SOCKET_SUPPORT) - | SockAddrUnix - String -- sun_path -#endif - deriving (Eq, Ord, Typeable) - -#if defined(WITH_WINSOCK) || defined(cygwin32_HOST_OS) -type CSaFamily = (#type unsigned short) -#elif defined(darwin_HOST_OS) -type CSaFamily = (#type u_char) -#else -type CSaFamily = (#type sa_family_t) -#endif - --- | Computes the storage requirements (in bytes) of the given --- 'SockAddr'. This function differs from 'Foreign.Storable.sizeOf' --- in that the value of the argument /is/ used. -sizeOfSockAddr :: SockAddr -> Int -#if defined(DOMAIN_SOCKET_SUPPORT) -sizeOfSockAddr (SockAddrUnix path) = - case path of - '\0':_ -> (#const sizeof(sa_family_t)) + length path - _ -> #const sizeof(struct sockaddr_un) -#endif -sizeOfSockAddr (SockAddrInet _ _) = #const sizeof(struct sockaddr_in) -#if defined(IPV6_SOCKET_SUPPORT) -sizeOfSockAddr (SockAddrInet6 _ _ _ _) = #const sizeof(struct sockaddr_in6) -#endif - --- | Computes the storage requirements (in bytes) required for a --- 'SockAddr' with the given 'Family'. -sizeOfSockAddrByFamily :: Family -> Int -#if defined(DOMAIN_SOCKET_SUPPORT) -sizeOfSockAddrByFamily AF_UNIX = #const sizeof(struct sockaddr_un) -#endif -#if defined(IPV6_SOCKET_SUPPORT) -sizeOfSockAddrByFamily AF_INET6 = #const sizeof(struct sockaddr_in6) -#endif -sizeOfSockAddrByFamily AF_INET = #const sizeof(struct sockaddr_in) - --- | Use a 'SockAddr' with a function requiring a pointer to a --- 'SockAddr' and the length of that 'SockAddr'. 
-withSockAddr :: SockAddr -> (Ptr SockAddr -> Int -> IO a) -> IO a -withSockAddr addr f = do - let sz = sizeOfSockAddr addr - allocaBytes sz $ \p -> pokeSockAddr p addr >> f (castPtr p) sz - --- | Create a new 'SockAddr' for use with a function requiring a --- pointer to a 'SockAddr' and the length of that 'SockAddr'. -withNewSockAddr :: Family -> (Ptr SockAddr -> Int -> IO a) -> IO a -withNewSockAddr family f = do - let sz = sizeOfSockAddrByFamily family - allocaBytes sz $ \ptr -> f ptr sz - --- We can't write an instance of 'Storable' for 'SockAddr' because --- @sockaddr@ is a sum type of variable size but --- 'Foreign.Storable.sizeOf' is required to be constant. - --- Note that on Darwin, the sockaddr structure must be zeroed before --- use. - --- | Write the given 'SockAddr' to the given memory location. -pokeSockAddr :: Ptr a -> SockAddr -> IO () -#if defined(DOMAIN_SOCKET_SUPPORT) -pokeSockAddr p (SockAddrUnix path) = do -#if defined(darwin_HOST_OS) - zeroMemory p (#const sizeof(struct sockaddr_un)) -#endif -#if defined(HAVE_STRUCT_SOCKADDR_SA_LEN) - (#poke struct sockaddr_un, sun_len) p ((#const sizeof(struct sockaddr_un)) :: Word8) -#endif - (#poke struct sockaddr_un, sun_family) p ((#const AF_UNIX) :: CSaFamily) - let pathC = map castCharToCChar path - poker = case path of ('\0':_) -> pokeArray; _ -> pokeArray0 0 - poker ((#ptr struct sockaddr_un, sun_path) p) pathC -#endif -pokeSockAddr p (SockAddrInet (PortNum port) addr) = do -#if defined(darwin_HOST_OS) - zeroMemory p (#const sizeof(struct sockaddr_in)) -#endif -#if defined(HAVE_STRUCT_SOCKADDR_SA_LEN) - (#poke struct sockaddr_in, sin_len) p ((#const sizeof(struct sockaddr_in)) :: Word8) -#endif - (#poke struct sockaddr_in, sin_family) p ((#const AF_INET) :: CSaFamily) - (#poke struct sockaddr_in, sin_port) p port - (#poke struct sockaddr_in, sin_addr) p addr -#if defined(IPV6_SOCKET_SUPPORT) -pokeSockAddr p (SockAddrInet6 (PortNum port) flow addr scope) = do -#if defined(darwin_HOST_OS) - zeroMemory p 
(#const sizeof(struct sockaddr_in6)) -#endif -#if defined(HAVE_STRUCT_SOCKADDR_SA_LEN) - (#poke struct sockaddr_in6, sin6_len) p ((#const sizeof(struct sockaddr_in6)) :: Word8) -#endif - (#poke struct sockaddr_in6, sin6_family) p ((#const AF_INET6) :: CSaFamily) - (#poke struct sockaddr_in6, sin6_port) p port - (#poke struct sockaddr_in6, sin6_flowinfo) p flow - (#poke struct sockaddr_in6, sin6_addr) p addr - (#poke struct sockaddr_in6, sin6_scope_id) p scope -#endif - --- | Read a 'SockAddr' from the given memory location. -peekSockAddr :: Ptr SockAddr -> IO SockAddr -peekSockAddr p = do - family <- (#peek struct sockaddr, sa_family) p - case family :: CSaFamily of -#if defined(DOMAIN_SOCKET_SUPPORT) - (#const AF_UNIX) -> do - str <- peekCString ((#ptr struct sockaddr_un, sun_path) p) - return (SockAddrUnix str) -#endif - (#const AF_INET) -> do - addr <- (#peek struct sockaddr_in, sin_addr) p - port <- (#peek struct sockaddr_in, sin_port) p - return (SockAddrInet (PortNum port) addr) -#if defined(IPV6_SOCKET_SUPPORT) - (#const AF_INET6) -> do - port <- (#peek struct sockaddr_in6, sin6_port) p - flow <- (#peek struct sockaddr_in6, sin6_flowinfo) p - addr <- (#peek struct sockaddr_in6, sin6_addr) p - scope <- (#peek struct sockaddr_in6, sin6_scope_id) p - return (SockAddrInet6 (PortNum port) flow addr scope) -#endif - ------------------------------------------------------------------------- - --- | Network byte order. -type HostAddress = Word32 - -#if defined(IPV6_SOCKET_SUPPORT) --- | Host byte order. -type HostAddress6 = (Word32, Word32, Word32, Word32) - --- The peek32 and poke32 functions work around the fact that the RFCs --- don't require 32-bit-wide address fields to be present. We can --- only portably rely on an 8-bit field, s6_addr. 
- -s6_addr_offset :: Int -s6_addr_offset = (#offset struct in6_addr, s6_addr) - -peek32 :: Ptr a -> Int -> IO Word32 -peek32 p i0 = do - let i' = i0 * 4 - peekByte n = peekByteOff p (s6_addr_offset + i' + n) :: IO Word8 - a `sl` i = fromIntegral a `shiftL` i - a0 <- peekByte 0 - a1 <- peekByte 1 - a2 <- peekByte 2 - a3 <- peekByte 3 - return ((a0 `sl` 24) .|. (a1 `sl` 16) .|. (a2 `sl` 8) .|. (a3 `sl` 0)) - -poke32 :: Ptr a -> Int -> Word32 -> IO () -poke32 p i0 a = do - let i' = i0 * 4 - pokeByte n = pokeByteOff p (s6_addr_offset + i' + n) - x `sr` i = fromIntegral (x `shiftR` i) :: Word8 - pokeByte 0 (a `sr` 24) - pokeByte 1 (a `sr` 16) - pokeByte 2 (a `sr` 8) - pokeByte 3 (a `sr` 0) - -instance Storable HostAddress6 where - sizeOf _ = (#const sizeof(struct in6_addr)) - alignment _ = alignment (undefined :: CInt) - - peek p = do - a <- peek32 p 0 - b <- peek32 p 1 - c <- peek32 p 2 - d <- peek32 p 3 - return (a, b, c, d) - - poke p (a, b, c, d) = do - poke32 p 0 a - poke32 p 1 b - poke32 p 2 c - poke32 p 3 d -#endif - ------------------------------------------------------------------------- --- Helper functions - -foreign import ccall unsafe "string.h" memset :: Ptr a -> CInt -> CSize -> IO () - --- | Zero a structure. 
-zeroMemory :: Ptr a -> CSize -> IO () -zeroMemory dest nbytes = memset dest 0 (fromIntegral nbytes) diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar2=/Network/Socket.hsc cabal-install-1.22-1.22.9.0/=unpacked-tar2=/Network/Socket.hsc --- cabal-install-1.22-1.22.6.0/=unpacked-tar2=/Network/Socket.hsc 2014-09-02 18:14:23.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar2=/Network/Socket.hsc 1970-01-01 00:00:00.000000000 +0000 @@ -1,1645 +0,0 @@ -{-# LANGUAGE CPP, ScopedTypeVariables #-} -{-# OPTIONS_GHC -fno-warn-orphans #-} ------------------------------------------------------------------------------ --- | --- Module : Network.Socket --- Copyright : (c) The University of Glasgow 2001 --- License : BSD-style (see the file libraries/network/LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : provisional --- Portability : portable --- --- The "Network.Socket" module is for when you want full control over --- sockets. Essentially the entire C socket API is exposed through --- this module; in general the operations follow the behaviour of the C --- functions of the same name (consult your favourite Unix networking book). --- --- A higher level interface to networking operations is provided --- through the module "Network". --- ------------------------------------------------------------------------------ - -#include "HsNet.h" - --- In order to process this file, you need to have CALLCONV defined. - -module Network.Socket - ( - -- * Types - Socket(..) - , Family(..) - , isSupportedFamily - , SocketType(..) - , isSupportedSocketType - , SockAddr(..) - , SocketStatus(..) - , HostAddress -#if defined(IPV6_SOCKET_SUPPORT) - , HostAddress6 - , FlowInfo - , ScopeID -#endif - , ShutdownCmd(..) - , ProtocolNumber - , defaultProtocol - , PortNumber(..) - -- PortNumber is used non-abstractly in Network.BSD. ToDo: remove - -- this use and make the type abstract. 
- - -- * Address operations - - , HostName - , ServiceName - -#if defined(IPV6_SOCKET_SUPPORT) - , AddrInfo(..) - - , AddrInfoFlag(..) - , addrInfoFlagImplemented - - , defaultHints - - , getAddrInfo - - , NameInfoFlag(..) - - , getNameInfo -#endif - - -- * Socket operations - , socket -#if defined(DOMAIN_SOCKET_SUPPORT) - , socketPair -#endif - , connect - , bind - , listen - , accept - , getPeerName - , getSocketName - -#if defined(HAVE_STRUCT_UCRED) || defined(HAVE_GETPEEREID) - -- get the credentials of our domain socket peer. - , getPeerCred -#if defined(HAVE_GETPEEREID) - , getPeerEid -#endif -#endif - - , socketPort - - , socketToHandle - - -- ** Sending and receiving data - -- $sendrecv - , sendTo - , sendBufTo - - , recvFrom - , recvBufFrom - - , send - , recv - , recvLen - , sendBuf - , recvBuf - - , inet_addr - , inet_ntoa - - , shutdown - , close - - -- ** Predicates on sockets - , isConnected - , isBound - , isListening - , isReadable - , isWritable - - -- * Socket options - , SocketOption(..) - , isSupportedSocketOption - , getSocketOption - , setSocketOption - - -- * File descriptor transmission -#ifdef DOMAIN_SOCKET_SUPPORT - , sendFd - , recvFd - -#endif - - -- * Special constants - , aNY_PORT - , iNADDR_ANY -#if defined(IPV6_SOCKET_SUPPORT) - , iN6ADDR_ANY -#endif - , sOMAXCONN - , sOL_SOCKET -#ifdef SCM_RIGHTS - , sCM_RIGHTS -#endif - , maxListenQueue - - -- * Initialisation - , withSocketsDo - - -- * Very low level operations - -- in case you ever want to get at the underlying file descriptor.. - , fdSocket - , mkSocket - - -- * Deprecated aliases - -- $deprecated-aliases - , bindSocket - , sClose - , sIsConnected - , sIsBound - , sIsListening - , sIsReadable - , sIsWritable - - -- * Internal - - -- | The following are exported ONLY for use in the BSD module and - -- should not be used anywhere else. 
- - , packFamily - , unpackFamily - , packSocketType - ) where - -import Data.Bits -import Data.List (delete, foldl') -import Data.Maybe (isJust) -import Data.Word (Word8, Word32) -import Foreign.Ptr (Ptr, castPtr, nullPtr) -import Foreign.Storable (Storable(..)) -import Foreign.C.Error -import Foreign.C.String (CString, withCString, withCStringLen, peekCString, peekCStringLen) -import Foreign.C.Types (CUInt, CChar) -import Foreign.C.Types (CInt(..), CSize(..)) -import Foreign.Marshal.Alloc ( alloca, allocaBytes ) -import Foreign.Marshal.Array ( peekArray ) -import Foreign.Marshal.Utils ( maybeWith, with ) - -import System.IO -import Control.Monad (liftM, when) - -import qualified Control.Exception as E -import Control.Concurrent.MVar -import Data.Typeable -import System.IO.Error - -import GHC.Conc (threadWaitRead, threadWaitWrite) -##if MIN_VERSION_base(4,3,1) -import GHC.Conc (closeFdWith) -##endif -# if defined(mingw32_HOST_OS) -import GHC.Conc (asyncDoProc) -import Foreign (FunPtr) -# endif -import qualified GHC.IO.Device -import GHC.IO.Handle.FD -import GHC.IO.Exception -import GHC.IO -import qualified System.Posix.Internals - -import GHC.IO.FD - -import Network.Socket.Internal -import Network.Socket.Types - --- | Either a host name e.g., @\"haskell.org\"@ or a numeric host --- address string consisting of a dotted decimal IPv4 address or an --- IPv6 address e.g., @\"192.168.0.1\"@. -type HostName = String -type ServiceName = String - --- ---------------------------------------------------------------------------- --- On Windows, our sockets are not put in non-blocking mode (non-blocking --- is not supported for regular file descriptors on Windows, and it would --- be a pain to support it only for sockets). So there are two cases: --- --- - the threaded RTS uses safe calls for socket operations to get --- non-blocking I/O, just like the rest of the I/O library --- --- - with the non-threaded RTS, only some operations on sockets will be --- non-blocking. 
Reads and writes go through the normal async I/O --- system. accept() uses asyncDoProc so is non-blocking. A handful --- of others (recvFrom, sendFd, recvFd) will block all threads - if this --- is a problem, -threaded is the workaround. --- -##if defined(mingw32_HOST_OS) -##define SAFE_ON_WIN safe -##else -##define SAFE_ON_WIN unsafe -##endif - ------------------------------------------------------------------------------ --- Socket types - -#if defined(mingw32_HOST_OS) -socket2FD (MkSocket fd _ _ _ _) = - -- HACK, 1 means True - FD{fdFD = fd,fdIsSocket_ = 1} -#endif - -mkSocket :: CInt - -> Family - -> SocketType - -> ProtocolNumber - -> SocketStatus - -> IO Socket -mkSocket fd fam sType pNum stat = do - mStat <- newMVar stat - return (MkSocket fd fam sType pNum mStat) - - -fdSocket :: Socket -> CInt -fdSocket (MkSocket fd _ _ _ _) = fd - --- | This is the default protocol for a given service. -defaultProtocol :: ProtocolNumber -defaultProtocol = 0 - ------------------------------------------------------------------------------ --- SockAddr - -instance Show SockAddr where -#if defined(DOMAIN_SOCKET_SUPPORT) - showsPrec _ (SockAddrUnix str) = showString str -#endif - showsPrec _ (SockAddrInet port ha) - = showString (unsafePerformIO (inet_ntoa ha)) - . showString ":" - . shows port -#if defined(IPV6_SOCKET_SUPPORT) - showsPrec _ addr@(SockAddrInet6 port _ _ _) - = showChar '[' - . showString (unsafePerformIO $ - fst `liftM` getNameInfo [NI_NUMERICHOST] True False addr >>= - maybe (fail "showsPrec: impossible internal error") return) - . showString "]:" - . shows port -#endif - ------------------------------------------------------------------------------ --- Connection Functions - --- In the following connection and binding primitives. The names of --- the equivalent C functions have been preserved where possible. 
It --- should be noted that some of these names used in the C library, --- \tr{bind} in particular, have a different meaning to many Haskell --- programmers and have thus been renamed by appending the prefix --- Socket. - --- | Create a new socket using the given address family, socket type --- and protocol number. The address family is usually 'AF_INET', --- 'AF_INET6', or 'AF_UNIX'. The socket type is usually 'Stream' or --- 'Datagram'. The protocol number is usually 'defaultProtocol'. --- If 'AF_INET6' is used, the 'IPv6Only' socket option is set to 0 --- so that both IPv4 and IPv6 can be handled with one socket. -socket :: Family -- Family Name (usually AF_INET) - -> SocketType -- Socket Type (usually Stream) - -> ProtocolNumber -- Protocol Number (getProtocolByName to find value) - -> IO Socket -- Unconnected Socket -socket family stype protocol = do - c_stype <- packSocketTypeOrThrow "socket" stype - fd <- throwSocketErrorIfMinus1Retry "socket" $ - c_socket (packFamily family) c_stype protocol - setNonBlockIfNeeded fd - socket_status <- newMVar NotConnected - let sock = MkSocket fd family stype protocol socket_status -#if HAVE_DECL_IPV6_V6ONLY -# if defined(mingw32_HOST_OS) - -- the IPv6Only option is only supported on Windows Vista and later, - -- so trying to change it might throw an error - when (family == AF_INET6) $ - E.catch (setSocketOption sock IPv6Only 0) $ (\(_ :: E.IOException) -> return ()) -# else - when (family == AF_INET6) $ setSocketOption sock IPv6Only 0 -# endif -#endif - return sock - --- | Build a pair of connected socket objects using the given address --- family, socket type, and protocol number. Address family, socket --- type, and protocol number are as for the 'socket' function above. --- Availability: Unix. 
-#if defined(DOMAIN_SOCKET_SUPPORT) -socketPair :: Family -- Family Name (usually AF_INET or AF_INET6) - -> SocketType -- Socket Type (usually Stream) - -> ProtocolNumber -- Protocol Number - -> IO (Socket, Socket) -- unnamed and connected. -socketPair family stype protocol = do - allocaBytes (2 * sizeOf (1 :: CInt)) $ \ fdArr -> do - c_stype <- packSocketTypeOrThrow "socketPair" stype - _rc <- throwSocketErrorIfMinus1Retry "socketpair" $ - c_socketpair (packFamily family) c_stype protocol fdArr - [fd1,fd2] <- peekArray 2 fdArr - s1 <- mkNonBlockingSocket fd1 - s2 <- mkNonBlockingSocket fd2 - return (s1,s2) - where - mkNonBlockingSocket fd = do - setNonBlockIfNeeded fd - stat <- newMVar Connected - return (MkSocket fd family stype protocol stat) - -foreign import ccall unsafe "socketpair" - c_socketpair :: CInt -> CInt -> CInt -> Ptr CInt -> IO CInt -#endif - --- | Set the socket to nonblocking, if applicable to this platform. -setNonBlockIfNeeded :: CInt -> IO () -setNonBlockIfNeeded fd = - System.Posix.Internals.setNonBlockingFD fd True - ------------------------------------------------------------------------------ --- Binding a socket - --- | Bind the socket to an address. The socket must not already be --- bound. The 'Family' passed to @bind@ must be the --- same as that passed to 'socket'. If the special port number --- 'aNY_PORT' is passed then the system assigns the next available --- use port. 
-bind :: Socket -- Unconnected Socket - -> SockAddr -- Address to Bind to - -> IO () -bind (MkSocket s _family _stype _protocol socketStatus) addr = do - modifyMVar_ socketStatus $ \ status -> do - if status /= NotConnected - then - ioError (userError ("bind: can't peform bind on socket in status " ++ - show status)) - else do - withSockAddr addr $ \p_addr sz -> do - _status <- throwSocketErrorIfMinus1Retry "bind" $ c_bind s p_addr (fromIntegral sz) - return Bound - ------------------------------------------------------------------------------ --- Connecting a socket - --- | Connect to a remote socket at address. -connect :: Socket -- Unconnected Socket - -> SockAddr -- Socket address stuff - -> IO () -connect sock@(MkSocket s _family _stype _protocol socketStatus) addr = do - modifyMVar_ socketStatus $ \currentStatus -> do - if currentStatus /= NotConnected && currentStatus /= Bound - then - ioError (userError ("connect: can't peform connect on socket in status " ++ - show currentStatus)) - else do - withSockAddr addr $ \p_addr sz -> do - - let connectLoop = do - r <- c_connect s p_addr (fromIntegral sz) - if r == -1 - then do -#if !(defined(HAVE_WINSOCK2_H) && !defined(cygwin32_HOST_OS)) - err <- getErrno - case () of - _ | err == eINTR -> connectLoop - _ | err == eINPROGRESS -> connectBlocked --- _ | err == eAGAIN -> connectBlocked - _otherwise -> throwSocketError "connect" -#else - rc <- c_getLastError - case rc of - #{const WSANOTINITIALISED} -> do - withSocketsDo (return ()) - r <- c_connect s p_addr (fromIntegral sz) - if r == -1 - then throwSocketError "connect" - else return () - _ -> throwSocketError "connect" -#endif - else return () - - connectBlocked = do - threadWaitWrite (fromIntegral s) - err <- getSocketOption sock SoError - if (err == 0) - then return () - else throwSocketErrorCode "connect" (fromIntegral err) - - connectLoop - return Connected - ------------------------------------------------------------------------------ --- Listen - --- | 
Listen for connections made to the socket. The second argument --- specifies the maximum number of queued connections and should be at --- least 1; the maximum value is system-dependent (usually 5). -listen :: Socket -- Connected & Bound Socket - -> Int -- Queue Length - -> IO () -listen (MkSocket s _family _stype _protocol socketStatus) backlog = do - modifyMVar_ socketStatus $ \ status -> do - if status /= Bound - then - ioError (userError ("listen: can't peform listen on socket in status " ++ - show status)) - else do - throwSocketErrorIfMinus1Retry_ "listen" (c_listen s (fromIntegral backlog)) - return Listening - ------------------------------------------------------------------------------ --- Accept --- --- A call to `accept' only returns when data is available on the given --- socket, unless the socket has been set to non-blocking. It will --- return a new socket which should be used to read the incoming data and --- should then be closed. Using the socket returned by `accept' allows --- incoming requests to be queued on the original socket. - --- | Accept a connection. The socket must be bound to an address and --- listening for connections. The return value is a pair @(conn, --- address)@ where @conn@ is a new socket object usable to send and --- receive data on the connection, and @address@ is the address bound --- to the socket on the other end of the connection. 
-accept :: Socket -- Queue Socket - -> IO (Socket, -- Readable Socket - SockAddr) -- Peer details - -accept sock@(MkSocket s family stype protocol status) = do - currentStatus <- readMVar status - okay <- isAcceptable sock - if not okay - then - ioError (userError ("accept: can't perform accept on socket (" ++ (show (family,stype,protocol)) ++") in status " ++ - show currentStatus)) - else do - let sz = sizeOfSockAddrByFamily family - allocaBytes sz $ \ sockaddr -> do -#if defined(mingw32_HOST_OS) - new_sock <- - if threaded - then with (fromIntegral sz) $ \ ptr_len -> - throwSocketErrorIfMinus1Retry "Network.Socket.accept" $ - c_accept_safe s sockaddr ptr_len - else do - paramData <- c_newAcceptParams s (fromIntegral sz) sockaddr - rc <- asyncDoProc c_acceptDoProc paramData - new_sock <- c_acceptNewSock paramData - c_free paramData - when (rc /= 0) $ - throwSocketErrorCode "Network.Socket.accept" (fromIntegral rc) - return new_sock -#else - with (fromIntegral sz) $ \ ptr_len -> do -# ifdef HAVE_ACCEPT4 - new_sock <- throwSocketErrorIfMinus1RetryMayBlock "accept" - (threadWaitRead (fromIntegral s)) - (c_accept4 s sockaddr ptr_len (#const SOCK_NONBLOCK)) -# else - new_sock <- throwSocketErrorWaitRead sock "accept" - (c_accept s sockaddr ptr_len) - setNonBlockIfNeeded new_sock -# endif /* HAVE_ACCEPT4 */ -#endif - addr <- peekSockAddr sockaddr - new_status <- newMVar Connected - return ((MkSocket new_sock family stype protocol new_status), addr) - -#if defined(mingw32_HOST_OS) -foreign import ccall unsafe "HsNet.h acceptNewSock" - c_acceptNewSock :: Ptr () -> IO CInt -foreign import ccall unsafe "HsNet.h newAcceptParams" - c_newAcceptParams :: CInt -> CInt -> Ptr a -> IO (Ptr ()) -foreign import ccall unsafe "HsNet.h &acceptDoProc" - c_acceptDoProc :: FunPtr (Ptr () -> IO Int) -foreign import ccall unsafe "free" - c_free:: Ptr a -> IO () -#endif - ------------------------------------------------------------------------------ --- ** Sending and reciving data - --- 
$sendrecv --- --- Do not use the @send@ and @recv@ functions defined in this module --- in new code, as they incorrectly represent binary data as a Unicode --- string. As a result, these functions are inefficient and may lead --- to bugs in the program. Instead use the @send@ and @recv@ --- functions defined in the 'Network.Socket.ByteString' module. - ------------------------------------------------------------------------------ --- sendTo & recvFrom - --- | Send data to the socket. The recipient can be specified --- explicitly, so the socket need not be in a connected state. --- Returns the number of bytes sent. Applications are responsible for --- ensuring that all data has been sent. --- --- NOTE: blocking on Windows unless you compile with -threaded (see --- GHC ticket #1129) -sendTo :: Socket -- (possibly) bound/connected Socket - -> String -- Data to send - -> SockAddr - -> IO Int -- Number of Bytes sent -sendTo sock xs addr = do - withCStringLen xs $ \(str, len) -> do - sendBufTo sock str len addr - --- | Send data to the socket. The recipient can be specified --- explicitly, so the socket need not be in a connected state. --- Returns the number of bytes sent. Applications are responsible for --- ensuring that all data has been sent. -sendBufTo :: Socket -- (possibly) bound/connected Socket - -> Ptr a -> Int -- Data to send - -> SockAddr - -> IO Int -- Number of Bytes sent -sendBufTo sock@(MkSocket s _family _stype _protocol _status) ptr nbytes addr = do - withSockAddr addr $ \p_addr sz -> do - liftM fromIntegral $ - throwSocketErrorWaitWrite sock "sendTo" $ - c_sendto s ptr (fromIntegral $ nbytes) 0{-flags-} - p_addr (fromIntegral sz) - --- | Receive data from the socket. The socket need not be in a --- connected state. Returns @(bytes, nbytes, address)@ where @bytes@ --- is a @String@ of length @nbytes@ representing the data received and --- @address@ is a 'SockAddr' representing the address of the sending --- socket. 
--- --- NOTE: blocking on Windows unless you compile with -threaded (see --- GHC ticket #1129) -recvFrom :: Socket -> Int -> IO (String, Int, SockAddr) -recvFrom sock nbytes = - allocaBytes nbytes $ \ptr -> do - (len, sockaddr) <- recvBufFrom sock ptr nbytes - str <- peekCStringLen (ptr, len) - return (str, len, sockaddr) - --- | Receive data from the socket, writing it into buffer instead of --- creating a new string. The socket need not be in a connected --- state. Returns @(nbytes, address)@ where @nbytes@ is the number of --- bytes received and @address@ is a 'SockAddr' representing the --- address of the sending socket. --- --- NOTE: blocking on Windows unless you compile with -threaded (see --- GHC ticket #1129) -recvBufFrom :: Socket -> Ptr a -> Int -> IO (Int, SockAddr) -recvBufFrom sock@(MkSocket s family _stype _protocol _status) ptr nbytes - | nbytes <= 0 = ioError (mkInvalidRecvArgError "Network.Socket.recvFrom") - | otherwise = - withNewSockAddr family $ \ptr_addr sz -> do - alloca $ \ptr_len -> do - poke ptr_len (fromIntegral sz) - len <- throwSocketErrorWaitRead sock "recvFrom" $ - c_recvfrom s ptr (fromIntegral nbytes) 0{-flags-} - ptr_addr ptr_len - let len' = fromIntegral len - if len' == 0 - then ioError (mkEOFError "Network.Socket.recvFrom") - else do - flg <- isConnected sock - -- For at least one implementation (WinSock 2), recvfrom() ignores - -- filling in the sockaddr for connected TCP sockets. Cope with - -- this by using getPeerName instead. - sockaddr <- - if flg then - getPeerName sock - else - peekSockAddr ptr_addr - return (len', sockaddr) - ------------------------------------------------------------------------------ --- send & recv - --- | Send data to the socket. The socket must be connected to a remote --- socket. Returns the number of bytes sent. Applications are --- responsible for ensuring that all data has been sent. 
-send :: Socket -- Bound/Connected Socket - -> String -- Data to send - -> IO Int -- Number of Bytes sent -send sock@(MkSocket s _family _stype _protocol _status) xs = do - withCStringLen xs $ \(str, len) -> do - liftM fromIntegral $ -#if defined(mingw32_HOST_OS) - writeRawBufferPtr - "Network.Socket.send" - (socket2FD sock) - (castPtr str) - 0 - (fromIntegral len) -#else - throwSocketErrorWaitWrite sock "send" $ - c_send s str (fromIntegral len) 0{-flags-} -#endif - --- | Send data to the socket. The socket must be connected to a remote --- socket. Returns the number of bytes sent. Applications are --- responsible for ensuring that all data has been sent. -sendBuf :: Socket -- Bound/Connected Socket - -> Ptr Word8 -- Pointer to the data to send - -> Int -- Length of the buffer - -> IO Int -- Number of Bytes sent -sendBuf sock@(MkSocket s _family _stype _protocol _status) str len = do - liftM fromIntegral $ -#if defined(mingw32_HOST_OS) - writeRawBufferPtr - "Network.Socket.sendBuf" - (socket2FD sock) - (castPtr str) - 0 - (fromIntegral len) -#else - throwSocketErrorWaitWrite sock "sendBuf" $ - c_send s str (fromIntegral len) 0{-flags-} -#endif - - --- | Receive data from the socket. The socket must be in a connected --- state. This function may return fewer bytes than specified. If the --- message is longer than the specified length, it may be discarded --- depending on the type of socket. This function may block until a --- message arrives. --- --- Considering hardware and network realities, the maximum number of --- bytes to receive should be a small power of 2, e.g., 4096. --- --- For TCP sockets, a zero length return value means the peer has --- closed its half side of the connection. 
-recv :: Socket -> Int -> IO String -recv sock l = recvLen sock l >>= \ (s,_) -> return s - -recvLen :: Socket -> Int -> IO (String, Int) -recvLen sock@(MkSocket s _family _stype _protocol _status) nbytes - | nbytes <= 0 = ioError (mkInvalidRecvArgError "Network.Socket.recv") - | otherwise = do - allocaBytes nbytes $ \ptr -> do - len <- -#if defined(mingw32_HOST_OS) - readRawBufferPtr "Network.Socket.recvLen" (socket2FD sock) ptr 0 - (fromIntegral nbytes) -#else - throwSocketErrorWaitRead sock "recv" $ - c_recv s ptr (fromIntegral nbytes) 0{-flags-} -#endif - let len' = fromIntegral len - if len' == 0 - then ioError (mkEOFError "Network.Socket.recv") - else do - s' <- peekCStringLen (castPtr ptr,len') - return (s', len') - --- | Receive data from the socket. The socket must be in a connected --- state. This function may return fewer bytes than specified. If the --- message is longer than the specified length, it may be discarded --- depending on the type of socket. This function may block until a --- message arrives. --- --- Considering hardware and network realities, the maximum number of --- bytes to receive should be a small power of 2, e.g., 4096. --- --- For TCP sockets, a zero length return value means the peer has --- closed its half side of the connection. 
-recvBuf :: Socket -> Ptr Word8 -> Int -> IO Int -recvBuf sock p l = recvLenBuf sock p l - -recvLenBuf :: Socket -> Ptr Word8 -> Int -> IO Int -recvLenBuf sock@(MkSocket s _family _stype _protocol _status) ptr nbytes - | nbytes <= 0 = ioError (mkInvalidRecvArgError "Network.Socket.recvBuf") - | otherwise = do - len <- -#if defined(mingw32_HOST_OS) - readRawBufferPtr "Network.Socket.recvLenBuf" (socket2FD sock) ptr 0 - (fromIntegral nbytes) -#else - throwSocketErrorWaitRead sock "recvBuf" $ - c_recv s (castPtr ptr) (fromIntegral nbytes) 0{-flags-} -#endif - let len' = fromIntegral len - if len' == 0 - then ioError (mkEOFError "Network.Socket.recvBuf") - else return len' - - --- --------------------------------------------------------------------------- --- socketPort --- --- The port number the given socket is currently connected to can be --- determined by calling $port$, is generally only useful when bind --- was given $aNY\_PORT$. - -socketPort :: Socket -- Connected & Bound Socket - -> IO PortNumber -- Port Number of Socket -socketPort sock@(MkSocket _ AF_INET _ _ _) = do - (SockAddrInet port _) <- getSocketName sock - return port -#if defined(IPV6_SOCKET_SUPPORT) -socketPort sock@(MkSocket _ AF_INET6 _ _ _) = do - (SockAddrInet6 port _ _ _) <- getSocketName sock - return port -#endif -socketPort (MkSocket _ family _ _ _) = - ioError (userError ("socketPort: not supported for Family " ++ show family)) - - --- --------------------------------------------------------------------------- --- getPeerName - --- Calling $getPeerName$ returns the address details of the machine, --- other than the local one, which is connected to the socket. This is --- used in programs such as FTP to determine where to send the --- returning data. The corresponding call to get the details of the --- local machine is $getSocketName$. 
- -getPeerName :: Socket -> IO SockAddr -getPeerName (MkSocket s family _ _ _) = do - withNewSockAddr family $ \ptr sz -> do - with (fromIntegral sz) $ \int_star -> do - throwSocketErrorIfMinus1Retry_ "getPeerName" $ c_getpeername s ptr int_star - _sz <- peek int_star - peekSockAddr ptr - -getSocketName :: Socket -> IO SockAddr -getSocketName (MkSocket s family _ _ _) = do - withNewSockAddr family $ \ptr sz -> do - with (fromIntegral sz) $ \int_star -> do - throwSocketErrorIfMinus1Retry_ "getSocketName" $ c_getsockname s ptr int_star - peekSockAddr ptr - ------------------------------------------------------------------------------ --- Socket Properties - --- | Socket options for use with 'setSocketOption' and 'getSocketOption'. --- --- The existence of a constructor does not imply that the relevant option --- is supported on your system: see 'isSupportedSocketOption' -data SocketOption - = Debug -- ^ SO_DEBUG - | ReuseAddr -- ^ SO_REUSEADDR - | Type -- ^ SO_TYPE - | SoError -- ^ SO_ERROR - | DontRoute -- ^ SO_DONTROUTE - | Broadcast -- ^ SO_BROADCAST - | SendBuffer -- ^ SO_SNDBUF - | RecvBuffer -- ^ SO_RCVBUF - | KeepAlive -- ^ SO_KEEPALIVE - | OOBInline -- ^ SO_OOBINLINE - | TimeToLive -- ^ IP_TTL - | MaxSegment -- ^ TCP_MAXSEG - | NoDelay -- ^ TCP_NODELAY - | Cork -- ^ TCP_CORK - | Linger -- ^ SO_LINGER - | ReusePort -- ^ SO_REUSEPORT - | RecvLowWater -- ^ SO_RCVLOWAT - | SendLowWater -- ^ SO_SNDLOWAT - | RecvTimeOut -- ^ SO_RCVTIMEO - | SendTimeOut -- ^ SO_SNDTIMEO - | UseLoopBack -- ^ SO_USELOOPBACK - | IPv6Only -- ^ IPV6_V6ONLY - | CustomSockOpt (CInt, CInt) - deriving (Show, Typeable) - --- | Does the 'SocketOption' exist on this system? -isSupportedSocketOption :: SocketOption -> Bool -isSupportedSocketOption = isJust . packSocketOption - --- | For a socket option, return Just (level, value) where level is the --- corresponding C option level constant (e.g. SOL_SOCKET) and value is --- the option constant itself (e.g. 
SO_DEBUG) --- If either constant does not exist, return Nothing. -packSocketOption :: SocketOption -> Maybe (CInt, CInt) -packSocketOption so = - -- The Just here is a hack to disable GHC's overlapping pattern detection: - -- the problem is if all constants are present, the fallback pattern is - -- redundant, but if they aren't then it isn't. Hence we introduce an - -- extra pattern (Nothing) that can't possibly happen, so that the - -- fallback is always (in principle) necessary. - -- I feel a little bad for including this, but such are the sacrifices we - -- make while working with CPP - excluding the fallback pattern correctly - -- would be a serious nuisance. - -- (NB: comments elsewhere in this file refer to this one) - case Just so of -#ifdef SOL_SOCKET -#ifdef SO_DEBUG - Just Debug -> Just ((#const SOL_SOCKET), (#const SO_DEBUG)) -#endif -#ifdef SO_REUSEADDR - Just ReuseAddr -> Just ((#const SOL_SOCKET), (#const SO_REUSEADDR)) -#endif -#ifdef SO_TYPE - Just Type -> Just ((#const SOL_SOCKET), (#const SO_TYPE)) -#endif -#ifdef SO_ERROR - Just SoError -> Just ((#const SOL_SOCKET), (#const SO_ERROR)) -#endif -#ifdef SO_DONTROUTE - Just DontRoute -> Just ((#const SOL_SOCKET), (#const SO_DONTROUTE)) -#endif -#ifdef SO_BROADCAST - Just Broadcast -> Just ((#const SOL_SOCKET), (#const SO_BROADCAST)) -#endif -#ifdef SO_SNDBUF - Just SendBuffer -> Just ((#const SOL_SOCKET), (#const SO_SNDBUF)) -#endif -#ifdef SO_RCVBUF - Just RecvBuffer -> Just ((#const SOL_SOCKET), (#const SO_RCVBUF)) -#endif -#ifdef SO_KEEPALIVE - Just KeepAlive -> Just ((#const SOL_SOCKET), (#const SO_KEEPALIVE)) -#endif -#ifdef SO_OOBINLINE - Just OOBInline -> Just ((#const SOL_SOCKET), (#const SO_OOBINLINE)) -#endif -#ifdef SO_LINGER - Just Linger -> Just ((#const SOL_SOCKET), (#const SO_LINGER)) -#endif -#ifdef SO_REUSEPORT - Just ReusePort -> Just ((#const SOL_SOCKET), (#const SO_REUSEPORT)) -#endif -#ifdef SO_RCVLOWAT - Just RecvLowWater -> Just ((#const SOL_SOCKET), (#const SO_RCVLOWAT)) 
-#endif -#ifdef SO_SNDLOWAT - Just SendLowWater -> Just ((#const SOL_SOCKET), (#const SO_SNDLOWAT)) -#endif -#ifdef SO_RCVTIMEO - Just RecvTimeOut -> Just ((#const SOL_SOCKET), (#const SO_RCVTIMEO)) -#endif -#ifdef SO_SNDTIMEO - Just SendTimeOut -> Just ((#const SOL_SOCKET), (#const SO_SNDTIMEO)) -#endif -#ifdef SO_USELOOPBACK - Just UseLoopBack -> Just ((#const SOL_SOCKET), (#const SO_USELOOPBACK)) -#endif -#endif // SOL_SOCKET -#if HAVE_DECL_IPPROTO_IP -#ifdef IP_TTL - Just TimeToLive -> Just ((#const IPPROTO_IP), (#const IP_TTL)) -#endif -#endif // HAVE_DECL_IPPROTO_IP -#if HAVE_DECL_IPPROTO_TCP -#ifdef TCP_MAXSEG - Just MaxSegment -> Just ((#const IPPROTO_TCP), (#const TCP_MAXSEG)) -#endif -#ifdef TCP_NODELAY - Just NoDelay -> Just ((#const IPPROTO_TCP), (#const TCP_NODELAY)) -#endif -#ifdef TCP_CORK - Just Cork -> Just ((#const IPPROTO_TCP), (#const TCP_CORK)) -#endif -#endif // HAVE_DECL_IPPROTO_TCP -#if HAVE_DECL_IPPROTO_IPV6 -#if HAVE_DECL_IPV6_V6ONLY - Just IPv6Only -> Just ((#const IPPROTO_IPV6), (#const IPV6_V6ONLY)) -#endif -#endif // HAVE_DECL_IPPROTO_IPV6 - Just (CustomSockOpt opt) -> Just opt - _ -> Nothing - --- | Return the option level and option value if they exist, --- otherwise throw an error that begins "Network.Socket." ++ the String --- parameter -packSocketOption' :: String -> SocketOption -> IO (CInt, CInt) -packSocketOption' caller so = maybe err return (packSocketOption so) - where - err = ioError . userError . concat $ ["Network.Socket.", caller, - ": socket option ", show so, " unsupported on this system"] - --- | Set a socket option that expects an Int value. --- There is currently no API to set e.g. 
the timeval socket options -setSocketOption :: Socket - -> SocketOption -- Option Name - -> Int -- Option Value - -> IO () -setSocketOption (MkSocket s _ _ _ _) so v = do - (level, opt) <- packSocketOption' "setSocketOption" so - with (fromIntegral v) $ \ptr_v -> do - throwSocketErrorIfMinus1_ "setSocketOption" $ - c_setsockopt s level opt ptr_v - (fromIntegral (sizeOf (undefined :: CInt))) - return () - - --- | Get a socket option that gives an Int value. --- There is currently no API to get e.g. the timeval socket options -getSocketOption :: Socket - -> SocketOption -- Option Name - -> IO Int -- Option Value -getSocketOption (MkSocket s _ _ _ _) so = do - (level, opt) <- packSocketOption' "getSocketOption" so - alloca $ \ptr_v -> - with (fromIntegral (sizeOf (undefined :: CInt))) $ \ptr_sz -> do - throwSocketErrorIfMinus1Retry_ "getSocketOption" $ - c_getsockopt s level opt ptr_v ptr_sz - fromIntegral `liftM` peek ptr_v - - -#if defined(HAVE_STRUCT_UCRED) || defined(HAVE_GETPEEREID) --- | Returns the processID, userID and groupID of the socket's peer. --- --- Only available on platforms that support SO_PEERCRED or GETPEEREID(3) --- on domain sockets. --- GETPEEREID(3) returns userID and groupID. processID is always 0. 
-getPeerCred :: Socket -> IO (CUInt, CUInt, CUInt) -getPeerCred sock = do -#ifdef HAVE_STRUCT_UCRED - let fd = fdSocket sock - let sz = (fromIntegral (#const sizeof(struct ucred))) - with sz $ \ ptr_cr -> - alloca $ \ ptr_sz -> do - poke ptr_sz sz - throwSocketErrorIfMinus1Retry "getPeerCred" $ - c_getsockopt fd (#const SOL_SOCKET) (#const SO_PEERCRED) ptr_cr ptr_sz - pid <- (#peek struct ucred, pid) ptr_cr - uid <- (#peek struct ucred, uid) ptr_cr - gid <- (#peek struct ucred, gid) ptr_cr - return (pid, uid, gid) -#else - (uid,gid) <- getPeerEid sock - return (0,uid,gid) -#endif - -#ifdef HAVE_GETPEEREID --- | The getpeereid() function returns the effective user and group IDs of the --- peer connected to a UNIX-domain socket -getPeerEid :: Socket -> IO (CUInt, CUInt) -getPeerEid sock = do - let fd = fdSocket sock - alloca $ \ ptr_uid -> - alloca $ \ ptr_gid -> do - throwSocketErrorIfMinus1Retry_ "getPeerEid" $ - c_getpeereid fd ptr_uid ptr_gid - uid <- peek ptr_uid - gid <- peek ptr_gid - return (uid, gid) -#endif -#endif - -##if !(MIN_VERSION_base(4,3,1)) -closeFdWith closer fd = closer fd -##endif - -#if defined(DOMAIN_SOCKET_SUPPORT) --- sending/receiving ancillary socket data; low-level mechanism --- for transmitting file descriptors, mainly. -sendFd :: Socket -> CInt -> IO () -sendFd sock outfd = do - throwSocketErrorWaitWrite sock "sendFd" $ - c_sendFd (fdSocket sock) outfd - -- Note: If Winsock supported FD-passing, thi would have been - -- incorrect (since socket FDs need to be closed via closesocket().) 
- closeFd outfd - -recvFd :: Socket -> IO CInt -recvFd sock = do - theFd <- throwSocketErrorWaitRead sock "recvFd" $ - c_recvFd (fdSocket sock) - return theFd - -foreign import ccall SAFE_ON_WIN "sendFd" c_sendFd :: CInt -> CInt -> IO CInt -foreign import ccall SAFE_ON_WIN "recvFd" c_recvFd :: CInt -> IO CInt - -#endif - --- --------------------------------------------------------------------------- --- Utility Functions - -aNY_PORT :: PortNumber -aNY_PORT = 0 - --- | The IPv4 wild card address. - -iNADDR_ANY :: HostAddress -iNADDR_ANY = htonl (#const INADDR_ANY) - -foreign import CALLCONV unsafe "htonl" htonl :: Word32 -> Word32 - -#if defined(IPV6_SOCKET_SUPPORT) --- | The IPv6 wild card address. - -iN6ADDR_ANY :: HostAddress6 -iN6ADDR_ANY = (0, 0, 0, 0) -#endif - -sOMAXCONN :: Int -sOMAXCONN = #const SOMAXCONN - -sOL_SOCKET :: Int -sOL_SOCKET = #const SOL_SOCKET - -#ifdef SCM_RIGHTS -sCM_RIGHTS :: Int -sCM_RIGHTS = #const SCM_RIGHTS -#endif - --- | This is the value of SOMAXCONN, typically 128. --- 128 is good enough for normal network servers but --- is too small for high performance servers. -maxListenQueue :: Int -maxListenQueue = sOMAXCONN - --- ----------------------------------------------------------------------------- - -data ShutdownCmd - = ShutdownReceive - | ShutdownSend - | ShutdownBoth - deriving Typeable - -sdownCmdToInt :: ShutdownCmd -> CInt -sdownCmdToInt ShutdownReceive = 0 -sdownCmdToInt ShutdownSend = 1 -sdownCmdToInt ShutdownBoth = 2 - --- | Shut down one or both halves of the connection, depending on the --- second argument to the function. If the second argument is --- 'ShutdownReceive', further receives are disallowed. If it is --- 'ShutdownSend', further sends are disallowed. If it is --- 'ShutdownBoth', further sends and receives are disallowed. 
-shutdown :: Socket -> ShutdownCmd -> IO () -shutdown (MkSocket s _ _ _ _) stype = do - throwSocketErrorIfMinus1Retry_ "shutdown" (c_shutdown s (sdownCmdToInt stype)) - return () - --- ----------------------------------------------------------------------------- - --- | Close the socket. All future operations on the socket object --- will fail. The remote end will receive no more data (after queued --- data is flushed). -close :: Socket -> IO () -close (MkSocket s _ _ _ socketStatus) = do - modifyMVar_ socketStatus $ \ status -> - case status of - ConvertedToHandle -> - ioError (userError ("close: converted to a Handle, use hClose instead")) - Closed -> - return status - _ -> closeFdWith (closeFd . fromIntegral) (fromIntegral s) >> return Closed - --- ----------------------------------------------------------------------------- - --- | Determines whether 'close' has been used on the 'Socket'. This --- does /not/ indicate any status about the socket beyond this. If the --- socket has been closed remotely, this function can still return --- 'True'. -isConnected :: Socket -> IO Bool -isConnected (MkSocket _ _ _ _ status) = do - value <- readMVar status - return (value == Connected) - --- ----------------------------------------------------------------------------- --- Socket Predicates - -isBound :: Socket -> IO Bool -isBound (MkSocket _ _ _ _ status) = do - value <- readMVar status - return (value == Bound) - -isListening :: Socket -> IO Bool -isListening (MkSocket _ _ _ _ status) = do - value <- readMVar status - return (value == Listening) - -isReadable :: Socket -> IO Bool -isReadable (MkSocket _ _ _ _ status) = do - value <- readMVar status - return (value == Listening || value == Connected) - -isWritable :: Socket -> IO Bool -isWritable = isReadable -- sort of. 
- -isAcceptable :: Socket -> IO Bool -#if defined(DOMAIN_SOCKET_SUPPORT) -isAcceptable (MkSocket _ AF_UNIX x _ status) - | x == Stream || x == SeqPacket = do - value <- readMVar status - return (value == Connected || value == Bound || value == Listening) -isAcceptable (MkSocket _ AF_UNIX _ _ _) = return False -#endif -isAcceptable (MkSocket _ _ _ _ status) = do - value <- readMVar status - return (value == Connected || value == Listening) - --- ----------------------------------------------------------------------------- --- Internet address manipulation routines: - -inet_addr :: String -> IO HostAddress -inet_addr ipstr = do - withCString ipstr $ \str -> do - had <- c_inet_addr str - if had == -1 - then ioError (userError ("inet_addr: Malformed address: " ++ ipstr)) - else return had -- network byte order - -inet_ntoa :: HostAddress -> IO String -inet_ntoa haddr = do - pstr <- c_inet_ntoa haddr - peekCString pstr - --- | Turns a Socket into an 'Handle'. By default, the new handle is --- unbuffered. Use 'System.IO.hSetBuffering' to change the buffering. --- --- Note that since a 'Handle' is automatically closed by a finalizer --- when it is no longer referenced, you should avoid doing any more --- operations on the 'Socket' after calling 'socketToHandle'. To --- close the 'Socket' after 'socketToHandle', call 'System.IO.hClose' --- on the 'Handle'. - -socketToHandle :: Socket -> IOMode -> IO Handle -socketToHandle s@(MkSocket fd _ _ _ socketStatus) mode = do - modifyMVar socketStatus $ \ status -> - if status == ConvertedToHandle - then ioError (userError ("socketToHandle: already a Handle")) - else do - h <- fdToHandle' (fromIntegral fd) (Just GHC.IO.Device.Stream) True (show s) mode True{-bin-} - hSetBuffering h NoBuffering - return (ConvertedToHandle, h) - --- | Pack a list of values into a bitmask. The possible mappings from --- value to bit-to-set are given as the first argument. 
We assume --- that each value can cause exactly one bit to be set; unpackBits will --- break if this property is not true. - -packBits :: (Eq a, Num b, Bits b) => [(a, b)] -> [a] -> b - -packBits mapping xs = foldl' pack 0 mapping - where pack acc (k, v) | k `elem` xs = acc .|. v - | otherwise = acc - --- | Unpack a bitmask into a list of values. - -unpackBits :: (Num b, Bits b) => [(a, b)] -> b -> [a] - --- Be permissive and ignore unknown bit values. At least on OS X, --- getaddrinfo returns an ai_flags field with bits set that have no --- entry in . -unpackBits [] _ = [] -unpackBits ((k,v):xs) r - | r .&. v /= 0 = k : unpackBits xs (r .&. complement v) - | otherwise = unpackBits xs r - ------------------------------------------------------------------------------ --- Address and service lookups - -#if defined(IPV6_SOCKET_SUPPORT) - --- | Flags that control the querying behaviour of 'getAddrInfo'. -data AddrInfoFlag - = AI_ADDRCONFIG - | AI_ALL - | AI_CANONNAME - | AI_NUMERICHOST - | AI_NUMERICSERV - | AI_PASSIVE - | AI_V4MAPPED - deriving (Eq, Read, Show, Typeable) - -aiFlagMapping :: [(AddrInfoFlag, CInt)] - -aiFlagMapping = - [ -#if HAVE_DECL_AI_ADDRCONFIG - (AI_ADDRCONFIG, #const AI_ADDRCONFIG), -#else - (AI_ADDRCONFIG, 0), -#endif -#if HAVE_DECL_AI_ALL - (AI_ALL, #const AI_ALL), -#else - (AI_ALL, 0), -#endif - (AI_CANONNAME, #const AI_CANONNAME), - (AI_NUMERICHOST, #const AI_NUMERICHOST), -#if HAVE_DECL_AI_NUMERICSERV - (AI_NUMERICSERV, #const AI_NUMERICSERV), -#else - (AI_NUMERICSERV, 0), -#endif - (AI_PASSIVE, #const AI_PASSIVE), -#if HAVE_DECL_AI_V4MAPPED - (AI_V4MAPPED, #const AI_V4MAPPED) -#else - (AI_V4MAPPED, 0) -#endif - ] - --- | Indicate whether the given 'AddrInfoFlag' will have any effect on --- this system. 
-addrInfoFlagImplemented :: AddrInfoFlag -> Bool -addrInfoFlagImplemented f = packBits aiFlagMapping [f] /= 0 - -data AddrInfo = - AddrInfo { - addrFlags :: [AddrInfoFlag], - addrFamily :: Family, - addrSocketType :: SocketType, - addrProtocol :: ProtocolNumber, - addrAddress :: SockAddr, - addrCanonName :: Maybe String - } - deriving (Eq, Show, Typeable) - -instance Storable AddrInfo where - sizeOf _ = #const sizeof(struct addrinfo) - alignment _ = alignment (undefined :: CInt) - - peek p = do - ai_flags <- (#peek struct addrinfo, ai_flags) p - ai_family <- (#peek struct addrinfo, ai_family) p - ai_socktype <- (#peek struct addrinfo, ai_socktype) p - ai_protocol <- (#peek struct addrinfo, ai_protocol) p - ai_addr <- (#peek struct addrinfo, ai_addr) p >>= peekSockAddr - ai_canonname_ptr <- (#peek struct addrinfo, ai_canonname) p - - ai_canonname <- if ai_canonname_ptr == nullPtr - then return Nothing - else liftM Just $ peekCString ai_canonname_ptr - - socktype <- unpackSocketType' "AddrInfo.peek" ai_socktype - return (AddrInfo - { - addrFlags = unpackBits aiFlagMapping ai_flags, - addrFamily = unpackFamily ai_family, - addrSocketType = socktype, - addrProtocol = ai_protocol, - addrAddress = ai_addr, - addrCanonName = ai_canonname - }) - - poke p (AddrInfo flags family socketType protocol _ _) = do - c_stype <- packSocketTypeOrThrow "AddrInfo.poke" socketType - - (#poke struct addrinfo, ai_flags) p (packBits aiFlagMapping flags) - (#poke struct addrinfo, ai_family) p (packFamily family) - (#poke struct addrinfo, ai_socktype) p c_stype - (#poke struct addrinfo, ai_protocol) p protocol - - -- stuff below is probably not needed, but let's zero it for safety - - (#poke struct addrinfo, ai_addrlen) p (0::CSize) - (#poke struct addrinfo, ai_addr) p nullPtr - (#poke struct addrinfo, ai_canonname) p nullPtr - (#poke struct addrinfo, ai_next) p nullPtr - -data NameInfoFlag - = NI_DGRAM - | NI_NAMEREQD - | NI_NOFQDN - | NI_NUMERICHOST - | NI_NUMERICSERV - deriving (Eq, Read, 
Show, Typeable) - -niFlagMapping :: [(NameInfoFlag, CInt)] - -niFlagMapping = [(NI_DGRAM, #const NI_DGRAM), - (NI_NAMEREQD, #const NI_NAMEREQD), - (NI_NOFQDN, #const NI_NOFQDN), - (NI_NUMERICHOST, #const NI_NUMERICHOST), - (NI_NUMERICSERV, #const NI_NUMERICSERV)] - --- | Default hints for address lookup with 'getAddrInfo'. The values --- of the 'addrAddress' and 'addrCanonName' fields are 'undefined', --- and are never inspected by 'getAddrInfo'. - -defaultHints :: AddrInfo - -defaultHints = AddrInfo { - addrFlags = [], - addrFamily = AF_UNSPEC, - addrSocketType = NoSocketType, - addrProtocol = defaultProtocol, - addrAddress = undefined, - addrCanonName = undefined - } - --- | Resolve a host or service name to one or more addresses. --- The 'AddrInfo' values that this function returns contain 'SockAddr' --- values that you can pass directly to 'connect' or --- 'bind'. --- --- This function is protocol independent. It can return both IPv4 and --- IPv6 address information. --- --- The 'AddrInfo' argument specifies the preferred query behaviour, --- socket options, or protocol. You can override these conveniently --- using Haskell's record update syntax on 'defaultHints', for example --- as follows: --- --- @ --- myHints = defaultHints { addrFlags = [AI_ADDRCONFIG, AI_CANONNAME] } --- @ --- --- Values for 'addrFlags' control query behaviour. The supported --- flags are as follows: --- --- [@AI_PASSIVE@] If no 'HostName' value is provided, the network --- address in each 'SockAddr' --- will be left as a "wild card", i.e. as either 'iNADDR_ANY' --- or 'iN6ADDR_ANY'. This is useful for server applications that --- will accept connections from any client. --- --- [@AI_CANONNAME@] The 'addrCanonName' field of the first returned --- 'AddrInfo' will contain the "canonical name" of the host. --- --- [@AI_NUMERICHOST@] The 'HostName' argument /must/ be a numeric --- address in string form, and network name lookups will not be --- attempted. 
--- --- /Note/: Although the following flags are required by RFC 3493, they --- may not have an effect on all platforms, because the underlying --- network stack may not support them. To see whether a flag from the --- list below will have any effect, call 'addrInfoFlagImplemented'. --- --- [@AI_NUMERICSERV@] The 'ServiceName' argument /must/ be a port --- number in string form, and service name lookups will not be --- attempted. --- --- [@AI_ADDRCONFIG@] The list of returned 'AddrInfo' values will --- only contain IPv4 addresses if the local system has at least --- one IPv4 interface configured, and likewise for IPv6. --- --- [@AI_V4MAPPED@] If an IPv6 lookup is performed, and no IPv6 --- addresses are found, IPv6-mapped IPv4 addresses will be --- returned. --- --- [@AI_ALL@] If 'AI_ALL' is specified, return all matching IPv6 and --- IPv4 addresses. Otherwise, this flag has no effect. --- --- You must provide a 'Just' value for at least one of the 'HostName' --- or 'ServiceName' arguments. 'HostName' can be either a numeric --- network address (dotted quad for IPv4, colon-separated hex for --- IPv6) or a hostname. In the latter case, its addresses will be --- looked up unless 'AI_NUMERICHOST' is specified as a hint. If you --- do not provide a 'HostName' value /and/ do not set 'AI_PASSIVE' as --- a hint, network addresses in the result will contain the address of --- the loopback interface. --- --- If the query fails, this function throws an IO exception instead of --- returning an empty list. Otherwise, it returns a non-empty list --- of 'AddrInfo' values. --- --- There are several reasons why a query might result in several --- values. For example, the queried-for host could be multihomed, or --- the service might be available via several protocols. --- --- Note: the order of arguments is slightly different to that defined --- for @getaddrinfo@ in RFC 2553. The 'AddrInfo' parameter comes first --- to make partial application easier. 
--- --- Example: --- @ --- let hints = defaultHints { addrFlags = [AI_ADDRCONFIG, AI_CANONNAME] } --- addrs <- getAddrInfo (Just hints) (Just "www.haskell.org") (Just "http") --- let addr = head addrs --- sock <- socket (addrFamily addr) (addrSocketType addr) (addrProtocol addr) --- connect sock (addrAddress addr) --- @ - -getAddrInfo :: Maybe AddrInfo -- ^ preferred socket type or protocol - -> Maybe HostName -- ^ host name to look up - -> Maybe ServiceName -- ^ service name to look up - -> IO [AddrInfo] -- ^ resolved addresses, with "best" first - -getAddrInfo hints node service = - maybeWith withCString node $ \c_node -> - maybeWith withCString service $ \c_service -> - maybeWith with filteredHints $ \c_hints -> - alloca $ \ptr_ptr_addrs -> do - ret <- c_getaddrinfo c_node c_service c_hints ptr_ptr_addrs - case ret of - 0 -> do ptr_addrs <- peek ptr_ptr_addrs - ais <- followAddrInfo ptr_addrs - c_freeaddrinfo ptr_addrs - return ais - _ -> do err <- gai_strerror ret - ioError (ioeSetErrorString - (mkIOError NoSuchThing "getAddrInfo" Nothing - Nothing) err) - -- Leaving out the service and using AI_NUMERICSERV causes a - -- segfault on OS X 10.8.2. This code removes AI_NUMERICSERV - -- (which has no effect) in that case. 
- where -#if defined(darwin_HOST_OS) - filteredHints = case service of - Nothing -> fmap (\ h -> h { addrFlags = delete AI_NUMERICSERV (addrFlags h) }) hints - _ -> hints -#else - filteredHints = hints -#endif - -followAddrInfo :: Ptr AddrInfo -> IO [AddrInfo] - -followAddrInfo ptr_ai | ptr_ai == nullPtr = return [] - | otherwise = do - a <- peek ptr_ai - as <- (#peek struct addrinfo, ai_next) ptr_ai >>= followAddrInfo - return (a:as) - -foreign import ccall safe "hsnet_getaddrinfo" - c_getaddrinfo :: CString -> CString -> Ptr AddrInfo -> Ptr (Ptr AddrInfo) - -> IO CInt - -foreign import ccall safe "hsnet_freeaddrinfo" - c_freeaddrinfo :: Ptr AddrInfo -> IO () - -gai_strerror :: CInt -> IO String - -#ifdef HAVE_GAI_STRERROR -gai_strerror n = c_gai_strerror n >>= peekCString - -foreign import ccall safe "gai_strerror" - c_gai_strerror :: CInt -> IO CString -#else -gai_strerror n = return ("error " ++ show n) -#endif - -withCStringIf :: Bool -> Int -> (CSize -> CString -> IO a) -> IO a -withCStringIf False _ f = f 0 nullPtr -withCStringIf True n f = allocaBytes n (f (fromIntegral n)) - --- | Resolve an address to a host or service name. --- This function is protocol independent. --- --- The list of 'NameInfoFlag' values controls query behaviour. The --- supported flags are as follows: --- --- [@NI_NOFQDN@] If a host is local, return only the --- hostname part of the FQDN. --- --- [@NI_NUMERICHOST@] The name of the host is not --- looked up. Instead, a numeric representation of the host's --- address is returned. For an IPv4 address, this will be a --- dotted-quad string. For IPv6, it will be colon-separated --- hexadecimal. --- --- [@NI_NUMERICSERV@] The name of the service is not --- looked up. Instead, a numeric representation of the --- service is returned. --- --- [@NI_NAMEREQD@] If the hostname cannot be looked up, an IO error --- is thrown. --- --- [@NI_DGRAM@] Resolve a datagram-based service name. 
This is --- required only for the few protocols that have different port --- numbers for their datagram-based versions than for their --- stream-based versions. --- --- Hostname and service name lookups can be expensive. You can --- specify which lookups to perform via the two 'Bool' arguments. If --- one of these is 'False', the corresponding value in the returned --- tuple will be 'Nothing', and no lookup will be performed. --- --- If a host or service's name cannot be looked up, then the numeric --- form of the address or service will be returned. --- --- If the query fails, this function throws an IO exception. --- --- Example: --- @ --- (hostName, _) <- getNameInfo [] True False myAddress --- @ - -getNameInfo :: [NameInfoFlag] -- ^ flags to control lookup behaviour - -> Bool -- ^ whether to look up a hostname - -> Bool -- ^ whether to look up a service name - -> SockAddr -- ^ the address to look up - -> IO (Maybe HostName, Maybe ServiceName) - -getNameInfo flags doHost doService addr = - withCStringIf doHost (#const NI_MAXHOST) $ \c_hostlen c_host -> - withCStringIf doService (#const NI_MAXSERV) $ \c_servlen c_serv -> do - withSockAddr addr $ \ptr_addr sz -> do - ret <- c_getnameinfo ptr_addr (fromIntegral sz) c_host c_hostlen - c_serv c_servlen (packBits niFlagMapping flags) - case ret of - 0 -> do - let peekIf doIf c_val = if doIf - then liftM Just $ peekCString c_val - else return Nothing - host <- peekIf doHost c_host - serv <- peekIf doService c_serv - return (host, serv) - _ -> do err <- gai_strerror ret - ioError (ioeSetErrorString - (mkIOError NoSuchThing "getNameInfo" Nothing - Nothing) err) - -foreign import ccall safe "hsnet_getnameinfo" - c_getnameinfo :: Ptr SockAddr -> CInt{-CSockLen???-} -> CString -> CSize -> CString - -> CSize -> CInt -> IO CInt -#endif - -mkInvalidRecvArgError :: String -> IOError -mkInvalidRecvArgError loc = ioeSetErrorString (mkIOError - InvalidArgument - loc Nothing Nothing) "non-positive length" - -mkEOFError :: String -> 
IOError -mkEOFError loc = ioeSetErrorString (mkIOError EOF loc Nothing Nothing) "end of file" - --- --------------------------------------------------------------------------- --- foreign imports from the C library - -foreign import ccall unsafe "my_inet_ntoa" - c_inet_ntoa :: HostAddress -> IO (Ptr CChar) - -foreign import CALLCONV unsafe "inet_addr" - c_inet_addr :: Ptr CChar -> IO HostAddress - -foreign import CALLCONV unsafe "shutdown" - c_shutdown :: CInt -> CInt -> IO CInt - -closeFd :: CInt -> IO () -closeFd fd = throwSocketErrorIfMinus1_ "Network.Socket.close" $ c_close fd - -#if !defined(WITH_WINSOCK) -foreign import ccall unsafe "close" - c_close :: CInt -> IO CInt -#else -foreign import stdcall unsafe "closesocket" - c_close :: CInt -> IO CInt -#endif - -foreign import CALLCONV unsafe "socket" - c_socket :: CInt -> CInt -> CInt -> IO CInt -foreign import CALLCONV unsafe "bind" - c_bind :: CInt -> Ptr SockAddr -> CInt{-CSockLen???-} -> IO CInt -foreign import CALLCONV SAFE_ON_WIN "connect" - c_connect :: CInt -> Ptr SockAddr -> CInt{-CSockLen???-} -> IO CInt -foreign import CALLCONV unsafe "accept" - c_accept :: CInt -> Ptr SockAddr -> Ptr CInt{-CSockLen???-} -> IO CInt -#ifdef HAVE_ACCEPT4 -foreign import CALLCONV unsafe "accept4" - c_accept4 :: CInt -> Ptr SockAddr -> Ptr CInt{-CSockLen???-} -> CInt -> IO CInt -#endif -foreign import CALLCONV unsafe "listen" - c_listen :: CInt -> CInt -> IO CInt - -#if defined(mingw32_HOST_OS) -foreign import CALLCONV safe "accept" - c_accept_safe :: CInt -> Ptr SockAddr -> Ptr CInt{-CSockLen???-} -> IO CInt - -foreign import ccall unsafe "rtsSupportsBoundThreads" threaded :: Bool -#endif - -foreign import CALLCONV unsafe "send" - c_send :: CInt -> Ptr a -> CSize -> CInt -> IO CInt -foreign import CALLCONV SAFE_ON_WIN "sendto" - c_sendto :: CInt -> Ptr a -> CSize -> CInt -> Ptr SockAddr -> CInt -> IO CInt -foreign import CALLCONV unsafe "recv" - c_recv :: CInt -> Ptr CChar -> CSize -> CInt -> IO CInt -foreign import 
CALLCONV SAFE_ON_WIN "recvfrom" - c_recvfrom :: CInt -> Ptr a -> CSize -> CInt -> Ptr SockAddr -> Ptr CInt -> IO CInt -foreign import CALLCONV unsafe "getpeername" - c_getpeername :: CInt -> Ptr SockAddr -> Ptr CInt -> IO CInt -foreign import CALLCONV unsafe "getsockname" - c_getsockname :: CInt -> Ptr SockAddr -> Ptr CInt -> IO CInt - -foreign import CALLCONV unsafe "getsockopt" - c_getsockopt :: CInt -> CInt -> CInt -> Ptr CInt -> Ptr CInt -> IO CInt -foreign import CALLCONV unsafe "setsockopt" - c_setsockopt :: CInt -> CInt -> CInt -> Ptr CInt -> CInt -> IO CInt - -#if defined(HAVE_GETPEEREID) -foreign import CALLCONV unsafe "getpeereid" - c_getpeereid :: CInt -> Ptr CUInt -> Ptr CUInt -> IO CInt -#endif --- --------------------------------------------------------------------------- --- * Deprecated aliases - --- $deprecated-aliases --- --- These aliases are deprecated and should not be used in new code. --- They will be removed in some future version of the package. - --- | Deprecated alias for 'bind'. -bindSocket :: Socket -- Unconnected Socket - -> SockAddr -- Address to Bind to - -> IO () -bindSocket = bind - --- | Deprecated alias for 'close'. -sClose :: Socket -> IO () -sClose = close - --- | Deprecated alias for 'isConnected'. -sIsConnected :: Socket -> IO Bool -sIsConnected = isConnected - --- | Deprecated alias for 'isBound'. -sIsBound :: Socket -> IO Bool -sIsBound = isBound - --- | Deprecated alias for 'isListening'. -sIsListening :: Socket -> IO Bool -sIsListening = isListening - --- | Deprecated alias for 'isReadable'. -sIsReadable :: Socket -> IO Bool -sIsReadable = isReadable - --- | Deprecated alias for 'isWritable'. 
-sIsWritable :: Socket -> IO Bool -sIsWritable = isWritable diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar2=/network.buildinfo.in cabal-install-1.22-1.22.9.0/=unpacked-tar2=/network.buildinfo.in --- cabal-install-1.22-1.22.6.0/=unpacked-tar2=/network.buildinfo.in 2014-09-02 18:14:23.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar2=/network.buildinfo.in 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -ghc-options: -DCALLCONV=@CALLCONV@ @EXTRA_CPPFLAGS@ -ghc-prof-options: -DCALLCONV=@CALLCONV@ @EXTRA_CPPFLAGS@ -ld-options: @LDFLAGS@ -cc-options: -DCALLCONV=@CALLCONV@ @EXTRA_CPPFLAGS@ -c-sources: @EXTRA_SRCS@ -extra-libraries: @EXTRA_LIBS@ diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar2=/network.cabal cabal-install-1.22-1.22.9.0/=unpacked-tar2=/network.cabal --- cabal-install-1.22-1.22.6.0/=unpacked-tar2=/network.cabal 2014-09-02 18:14:23.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar2=/network.cabal 1970-01-01 00:00:00.000000000 +0000 @@ -1,111 +0,0 @@ -name: network -version: 2.6.0.2 -license: BSD3 -license-file: LICENSE -maintainer: Johan Tibell -synopsis: Low-level networking interface -description: - This package provides a low-level networking interface. - . - In network-2.6 the @Network.URI@ module was split off into its own - package, network-uri-2.6. If you're using the @Network.URI@ module - you can automatically get it from the right package by adding this - to your .cabal file: - . - > flag network-uri - > description: Get Network.URI from the network-uri package - > default: True - > - > library - > -- ... - > if flag(network-uri) - > build-depends: network-uri >= 2.6, network >= 2.6 - > else - > build-depends: network-uri < 2.6, network < 2.6 - . - That is, get the module from either network < 2.6 or from - network-uri >= 2.6. 
-category: Network -build-type: Configure -cabal-version: >=1.8 -extra-tmp-files: - config.log config.status autom4te.cache network.buildinfo - include/HsNetworkConfig.h -extra-source-files: - README.md examples/*.hs tests/*.hs config.guess config.sub install-sh - configure.ac configure network.buildinfo.in - include/HsNetworkConfig.h.in include/HsNet.h - -- C sources only used on some systems - cbits/ancilData.c cbits/asyncAccept.c cbits/initWinSock.c - cbits/winSockErr.c -homepage: https://github.com/haskell/network -bug-reports: https://github.com/haskell/network/issues - -library - exposed-modules: - Network - Network.BSD - Network.Socket - Network.Socket.ByteString - Network.Socket.ByteString.Lazy - Network.Socket.Internal - other-modules: - Network.Socket.ByteString.Internal - Network.Socket.Types - - if !os(windows) - other-modules: - Network.Socket.ByteString.IOVec - Network.Socket.ByteString.Lazy.Posix - Network.Socket.ByteString.MsgHdr - if os(windows) - other-modules: - Network.Socket.ByteString.Lazy.Windows - - build-depends: - base >= 3 && < 5, - bytestring < 0.11 - - if !os(windows) - build-depends: - unix >= 2 - - extensions: - CPP, DeriveDataTypeable, ForeignFunctionInterface, TypeSynonymInstances - include-dirs: include - includes: HsNet.h - install-includes: HsNet.h HsNetworkConfig.h - c-sources: cbits/HsNet.c - ghc-options: -Wall -fwarn-tabs - -test-suite simple - hs-source-dirs: tests - main-is: Simple.hs - type: exitcode-stdio-1.0 - - build-depends: - base < 5, - bytestring, - HUnit, - network, - test-framework, - test-framework-hunit - -test-suite regression - hs-source-dirs: tests - main-is: Regression.hs - type: exitcode-stdio-1.0 - - build-depends: - base < 5, - bytestring, - HUnit, - network, - test-framework, - test-framework-hunit - - ghc-options: -Wall - -source-repository head - type: git - location: git://github.com/haskell/network.git diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar2=/Network.hs 
cabal-install-1.22-1.22.9.0/=unpacked-tar2=/Network.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar2=/Network.hs 2014-09-02 18:14:23.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar2=/Network.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,472 +0,0 @@ -{-# LANGUAGE CPP #-} ------------------------------------------------------------------------------ --- | --- Module : Network --- Copyright : (c) The University of Glasgow 2001 --- License : BSD-style (see the file libraries/network/LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : provisional --- Portability : portable --- --- This module is kept for backwards-compatibility. New users are --- encouraged to use "Network.Socket" instead. --- --- "Network" was intended as a \"higher-level\" interface to networking --- facilities, and only supports TCP. --- ------------------------------------------------------------------------------ - -#include "HsNetworkConfig.h" - -#ifdef HAVE_GETADDRINFO --- Use IPv6-capable function definitions if the OS supports it. -#define IPV6_SOCKET_SUPPORT 1 -#endif - -module Network - ( - -- * Basic data types - Socket - , PortID(..) 
- , HostName - , PortNumber - - -- * Initialisation - , withSocketsDo - - -- * Server-side connections - , listenOn - , accept - , sClose - - -- * Client-side connections - , connectTo - - -- * Simple sending and receiving - {-$sendrecv-} - , sendTo - , recvFrom - - -- * Miscellaneous - , socketPort - - -- * Networking Issues - -- ** Buffering - {-$buffering-} - - -- ** Improving I\/O Performance over sockets - {-$performance-} - - -- ** @SIGPIPE@ - {-$sigpipe-} - ) where - -import Control.Monad (liftM) -import Data.Maybe (fromJust) -import Network.BSD -import Network.Socket hiding (accept, socketPort, recvFrom, - sendTo, PortNumber, sClose) -import qualified Network.Socket as Socket (accept) -import System.IO -import Prelude -import qualified Control.Exception as Exception - --- --------------------------------------------------------------------------- --- High Level ``Setup'' functions - --- If the @PortID@ specifies a unix family socket and the @Hostname@ --- differs from that returned by @getHostname@ then an error is --- raised. Alternatively an empty string may be given to @connectTo@ --- signalling that the current hostname applies. - -data PortID = - Service String -- Service Name eg "ftp" - | PortNumber PortNumber -- User defined Port Number -#if !defined(mingw32_HOST_OS) && !defined(cygwin32_HOST_OS) && !defined(_WIN32) - | UnixSocket String -- Unix family socket in file system -#endif - deriving (Show, Eq) - --- | Calling 'connectTo' creates a client side socket which is --- connected to the given host and port. The Protocol and socket type is --- derived from the given port identifier. If a port number is given --- then the result is always an internet family 'Stream' socket. - -connectTo :: HostName -- Hostname - -> PortID -- Port Identifier - -> IO Handle -- Connected Socket - -#if defined(IPV6_SOCKET_SUPPORT) --- IPv6 and IPv4. 
- -connectTo hostname (Service serv) = connect' hostname serv - -connectTo hostname (PortNumber port) = connect' hostname (show port) -#else --- IPv4 only. - -connectTo hostname (Service serv) = do - proto <- getProtocolNumber "tcp" - bracketOnError - (socket AF_INET Stream proto) - (sClose) -- only done if there's an error - (\sock -> do - port <- getServicePortNumber serv - he <- getHostByName hostname - connect sock (SockAddrInet port (hostAddress he)) - socketToHandle sock ReadWriteMode - ) - -connectTo hostname (PortNumber port) = do - proto <- getProtocolNumber "tcp" - bracketOnError - (socket AF_INET Stream proto) - (sClose) -- only done if there's an error - (\sock -> do - he <- getHostByName hostname - connect sock (SockAddrInet port (hostAddress he)) - socketToHandle sock ReadWriteMode - ) -#endif - -#if !defined(mingw32_HOST_OS) && !defined(cygwin32_HOST_OS) && !defined(_WIN32) -connectTo _ (UnixSocket path) = do - bracketOnError - (socket AF_UNIX Stream 0) - (sClose) - (\sock -> do - connect sock (SockAddrUnix path) - socketToHandle sock ReadWriteMode - ) -#endif - -#if defined(IPV6_SOCKET_SUPPORT) -connect' :: HostName -> ServiceName -> IO Handle - -connect' host serv = do - proto <- getProtocolNumber "tcp" - let hints = defaultHints { addrFlags = [AI_ADDRCONFIG] - , addrProtocol = proto - , addrSocketType = Stream } - addrs <- getAddrInfo (Just hints) (Just host) (Just serv) - firstSuccessful $ map tryToConnect addrs - where - tryToConnect addr = - bracketOnError - (socket (addrFamily addr) (addrSocketType addr) (addrProtocol addr)) - (sClose) -- only done if there's an error - (\sock -> do - connect sock (addrAddress addr) - socketToHandle sock ReadWriteMode - ) -#endif - --- | Creates the server side socket which has been bound to the --- specified port. --- --- 'maxListenQueue' (typically 128) is specified to the listen queue. --- This is good enough for normal network servers but is too small --- for high performance servers. 
--- --- To avoid the \"Address already in use\" problems, --- the 'ReuseAddr' socket option is set on the listening socket. --- --- If available, the 'IPv6Only' socket option is set to 0 --- so that both IPv4 and IPv6 can be accepted with this socket. --- --- If you don't like the behavior above, please use the lower level --- 'Network.Socket.listen' instead. - -listenOn :: PortID -- ^ Port Identifier - -> IO Socket -- ^ Listening Socket - -#if defined(IPV6_SOCKET_SUPPORT) --- IPv6 and IPv4. - -listenOn (Service serv) = listen' serv - -listenOn (PortNumber port) = listen' (show port) -#else --- IPv4 only. - -listenOn (Service serv) = do - proto <- getProtocolNumber "tcp" - bracketOnError - (socket AF_INET Stream proto) - (sClose) - (\sock -> do - port <- getServicePortNumber serv - setSocketOption sock ReuseAddr 1 - bindSocket sock (SockAddrInet port iNADDR_ANY) - listen sock maxListenQueue - return sock - ) - -listenOn (PortNumber port) = do - proto <- getProtocolNumber "tcp" - bracketOnError - (socket AF_INET Stream proto) - (sClose) - (\sock -> do - setSocketOption sock ReuseAddr 1 - bindSocket sock (SockAddrInet port iNADDR_ANY) - listen sock maxListenQueue - return sock - ) -#endif - -#if !defined(mingw32_HOST_OS) && !defined(cygwin32_HOST_OS) && !defined(_WIN32) -listenOn (UnixSocket path) = - bracketOnError - (socket AF_UNIX Stream 0) - (sClose) - (\sock -> do - setSocketOption sock ReuseAddr 1 - bindSocket sock (SockAddrUnix path) - listen sock maxListenQueue - return sock - ) -#endif - -#if defined(IPV6_SOCKET_SUPPORT) -listen' :: ServiceName -> IO Socket - -listen' serv = do - proto <- getProtocolNumber "tcp" - -- We should probably specify addrFamily = AF_INET6 and the filter - -- code below should be removed. AI_ADDRCONFIG is probably not - -- necessary. But this code is well-tested. So, let's keep it. 
- let hints = defaultHints { addrFlags = [AI_ADDRCONFIG, AI_PASSIVE] - , addrSocketType = Stream - , addrProtocol = proto } - addrs <- getAddrInfo (Just hints) Nothing (Just serv) - -- Choose an IPv6 socket if exists. This ensures the socket can - -- handle both IPv4 and IPv6 if v6only is false. - let addrs' = filter (\x -> addrFamily x == AF_INET6) addrs - addr = if null addrs' then head addrs else head addrs' - bracketOnError - (socket (addrFamily addr) (addrSocketType addr) (addrProtocol addr)) - (sClose) - (\sock -> do - setSocketOption sock ReuseAddr 1 - bindSocket sock (addrAddress addr) - listen sock maxListenQueue - return sock - ) -#endif - --- ----------------------------------------------------------------------------- --- accept - --- | Accept a connection on a socket created by 'listenOn'. Normal --- I\/O operations (see "System.IO") can be used on the 'Handle' --- returned to communicate with the client. --- Notice that although you can pass any Socket to Network.accept, --- only sockets of either AF_UNIX, AF_INET, or AF_INET6 will work --- (this shouldn't be a problem, though). When using AF_UNIX, HostName --- will be set to the path of the socket and PortNumber to -1. --- -accept :: Socket -- ^ Listening Socket - -> IO (Handle, - HostName, - PortNumber) -- ^ Triple of: read\/write 'Handle' for - -- communicating with the client, - -- the 'HostName' of the peer socket, and - -- the 'PortNumber' of the remote connection. -accept sock@(MkSocket _ AF_INET _ _ _) = do - ~(sock', (SockAddrInet port haddr)) <- Socket.accept sock - peer <- catchIO - (do - (HostEntry peer _ _ _) <- getHostByAddr AF_INET haddr - return peer - ) - (\_e -> inet_ntoa haddr) - -- if getHostByName fails, we fall back to the IP address - handle <- socketToHandle sock' ReadWriteMode - return (handle, peer, port) -#if defined(IPV6_SOCKET_SUPPORT) -accept sock@(MkSocket _ AF_INET6 _ _ _) = do - (sock', addr) <- Socket.accept sock - peer <- catchIO ((fromJust . 
fst) `liftM` getNameInfo [] True False addr) $ - \_ -> case addr of - SockAddrInet _ a -> inet_ntoa a - SockAddrInet6 _ _ a _ -> return (show a) -# if !defined(mingw32_HOST_OS) && !defined(cygwin32_HOST_OS) && !defined(_WIN32) - SockAddrUnix a -> return a -# endif - handle <- socketToHandle sock' ReadWriteMode - let port = case addr of - SockAddrInet p _ -> p - SockAddrInet6 p _ _ _ -> p - _ -> -1 - return (handle, peer, port) -#endif -#if !defined(mingw32_HOST_OS) && !defined(cygwin32_HOST_OS) && !defined(_WIN32) -accept sock@(MkSocket _ AF_UNIX _ _ _) = do - ~(sock', (SockAddrUnix path)) <- Socket.accept sock - handle <- socketToHandle sock' ReadWriteMode - return (handle, path, -1) -#endif -accept (MkSocket _ family _ _ _) = - error $ "Sorry, address family " ++ (show family) ++ " is not supported!" - - --- | Close the socket. All future operations on the socket object will fail. --- The remote end will receive no more data (after queued data is flushed). -sClose :: Socket -> IO () -sClose = close -- Explicit redefinition because Network.sClose is deperecated, - -- hence the re-export would also be marked as such. - --- ----------------------------------------------------------------------------- --- sendTo/recvFrom - -{-$sendrecv -Send and receive data from\/to the given host and port number. These -should normally only be used where the socket will not be required for -further calls. Also, note that due to the use of 'hGetContents' in 'recvFrom' -the socket will remain open (i.e. not available) even if the function already -returned. Their use is strongly discouraged except for small test-applications -or invocations from the command line. 
--} - -sendTo :: HostName -- Hostname - -> PortID -- Port Number - -> String -- Message to send - -> IO () -sendTo h p msg = do - s <- connectTo h p - hPutStr s msg - hClose s - -recvFrom :: HostName -- Hostname - -> PortID -- Port Number - -> IO String -- Received Data - -#if defined(IPV6_SOCKET_SUPPORT) -recvFrom host port = do - proto <- getProtocolNumber "tcp" - let hints = defaultHints { addrFlags = [AI_ADDRCONFIG] - , addrProtocol = proto - , addrSocketType = Stream } - allowed <- map addrAddress `liftM` getAddrInfo (Just hints) (Just host) - Nothing - s <- listenOn port - let waiting = do - (s', addr) <- Socket.accept s - if not (addr `oneOf` allowed) - then sClose s' >> waiting - else socketToHandle s' ReadMode >>= hGetContents - waiting - where - a@(SockAddrInet _ ha) `oneOf` ((SockAddrInet _ hb):bs) - | ha == hb = True - | otherwise = a `oneOf` bs - a@(SockAddrInet6 _ _ ha _) `oneOf` ((SockAddrInet6 _ _ hb _):bs) - | ha == hb = True - | otherwise = a `oneOf` bs - _ `oneOf` _ = False -#else -recvFrom host port = do - ip <- getHostByName host - let ipHs = hostAddresses ip - s <- listenOn port - let - waiting = do - ~(s', SockAddrInet _ haddr) <- Socket.accept s - he <- getHostByAddr AF_INET haddr - if not (any (`elem` ipHs) (hostAddresses he)) - then do - sClose s' - waiting - else do - h <- socketToHandle s' ReadMode - msg <- hGetContents h - return msg - - message <- waiting - return message -#endif - --- --------------------------------------------------------------------------- --- Access function returning the port type/id of socket. - --- | Returns the 'PortID' associated with a given socket. 
-socketPort :: Socket -> IO PortID -socketPort s = do - sockaddr <- getSocketName s - return (portID sockaddr) - where - portID sa = - case sa of - SockAddrInet port _ -> PortNumber port -#if defined(IPV6_SOCKET_SUPPORT) - SockAddrInet6 port _ _ _ -> PortNumber port -#endif -#if !defined(mingw32_HOST_OS) && !defined(cygwin32_HOST_OS) && !defined(_WIN32) - SockAddrUnix path -> UnixSocket path -#endif - --- --------------------------------------------------------------------------- --- Utils - --- Like bracket, but only performs the final action if there was an --- exception raised by the middle bit. -bracketOnError - :: IO a -- ^ computation to run first (\"acquire resource\") - -> (a -> IO b) -- ^ computation to run last (\"release resource\") - -> (a -> IO c) -- ^ computation to run in-between - -> IO c -- returns the value from the in-between computation -bracketOnError = Exception.bracketOnError - ------------------------------------------------------------------------------ --- Extra documentation - -{-$buffering - -The 'Handle' returned by 'connectTo' and 'accept' is block-buffered by -default. For an interactive application you may want to set the -buffering mode on the 'Handle' to -'LineBuffering' or 'NoBuffering', like so: - -> h <- connectTo host port -> hSetBuffering h LineBuffering --} - -{-$performance - -For really fast I\/O, it might be worth looking at the 'hGetBuf' and -'hPutBuf' family of functions in "System.IO". --} - -{-$sigpipe - -On Unix, when writing to a socket and the reading end is -closed by the remote client, the program is normally sent a -@SIGPIPE@ signal by the operating system. The -default behaviour when a @SIGPIPE@ is received is -to terminate the program silently, which can be somewhat confusing -if you haven't encountered this before. The solution is to -specify that @SIGPIPE@ is to be ignored, using -the POSIX library: - -> import Posix -> main = do installHandler sigPIPE Ignore Nothing; ... 
--} - -catchIO :: IO a -> (Exception.IOException -> IO a) -> IO a -#if MIN_VERSION_base(4,0,0) -catchIO = Exception.catch -#else -catchIO = Exception.catchJust Exception.ioErrors -#endif - --- Returns the first action from a list which does not throw an exception. --- If all the actions throw exceptions (and the list of actions is not empty), --- the last exception is thrown. -firstSuccessful :: [IO a] -> IO a -firstSuccessful [] = error "firstSuccessful: empty list" -firstSuccessful (p:ps) = catchIO p $ \e -> - case ps of - [] -> Exception.throwIO e - _ -> firstSuccessful ps diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar2=/README.md cabal-install-1.22-1.22.9.0/=unpacked-tar2=/README.md --- cabal-install-1.22-1.22.6.0/=unpacked-tar2=/README.md 2014-09-02 18:14:23.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar2=/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,8 +0,0 @@ -# [`network`](http://hackage.haskell.org/package/network) [![Build Status](https://travis-ci.org/haskell/network.svg?branch=master)](https://travis-ci.org/haskell/network) - -To build this package using Cabal directly from git, you must run -`autoreconf` before the usual Cabal build steps -(configure/build/install). `autoreconf` is included in the -[GNU Autoconf](http://www.gnu.org/software/autoconf/) tools. There is -no need to run the `configure` script: the `setup configure` step will -do this for you. 
diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar2=/Setup.hs cabal-install-1.22-1.22.9.0/=unpacked-tar2=/Setup.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar2=/Setup.hs 2014-09-02 18:14:23.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar2=/Setup.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -module Main (main) where - -import Distribution.Simple - -main :: IO () -main = defaultMainWithHooks autoconfUserHooks diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar2=/tests/Regression.hs cabal-install-1.22-1.22.9.0/=unpacked-tar2=/tests/Regression.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar2=/tests/Regression.hs 2014-09-02 18:14:23.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar2=/tests/Regression.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,31 +0,0 @@ --- | Tests for things that didn't work in the past. -module Main where - -import Network.Socket -import Test.Framework (Test, defaultMain) -import Test.Framework.Providers.HUnit (testCase) - ------------------------------------------------------------------------- --- Tests - --- Used to segfault on OS X 10.8.2 due to AI_NUMERICSERV being set --- without a service being set. This is a OS X bug. 
-testGetAddrInfo :: IO () -testGetAddrInfo = do - let hints = defaultHints { addrFlags = [AI_NUMERICSERV] } - _ <- getAddrInfo (Just hints) (Just "localhost") Nothing - return () - ------------------------------------------------------------------------- --- List of all tests - -tests :: [Test] -tests = - [ testCase "testGetAddrInfo" testGetAddrInfo - ] - ------------------------------------------------------------------------- --- Test harness - -main :: IO () -main = withSocketsDo $ defaultMain tests diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar2=/tests/Simple.hs cabal-install-1.22-1.22.9.0/=unpacked-tar2=/tests/Simple.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar2=/tests/Simple.hs 2014-09-02 18:14:23.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar2=/tests/Simple.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,307 +0,0 @@ -{-# LANGUAGE ScopedTypeVariables #-} - -module Main where - -import Control.Concurrent (ThreadId, forkIO, myThreadId) -import Control.Concurrent.MVar (newEmptyMVar, putMVar, takeMVar, readMVar) -import qualified Control.Exception as E -import qualified Data.ByteString as S -import qualified Data.ByteString.Char8 as C -import Network.Socket hiding (recv, recvFrom, send, sendTo) -import Network.Socket.ByteString -import Test.Framework (Test, defaultMain, testGroup) -import Test.Framework.Providers.HUnit (testCase) -import Test.HUnit (Assertion, (@=?)) - ------------------------------------------------------------------------- - -serverAddr :: String -serverAddr = "127.0.0.1" - -testMsg :: S.ByteString -testMsg = C.pack "This is a test message." - ------------------------------------------------------------------------- --- Tests - ------------------------------------------------------------------------- --- Sending and receiving - -testSend :: Assertion -testSend = tcpTest client server - where - server sock = recv sock 1024 >>= (@=?) 
testMsg - client sock = send sock testMsg - -testSendAll :: Assertion -testSendAll = tcpTest client server - where - server sock = recv sock 1024 >>= (@=?) testMsg - client sock = sendAll sock testMsg - -testSendTo :: Assertion -testSendTo = udpTest client server - where - server sock = recv sock 1024 >>= (@=?) testMsg - client sock serverPort = do - addr <- inet_addr serverAddr - sendTo sock testMsg (SockAddrInet serverPort addr) - -testSendAllTo :: Assertion -testSendAllTo = udpTest client server - where - server sock = recv sock 1024 >>= (@=?) testMsg - client sock serverPort = do - addr <- inet_addr serverAddr - sendAllTo sock testMsg (SockAddrInet serverPort addr) - -testSendMany :: Assertion -testSendMany = tcpTest client server - where - server sock = recv sock 1024 >>= (@=?) (S.append seg1 seg2) - client sock = sendMany sock [seg1, seg2] - - seg1 = C.pack "This is a " - seg2 = C.pack "test message." - -testSendManyTo :: Assertion -testSendManyTo = udpTest client server - where - server sock = recv sock 1024 >>= (@=?) (S.append seg1 seg2) - client sock serverPort = do - addr <- inet_addr serverAddr - sendManyTo sock [seg1, seg2] (SockAddrInet serverPort addr) - - seg1 = C.pack "This is a " - seg2 = C.pack "test message." - -testRecv :: Assertion -testRecv = tcpTest client server - where - server sock = recv sock 1024 >>= (@=?) testMsg - client sock = send sock testMsg - -testOverFlowRecv :: Assertion -testOverFlowRecv = tcpTest client server - where - server sock = do seg1 <- recv sock (S.length testMsg - 3) - seg2 <- recv sock 1024 - let msg = S.append seg1 seg2 - testMsg @=? msg - - client sock = send sock testMsg - -testRecvFrom :: Assertion -testRecvFrom = tcpTest client server - where - server sock = do (msg, _) <- recvFrom sock 1024 - testMsg @=? 
msg - - client sock = do - serverPort <- getPeerPort sock - addr <- inet_addr serverAddr - sendTo sock testMsg (SockAddrInet serverPort addr) - -testOverFlowRecvFrom :: Assertion -testOverFlowRecvFrom = tcpTest client server - where - server sock = do (seg1, _) <- recvFrom sock (S.length testMsg - 3) - (seg2, _) <- recvFrom sock 1024 - let msg = S.append seg1 seg2 - testMsg @=? msg - - client sock = send sock testMsg - -{- -testGetPeerCred:: Assertion -testGetPeerCred = - test clientSetup clientAct serverSetup server - where - clientSetup = do - sock <- socket AF_UNIX Stream defaultProtocol - connect sock $ SockAddrUnix addr - return sock - - serverSetup = do - sock <- socket AF_UNIX Stream defaultProtocol - bindSocket sock $ SockAddrUnix addr - listen sock 1 - return sock - - server sock = do - (clientSock, _) <- accept sock - serverAct clientSock - sClose clientSock - - addr = "/tmp/testAddr1" - clientAct sock = withSocketsDo $ do - sendAll sock testMsg - (pid,uid,gid) <- getPeerCred sock - putStrLn $ unwords ["pid=",show pid,"uid=",show uid, "gid=", show gid] - serverAct sock = withSocketsDo $ do - msg <- recv sock 1024 - putStrLn $ C.unpack msg - - -testGetPeerEid :: Assertion -testGetPeerEid = - test clientSetup clientAct serverSetup server - where - clientSetup = do - sock <- socket AF_UNIX Stream defaultProtocol - connect sock $ SockAddrUnix addr - return sock - - serverSetup = do - sock <- socket AF_UNIX Stream defaultProtocol - bindSocket sock $ SockAddrUnix addr - listen sock 1 - return sock - - server sock = do - (clientSock, _) <- accept sock - serverAct clientSock - sClose clientSock - - addr = "/tmp/testAddr2" - clientAct sock = withSocketsDo $ do - sendAll sock testMsg - (uid,gid) <- getPeerEid sock - putStrLn $ unwords ["uid=",show uid, "gid=", show gid] - serverAct sock = withSocketsDo $ do - msg <- recv sock 1024 - putStrLn $ C.unpack msg --} - ------------------------------------------------------------------------- --- Other - 
------------------------------------------------------------------------- --- List of all tests - -basicTests :: Test -basicTests = testGroup "Basic socket operations" - [ - -- Sending and receiving - testCase "testSend" testSend - , testCase "testSendAll" testSendAll - , testCase "testSendTo" testSendTo - , testCase "testSendAllTo" testSendAllTo - , testCase "testSendMany" testSendMany - , testCase "testSendManyTo" testSendManyTo - , testCase "testRecv" testRecv - , testCase "testOverFlowRecv" testOverFlowRecv - , testCase "testRecvFrom" testRecvFrom - , testCase "testOverFlowRecvFrom" testOverFlowRecvFrom --- , testCase "testGetPeerCred" testGetPeerCred --- , testCase "testGetPeerEid" testGetPeerEid - ] - -tests :: [Test] -tests = [basicTests] - ------------------------------------------------------------------------- --- Test helpers - --- | Returns the 'PortNumber' of the peer. Will throw an 'error' if --- used on a non-IP socket. -getPeerPort :: Socket -> IO PortNumber -getPeerPort sock = do - sockAddr <- getPeerName sock - case sockAddr of - (SockAddrInet port _) -> return port - (SockAddrInet6 port _ _ _) -> return port - _ -> error "getPeerPort: only works with IP sockets" - --- | Establish a connection between client and server and then run --- 'clientAct' and 'serverAct', in different threads. Both actions --- get passed a connected 'Socket', used for communicating between --- client and server. 'tcpTest' makes sure that the 'Socket' is --- closed after the actions have run. 
-tcpTest :: (Socket -> IO a) -> (Socket -> IO b) -> IO () -tcpTest clientAct serverAct = do - portVar <- newEmptyMVar - test (clientSetup portVar) clientAct (serverSetup portVar) server - where - clientSetup portVar = do - sock <- socket AF_INET Stream defaultProtocol - addr <- inet_addr serverAddr - serverPort <- readMVar portVar - connect sock $ SockAddrInet serverPort addr - return sock - - serverSetup portVar = do - sock <- socket AF_INET Stream defaultProtocol - setSocketOption sock ReuseAddr 1 - addr <- inet_addr serverAddr - bindSocket sock (SockAddrInet aNY_PORT addr) - listen sock 1 - serverPort <- socketPort sock - putMVar portVar serverPort - return sock - - server sock = do - (clientSock, _) <- accept sock - serverAct clientSock - sClose clientSock - --- | Create an unconnected 'Socket' for sending UDP and receiving --- datagrams and then run 'clientAct' and 'serverAct'. -udpTest :: (Socket -> PortNumber -> IO a) -> (Socket -> IO b) -> IO () -udpTest clientAct serverAct = do - portVar <- newEmptyMVar - test clientSetup (client portVar) (serverSetup portVar) serverAct - where - clientSetup = socket AF_INET Datagram defaultProtocol - - client portVar sock = do - serverPort <- readMVar portVar - clientAct sock serverPort - - serverSetup portVar = do - sock <- socket AF_INET Datagram defaultProtocol - setSocketOption sock ReuseAddr 1 - addr <- inet_addr serverAddr - bindSocket sock (SockAddrInet aNY_PORT addr) - serverPort <- socketPort sock - putMVar portVar serverPort - return sock - --- | Run a client/server pair and synchronize them so that the server --- is started before the client and the specified server action is --- finished before the client closes the 'Socket'. 
-test :: IO Socket -> (Socket -> IO b) -> IO Socket -> (Socket -> IO c) -> IO () -test clientSetup clientAct serverSetup serverAct = do - tid <- myThreadId - barrier <- newEmptyMVar - forkIO $ server barrier - client tid barrier - where - server barrier = do - E.bracket serverSetup sClose $ \sock -> do - serverReady - serverAct sock - putMVar barrier () - where - -- | Signal to the client that it can proceed. - serverReady = putMVar barrier () - - client tid barrier = do - takeMVar barrier - -- Transfer exceptions to the main thread. - bracketWithReraise tid clientSetup sClose $ \res -> do - clientAct res - takeMVar barrier - --- | Like 'bracket' but catches and reraises the exception in another --- thread, specified by the first argument. -bracketWithReraise :: ThreadId -> IO a -> (a -> IO b) -> (a -> IO ()) -> IO () -bracketWithReraise tid before after thing = - E.bracket before after thing - `E.catch` \ (e :: E.SomeException) -> E.throwTo tid e - ------------------------------------------------------------------------- --- Test harness - -main :: IO () -main = withSocketsDo $ defaultMain tests diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar3=/CHANGELOG.md cabal-install-1.22-1.22.9.0/=unpacked-tar3=/CHANGELOG.md --- cabal-install-1.22-1.22.6.0/=unpacked-tar3=/CHANGELOG.md 2014-09-16 21:34:43.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar3=/CHANGELOG.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,26 +0,0 @@ -# 1.1 - * breaking change to `randomIValInteger` to improve RNG quality and performance - see https://github.com/haskell/random/pull/4 and - ghc https://ghc.haskell.org/trac/ghc/ticket/8898 - * correct documentation about generated range of Int32 sized values of type Int - https://github.com/haskell/random/pull/7 - * fix memory leaks by using strict fields and strict atomicModifyIORef' - https://github.com/haskell/random/pull/8 - related to ghc trac tickets #7936 and #4218 - * support for base < 4.6 (which doesnt provide strict 
atomicModifyIORef') - and integrating Travis CI support. - https://github.com/haskell/random/pull/12 - * fix C type in test suite https://github.com/haskell/random/pull/9 - -# 1.0.1.1 -bump for overflow bug fixes - -# 1.0.1.2 -bump for ticket 8704, build fusion - -# 1.0.1.0 -bump for bug fixes, - -# 1.0.0.4 -bumped version for float/double range bugfix - diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar3=/.darcs-boring cabal-install-1.22-1.22.9.0/=unpacked-tar3=/.darcs-boring --- cabal-install-1.22-1.22.6.0/=unpacked-tar3=/.darcs-boring 2014-09-16 21:34:43.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar3=/.darcs-boring 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ -^dist(/|$) -^setup(/|$) -^GNUmakefile$ -^Makefile.local$ -^.depend(.bak)?$ diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar3=/.gitignore cabal-install-1.22-1.22.9.0/=unpacked-tar3=/.gitignore --- cabal-install-1.22-1.22.6.0/=unpacked-tar3=/.gitignore 2014-09-16 21:34:43.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar3=/.gitignore 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ -*~ - -Thumbs.db -.DS_Store - -GNUmakefile -dist-install/ -ghc.mk - -dist -.cabal-sandbox -cabal.sandbox.config diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar3=/LICENSE cabal-install-1.22-1.22.9.0/=unpacked-tar3=/LICENSE --- cabal-install-1.22-1.22.6.0/=unpacked-tar3=/LICENSE 2014-09-16 21:34:43.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar3=/LICENSE 1970-01-01 00:00:00.000000000 +0000 @@ -1,63 +0,0 @@ -This library (libraries/base) is derived from code from two -sources: - - * Code from the GHC project which is largely (c) The University of - Glasgow, and distributable under a BSD-style license (see below), - - * Code from the Haskell 98 Report which is (c) Simon Peyton Jones - and freely redistributable (but see the full license for - restrictions). - -The full text of these licenses is reproduced below. Both of the -licenses are BSD-style or compatible. 
- ------------------------------------------------------------------------------ - -The Glasgow Haskell Compiler License - -Copyright 2004, The University Court of the University of Glasgow. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -- Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. - -- Redistributions in binary form must reproduce the above copyright notice, -this list of conditions and the following disclaimer in the documentation -and/or other materials provided with the distribution. - -- Neither name of the University nor the names of its contributors may be -used to endorse or promote products derived from this software without -specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY COURT OF THE UNIVERSITY OF -GLASGOW AND THE CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, -INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND -FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -UNIVERSITY COURT OF THE UNIVERSITY OF GLASGOW OR THE CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -DAMAGE. 
- ------------------------------------------------------------------------------ - -Code derived from the document "Report on the Programming Language -Haskell 98", is distributed under the following license: - - Copyright (c) 2002 Simon Peyton Jones - - The authors intend this Report to belong to the entire Haskell - community, and so we grant permission to copy and distribute it for - any purpose, provided that it is reproduced in its entirety, - including this Notice. Modified versions of this Report may also be - copied and distributed for any purpose, provided that the modified - version is clearly presented as such, and that it does not claim to - be a definition of the Haskell 98 Language. - ------------------------------------------------------------------------------ diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar3=/random.cabal cabal-install-1.22-1.22.9.0/=unpacked-tar3=/random.cabal --- cabal-install-1.22-1.22.6.0/=unpacked-tar3=/random.cabal 2014-09-16 21:34:43.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar3=/random.cabal 1970-01-01 00:00:00.000000000 +0000 @@ -1,70 +0,0 @@ -name: random -version: 1.1 - - - - -license: BSD3 -license-file: LICENSE -maintainer: core-libraries-committee@haskell.org -bug-reports: https://github.com/haskell/random/issues -synopsis: random number library -category: System -description: - This package provides a basic random number generation - library, including the ability to split random number - generators. 
- -extra-source-files: - .travis.yml - README.md - CHANGELOG.md - .gitignore - .darcs-boring - - - -build-type: Simple --- cabal-version 1.8 needed because "the field 'build-depends: random' refers --- to a library which is defined within the same package" -cabal-version: >= 1.8 - - - -Library - exposed-modules: - System.Random - extensions: CPP - GHC-Options: -O2 - build-depends: base >= 3 && < 5, time - -source-repository head - type: git - location: http://git.haskell.org/packages/random.git - --- To run the Test-Suite: --- $ cabal configure --enable-tests --- $ cabal test --show-details=always --test-options="+RTS -M1M -RTS" - -Test-Suite T7936 - type: exitcode-stdio-1.0 - main-is: T7936.hs - hs-source-dirs: tests - build-depends: base >= 3 && < 5, random - ghc-options: -rtsopts -O2 - -Test-Suite TestRandomRs - type: exitcode-stdio-1.0 - main-is: TestRandomRs.hs - hs-source-dirs: tests - build-depends: base >= 3 && < 5, random - ghc-options: -rtsopts -O2 - -- TODO. Why does the following not work? - --test-options: +RTS -M1M -RTS - -Test-Suite TestRandomIOs - type: exitcode-stdio-1.0 - main-is: TestRandomIOs.hs - hs-source-dirs: tests - build-depends: base >= 3 && < 5, random - ghc-options: -rtsopts -O2 diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar3=/README.md cabal-install-1.22-1.22.9.0/=unpacked-tar3=/README.md --- cabal-install-1.22-1.22.6.0/=unpacked-tar3=/README.md 2014-09-16 21:34:43.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar3=/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,18 +0,0 @@ -The Haskell Standard Library -- Random Number Generation -======================================================== -[![Build Status](https://secure.travis-ci.org/haskell/random.svg?branch=master)](http://travis-ci.org/haskell/random) - -This library provides a basic interface for (splittable) random number generators. 
- -The API documentation can be found here: - - http://hackage.haskell.org/package/random/docs/System-Random.html - -A module supplying this interface is required for Haskell 98 (but not Haskell -2010). An older [version] -(http://www.haskell.org/ghc/docs/latest/html/libraries/haskell98/Random.html) -of this library is included with GHC in the haskell98 package. This newer -version, with compatible api, is included in the [Haskell Platform] -(http://www.haskell.org/platform/contents.html). - -Please report bugs in the Github [issue tracker] (https://github.com/haskell/random/issues) (no longer in the GHC trac). diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar3=/Setup.hs cabal-install-1.22-1.22.9.0/=unpacked-tar3=/Setup.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar3=/Setup.hs 2014-09-16 21:34:43.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar3=/Setup.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -module Main (main) where - -import Distribution.Simple - -main :: IO () -main = defaultMain diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar3=/System/Random.hs cabal-install-1.22-1.22.9.0/=unpacked-tar3=/System/Random.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar3=/System/Random.hs 2014-09-16 21:34:43.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar3=/System/Random.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,609 +0,0 @@ -#if __GLASGOW_HASKELL__ >= 701 -{-# LANGUAGE Trustworthy #-} -#endif - ------------------------------------------------------------------------------ --- | --- Module : System.Random --- Copyright : (c) The University of Glasgow 2001 --- License : BSD-style (see the file LICENSE in the 'random' repository) --- --- Maintainer : libraries@haskell.org --- Stability : stable --- Portability : portable --- --- This library deals with the common task of pseudo-random number --- generation. 
The library makes it possible to generate repeatable --- results, by starting with a specified initial random number generator, --- or to get different results on each run by using the system-initialised --- generator or by supplying a seed from some other source. --- --- The library is split into two layers: --- --- * A core /random number generator/ provides a supply of bits. --- The class 'RandomGen' provides a common interface to such generators. --- The library provides one instance of 'RandomGen', the abstract --- data type 'StdGen'. Programmers may, of course, supply their own --- instances of 'RandomGen'. --- --- * The class 'Random' provides a way to extract values of a particular --- type from a random number generator. For example, the 'Float' --- instance of 'Random' allows one to generate random values of type --- 'Float'. --- --- This implementation uses the Portable Combined Generator of L'Ecuyer --- ["System.Random\#LEcuyer"] for 32-bit computers, transliterated by --- Lennart Augustsson. It has a period of roughly 2.30584e18. 
--- ------------------------------------------------------------------------------ - -#include "MachDeps.h" - -module System.Random - ( - - -- $intro - - -- * Random number generators - -#ifdef ENABLE_SPLITTABLEGEN - RandomGen(next, genRange) - , SplittableGen(split) -#else - RandomGen(next, genRange, split) -#endif - -- ** Standard random number generators - , StdGen - , mkStdGen - - -- ** The global random number generator - - -- $globalrng - - , getStdRandom - , getStdGen - , setStdGen - , newStdGen - - -- * Random values of various types - , Random ( random, randomR, - randoms, randomRs, - randomIO, randomRIO ) - - -- * References - -- $references - - ) where - -import Prelude - -import Data.Bits -import Data.Int -import Data.Word -import Foreign.C.Types - -#ifdef __NHC__ -import CPUTime ( getCPUTime ) -import Foreign.Ptr ( Ptr, nullPtr ) -import Foreign.C ( CTime, CUInt ) -#else -import System.CPUTime ( getCPUTime ) -import Data.Time ( getCurrentTime, UTCTime(..) ) -import Data.Ratio ( numerator, denominator ) -#endif -import Data.Char ( isSpace, chr, ord ) -import System.IO.Unsafe ( unsafePerformIO ) -import Data.IORef ( IORef, newIORef, readIORef, writeIORef ) -#if MIN_VERSION_base (4,6,0) -import Data.IORef ( atomicModifyIORef' ) -#else -import Data.IORef ( atomicModifyIORef ) -#endif -import Numeric ( readDec ) - -#ifdef __GLASGOW_HASKELL__ -import GHC.Exts ( build ) -#else --- | A dummy variant of build without fusion. -{-# INLINE build #-} -build :: ((a -> [a] -> [a]) -> [a] -> [a]) -> [a] -build g = g (:) [] -#endif - -#if !MIN_VERSION_base (4,6,0) -atomicModifyIORef' :: IORef a -> (a -> (a,b)) -> IO b -atomicModifyIORef' ref f = do - b <- atomicModifyIORef ref - (\x -> let (a, b) = f x - in (a, a `seq` b)) - b `seq` return b -#endif - --- The standard nhc98 implementation of Time.ClockTime does not match --- the extended one expected in this module, so we lash-up a quick --- replacement here. 
-#ifdef __NHC__ -foreign import ccall "time.h time" readtime :: Ptr CTime -> IO CTime -getTime :: IO (Integer, Integer) -getTime = do CTime t <- readtime nullPtr; return (toInteger t, 0) -#else -getTime :: IO (Integer, Integer) -getTime = do - utc <- getCurrentTime - let daytime = toRational $ utctDayTime utc - return $ quotRem (numerator daytime) (denominator daytime) -#endif - --- | The class 'RandomGen' provides a common interface to random number --- generators. --- -#ifdef ENABLE_SPLITTABLEGEN --- Minimal complete definition: 'next'. -#else --- Minimal complete definition: 'next' and 'split'. -#endif - -class RandomGen g where - - -- |The 'next' operation returns an 'Int' that is uniformly distributed - -- in the range returned by 'genRange' (including both end points), - -- and a new generator. - next :: g -> (Int, g) - - -- |The 'genRange' operation yields the range of values returned by - -- the generator. - -- - -- It is required that: - -- - -- * If @(a,b) = 'genRange' g@, then @a < b@. - -- - -- * 'genRange' always returns a pair of defined 'Int's. - -- - -- The second condition ensures that 'genRange' cannot examine its - -- argument, and hence the value it returns can be determined only by the - -- instance of 'RandomGen'. That in turn allows an implementation to make - -- a single call to 'genRange' to establish a generator's range, without - -- being concerned that the generator returned by (say) 'next' might have - -- a different range to the generator passed to 'next'. - -- - -- The default definition spans the full range of 'Int'. - genRange :: g -> (Int,Int) - - -- default method - genRange _ = (minBound, maxBound) - -#ifdef ENABLE_SPLITTABLEGEN --- | The class 'SplittableGen' proivides a way to specify a random number --- generator that can be split into two new generators. -class SplittableGen g where -#endif - -- |The 'split' operation allows one to obtain two distinct random number - -- generators. 
This is very useful in functional programs (for example, when - -- passing a random number generator down to recursive calls), but very - -- little work has been done on statistically robust implementations of - -- 'split' (["System.Random\#Burton", "System.Random\#Hellekalek"] - -- are the only examples we know of). - split :: g -> (g, g) - -{- | -The 'StdGen' instance of 'RandomGen' has a 'genRange' of at least 30 bits. - -The result of repeatedly using 'next' should be at least as statistically -robust as the /Minimal Standard Random Number Generator/ described by -["System.Random\#Park", "System.Random\#Carta"]. -Until more is known about implementations of 'split', all we require is -that 'split' deliver generators that are (a) not identical and -(b) independently robust in the sense just given. - -The 'Show' and 'Read' instances of 'StdGen' provide a primitive way to save the -state of a random number generator. -It is required that @'read' ('show' g) == g@. - -In addition, 'reads' may be used to map an arbitrary string (not necessarily one -produced by 'show') onto a value of type 'StdGen'. In general, the 'Read' -instance of 'StdGen' has the following properties: - -* It guarantees to succeed on any string. - -* It guarantees to consume only a finite portion of the string. - -* Different argument strings are likely to result in different results. - --} - -data StdGen - = StdGen !Int32 !Int32 - -instance RandomGen StdGen where - next = stdNext - genRange _ = stdRange - -#ifdef ENABLE_SPLITTABLEGEN -instance SplittableGen StdGen where -#endif - split = stdSplit - -instance Show StdGen where - showsPrec p (StdGen s1 s2) = - showsPrec p s1 . - showChar ' ' . - showsPrec p s2 - -instance Read StdGen where - readsPrec _p = \ r -> - case try_read r of - r'@[_] -> r' - _ -> [stdFromString r] -- because it shouldn't ever fail. 
- where - try_read r = do - (s1, r1) <- readDec (dropWhile isSpace r) - (s2, r2) <- readDec (dropWhile isSpace r1) - return (StdGen s1 s2, r2) - -{- - If we cannot unravel the StdGen from a string, create - one based on the string given. --} -stdFromString :: String -> (StdGen, String) -stdFromString s = (mkStdGen num, rest) - where (cs, rest) = splitAt 6 s - num = foldl (\a x -> x + 3 * a) 1 (map ord cs) - - -{- | -The function 'mkStdGen' provides an alternative way of producing an initial -generator, by mapping an 'Int' into a generator. Again, distinct arguments -should be likely to produce distinct generators. --} -mkStdGen :: Int -> StdGen -- why not Integer ? -mkStdGen s = mkStdGen32 $ fromIntegral s - -{- -From ["System.Random\#LEcuyer"]: "The integer variables s1 and s2 ... must be -initialized to values in the range [1, 2147483562] and [1, 2147483398] -respectively." --} -mkStdGen32 :: Int32 -> StdGen -mkStdGen32 sMaybeNegative = StdGen (s1+1) (s2+1) - where - -- We want a non-negative number, but we can't just take the abs - -- of sMaybeNegative as -minBound == minBound. - s = sMaybeNegative .&. maxBound - (q, s1) = s `divMod` 2147483562 - s2 = q `mod` 2147483398 - -createStdGen :: Integer -> StdGen -createStdGen s = mkStdGen32 $ fromIntegral s - -{- | -With a source of random number supply in hand, the 'Random' class allows the -programmer to extract random values of a variety of types. - -Minimal complete definition: 'randomR' and 'random'. - --} - -class Random a where - -- | Takes a range /(lo,hi)/ and a random number generator - -- /g/, and returns a random value uniformly distributed in the closed - -- interval /[lo,hi]/, together with a new generator. It is unspecified - -- what happens if /lo>hi/. For continuous types there is no requirement - -- that the values /lo/ and /hi/ are ever produced, but they may be, - -- depending on the implementation and the interval. 
- randomR :: RandomGen g => (a,a) -> g -> (a,g) - - -- | The same as 'randomR', but using a default range determined by the type: - -- - -- * For bounded types (instances of 'Bounded', such as 'Char'), - -- the range is normally the whole type. - -- - -- * For fractional types, the range is normally the semi-closed interval - -- @[0,1)@. - -- - -- * For 'Integer', the range is (arbitrarily) the range of 'Int'. - random :: RandomGen g => g -> (a, g) - - -- | Plural variant of 'randomR', producing an infinite list of - -- random values instead of returning a new generator. - {-# INLINE randomRs #-} - randomRs :: RandomGen g => (a,a) -> g -> [a] - randomRs ival g = build (\cons _nil -> buildRandoms cons (randomR ival) g) - - -- | Plural variant of 'random', producing an infinite list of - -- random values instead of returning a new generator. - {-# INLINE randoms #-} - randoms :: RandomGen g => g -> [a] - randoms g = build (\cons _nil -> buildRandoms cons random g) - - -- | A variant of 'randomR' that uses the global random number generator - -- (see "System.Random#globalrng"). - randomRIO :: (a,a) -> IO a - randomRIO range = getStdRandom (randomR range) - - -- | A variant of 'random' that uses the global random number generator - -- (see "System.Random#globalrng"). - randomIO :: IO a - randomIO = getStdRandom random - --- | Produce an infinite list-equivalent of random values. -{-# INLINE buildRandoms #-} -buildRandoms :: RandomGen g - => (a -> as -> as) -- ^ E.g. '(:)' but subject to fusion - -> (g -> (a,g)) -- ^ E.g. 'random' - -> g -- ^ A 'RandomGen' instance - -> as -buildRandoms cons rand = go - where - -- The seq fixes part of #4218 and also makes fused Core simpler. 
- go g = x `seq` (x `cons` go g') where (x,g') = rand g - - -instance Random Integer where - randomR ival g = randomIvalInteger ival g - random g = randomR (toInteger (minBound::Int), toInteger (maxBound::Int)) g - -instance Random Int where randomR = randomIvalIntegral; random = randomBounded -instance Random Int8 where randomR = randomIvalIntegral; random = randomBounded -instance Random Int16 where randomR = randomIvalIntegral; random = randomBounded -instance Random Int32 where randomR = randomIvalIntegral; random = randomBounded -instance Random Int64 where randomR = randomIvalIntegral; random = randomBounded - -#ifndef __NHC__ --- Word is a type synonym in nhc98. -instance Random Word where randomR = randomIvalIntegral; random = randomBounded -#endif -instance Random Word8 where randomR = randomIvalIntegral; random = randomBounded -instance Random Word16 where randomR = randomIvalIntegral; random = randomBounded -instance Random Word32 where randomR = randomIvalIntegral; random = randomBounded -instance Random Word64 where randomR = randomIvalIntegral; random = randomBounded - -instance Random CChar where randomR = randomIvalIntegral; random = randomBounded -instance Random CSChar where randomR = randomIvalIntegral; random = randomBounded -instance Random CUChar where randomR = randomIvalIntegral; random = randomBounded -instance Random CShort where randomR = randomIvalIntegral; random = randomBounded -instance Random CUShort where randomR = randomIvalIntegral; random = randomBounded -instance Random CInt where randomR = randomIvalIntegral; random = randomBounded -instance Random CUInt where randomR = randomIvalIntegral; random = randomBounded -instance Random CLong where randomR = randomIvalIntegral; random = randomBounded -instance Random CULong where randomR = randomIvalIntegral; random = randomBounded -instance Random CPtrdiff where randomR = randomIvalIntegral; random = randomBounded -instance Random CSize where randomR = randomIvalIntegral; random = 
randomBounded -instance Random CWchar where randomR = randomIvalIntegral; random = randomBounded -instance Random CSigAtomic where randomR = randomIvalIntegral; random = randomBounded -instance Random CLLong where randomR = randomIvalIntegral; random = randomBounded -instance Random CULLong where randomR = randomIvalIntegral; random = randomBounded -instance Random CIntPtr where randomR = randomIvalIntegral; random = randomBounded -instance Random CUIntPtr where randomR = randomIvalIntegral; random = randomBounded -instance Random CIntMax where randomR = randomIvalIntegral; random = randomBounded -instance Random CUIntMax where randomR = randomIvalIntegral; random = randomBounded - -instance Random Char where - randomR (a,b) g = - case (randomIvalInteger (toInteger (ord a), toInteger (ord b)) g) of - (x,g') -> (chr x, g') - random g = randomR (minBound,maxBound) g - -instance Random Bool where - randomR (a,b) g = - case (randomIvalInteger (bool2Int a, bool2Int b) g) of - (x, g') -> (int2Bool x, g') - where - bool2Int :: Bool -> Integer - bool2Int False = 0 - bool2Int True = 1 - - int2Bool :: Int -> Bool - int2Bool 0 = False - int2Bool _ = True - - random g = randomR (minBound,maxBound) g - -{-# INLINE randomRFloating #-} -randomRFloating :: (Fractional a, Num a, Ord a, Random a, RandomGen g) => (a, a) -> g -> (a, g) -randomRFloating (l,h) g - | l>h = randomRFloating (h,l) g - | otherwise = let (coef,g') = random g in - (2.0 * (0.5*l + coef * (0.5*h - 0.5*l)), g') -- avoid overflow - -instance Random Double where - randomR = randomRFloating - random rng = - case random rng of - (x,rng') -> - -- We use 53 bits of randomness corresponding to the 53 bit significand: - ((fromIntegral (mask53 .&. (x::Int64)) :: Double) - / fromIntegral twoto53, rng') - where - twoto53 = (2::Int64) ^ (53::Int64) - mask53 = twoto53 - 1 - -instance Random Float where - randomR = randomRFloating - random rng = - -- TODO: Faster to just use 'next' IF it generates enough bits of randomness. 
- case random rng of - (x,rng') -> - -- We use 24 bits of randomness corresponding to the 24 bit significand: - ((fromIntegral (mask24 .&. (x::Int32)) :: Float) - / fromIntegral twoto24, rng') - -- Note, encodeFloat is another option, but I'm not seeing slightly - -- worse performance with the following [2011.06.25]: --- (encodeFloat rand (-24), rng') - where - mask24 = twoto24 - 1 - twoto24 = (2::Int32) ^ (24::Int32) - --- CFloat/CDouble are basically the same as a Float/Double: -instance Random CFloat where - randomR = randomRFloating - random rng = case random rng of - (x,rng') -> (realToFrac (x::Float), rng') - -instance Random CDouble where - randomR = randomRFloating - -- A MYSTERY: - -- Presently, this is showing better performance than the Double instance: - -- (And yet, if the Double instance uses randomFrac then its performance is much worse!) - random = randomFrac - -- random rng = case random rng of - -- (x,rng') -> (realToFrac (x::Double), rng') - -mkStdRNG :: Integer -> IO StdGen -mkStdRNG o = do - ct <- getCPUTime - (sec, psec) <- getTime - return (createStdGen (sec * 12345 + psec + ct + o)) - -randomBounded :: (RandomGen g, Random a, Bounded a) => g -> (a, g) -randomBounded = randomR (minBound, maxBound) - --- The two integer functions below take an [inclusive,inclusive] range. 
-randomIvalIntegral :: (RandomGen g, Integral a) => (a, a) -> g -> (a, g) -randomIvalIntegral (l,h) = randomIvalInteger (toInteger l, toInteger h) - -{-# SPECIALIZE randomIvalInteger :: (Num a) => - (Integer, Integer) -> StdGen -> (a, StdGen) #-} - -randomIvalInteger :: (RandomGen g, Num a) => (Integer, Integer) -> g -> (a, g) -randomIvalInteger (l,h) rng - | l > h = randomIvalInteger (h,l) rng - | otherwise = case (f 1 0 rng) of (v, rng') -> (fromInteger (l + v `mod` k), rng') - where - (genlo, genhi) = genRange rng - b = fromIntegral genhi - fromIntegral genlo + 1 - - -- Probabilities of the most likely and least likely result - -- will differ at most by a factor of (1 +- 1/q). Assuming the RandomGen - -- is uniform, of course - - -- On average, log q / log b more random values will be generated - -- than the minimum - q = 1000 - k = h - l + 1 - magtgt = k * q - - -- generate random values until we exceed the target magnitude - f mag v g | mag >= magtgt = (v, g) - | otherwise = v' `seq`f (mag*b) v' g' where - (x,g') = next g - v' = (v * b + (fromIntegral x - fromIntegral genlo)) - - --- The continuous functions on the other hand take an [inclusive,exclusive) range. 
-randomFrac :: (RandomGen g, Fractional a) => g -> (a, g) -randomFrac = randomIvalDouble (0::Double,1) realToFrac - -randomIvalDouble :: (RandomGen g, Fractional a) => (Double, Double) -> (Double -> a) -> g -> (a, g) -randomIvalDouble (l,h) fromDouble rng - | l > h = randomIvalDouble (h,l) fromDouble rng - | otherwise = - case (randomIvalInteger (toInteger (minBound::Int32), toInteger (maxBound::Int32)) rng) of - (x, rng') -> - let - scaled_x = - fromDouble (0.5*l + 0.5*h) + -- previously (l+h)/2, overflowed - fromDouble ((0.5*h - 0.5*l) / (0.5 * realToFrac int32Count)) * -- avoid overflow - fromIntegral (x::Int32) - in - (scaled_x, rng') - -int32Count :: Integer -int32Count = toInteger (maxBound::Int32) - toInteger (minBound::Int32) + 1 -- GHC ticket #3982 - -stdRange :: (Int,Int) -stdRange = (1, 2147483562) - -stdNext :: StdGen -> (Int, StdGen) --- Returns values in the range stdRange -stdNext (StdGen s1 s2) = (fromIntegral z', StdGen s1'' s2'') - where z' = if z < 1 then z + 2147483562 else z - z = s1'' - s2'' - - k = s1 `quot` 53668 - s1' = 40014 * (s1 - k * 53668) - k * 12211 - s1'' = if s1' < 0 then s1' + 2147483563 else s1' - - k' = s2 `quot` 52774 - s2' = 40692 * (s2 - k' * 52774) - k' * 3791 - s2'' = if s2' < 0 then s2' + 2147483399 else s2' - -stdSplit :: StdGen -> (StdGen, StdGen) -stdSplit std@(StdGen s1 s2) - = (left, right) - where - -- no statistical foundation for this! - left = StdGen new_s1 t2 - right = StdGen t1 new_s2 - - new_s1 | s1 == 2147483562 = 1 - | otherwise = s1 + 1 - - new_s2 | s2 == 1 = 2147483398 - | otherwise = s2 - 1 - - StdGen t1 t2 = snd (next std) - --- The global random number generator - -{- $globalrng #globalrng# - -There is a single, implicit, global random number generator of type -'StdGen', held in some global variable maintained by the 'IO' monad. It is -initialised automatically in some system-dependent fashion, for example, by -using the time of day, or Linux's kernel random number generator. 
To get -deterministic behaviour, use 'setStdGen'. --} - --- |Sets the global random number generator. -setStdGen :: StdGen -> IO () -setStdGen sgen = writeIORef theStdGen sgen - --- |Gets the global random number generator. -getStdGen :: IO StdGen -getStdGen = readIORef theStdGen - -theStdGen :: IORef StdGen -theStdGen = unsafePerformIO $ do - rng <- mkStdRNG 0 - newIORef rng - --- |Applies 'split' to the current global random generator, --- updates it with one of the results, and returns the other. -newStdGen :: IO StdGen -newStdGen = atomicModifyIORef' theStdGen split - -{- |Uses the supplied function to get a value from the current global -random generator, and updates the global generator with the new generator -returned by the function. For example, @rollDice@ gets a random integer -between 1 and 6: - -> rollDice :: IO Int -> rollDice = getStdRandom (randomR (1,6)) - --} - -getStdRandom :: (StdGen -> (a,StdGen)) -> IO a -getStdRandom f = atomicModifyIORef' theStdGen (swap . f) - where swap (v,g) = (g,v) - -{- $references - -1. FW #Burton# Burton and RL Page, /Distributed random number generation/, -Journal of Functional Programming, 2(2):203-212, April 1992. - -2. SK #Park# Park, and KW Miller, /Random number generators - -good ones are hard to find/, Comm ACM 31(10), Oct 1988, pp1192-1201. - -3. DG #Carta# Carta, /Two fast implementations of the minimal standard -random number generator/, Comm ACM, 33(1), Jan 1990, pp87-88. - -4. P #Hellekalek# Hellekalek, /Don\'t trust parallel Monte Carlo/, -Department of Mathematics, University of Salzburg, -, 1998. - -5. Pierre #LEcuyer# L'Ecuyer, /Efficient and portable combined random -number generators/, Comm ACM, 31(6), Jun 1988, pp742-749. - -The Web site is a great source of information. 
- --} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar3=/tests/T7936.hs cabal-install-1.22-1.22.9.0/=unpacked-tar3=/tests/T7936.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar3=/tests/T7936.hs 2014-09-16 21:34:43.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar3=/tests/T7936.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ --- Test for ticket #7936: --- https://ghc.haskell.org/trac/ghc/ticket/7936 --- --- Used to fail with: --- --- $ cabal test T7936 --test-options="+RTS -M1M -RTS" --- T7936: Heap exhausted; - -module Main where - -import System.Random (newStdGen) -import Control.Monad (replicateM_) - -main = replicateM_ 100000 newStdGen diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar3=/tests/TestRandomIOs.hs cabal-install-1.22-1.22.9.0/=unpacked-tar3=/tests/TestRandomIOs.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar3=/tests/TestRandomIOs.hs 2014-09-16 21:34:43.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar3=/tests/TestRandomIOs.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,20 +0,0 @@ --- Test for ticket #4218 (TestRandomIOs): --- https://ghc.haskell.org/trac/ghc/ticket/4218 --- --- Used to fail with: --- --- $ cabal test TestRandomIOs --test-options="+RTS -M1M -RTS" --- TestRandomIOs: Heap exhausted; - -module Main where - -import Control.Monad (replicateM) -import System.Random (randomIO) - --- Build a list of 5000 random ints in memory (IO Monad is strict), and print --- the last one. --- Should use less than 1Mb of heap space, or we are generating a list of --- unevaluated thunks. 
-main = do - rs <- replicateM 5000 randomIO :: IO [Int] - print $ last rs diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar3=/tests/TestRandomRs.hs cabal-install-1.22-1.22.9.0/=unpacked-tar3=/tests/TestRandomRs.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar3=/tests/TestRandomRs.hs 2014-09-16 21:34:43.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar3=/tests/TestRandomRs.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,22 +0,0 @@ --- Test for ticket #4218 (TestRandomRs): --- https://ghc.haskell.org/trac/ghc/ticket/4218 --- --- Fixed together with ticket #8704 --- https://ghc.haskell.org/trac/ghc/ticket/8704 --- Commit 4695ffa366f659940369f05e419a4f2249c3a776 --- --- Used to fail with: --- --- $ cabal test TestRandomRs --test-options="+RTS -M1M -RTS" --- TestRandomRs: Heap exhausted; - -module Main where - -import Control.Monad (liftM, replicateM) -import System.Random (randomRs, getStdGen) - --- Return the five-thousandth random number: --- Should run in constant space (< 1Mb heap). -main = do - n <- (last . take 5000 . randomRs (0, 1000000)) `liftM` getStdGen - print (n::Integer) diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar3=/.travis.yml cabal-install-1.22-1.22.9.0/=unpacked-tar3=/.travis.yml --- cabal-install-1.22-1.22.6.0/=unpacked-tar3=/.travis.yml 2014-09-16 21:34:43.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar3=/.travis.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ -language: haskell -ghc: - - 7.4 - - 7.6 - - 7.8 diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar4=/CHANGES cabal-install-1.22-1.22.9.0/=unpacked-tar4=/CHANGES --- cabal-install-1.22-1.22.6.0/=unpacked-tar4=/CHANGES 2014-12-18 21:12:40.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar4=/CHANGES 1970-01-01 00:00:00.000000000 +0000 @@ -1,119 +0,0 @@ - * If the URI contains "user:pass@" part, use it for Basic Authorization - * Add a test harness. - * Don't leak a socket when getHostAddr throws an exception. 
- * Send cookies in request format, not response format. - * Moved BrowserAction to be a StateT IO, with instances for - Applicative, MonadIO, MonadState. - * Add method to control size of connection pool. - * Consider both host and port when reusing connections. - * Handle response code 304 "not modified" properly. - * Fix digest authentication by fixing md5 output string rep. - * Make the default user agent string follow the package version. - * Document lack of HTTPS support and fail when clients try - to use it instead of silently falling back to HTTP. - * Add helper to set the request type and body. - -Version 4000.1.2: release 2011-08-11 - * Turn off buffering for the debug log. - * Update installation instructions. - * Bump base dependency to support GHC 7.2. - -Version 4000.1.1: release 2010-11-28 - * Be tolerant of LF (instead of CRLF which is the spec) in responses. - -Version 4000.1.0: release 2010-11-09 - * Retroactively fixed CHANGES to refer to 4000.x.x instead of - 4004.x.x. - * Fix problem with close looping on certain URLs due to trying - to munch the rest of the stream even on EOF. Modified from - a fix by Daniel Wagner. - * This involves a new class member for HStream and is thus an - API change, but one that will only affect clients that - define their own payload type to replace String/ByteString. - * Applied patch by Antoine Latter to fix problem with 301 and 307 - redirects. - -Version 4000.0.10: release 2010-10-29 - * Bump base dependency to support GHC 7.0. - * Stop using 'fail' from the Either monad and instead build Left - values explicitly; the behaviour of fail is changing in GHC 7.0 - and this avoids being sensitive to the change. - -Version 4000.0.9: release 2009-12-20 - - * Export headerMap from Network.HTTP.Headers - (suggested by David Leuschner.) - * Fix Network.TCP.{isTCPConnectedTo,isConnectedTo} to be useful. - * Always delay closing non-persistent connections until we reach EOF. 
- Delaying it until then is vital when reading the response out as a - lazy ByteString; all of the I/O may not have happened by the time we - were returning the HTTP response. Bug manifested itself occasionally - with larger responses. Courtesy of Valery Vorotyntsev; both untiring bug - hunt and fix. - * drop unused type argument from Network.Browser.BrowserEvent; needlessly general. - (patch provided by Daniel Wagner.) - -Version 4000.0.8: release 2009-08-05 - - * Incorporated proxy setting lookup and parsing contribution - by Eric Kow; provided in Network.HTTP.Proxy - * Factor out HTTP Cookies and Auth handling into separate - modules Network.HTTP.Cookie, Network.HTTP.Auth - * new Network.Browser functionality for hooking up the - proxy detection code in Network.HTTP.Proxy: - - setCheckForProxy :: Bool -> BrowserAction t () - getCheckForProxy :: BrowserAction t Bool - - If you do 'setCheckForProxy True' within a browser - session, the proxy-checking code will be called upon. - Use 'getCheckForProxy' to get the current setting for - this flag. - - * Network.Browser: if HTTP Basic Auth is allowed and - server doesn't 401-challenge with an WWW-Authenticate: - header, simply assume / realm and proceed. Preferable - than failing, even if server is the wrong. - -Version 4000.0.7: release 2009-05-22 - - * Minor release. - * Added - Network.TCP.openSocketStream :: (BufferType t) - => String {-host-} - -> Socket - -> IO (HandleStream t) - - for interfacing to pre-existing @Socket@s. Contributed and - suggested by . - -Version 4000.0.6: release 2009-04-21; changes from 4000.0.5 - - * Network.Browser: use HTTP.HandleStream.sendHTTP_notify, not HTTP.sendHTTP_notify - when issuing requests. The latter runs the risk of undoing request normalization. - * Network.HTTP.Base.normalizeRequest: when normalizing proxy-bound requests, - insert a Host: header if none present. Set it to the destination server authority, - not the proxy. 
- * Network.Browser: don't fail on seeing invalid cookie values, but report them - as errors and continue. - -Version 4000.0.5: release 2009-03-30; changes from 4000.0.4 - - * Get serious about comments and Haddock documentation. - * Cleaned up normalization of requests, fixing bugs and bringing together - previous disparate attempts at handling this. - * RequestMethod now supports custom verbs; use the (Custom String) constructor - * Beef up Network.HTTP.Base's support for normalizing requests and URIs: - - * added splitRequestURI which divides a URI into two; the Authority portion - (as a String) and the input URI sans the authority portion. Useful when - wanting to split up a request's URI into its Host: and abs_path pieces. - * added normalizeRequest :: Bool -> Request ty -> Request ty, which - fixes up a requests URI path and Host: info depending on whether it is - destined for a proxy or not (controlled by the Bool.) - * moved defaultRequest, defaultRequest_, libUA from Network.Browser - to Network.HTTP.Base - * added mkRequest :: RequestMethod -> URI -> Bool -> Request ty - for constructing normalized&sane Request bases on top of which - you can add custom headers, body payload etc. - diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar4=/HTTP.cabal cabal-install-1.22-1.22.9.0/=unpacked-tar4=/HTTP.cabal --- cabal-install-1.22-1.22.6.0/=unpacked-tar4=/HTTP.cabal 2014-12-18 21:12:40.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar4=/HTTP.cabal 1970-01-01 00:00:00.000000000 +0000 @@ -1,172 +0,0 @@ -Name: HTTP -Version: 4000.2.19 -Cabal-Version: >= 1.8 -Build-type: Simple -License: BSD3 -License-file: LICENSE -Author: Warrick Gray -Maintainer: Ganesh Sittampalam -Homepage: https://github.com/haskell/HTTP -Category: Network -Synopsis: A library for client-side HTTP -Description: - - The HTTP package supports client-side web programming in Haskell. 
It lets you set up - HTTP connections, transmitting requests and processing the responses coming back, all - from within the comforts of Haskell. It's dependent on the network package to operate, - but other than that, the implementation is all written in Haskell. - . - A basic API for issuing single HTTP requests + receiving responses is provided. On top - of that, a session-level abstraction is also on offer (the @BrowserAction@ monad); - it taking care of handling the management of persistent connections, proxies, - state (cookies) and authentication credentials required to handle multi-step - interactions with a web server. - . - The representation of the bytes flowing across is extensible via the use of a type class, - letting you pick the representation of requests and responses that best fits your use. - Some pre-packaged, common instances are provided for you (@ByteString@, @String@). - . - Here's an example use: - . - > - > do - > rsp <- Network.HTTP.simpleHTTP (getRequest "http://www.haskell.org/") - > -- fetch document and return it (as a 'String'.) - > fmap (take 100) (getResponseBody rsp) - > - > do - > (_, rsp) - > <- Network.Browser.browse $ do - > setAllowRedirects True -- handle HTTP redirects - > request $ getRequest "http://www.haskell.org/" - > return (take 100 (rspBody rsp)) - -Extra-Source-Files: CHANGES - -Source-Repository head - type: git - location: https://github.com/haskell/HTTP.git - -Flag mtl1 - description: Use the old mtl version 1. 
- default: False - -Flag warn-as-error - default: False - description: Build with warnings-as-errors - manual: True - -Flag network23 - description: Use version 2.3.x or below of the network package - default: False - -Flag conduit10 - description: Use version 1.0.x or below of the conduit package (for the test suite) - default: False - -Flag warp-tests - description: Test against warp - default: True - manual: True - -flag network-uri - description: Get Network.URI from the network-uri package - default: True - -Library - Exposed-modules: - Network.BufferType, - Network.Stream, - Network.StreamDebugger, - Network.StreamSocket, - Network.TCP, - Network.HTTP, - Network.HTTP.Headers, - Network.HTTP.Base, - Network.HTTP.Stream, - Network.HTTP.Auth, - Network.HTTP.Cookie, - Network.HTTP.Proxy, - Network.HTTP.HandleStream, - Network.Browser - Other-modules: - Network.HTTP.Base64, - Network.HTTP.MD5Aux, - Network.HTTP.Utils - Paths_HTTP - GHC-options: -fwarn-missing-signatures -Wall - - -- note the test harness constraints should be kept in sync with these - -- where dependencies are shared - Build-depends: base >= 4.3.0.0 && < 4.9, parsec >= 2.0 && < 3.2 - Build-depends: array >= 0.3.0.2 && < 0.6, old-time >= 1.0.0.0 && < 1.2, bytestring >= 0.9.1.5 && < 0.11 - - Extensions: FlexibleInstances - - if flag(mtl1) - Build-depends: mtl >= 1.1.1.0 && < 1.2 - CPP-Options: -DMTL1 - else - Build-depends: mtl >= 2.0 && < 2.3 - - if flag(network-uri) - Build-depends: network-uri == 2.6.*, network == 2.6.* - else - Build-depends: network >= 2.2.1.5 && < 2.6 - - build-tools: ghc >= 7.0 && < 7.12 - - if flag(warn-as-error) - ghc-options: -Werror - - if os(windows) - Build-depends: Win32 >= 2.2.0.0 && < 2.4 - -Test-Suite test - type: exitcode-stdio-1.0 - - build-tools: ghc >= 7.0 && < 7.12 - - hs-source-dirs: test - main-is: httpTests.hs - - other-modules: - Httpd - UnitTests - - -- note: version constraints for dependencies shared with the library - -- should be the same - 
build-depends: HTTP, - HUnit >= 1.2.0.1 && < 1.3, - httpd-shed >= 0.4 && < 0.5, - mtl >= 1.1.1.0 && < 2.3, - bytestring >= 0.9.1.5 && < 0.11, - deepseq >= 1.3.0.0 && < 1.5, - pureMD5 >= 0.2.4 && < 2.2, - base >= 4.3.0.0 && < 4.9, - split >= 0.1.3 && < 0.3, - test-framework >= 0.2.0 && < 0.9, - test-framework-hunit >= 0.3.0 && <0.4 - - if flag(network-uri) - Build-depends: network-uri == 2.6.*, network == 2.6.* - else - Build-depends: network >= 2.2.1.5 && < 2.6 - - if flag(warp-tests) - CPP-Options: -DWARP_TESTS - build-depends: - case-insensitive >= 0.4.0.1 && < 1.3, - http-types >= 0.8.0 && < 0.9, - wai >= 2.1.0 && < 3.1, - warp >= 2.1.0 && < 3.1 - - if flag(conduit10) - build-depends: - conduit >= 1.0.8 && < 1.1 - else - build-depends: - conduit >= 1.1 && < 1.3, - conduit-extra >= 1.1 && < 1.2 - - diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar4=/LICENSE cabal-install-1.22-1.22.9.0/=unpacked-tar4=/LICENSE --- cabal-install-1.22-1.22.6.0/=unpacked-tar4=/LICENSE 2014-12-18 21:12:40.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar4=/LICENSE 1970-01-01 00:00:00.000000000 +0000 @@ -1,46 +0,0 @@ -Copyright (c) 2002, Warrick Gray -Copyright (c) 2002-2005, Ian Lynagh -Copyright (c) 2003-2006, Bjorn Bringert -Copyright (c) 2004, Andre Furtado -Copyright (c) 2004-2005, Dominic Steinitz -Copyright (c) 2007, Robin Bate Boerop -Copyright (c) 2008-2010, Sigbjorn Finne -Copyright (c) 2009, Eric Kow -Copyright (c) 2010, Antoine Latter -Copyright (c) 2004, 2010-2011, Ganesh Sittampalam -Copyright (c) 2011, Duncan Coutts -Copyright (c) 2011, Matthew Gruen -Copyright (c) 2011, Jeremy Yallop -Copyright (c) 2011, Eric Hesselink -Copyright (c) 2011, Yi Huang -Copyright (c) 2011, Tom Lokhorst - -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - - * The names of contributors may not be used to endorse or promote - products derived from this software without specific prior - written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Network/Browser.hs cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Network/Browser.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Network/Browser.hs 2014-12-18 21:12:40.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Network/Browser.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,1091 +0,0 @@ -{-# LANGUAGE MultiParamTypeClasses, GeneralizedNewtypeDeriving, CPP, FlexibleContexts #-} -{- | - -Module : Network.Browser -Copyright : See LICENSE file -License : BSD - -Maintainer : Ganesh Sittampalam -Stability : experimental -Portability : non-portable (not tested) - -Session-level interactions over HTTP. - -The "Network.Browser" goes beyond the basic "Network.HTTP" functionality in -providing support for more involved, and real, request/response interactions over -HTTP. Additional features supported are: - -* HTTP Authentication handling - -* Transparent handling of redirects - -* Cookie stores + transmission. - -* Transaction logging - -* Proxy-mediated connections. - -Example use: - -> do -> (_, rsp) -> <- Network.Browser.browse $ do -> setAllowRedirects True -- handle HTTP redirects -> request $ getRequest "http://www.haskell.org/" -> return (take 100 (rspBody rsp)) - --} -module Network.Browser - ( BrowserState - , BrowserAction -- browser monad, effectively a state monad. - , Proxy(..) - - , browse -- :: BrowserAction a -> IO a - , request -- :: Request -> BrowserAction Response - - , getBrowserState -- :: BrowserAction t (BrowserState t) - , withBrowserState -- :: BrowserState t -> BrowserAction t a -> BrowserAction t a - - , setAllowRedirects -- :: Bool -> BrowserAction t () - , getAllowRedirects -- :: BrowserAction t Bool - - , setMaxRedirects -- :: Int -> BrowserAction t () - , getMaxRedirects -- :: BrowserAction t (Maybe Int) - - , Authority(..) - , getAuthorities - , setAuthorities - , addAuthority - , Challenge(..) - , Qop(..) - , Algorithm(..) 
- - , getAuthorityGen - , setAuthorityGen - , setAllowBasicAuth - , getAllowBasicAuth - - , setMaxErrorRetries -- :: Maybe Int -> BrowserAction t () - , getMaxErrorRetries -- :: BrowserAction t (Maybe Int) - - , setMaxPoolSize -- :: Int -> BrowserAction t () - , getMaxPoolSize -- :: BrowserAction t (Maybe Int) - - , setMaxAuthAttempts -- :: Maybe Int -> BrowserAction t () - , getMaxAuthAttempts -- :: BrowserAction t (Maybe Int) - - , setCookieFilter -- :: (URI -> Cookie -> IO Bool) -> BrowserAction t () - , getCookieFilter -- :: BrowserAction t (URI -> Cookie -> IO Bool) - , defaultCookieFilter -- :: URI -> Cookie -> IO Bool - , userCookieFilter -- :: URI -> Cookie -> IO Bool - - , Cookie(..) - , getCookies -- :: BrowserAction t [Cookie] - , setCookies -- :: [Cookie] -> BrowserAction t () - , addCookie -- :: Cookie -> BrowserAction t () - - , setErrHandler -- :: (String -> IO ()) -> BrowserAction t () - , setOutHandler -- :: (String -> IO ()) -> BrowserAction t () - - , setEventHandler -- :: (BrowserEvent -> BrowserAction t ()) -> BrowserAction t () - - , BrowserEvent(..) - , BrowserEventType(..) - , RequestID - - , setProxy -- :: Proxy -> BrowserAction t () - , getProxy -- :: BrowserAction t Proxy - - , setCheckForProxy -- :: Bool -> BrowserAction t () - , getCheckForProxy -- :: BrowserAction t Bool - - , setDebugLog -- :: Maybe String -> BrowserAction t () - - , getUserAgent -- :: BrowserAction t String - , setUserAgent -- :: String -> BrowserAction t () - - , out -- :: String -> BrowserAction t () - , err -- :: String -> BrowserAction t () - , ioAction -- :: IO a -> BrowserAction a - - , defaultGETRequest - , defaultGETRequest_ - - , formToRequest - , uriDefaultTo - - -- old and half-baked; don't use: - , Form(..) - , FormVar - ) where - -import Network.URI - ( URI(..) - , URIAuth(..) 
- , parseURI, parseURIReference, relativeTo - ) -import Network.StreamDebugger (debugByteStream) -import Network.HTTP hiding ( sendHTTP_notify ) -import Network.HTTP.HandleStream ( sendHTTP_notify ) -import Network.HTTP.Auth -import Network.HTTP.Cookie -import Network.HTTP.Proxy - -import Network.Stream ( ConnError(..), Result ) -import Network.BufferType - -import Data.Char (toLower) -import Data.List (isPrefixOf) -import Data.Maybe (fromMaybe, listToMaybe, catMaybes ) -import Control.Applicative (Applicative (..), (<$>)) -#ifdef MTL1 -import Control.Monad (filterM, forM_, when, ap) -#else -import Control.Monad (filterM, forM_, when) -#endif -import Control.Monad.State (StateT (..), MonadIO (..), modify, gets, withStateT, evalStateT, MonadState (..)) - -import qualified System.IO - ( hSetBuffering, hPutStr, stdout, stdin, hGetChar - , BufferMode(NoBuffering, LineBuffering) - ) -import System.Time ( ClockTime, getClockTime ) - - ------------------------------------------------------------------- ------------------------ Cookie Stuff ----------------------------- ------------------------------------------------------------------- - --- | @defaultCookieFilter@ is the initial cookie acceptance filter. --- It welcomes them all into the store @:-)@ -defaultCookieFilter :: URI -> Cookie -> IO Bool -defaultCookieFilter _url _cky = return True - --- | @userCookieFilter@ is a handy acceptance filter, asking the --- user if he/she is willing to accept an incoming cookie before --- adding it to the store. 
-userCookieFilter :: URI -> Cookie -> IO Bool -userCookieFilter url cky = do - do putStrLn ("Set-Cookie received when requesting: " ++ show url) - case ckComment cky of - Nothing -> return () - Just x -> putStrLn ("Cookie Comment:\n" ++ x) - let pth = maybe "" ('/':) (ckPath cky) - putStrLn ("Domain/Path: " ++ ckDomain cky ++ pth) - putStrLn (ckName cky ++ '=' : ckValue cky) - System.IO.hSetBuffering System.IO.stdout System.IO.NoBuffering - System.IO.hSetBuffering System.IO.stdin System.IO.NoBuffering - System.IO.hPutStr System.IO.stdout "Accept [y/n]? " - x <- System.IO.hGetChar System.IO.stdin - System.IO.hSetBuffering System.IO.stdin System.IO.LineBuffering - System.IO.hSetBuffering System.IO.stdout System.IO.LineBuffering - return (toLower x == 'y') - --- | @addCookie c@ adds a cookie to the browser state, removing duplicates. -addCookie :: Cookie -> BrowserAction t () -addCookie c = modify (\b -> b{bsCookies = c : filter (/=c) (bsCookies b) }) - --- | @setCookies cookies@ replaces the set of cookies known to --- the browser to @cookies@. Useful when wanting to restore cookies --- used across 'browse' invocations. -setCookies :: [Cookie] -> BrowserAction t () -setCookies cs = modify (\b -> b { bsCookies=cs }) - --- | @getCookies@ returns the current set of cookies known to --- the browser. -getCookies :: BrowserAction t [Cookie] -getCookies = gets bsCookies - --- ...get domain specific cookies... --- ... this needs changing for consistency with rfc2109... --- ... currently too broad. -getCookiesFor :: String -> String -> BrowserAction t [Cookie] -getCookiesFor dom path = - do cks <- getCookies - return (filter cookiematch cks) - where - cookiematch :: Cookie -> Bool - cookiematch = cookieMatch (dom,path) - - --- | @setCookieFilter fn@ sets the cookie acceptance filter to @fn@. 
-setCookieFilter :: (URI -> Cookie -> IO Bool) -> BrowserAction t () -setCookieFilter f = modify (\b -> b { bsCookieFilter=f }) - --- | @getCookieFilter@ returns the current cookie acceptance filter. -getCookieFilter :: BrowserAction t (URI -> Cookie -> IO Bool) -getCookieFilter = gets bsCookieFilter - ------------------------------------------------------------------- ------------------------ Authorisation Stuff ---------------------- ------------------------------------------------------------------- - -{- - -The browser handles 401 responses in the following manner: - 1) extract all WWW-Authenticate headers from a 401 response - 2) rewrite each as a Challenge object, using "headerToChallenge" - 3) pick a challenge to respond to, usually the strongest - challenge understood by the client, using "pickChallenge" - 4) generate a username/password combination using the browsers - "bsAuthorityGen" function (the default behaviour is to ask - the user) - 5) build an Authority object based upon the challenge and user - data, store this new Authority in the browser state - 6) convert the Authority to a request header and add this - to a request using "withAuthority" - 7) send the amended request - -Note that by default requests are annotated with authority headers -before the first sending, based upon previously generated Authority -objects (which contain domain information). Once a specific authority -is added to a rejected request this predictive annotation is suppressed. - -407 responses are handled in a similar manner, except - a) Authorities are not collected, only a single proxy authority - is kept by the browser - b) If the proxy used by the browser (type Proxy) is NoProxy, then - a 407 response will generate output on the "err" stream and - the response will be returned. 
- - -Notes: - - digest authentication so far ignores qop, so fails to authenticate - properly with qop=auth-int challenges - - calculates a1 more than necessary - - doesn't reverse authenticate - - doesn't properly receive AuthenticationInfo headers, so fails - to use next-nonce etc - --} - --- | Return authorities for a given domain and path. --- Assumes "dom" is lower case -getAuthFor :: String -> String -> BrowserAction t [Authority] -getAuthFor dom pth = getAuthorities >>= return . (filter match) - where - match :: Authority -> Bool - match au@AuthBasic{} = matchURI (auSite au) - match au@AuthDigest{} = or (map matchURI (auDomain au)) - - matchURI :: URI -> Bool - matchURI s = (uriToAuthorityString s == dom) && (uriPath s `isPrefixOf` pth) - - --- | @getAuthorities@ return the current set of @Authority@s known --- to the browser. -getAuthorities :: BrowserAction t [Authority] -getAuthorities = gets bsAuthorities - --- @setAuthorities as@ replaces the Browser's known set --- of 'Authority's to @as@. -setAuthorities :: [Authority] -> BrowserAction t () -setAuthorities as = modify (\b -> b { bsAuthorities=as }) - --- @addAuthority a@ adds 'Authority' @a@ to the Browser's --- set of known authorities. -addAuthority :: Authority -> BrowserAction t () -addAuthority a = modify (\b -> b { bsAuthorities=a:bsAuthorities b }) - --- | @getAuthorityGen@ returns the current authority generator -getAuthorityGen :: BrowserAction t (URI -> String -> IO (Maybe (String,String))) -getAuthorityGen = gets bsAuthorityGen - --- | @setAuthorityGen genAct@ sets the auth generator to @genAct@. -setAuthorityGen :: (URI -> String -> IO (Maybe (String,String))) -> BrowserAction t () -setAuthorityGen f = modify (\b -> b { bsAuthorityGen=f }) - --- | @setAllowBasicAuth onOff@ enables\/disables HTTP Basic Authentication. 
-setAllowBasicAuth :: Bool -> BrowserAction t () -setAllowBasicAuth ba = modify (\b -> b { bsAllowBasicAuth=ba }) - -getAllowBasicAuth :: BrowserAction t Bool -getAllowBasicAuth = gets bsAllowBasicAuth - --- | @setMaxAuthAttempts mbMax@ sets the maximum number of authentication attempts --- to do. If @Nothing@, rever to default max. -setMaxAuthAttempts :: Maybe Int -> BrowserAction t () -setMaxAuthAttempts mb - | fromMaybe 0 mb < 0 = return () - | otherwise = modify (\ b -> b{bsMaxAuthAttempts=mb}) - --- | @getMaxAuthAttempts@ returns the current max auth attempts. If @Nothing@, --- the browser's default is used. -getMaxAuthAttempts :: BrowserAction t (Maybe Int) -getMaxAuthAttempts = gets bsMaxAuthAttempts - --- | @setMaxErrorRetries mbMax@ sets the maximum number of attempts at --- transmitting a request. If @Nothing@, rever to default max. -setMaxErrorRetries :: Maybe Int -> BrowserAction t () -setMaxErrorRetries mb - | fromMaybe 0 mb < 0 = return () - | otherwise = modify (\ b -> b{bsMaxErrorRetries=mb}) - --- | @getMaxErrorRetries@ returns the current max number of error retries. -getMaxErrorRetries :: BrowserAction t (Maybe Int) -getMaxErrorRetries = gets bsMaxErrorRetries - --- TO BE CHANGED!!! -pickChallenge :: Bool -> [Challenge] -> Maybe Challenge -pickChallenge allowBasic [] - | allowBasic = Just (ChalBasic "/") -- manufacture a challenge if one missing; more robust. -pickChallenge _ ls = listToMaybe ls - --- | Retrieve a likely looking authority for a Request. 
-anticipateChallenge :: Request ty -> BrowserAction t (Maybe Authority) -anticipateChallenge rq = - let uri = rqURI rq in - do { authlist <- getAuthFor (uriAuthToString $ reqURIAuth rq) (uriPath uri) - ; return (listToMaybe authlist) - } - --- | Asking the user to respond to a challenge -challengeToAuthority :: URI -> Challenge -> BrowserAction t (Maybe Authority) -challengeToAuthority uri ch - | not (answerable ch) = return Nothing - | otherwise = do - -- prompt user for authority - prompt <- getAuthorityGen - userdetails <- liftIO $ prompt uri (chRealm ch) - case userdetails of - Nothing -> return Nothing - Just (u,p) -> return (Just $ buildAuth ch u p) - where - answerable :: Challenge -> Bool - answerable ChalBasic{} = True - answerable chall = (chAlgorithm chall) == Just AlgMD5 - - buildAuth :: Challenge -> String -> String -> Authority - buildAuth (ChalBasic r) u p = - AuthBasic { auSite=uri - , auRealm=r - , auUsername=u - , auPassword=p - } - - -- note to self: this is a pretty stupid operation - -- to perform isn't it? ChalX and AuthX are so very - -- similar. - buildAuth (ChalDigest r d n o _stale a q) u p = - AuthDigest { auRealm=r - , auUsername=u - , auPassword=p - , auDomain=d - , auNonce=n - , auOpaque=o - , auAlgorithm=a - , auQop=q - } - - ------------------------------------------------------------------- ------------------- Browser State Actions ------------------------- ------------------------------------------------------------------- - - --- | @BrowserState@ is the (large) record type tracking the current --- settings of the browser. 
-data BrowserState connection - = BS { bsErr, bsOut :: String -> IO () - , bsCookies :: [Cookie] - , bsCookieFilter :: URI -> Cookie -> IO Bool - , bsAuthorityGen :: URI -> String -> IO (Maybe (String,String)) - , bsAuthorities :: [Authority] - , bsAllowRedirects :: Bool - , bsAllowBasicAuth :: Bool - , bsMaxRedirects :: Maybe Int - , bsMaxErrorRetries :: Maybe Int - , bsMaxAuthAttempts :: Maybe Int - , bsMaxPoolSize :: Maybe Int - , bsConnectionPool :: [connection] - , bsCheckProxy :: Bool - , bsProxy :: Proxy - , bsDebug :: Maybe String - , bsEvent :: Maybe (BrowserEvent -> BrowserAction connection ()) - , bsRequestID :: RequestID - , bsUserAgent :: Maybe String - } - -instance Show (BrowserState t) where - show bs = "BrowserState { " - ++ shows (bsCookies bs) ("\n" - {- ++ show (bsAuthorities bs) ++ "\n"-} - ++ "AllowRedirects: " ++ shows (bsAllowRedirects bs) "} ") - --- | @BrowserAction@ is the IO monad, but carrying along a 'BrowserState'. -newtype BrowserAction conn a - = BA { unBA :: StateT (BrowserState conn) IO a } -#ifdef MTL1 - deriving (Functor, Monad, MonadIO, MonadState (BrowserState conn)) - -instance Applicative (BrowserAction conn) where - pure = return - (<*>) = ap -#else - deriving (Functor, Applicative, Monad, MonadIO, MonadState (BrowserState conn)) -#endif - -runBA :: BrowserState conn -> BrowserAction conn a -> IO a -runBA bs = flip evalStateT bs . unBA - --- | @browse act@ is the toplevel action to perform a 'BrowserAction'. --- Example use: @browse (request (getRequest yourURL))@. 
-browse :: BrowserAction conn a -> IO a -browse = runBA defaultBrowserState - --- | The default browser state has the settings -defaultBrowserState :: BrowserState t -defaultBrowserState = res - where - res = BS - { bsErr = putStrLn - , bsOut = putStrLn - , bsCookies = [] - , bsCookieFilter = defaultCookieFilter - , bsAuthorityGen = \ _uri _realm -> do - bsErr res "No action for prompting/generating user+password credentials provided (use: setAuthorityGen); returning Nothing" - return Nothing - , bsAuthorities = [] - , bsAllowRedirects = True - , bsAllowBasicAuth = False - , bsMaxRedirects = Nothing - , bsMaxErrorRetries = Nothing - , bsMaxAuthAttempts = Nothing - , bsMaxPoolSize = Nothing - , bsConnectionPool = [] - , bsCheckProxy = defaultAutoProxyDetect - , bsProxy = noProxy - , bsDebug = Nothing - , bsEvent = Nothing - , bsRequestID = 0 - , bsUserAgent = Nothing - } - -{-# DEPRECATED getBrowserState "Use Control.Monad.State.get instead." #-} --- | @getBrowserState@ returns the current browser config. Useful --- for restoring state across 'BrowserAction's. -getBrowserState :: BrowserAction t (BrowserState t) -getBrowserState = get - --- | @withBrowserAction st act@ performs @act@ with 'BrowserState' @st@. -withBrowserState :: BrowserState t -> BrowserAction t a -> BrowserAction t a -withBrowserState bs = BA . withStateT (const bs) . unBA - --- | @nextRequest act@ performs the browser action @act@ as --- the next request, i.e., setting up a new request context --- before doing so. -nextRequest :: BrowserAction t a -> BrowserAction t a -nextRequest act = do - let updReqID st = - let - rid = succ (bsRequestID st) - in - rid `seq` st{bsRequestID=rid} - modify updReqID - act - --- | Lifts an IO action into the 'BrowserAction' monad. -{-# DEPRECATED ioAction "Use Control.Monad.Trans.liftIO instead." #-} -ioAction :: IO a -> BrowserAction t a -ioAction = liftIO - --- | @setErrHandler@ sets the IO action to call when --- the browser reports running errors. 
To disable any --- such, set it to @const (return ())@. -setErrHandler :: (String -> IO ()) -> BrowserAction t () -setErrHandler h = modify (\b -> b { bsErr=h }) - --- | @setOutHandler@ sets the IO action to call when --- the browser chatters info on its running. To disable any --- such, set it to @const (return ())@. -setOutHandler :: (String -> IO ()) -> BrowserAction t () -setOutHandler h = modify (\b -> b { bsOut=h }) - -out, err :: String -> BrowserAction t () -out s = do { f <- gets bsOut ; liftIO $ f s } -err s = do { f <- gets bsErr ; liftIO $ f s } - --- | @setAllowRedirects onOff@ toggles the willingness to --- follow redirects (HTTP responses with 3xx status codes). -setAllowRedirects :: Bool -> BrowserAction t () -setAllowRedirects bl = modify (\b -> b {bsAllowRedirects=bl}) - --- | @getAllowRedirects@ returns current setting of the do-chase-redirects flag. -getAllowRedirects :: BrowserAction t Bool -getAllowRedirects = gets bsAllowRedirects - --- | @setMaxRedirects maxCount@ sets the maxiumum number of forwarding hops --- we are willing to jump through. A no-op if the count is negative; if zero, --- the max is set to whatever default applies. Notice that setting the max --- redirects count does /not/ enable following of redirects itself; use --- 'setAllowRedirects' to do so. -setMaxRedirects :: Maybe Int -> BrowserAction t () -setMaxRedirects c - | fromMaybe 0 c < 0 = return () - | otherwise = modify (\b -> b{bsMaxRedirects=c}) - --- | @getMaxRedirects@ returns the current setting for the max-redirect count. --- If @Nothing@, the "Network.Browser"'s default is used. 
-getMaxRedirects :: BrowserAction t (Maybe Int) -getMaxRedirects = gets bsMaxRedirects - --- | @setMaxPoolSize maxCount@ sets the maximum size of the connection pool --- that is used to cache connections between requests -setMaxPoolSize :: Maybe Int -> BrowserAction t () -setMaxPoolSize c = modify (\b -> b{bsMaxPoolSize=c}) - --- | @getMaxPoolSize@ gets the maximum size of the connection pool --- that is used to cache connections between requests. --- If @Nothing@, the "Network.Browser"'s default is used. -getMaxPoolSize :: BrowserAction t (Maybe Int) -getMaxPoolSize = gets bsMaxPoolSize - --- | @setProxy p@ will disable proxy usage if @p@ is @NoProxy@. --- If @p@ is @Proxy proxyURL mbAuth@, then @proxyURL@ is interpreted --- as the URL of the proxy to use, possibly authenticating via --- 'Authority' information in @mbAuth@. -setProxy :: Proxy -> BrowserAction t () -setProxy p = - -- Note: if user _explicitly_ sets the proxy, we turn - -- off any auto-detection of proxies. - modify (\b -> b {bsProxy = p, bsCheckProxy=False}) - --- | @getProxy@ returns the current proxy settings. If --- the auto-proxy flag is set to @True@, @getProxy@ will --- perform the necessary -getProxy :: BrowserAction t Proxy -getProxy = do - p <- gets bsProxy - case p of - -- Note: if there is a proxy, no need to perform any auto-detect. - -- Presumably this is the user's explicit and preferred proxy server. - Proxy{} -> return p - NoProxy{} -> do - flg <- gets bsCheckProxy - if not flg - then return p - else do - np <- liftIO $ fetchProxy True{-issue warning on stderr if ill-formed...-} - -- note: this resets the check-proxy flag; a one-off affair. - setProxy np - return np - --- | @setCheckForProxy flg@ sets the one-time check for proxy --- flag to @flg@. If @True@, the session will try to determine --- the proxy server is locally configured. See 'Network.HTTP.Proxy.fetchProxy' --- for details of how this done. 
-setCheckForProxy :: Bool -> BrowserAction t () -setCheckForProxy flg = modify (\ b -> b{bsCheckProxy=flg}) - --- | @getCheckForProxy@ returns the current check-proxy setting. --- Notice that this may not be equal to @True@ if the session has --- set it to that via 'setCheckForProxy' and subsequently performed --- some HTTP protocol interactions. i.e., the flag return represents --- whether a proxy will be checked for again before any future protocol --- interactions. -getCheckForProxy :: BrowserAction t Bool -getCheckForProxy = gets bsCheckProxy - --- | @setDebugLog mbFile@ turns off debug logging iff @mbFile@ --- is @Nothing@. If set to @Just fStem@, logs of browser activity --- is appended to files of the form @fStem-url-authority@, i.e., --- @fStem@ is just the prefix for a set of log files, one per host/authority. -setDebugLog :: Maybe String -> BrowserAction t () -setDebugLog v = modify (\b -> b {bsDebug=v}) - --- | @setUserAgent ua@ sets the current @User-Agent:@ string to @ua@. It --- will be used if no explicit user agent header is found in subsequent requests. --- --- A common form of user agent string is @\"name\/version (details)\"@. For --- example @\"cabal-install/0.10.2 (HTTP 4000.1.2)\"@. Including the version --- of this HTTP package can be helpful if you ever need to track down HTTP --- compatability quirks. This version is available via 'httpPackageVersion'. --- For more info see . --- -setUserAgent :: String -> BrowserAction t () -setUserAgent ua = modify (\b -> b{bsUserAgent=Just ua}) - --- | @getUserAgent@ returns the current @User-Agent:@ default string. -getUserAgent :: BrowserAction t String -getUserAgent = do - n <- gets bsUserAgent - return (maybe defaultUserAgent id n) - --- | @RequestState@ is an internal tallying type keeping track of various --- per-connection counters, like the number of authorization attempts and --- forwards we've gone through. 
-data RequestState - = RequestState - { reqDenies :: Int -- ^ number of 401 responses so far - , reqRedirects :: Int -- ^ number of redirects so far - , reqRetries :: Int -- ^ number of retries so far - , reqStopOnDeny :: Bool -- ^ whether to pre-empt 401 response - } - -type RequestID = Int -- yeah, it will wrap around. - -nullRequestState :: RequestState -nullRequestState = RequestState - { reqDenies = 0 - , reqRedirects = 0 - , reqRetries = 0 - , reqStopOnDeny = True - } - --- | @BrowserEvent@ is the event record type that a user-defined handler, set --- via 'setEventHandler', will be passed. It indicates various state changes --- encountered in the processing of a given 'RequestID', along with timestamps --- at which they occurred. -data BrowserEvent - = BrowserEvent - { browserTimestamp :: ClockTime - , browserRequestID :: RequestID - , browserRequestURI :: {-URI-}String - , browserEventType :: BrowserEventType - } - --- | 'BrowserEventType' is the enumerated list of events that the browser --- internals will report to a user-defined event handler. -data BrowserEventType - = OpenConnection - | ReuseConnection - | RequestSent - | ResponseEnd ResponseData - | ResponseFinish -{- not yet, you will have to determine these via the ResponseEnd event. - | Redirect - | AuthChallenge - | AuthResponse --} - --- | @setEventHandler onBrowserEvent@ configures event handling. --- If @onBrowserEvent@ is @Nothing@, event handling is turned off; --- setting it to @Just onEv@ causes the @onEv@ IO action to be --- notified of browser events during the processing of a request --- by the Browser pipeline. 
-setEventHandler :: Maybe (BrowserEvent -> BrowserAction ty ()) -> BrowserAction ty () -setEventHandler mbH = modify (\b -> b { bsEvent=mbH}) - -buildBrowserEvent :: BrowserEventType -> {-URI-}String -> RequestID -> IO BrowserEvent -buildBrowserEvent bt uri reqID = do - ct <- getClockTime - return BrowserEvent - { browserTimestamp = ct - , browserRequestID = reqID - , browserRequestURI = uri - , browserEventType = bt - } - -reportEvent :: BrowserEventType -> {-URI-}String -> BrowserAction t () -reportEvent bt uri = do - st <- get - case bsEvent st of - Nothing -> return () - Just evH -> do - evt <- liftIO $ buildBrowserEvent bt uri (bsRequestID st) - evH evt -- if it fails, we fail. - --- | The default number of hops we are willing not to go beyond for --- request forwardings. -defaultMaxRetries :: Int -defaultMaxRetries = 4 - --- | The default number of error retries we are willing to perform. -defaultMaxErrorRetries :: Int -defaultMaxErrorRetries = 4 - --- | The default maximum HTTP Authentication attempts we will make for --- a single request. -defaultMaxAuthAttempts :: Int -defaultMaxAuthAttempts = 2 - --- | The default setting for auto-proxy detection. --- You may change this within a session via 'setAutoProxyDetect'. --- To avoid initial backwards compatibility issues, leave this as @False@. -defaultAutoProxyDetect :: Bool -defaultAutoProxyDetect = False - --- | @request httpRequest@ tries to submit the 'Request' @httpRequest@ --- to some HTTP server (possibly going via a /proxy/, see 'setProxy'.) --- Upon successful delivery, the URL where the response was fetched from --- is returned along with the 'Response' itself. 
-request :: HStream ty - => Request ty - -> BrowserAction (HandleStream ty) (URI,Response ty) -request req = nextRequest $ do - res <- request' nullVal initialState req - reportEvent ResponseFinish (show (rqURI req)) - case res of - Right r -> return r - Left e -> do - let errStr = ("Network.Browser.request: Error raised " ++ show e) - err errStr - fail errStr - where - initialState = nullRequestState - nullVal = buf_empty bufferOps - --- | Internal helper function, explicitly carrying along per-request --- counts. -request' :: HStream ty - => ty - -> RequestState - -> Request ty - -> BrowserAction (HandleStream ty) (Result (URI,Response ty)) -request' nullVal rqState rq = do - let uri = rqURI rq - failHTTPS uri - let uria = reqURIAuth rq - -- add cookies to request - cookies <- getCookiesFor (uriAuthToString uria) (uriPath uri) -{- Not for now: - (case uriUserInfo uria of - "" -> id - xs -> - case chopAtDelim ':' xs of - (_,[]) -> id - (usr,pwd) -> withAuth - AuthBasic{ auUserName = usr - , auPassword = pwd - , auRealm = "/" - , auSite = uri - }) $ do --} - when (not $ null cookies) - (out $ "Adding cookies to request. 
Cookie names: " ++ unwords (map ckName cookies)) - -- add credentials to request - rq' <- - if not (reqStopOnDeny rqState) - then return rq - else do - auth <- anticipateChallenge rq - case auth of - Nothing -> return rq - Just x -> return (insertHeader HdrAuthorization (withAuthority x rq) rq) - let rq'' = if not $ null cookies then insertHeaders [cookiesToHeader cookies] rq' else rq' - p <- getProxy - def_ua <- gets bsUserAgent - let defaultOpts = - case p of - NoProxy -> defaultNormalizeRequestOptions{normUserAgent=def_ua} - Proxy _ ath -> - defaultNormalizeRequestOptions - { normForProxy = True - , normUserAgent = def_ua - , normCustoms = - maybe [] - (\ authS -> [\ _ r -> insertHeader HdrProxyAuthorization (withAuthority authS r) r]) - ath - } - let final_req = normalizeRequest defaultOpts rq'' - out ("Sending:\n" ++ show final_req) - e_rsp <- - case p of - NoProxy -> dorequest (reqURIAuth rq'') final_req - Proxy str _ath -> do - let notURI - | null pt || null hst = - URIAuth{ uriUserInfo = "" - , uriRegName = str - , uriPort = "" - } - | otherwise = - URIAuth{ uriUserInfo = "" - , uriRegName = hst - , uriPort = pt - } - -- If the ':' is dropped from port below, dorequest will assume port 80. Leave it! - where (hst, pt) = span (':'/=) str - -- Proxy can take multiple forms - look for http://host:port first, - -- then host:port. Fall back to just the string given (probably a host name). 
- let proxyURIAuth = - maybe notURI - (\parsed -> maybe notURI id (uriAuthority parsed)) - (parseURI str) - - out $ "proxy uri host: " ++ uriRegName proxyURIAuth ++ ", port: " ++ uriPort proxyURIAuth - dorequest proxyURIAuth final_req - mbMx <- getMaxErrorRetries - case e_rsp of - Left v - | (reqRetries rqState < fromMaybe defaultMaxErrorRetries mbMx) && - (v == ErrorReset || v == ErrorClosed) -> do - --empty connnection pool in case connection has become invalid - modify (\b -> b { bsConnectionPool=[] }) - request' nullVal rqState{reqRetries=succ (reqRetries rqState)} rq - | otherwise -> - return (Left v) - Right rsp -> do - out ("Received:\n" ++ show rsp) - -- add new cookies to browser state - handleCookies uri (uriAuthToString $ reqURIAuth rq) - (retrieveHeaders HdrSetCookie rsp) - -- Deal with "Connection: close" in response. - handleConnectionClose (reqURIAuth rq) (retrieveHeaders HdrConnection rsp) - mbMxAuths <- getMaxAuthAttempts - case rspCode rsp of - (4,0,1) -- Credentials not sent or refused. - | reqDenies rqState > fromMaybe defaultMaxAuthAttempts mbMxAuths -> do - out "401 - credentials again refused; exceeded retry count (2)" - return (Right (uri,rsp)) - | otherwise -> do - out "401 - credentials not supplied or refused; retrying.." 
- let hdrs = retrieveHeaders HdrWWWAuthenticate rsp - flg <- getAllowBasicAuth - case pickChallenge flg (catMaybes $ map (headerToChallenge uri) hdrs) of - Nothing -> do - out "no challenge" - return (Right (uri,rsp)) {- do nothing -} - Just x -> do - au <- challengeToAuthority uri x - case au of - Nothing -> do - out "no auth" - return (Right (uri,rsp)) {- do nothing -} - Just au' -> do - out "Retrying request with new credentials" - request' nullVal - rqState{ reqDenies = succ(reqDenies rqState) - , reqStopOnDeny = False - } - (insertHeader HdrAuthorization (withAuthority au' rq) rq) - - (4,0,7) -- Proxy Authentication required - | reqDenies rqState > fromMaybe defaultMaxAuthAttempts mbMxAuths -> do - out "407 - proxy authentication required; max deny count exceeeded (2)" - return (Right (uri,rsp)) - | otherwise -> do - out "407 - proxy authentication required" - let hdrs = retrieveHeaders HdrProxyAuthenticate rsp - flg <- getAllowBasicAuth - case pickChallenge flg (catMaybes $ map (headerToChallenge uri) hdrs) of - Nothing -> return (Right (uri,rsp)) {- do nothing -} - Just x -> do - au <- challengeToAuthority uri x - case au of - Nothing -> return (Right (uri,rsp)) {- do nothing -} - Just au' -> do - pxy <- gets bsProxy - case pxy of - NoProxy -> do - err "Proxy authentication required without proxy!" 
- return (Right (uri,rsp)) - Proxy px _ -> do - out "Retrying with proxy authentication" - setProxy (Proxy px (Just au')) - request' nullVal - rqState{ reqDenies = succ(reqDenies rqState) - , reqStopOnDeny = False - } - rq - - (3,0,x) | x `elem` [2,3,1,7] -> do - out ("30" ++ show x ++ " - redirect") - allow_redirs <- allowRedirect rqState - case allow_redirs of - False -> return (Right (uri,rsp)) - _ -> do - case retrieveHeaders HdrLocation rsp of - [] -> do - err "No Location: header in redirect response" - return (Right (uri,rsp)) - (Header _ u:_) -> - case parseURIReference u of - Nothing -> do - err ("Parse of Location: header in a redirect response failed: " ++ u) - return (Right (uri,rsp)) - Just newURI - | {-uriScheme newURI_abs /= uriScheme uri && -}(not (supportedScheme newURI_abs)) -> do - err ("Unable to handle redirect, unsupported scheme: " ++ show newURI_abs) - return (Right (uri, rsp)) - | otherwise -> do - out ("Redirecting to " ++ show newURI_abs ++ " ...") - - -- Redirect using GET request method, depending on - -- response code. - let toGet = x `elem` [2,3] - method = if toGet then GET else rqMethod rq - rq1 = rq { rqMethod=method, rqURI=newURI_abs } - rq2 = if toGet then (replaceHeader HdrContentLength "0") (rq1 {rqBody = nullVal}) else rq1 - - request' nullVal - rqState{ reqDenies = 0 - , reqRedirects = succ(reqRedirects rqState) - , reqStopOnDeny = True - } - rq2 - where - newURI_abs = uriDefaultTo newURI uri - - (3,0,5) -> - case retrieveHeaders HdrLocation rsp of - [] -> do - err "No Location header in proxy redirect response." 
- return (Right (uri,rsp)) - (Header _ u:_) -> - case parseURIReference u of - Nothing -> do - err ("Parse of Location header in a proxy redirect response failed: " ++ u) - return (Right (uri,rsp)) - Just newuri -> do - out ("Retrying with proxy " ++ show newuri ++ "...") - setProxy (Proxy (uriToAuthorityString newuri) Nothing) - request' nullVal rqState{ reqDenies = 0 - , reqRedirects = 0 - , reqRetries = succ (reqRetries rqState) - , reqStopOnDeny = True - } - rq - _ -> return (Right (uri,rsp)) - --- | The internal request handling state machine. -dorequest :: (HStream ty) - => URIAuth - -> Request ty - -> BrowserAction (HandleStream ty) - (Result (Response ty)) -dorequest hst rqst = do - pool <- gets bsConnectionPool - let uPort = uriAuthPort Nothing{-ToDo: feed in complete URL-} hst - conn <- liftIO $ filterM (\c -> c `isTCPConnectedTo` EndPoint (uriRegName hst) uPort) pool - rsp <- - case conn of - [] -> do - out ("Creating new connection to " ++ uriAuthToString hst) - reportEvent OpenConnection (show (rqURI rqst)) - c <- liftIO $ openStream (uriRegName hst) uPort - updateConnectionPool c - dorequest2 c rqst - (c:_) -> do - out ("Recovering connection to " ++ uriAuthToString hst) - reportEvent ReuseConnection (show (rqURI rqst)) - dorequest2 c rqst - case rsp of - Right (Response a b c _) -> - reportEvent (ResponseEnd (a,b,c)) (show (rqURI rqst)) ; _ -> return () - return rsp - where - dorequest2 c r = do - dbg <- gets bsDebug - st <- get - let - onSendComplete = - maybe (return ()) - (\evh -> do - x <- buildBrowserEvent RequestSent (show (rqURI r)) (bsRequestID st) - runBA st (evh x) - return ()) - (bsEvent st) - liftIO $ - maybe (sendHTTP_notify c r onSendComplete) - (\ f -> do - c' <- debugByteStream (f++'-': uriAuthToString hst) c - sendHTTP_notify c' r onSendComplete) - dbg - -updateConnectionPool :: HStream hTy - => HandleStream hTy - -> BrowserAction (HandleStream hTy) () -updateConnectionPool c = do - pool <- gets bsConnectionPool - let len_pool = 
length pool - maxPoolSize <- fromMaybe defaultMaxPoolSize <$> gets bsMaxPoolSize - when (len_pool > maxPoolSize) - (liftIO $ close (last pool)) - let pool' - | len_pool > maxPoolSize = init pool - | otherwise = pool - when (maxPoolSize > 0) $ modify (\b -> b { bsConnectionPool=c:pool' }) - return () - --- | Default maximum number of open connections we are willing to have active. -defaultMaxPoolSize :: Int -defaultMaxPoolSize = 5 - -cleanConnectionPool :: HStream hTy - => URIAuth -> BrowserAction (HandleStream hTy) () -cleanConnectionPool uri = do - let ep = EndPoint (uriRegName uri) (uriAuthPort Nothing uri) - pool <- gets bsConnectionPool - bad <- liftIO $ mapM (\c -> c `isTCPConnectedTo` ep) pool - let tmp = zip bad pool - newpool = map snd $ filter (not . fst) tmp - toclose = map snd $ filter fst tmp - liftIO $ forM_ toclose close - modify (\b -> b { bsConnectionPool = newpool }) - -handleCookies :: URI -> String -> [Header] -> BrowserAction t () -handleCookies _ _ [] = return () -- cut short the silliness. 
-handleCookies uri dom cookieHeaders = do - when (not $ null errs) - (err $ unlines ("Errors parsing these cookie values: ":errs)) - when (not $ null newCookies) - (out $ foldl (\x y -> x ++ "\n " ++ show y) "Cookies received:" newCookies) - filterfn <- getCookieFilter - newCookies' <- liftIO (filterM (filterfn uri) newCookies) - when (not $ null newCookies') - (out $ "Accepting cookies with names: " ++ unwords (map ckName newCookies')) - mapM_ addCookie newCookies' - where - (errs, newCookies) = processCookieHeaders dom cookieHeaders - -handleConnectionClose :: HStream hTy - => URIAuth -> [Header] - -> BrowserAction (HandleStream hTy) () -handleConnectionClose _ [] = return () -handleConnectionClose uri headers = do - let doClose = any (== "close") $ map headerToConnType headers - when doClose $ cleanConnectionPool uri - where headerToConnType (Header _ t) = map toLower t - ------------------------------------------------------------------- ------------------------ Miscellaneous ---------------------------- ------------------------------------------------------------------- - -allowRedirect :: RequestState -> BrowserAction t Bool -allowRedirect rqState = do - rd <- getAllowRedirects - mbMxRetries <- getMaxRedirects - return (rd && (reqRedirects rqState <= fromMaybe defaultMaxRetries mbMxRetries)) - --- | Return @True@ iff the package is able to handle requests and responses --- over it. -supportedScheme :: URI -> Bool -supportedScheme u = uriScheme u == "http:" - --- | @uriDefaultTo a b@ returns a URI that is consistent with the first --- argument URI @a@ when read in the context of the second URI @b@. --- If the second argument is not sufficient context for determining --- a full URI then anarchy reins. -uriDefaultTo :: URI -> URI -> URI -#if MIN_VERSION_network(2,4,0) -uriDefaultTo a b = a `relativeTo` b -#else -uriDefaultTo a b = maybe a id (a `relativeTo` b) -#endif - - --- This form junk is completely untested... 
- -type FormVar = (String,String) - -data Form = Form RequestMethod URI [FormVar] - -formToRequest :: Form -> Request_String -formToRequest (Form m u vs) = - let enc = urlEncodeVars vs - in case m of - GET -> Request { rqMethod=GET - , rqHeaders=[ Header HdrContentLength "0" ] - , rqBody="" - , rqURI=u { uriQuery= '?' : enc } -- What about old query? - } - POST -> Request { rqMethod=POST - , rqHeaders=[ Header HdrContentType "application/x-www-form-urlencoded", - Header HdrContentLength (show $ length enc) ] - , rqBody=enc - , rqURI=u - } - _ -> error ("unexpected request: " ++ show m) - - diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Network/BufferType.hs cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Network/BufferType.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Network/BufferType.hs 2014-12-18 21:12:40.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Network/BufferType.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,164 +0,0 @@ -{-# LANGUAGE TypeSynonymInstances #-} ------------------------------------------------------------------------------ --- | --- Module : Network.BufferType --- Description : Abstract representation of request and response buffer types. --- Copyright : See LICENSE file --- License : BSD --- --- Maintainer : Ganesh Sittampalam --- Stability : experimental --- Portability : non-portable (not tested) --- --- In order to give the user freedom in how request and response content --- is represented, a sufficiently abstract representation is needed of --- these internally. The "Network.BufferType" module provides this, defining --- the 'BufferType' class and its ad-hoc representation of buffer operations --- via the 'BufferOp' record. --- --- This module provides definitions for the standard buffer types that the --- package supports, i.e., for @String@ and @ByteString@ (strict and lazy.) --- ------------------------------------------------------------------------------ -module Network.BufferType - ( - BufferType(..) 
- - , BufferOp(..) - , strictBufferOp - , lazyBufferOp - , stringBufferOp - ) where - - -import qualified Data.ByteString as Strict hiding ( unpack, pack, span ) -import qualified Data.ByteString.Char8 as Strict ( unpack, pack, span ) -import qualified Data.ByteString.Lazy as Lazy hiding ( pack, unpack,span ) -import qualified Data.ByteString.Lazy.Char8 as Lazy ( pack, unpack, span ) -import System.IO ( Handle ) -import Data.Word ( Word8 ) - -import Network.HTTP.Utils ( crlf, lf ) - --- | The @BufferType@ class encodes, in a mixed-mode way, the interface --- that the library requires to operate over data embedded in HTTP --- requests and responses. That is, we use explicit dictionaries --- for the operations, but overload the name of the dicts themselves. --- -class BufferType bufType where - bufferOps :: BufferOp bufType - -instance BufferType Lazy.ByteString where - bufferOps = lazyBufferOp - -instance BufferType Strict.ByteString where - bufferOps = strictBufferOp - -instance BufferType String where - bufferOps = stringBufferOp - --- | @BufferOp@ encodes the I/O operations of the underlying buffer over --- a Handle in an (explicit) dictionary type. May not be needed, but gives --- us flexibility in explicit overriding and wrapping up of these methods. --- --- Along with IO operations is an ad-hoc collection of functions for working --- with these abstract buffers, as needed by the internals of the code --- that processes requests and responses. --- --- We supply three default @BufferOp@ values, for @String@ along with the --- strict and lazy versions of @ByteString@. 
To add others, provide @BufferOp@ --- definitions for -data BufferOp a - = BufferOp - { buf_hGet :: Handle -> Int -> IO a - , buf_hGetContents :: Handle -> IO a - , buf_hPut :: Handle -> a -> IO () - , buf_hGetLine :: Handle -> IO a - , buf_empty :: a - , buf_append :: a -> a -> a - , buf_concat :: [a] -> a - , buf_fromStr :: String -> a - , buf_toStr :: a -> String - , buf_snoc :: a -> Word8 -> a - , buf_splitAt :: Int -> a -> (a,a) - , buf_span :: (Char -> Bool) -> a -> (a,a) - , buf_isLineTerm :: a -> Bool - , buf_isEmpty :: a -> Bool - } - -instance Eq (BufferOp a) where - _ == _ = False - --- | @strictBufferOp@ is the 'BufferOp' definition over @ByteString@s, --- the non-lazy kind. -strictBufferOp :: BufferOp Strict.ByteString -strictBufferOp = - BufferOp - { buf_hGet = Strict.hGet - , buf_hGetContents = Strict.hGetContents - , buf_hPut = Strict.hPut - , buf_hGetLine = Strict.hGetLine - , buf_append = Strict.append - , buf_concat = Strict.concat - , buf_fromStr = Strict.pack - , buf_toStr = Strict.unpack - , buf_snoc = Strict.snoc - , buf_splitAt = Strict.splitAt - , buf_span = Strict.span - , buf_empty = Strict.empty - , buf_isLineTerm = \ b -> Strict.length b == 2 && p_crlf == b || - Strict.length b == 1 && p_lf == b - , buf_isEmpty = Strict.null - } - where - p_crlf = Strict.pack crlf - p_lf = Strict.pack lf - --- | @lazyBufferOp@ is the 'BufferOp' definition over @ByteString@s, --- the non-strict kind. 
-lazyBufferOp :: BufferOp Lazy.ByteString -lazyBufferOp = - BufferOp - { buf_hGet = Lazy.hGet - , buf_hGetContents = Lazy.hGetContents - , buf_hPut = Lazy.hPut - , buf_hGetLine = \ h -> Strict.hGetLine h >>= \ l -> return (Lazy.fromChunks [l]) - , buf_append = Lazy.append - , buf_concat = Lazy.concat - , buf_fromStr = Lazy.pack - , buf_toStr = Lazy.unpack - , buf_snoc = Lazy.snoc - , buf_splitAt = \ i x -> Lazy.splitAt (fromIntegral i) x - , buf_span = Lazy.span - , buf_empty = Lazy.empty - , buf_isLineTerm = \ b -> Lazy.length b == 2 && p_crlf == b || - Lazy.length b == 1 && p_lf == b - , buf_isEmpty = Lazy.null - } - where - p_crlf = Lazy.pack crlf - p_lf = Lazy.pack lf - --- | @stringBufferOp@ is the 'BufferOp' definition over @String@s. --- It is defined in terms of @strictBufferOp@ operations, --- unpacking/converting to @String@ when needed. -stringBufferOp :: BufferOp String -stringBufferOp =BufferOp - { buf_hGet = \ h n -> buf_hGet strictBufferOp h n >>= return . Strict.unpack - , buf_hGetContents = \ h -> buf_hGetContents strictBufferOp h >>= return . Strict.unpack - , buf_hPut = \ h s -> buf_hPut strictBufferOp h (Strict.pack s) - , buf_hGetLine = \ h -> buf_hGetLine strictBufferOp h >>= return . 
Strict.unpack - , buf_append = (++) - , buf_concat = concat - , buf_fromStr = id - , buf_toStr = id - , buf_snoc = \ a x -> a ++ [toEnum (fromIntegral x)] - , buf_splitAt = splitAt - , buf_span = \ p a -> - case Strict.span p (Strict.pack a) of - (x,y) -> (Strict.unpack x, Strict.unpack y) - , buf_empty = [] - , buf_isLineTerm = \ b -> b == crlf || b == lf - , buf_isEmpty = null - } - diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Network/HTTP/Auth.hs cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Network/HTTP/Auth.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Network/HTTP/Auth.hs 2014-12-18 21:12:40.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Network/HTTP/Auth.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,221 +0,0 @@ -{-# LANGUAGE CPP #-} ------------------------------------------------------------------------------ --- | --- Module : Network.HTTP.Auth --- Copyright : See LICENSE file --- License : BSD --- --- Maintainer : Ganesh Sittampalam --- Stability : experimental --- Portability : non-portable (not tested) --- --- Representing HTTP Auth values in Haskell. --- Right now, it contains mostly functionality needed by 'Network.Browser'. --- ------------------------------------------------------------------------------ -module Network.HTTP.Auth - ( Authority(..) - , Algorithm(..) - , Challenge(..) - , Qop(..) - - , headerToChallenge -- :: URI -> Header -> Maybe Challenge - , withAuthority -- :: Authority -> Request ty -> String - ) where - -import Network.URI -import Network.HTTP.Base -import Network.HTTP.Utils -import Network.HTTP.Headers ( Header(..) 
) -import qualified Network.HTTP.MD5Aux as MD5 (md5s, Str(Str)) -import qualified Network.HTTP.Base64 as Base64 (encode) -import Text.ParserCombinators.Parsec - ( Parser, char, many, many1, satisfy, parse, spaces, sepBy1 ) - -import Data.Char -import Data.Maybe -import Data.Word ( Word8 ) - --- | @Authority@ specifies the HTTP Authentication method to use for --- a given domain/realm; @Basic@ or @Digest@. -data Authority - = AuthBasic { auRealm :: String - , auUsername :: String - , auPassword :: String - , auSite :: URI - } - | AuthDigest{ auRealm :: String - , auUsername :: String - , auPassword :: String - , auNonce :: String - , auAlgorithm :: Maybe Algorithm - , auDomain :: [URI] - , auOpaque :: Maybe String - , auQop :: [Qop] - } - - -data Challenge - = ChalBasic { chRealm :: String } - | ChalDigest { chRealm :: String - , chDomain :: [URI] - , chNonce :: String - , chOpaque :: Maybe String - , chStale :: Bool - , chAlgorithm ::Maybe Algorithm - , chQop :: [Qop] - } - --- | @Algorithm@ controls the digest algorithm to, @MD5@ or @MD5Session@. -data Algorithm = AlgMD5 | AlgMD5sess - deriving(Eq) - -instance Show Algorithm where - show AlgMD5 = "md5" - show AlgMD5sess = "md5-sess" - --- | -data Qop = QopAuth | QopAuthInt - deriving(Eq,Show) - --- | @withAuthority auth req@ generates a credentials value from the @auth@ 'Authority', --- in the context of the given request. --- --- If a client nonce was to be used then this function might need to be of type ... 
-> BrowserAction String -withAuthority :: Authority -> Request ty -> String -withAuthority a rq = case a of - AuthBasic{} -> "Basic " ++ base64encode (auUsername a ++ ':' : auPassword a) - AuthDigest{} -> - "Digest " ++ - concat [ "username=" ++ quo (auUsername a) - , ",realm=" ++ quo (auRealm a) - , ",nonce=" ++ quo (auNonce a) - , ",uri=" ++ quo digesturi - , ",response=" ++ quo rspdigest - -- plus optional stuff: - , fromMaybe "" (fmap (\ alg -> ",algorithm=" ++ quo (show alg)) (auAlgorithm a)) - , fromMaybe "" (fmap (\ o -> ",opaque=" ++ quo o) (auOpaque a)) - , if null (auQop a) then "" else ",qop=auth" - ] - where - quo s = '"':s ++ "\"" - - rspdigest = map toLower (kd (md5 a1) (noncevalue ++ ":" ++ md5 a2)) - - a1, a2 :: String - a1 = auUsername a ++ ":" ++ auRealm a ++ ":" ++ auPassword a - - {- - If the "qop" directive's value is "auth" or is unspecified, then A2 - is: - A2 = Method ":" digest-uri-value - If the "qop" value is "auth-int", then A2 is: - A2 = Method ":" digest-uri-value ":" H(entity-body) - -} - a2 = show (rqMethod rq) ++ ":" ++ digesturi - - digesturi = show (rqURI rq) - noncevalue = auNonce a - -type Octet = Word8 - --- FIXME: these probably only work right for latin-1 strings -stringToOctets :: String -> [Octet] -stringToOctets = map (fromIntegral . fromEnum) - -base64encode :: String -> String -base64encode = Base64.encode . stringToOctets - -md5 :: String -> String -md5 = MD5.md5s . MD5.Str - -kd :: String -> String -> String -kd a b = md5 (a ++ ":" ++ b) - - - - --- | @headerToChallenge base www_auth@ tries to convert the @WWW-Authenticate@ header --- @www_auth@ into a 'Challenge' value. 
-headerToChallenge :: URI -> Header -> Maybe Challenge -headerToChallenge baseURI (Header _ str) = - case parse challenge "" str of - Left{} -> Nothing - Right (name,props) -> case name of - "basic" -> mkBasic props - "digest" -> mkDigest props - _ -> Nothing - where - challenge :: Parser (String,[(String,String)]) - challenge = - do { nme <- word - ; spaces - ; pps <- cprops - ; return (map toLower nme,pps) - } - - cprops = sepBy1 cprop comma - - comma = do { spaces ; _ <- char ',' ; spaces } - - cprop = - do { nm <- word - ; _ <- char '=' - ; val <- quotedstring - ; return (map toLower nm,val) - } - - mkBasic, mkDigest :: [(String,String)] -> Maybe Challenge - - mkBasic params = fmap ChalBasic (lookup "realm" params) - - mkDigest params = - -- with Maybe monad - do { r <- lookup "realm" params - ; n <- lookup "nonce" params - ; return $ - ChalDigest { chRealm = r - , chDomain = (annotateURIs - $ map parseURI - $ words - $ fromMaybe [] - $ lookup "domain" params) - , chNonce = n - , chOpaque = lookup "opaque" params - , chStale = "true" == (map toLower - $ fromMaybe "" (lookup "stale" params)) - , chAlgorithm= readAlgorithm (fromMaybe "MD5" $ lookup "algorithm" params) - , chQop = readQop (fromMaybe "" $ lookup "qop" params) - } - } - - annotateURIs :: [Maybe URI] -> [URI] -#if MIN_VERSION_network(2,4,0) - annotateURIs = map (`relativeTo` baseURI) . catMaybes -#else - annotateURIs = (map (\u -> fromMaybe u (u `relativeTo` baseURI))) . catMaybes -#endif - - -- Change These: - readQop :: String -> [Qop] - readQop = catMaybes . (map strToQop) . (splitBy ',') - - strToQop qs = case map toLower (trim qs) of - "auth" -> Just QopAuth - "auth-int" -> Just QopAuthInt - _ -> Nothing - - readAlgorithm astr = case map toLower (trim astr) of - "md5" -> Just AlgMD5 - "md5-sess" -> Just AlgMD5sess - _ -> Nothing - -word, quotedstring :: Parser String -quotedstring = - do { _ <- char '"' -- " - ; str <- many (satisfy $ not . 
(=='"')) - ; _ <- char '"' - ; return str - } - -word = many1 (satisfy (\x -> isAlphaNum x || x=='_' || x=='.' || x=='-' || x==':')) diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Network/HTTP/Base64.hs cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Network/HTTP/Base64.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Network/HTTP/Base64.hs 2014-12-18 21:12:40.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Network/HTTP/Base64.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,282 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Codec.Binary.Base64 --- Copyright : (c) Dominic Steinitz 2005, Warrick Gray 2002 --- License : BSD-style (see the file ReadMe.tex) --- --- Maintainer : dominic.steinitz@blueyonder.co.uk --- Stability : experimental --- Portability : portable --- --- Base64 encoding and decoding functions provided by Warwick Gray. --- See --- and . --- ------------------------------------------------------------------------------ - -module Network.HTTP.Base64 - ( encode - , decode - , chop72 - , Octet - ) where - -{------------------------------------------------------------------------ -This is what RFC2045 had to say: - -6.8. Base64 Content-Transfer-Encoding - - The Base64 Content-Transfer-Encoding is designed to represent - arbitrary sequences of octets in a form that need not be humanly - readable. The encoding and decoding algorithms are simple, but the - encoded data are consistently only about 33 percent larger than the - unencoded data. This encoding is virtually identical to the one used - in Privacy Enhanced Mail (PEM) applications, as defined in RFC 1421. - - A 65-character subset of US-ASCII is used, enabling 6 bits to be - represented per printable character. (The extra 65th character, "=", - is used to signify a special processing function.) 
- - NOTE: This subset has the important property that it is represented - identically in all versions of ISO 646, including US-ASCII, and all - characters in the subset are also represented identically in all - versions of EBCDIC. Other popular encodings, such as the encoding - used by the uuencode utility, Macintosh binhex 4.0 [RFC-1741], and - the base85 encoding specified as part of Level 2 PostScript, do not - share these properties, and thus do not fulfill the portability - requirements a binary transport encoding for mail must meet. - - The encoding process represents 24-bit groups of input bits as output - strings of 4 encoded characters. Proceeding from left to right, a - 24-bit input group is formed by concatenating 3 8bit input groups. - These 24 bits are then treated as 4 concatenated 6-bit groups, each - of which is translated into a single digit in the base64 alphabet. - When encoding a bit stream via the base64 encoding, the bit stream - must be presumed to be ordered with the most-significant-bit first. - That is, the first bit in the stream will be the high-order bit in - the first 8bit byte, and the eighth bit will be the low-order bit in - the first 8bit byte, and so on. - - Each 6-bit group is used as an index into an array of 64 printable - characters. The character referenced by the index is placed in the - output string. These characters, identified in Table 1, below, are - selected so as to be universally representable, and the set excludes - characters with particular significance to SMTP (e.g., ".", CR, LF) - and to the multipart boundary delimiters defined in RFC 2046 (e.g., - "-"). 
- - - - Table 1: The Base64 Alphabet - - Value Encoding Value Encoding Value Encoding Value Encoding - 0 A 17 R 34 i 51 z - 1 B 18 S 35 j 52 0 - 2 C 19 T 36 k 53 1 - 3 D 20 U 37 l 54 2 - 4 E 21 V 38 m 55 3 - 5 F 22 W 39 n 56 4 - 6 G 23 X 40 o 57 5 - 7 H 24 Y 41 p 58 6 - 8 I 25 Z 42 q 59 7 - 9 J 26 a 43 r 60 8 - 10 K 27 b 44 s 61 9 - 11 L 28 c 45 t 62 + - 12 M 29 d 46 u 63 / - 13 N 30 e 47 v - 14 O 31 f 48 w (pad) = - 15 P 32 g 49 x - 16 Q 33 h 50 y - - The encoded output stream must be represented in lines of no more - than 76 characters each. All line breaks or other characters not - found in Table 1 must be ignored by decoding software. In base64 - data, characters other than those in Table 1, line breaks, and other - white space probably indicate a transmission error, about which a - warning message or even a message rejection might be appropriate - under some circumstances. - - Special processing is performed if fewer than 24 bits are available - at the end of the data being encoded. A full encoding quantum is - always completed at the end of a body. When fewer than 24 input bits - are available in an input group, zero bits are added (on the right) - to form an integral number of 6-bit groups. Padding at the end of - the data is performed using the "=" character. Since all base64 - input is an integral number of octets, only the following cases can - arise: (1) the final quantum of encoding input is an integral - multiple of 24 bits; here, the final unit of encoded output will be - an integral multiple of 4 characters with no "=" padding, (2) the - final quantum of encoding input is exactly 8 bits; here, the final - unit of encoded output will be two characters followed by two "=" - padding characters, or (3) the final quantum of encoding input is - exactly 16 bits; here, the final unit of encoded output will be three - characters followed by one "=" padding character. 
- - Because it is used only for padding at the end of the data, the - occurrence of any "=" characters may be taken as evidence that the - end of the data has been reached (without truncation in transit). No - such assurance is possible, however, when the number of octets - transmitted was a multiple of three and no "=" characters are - present. - - Any characters outside of the base64 alphabet are to be ignored in - base64-encoded data. - - Care must be taken to use the proper octets for line breaks if base64 - encoding is applied directly to text material that has not been - converted to canonical form. In particular, text line breaks must be - converted into CRLF sequences prior to base64 encoding. The - important thing to note is that this may be done directly by the - encoder rather than in a prior canonicalization step in some - implementations. - - NOTE: There is no need to worry about quoting potential boundary - delimiters within base64-encoded bodies within multipart entities - because no hyphen characters are used in the base64 encoding. - -----------------------------------------------------------------------------} - -{- - -The following properties should hold: - - decode . encode = id - decode . chop72 . encode = id - -I.E. Both "encode" and "chop72 . encode" are valid methods of encoding input, -the second variation corresponds better with the RFC above, but outside of -MIME applications might be undesireable. - - -But: The Haskell98 Char type is at least 16bits (and often 32), these implementations assume only - 8 significant bits, which is more than enough for US-ASCII. 
--} - - -import Data.Array (Array, array, (!)) -import Data.Bits (shiftL, shiftR, (.&.), (.|.)) -import Data.Char (chr, ord) -import Data.Word (Word8) - -type Octet = Word8 - -encodeArray :: Array Int Char -encodeArray = array (0,64) - [ (0,'A'), (1,'B'), (2,'C'), (3,'D'), (4,'E'), (5,'F') - , (6,'G'), (7,'H'), (8,'I'), (9,'J'), (10,'K'), (11,'L') - , (12,'M'), (13,'N'), (14,'O'), (15,'P'), (16,'Q'), (17,'R') - , (18,'S'), (19,'T'), (20,'U'), (21,'V'), (22,'W'), (23,'X') - , (24,'Y'), (25,'Z'), (26,'a'), (27,'b'), (28,'c'), (29,'d') - , (30,'e'), (31,'f'), (32,'g'), (33,'h'), (34,'i'), (35,'j') - , (36,'k'), (37,'l'), (38,'m'), (39,'n'), (40,'o'), (41,'p') - , (42,'q'), (43,'r'), (44,'s'), (45,'t'), (46,'u'), (47,'v') - , (48,'w'), (49,'x'), (50,'y'), (51,'z'), (52,'0'), (53,'1') - , (54,'2'), (55,'3'), (56,'4'), (57,'5'), (58,'6'), (59,'7') - , (60,'8'), (61,'9'), (62,'+'), (63,'/') ] - - --- Convert between 4 base64 (6bits ea) integers and 1 ordinary integer (32 bits) --- clearly the upmost/leftmost 8 bits of the answer are 0. --- Hack Alert: In the last entry of the answer, the upper 8 bits encode --- the integer number of 6bit groups encoded in that integer, ie 1, 2, 3. --- 0 represents a 4 :( -int4_char3 :: [Int] -> [Char] -int4_char3 (a:b:c:d:t) = - let n = (a `shiftL` 18 .|. b `shiftL` 12 .|. c `shiftL` 6 .|. d) - in (chr (n `shiftR` 16 .&. 0xff)) - : (chr (n `shiftR` 8 .&. 0xff)) - : (chr (n .&. 0xff)) : int4_char3 t - -int4_char3 [a,b,c] = - let n = (a `shiftL` 18 .|. b `shiftL` 12 .|. c `shiftL` 6) - in [ (chr (n `shiftR` 16 .&. 0xff)) - , (chr (n `shiftR` 8 .&. 0xff)) ] - -int4_char3 [a,b] = - let n = (a `shiftL` 18 .|. b `shiftL` 12) - in [ (chr (n `shiftR` 16 .&. 0xff)) ] - -int4_char3 [_] = error "Network.HTTP.Base64.int4_char3: impossible number of Ints." - -int4_char3 [] = [] - - - - --- Convert triplets of characters to --- 4 base64 integers. 
The last entries --- in the list may not produce 4 integers, --- a trailing 2 character group gives 3 integers, --- while a trailing single character gives 2 integers. -char3_int4 :: [Char] -> [Int] -char3_int4 (a:b:c:t) - = let n = (ord a `shiftL` 16 .|. ord b `shiftL` 8 .|. ord c) - in (n `shiftR` 18 .&. 0x3f) : (n `shiftR` 12 .&. 0x3f) : (n `shiftR` 6 .&. 0x3f) : (n .&. 0x3f) : char3_int4 t - -char3_int4 [a,b] - = let n = (ord a `shiftL` 16 .|. ord b `shiftL` 8) - in [ (n `shiftR` 18 .&. 0x3f) - , (n `shiftR` 12 .&. 0x3f) - , (n `shiftR` 6 .&. 0x3f) ] - -char3_int4 [a] - = let n = (ord a `shiftL` 16) - in [(n `shiftR` 18 .&. 0x3f),(n `shiftR` 12 .&. 0x3f)] - -char3_int4 [] = [] - - --- Retrieve base64 char, given an array index integer in the range [0..63] -enc1 :: Int -> Char -enc1 ch = encodeArray!ch - - --- | Cut up a string into 72 char lines, each line terminated by CRLF. - -chop72 :: String -> String -chop72 str = let (bgn,end) = splitAt 70 str - in if null end then bgn else "\r\n" ++ chop72 end - - --- Pads a base64 code to a multiple of 4 characters, using the special --- '=' character. -quadruplets :: [Char] -> [Char] -quadruplets (a:b:c:d:t) = a:b:c:d:quadruplets t -quadruplets [a,b,c] = [a,b,c,'='] -- 16bit tail unit -quadruplets [a,b] = [a,b,'=','='] -- 8bit tail unit -quadruplets [_] = error "Network.HTTP.Base64.quadruplets: impossible number of characters." -quadruplets [] = [] -- 24bit tail unit - - -enc :: [Int] -> [Char] -enc = quadruplets . map enc1 - - -dcd :: String -> [Int] -dcd [] = [] -dcd (h:t) - | h <= 'Z' && h >= 'A' = ord h - ord 'A' : dcd t - | h >= '0' && h <= '9' = ord h - ord '0' + 52 : dcd t - | h >= 'a' && h <= 'z' = ord h - ord 'a' + 26 : dcd t - | h == '+' = 62 : dcd t - | h == '/' = 63 : dcd t - | h == '=' = [] -- terminate data stream - | otherwise = dcd t - - --- Principal encoding and decoding functions. - -encode :: [Octet] -> String -encode = enc . char3_int4 . 
(map (chr .fromIntegral)) - -{- -prop_base64 os = - os == (f . g . h) os - where types = (os :: [Word8]) - f = map (fromIntegral. ord) - g = decode . encode - h = map (chr . fromIntegral) --} - -decode :: String -> [Octet] -decode = (map (fromIntegral . ord)) . int4_char3 . dcd diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Network/HTTP/Base.hs cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Network/HTTP/Base.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Network/HTTP/Base.hs 2014-12-18 21:12:40.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Network/HTTP/Base.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,994 +0,0 @@ -{-# LANGUAGE ScopedTypeVariables #-} ------------------------------------------------------------------------------ --- | --- Module : Network.HTTP.Base --- Copyright : See LICENSE file --- License : BSD --- --- Maintainer : Ganesh Sittampalam --- Stability : experimental --- Portability : non-portable (not tested) --- --- Definitions of @Request@ and @Response@ types along with functions --- for normalizing them. It is assumed to be an internal module; user --- code should, if possible, import @Network.HTTP@ to access the functionality --- that this module provides. --- --- Additionally, the module exports internal functions for working with URLs, --- and for handling the processing of requests and responses coming back. --- ------------------------------------------------------------------------------ -module Network.HTTP.Base - ( - -- ** Constants - httpVersion -- :: String - - -- ** HTTP - , Request(..) - , Response(..) - , RequestMethod(..) - - , Request_String - , Response_String - , HTTPRequest - , HTTPResponse - - -- ** URL Encoding - , urlEncode - , urlDecode - , urlEncodeVars - - -- ** URI authority parsing - , URIAuthority(..) 
- , parseURIAuthority - - -- internal - , uriToAuthorityString -- :: URI -> String - , uriAuthToString -- :: URIAuth -> String - , uriAuthPort -- :: Maybe URI -> URIAuth -> Int - , reqURIAuth -- :: Request ty -> URIAuth - - , parseResponseHead -- :: [String] -> Result ResponseData - , parseRequestHead -- :: [String] -> Result RequestData - - , ResponseNextStep(..) - , matchResponse - , ResponseData - , ResponseCode - , RequestData - - , NormalizeRequestOptions(..) - , defaultNormalizeRequestOptions -- :: NormalizeRequestOptions ty - , RequestNormalizer - - , normalizeRequest -- :: NormalizeRequestOptions ty -> Request ty -> Request ty - - , splitRequestURI - - , getAuth - , normalizeRequestURI - , normalizeHostHeader - , findConnClose - - -- internal export (for the use by Network.HTTP.{Stream,ByteStream} ) - , linearTransfer - , hopefulTransfer - , chunkedTransfer - , uglyDeathTransfer - , readTillEmpty1 - , readTillEmpty2 - - , defaultGETRequest - , defaultGETRequest_ - , mkRequest - , setRequestBody - - , defaultUserAgent - , httpPackageVersion - , libUA {- backwards compatibility, will disappear..soon -} - - , catchIO - , catchIO_ - , responseParseError - - , getRequestVersion - , getResponseVersion - , setRequestVersion - , setResponseVersion - - , failHTTPS - - ) where - -import Network.URI - ( URI(uriAuthority, uriPath, uriScheme) - , URIAuth(URIAuth, uriUserInfo, uriRegName, uriPort) - , parseURIReference - ) - -import Control.Monad ( guard ) -import Control.Monad.Error () -import Data.Bits ( (.&.), (.|.), shiftL, shiftR ) -import Data.Word ( Word8 ) -import Data.Char ( digitToInt, intToDigit, toLower, isDigit, - isAscii, isAlphaNum, ord, chr ) -import Data.List ( partition, find ) -import Data.Maybe ( listToMaybe, fromMaybe ) -import Numeric ( readHex ) - -import Network.Stream -import Network.BufferType ( BufferOp(..), BufferType(..) 
) -import Network.HTTP.Headers -import Network.HTTP.Utils ( trim, crlf, sp, readsOne ) -import qualified Network.HTTP.Base64 as Base64 (encode) - -import Text.Read.Lex (readDecP) -import Text.ParserCombinators.ReadP - ( ReadP, readP_to_S, char, (<++), look, munch, munch1 ) - -import Control.Exception as Exception (catch, IOException) - -import qualified Paths_HTTP as Self (version) -import Data.Version (showVersion) - ------------------------------------------------------------------ ------------------- URI Authority parsing ------------------------ ------------------------------------------------------------------ - -data URIAuthority = URIAuthority { user :: Maybe String, - password :: Maybe String, - host :: String, - port :: Maybe Int - } deriving (Eq,Show) - --- | Parse the authority part of a URL. --- --- > RFC 1732, section 3.1: --- > --- > //:@:/ --- > Some or all of the parts ":@", ":", --- > ":", and "/" may be excluded. -parseURIAuthority :: String -> Maybe URIAuthority -parseURIAuthority s = listToMaybe (map fst (readP_to_S pURIAuthority s)) - - -pURIAuthority :: ReadP URIAuthority -pURIAuthority = do - (u,pw) <- (pUserInfo `before` char '@') - <++ return (Nothing, Nothing) - h <- rfc2732host <++ munch (/=':') - p <- orNothing (char ':' >> readDecP) - look >>= guard . null - return URIAuthority{ user=u, password=pw, host=h, port=p } - --- RFC2732 adds support for '[literal-ipv6-address]' in the host part of a URL -rfc2732host :: ReadP String -rfc2732host = do - _ <- char '[' - res <- munch1 (/=']') - _ <- char ']' - return res - -pUserInfo :: ReadP (Maybe String, Maybe String) -pUserInfo = do - u <- orNothing (munch (`notElem` ":@")) - p <- orNothing (char ':' >> munch (/='@')) - return (u,p) - -before :: Monad m => m a -> m b -> m a -before a b = a >>= \x -> b >> return x - -orNothing :: ReadP a -> ReadP (Maybe a) -orNothing p = fmap Just p <++ return Nothing - --- This function duplicates old Network.URI.authority behaviour. 
-uriToAuthorityString :: URI -> String -uriToAuthorityString u = maybe "" uriAuthToString (uriAuthority u) - -uriAuthToString :: URIAuth -> String -uriAuthToString ua = - concat [ uriUserInfo ua - , uriRegName ua - , uriPort ua - ] - -uriAuthPort :: Maybe URI -> URIAuth -> Int -uriAuthPort mbURI u = - case uriPort u of - (':':s) -> readsOne id (default_port mbURI) s - _ -> default_port mbURI - where - default_port Nothing = default_http - default_port (Just url) = - case map toLower $ uriScheme url of - "http:" -> default_http - "https:" -> default_https - -- todo: refine - _ -> default_http - - default_http = 80 - default_https = 443 - -failHTTPS :: Monad m => URI -> m () -failHTTPS uri - | map toLower (uriScheme uri) == "https:" = fail "https not supported" - | otherwise = return () - --- Fish out the authority from a possibly normalized Request, i.e., --- the information may either be in the request's URI or inside --- the Host: header. -reqURIAuth :: Request ty -> URIAuth -reqURIAuth req = - case uriAuthority (rqURI req) of - Just ua -> ua - _ -> case lookupHeader HdrHost (rqHeaders req) of - Nothing -> error ("reqURIAuth: no URI authority for: " ++ show req) - Just h -> - case toHostPort h of - (ht,p) -> URIAuth { uriUserInfo = "" - , uriRegName = ht - , uriPort = p - } - where - -- Note: just in case you're wondering..the convention is to include the ':' - -- in the port part.. - toHostPort h = break (==':') h - ------------------------------------------------------------------ ------------------- HTTP Messages -------------------------------- ------------------------------------------------------------------ - - --- Protocol version -httpVersion :: String -httpVersion = "HTTP/1.1" - - --- | The HTTP request method, to be used in the 'Request' object. --- We are missing a few of the stranger methods, but these are --- not really necessary until we add full TLS. 
-data RequestMethod = HEAD | PUT | GET | POST | DELETE | OPTIONS | TRACE | CONNECT | Custom String - deriving(Eq) - -instance Show RequestMethod where - show x = - case x of - HEAD -> "HEAD" - PUT -> "PUT" - GET -> "GET" - POST -> "POST" - DELETE -> "DELETE" - OPTIONS -> "OPTIONS" - TRACE -> "TRACE" - CONNECT -> "CONNECT" - Custom c -> c - -rqMethodMap :: [(String, RequestMethod)] -rqMethodMap = [("HEAD", HEAD), - ("PUT", PUT), - ("GET", GET), - ("POST", POST), - ("DELETE", DELETE), - ("OPTIONS", OPTIONS), - ("TRACE", TRACE), - ("CONNECT", CONNECT)] - --- --- for backwards-ish compatibility; suggest --- migrating to new Req/Resp by adding type param. --- -type Request_String = Request String -type Response_String = Response String - --- Hmm..I really want to use these for the record --- type, but it will upset codebases wanting to --- migrate (and live with using pre-HTTPbis versions.) -type HTTPRequest a = Request a -type HTTPResponse a = Response a - --- | An HTTP Request. --- The 'Show' instance of this type is used for message serialisation, --- which means no body data is output. -data Request a = - Request { rqURI :: URI -- ^ might need changing in future - -- 1) to support '*' uri in OPTIONS request - -- 2) transparent support for both relative - -- & absolute uris, although this should - -- already work (leave scheme & host parts empty). - , rqMethod :: RequestMethod - , rqHeaders :: [Header] - , rqBody :: a - } - --- Notice that request body is not included, --- this show function is used to serialise --- a request for the transport link, we send --- the body separately where possible. 
-instance Show (Request a) where - show req@(Request u m h _) = - show m ++ sp ++ alt_uri ++ sp ++ ver ++ crlf - ++ foldr (++) [] (map show (dropHttpVersion h)) ++ crlf - where - ver = fromMaybe httpVersion (getRequestVersion req) - alt_uri = show $ if null (uriPath u) || head (uriPath u) /= '/' - then u { uriPath = '/' : uriPath u } - else u - -instance HasHeaders (Request a) where - getHeaders = rqHeaders - setHeaders rq hdrs = rq { rqHeaders=hdrs } - --- | For easy pattern matching, HTTP response codes @xyz@ are --- represented as @(x,y,z)@. -type ResponseCode = (Int,Int,Int) - --- | @ResponseData@ contains the head of a response payload; --- HTTP response code, accompanying text description + header --- fields. -type ResponseData = (ResponseCode,String,[Header]) - --- | @RequestData@ contains the head of a HTTP request; method, --- its URL along with the auxillary/supporting header data. -type RequestData = (RequestMethod,URI,[Header]) - --- | An HTTP Response. --- The 'Show' instance of this type is used for message serialisation, --- which means no body data is output, additionally the output will --- show an HTTP version of 1.1 instead of the actual version returned --- by a server. 
-data Response a = - Response { rspCode :: ResponseCode - , rspReason :: String - , rspHeaders :: [Header] - , rspBody :: a - } - --- This is an invalid representation of a received response, --- since we have made the assumption that all responses are HTTP/1.1 -instance Show (Response a) where - show rsp@(Response (a,b,c) reason headers _) = - ver ++ ' ' : map intToDigit [a,b,c] ++ ' ' : reason ++ crlf - ++ foldr (++) [] (map show (dropHttpVersion headers)) ++ crlf - where - ver = fromMaybe httpVersion (getResponseVersion rsp) - -instance HasHeaders (Response a) where - getHeaders = rspHeaders - setHeaders rsp hdrs = rsp { rspHeaders=hdrs } - - ------------------------------------------------------------------- ------------------- Request Building ------------------------------ ------------------------------------------------------------------- - --- | Deprecated. Use 'defaultUserAgent' -libUA :: String -libUA = "hs-HTTP-4000.0.9" -{-# DEPRECATED libUA "Use defaultUserAgent instead (but note the user agent name change)" #-} - --- | A default user agent string. The string is @\"haskell-HTTP/$version\"@ --- where @$version@ is the version of this HTTP package. --- -defaultUserAgent :: String -defaultUserAgent = "haskell-HTTP/" ++ httpPackageVersion - --- | The version of this HTTP package as a string, e.g. @\"4000.1.2\"@. This --- may be useful to include in a user agent string so that you can determine --- from server logs what version of this package HTTP clients are using. --- This can be useful for tracking down HTTP compatibility quirks. --- -httpPackageVersion :: String -httpPackageVersion = showVersion Self.version - -defaultGETRequest :: URI -> Request_String -defaultGETRequest uri = defaultGETRequest_ uri - -defaultGETRequest_ :: BufferType a => URI -> Request a -defaultGETRequest_ uri = mkRequest GET uri - --- | 'mkRequest method uri' constructs a well formed --- request for the given HTTP method and URI. 
It does not --- normalize the URI for the request _nor_ add the required --- Host: header. That is done either explicitly by the user --- or when requests are normalized prior to transmission. -mkRequest :: BufferType ty => RequestMethod -> URI -> Request ty -mkRequest meth uri = req - where - req = - Request { rqURI = uri - , rqBody = empty - , rqHeaders = [ Header HdrContentLength "0" - , Header HdrUserAgent defaultUserAgent - ] - , rqMethod = meth - } - - empty = buf_empty (toBufOps req) - --- set rqBody, Content-Type and Content-Length headers. -setRequestBody :: Request_String -> (String, String) -> Request_String -setRequestBody req (typ, body) = req' { rqBody=body } - where - req' = replaceHeader HdrContentType typ . - replaceHeader HdrContentLength (show $ length body) $ - req - -{- - -- stub out the user info. - updAuth = fmap (\ x -> x{uriUserInfo=""}) (uriAuthority uri) - - withHost = - case uriToAuthorityString uri{uriAuthority=updAuth} of - "" -> id - h -> ((Header HdrHost h):) - - uri_req - | forProxy = uri - | otherwise = snd (splitRequestURI uri) --} - - -toBufOps :: BufferType a => Request a -> BufferOp a -toBufOps _ = bufferOps - ------------------------------------------------------------------ ------------------- Parsing -------------------------------------- ------------------------------------------------------------------ - --- Parsing a request -parseRequestHead :: [String] -> Result RequestData -parseRequestHead [] = Left ErrorClosed -parseRequestHead (com:hdrs) = do - (version,rqm,uri) <- requestCommand com (words com) - hdrs' <- parseHeaders hdrs - return (rqm,uri,withVer version hdrs') - where - withVer [] hs = hs - withVer (h:_) hs = withVersion h hs - - requestCommand l _yes@(rqm:uri:version) = - case (parseURIReference uri, lookup rqm rqMethodMap) of - (Just u, Just r) -> return (version,r,u) - (Just u, Nothing) -> return (version,Custom rqm,u) - _ -> parse_err l - requestCommand l _ - | null l = failWith ErrorClosed - | otherwise = 
parse_err l - - parse_err l = responseParseError "parseRequestHead" - ("Request command line parse failure: " ++ l) - --- Parsing a response -parseResponseHead :: [String] -> Result ResponseData -parseResponseHead [] = failWith ErrorClosed -parseResponseHead (sts:hdrs) = do - (version,code,reason) <- responseStatus sts (words sts) - hdrs' <- parseHeaders hdrs - return (code,reason, withVersion version hdrs') - where - responseStatus _l _yes@(version:code:reason) = - return (version,match code,concatMap (++" ") reason) - responseStatus l _no - | null l = failWith ErrorClosed -- an assumption - | otherwise = parse_err l - - parse_err l = - responseParseError - "parseResponseHead" - ("Response status line parse failure: " ++ l) - - match [a,b,c] = (digitToInt a, - digitToInt b, - digitToInt c) - match _ = (-1,-1,-1) -- will create appropriate behaviour - --- To avoid changing the @RequestData@ and @ResponseData@ types --- just for this (and the upstream backwards compat. woes that --- will result in), encode version info as a custom header. --- Used by 'parseResponseData' and 'parseRequestData'. --- --- Note: the Request and Response types do not currently represent --- the version info explicitly in their record types. You have to use --- {get,set}{Request,Response}Version for that. -withVersion :: String -> [Header] -> [Header] -withVersion v hs - | v == httpVersion = hs -- don't bother adding it if the default. - | otherwise = (Header (HdrCustom "X-HTTP-Version") v) : hs - --- | @getRequestVersion req@ returns the HTTP protocol version of --- the request @req@. If @Nothing@, the default 'httpVersion' can be assumed. -getRequestVersion :: Request a -> Maybe String -getRequestVersion r = getHttpVersion r - --- | @setRequestVersion v req@ returns a new request, identical to --- @req@, but with its HTTP version set to @v@. 
-setRequestVersion :: String -> Request a -> Request a -setRequestVersion s r = setHttpVersion r s - - --- | @getResponseVersion rsp@ returns the HTTP protocol version of --- the response @rsp@. If @Nothing@, the default 'httpVersion' can be --- assumed. -getResponseVersion :: Response a -> Maybe String -getResponseVersion r = getHttpVersion r - --- | @setResponseVersion v rsp@ returns a new response, identical to --- @rsp@, but with its HTTP version set to @v@. -setResponseVersion :: String -> Response a -> Response a -setResponseVersion s r = setHttpVersion r s - --- internal functions for accessing HTTP-version info in --- requests and responses. Not exported as it exposes ho --- version info is represented internally. - -getHttpVersion :: HasHeaders a => a -> Maybe String -getHttpVersion r = - fmap toVersion $ - find isHttpVersion $ - getHeaders r - where - toVersion (Header _ x) = x - -setHttpVersion :: HasHeaders a => a -> String -> a -setHttpVersion r v = - setHeaders r $ - withVersion v $ - dropHttpVersion $ - getHeaders r - -dropHttpVersion :: [Header] -> [Header] -dropHttpVersion hs = filter (not.isHttpVersion) hs - -isHttpVersion :: Header -> Bool -isHttpVersion (Header (HdrCustom "X-HTTP-Version") _) = True -isHttpVersion _ = False - - - ------------------------------------------------------------------ ------------------- HTTP Send / Recv ---------------------------------- ------------------------------------------------------------------ - -data ResponseNextStep - = Continue - | Retry - | Done - | ExpectEntity - | DieHorribly String - -matchResponse :: RequestMethod -> ResponseCode -> ResponseNextStep -matchResponse rqst rsp = - case rsp of - (1,0,0) -> Continue - (1,0,1) -> Done -- upgrade to TLS - (1,_,_) -> Continue -- default - (2,0,4) -> Done - (2,0,5) -> Done - (2,_,_) -> ans - (3,0,4) -> Done - (3,0,5) -> Done - (3,_,_) -> ans - (4,1,7) -> Retry -- Expectation failed - (4,_,_) -> ans - (5,_,_) -> ans - (a,b,c) -> DieHorribly ("Response code " 
++ map intToDigit [a,b,c] ++ " not recognised") - where - ans | rqst == HEAD = Done - | otherwise = ExpectEntity - - - ------------------------------------------------------------------ ------------------- A little friendly funtionality --------------- ------------------------------------------------------------------ - - -{- - I had a quick look around but couldn't find any RFC about - the encoding of data on the query string. I did find an - IETF memo, however, so this is how I justify the urlEncode - and urlDecode methods. - - Doc name: draft-tiwari-appl-wxxx-forms-01.txt (look on www.ietf.org) - - Reserved chars: ";", "/", "?", ":", "@", "&", "=", "+", ",", and "$" are reserved. - Unwise: "{" | "}" | "|" | "\" | "^" | "[" | "]" | "`" - URI delims: "<" | ">" | "#" | "%" | <"> - Unallowed ASCII: - - Also unallowed: any non-us-ascii character - - Escape method: char -> '%' a b where a, b :: Hex digits --} - -replacement_character :: Char -replacement_character = '\xfffd' - --- | Encode a single Haskell Char to a list of Word8 values, in UTF8 format. --- --- Shamelessly stolen from utf-8string-0.3.7 -encodeChar :: Char -> [Word8] -encodeChar = map fromIntegral . go . ord - where - go oc - | oc <= 0x7f = [oc] - - | oc <= 0x7ff = [ 0xc0 + (oc `shiftR` 6) - , 0x80 + oc .&. 0x3f - ] - - | oc <= 0xffff = [ 0xe0 + (oc `shiftR` 12) - , 0x80 + ((oc `shiftR` 6) .&. 0x3f) - , 0x80 + oc .&. 0x3f - ] - | otherwise = [ 0xf0 + (oc `shiftR` 18) - , 0x80 + ((oc `shiftR` 12) .&. 0x3f) - , 0x80 + ((oc `shiftR` 6) .&. 0x3f) - , 0x80 + oc .&. 
0x3f - ] - --- | Decode a UTF8 string packed into a list of Word8 values, directly to String --- --- Shamelessly stolen from utf-8string-0.3.7 -decode :: [Word8] -> String -decode [ ] = "" -decode (c:cs) - | c < 0x80 = chr (fromEnum c) : decode cs - | c < 0xc0 = replacement_character : decode cs - | c < 0xe0 = multi1 - | c < 0xf0 = multi_byte 2 0xf 0x800 - | c < 0xf8 = multi_byte 3 0x7 0x10000 - | c < 0xfc = multi_byte 4 0x3 0x200000 - | c < 0xfe = multi_byte 5 0x1 0x4000000 - | otherwise = replacement_character : decode cs - where - multi1 = case cs of - c1 : ds | c1 .&. 0xc0 == 0x80 -> - let d = ((fromEnum c .&. 0x1f) `shiftL` 6) .|. fromEnum (c1 .&. 0x3f) - in if d >= 0x000080 then toEnum d : decode ds - else replacement_character : decode ds - _ -> replacement_character : decode cs - - multi_byte :: Int -> Word8 -> Int -> [Char] - multi_byte i mask overlong = aux i cs (fromEnum (c .&. mask)) - where - aux 0 rs acc - | overlong <= acc && acc <= 0x10ffff && - (acc < 0xd800 || 0xdfff < acc) && - (acc < 0xfffe || 0xffff < acc) = chr acc : decode rs - | otherwise = replacement_character : decode rs - - aux n (r:rs) acc - | r .&. 0xc0 == 0x80 = aux (n-1) rs - $ shiftL acc 6 .|. fromEnum (r .&. 
0x3f) - - aux _ rs _ = replacement_character : decode rs - - --- This function is a bit funny because potentially the input String could contain some actual Unicode --- characters (though this shouldn't happen for most use cases), so we have to preserve those characters --- while simultaneously decoding any UTF-8 data -urlDecode :: String -> String -urlDecode = go [] - where - go bs ('%':a:b:rest) = go (fromIntegral (16 * digitToInt a + digitToInt b) : bs) rest - go bs (h:t) | fromEnum h < 256 = go (fromIntegral (fromEnum h) : bs) t -- Treat ASCII as just another byte of UTF-8 - go [] [] = [] - go [] (h:t) = h : go [] t -- h >= 256, so can't be part of any UTF-8 byte sequence - go bs rest = decode (reverse bs) ++ go [] rest - - -urlEncode :: String -> String -urlEncode [] = [] -urlEncode (ch:t) - | (isAscii ch && isAlphaNum ch) || ch `elem` "-_.~" = ch : urlEncode t - | not (isAscii ch) = foldr escape (urlEncode t) (encodeChar ch) - | otherwise = escape (fromIntegral (fromEnum ch)) (urlEncode t) - where - escape b rs = '%':showH (b `div` 16) (showH (b `mod` 16) rs) - - showH :: Word8 -> String -> String - showH x xs - | x <= 9 = to (o_0 + x) : xs - | otherwise = to (o_A + (x-10)) : xs - where - to = toEnum . fromIntegral - fro = fromIntegral . fromEnum - - o_0 = fro '0' - o_A = fro 'A' - --- Encode form variables, useable in either the --- query part of a URI, or the body of a POST request. --- I have no source for this information except experience, --- this sort of encoding worked fine in CGI programming. -urlEncodeVars :: [(String,String)] -> String -urlEncodeVars ((n,v):t) = - let (same,diff) = partition ((==n) . fst) t - in urlEncode n ++ '=' : foldl (\x y -> x ++ ',' : urlEncode y) (urlEncode $ v) (map snd same) - ++ urlEncodeRest diff - where urlEncodeRest [] = [] - urlEncodeRest diff = '&' : urlEncodeVars diff -urlEncodeVars [] = [] - --- | @getAuth req@ fishes out the authority portion of the URL in a request's @Host@ --- header. 
-getAuth :: Monad m => Request ty -> m URIAuthority -getAuth r = - -- ToDo: verify that Network.URI functionality doesn't take care of this (now.) - case parseURIAuthority auth of - Just x -> return x - Nothing -> fail $ "Network.HTTP.Base.getAuth: Error parsing URI authority '" ++ auth ++ "'" - where - auth = maybe (uriToAuthorityString uri) id (findHeader HdrHost r) - uri = rqURI r - -{-# DEPRECATED normalizeRequestURI "Please use Network.HTTP.Base.normalizeRequest instead" #-} -normalizeRequestURI :: Bool{-do close-} -> {-URI-}String -> Request ty -> Request ty -normalizeRequestURI doClose h r = - (if doClose then replaceHeader HdrConnection "close" else id) $ - insertHeaderIfMissing HdrHost h $ - r { rqURI = (rqURI r){ uriScheme = "" - , uriAuthority = Nothing - }} - --- | @NormalizeRequestOptions@ brings together the various defaulting\/normalization options --- over 'Request's. Use 'defaultNormalizeRequestOptions' for the standard selection of option -data NormalizeRequestOptions ty - = NormalizeRequestOptions - { normDoClose :: Bool - , normForProxy :: Bool - , normUserAgent :: Maybe String - , normCustoms :: [RequestNormalizer ty] - } - --- | @RequestNormalizer@ is the shape of a (pure) function that rewrites --- a request into some normalized form. -type RequestNormalizer ty = NormalizeRequestOptions ty -> Request ty -> Request ty - -defaultNormalizeRequestOptions :: NormalizeRequestOptions ty -defaultNormalizeRequestOptions = NormalizeRequestOptions - { normDoClose = False - , normForProxy = False - , normUserAgent = Just defaultUserAgent - , normCustoms = [] - } - --- | @normalizeRequest opts req@ is the entry point to use to normalize your --- request prior to transmission (or other use.) Normalization is controlled --- via the @NormalizeRequestOptions@ record. 
-normalizeRequest :: NormalizeRequestOptions ty - -> Request ty - -> Request ty -normalizeRequest opts req = foldr (\ f -> f opts) req normalizers - where - --normalizers :: [RequestNormalizer ty] - normalizers = - ( normalizeHostURI - : normalizeBasicAuth - : normalizeConnectionClose - : normalizeUserAgent - : normCustoms opts - ) - --- | @normalizeUserAgent ua x req@ augments the request @req@ with --- a @User-Agent: ua@ header if @req@ doesn't already have a --- a @User-Agent:@ set. -normalizeUserAgent :: RequestNormalizer ty -normalizeUserAgent opts req = - case normUserAgent opts of - Nothing -> req - Just ua -> - case findHeader HdrUserAgent req of - Just u | u /= defaultUserAgent -> req - _ -> replaceHeader HdrUserAgent ua req - --- | @normalizeConnectionClose opts req@ sets the header @Connection: close@ --- to indicate one-shot behavior iff @normDoClose@ is @True@. i.e., it then --- _replaces_ any an existing @Connection:@ header in @req@. -normalizeConnectionClose :: RequestNormalizer ty -normalizeConnectionClose opts req - | normDoClose opts = replaceHeader HdrConnection "close" req - | otherwise = req - --- | @normalizeBasicAuth opts req@ sets the header @Authorization: Basic...@ --- if the "user:pass@" part is present in the "http://user:pass@host/path" --- of the URI. If Authorization header was present already it is not replaced. -normalizeBasicAuth :: RequestNormalizer ty -normalizeBasicAuth _ req = - case getAuth req of - Just uriauth -> - case (user uriauth, password uriauth) of - (Just u, Just p) -> - insertHeaderIfMissing HdrAuthorization astr req - where - astr = "Basic " ++ base64encode (u ++ ":" ++ p) - base64encode = Base64.encode . stringToOctets :: String -> String - stringToOctets = map (fromIntegral . fromEnum) :: String -> [Word8] - (_, _) -> req - Nothing ->req - --- | @normalizeHostURI forProxy req@ rewrites your request to have it --- follow the expected formats by the receiving party (proxy or server.) 
--- -normalizeHostURI :: RequestNormalizer ty -normalizeHostURI opts req = - case splitRequestURI uri of - ("",_uri_abs) - | forProxy -> - case findHeader HdrHost req of - Nothing -> req -- no host/authority in sight..not much we can do. - Just h -> req{rqURI=uri{ uriAuthority=Just URIAuth{uriUserInfo="", uriRegName=hst, uriPort=pNum} - , uriScheme=if (null (uriScheme uri)) then "http" else uriScheme uri - }} - where - hst = case span (/='@') user_hst of - (as,'@':bs) -> - case span (/=':') as of - (_,_:_) -> bs - _ -> user_hst - _ -> user_hst - - (user_hst, pNum) = - case span isDigit (reverse h) of - (ds,':':bs) -> (reverse bs, ':':reverse ds) - _ -> (h,"") - | otherwise -> - case findHeader HdrHost req of - Nothing -> req -- no host/authority in sight..not much we can do...complain? - Just{} -> req - (h,uri_abs) - | forProxy -> insertHeaderIfMissing HdrHost h req - | otherwise -> replaceHeader HdrHost h req{rqURI=uri_abs} -- Note: _not_ stubbing out user:pass - where - uri0 = rqURI req - -- stub out the user:pass - uri = uri0{uriAuthority=fmap (\ x -> x{uriUserInfo=""}) (uriAuthority uri0)} - - forProxy = normForProxy opts - -{- Comments re: above rewriting: - RFC 2616, section 5.1.2: - "The most common form of Request-URI is that used to identify a - resource on an origin server or gateway. In this case the absolute - path of the URI MUST be transmitted (see section 3.2.1, abs_path) as - the Request-URI, and the network location of the URI (authority) MUST - be transmitted in a Host header field." - We assume that this is the case, so we take the host name from - the Host header if there is one, otherwise from the request-URI. - Then we make the request-URI an abs_path and make sure that there - is a Host header. --} - -splitRequestURI :: URI -> ({-authority-}String, URI) -splitRequestURI uri = (uriToAuthorityString uri, uri{uriScheme="", uriAuthority=Nothing}) - --- Adds a Host header if one is NOT ALREADY PRESENT.. 
-{-# DEPRECATED normalizeHostHeader "Please use Network.HTTP.Base.normalizeRequest instead" #-} -normalizeHostHeader :: Request ty -> Request ty -normalizeHostHeader rq = - insertHeaderIfMissing HdrHost - (uriToAuthorityString $ rqURI rq) - rq - --- Looks for a "Connection" header with the value "close". --- Returns True when this is found. -findConnClose :: [Header] -> Bool -findConnClose hdrs = - maybe False - (\ x -> map toLower (trim x) == "close") - (lookupHeader HdrConnection hdrs) - --- | Used when we know exactly how many bytes to expect. -linearTransfer :: (Int -> IO (Result a)) -> Int -> IO (Result ([Header],a)) -linearTransfer readBlk n = fmapE (\str -> Right ([],str)) (readBlk n) - --- | Used when nothing about data is known, --- Unfortunately waiting for a socket closure --- causes bad behaviour. Here we just --- take data once and give up the rest. -hopefulTransfer :: BufferOp a - -> IO (Result a) - -> [a] - -> IO (Result ([Header],a)) -hopefulTransfer bufOps readL strs - = readL >>= - either (\v -> return $ Left v) - (\more -> if (buf_isEmpty bufOps more) - then return (Right ([], buf_concat bufOps $ reverse strs)) - else hopefulTransfer bufOps readL (more:strs)) - --- | A necessary feature of HTTP\/1.1 --- Also the only transfer variety likely to --- return any footers. -chunkedTransfer :: BufferOp a - -> IO (Result a) - -> (Int -> IO (Result a)) - -> IO (Result ([Header], a)) -chunkedTransfer bufOps readL readBlk = chunkedTransferC bufOps readL readBlk [] 0 - -chunkedTransferC :: BufferOp a - -> IO (Result a) - -> (Int -> IO (Result a)) - -> [a] - -> Int - -> IO (Result ([Header], a)) -chunkedTransferC bufOps readL readBlk acc n = do - v <- readL - case v of - Left e -> return (Left e) - Right line - | size == 0 -> - -- last chunk read; look for trailing headers.. - fmapE (\ strs -> do - ftrs <- parseHeaders (map (buf_toStr bufOps) strs) - -- insert (computed) Content-Length header. 
- let ftrs' = Header HdrContentLength (show n) : ftrs - return (ftrs',buf_concat bufOps (reverse acc))) - - (readTillEmpty2 bufOps readL []) - - | otherwise -> do - some <- readBlk size - case some of - Left e -> return (Left e) - Right cdata -> do - _ <- readL -- CRLF is mandated after the chunk block; ToDo: check that the line is empty.? - chunkedTransferC bufOps readL readBlk (cdata:acc) (n+size) - where - size - | buf_isEmpty bufOps line = 0 - | otherwise = - case readHex (buf_toStr bufOps line) of - (hx,_):_ -> hx - _ -> 0 - --- | Maybe in the future we will have a sensible thing --- to do here, at that time we might want to change --- the name. -uglyDeathTransfer :: String -> IO (Result ([Header],a)) -uglyDeathTransfer loc = return (responseParseError loc "Unknown Transfer-Encoding") - --- | Remove leading crlfs then call readTillEmpty2 (not required by RFC) -readTillEmpty1 :: BufferOp a - -> IO (Result a) - -> IO (Result [a]) -readTillEmpty1 bufOps readL = - readL >>= - either (return . Left) - (\ s -> - if buf_isLineTerm bufOps s - then readTillEmpty1 bufOps readL - else readTillEmpty2 bufOps readL [s]) - --- | Read lines until an empty line (CRLF), --- also accepts a connection close as end of --- input, which is not an HTTP\/1.1 compliant --- thing to do - so probably indicates an --- error condition. -readTillEmpty2 :: BufferOp a - -> IO (Result a) - -> [a] - -> IO (Result [a]) -readTillEmpty2 bufOps readL list = - readL >>= - either (return . Left) - (\ s -> - if buf_isLineTerm bufOps s || buf_isEmpty bufOps s - then return (Right $ reverse (s:list)) - else readTillEmpty2 bufOps readL (s:list)) - --- --- Misc --- - --- | @catchIO a h@ handles IO action exceptions throughout codebase; version-specific --- tweaks better go here. 
-catchIO :: IO a -> (IOException -> IO a) -> IO a -catchIO a h = Exception.catch a h - -catchIO_ :: IO a -> IO a -> IO a -catchIO_ a h = Exception.catch a (\(_ :: IOException) -> h) - -responseParseError :: String -> String -> Result a -responseParseError loc v = failWith (ErrorParse (loc ++ ' ':v)) diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Network/HTTP/Cookie.hs cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Network/HTTP/Cookie.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Network/HTTP/Cookie.hs 2014-12-18 21:12:40.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Network/HTTP/Cookie.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,141 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Network.HTTP.Cookie --- Copyright : See LICENSE file --- License : BSD --- --- Maintainer : Ganesh Sittampalam --- Stability : experimental --- Portability : non-portable (not tested) --- --- This module provides the data types and functions for working with HTTP cookies. --- Right now, it contains mostly functionality needed by 'Network.Browser'. --- ------------------------------------------------------------------------------ -module Network.HTTP.Cookie - ( Cookie(..) - , cookieMatch -- :: (String,String) -> Cookie -> Bool - - -- functions for translating cookies and headers. - , cookiesToHeader -- :: [Cookie] -> Header - , processCookieHeaders -- :: String -> [Header] -> ([String], [Cookie]) - ) where - -import Network.HTTP.Headers - -import Data.Char -import Data.List -import Data.Maybe - -import Text.ParserCombinators.Parsec - ( Parser, char, many, many1, satisfy, parse, option, try - , (<|>), sepBy1 - ) - ------------------------------------------------------------------- ------------------------ Cookie Stuff ----------------------------- ------------------------------------------------------------------- - --- | @Cookie@ is the Haskell representation of HTTP cookie values. 
--- See its relevant specs for authoritative details. -data Cookie - = MkCookie - { ckDomain :: String - , ckName :: String - , ckValue :: String - , ckPath :: Maybe String - , ckComment :: Maybe String - , ckVersion :: Maybe String - } - deriving(Show,Read) - -instance Eq Cookie where - a == b = ckDomain a == ckDomain b - && ckName a == ckName b - && ckPath a == ckPath b - --- | @cookieToHeaders ck@ serialises @Cookie@s to an HTTP request header. -cookiesToHeader :: [Cookie] -> Header -cookiesToHeader cs = Header HdrCookie (mkCookieHeaderValue cs) - --- | Turn a list of cookies into a key=value pair list, separated by --- semicolons. -mkCookieHeaderValue :: [Cookie] -> String -mkCookieHeaderValue = intercalate "; " . map mkCookieHeaderValue1 - where - mkCookieHeaderValue1 c = ckName c ++ "=" ++ ckValue c - --- | @cookieMatch (domain,path) ck@ performs the standard cookie --- match wrt the given domain and path. -cookieMatch :: (String, String) -> Cookie -> Bool -cookieMatch (dom,path) ck = - ckDomain ck `isSuffixOf` dom && - case ckPath ck of - Nothing -> True - Just p -> p `isPrefixOf` path - - --- | @processCookieHeaders dom hdrs@ -processCookieHeaders :: String -> [Header] -> ([String], [Cookie]) -processCookieHeaders dom hdrs = foldr (headerToCookies dom) ([],[]) hdrs - --- | @headerToCookies dom hdr acc@ -headerToCookies :: String -> Header -> ([String], [Cookie]) -> ([String], [Cookie]) -headerToCookies dom (Header HdrSetCookie val) (accErr, accCookie) = - case parse cookies "" val of - Left{} -> (val:accErr, accCookie) - Right x -> (accErr, x ++ accCookie) - where - cookies :: Parser [Cookie] - cookies = sepBy1 cookie (char ',') - - cookie :: Parser Cookie - cookie = - do name <- word - _ <- spaces_l - _ <- char '=' - _ <- spaces_l - val1 <- cvalue - args <- cdetail - return $ mkCookie name val1 args - - cvalue :: Parser String - - spaces_l = many (satisfy isSpace) - - cvalue = quotedstring <|> many1 (satisfy $ not . 
(==';')) <|> return "" - - -- all keys in the result list MUST be in lower case - cdetail :: Parser [(String,String)] - cdetail = many $ - try (do _ <- spaces_l - _ <- char ';' - _ <- spaces_l - s1 <- word - _ <- spaces_l - s2 <- option "" (char '=' >> spaces_l >> cvalue) - return (map toLower s1,s2) - ) - - mkCookie :: String -> String -> [(String,String)] -> Cookie - mkCookie nm cval more = - MkCookie { ckName = nm - , ckValue = cval - , ckDomain = map toLower (fromMaybe dom (lookup "domain" more)) - , ckPath = lookup "path" more - , ckVersion = lookup "version" more - , ckComment = lookup "comment" more - } -headerToCookies _ _ acc = acc - - - - -word, quotedstring :: Parser String -quotedstring = - do _ <- char '"' -- " - str <- many (satisfy $ not . (=='"')) - _ <- char '"' - return str - -word = many1 (satisfy (\x -> isAlphaNum x || x=='_' || x=='.' || x=='-' || x==':')) diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Network/HTTP/HandleStream.hs cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Network/HTTP/HandleStream.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Network/HTTP/HandleStream.hs 2014-12-18 21:12:40.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Network/HTTP/HandleStream.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,252 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Network.HTTP.HandleStream --- Copyright : See LICENSE file --- License : BSD --- --- Maintainer : Ganesh Sittampalam --- Stability : experimental --- Portability : non-portable (not tested) --- --- A 'HandleStream'-based version of "Network.HTTP" interface. --- --- For more detailed information about what the individual exports do, please consult --- the documentation for "Network.HTTP". 
/Notice/ however that the functions here do --- not perform any kind of normalization prior to transmission (or receipt); you are --- responsible for doing any such yourself, or, if you prefer, just switch to using --- "Network.HTTP" function instead. --- ------------------------------------------------------------------------------ -module Network.HTTP.HandleStream - ( simpleHTTP -- :: Request ty -> IO (Result (Response ty)) - , simpleHTTP_ -- :: HStream ty => HandleStream ty -> Request ty -> IO (Result (Response ty)) - , sendHTTP -- :: HStream ty => HandleStream ty -> Request ty -> IO (Result (Response ty)) - , sendHTTP_notify -- :: HStream ty => HandleStream ty -> Request ty -> IO () -> IO (Result (Response ty)) - , receiveHTTP -- :: HStream ty => HandleStream ty -> IO (Result (Request ty)) - , respondHTTP -- :: HStream ty => HandleStream ty -> Response ty -> IO () - - , simpleHTTP_debug -- :: FilePath -> Request DebugString -> IO (Response DebugString) - ) where - ------------------------------------------------------------------ ------------------- Imports -------------------------------------- ------------------------------------------------------------------ - -import Network.BufferType -import Network.Stream ( fmapE, Result ) -import Network.StreamDebugger ( debugByteStream ) -import Network.TCP (HStream(..), HandleStream ) - -import Network.HTTP.Base -import Network.HTTP.Headers -import Network.HTTP.Utils ( trim, readsOne ) - -import Data.Char (toLower) -import Data.Maybe (fromMaybe) -import Control.Exception (onException) -import Control.Monad (when) - ------------------------------------------------------------------ ------------------- Misc ----------------------------------------- ------------------------------------------------------------------ - --- | @simpleHTTP@ transmits a resource across a non-persistent connection. 
-simpleHTTP :: HStream ty => Request ty -> IO (Result (Response ty)) -simpleHTTP r = do - auth <- getAuth r - failHTTPS (rqURI r) - c <- openStream (host auth) (fromMaybe 80 (port auth)) - simpleHTTP_ c r - --- | @simpleHTTP_debug debugFile req@ behaves like 'simpleHTTP', but logs --- the HTTP operation via the debug file @debugFile@. -simpleHTTP_debug :: HStream ty => FilePath -> Request ty -> IO (Result (Response ty)) -simpleHTTP_debug httpLogFile r = do - auth <- getAuth r - failHTTPS (rqURI r) - c0 <- openStream (host auth) (fromMaybe 80 (port auth)) - c <- debugByteStream httpLogFile c0 - simpleHTTP_ c r - --- | Like 'simpleHTTP', but acting on an already opened stream. -simpleHTTP_ :: HStream ty => HandleStream ty -> Request ty -> IO (Result (Response ty)) -simpleHTTP_ s r = sendHTTP s r - --- | @sendHTTP hStream httpRequest@ transmits @httpRequest@ over --- @hStream@, but does not alter the status of the connection, nor request it to be --- closed upon receiving the response. -sendHTTP :: HStream ty => HandleStream ty -> Request ty -> IO (Result (Response ty)) -sendHTTP conn rq = sendHTTP_notify conn rq (return ()) - --- | @sendHTTP_notify hStream httpRequest action@ behaves like 'sendHTTP', but --- lets you supply an IO @action@ to execute once the request has been successfully --- transmitted over the connection. Useful when you want to set up tracing of --- request transmission and its performance. 
-sendHTTP_notify :: HStream ty - => HandleStream ty - -> Request ty - -> IO () - -> IO (Result (Response ty)) -sendHTTP_notify conn rq onSendComplete = do - when providedClose $ (closeOnEnd conn True) - onException (sendMain conn rq onSendComplete) - (close conn) - where - providedClose = findConnClose (rqHeaders rq) - --- From RFC 2616, section 8.2.3: --- 'Because of the presence of older implementations, the protocol allows --- ambiguous situations in which a client may send "Expect: 100- --- continue" without receiving either a 417 (Expectation Failed) status --- or a 100 (Continue) status. Therefore, when a client sends this --- header field to an origin server (possibly via a proxy) from which it --- has never seen a 100 (Continue) status, the client SHOULD NOT wait --- for an indefinite period before sending the request body.' --- --- Since we would wait forever, I have disabled use of 100-continue for now. -sendMain :: HStream ty - => HandleStream ty - -> Request ty - -> (IO ()) - -> IO (Result (Response ty)) -sendMain conn rqst onSendComplete = do - --let str = if null (rqBody rqst) - -- then show rqst - -- else show (insertHeader HdrExpect "100-continue" rqst) - -- TODO review throwing away of result - _ <- writeBlock conn (buf_fromStr bufferOps $ show rqst) - -- write body immediately, don't wait for 100 CONTINUE - -- TODO review throwing away of result - _ <- writeBlock conn (rqBody rqst) - onSendComplete - rsp <- getResponseHead conn - switchResponse conn True False rsp rqst - - -- Hmmm, this could go bad if we keep getting "100 Continue" - -- responses... Except this should never happen according - -- to the RFC. - -switchResponse :: HStream ty - => HandleStream ty - -> Bool {- allow retry? -} - -> Bool {- is body sent? -} - -> Result ResponseData - -> Request ty - -> IO (Result (Response ty)) -switchResponse _ _ _ (Left e) _ = return (Left e) - -- retry on connreset? 
- -- if we attempt to use the same socket then there is an excellent - -- chance that the socket is not in a completely closed state. - -switchResponse conn allow_retry bdy_sent (Right (cd,rn,hdrs)) rqst = - case matchResponse (rqMethod rqst) cd of - Continue - | not bdy_sent -> do {- Time to send the body -} - writeBlock conn (rqBody rqst) >>= either (return . Left) - (\ _ -> do - rsp <- getResponseHead conn - switchResponse conn allow_retry True rsp rqst) - | otherwise -> do {- keep waiting -} - rsp <- getResponseHead conn - switchResponse conn allow_retry bdy_sent rsp rqst - - Retry -> do {- Request with "Expect" header failed. - Trouble is the request contains Expects - other than "100-Continue" -} - -- TODO review throwing away of result - _ <- writeBlock conn ((buf_append bufferOps) - (buf_fromStr bufferOps (show rqst)) - (rqBody rqst)) - rsp <- getResponseHead conn - switchResponse conn False bdy_sent rsp rqst - - Done -> do - when (findConnClose hdrs) - (closeOnEnd conn True) - return (Right $ Response cd rn hdrs (buf_empty bufferOps)) - - DieHorribly str -> do - close conn - return (responseParseError "Invalid response:" str) - ExpectEntity -> do - r <- fmapE (\ (ftrs,bdy) -> Right (Response cd rn (hdrs++ftrs) bdy)) $ - maybe (maybe (hopefulTransfer bo (readLine conn) []) - (\ x -> - readsOne (linearTransfer (readBlock conn)) - (return$responseParseError "unrecognized content-length value" x) - x) - cl) - (ifChunked (chunkedTransfer bo (readLine conn) (readBlock conn)) - (uglyDeathTransfer "sendHTTP")) - tc - case r of - Left{} -> do - close conn - return r - Right (Response _ _ hs _) -> do - when (findConnClose hs) - (closeOnEnd conn True) - return r - - where - tc = lookupHeader HdrTransferEncoding hdrs - cl = lookupHeader HdrContentLength hdrs - bo = bufferOps - --- reads and parses headers -getResponseHead :: HStream ty => HandleStream ty -> IO (Result ResponseData) -getResponseHead conn = - fmapE (\es -> parseResponseHead (map (buf_toStr bufferOps) 
es)) - (readTillEmpty1 bufferOps (readLine conn)) - --- | @receiveHTTP hStream@ reads a 'Request' from the 'HandleStream' @hStream@ -receiveHTTP :: HStream bufTy => HandleStream bufTy -> IO (Result (Request bufTy)) -receiveHTTP conn = getRequestHead >>= either (return . Left) processRequest - where - -- reads and parses headers - getRequestHead :: IO (Result RequestData) - getRequestHead = do - fmapE (\es -> parseRequestHead (map (buf_toStr bufferOps) es)) - (readTillEmpty1 bufferOps (readLine conn)) - - processRequest (rm,uri,hdrs) = - fmapE (\ (ftrs,bdy) -> Right (Request uri rm (hdrs++ftrs) bdy)) $ - maybe - (maybe (return (Right ([], buf_empty bo))) -- hopefulTransfer "" - (\ x -> readsOne (linearTransfer (readBlock conn)) - (return$responseParseError "unrecognized Content-Length value" x) - x) - - cl) - (ifChunked (chunkedTransfer bo (readLine conn) (readBlock conn)) - (uglyDeathTransfer "receiveHTTP")) - tc - where - -- FIXME : Also handle 100-continue. - tc = lookupHeader HdrTransferEncoding hdrs - cl = lookupHeader HdrContentLength hdrs - bo = bufferOps - --- | @respondHTTP hStream httpResponse@ transmits an HTTP 'Response' over --- the 'HandleStream' @hStream@. It could be used to implement simple web --- server interactions, performing the dual role to 'sendHTTP'. 
-respondHTTP :: HStream ty => HandleStream ty -> Response ty -> IO () -respondHTTP conn rsp = do - -- TODO: review throwing away of result - _ <- writeBlock conn (buf_fromStr bufferOps $ show rsp) - -- write body immediately, don't wait for 100 CONTINUE - -- TODO: review throwing away of result - _ <- writeBlock conn (rspBody rsp) - return () - ------------------------------------------------------------------------------- - -headerName :: String -> String -headerName x = map toLower (trim x) - -ifChunked :: a -> a -> String -> a -ifChunked a b s = - case headerName s of - "chunked" -> a - _ -> b - diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Network/HTTP/Headers.hs cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Network/HTTP/Headers.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Network/HTTP/Headers.hs 2014-12-18 21:12:40.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Network/HTTP/Headers.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,306 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Network.HTTP.Headers --- Copyright : See LICENSE file --- License : BSD --- --- Maintainer : Ganesh Sittampalam --- Stability : experimental --- Portability : non-portable (not tested) --- --- This module provides the data types for representing HTTP headers, and --- operations for looking up header values and working with sequences of --- header values in 'Request's and 'Response's. To avoid having to provide --- separate set of operations for doing so, we introduce a type class 'HasHeaders' --- to facilitate writing such processing using overloading instead. --- ------------------------------------------------------------------------------ -module Network.HTTP.Headers - ( HasHeaders(..) -- type class - - , Header(..) - , mkHeader -- :: HeaderName -> String -> Header - , hdrName -- :: Header -> HeaderName - , hdrValue -- :: Header -> String - - , HeaderName(..) 
- - , insertHeader -- :: HasHeaders a => HeaderName -> String -> a -> a - , insertHeaderIfMissing -- :: HasHeaders a => HeaderName -> String -> a -> a - , insertHeaders -- :: HasHeaders a => [Header] -> a -> a - , retrieveHeaders -- :: HasHeaders a => HeaderName -> a -> [Header] - , replaceHeader -- :: HasHeaders a => HeaderName -> String -> a -> a - , findHeader -- :: HasHeaders a => HeaderName -> a -> Maybe String - , lookupHeader -- :: HeaderName -> [Header] -> Maybe String - - , parseHeader -- :: parseHeader :: String -> Result Header - , parseHeaders -- :: [String] -> Result [Header] - - , headerMap -- :: [(String, HeaderName)] - - , HeaderSetter - ) where - -import Data.Char (toLower) -import Network.Stream (Result, failParse) -import Network.HTTP.Utils ( trim, split, crlf ) - --- | The @Header@ data type pairs header names & values. -data Header = Header HeaderName String - -hdrName :: Header -> HeaderName -hdrName (Header h _) = h - -hdrValue :: Header -> String -hdrValue (Header _ v) = v - --- | Header constructor as a function, hiding above rep. -mkHeader :: HeaderName -> String -> Header -mkHeader = Header - -instance Show Header where - show (Header key value) = shows key (':':' ':value ++ crlf) - --- | HTTP @HeaderName@ type, a Haskell data constructor for each --- specification-defined header, prefixed with @Hdr@ and CamelCased, --- (i.e., eliding the @-@ in the process.) Should you require using --- a custom header, there's the @HdrCustom@ constructor which takes --- a @String@ argument. --- --- Encoding HTTP header names differently, as Strings perhaps, is an --- equally fine choice..no decidedly clear winner, but let's stick --- with data constructors here. 
--- -data HeaderName - -- Generic Headers -- - = HdrCacheControl - | HdrConnection - | HdrDate - | HdrPragma - | HdrTransferEncoding - | HdrUpgrade - | HdrVia - -- Request Headers -- - | HdrAccept - | HdrAcceptCharset - | HdrAcceptEncoding - | HdrAcceptLanguage - | HdrAuthorization - | HdrCookie - | HdrExpect - | HdrFrom - | HdrHost - | HdrIfModifiedSince - | HdrIfMatch - | HdrIfNoneMatch - | HdrIfRange - | HdrIfUnmodifiedSince - | HdrMaxForwards - | HdrProxyAuthorization - | HdrRange - | HdrReferer - | HdrUserAgent - -- Response Headers - | HdrAge - | HdrLocation - | HdrProxyAuthenticate - | HdrPublic - | HdrRetryAfter - | HdrServer - | HdrSetCookie - | HdrTE - | HdrTrailer - | HdrVary - | HdrWarning - | HdrWWWAuthenticate - -- Entity Headers - | HdrAllow - | HdrContentBase - | HdrContentEncoding - | HdrContentLanguage - | HdrContentLength - | HdrContentLocation - | HdrContentMD5 - | HdrContentRange - | HdrContentType - | HdrETag - | HdrExpires - | HdrLastModified - -- | MIME entity headers (for sub-parts) - | HdrContentTransferEncoding - -- | Allows for unrecognised or experimental headers. - | HdrCustom String -- not in header map below. - deriving(Eq) - --- | @headerMap@ is a straight assoc list for translating between header names --- and values. 
-headerMap :: [ (String,HeaderName) ] -headerMap = - [ p "Cache-Control" HdrCacheControl - , p "Connection" HdrConnection - , p "Date" HdrDate - , p "Pragma" HdrPragma - , p "Transfer-Encoding" HdrTransferEncoding - , p "Upgrade" HdrUpgrade - , p "Via" HdrVia - , p "Accept" HdrAccept - , p "Accept-Charset" HdrAcceptCharset - , p "Accept-Encoding" HdrAcceptEncoding - , p "Accept-Language" HdrAcceptLanguage - , p "Authorization" HdrAuthorization - , p "Cookie" HdrCookie - , p "Expect" HdrExpect - , p "From" HdrFrom - , p "Host" HdrHost - , p "If-Modified-Since" HdrIfModifiedSince - , p "If-Match" HdrIfMatch - , p "If-None-Match" HdrIfNoneMatch - , p "If-Range" HdrIfRange - , p "If-Unmodified-Since" HdrIfUnmodifiedSince - , p "Max-Forwards" HdrMaxForwards - , p "Proxy-Authorization" HdrProxyAuthorization - , p "Range" HdrRange - , p "Referer" HdrReferer - , p "User-Agent" HdrUserAgent - , p "Age" HdrAge - , p "Location" HdrLocation - , p "Proxy-Authenticate" HdrProxyAuthenticate - , p "Public" HdrPublic - , p "Retry-After" HdrRetryAfter - , p "Server" HdrServer - , p "Set-Cookie" HdrSetCookie - , p "TE" HdrTE - , p "Trailer" HdrTrailer - , p "Vary" HdrVary - , p "Warning" HdrWarning - , p "WWW-Authenticate" HdrWWWAuthenticate - , p "Allow" HdrAllow - , p "Content-Base" HdrContentBase - , p "Content-Encoding" HdrContentEncoding - , p "Content-Language" HdrContentLanguage - , p "Content-Length" HdrContentLength - , p "Content-Location" HdrContentLocation - , p "Content-MD5" HdrContentMD5 - , p "Content-Range" HdrContentRange - , p "Content-Type" HdrContentType - , p "ETag" HdrETag - , p "Expires" HdrExpires - , p "Last-Modified" HdrLastModified - , p "Content-Transfer-Encoding" HdrContentTransferEncoding - ] - where - p a b = (a,b) - -instance Show HeaderName where - show (HdrCustom s) = s - show x = case filter ((==x).snd) headerMap of - [] -> error "headerMap incomplete" - (h:_) -> fst h - --- | @HasHeaders@ is a type class for types containing HTTP headers, allowing 
--- you to write overloaded header manipulation functions --- for both 'Request' and 'Response' data types, for instance. -class HasHeaders x where - getHeaders :: x -> [Header] - setHeaders :: x -> [Header] -> x - --- Header manipulation functions - -type HeaderSetter a = HeaderName -> String -> a -> a - --- | @insertHeader hdr val x@ inserts a header with the given header name --- and value. Does not check for existing headers with same name, allowing --- duplicates to be introduce (use 'replaceHeader' if you want to avoid this.) -insertHeader :: HasHeaders a => HeaderSetter a -insertHeader name value x = setHeaders x newHeaders - where - newHeaders = (Header name value) : getHeaders x - --- | @insertHeaderIfMissing hdr val x@ adds the new header only if no previous --- header with name @hdr@ exists in @x@. -insertHeaderIfMissing :: HasHeaders a => HeaderSetter a -insertHeaderIfMissing name value x = setHeaders x (newHeaders $ getHeaders x) - where - newHeaders list@(h@(Header n _): rest) - | n == name = list - | otherwise = h : newHeaders rest - newHeaders [] = [Header name value] - --- | @replaceHeader hdr val o@ replaces the header @hdr@ with the --- value @val@, dropping any existing -replaceHeader :: HasHeaders a => HeaderSetter a -replaceHeader name value h = setHeaders h newHeaders - where - newHeaders = Header name value : [ x | x@(Header n _) <- getHeaders h, name /= n ] - --- | @insertHeaders hdrs x@ appends multiple headers to @x@'s existing --- set. -insertHeaders :: HasHeaders a => [Header] -> a -> a -insertHeaders hdrs x = setHeaders x (getHeaders x ++ hdrs) - --- | @retrieveHeaders hdrNm x@ gets a list of headers with 'HeaderName' @hdrNm@. -retrieveHeaders :: HasHeaders a => HeaderName -> a -> [Header] -retrieveHeaders name x = filter matchname (getHeaders x) - where - matchname (Header n _) = n == name - --- | @findHeader hdrNm x@ looks up @hdrNm@ in @x@, returning the first --- header that matches, if any. 
-findHeader :: HasHeaders a => HeaderName -> a -> Maybe String -findHeader n x = lookupHeader n (getHeaders x) - --- | @lookupHeader hdr hdrs@ locates the first header matching @hdr@ in the --- list @hdrs@. -lookupHeader :: HeaderName -> [Header] -> Maybe String -lookupHeader _ [] = Nothing -lookupHeader v (Header n s:t) - | v == n = Just s - | otherwise = lookupHeader v t - --- | @parseHeader headerNameAndValueString@ tries to unscramble a --- @header: value@ pairing and returning it as a 'Header'. -parseHeader :: String -> Result Header -parseHeader str = - case split ':' str of - Nothing -> failParse ("Unable to parse header: " ++ str) - Just (k,v) -> return $ Header (fn k) (trim $ drop 1 v) - where - fn k = case map snd $ filter (match k . fst) headerMap of - [] -> (HdrCustom k) - (h:_) -> h - - match :: String -> String -> Bool - match s1 s2 = map toLower s1 == map toLower s2 - --- | @parseHeaders hdrs@ takes a sequence of strings holding header --- information and parses them into a set of headers (preserving their --- order in the input argument.) Handles header values split up over --- multiple lines. -parseHeaders :: [String] -> Result [Header] -parseHeaders = catRslts [] . - map (parseHeader . clean) . - joinExtended "" - where - -- Joins consecutive lines where the second line - -- begins with ' ' or '\t'. - joinExtended old [] = [old] - joinExtended old (h : t) - | isLineExtension h = joinExtended (old ++ ' ' : tail h) t - | otherwise = old : joinExtended h t - - isLineExtension (x:_) = x == ' ' || x == '\t' - isLineExtension _ = False - - clean [] = [] - clean (h:t) | h `elem` "\t\r\n" = ' ' : clean t - | otherwise = h : clean t - - -- tolerant of errors? should parse - -- errors here be reported or ignored? - -- currently ignored. 
- catRslts :: [a] -> [Result a] -> Result [a] - catRslts list (h:t) = - case h of - Left _ -> catRslts list t - Right v -> catRslts (v:list) t - catRslts list [] = Right $ reverse list diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Network/HTTP/MD5Aux.hs cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Network/HTTP/MD5Aux.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Network/HTTP/MD5Aux.hs 2014-12-18 21:12:40.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Network/HTTP/MD5Aux.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,342 +0,0 @@ -module Network.HTTP.MD5Aux - (md5, md5s, md5i, - MD5(..), ABCD(..), - Zord64, Str(..), BoolList(..), WordList(..)) where - -import Data.Char (ord, chr) -import Data.Bits (rotateL, shiftL, shiftR, (.&.), (.|.), xor, complement) -import Data.Word (Word32, Word64) - -rotL :: Word32 -> Int -> Word32 -rotL x = rotateL x - -type Zord64 = Word64 - --- ===================== TYPES AND CLASS DEFINTIONS ======================== - - -type XYZ = (Word32, Word32, Word32) -type Rotation = Int -newtype ABCD = ABCD (Word32, Word32, Word32, Word32) deriving (Eq, Show) -newtype Str = Str String -newtype BoolList = BoolList [Bool] -newtype WordList = WordList ([Word32], Word64) - --- Anything we want to work out the MD5 of must be an instance of class MD5 - -class MD5 a where - get_next :: a -> ([Word32], Int, a) -- get the next blocks worth - -- \ \ \------ the rest of the input - -- \ \--------- the number of bits returned - -- \--------------- the bits returned in 32bit words - len_pad :: Word64 -> a -> a -- append the padding and length - finished :: a -> Bool -- Have we run out of input yet? 
- - --- Mainly exists because it's fairly easy to do MD5s on input where the --- length is not a multiple of 8 - -instance MD5 BoolList where - get_next (BoolList s) = (bools_to_word32s ys, length ys, BoolList zs) - where (ys, zs) = splitAt 512 s - len_pad l (BoolList bs) - = BoolList (bs ++ [True] - ++ replicate (fromIntegral $ (447 - l) .&. 511) False - ++ [l .&. (shiftL 1 x) > 0 | x <- (mangle [0..63])] - ) - where mangle [] = [] - mangle xs = reverse ys ++ mangle zs - where (ys, zs) = splitAt 8 xs - finished (BoolList s) = s == [] - - --- The string instance is fairly straightforward - -instance MD5 Str where - get_next (Str s) = (string_to_word32s ys, 8 * length ys, Str zs) - where (ys, zs) = splitAt 64 s - len_pad c64 (Str s) = Str (s ++ padding ++ l) - where padding = '\128':replicate (fromIntegral zeros) '\000' - zeros = shiftR ((440 - c64) .&. 511) 3 - l = length_to_chars 8 c64 - finished (Str s) = s == "" - - --- YA instance that is believed will be useful - -instance MD5 WordList where - get_next (WordList (ws, l)) = (xs, fromIntegral taken, WordList (ys, l - taken)) - where (xs, ys) = splitAt 16 ws - taken = if l > 511 then 512 else l .&. 511 - len_pad c64 (WordList (ws, l)) = WordList (beginning ++ nextish ++ blanks ++ size, newlen) - where beginning = if length ws > 0 then start ++ lastone' else [] - start = init ws - lastone = last ws - offset = c64 .&. 31 - lastone' = [if offset > 0 then lastone + theone else lastone] - theone = shiftL (shiftR 128 (fromIntegral $ offset .&. 7)) - (fromIntegral $ offset .&. (31 - 7)) - nextish = if offset == 0 then [128] else [] - c64' = c64 + (32 - offset) - num_blanks = (fromIntegral $ shiftR ((448 - c64') .&. 511) 5) - blanks = replicate num_blanks 0 - lowsize = fromIntegral $ c64 .&. (shiftL 1 32 - 1) - topsize = fromIntegral $ shiftR c64 32 - size = [lowsize, topsize] - newlen = l .&. (complement 511) - + if c64 .&. 
511 >= 448 then 1024 else 512 - finished (WordList (_, z)) = z == 0 - - -instance Num ABCD where - ABCD (a1, b1, c1, d1) + ABCD (a2, b2, c2, d2) = ABCD (a1 + a2, b1 + b2, c1 + c2, d1 + d2) - - (*) = error "(*){ABCD}: no instance method defined" - signum = error "signum{ABCD}: no instance method defined" - fromInteger = error "fromInteger{ABCD}: no instance method defined" - abs = error "abs{ABCD}: no instance method defined" --- ===================== EXPORTED FUNCTIONS ======================== - - --- The simplest function, gives you the MD5 of a string as 4-tuple of --- 32bit words. - -md5 :: (MD5 a) => a -> ABCD -md5 m = md5_main False 0 magic_numbers m - - --- Returns a hex number ala the md5sum program - -md5s :: (MD5 a) => a -> String -md5s = abcd_to_string . md5 - - --- Returns an integer equivalent to the above hex number - -md5i :: (MD5 a) => a -> Integer -md5i = abcd_to_integer . md5 - - --- ===================== THE CORE ALGORITHM ======================== - - --- Decides what to do. The first argument indicates if padding has been --- added. The second is the length mod 2^64 so far. Then we have the --- starting state, the rest of the string and the final state. - -md5_main :: (MD5 a) => - Bool -- Have we added padding yet? 
- -> Word64 -- The length so far mod 2^64 - -> ABCD -- The initial state - -> a -- The non-processed portion of the message - -> ABCD -- The resulting state -md5_main padded ilen abcd m - = if finished m && padded - then abcd - else md5_main padded' (ilen + 512) (abcd + abcd') m'' - where (m16, l, m') = get_next m - len' = ilen + fromIntegral l - ((m16', _, m''), padded') = if not padded && l < 512 - then (get_next $ len_pad len' m, True) - else ((m16, l, m'), padded) - abcd' = md5_do_block abcd m16' - - --- md5_do_block processes a 512 bit block by calling md5_round 4 times to --- apply each round with the correct constants and permutations of the --- block - -md5_do_block :: ABCD -- Initial state - -> [Word32] -- The block to be processed - 16 32bit words - -> ABCD -- Resulting state -md5_do_block abcd0 w = abcd4 - where (r1, r2, r3, r4) = rounds - {- - map (\x -> w !! x) [1,6,11,0,5,10,15,4,9,14,3,8,13,2,7,12] - -- [(5 * x + 1) `mod` 16 | x <- [0..15]] - map (\x -> w !! x) [5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2] - -- [(3 * x + 5) `mod` 16 | x <- [0..15]] - map (\x -> w !! x) [0,7,14,5,12,3,10,1,8,15,6,13,4,11,2,9] - -- [(7 * x) `mod` 16 | x <- [0..15]] - -} - perm5 [c0,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15] - = [c1,c6,c11,c0,c5,c10,c15,c4,c9,c14,c3,c8,c13,c2,c7,c12] - perm5 _ = error "broke at perm5" - perm3 [c0,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15] - = [c5,c8,c11,c14,c1,c4,c7,c10,c13,c0,c3,c6,c9,c12,c15,c2] - perm3 _ = error "broke at perm3" - perm7 [c0,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15] - = [c0,c7,c14,c5,c12,c3,c10,c1,c8,c15,c6,c13,c4,c11,c2,c9] - perm7 _ = error "broke at perm7" - abcd1 = md5_round md5_f abcd0 w r1 - abcd2 = md5_round md5_g abcd1 (perm5 w) r2 - abcd3 = md5_round md5_h abcd2 (perm3 w) r3 - abcd4 = md5_round md5_i abcd3 (perm7 w) r4 - - --- md5_round does one of the rounds. 
It takes an auxiliary function and foldls --- (md5_inner_function f) to repeatedly apply it to the initial state with the --- correct constants - -md5_round :: (XYZ -> Word32) -- Auxiliary function (F, G, H or I - -- for those of you with a copy of - -- the prayer book^W^WRFC) - -> ABCD -- Initial state - -> [Word32] -- The 16 32bit words of input - -> [(Rotation, Word32)] -- The list of 16 rotations and - -- additive constants - -> ABCD -- Resulting state -md5_round f abcd s ns = foldl (md5_inner_function f) abcd ns' - where ns' = zipWith (\x (y, z) -> (y, x + z)) s ns - - --- Apply one of the functions md5_[fghi] and put the new ABCD together - -md5_inner_function :: (XYZ -> Word32) -- Auxiliary function - -> ABCD -- Initial state - -> (Rotation, Word32) -- The rotation and additive - -- constant (X[i] + T[j]) - -> ABCD -- Resulting state -md5_inner_function f (ABCD (a, b, c, d)) (s, ki) = ABCD (d, a', b, c) - where mid_a = a + f(b,c,d) + ki - rot_a = rotL mid_a s - a' = b + rot_a - - --- The 4 auxiliary functions - -md5_f :: XYZ -> Word32 -md5_f (x, y, z) = z `xor` (x .&. (y `xor` z)) -{- optimised version of: (x .&. y) .|. ((complement x) .&. z) -} - -md5_g :: XYZ -> Word32 -md5_g (x, y, z) = md5_f (z, x, y) -{- was: (x .&. z) .|. (y .&. (complement z)) -} - -md5_h :: XYZ -> Word32 -md5_h (x, y, z) = x `xor` y `xor` z - -md5_i :: XYZ -> Word32 -md5_i (x, y, z) = y `xor` (x .|. (complement z)) - - --- The magic numbers from the RFC. 
- -magic_numbers :: ABCD -magic_numbers = ABCD (0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476) - - --- The 4 lists of (rotation, additive constant) tuples, one for each round - -rounds :: ([(Rotation, Word32)], - [(Rotation, Word32)], - [(Rotation, Word32)], - [(Rotation, Word32)]) -rounds = (r1, r2, r3, r4) - where r1 = [(s11, 0xd76aa478), (s12, 0xe8c7b756), (s13, 0x242070db), - (s14, 0xc1bdceee), (s11, 0xf57c0faf), (s12, 0x4787c62a), - (s13, 0xa8304613), (s14, 0xfd469501), (s11, 0x698098d8), - (s12, 0x8b44f7af), (s13, 0xffff5bb1), (s14, 0x895cd7be), - (s11, 0x6b901122), (s12, 0xfd987193), (s13, 0xa679438e), - (s14, 0x49b40821)] - r2 = [(s21, 0xf61e2562), (s22, 0xc040b340), (s23, 0x265e5a51), - (s24, 0xe9b6c7aa), (s21, 0xd62f105d), (s22, 0x2441453), - (s23, 0xd8a1e681), (s24, 0xe7d3fbc8), (s21, 0x21e1cde6), - (s22, 0xc33707d6), (s23, 0xf4d50d87), (s24, 0x455a14ed), - (s21, 0xa9e3e905), (s22, 0xfcefa3f8), (s23, 0x676f02d9), - (s24, 0x8d2a4c8a)] - r3 = [(s31, 0xfffa3942), (s32, 0x8771f681), (s33, 0x6d9d6122), - (s34, 0xfde5380c), (s31, 0xa4beea44), (s32, 0x4bdecfa9), - (s33, 0xf6bb4b60), (s34, 0xbebfbc70), (s31, 0x289b7ec6), - (s32, 0xeaa127fa), (s33, 0xd4ef3085), (s34, 0x4881d05), - (s31, 0xd9d4d039), (s32, 0xe6db99e5), (s33, 0x1fa27cf8), - (s34, 0xc4ac5665)] - r4 = [(s41, 0xf4292244), (s42, 0x432aff97), (s43, 0xab9423a7), - (s44, 0xfc93a039), (s41, 0x655b59c3), (s42, 0x8f0ccc92), - (s43, 0xffeff47d), (s44, 0x85845dd1), (s41, 0x6fa87e4f), - (s42, 0xfe2ce6e0), (s43, 0xa3014314), (s44, 0x4e0811a1), - (s41, 0xf7537e82), (s42, 0xbd3af235), (s43, 0x2ad7d2bb), - (s44, 0xeb86d391)] - s11 = 7 - s12 = 12 - s13 = 17 - s14 = 22 - s21 = 5 - s22 = 9 - s23 = 14 - s24 = 20 - s31 = 4 - s32 = 11 - s33 = 16 - s34 = 23 - s41 = 6 - s42 = 10 - s43 = 15 - s44 = 21 - - --- ===================== CONVERSION FUNCTIONS ======================== - - --- Turn the 4 32 bit words into a string representing the hex number they --- represent. 
- -abcd_to_string :: ABCD -> String -abcd_to_string (ABCD (a,b,c,d)) = concat $ map display_32bits_as_hex [a,b,c,d] - - --- Split the 32 bit word up, swap the chunks over and convert the numbers --- to their hex equivalents. - -display_32bits_as_hex :: Word32 -> String -display_32bits_as_hex w = swap_pairs cs - where cs = map (\x -> getc $ (shiftR w (4*x)) .&. 15) [0..7] - getc n = (['0'..'9'] ++ ['a'..'f']) !! (fromIntegral n) - swap_pairs (x1:x2:xs) = x2:x1:swap_pairs xs - swap_pairs _ = [] - --- Convert to an integer, performing endianness magic as we go - -abcd_to_integer :: ABCD -> Integer -abcd_to_integer (ABCD (a,b,c,d)) = rev_num a * 2^(96 :: Int) - + rev_num b * 2^(64 :: Int) - + rev_num c * 2^(32 :: Int) - + rev_num d - -rev_num :: Word32 -> Integer -rev_num i = toInteger j `mod` (2^(32 :: Int)) - -- NHC's fault ~~~~~~~~~~~~~~~~~~~~~ - where j = foldl (\so_far next -> shiftL so_far 8 + (shiftR i next .&. 255)) - 0 [0,8,16,24] - --- Used to convert a 64 byte string to 16 32bit words - -string_to_word32s :: String -> [Word32] -string_to_word32s "" = [] -string_to_word32s ss = this:string_to_word32s ss' - where (s, ss') = splitAt 4 ss - this = foldr (\c w -> shiftL w 8 + (fromIntegral.ord) c) 0 s - - --- Used to convert a list of 512 bools to 16 32bit words - -bools_to_word32s :: [Bool] -> [Word32] -bools_to_word32s [] = [] -bools_to_word32s bs = this:bools_to_word32s rest - where (bs1, bs1') = splitAt 8 bs - (bs2, bs2') = splitAt 8 bs1' - (bs3, bs3') = splitAt 8 bs2' - (bs4, rest) = splitAt 8 bs3' - this = boolss_to_word32 [bs1, bs2, bs3, bs4] - bools_to_word8 = foldl (\w b -> shiftL w 1 + if b then 1 else 0) 0 - boolss_to_word32 = foldr (\w8 w -> shiftL w 8 + bools_to_word8 w8) 0 - - --- Convert the size into a list of characters used by the len_pad function --- for strings - -length_to_chars :: Int -> Word64 -> String -length_to_chars 0 _ = [] -length_to_chars p n = this:length_to_chars (p-1) (shiftR n 8) - where this = chr $ fromIntegral $ n .&. 
255 - diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Network/HTTP/Proxy.hs cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Network/HTTP/Proxy.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Network/HTTP/Proxy.hs 2014-12-18 21:12:40.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Network/HTTP/Proxy.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,180 +0,0 @@ -{-# LANGUAGE CPP #-} ------------------------------------------------------------------------------ --- | --- Module : Network.HTTP.Proxy --- Copyright : See LICENSE file --- License : BSD --- --- Maintainer : Ganesh Sittampalam --- Stability : experimental --- Portability : non-portable (not tested) --- --- Handling proxy server settings and their resolution. --- ------------------------------------------------------------------------------ -module Network.HTTP.Proxy - ( Proxy(..) - , noProxy -- :: Proxy - , fetchProxy -- :: Bool -> IO Proxy - , parseProxy -- :: String -> Maybe Proxy - ) where - -{- -#if !defined(WIN32) && defined(mingw32_HOST_OS) -#define WIN32 1 -#endif --} - -import Control.Monad ( when, mplus, join, liftM2) - -#if defined(WIN32) -import Network.HTTP.Base ( catchIO ) -#endif -import Network.HTTP.Utils ( dropWhileTail, chopAtDelim ) -import Network.HTTP.Auth -import Network.URI - ( URI(..), URIAuth(..), parseAbsoluteURI, unEscapeString ) -import System.IO ( hPutStrLn, stderr ) -import System.Environment - -{- -#if !defined(WIN32) && defined(mingw32_HOST_OS) -#define WIN32 1 -#endif --} - -#if defined(WIN32) -import System.Win32.Types ( DWORD, HKEY ) -import System.Win32.Registry( hKEY_CURRENT_USER, regOpenKey, regCloseKey, regQueryValue, regQueryValueEx ) -import Control.Exception ( bracket ) -import Foreign ( toBool, Storable(peek, sizeOf), castPtr, alloca ) -#endif - --- | HTTP proxies (or not) are represented via 'Proxy', specifying if a --- proxy should be used for the request (see 'Network.Browser.setProxy') -data Proxy - = NoProxy -- ^ Don't use a proxy. 
- | Proxy String - (Maybe Authority) -- ^ Use the proxy given. Should be of the - -- form "http:\/\/host:port", "host", "host:port", or "http:\/\/host". - -- Additionally, an optional 'Authority' for authentication with the proxy. - - -noProxy :: Proxy -noProxy = NoProxy - --- | @envProxyString@ locates proxy server settings by looking --- up env variable @HTTP_PROXY@ (or its lower-case equivalent.) --- If no mapping found, returns @Nothing@. -envProxyString :: IO (Maybe String) -envProxyString = do - env <- getEnvironment - return (lookup "http_proxy" env `mplus` lookup "HTTP_PROXY" env) - --- | @proxyString@ tries to locate the user's proxy server setting. --- Consults environment variable, and in case of Windows, by querying --- the Registry (cf. @registryProxyString@.) -proxyString :: IO (Maybe String) -proxyString = liftM2 mplus envProxyString registryProxyString - -registryProxyString :: IO (Maybe String) -#if !defined(WIN32) -registryProxyString = return Nothing -#else -registryProxyLoc :: (HKEY,String) -registryProxyLoc = (hive, path) - where - -- some sources say proxy settings should be at - -- HKEY_LOCAL_MACHINE\SOFTWARE\Policies\Microsoft\Windows - -- \CurrentVersion\Internet Settings\ProxyServer - -- but if the user sets them with IE connection panel they seem to - -- end up in the following place: - hive = hKEY_CURRENT_USER - path = "Software\\Microsoft\\Windows\\CurrentVersion\\Internet Settings" - --- read proxy settings from the windows registry; this is just a best --- effort and may not work on all setups. -registryProxyString = catchIO - (bracket (uncurry regOpenKey registryProxyLoc) regCloseKey $ \hkey -> do - enable <- fmap toBool $ regQueryValueDWORD hkey "ProxyEnable" - if enable - then fmap Just $ regQueryValue hkey (Just "ProxyServer") - else return Nothing) - (\_ -> return Nothing) -#endif - --- | @fetchProxy flg@ gets the local proxy settings and parse the string --- into a @Proxy@ value. 
If you want to be informed of ill-formed proxy --- configuration strings, supply @True@ for @flg@. --- Proxy settings are sourced from the @HTTP_PROXY@ environment variable, --- and in the case of Windows platforms, by consulting IE/WinInet's proxy --- setting in the Registry. -fetchProxy :: Bool -> IO Proxy -fetchProxy warnIfIllformed = do - mstr <- proxyString - case mstr of - Nothing -> return NoProxy - Just str -> case parseProxy str of - Just p -> return p - Nothing -> do - when warnIfIllformed $ System.IO.hPutStrLn System.IO.stderr $ unlines - [ "invalid http proxy uri: " ++ show str - , "proxy uri must be http with a hostname" - , "ignoring http proxy, trying a direct connection" - ] - return NoProxy - --- | @parseProxy str@ translates a proxy server string into a @Proxy@ value; --- returns @Nothing@ if not well-formed. -parseProxy :: String -> Maybe Proxy -parseProxy str = join - . fmap uri2proxy - $ parseHttpURI str - `mplus` parseHttpURI ("http://" ++ str) - where - parseHttpURI str' = - case parseAbsoluteURI str' of - Just uri@URI{uriAuthority = Just{}} -> Just (fixUserInfo uri) - _ -> Nothing - - -- Note: we need to be able to parse non-URIs like @\"wwwcache.example.com:80\"@ - -- which lack the @\"http://\"@ URI scheme. The problem is that - -- @\"wwwcache.example.com:80\"@ is in fact a valid URI but with scheme - -- @\"wwwcache.example.com:\"@, no authority part and a path of @\"80\"@. - -- - -- So our strategy is to try parsing as normal uri first and if it lacks the - -- 'uriAuthority' then we try parsing again with a @\"http://\"@ prefix. - -- - --- | tidy up user portion, don't want the trailing "\@". 
-fixUserInfo :: URI -> URI -fixUserInfo uri = uri{ uriAuthority = f `fmap` uriAuthority uri } - where - f a@URIAuth{uriUserInfo=s} = a{uriUserInfo=dropWhileTail (=='@') s} - --- -uri2proxy :: URI -> Maybe Proxy -uri2proxy uri@URI{ uriScheme = "http:" - , uriAuthority = Just (URIAuth auth' hst prt) - } = - Just (Proxy (hst ++ prt) auth) - where - auth = - case auth' of - [] -> Nothing - as -> Just (AuthBasic "" (unEscapeString usr) (unEscapeString pwd) uri) - where - (usr,pwd) = chopAtDelim ':' as - -uri2proxy _ = Nothing - --- utilities -#if defined(WIN32) -regQueryValueDWORD :: HKEY -> String -> IO DWORD -regQueryValueDWORD hkey name = alloca $ \ptr -> do - -- TODO: this throws away the key type returned by regQueryValueEx - -- we should check it's what we expect instead - _ <- regQueryValueEx hkey name (castPtr ptr) (sizeOf (undefined :: DWORD)) - peek ptr - -#endif diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Network/HTTP/Stream.hs cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Network/HTTP/Stream.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Network/HTTP/Stream.hs 2014-12-18 21:12:40.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Network/HTTP/Stream.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,236 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Network.HTTP.Stream --- Copyright : See LICENSE file --- License : BSD --- --- Maintainer : Ganesh Sittampalam --- Stability : experimental --- Portability : non-portable (not tested) --- --- Transmitting HTTP requests and responses holding @String@ in their payload bodies. --- This is one of the implementation modules for the "Network.HTTP" interface, representing --- request and response content as @String@s and transmitting them in non-packed form --- (cf. "Network.HTTP.HandleStream" and its use of @ByteString@s.) over 'Stream' handles. 
--- It is mostly here for backwards compatibility, representing how requests and responses --- were transmitted up until the 4.x releases of the HTTP package. --- --- For more detailed information about what the individual exports do, please consult --- the documentation for "Network.HTTP". /Notice/ however that the functions here do --- not perform any kind of normalization prior to transmission (or receipt); you are --- responsible for doing any such yourself, or, if you prefer, just switch to using --- "Network.HTTP" function instead. --- ------------------------------------------------------------------------------ -module Network.HTTP.Stream - ( module Network.Stream - - , simpleHTTP -- :: Request_String -> IO (Result Response_String) - , simpleHTTP_ -- :: Stream s => s -> Request_String -> IO (Result Response_String) - , sendHTTP -- :: Stream s => s -> Request_String -> IO (Result Response_String) - , sendHTTP_notify -- :: Stream s => s -> Request_String -> IO () -> IO (Result Response_String) - , receiveHTTP -- :: Stream s => s -> IO (Result Request_String) - , respondHTTP -- :: Stream s => s -> Response_String -> IO () - - ) where - ------------------------------------------------------------------ ------------------- Imports -------------------------------------- ------------------------------------------------------------------ - -import Network.Stream -import Network.StreamDebugger (debugStream) -import Network.TCP (openTCPPort) -import Network.BufferType ( stringBufferOp ) - -import Network.HTTP.Base -import Network.HTTP.Headers -import Network.HTTP.Utils ( trim ) - -import Data.Char (toLower) -import Data.Maybe (fromMaybe) -import Control.Exception (onException) -import Control.Monad (when) - - --- Turn on to enable HTTP traffic logging -debug :: Bool -debug = False - --- File that HTTP traffic logs go to -httpLogFile :: String -httpLogFile = "http-debug.log" - ------------------------------------------------------------------ ------------------- Misc 
----------------------------------------- ------------------------------------------------------------------ - - --- | Simple way to transmit a resource across a non-persistent connection. -simpleHTTP :: Request_String -> IO (Result Response_String) -simpleHTTP r = do - auth <- getAuth r - c <- openTCPPort (host auth) (fromMaybe 80 (port auth)) - simpleHTTP_ c r - --- | Like 'simpleHTTP', but acting on an already opened stream. -simpleHTTP_ :: Stream s => s -> Request_String -> IO (Result Response_String) -simpleHTTP_ s r - | not debug = sendHTTP s r - | otherwise = do - s' <- debugStream httpLogFile s - sendHTTP s' r - -sendHTTP :: Stream s => s -> Request_String -> IO (Result Response_String) -sendHTTP conn rq = sendHTTP_notify conn rq (return ()) - -sendHTTP_notify :: Stream s => s -> Request_String -> IO () -> IO (Result Response_String) -sendHTTP_notify conn rq onSendComplete = do - when providedClose $ (closeOnEnd conn True) - onException (sendMain conn rq onSendComplete) - (close conn) - where - providedClose = findConnClose (rqHeaders rq) - --- From RFC 2616, section 8.2.3: --- 'Because of the presence of older implementations, the protocol allows --- ambiguous situations in which a client may send "Expect: 100- --- continue" without receiving either a 417 (Expectation Failed) status --- or a 100 (Continue) status. Therefore, when a client sends this --- header field to an origin server (possibly via a proxy) from which it --- has never seen a 100 (Continue) status, the client SHOULD NOT wait --- for an indefinite period before sending the request body.' --- --- Since we would wait forever, I have disabled use of 100-continue for now. 
-sendMain :: Stream s => s -> Request_String -> IO () -> IO (Result Response_String) -sendMain conn rqst onSendComplete = do - --let str = if null (rqBody rqst) - -- then show rqst - -- else show (insertHeader HdrExpect "100-continue" rqst) - -- TODO review throwing away of result - _ <- writeBlock conn (show rqst) - -- write body immediately, don't wait for 100 CONTINUE - -- TODO review throwing away of result - _ <- writeBlock conn (rqBody rqst) - onSendComplete - rsp <- getResponseHead conn - switchResponse conn True False rsp rqst - --- reads and parses headers -getResponseHead :: Stream s => s -> IO (Result ResponseData) -getResponseHead conn = do - lor <- readTillEmpty1 stringBufferOp (readLine conn) - return $ lor >>= parseResponseHead - --- Hmmm, this could go bad if we keep getting "100 Continue" --- responses... Except this should never happen according --- to the RFC. -switchResponse :: Stream s - => s - -> Bool {- allow retry? -} - -> Bool {- is body sent? -} - -> Result ResponseData - -> Request_String - -> IO (Result Response_String) -switchResponse _ _ _ (Left e) _ = return (Left e) - -- retry on connreset? - -- if we attempt to use the same socket then there is an excellent - -- chance that the socket is not in a completely closed state. -switchResponse conn allow_retry bdy_sent (Right (cd,rn,hdrs)) rqst = - case matchResponse (rqMethod rqst) cd of - Continue - | not bdy_sent -> {- Time to send the body -} - do { val <- writeBlock conn (rqBody rqst) - ; case val of - Left e -> return (Left e) - Right _ -> - do { rsp <- getResponseHead conn - ; switchResponse conn allow_retry True rsp rqst - } - } - | otherwise -> {- keep waiting -} - do { rsp <- getResponseHead conn - ; switchResponse conn allow_retry bdy_sent rsp rqst - } - - Retry -> {- Request with "Expect" header failed. 
- Trouble is the request contains Expects - other than "100-Continue" -} - do { -- TODO review throwing away of result - _ <- writeBlock conn (show rqst ++ rqBody rqst) - ; rsp <- getResponseHead conn - ; switchResponse conn False bdy_sent rsp rqst - } - - Done -> do - when (findConnClose hdrs) - (closeOnEnd conn True) - return (Right $ Response cd rn hdrs "") - - DieHorribly str -> do - close conn - return $ responseParseError "sendHTTP" ("Invalid response: " ++ str) - - ExpectEntity -> - let tc = lookupHeader HdrTransferEncoding hdrs - cl = lookupHeader HdrContentLength hdrs - in - do { rslt <- case tc of - Nothing -> - case cl of - Just x -> linearTransfer (readBlock conn) (read x :: Int) - Nothing -> hopefulTransfer stringBufferOp {-null (++) []-} (readLine conn) [] - Just x -> - case map toLower (trim x) of - "chunked" -> chunkedTransfer stringBufferOp - (readLine conn) (readBlock conn) - _ -> uglyDeathTransfer "sendHTTP" - ; case rslt of - Left e -> close conn >> return (Left e) - Right (ftrs,bdy) -> do - when (findConnClose (hdrs++ftrs)) - (closeOnEnd conn True) - return (Right (Response cd rn (hdrs++ftrs) bdy)) - } - --- | Receive and parse a HTTP request from the given Stream. Should be used --- for server side interactions. -receiveHTTP :: Stream s => s -> IO (Result Request_String) -receiveHTTP conn = getRequestHead >>= processRequest - where - -- reads and parses headers - getRequestHead :: IO (Result RequestData) - getRequestHead = - do { lor <- readTillEmpty1 stringBufferOp (readLine conn) - ; return $ lor >>= parseRequestHead - } - - processRequest (Left e) = return $ Left e - processRequest (Right (rm,uri,hdrs)) = - do -- FIXME : Also handle 100-continue. 
- let tc = lookupHeader HdrTransferEncoding hdrs - cl = lookupHeader HdrContentLength hdrs - rslt <- case tc of - Nothing -> - case cl of - Just x -> linearTransfer (readBlock conn) (read x :: Int) - Nothing -> return (Right ([], "")) -- hopefulTransfer "" - Just x -> - case map toLower (trim x) of - "chunked" -> chunkedTransfer stringBufferOp - (readLine conn) (readBlock conn) - _ -> uglyDeathTransfer "receiveHTTP" - - return $ do - (ftrs,bdy) <- rslt - return (Request uri rm (hdrs++ftrs) bdy) - --- | Very simple function, send a HTTP response over the given stream. This --- could be improved on to use different transfer types. -respondHTTP :: Stream s => s -> Response_String -> IO () -respondHTTP conn rsp = do -- TODO review throwing away of result - _ <- writeBlock conn (show rsp) - -- write body immediately, don't wait for 100 CONTINUE - -- TODO review throwing away of result - _ <- writeBlock conn (rspBody rsp) - return () diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Network/HTTP/Utils.hs cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Network/HTTP/Utils.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Network/HTTP/Utils.hs 2014-12-18 21:12:40.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Network/HTTP/Utils.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,111 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Network.HTTP.Utils --- Copyright : See LICENSE file --- License : BSD --- --- Maintainer : Ganesh Sittampalam --- Stability : experimental --- Portability : non-portable (not tested) --- --- Set of utility functions and definitions used by package modules. 
--- -module Network.HTTP.Utils - ( trim -- :: String -> String - , trimL -- :: String -> String - , trimR -- :: String -> String - - , crlf -- :: String - , lf -- :: String - , sp -- :: String - - , split -- :: Eq a => a -> [a] -> Maybe ([a],[a]) - , splitBy -- :: Eq a => a -> [a] -> [[a]] - - , readsOne -- :: Read a => (a -> b) -> b -> String -> b - - , dropWhileTail -- :: (a -> Bool) -> [a] -> [a] - , chopAtDelim -- :: Eq a => a -> [a] -> ([a],[a]) - - ) where - -import Data.Char -import Data.List ( elemIndex ) -import Data.Maybe ( fromMaybe ) - --- | @crlf@ is our beloved two-char line terminator. -crlf :: String -crlf = "\r\n" - --- | @lf@ is a tolerated line terminator, per RFC 2616 section 19.3. -lf :: String -lf = "\n" - --- | @sp@ lets you save typing one character. -sp :: String -sp = " " - --- | @split delim ls@ splits a list into two parts, the @delim@ occurring --- at the head of the second list. If @delim@ isn't in @ls@, @Nothing@ is --- returned. -split :: Eq a => a -> [a] -> Maybe ([a],[a]) -split delim list = case delim `elemIndex` list of - Nothing -> Nothing - Just x -> Just $ splitAt x list - --- | @trim str@ removes leading and trailing whitespace from @str@. -trim :: String -> String -trim xs = trimR (trimL xs) - --- | @trimL str@ removes leading whitespace (as defined by 'Data.Char.isSpace') --- from @str@. -trimL :: String -> String -trimL xs = dropWhile isSpace xs - --- | @trimL str@ removes trailing whitespace (as defined by 'Data.Char.isSpace') --- from @str@. -trimR :: String -> String -trimR str = fromMaybe "" $ foldr trimIt Nothing str - where - trimIt x (Just xs) = Just (x:xs) - trimIt x Nothing - | isSpace x = Nothing - | otherwise = Just [x] - --- | @splitMany delim ls@ removes the delimiter @delim@ from @ls@. 
-splitBy :: Eq a => a -> [a] -> [[a]] -splitBy _ [] = [] -splitBy c xs = - case break (==c) xs of - (_,[]) -> [xs] - (as,_:bs) -> as : splitBy c bs - --- | @readsOne f def str@ tries to 'read' @str@, taking --- the first result and passing it to @f@. If the 'read' --- doesn't succeed, return @def@. -readsOne :: Read a => (a -> b) -> b -> String -> b -readsOne f n str = - case reads str of - ((v,_):_) -> f v - _ -> n - - --- | @dropWhileTail p ls@ chops off trailing elements from @ls@ --- until @p@ returns @False@. -dropWhileTail :: (a -> Bool) -> [a] -> [a] -dropWhileTail f ls = - case foldr chop Nothing ls of { Just xs -> xs; Nothing -> [] } - where - chop x (Just xs) = Just (x:xs) - chop x _ - | f x = Nothing - | otherwise = Just [x] - --- | @chopAtDelim elt ls@ breaks up @ls@ into two at first occurrence --- of @elt@; @elt@ is elided too. If @elt@ does not occur, the second --- list is empty and the first is equal to @ls@. -chopAtDelim :: Eq a => a -> [a] -> ([a],[a]) -chopAtDelim elt xs = - case break (==elt) xs of - (_,[]) -> (xs,[]) - (as,_:bs) -> (as,bs) diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Network/HTTP.hs cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Network/HTTP.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Network/HTTP.hs 2014-12-18 21:12:40.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Network/HTTP.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,265 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Network.HTTP --- Copyright : See LICENSE file --- License : BSD --- --- Maintainer : Ganesh Sittampalam --- Stability : experimental --- Portability : non-portable (not tested) --- --- The 'Network.HTTP' module provides a simple interface for sending and --- receiving content over HTTP in Haskell. Here's how to fetch a document from --- a URL and return it as a String: --- --- > --- > simpleHTTP (getRequest "http://www.haskell.org/") >>= fmap (take 100) . 
getResponseBody --- > -- fetch document and return it (as a 'String'.) --- --- Other functions let you control the submission and transfer of HTTP --- 'Request's and 'Response's more carefully, letting you integrate the use --- of 'Network.HTTP' functionality into your application. --- --- The module also exports the main types of the package, 'Request' and 'Response', --- along with 'Header' and functions for working with these. --- --- The actual functionality is implemented by modules in the @Network.HTTP.*@ --- namespace, letting you either use the default implementation here --- by importing @Network.HTTP@ or, for more specific uses, selectively --- import the modules in @Network.HTTP.*@. To wit, more than one kind of --- representation of the bulk data that flows across a HTTP connection is --- supported. (see "Network.HTTP.HandleStream".) --- --- /NOTE:/ The 'Request' send actions will normalize the @Request@ prior to transmission. --- Normalization such as having the request path be in the expected form and, possibly, --- introduce a default @Host:@ header if one isn't already present. --- Normalization also takes the @"user:pass\@"@ portion out of the the URI, --- if it was supplied, and converts it into @Authorization: Basic$ header. --- If you do not --- want the requests tampered with, but sent as-is, please import and use the --- the "Network.HTTP.HandleStream" or "Network.HTTP.Stream" modules instead. They --- export the same functions, but leaves construction and any normalization of --- @Request@s to the user. --- --- /NOTE:/ This package only supports HTTP; it does not support HTTPS. --- Attempts to use HTTPS result in an error. 
------------------------------------------------------------------------------ -module Network.HTTP - ( module Network.HTTP.Base - , module Network.HTTP.Headers - - {- the functionality that the implementation modules, - Network.HTTP.HandleStream and Network.HTTP.Stream, - exposes: - -} - , simpleHTTP -- :: Request -> IO (Result Response) - , simpleHTTP_ -- :: Stream s => s -> Request -> IO (Result Response) - , sendHTTP -- :: Stream s => s -> Request -> IO (Result Response) - , sendHTTP_notify -- :: Stream s => s -> Request -> IO () -> IO (Result Response) - , receiveHTTP -- :: Stream s => s -> IO (Result Request) - , respondHTTP -- :: Stream s => s -> Response -> IO () - - , module Network.TCP - - , getRequest -- :: String -> Request_String - , headRequest -- :: String -> Request_String - , postRequest -- :: String -> Request_String - , postRequestWithBody -- :: String -> String -> String -> Request_String - - , getResponseBody -- :: Result (Request ty) -> IO ty - , getResponseCode -- :: Result (Request ty) -> IO ResponseCode - ) where - ------------------------------------------------------------------ ------------------- Imports -------------------------------------- ------------------------------------------------------------------ - -import Network.HTTP.Headers -import Network.HTTP.Base -import qualified Network.HTTP.HandleStream as S --- old implementation: import Network.HTTP.Stream -import Network.TCP -import Network.Stream ( Result ) -import Network.URI ( parseURI ) - -import Data.Maybe ( fromMaybe ) - -{- - Note: if you switch over/back to using Network.HTTP.Stream here, you'll - have to wrap the results from 'openStream' as Connections via 'hstreamToConnection' - prior to delegating to the Network.HTTP.Stream functions. --} - --- | @simpleHTTP req@ transmits the 'Request' @req@ by opening a /direct/, non-persistent --- connection to the HTTP server that @req@ is destined for, followed by transmitting --- it and gathering up the response as a 'Result'. 
Prior to sending the request, --- it is normalized (via 'normalizeRequest'). If you have to mediate the request --- via an HTTP proxy, you will have to normalize the request yourself. Or switch to --- using 'Network.Browser' instead. --- --- Examples: --- --- > simpleHTTP (getRequest "http://hackage.haskell.org/") --- > simpleHTTP (getRequest "http://hackage.haskell.org:8012/") - -simpleHTTP :: (HStream ty) => Request ty -> IO (Result (Response ty)) -simpleHTTP r = do - auth <- getAuth r - failHTTPS (rqURI r) - c <- openStream (host auth) (fromMaybe 80 (port auth)) - let norm_r = normalizeRequest defaultNormalizeRequestOptions{normDoClose=True} r - simpleHTTP_ c norm_r - --- | Identical to 'simpleHTTP', but acting on an already opened stream. -simpleHTTP_ :: HStream ty => HandleStream ty -> Request ty -> IO (Result (Response ty)) -simpleHTTP_ s r = do - let norm_r = normalizeRequest defaultNormalizeRequestOptions{normDoClose=True} r - S.sendHTTP s norm_r - --- | @sendHTTP hStream httpRequest@ transmits @httpRequest@ (after normalization) over --- @hStream@, but does not alter the status of the connection, nor request it to be --- closed upon receiving the response. -sendHTTP :: HStream ty => HandleStream ty -> Request ty -> IO (Result (Response ty)) -sendHTTP conn rq = do - let norm_r = normalizeRequest defaultNormalizeRequestOptions rq - S.sendHTTP conn norm_r - --- | @sendHTTP_notify hStream httpRequest action@ behaves like 'sendHTTP', but --- lets you supply an IO @action@ to execute once the request has been successfully --- transmitted over the connection. Useful when you want to set up tracing of --- request transmission and its performance. 
-sendHTTP_notify :: HStream ty - => HandleStream ty - -> Request ty - -> IO () - -> IO (Result (Response ty)) -sendHTTP_notify conn rq onSendComplete = do - let norm_r = normalizeRequest defaultNormalizeRequestOptions rq - S.sendHTTP_notify conn norm_r onSendComplete - --- | @receiveHTTP hStream@ reads a 'Request' from the 'HandleStream' @hStream@ -receiveHTTP :: HStream ty => HandleStream ty -> IO (Result (Request ty)) -receiveHTTP conn = S.receiveHTTP conn - --- | @respondHTTP hStream httpResponse@ transmits an HTTP 'Response' over --- the 'HandleStream' @hStream@. It could be used to implement simple web --- server interactions, performing the dual role to 'sendHTTP'. -respondHTTP :: HStream ty => HandleStream ty -> Response ty -> IO () -respondHTTP conn rsp = S.respondHTTP conn rsp - - --- | A convenience constructor for a GET 'Request'. --- --- If the URL isn\'t syntactically valid, the function raises an error. -getRequest - :: String -- ^URL to fetch - -> Request_String -- ^The constructed request -getRequest urlString = - case parseURI urlString of - Nothing -> error ("getRequest: Not a valid URL - " ++ urlString) - Just u -> mkRequest GET u - --- | A convenience constructor for a HEAD 'Request'. --- --- If the URL isn\'t syntactically valid, the function raises an error. -headRequest - :: String -- ^URL to fetch - -> Request_String -- ^The constructed request -headRequest urlString = - case parseURI urlString of - Nothing -> error ("headRequest: Not a valid URL - " ++ urlString) - Just u -> mkRequest HEAD u - --- | A convenience constructor for a POST 'Request'. --- --- If the URL isn\'t syntactically valid, the function raises an error. -postRequest - :: String -- ^URL to POST to - -> Request_String -- ^The constructed request -postRequest urlString = - case parseURI urlString of - Nothing -> error ("postRequest: Not a valid URL - " ++ urlString) - Just u -> mkRequest POST u - --- | A convenience constructor for a POST 'Request'. 
--- --- It constructs a request and sets the body as well as --- the Content-Type and Content-Length headers. The contents of the body --- are forced to calculate the value for the Content-Length header. --- --- If the URL isn\'t syntactically valid, the function raises an error. -postRequestWithBody - :: String -- ^URL to POST to - -> String -- ^Content-Type of body - -> String -- ^The body of the request - -> Request_String -- ^The constructed request -postRequestWithBody urlString typ body = - case parseURI urlString of - Nothing -> error ("postRequestWithBody: Not a valid URL - " ++ urlString) - Just u -> setRequestBody (mkRequest POST u) (typ, body) - --- | @getResponseBody response@ takes the response of a HTTP requesting action and --- tries to extricate the body of the 'Response' @response@. If the request action --- returned an error, an IO exception is raised. -getResponseBody :: Result (Response ty) -> IO ty -getResponseBody (Left err) = fail (show err) -getResponseBody (Right r) = return (rspBody r) - --- | @getResponseBody response@ takes the response of a HTTP requesting action and --- tries to extricate the status code of the 'Response' @response@. If the request action --- returned an error, an IO exception is raised. -getResponseCode :: Result (Response ty) -> IO ResponseCode -getResponseCode (Left err) = fail (show err) -getResponseCode (Right r) = return (rspCode r) - - --- --- * TODO --- - request pipelining --- - https upgrade (includes full TLS, i.e. SSL, implementation) --- - use of Stream classes will pay off --- - consider C implementation of encryption\/decryption --- - comm timeouts --- - MIME & entity stuff (happening in separate module) --- - support \"*\" uri-request-string for OPTIONS request method --- --- --- * Header notes: --- --- [@Host@] --- Required by HTTP\/1.1, if not supplied as part --- of a request a default Host value is extracted --- from the request-uri. 
--- --- [@Connection@] --- If this header is present in any request or --- response, and it's value is "close", then --- the current request\/response is the last --- to be allowed on that connection. --- --- [@Expect@] --- Should a request contain a body, an Expect --- header will be added to the request. The added --- header has the value \"100-continue\". After --- a 417 \"Expectation Failed\" response the request --- is attempted again without this added Expect --- header. --- --- [@TransferEncoding,ContentLength,...@] --- if request is inconsistent with any of these --- header values then you may not receive any response --- or will generate an error response (probably 4xx). --- --- --- * Response code notes --- Some response codes induce special behaviour: --- --- [@1xx@] \"100 Continue\" will cause any unsent request body to be sent. --- \"101 Upgrade\" will be returned. --- Other 1xx responses are ignored. --- --- [@417@] The reason for this code is \"Expectation failed\", indicating --- that the server did not like the Expect \"100-continue\" header --- added to a request. Receipt of 417 will induce another --- request attempt (without Expect header), unless no Expect header --- had been added (in which case 417 response is returned). diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Network/StreamDebugger.hs cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Network/StreamDebugger.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Network/StreamDebugger.hs 2014-12-18 21:12:40.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Network/StreamDebugger.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,103 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Network.StreamDebugger --- Copyright : See LICENSE file --- License : BSD --- --- Maintainer : Ganesh Sittampalam --- Stability : experimental --- Portability : non-portable (not tested) --- --- Implements debugging of @Stream@s. 
Originally part of Gray's\/Bringert's --- HTTP module. --- --- * Changes by Robin Bate Boerop : --- - Created. Made minor formatting changes. --- ------------------------------------------------------------------------------ -module Network.StreamDebugger - ( StreamDebugger - , debugStream - , debugByteStream - ) where - -import Network.Stream (Stream(..)) -import System.IO - ( Handle, hFlush, hPutStrLn, IOMode(AppendMode), hClose, openFile, - hSetBuffering, BufferMode(NoBuffering) - ) -import Network.TCP ( HandleStream, HStream, - StreamHooks(..), setStreamHooks, getStreamHooks ) - --- | Allows stream logging. Refer to 'debugStream' below. -data StreamDebugger x - = Dbg Handle x - -instance (Stream x) => Stream (StreamDebugger x) where - readBlock (Dbg h x) n = - do val <- readBlock x n - hPutStrLn h ("--readBlock " ++ show n) - hPutStrLn h (show val) - return val - readLine (Dbg h x) = - do val <- readLine x - hPutStrLn h ("--readLine") - hPutStrLn h (show val) - return val - writeBlock (Dbg h x) str = - do val <- writeBlock x str - hPutStrLn h ("--writeBlock" ++ show str) - hPutStrLn h (show val) - return val - close (Dbg h x) = - do hPutStrLn h "--closing..." - hFlush h - close x - hPutStrLn h "--closed." - hClose h - closeOnEnd (Dbg h x) f = - do hPutStrLn h ("--close-on-end.." ++ show f) - hFlush h - closeOnEnd x f - --- | Wraps a stream with logging I\/O. --- The first argument is a filename which is opened in @AppendMode@. -debugStream :: (Stream a) => FilePath -> a -> IO (StreamDebugger a) -debugStream file stream = - do h <- openFile file AppendMode - hPutStrLn h ("File \"" ++ file ++ "\" opened for appending.") - return (Dbg h stream) - -debugByteStream :: HStream ty => FilePath -> HandleStream ty -> IO (HandleStream ty) -debugByteStream file stream = do - sh <- getStreamHooks stream - case sh of - Just h - | hook_name h == file -> return stream -- reuse the stream hooks. 
- _ -> do - h <- openFile file AppendMode - hSetBuffering h NoBuffering - hPutStrLn h ("File \"" ++ file ++ "\" opened for appending.") - setStreamHooks stream (debugStreamHooks h file) - return stream - -debugStreamHooks :: HStream ty => Handle -> String -> StreamHooks ty -debugStreamHooks h nm = - StreamHooks - { hook_readBlock = \ toStr n val -> do - let eval = case val of { Left e -> Left e ; Right v -> Right $ toStr v} - hPutStrLn h ("--readBlock " ++ show n) - hPutStrLn h (either show show eval) - , hook_readLine = \ toStr val -> do - let eval = case val of { Left e -> Left e ; Right v -> Right $ toStr v} - hPutStrLn h ("--readLine") - hPutStrLn h (either show show eval) - , hook_writeBlock = \ toStr str val -> do - hPutStrLn h ("--writeBlock " ++ show val) - hPutStrLn h (toStr str) - , hook_close = do - hPutStrLn h "--closing..." - hFlush h - hClose h - , hook_name = nm - } diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Network/Stream.hs cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Network/Stream.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Network/Stream.hs 2014-12-18 21:12:40.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Network/Stream.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,91 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Network.Stream --- Copyright : See LICENSE file --- License : BSD --- --- Maintainer : Ganesh Sittampalam --- Stability : experimental --- Portability : non-portable (not tested) --- --- An library for creating abstract streams. Originally part of Gray's\/Bringert's --- HTTP module. --- --- * Changes by Robin Bate Boerop : --- - Removed unnecessary import statements. --- - Moved Debug code to StreamDebugger.hs --- - Moved Socket-related code to StreamSocket.hs. 
--- --- * Changes by Simon Foster: --- - Split Network.HTTPmodule up into to separate --- Network.[Stream,TCP,HTTP] modules ------------------------------------------------------------------------------ -module Network.Stream - ( Stream(..) - , ConnError(..) - , Result - , bindE - , fmapE - - , failParse -- :: String -> Result a - , failWith -- :: ConnError -> Result a - , failMisc -- :: String -> Result a - ) where - -import Control.Monad.Error - -data ConnError - = ErrorReset - | ErrorClosed - | ErrorParse String - | ErrorMisc String - deriving(Show,Eq) - -instance Error ConnError where - noMsg = strMsg "unknown error" - strMsg x = ErrorMisc x - --- in GHC 7.0 the Monad instance for Error no longer --- uses fail x = Left (strMsg x). failMisc is therefore --- used instead. -failMisc :: String -> Result a -failMisc x = failWith (strMsg x) - -failParse :: String -> Result a -failParse x = failWith (ErrorParse x) - -failWith :: ConnError -> Result a -failWith x = Left x - -bindE :: Result a -> (a -> Result b) -> Result b -bindE (Left e) _ = Left e -bindE (Right v) f = f v - -fmapE :: (a -> Result b) -> IO (Result a) -> IO (Result b) -fmapE f a = do - x <- a - case x of - Left e -> return (Left e) - Right r -> return (f r) - --- | This is the type returned by many exported network functions. -type Result a = Either ConnError {- error -} - a {- result -} - --- | Streams should make layering of TLS protocol easier in future, --- they allow reading/writing to files etc for debugging, --- they allow use of protocols other than TCP/IP --- and they allow customisation. --- --- Instances of this class should not trim --- the input in any way, e.g. leave LF on line --- endings etc. 
Unless that is exactly the behaviour --- you want from your twisted instances ;) -class Stream x where - readLine :: x -> IO (Result String) - readBlock :: x -> Int -> IO (Result String) - writeBlock :: x -> String -> IO (Result ()) - close :: x -> IO () - closeOnEnd :: x -> Bool -> IO () - -- ^ True => shutdown the connection when response has been read / end-of-stream - -- has been reached. diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Network/StreamSocket.hs cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Network/StreamSocket.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Network/StreamSocket.hs 2014-12-18 21:12:40.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Network/StreamSocket.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,93 +0,0 @@ -{-# OPTIONS_GHC -fno-warn-orphans #-} ------------------------------------------------------------------------------ --- | --- Module : Network.StreamSocket --- Copyright : See LICENSE file --- License : BSD --- --- Maintainer : Ganesh Sittampalam --- Stability : experimental --- Portability : non-portable (not tested) --- --- Socket Stream instance. Originally part of Gray's\/Bringert's HTTP module. --- --- * Changes by Robin Bate Boerop : --- - Made dependencies explicit in import statements. --- - Removed false dependencies in import statements. --- - Created separate module for instance Stream Socket. 
--- --- * Changes by Simon Foster: --- - Split module up into to sepearate Network.[Stream,TCP,HTTP] modules --- ------------------------------------------------------------------------------ -module Network.StreamSocket - ( handleSocketError - , myrecv - ) where - -import Network.Stream - ( Stream(..), ConnError(ErrorReset, ErrorMisc), Result - ) -import Network.Socket - ( Socket, getSocketOption, shutdown, send, recv, sClose - , ShutdownCmd(ShutdownBoth), SocketOption(SoError) - ) - -import Network.HTTP.Base ( catchIO ) -import Control.Monad (liftM) -import Control.Exception as Exception (IOException) -import System.IO.Error (isEOFError) - --- | Exception handler for socket operations. -handleSocketError :: Socket -> IOException -> IO (Result a) -handleSocketError sk e = - do se <- getSocketOption sk SoError - case se of - 0 -> ioError e - 10054 -> return $ Left ErrorReset -- reset - _ -> return $ Left $ ErrorMisc $ show se - -myrecv :: Socket -> Int -> IO String -myrecv sock len = - let handler e = if isEOFError e then return [] else ioError e - in catchIO (recv sock len) handler - -instance Stream Socket where - readBlock sk n = readBlockSocket sk n - readLine sk = readLineSocket sk - writeBlock sk str = writeBlockSocket sk str - close sk = do - -- This slams closed the connection (which is considered rude for TCP\/IP) - shutdown sk ShutdownBoth - sClose sk - closeOnEnd _sk _ = return () -- can't really deal with this, so do run the risk of leaking sockets here. - -readBlockSocket :: Socket -> Int -> IO (Result String) -readBlockSocket sk n = (liftM Right $ fn n) `catchIO` (handleSocketError sk) - where - fn x = do { str <- myrecv sk x - ; let len = length str - ; if len < x - then ( fn (x-len) >>= \more -> return (str++more) ) - else return str - } - --- Use of the following function is discouraged. --- The function reads in one character at a time, --- which causes many calls to the kernel recv() --- hence causes many context switches. 
-readLineSocket :: Socket -> IO (Result String) -readLineSocket sk = (liftM Right $ fn "") `catchIO` (handleSocketError sk) - where - fn str = do - c <- myrecv sk 1 -- like eating through a straw. - if null c || c == "\n" - then return (reverse str++c) - else fn (head c:str) - -writeBlockSocket :: Socket -> String -> IO (Result ()) -writeBlockSocket sk str = (liftM Right $ fn str) `catchIO` (handleSocketError sk) - where - fn [] = return () - fn x = send sk x >>= \i -> fn (drop i x) - diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Network/TCP.hs cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Network/TCP.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Network/TCP.hs 2014-12-18 21:12:40.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Network/TCP.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,421 +0,0 @@ -{-# LANGUAGE TypeSynonymInstances #-} ------------------------------------------------------------------------------ --- | --- Module : Network.TCP --- Copyright : See LICENSE file --- License : BSD --- --- Maintainer : Ganesh Sittampalam --- Stability : experimental --- Portability : non-portable (not tested) --- --- Some utility functions for working with the Haskell @network@ package. Mostly --- for internal use by the @Network.HTTP@ code, but --- ------------------------------------------------------------------------------ -module Network.TCP - ( Connection - , EndPoint(..) - , openTCPPort - , isConnectedTo - - , openTCPConnection - , socketConnection - , isTCPConnectedTo - - , HandleStream - , HStream(..) - - , StreamHooks(..) - , nullHooks - , setStreamHooks - , getStreamHooks - , hstreamToConnection - - ) where - -import Network.Socket - ( Socket, SocketOption(KeepAlive) - , SocketType(Stream), connect - , shutdown, ShutdownCmd(..) 
- , sClose, setSocketOption, getPeerName - , socket, Family(AF_UNSPEC), defaultProtocol, getAddrInfo - , defaultHints, addrFamily, withSocketsDo - , addrSocketType, addrAddress - ) -import qualified Network.Stream as Stream - ( Stream(readBlock, readLine, writeBlock, close, closeOnEnd) ) -import Network.Stream - ( ConnError(..) - , Result - , failWith - , failMisc - ) -import Network.BufferType - -import Network.HTTP.Base ( catchIO ) -import Network.Socket ( socketToHandle ) - -import Data.Char ( toLower ) -import Data.Word ( Word8 ) -import Control.Concurrent -import Control.Exception ( onException ) -import Control.Monad ( liftM, when ) -import System.IO ( Handle, hFlush, IOMode(..), hClose ) -import System.IO.Error ( isEOFError ) - -import qualified Data.ByteString as Strict -import qualified Data.ByteString.Lazy as Lazy - ------------------------------------------------------------------ ------------------- TCP Connections ------------------------------ ------------------------------------------------------------------ - --- | The 'Connection' newtype is a wrapper that allows us to make --- connections an instance of the Stream class, without GHC extensions. --- While this looks sort of like a generic reference to the transport --- layer it is actually TCP specific, which can be seen in the --- implementation of the 'Stream Connection' instance. -newtype Connection = Connection (HandleStream String) - -newtype HandleStream a = HandleStream {getRef :: MVar (Conn a)} - -data EndPoint = EndPoint { epHost :: String, epPort :: Int } - -instance Eq EndPoint where - EndPoint host1 port1 == EndPoint host2 port2 = - map toLower host1 == map toLower host2 && port1 == port2 - -data Conn a - = MkConn { connSock :: ! Socket - , connHandle :: Handle - , connBuffer :: BufferOp a - , connInput :: Maybe a - , connEndPoint :: EndPoint - , connHooks :: Maybe (StreamHooks a) - , connCloseEOF :: Bool -- True => close socket upon reaching end-of-stream. 
- } - | ConnClosed - deriving(Eq) - -hstreamToConnection :: HandleStream String -> Connection -hstreamToConnection h = Connection h - -connHooks' :: Conn a -> Maybe (StreamHooks a) -connHooks' ConnClosed{} = Nothing -connHooks' x = connHooks x - --- all of these are post-op hooks -data StreamHooks ty - = StreamHooks - { hook_readLine :: (ty -> String) -> Result ty -> IO () - , hook_readBlock :: (ty -> String) -> Int -> Result ty -> IO () - , hook_writeBlock :: (ty -> String) -> ty -> Result () -> IO () - , hook_close :: IO () - , hook_name :: String -- hack alert: name of the hook itself. - } - -instance Eq ty => Eq (StreamHooks ty) where - (==) _ _ = True - -nullHooks :: StreamHooks ty -nullHooks = StreamHooks - { hook_readLine = \ _ _ -> return () - , hook_readBlock = \ _ _ _ -> return () - , hook_writeBlock = \ _ _ _ -> return () - , hook_close = return () - , hook_name = "" - } - -setStreamHooks :: HandleStream ty -> StreamHooks ty -> IO () -setStreamHooks h sh = modifyMVar_ (getRef h) (\ c -> return c{connHooks=Just sh}) - -getStreamHooks :: HandleStream ty -> IO (Maybe (StreamHooks ty)) -getStreamHooks h = readMVar (getRef h) >>= return.connHooks - --- | @HStream@ overloads the use of 'HandleStream's, letting you --- overload the handle operations over the type that is communicated --- across the handle. It comes in handy for @Network.HTTP@ 'Request' --- and 'Response's as the payload representation isn't fixed, but overloaded. --- --- The library comes with instances for @ByteString@s and @String@, but --- should you want to plug in your own payload representation, defining --- your own @HStream@ instance _should_ be all that it takes. 
--- -class BufferType bufType => HStream bufType where - openStream :: String -> Int -> IO (HandleStream bufType) - openSocketStream :: String -> Int -> Socket -> IO (HandleStream bufType) - readLine :: HandleStream bufType -> IO (Result bufType) - readBlock :: HandleStream bufType -> Int -> IO (Result bufType) - writeBlock :: HandleStream bufType -> bufType -> IO (Result ()) - close :: HandleStream bufType -> IO () - closeQuick :: HandleStream bufType -> IO () - closeOnEnd :: HandleStream bufType -> Bool -> IO () - -instance HStream Strict.ByteString where - openStream = openTCPConnection - openSocketStream = socketConnection - readBlock c n = readBlockBS c n - readLine c = readLineBS c - writeBlock c str = writeBlockBS c str - close c = closeIt c Strict.null True - closeQuick c = closeIt c Strict.null False - closeOnEnd c f = closeEOF c f - -instance HStream Lazy.ByteString where - openStream = \ a b -> openTCPConnection_ a b True - openSocketStream = \ a b c -> socketConnection_ a b c True - readBlock c n = readBlockBS c n - readLine c = readLineBS c - writeBlock c str = writeBlockBS c str - close c = closeIt c Lazy.null True - closeQuick c = closeIt c Lazy.null False - closeOnEnd c f = closeEOF c f - -instance Stream.Stream Connection where - readBlock (Connection c) = Network.TCP.readBlock c - readLine (Connection c) = Network.TCP.readLine c - writeBlock (Connection c) = Network.TCP.writeBlock c - close (Connection c) = Network.TCP.close c - closeOnEnd (Connection c) f = Network.TCP.closeEOF c f - -instance HStream String where - openStream = openTCPConnection - openSocketStream = socketConnection - readBlock ref n = readBlockBS ref n - - -- This function uses a buffer, at this time the buffer is just 1000 characters. - -- (however many bytes this is is left to the user to decypher) - readLine ref = readLineBS ref - -- The 'Connection' object allows no outward buffering, - -- since in general messages are serialised in their entirety. 
- writeBlock ref str = writeBlockBS ref str -- (stringToBuf str) - - -- Closes a Connection. Connection will no longer - -- allow any of the other Stream functions. Notice that a Connection may close - -- at any time before a call to this function. This function is idempotent. - -- (I think the behaviour here is TCP specific) - close c = closeIt c null True - - -- Closes a Connection without munching the rest of the stream. - closeQuick c = closeIt c null False - - closeOnEnd c f = closeEOF c f - --- | @openTCPPort uri port@ establishes a connection to a remote --- host, using 'getHostByName' which possibly queries the DNS system, hence --- may trigger a network connection. -openTCPPort :: String -> Int -> IO Connection -openTCPPort uri port = openTCPConnection uri port >>= return.Connection - --- Add a "persistent" option? Current persistent is default. --- Use "Result" type for synchronous exception reporting? -openTCPConnection :: BufferType ty => String -> Int -> IO (HandleStream ty) -openTCPConnection uri port = openTCPConnection_ uri port False - -openTCPConnection_ :: BufferType ty => String -> Int -> Bool -> IO (HandleStream ty) -openTCPConnection_ uri port stashInput = do - -- HACK: uri is sometimes obtained by calling Network.URI.uriRegName, and this includes - -- the surrounding square brackets for an RFC 2732 host like [::1]. It's not clear whether - -- it should, or whether all call sites should be using something different instead, but - -- the simplest short-term fix is to strip any surrounding square brackets here. - -- It shouldn't affect any as this is the only situation they can occur - see RFC 3986. 
- let fixedUri = - case uri of - '[':(rest@(c:_)) | last rest == ']' - -> if c == 'v' || c == 'V' - then error $ "Unsupported post-IPv6 address " ++ uri - else init rest - _ -> uri - - - -- use withSocketsDo here in case the caller hasn't used it, which would make getAddrInfo fail on Windows - -- although withSocketsDo is supposed to wrap the entire program, in practice it is safe to use it locally - -- like this as it just does a once-only installation of a shutdown handler to run at program exit, - -- rather than actually shutting down after the action - addrinfos <- withSocketsDo $ getAddrInfo (Just $ defaultHints { addrFamily = AF_UNSPEC, addrSocketType = Stream }) (Just fixedUri) (Just . show $ port) - case addrinfos of - [] -> fail "openTCPConnection: getAddrInfo returned no address information" - (a:_) -> do - s <- socket (addrFamily a) Stream defaultProtocol - onException (do - setSocketOption s KeepAlive 1 - connect s (addrAddress a) - socketConnection_ fixedUri port s stashInput - ) (sClose s) - --- | @socketConnection@, like @openConnection@ but using a pre-existing 'Socket'. -socketConnection :: BufferType ty - => String - -> Int - -> Socket - -> IO (HandleStream ty) -socketConnection hst port sock = socketConnection_ hst port sock False - --- Internal function used to control the on-demand streaming of input --- for /lazy/ streams. 
-socketConnection_ :: BufferType ty - => String - -> Int - -> Socket - -> Bool - -> IO (HandleStream ty) -socketConnection_ hst port sock stashInput = do - h <- socketToHandle sock ReadWriteMode - mb <- case stashInput of { True -> liftM Just $ buf_hGetContents bufferOps h; _ -> return Nothing } - let conn = MkConn - { connSock = sock - , connHandle = h - , connBuffer = bufferOps - , connInput = mb - , connEndPoint = EndPoint hst port - , connHooks = Nothing - , connCloseEOF = False - } - v <- newMVar conn - return (HandleStream v) - -closeConnection :: HStream a => HandleStream a -> IO Bool -> IO () -closeConnection ref readL = do - -- won't hold onto the lock for the duration - -- we are draining it...ToDo: have Connection - -- into a shutting-down state so that other - -- threads will simply back off if/when attempting - -- to also close it. - c <- readMVar (getRef ref) - closeConn c `catchIO` (\_ -> return ()) - modifyMVar_ (getRef ref) (\ _ -> return ConnClosed) - where - -- Be kind to peer & close gracefully. - closeConn ConnClosed = return () - closeConn conn = do - let sk = connSock conn - hFlush (connHandle conn) - shutdown sk ShutdownSend - suck readL - hClose (connHandle conn) - shutdown sk ShutdownReceive - sClose sk - - suck :: IO Bool -> IO () - suck rd = do - f <- rd - if f then return () else suck rd - --- | Checks both that the underlying Socket is connected --- and that the connection peer matches the given --- host name (which is recorded locally). 
-isConnectedTo :: Connection -> EndPoint -> IO Bool -isConnectedTo (Connection conn) endPoint = do - v <- readMVar (getRef conn) - case v of - ConnClosed -> print "aa" >> return False - _ - | connEndPoint v == endPoint -> - catchIO (getPeerName (connSock v) >> return True) (const $ return False) - | otherwise -> return False - -isTCPConnectedTo :: HandleStream ty -> EndPoint -> IO Bool -isTCPConnectedTo conn endPoint = do - v <- readMVar (getRef conn) - case v of - ConnClosed -> return False - _ - | connEndPoint v == endPoint -> - catchIO (getPeerName (connSock v) >> return True) (const $ return False) - | otherwise -> return False - -readBlockBS :: HStream a => HandleStream a -> Int -> IO (Result a) -readBlockBS ref n = onNonClosedDo ref $ \ conn -> do - x <- bufferGetBlock ref n - maybe (return ()) - (\ h -> hook_readBlock h (buf_toStr $ connBuffer conn) n x) - (connHooks' conn) - return x - --- This function uses a buffer, at this time the buffer is just 1000 characters. --- (however many bytes this is is left for the user to decipher) -readLineBS :: HStream a => HandleStream a -> IO (Result a) -readLineBS ref = onNonClosedDo ref $ \ conn -> do - x <- bufferReadLine ref - maybe (return ()) - (\ h -> hook_readLine h (buf_toStr $ connBuffer conn) x) - (connHooks' conn) - return x - --- The 'Connection' object allows no outward buffering, --- since in general messages are serialised in their entirety. 
-writeBlockBS :: HandleStream a -> a -> IO (Result ()) -writeBlockBS ref b = onNonClosedDo ref $ \ conn -> do - x <- bufferPutBlock (connBuffer conn) (connHandle conn) b - maybe (return ()) - (\ h -> hook_writeBlock h (buf_toStr $ connBuffer conn) b x) - (connHooks' conn) - return x - -closeIt :: HStream ty => HandleStream ty -> (ty -> Bool) -> Bool -> IO () -closeIt c p b = do - closeConnection c (if b - then readLineBS c >>= \ x -> case x of { Right xs -> return (p xs); _ -> return True} - else return True) - conn <- readMVar (getRef c) - maybe (return ()) - (hook_close) - (connHooks' conn) - -closeEOF :: HandleStream ty -> Bool -> IO () -closeEOF c flg = modifyMVar_ (getRef c) (\ co -> return co{connCloseEOF=flg}) - -bufferGetBlock :: HStream a => HandleStream a -> Int -> IO (Result a) -bufferGetBlock ref n = onNonClosedDo ref $ \ conn -> do - case connInput conn of - Just c -> do - let (a,b) = buf_splitAt (connBuffer conn) n c - modifyMVar_ (getRef ref) (\ co -> return co{connInput=Just b}) - return (return a) - _ -> do - catchIO (buf_hGet (connBuffer conn) (connHandle conn) n >>= return.return) - (\ e -> - if isEOFError e - then do - when (connCloseEOF conn) $ catchIO (closeQuick ref) (\ _ -> return ()) - return (return (buf_empty (connBuffer conn))) - else return (failMisc (show e))) - -bufferPutBlock :: BufferOp a -> Handle -> a -> IO (Result ()) -bufferPutBlock ops h b = - catchIO (buf_hPut ops h b >> hFlush h >> return (return ())) - (\ e -> return (failMisc (show e))) - -bufferReadLine :: HStream a => HandleStream a -> IO (Result a) -bufferReadLine ref = onNonClosedDo ref $ \ conn -> do - case connInput conn of - Just c -> do - let (a,b0) = buf_span (connBuffer conn) (/='\n') c - let (newl,b1) = buf_splitAt (connBuffer conn) 1 b0 - modifyMVar_ (getRef ref) (\ co -> return co{connInput=Just b1}) - return (return (buf_append (connBuffer conn) a newl)) - _ -> catchIO - (buf_hGetLine (connBuffer conn) (connHandle conn) >>= - return . return . 
appendNL (connBuffer conn)) - (\ e -> - if isEOFError e - then do - when (connCloseEOF conn) $ catchIO (closeQuick ref) (\ _ -> return ()) - return (return (buf_empty (connBuffer conn))) - else return (failMisc (show e))) - where - -- yes, this s**ks.. _may_ have to be addressed if perf - -- suggests worthiness. - appendNL ops b = buf_snoc ops b nl - - nl :: Word8 - nl = fromIntegral (fromEnum '\n') - -onNonClosedDo :: HandleStream a -> (Conn a -> IO (Result b)) -> IO (Result b) -onNonClosedDo h act = do - x <- readMVar (getRef h) - case x of - ConnClosed{} -> return (failWith ErrorClosed) - _ -> act x - diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Setup.lhs cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Setup.lhs --- cabal-install-1.22-1.22.6.0/=unpacked-tar4=/Setup.lhs 2014-12-18 21:12:40.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar4=/Setup.lhs 1970-01-01 00:00:00.000000000 +0000 @@ -1,8 +0,0 @@ -#!/usr/bin/env runghc - -> module Main where - -> import Distribution.Simple - -> main :: IO () -> main = defaultMain diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar4=/test/Httpd.hs cabal-install-1.22-1.22.9.0/=unpacked-tar4=/test/Httpd.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar4=/test/Httpd.hs 2014-12-18 21:12:40.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar4=/test/Httpd.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,158 +0,0 @@ -{-# LANGUAGE CPP #-} - -module Httpd - ( Request, Response, Server - , mkResponse - , reqMethod, reqURI, reqHeaders, reqBody - , shed -#ifdef WARP_TESTS - , warp -#endif - ) - where - -import Control.Applicative -import Control.Arrow ( (***) ) -import Control.DeepSeq -import Control.Monad -import Control.Monad.Trans ( liftIO ) -import qualified Data.ByteString as B -import qualified Data.ByteString.Lazy as BL -import qualified Data.ByteString.Char8 as BC -import qualified Data.ByteString.Lazy.Char8 as BLC -#ifdef WARP_TESTS -import qualified Data.CaseInsensitive as CI -#endif -import Data.Maybe ( 
fromJust ) -import Network.URI ( URI, parseRelativeReference ) - -import Network.Socket - ( getAddrInfo, AddrInfo, defaultHints, addrAddress, addrFamily - , addrFlags, addrSocketType, AddrInfoFlag(AI_PASSIVE), socket, Family(AF_UNSPEC,AF_INET6) - , defaultProtocol, SocketType(Stream), listen, setSocketOption, SocketOption(ReuseAddr) - ) -#ifdef WARP_TESTS -#if MIN_VERSION_network(2,4,0) -import Network.Socket ( bind ) -#else -import Network.Socket ( bindSocket, Socket, SockAddr ) -#endif -#endif - -import qualified Network.Shed.Httpd as Shed - ( Request, Response(Response), initServer - , reqMethod, reqURI, reqHeaders, reqBody - ) -#ifdef WARP_TESTS -#if !MIN_VERSION_wai(3,0,0) -import qualified Data.Conduit.Lazy as Warp -#endif - -import qualified Network.HTTP.Types as Warp - ( Status(..) ) -import qualified Network.Wai as Warp -import qualified Network.Wai.Handler.Warp as Warp - ( runSettingsSocket, defaultSettings, setPort ) -#endif - -data Request = Request - { - reqMethod :: String, - reqURI :: URI, - reqHeaders :: [(String, String)], - reqBody :: String - } - -data Response = Response - { - respStatus :: Int, - respHeaders :: [(String, String)], - respBody :: String - } - -mkResponse :: Int -> [(String, String)] -> String -> Response -mkResponse = Response - -type Server = Int -> (Request -> IO Response) -> IO () - -shed :: Server -shed port handler = - () <$ Shed.initServer - port - (liftM responseToShed . handler . requestFromShed) - where - responseToShed (Response status hdrs body) = - Shed.Response status hdrs body - chomp = reverse . strip '\r' . reverse - strip c (c':str) | c == c' = str - strip c str = str - requestFromShed request = - Request - { - reqMethod = Shed.reqMethod request, - reqURI = Shed.reqURI request, - reqHeaders = map (id *** chomp) $ Shed.reqHeaders request, - reqBody = Shed.reqBody request - } - -#if !MIN_VERSION_bytestring(0,10,0) -instance NFData B.ByteString where - rnf = rnf . 
B.length -#endif - -#ifdef WARP_TESTS -#if !MIN_VERSION_network(2,4,0) -bind :: Socket -> SockAddr -> IO () -bind = bindSocket -#endif - -warp :: Bool -> Server -warp ipv6 port handler = do - addrinfos <- getAddrInfo (Just $ defaultHints { addrFamily = AF_UNSPEC, addrSocketType = Stream }) - (Just $ if ipv6 then "::1" else "127.0.0.1") - (Just . show $ port) - case addrinfos of - [] -> fail "Couldn't obtain address information in warp" - (addri:_) -> do - sock <- socket (addrFamily addri) Stream defaultProtocol - setSocketOption sock ReuseAddr 1 - bind sock (addrAddress addri) - listen sock 5 -#if MIN_VERSION_wai(3,0,0) - Warp.runSettingsSocket (Warp.setPort port Warp.defaultSettings) sock $ \warpRequest warpRespond -> do - request <- requestFromWarp warpRequest - response <- handler request - warpRespond (responseToWarp response) -#else - Warp.runSettingsSocket (Warp.setPort port Warp.defaultSettings) sock $ \warpRequest -> do - request <- requestFromWarp warpRequest - response <- handler request - return (responseToWarp response) -#endif - where - responseToWarp (Response status hdrs body) = - Warp.responseLBS - (Warp.Status status B.empty) - (map headerToWarp hdrs) - (BLC.pack body) - headerToWarp (name, value) = (CI.mk (BC.pack name), BC.pack value) - headerFromWarp (name, value) = - (BC.unpack (CI.original name), BC.unpack value) - requestFromWarp request = do -#if MIN_VERSION_wai(3,0,1) - body <- fmap BLC.unpack $ Warp.strictRequestBody request -#else - body <- fmap BLC.unpack $ Warp.lazyRequestBody request - body `deepseq` return () -#endif - return $ - Request - { - reqMethod = BC.unpack (Warp.requestMethod request), - reqURI = fromJust . parseRelativeReference . - BC.unpack . 
Warp.rawPathInfo $ - request, - reqHeaders = map headerFromWarp (Warp.requestHeaders request), - reqBody = body - } -#endif diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar4=/test/httpTests.hs cabal-install-1.22-1.22.9.0/=unpacked-tar4=/test/httpTests.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar4=/test/httpTests.hs 2014-12-18 21:12:40.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar4=/test/httpTests.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,668 +0,0 @@ -{-# LANGUAGE ImplicitParams, ViewPatterns, NoMonomorphismRestriction, CPP #-} -import Control.Concurrent - -import Control.Applicative ((<$)) -import Control.Concurrent (threadDelay) -import Control.Exception (try) -import qualified Data.ByteString.Lazy.Char8 as BL (pack) -import Data.Char (isSpace) -import qualified Data.Digest.Pure.MD5 as MD5 (md5) -import Data.List.Split (splitOn) -import Data.Maybe (fromJust) -import System.IO.Error (userError) - -import qualified Httpd -import qualified UnitTests - -import Network.Browser -import Network.HTTP -import Network.HTTP.Base -import Network.HTTP.Auth -import Network.HTTP.Headers -import Network.Stream (Result) -import Network.URI (uriPath, parseURI) - -import System.Environment (getArgs) -import System.Info (os) -import System.IO (getChar) - -import Test.Framework (defaultMainWithArgs, testGroup) -import Test.Framework.Providers.HUnit -import Test.HUnit - - -basicGetRequest :: (?testUrl :: ServerAddress) => Assertion -basicGetRequest = do - response <- simpleHTTP (getRequest (?testUrl "/basic/get")) - code <- getResponseCode response - assertEqual "HTTP status code" (2, 0, 0) code - body <- getResponseBody response - assertEqual "Receiving expected response" "It works." 
body - -basicGetRequestLBS :: (?testUrl :: ServerAddress) => Assertion -basicGetRequestLBS = do - response <- simpleHTTP (mkRequest GET (fromJust (parseURI (?testUrl ("/basic/get"))))) - code <- getResponseCode response - assertEqual "HTTP status code" (2, 0, 0) code - body <- getResponseBody response - assertEqual "Receiving expected response" (BL.pack "It works.") body - -basicHeadRequest :: (?testUrl :: ServerAddress) => Assertion -basicHeadRequest = do - response <- simpleHTTP (headRequest (?testUrl "/basic/head")) - code <- getResponseCode response - assertEqual "HTTP status code" (2, 0, 0) code - body <- getResponseBody response - -- the body should be empty, since this is a HEAD request - assertEqual "Receiving expected response" "" body - -basicExample :: (?testUrl :: ServerAddress) => Assertion -basicExample = do - result <- - -- sample code from Network.HTTP haddock, with URL changed - -- Note there's also a copy of the example in the .cabal file - simpleHTTP (getRequest (?testUrl "/basic/example")) >>= fmap (take 100) . 
getResponseBody - assertEqual "Receiving expected response" (take 100 haskellOrgText) result - -secureGetRequest :: (?secureTestUrl :: ServerAddress) => Assertion -secureGetRequest = do - response <- try $ simpleHTTP (getRequest (?secureTestUrl "/anything")) - assertEqual "Threw expected exception" - (Left (userError "https not supported")) - (fmap show response) -- fmap show because Response isn't in Eq - -basicPostRequest :: (?testUrl :: ServerAddress) => Assertion -basicPostRequest = do - let sendBody = "body" - response <- simpleHTTP $ postRequestWithBody (?testUrl "/basic/post") - "text/plain" - sendBody - code <- getResponseCode response - assertEqual "HTTP status code" (2, 0, 0) code - body <- getResponseBody response - assertEqual "Receiving expected response" - (show (Just "text/plain", Just "4", sendBody)) - body - -userpwAuthFailure :: (?baduserpwUrl :: ServerAddress) => Assertion -userpwAuthFailure = do - response <- simpleHTTP (getRequest (?baduserpwUrl "/auth/basic")) - code <- getResponseCode response - body <- getResponseBody response - assertEqual "HTTP status code" ((4, 0, 1), - "Just \"Basic dGVzdDp3cm9uZ3B3ZA==\"") (code, body) - -- in case of 401, the server returns the contents of the Authz header - -userpwAuthSuccess :: (?userpwUrl :: ServerAddress) => Assertion -userpwAuthSuccess = do - response <- simpleHTTP (getRequest (?userpwUrl "/auth/basic")) - code <- getResponseCode response - body <- getResponseBody response - assertEqual "Receiving expected response" ((2, 0, 0), "Here's the secret") (code, body) - -basicAuthFailure :: (?testUrl :: ServerAddress) => Assertion -basicAuthFailure = do - response <- simpleHTTP (getRequest (?testUrl "/auth/basic")) - code <- getResponseCode response - body <- getResponseBody response - assertEqual "HTTP status code" ((4, 0, 1), "Nothing") (code, body) - -credentialsBasic :: (?testUrl :: ServerAddress) => Authority -credentialsBasic = AuthBasic "Testing realm" "test" "password" - (fromJust . parseURI . 
?testUrl $ "/auth/basic") - -basicAuthSuccess :: (?testUrl :: ServerAddress) => Assertion -basicAuthSuccess = do - let req = getRequest (?testUrl "/auth/basic") - let authString = withAuthority credentialsBasic req - let reqWithAuth = req { rqHeaders = mkHeader HdrAuthorization authString:rqHeaders req } - response <- simpleHTTP reqWithAuth - code <- getResponseCode response - body <- getResponseBody response - assertEqual "Receiving expected response" ((2, 0, 0), "Here's the secret") (code, body) - -utf8URLEncode :: Assertion -utf8URLEncode = do - assertEqual "Normal URL" (urlEncode "what-a_mess.com") "what-a_mess.com" - assertEqual "Chinese URL" (urlEncode "好") "%E5%A5%BD" - assertEqual "Russian URL" (urlEncode "ололо") "%D0%BE%D0%BB%D0%BE%D0%BB%D0%BE" - -utf8URLDecode :: Assertion -utf8URLDecode = do - assertEqual "Normal URL" (urlDecode "what-a_mess.com") "what-a_mess.com" - assertEqual "Mixed URL" (urlDecode "UTFin进入-wow") "UTFin进入-wow" - assertEqual "Chinese URL" (urlDecode "%E5%A5%BD") "好" - assertEqual "Russian URL" (urlDecode "%D0%BE%D0%BB%D0%BE%D0%BB%D0%BE") "ололо" - -browserExample :: (?testUrl :: ServerAddress) => Assertion -browserExample = do - result <- - -- sample code from Network.Browser haddock, with URL changed - -- Note there's also a copy of the example in the .cabal file - do - (_, rsp) - <- Network.Browser.browse $ do - setAllowRedirects True -- handle HTTP redirects - request $ getRequest (?testUrl "/browser/example") - return (take 100 (rspBody rsp)) - assertEqual "Receiving expected response" (take 100 haskellOrgText) result - --- A vanilla HTTP request using Browser shouln't send a cookie header -browserNoCookie :: (?testUrl :: ServerAddress) => Assertion -browserNoCookie = do - (_, response) <- browse $ do - setOutHandler (const $ return ()) - request $ getRequest (?testUrl "/browser/no-cookie") - let code = rspCode response - assertEqual "HTTP status code" (2, 0, 0) code - - --- Regression test --- * Browser sends vanilla request to 
server --- * Server sets one cookie "hello=world" --- * Browser sends a second request --- --- Expected: Server gets single cookie with "hello=world" --- Actual: Server gets 3 extra cookies, which are actually cookie attributes: --- "$Version=0;hello=world;$Domain=localhost:8080\r" -browserOneCookie :: (?testUrl :: ServerAddress) => Assertion -browserOneCookie = do - (_, response) <- browse $ do - setOutHandler (const $ return ()) - -- This first requests returns a single Set-Cookie: hello=world - _ <- request $ getRequest (?testUrl "/browser/one-cookie/1") - - -- This second request should send a single Cookie: hello=world - request $ getRequest (?testUrl "/browser/one-cookie/2") - let body = rspBody response - assertEqual "Receiving expected response" "" body - let code = rspCode response - assertEqual "HTTP status code" (2, 0, 0) code - -browserTwoCookies :: (?testUrl :: ServerAddress) => Assertion -browserTwoCookies = do - (_, response) <- browse $ do - setOutHandler (const $ return ()) - -- This first request returns two cookies - _ <- request $ getRequest (?testUrl "/browser/two-cookies/1") - - -- This second request should send them back - request $ getRequest (?testUrl "/browser/two-cookies/2") - let body = rspBody response - assertEqual "Receiving expected response" "" body - let code = rspCode response - assertEqual "HTTP status code" (2, 0, 0) code - - -browserFollowsRedirect :: (?testUrl :: ServerAddress) => Int -> Assertion -browserFollowsRedirect n = do - (_, response) <- browse $ do - setOutHandler (const $ return ()) - request $ getRequest (?testUrl "/browser/redirect/relative/" ++ show n ++ "/basic/get") - assertEqual "Receiving expected response from server" - ((2, 0, 0), "It works.") - (rspCode response, rspBody response) - -browserReturnsRedirect :: (?testUrl :: ServerAddress) => Int -> Assertion -browserReturnsRedirect n = do - (_, response) <- browse $ do - setOutHandler (const $ return ()) - request $ getRequest (?testUrl 
"/browser/redirect/relative/" ++ show n ++ "/basic/get") - assertEqual "Receiving expected response from server" - ((n `div` 100, n `mod` 100 `div` 10, n `mod` 10), "") - (rspCode response, rspBody response) - -authGenBasic _ "Testing realm" = return $ Just ("test", "password") -authGenBasic _ realm = fail $ "Unexpected realm " ++ realm - -browserBasicAuth :: (?testUrl :: ServerAddress) => Assertion -browserBasicAuth = do - (_, response) <- browse $ do - setOutHandler (const $ return ()) - - setAuthorityGen authGenBasic - - request $ getRequest (?testUrl "/auth/basic") - - assertEqual "Receiving expected response from server" - ((2, 0, 0), "Here's the secret") - (rspCode response, rspBody response) - -authGenDigest _ "Digest testing realm" = return $ Just ("test", "digestpassword") -authGenDigest _ realm = fail $ "Unexpected digest realm " ++ realm - -browserDigestAuth :: (?testUrl :: ServerAddress) => Assertion -browserDigestAuth = do - (_, response) <- browse $ do - setOutHandler (const $ return ()) - - setAuthorityGen authGenDigest - - request $ getRequest (?testUrl "/auth/digest") - - assertEqual "Receiving expected response from server" - ((2, 0, 0), "Here's the digest secret") - (rspCode response, rspBody response) - - - -browserAlt :: (?altTestUrl :: ServerAddress) => Assertion -browserAlt = do - (response) <- browse $ do - - setOutHandler (const $ return ()) - - (_, response1) <- request $ getRequest (?altTestUrl "/basic/get") - - return response1 - - assertEqual "Receiving expected response from alternate server" - ((2, 0, 0), "This is the alternate server.") - (rspCode response, rspBody response) - --- test that requests to multiple servers on the same host --- don't get confused with each other -browserBoth :: (?testUrl :: ServerAddress, ?altTestUrl :: ServerAddress) => Assertion -browserBoth = do - (response1, response2) <- browse $ do - setOutHandler (const $ return ()) - - (_, response1) <- request $ getRequest (?testUrl "/basic/get") - (_, response2) 
<- request $ getRequest (?altTestUrl "/basic/get") - - return (response1, response2) - - assertEqual "Receiving expected response from main server" - ((2, 0, 0), "It works.") - (rspCode response1, rspBody response1) - - assertEqual "Receiving expected response from alternate server" - ((2, 0, 0), "This is the alternate server.") - (rspCode response2, rspBody response2) - --- test that requests to multiple servers on the same host --- don't get confused with each other -browserBothReversed :: (?testUrl :: ServerAddress, ?altTestUrl :: ServerAddress) => Assertion -browserBothReversed = do - (response1, response2) <- browse $ do - setOutHandler (const $ return ()) - - (_, response2) <- request $ getRequest (?altTestUrl "/basic/get") - (_, response1) <- request $ getRequest (?testUrl "/basic/get") - - return (response1, response2) - - assertEqual "Receiving expected response from main server" - ((2, 0, 0), "It works.") - (rspCode response1, rspBody response1) - - assertEqual "Receiving expected response from alternate server" - ((2, 0, 0), "This is the alternate server.") - (rspCode response2, rspBody response2) - -browserSecureRequest :: (?secureTestUrl :: ServerAddress) => Assertion -browserSecureRequest = do - res <- try $ browse $ do - setOutHandler (const $ return ()) - - request $ getRequest (?secureTestUrl "/anything") - - assertEqual "Threw expected exception" - (Left (userError "https not supported")) - (fmap show res) -- fmap show because Response isn't in Eq - --- in case it tries to reuse the connection -browserSecureRequestAfterInsecure :: (?testUrl :: ServerAddress, ?secureTestUrl :: ServerAddress) => Assertion -browserSecureRequestAfterInsecure = do - res <- try $ browse $ do - setOutHandler (const $ return ()) - - request $ getRequest (?testUrl "/basic/get") - request $ getRequest (?secureTestUrl "/anything") - - assertEqual "Threw expected exception" - (Left (userError "https not supported")) - (fmap show res) -- fmap show because Response isn't in Eq 
- -browserRedirectToSecure :: (?testUrl :: ServerAddress, ?secureTestUrl :: ServerAddress) => Assertion -browserRedirectToSecure = do - res <- try $ browse $ do - setOutHandler (const $ return ()) - setErrHandler fail - - request $ getRequest (?testUrl "/browser/redirect/secure/301/anything") - - assertEqual "Threw expected exception" - (Left (userError $ "Unable to handle redirect, unsupported scheme: " ++ ?secureTestUrl "/anything")) - (fmap show res) -- fmap show because Response isn't in Eq - -browserTwoRequests :: (?testUrl :: ServerAddress) => Assertion -browserTwoRequests = do - (response1, response2) <- browse $ do - setOutHandler (const $ return ()) - - (_, response1) <- request $ getRequest (?testUrl "/basic/get") - (_, response2) <- request $ getRequest (?testUrl "/basic/get2") - - return (response1, response2) - - assertEqual "Receiving expected response from main server" - ((2, 0, 0), "It works.") - (rspCode response1, rspBody response1) - - assertEqual "Receiving expected response from main server" - ((2, 0, 0), "It works (2).") - (rspCode response2, rspBody response2) - - -browserTwoRequestsAlt :: (?altTestUrl :: ServerAddress) => Assertion -browserTwoRequestsAlt = do - (response1, response2) <- browse $ do - - setOutHandler (const $ return ()) - - (_, response1) <- request $ getRequest (?altTestUrl "/basic/get") - (_, response2) <- request $ getRequest (?altTestUrl "/basic/get2") - - return (response1, response2) - - assertEqual "Receiving expected response from alternate server" - ((2, 0, 0), "This is the alternate server.") - (rspCode response1, rspBody response1) - - assertEqual "Receiving expected response from alternate server" - ((2, 0, 0), "This is the alternate server (2).") - (rspCode response2, rspBody response2) - -browserTwoRequestsBoth :: (?testUrl :: ServerAddress, ?altTestUrl :: ServerAddress) => Assertion -browserTwoRequestsBoth = do - (response1, response2, response3, response4) <- browse $ do - setOutHandler (const $ return ()) - - 
(_, response1) <- request $ getRequest (?testUrl "/basic/get") - (_, response2) <- request $ getRequest (?altTestUrl "/basic/get") - (_, response3) <- request $ getRequest (?testUrl "/basic/get2") - (_, response4) <- request $ getRequest (?altTestUrl "/basic/get2") - - return (response1, response2, response3, response4) - - assertEqual "Receiving expected response from main server" - ((2, 0, 0), "It works.") - (rspCode response1, rspBody response1) - - assertEqual "Receiving expected response from alternate server" - ((2, 0, 0), "This is the alternate server.") - (rspCode response2, rspBody response2) - - assertEqual "Receiving expected response from main server" - ((2, 0, 0), "It works (2).") - (rspCode response3, rspBody response3) - - assertEqual "Receiving expected response from alternate server" - ((2, 0, 0), "This is the alternate server (2).") - (rspCode response4, rspBody response4) - -hasPrefix :: String -> String -> Maybe String -hasPrefix [] ys = Just ys -hasPrefix (x:xs) (y:ys) | x == y = hasPrefix xs ys -hasPrefix _ _ = Nothing - -maybeRead :: Read a => String -> Maybe a -maybeRead s = - case reads s of - [(v, "")] -> Just v - _ -> Nothing - -splitFields = map (toPair '=' . trim isSpace) . splitOn "," - -toPair c str = case break (==c) str of - (left, _:right) -> (left, right) - _ -> error $ "No " ++ show c ++ " in " ++ str -trim f = dropWhile f . reverse . dropWhile f . reverse - -isSubsetOf xs ys = all (`elem` ys) xs - --- first bits of result text from haskell.org (just to give some representative text) -haskellOrgText = - "\ -\\t\ -\\t\ -\\t\t\ -\\t\t\t\t" - -digestMatch - username realm password - nonce opaque - method relativeURI makeAbsolute - headers - = - common `isSubsetOf` headers && (relative `isSubsetOf` headers || absolute `isSubsetOf` headers) - where - common = [("username", show username), ("realm", show realm), ("nonce", show nonce), - ("opaque", show opaque)] - md5 = show . MD5.md5 . 
BL.pack - ha1 = md5 (username++":"++realm++":"++password) - ha2 uri = md5 (method++":"++uri) - response uri = md5 (ha1 ++ ":" ++ nonce ++ ":" ++ ha2 uri) - mkUncommon uri hash = [("uri", show uri), ("response", show hash)] - relative = mkUncommon relativeURI (response relativeURI) - absoluteURI = makeAbsolute relativeURI - absolute = mkUncommon absoluteURI (response absoluteURI) - -processRequest :: (?testUrl :: ServerAddress, ?secureTestUrl :: ServerAddress) - => Httpd.Request - -> IO Httpd.Response -processRequest req = do - case (Httpd.reqMethod req, Network.URI.uriPath (Httpd.reqURI req)) of - ("GET", "/basic/get") -> return $ Httpd.mkResponse 200 [] "It works." - ("GET", "/basic/get2") -> return $ Httpd.mkResponse 200 [] "It works (2)." - ("GET", "/basic/head") -> return $ Httpd.mkResponse 200 [] "Body for /basic/head." - ("HEAD", "/basic/head") -> return $ Httpd.mkResponse 200 [] "Body for /basic/head." - ("POST", "/basic/post") -> - let typ = lookup "Content-Type" (Httpd.reqHeaders req) - len = lookup "Content-Length" (Httpd.reqHeaders req) - body = Httpd.reqBody req - in return $ Httpd.mkResponse 200 [] (show (typ, len, body)) - - ("GET", "/basic/example") -> - return $ Httpd.mkResponse 200 [] haskellOrgText - - ("GET", "/auth/basic") -> - case lookup "Authorization" (Httpd.reqHeaders req) of - Just "Basic dGVzdDpwYXNzd29yZA==" -> return $ Httpd.mkResponse 200 [] "Here's the secret" - x -> return $ Httpd.mkResponse 401 [("WWW-Authenticate", "Basic realm=\"Testing realm\"")] (show x) - - ("GET", "/auth/digest") -> - case lookup "Authorization" (Httpd.reqHeaders req) of - Just (hasPrefix "Digest " -> Just (splitFields -> items)) - | digestMatch "test" "Digest testing realm" "digestpassword" - "87e4" "057d" - "GET" "/auth/digest" ?testUrl - items - -> return $ Httpd.mkResponse 200 [] "Here's the digest secret" - x -> return $ Httpd.mkResponse - 401 - [("WWW-Authenticate", - "Digest realm=\"Digest testing realm\", opaque=\"057d\", nonce=\"87e4\"")] - (show x) - 
- ("GET", "/browser/example") -> - return $ Httpd.mkResponse 200 [] haskellOrgText - ("GET", "/browser/no-cookie") -> - case lookup "Cookie" (Httpd.reqHeaders req) of - Nothing -> return $ Httpd.mkResponse 200 [] "" - Just s -> return $ Httpd.mkResponse 500 [] s - ("GET", "/browser/one-cookie/1") -> - return $ Httpd.mkResponse 200 [("Set-Cookie", "hello=world")] "" - ("GET", "/browser/one-cookie/2") -> - case lookup "Cookie" (Httpd.reqHeaders req) of - Just "hello=world" -> return $ Httpd.mkResponse 200 [] "" - Just s -> return $ Httpd.mkResponse 500 [] s - Nothing -> return $ Httpd.mkResponse 500 [] (show $ Httpd.reqHeaders req) - ("GET", "/browser/two-cookies/1") -> - return $ Httpd.mkResponse 200 - [("Set-Cookie", "hello=world") - ,("Set-Cookie", "goodbye=cruelworld")] - "" - ("GET", "/browser/two-cookies/2") -> - case lookup "Cookie" (Httpd.reqHeaders req) of - -- TODO generalise the cookie parsing to allow for whitespace/ordering variations - Just "goodbye=cruelworld; hello=world" -> return $ Httpd.mkResponse 200 [] "" - Just s -> return $ Httpd.mkResponse 500 [] s - Nothing -> return $ Httpd.mkResponse 500 [] (show $ Httpd.reqHeaders req) - ("GET", hasPrefix "/browser/redirect/relative/" -> Just (break (=='/') -> (maybeRead -> Just n, rest))) -> - return $ Httpd.mkResponse n [("Location", rest)] "" - ("GET", hasPrefix "/browser/redirect/absolute/" -> Just (break (=='/') -> (maybeRead -> Just n, rest))) -> - return $ Httpd.mkResponse n [("Location", ?testUrl rest)] "" - ("GET", hasPrefix "/browser/redirect/secure/" -> Just (break (=='/') -> (maybeRead -> Just n, rest))) -> - return $ Httpd.mkResponse n [("Location", ?secureTestUrl rest)] "" - _ -> return $ Httpd.mkResponse 500 [] "Unknown request" - -altProcessRequest :: Httpd.Request -> IO Httpd.Response -altProcessRequest req = do - case (Httpd.reqMethod req, Network.URI.uriPath (Httpd.reqURI req)) of - ("GET", "/basic/get") -> return $ Httpd.mkResponse 200 [] "This is the alternate server." 
- ("GET", "/basic/get2") -> return $ Httpd.mkResponse 200 [] "This is the alternate server (2)." - _ -> return $ Httpd.mkResponse 500 [] "Unknown request" - -maybeTestGroup True name xs = testGroup name xs -maybeTestGroup False name _ = testGroup name [] - -basicTests = - testGroup "Basic tests" - [ testCase "Basic GET request" basicGetRequest - , testCase "Basic GET request (lazy bytestring)" basicGetRequestLBS - , testCase "Network.HTTP example code" basicExample - , testCase "Secure GET request" secureGetRequest - , testCase "Basic POST request" basicPostRequest - , testCase "Basic HEAD request" basicHeadRequest - , testCase "URI user:pass Auth failure" userpwAuthFailure - , testCase "URI user:pass Auth success" userpwAuthSuccess - , testCase "Basic Auth failure" basicAuthFailure - , testCase "Basic Auth success" basicAuthSuccess - , testCase "UTF-8 urlEncode" utf8URLEncode - , testCase "UTF-8 urlDecode" utf8URLDecode - ] - -browserTests = - testGroup "Browser tests" - [ testGroup "Basic" - [ - testCase "Network.Browser example code" browserExample - , testCase "Two requests" browserTwoRequests - ] - , testGroup "Secure" - [ - testCase "Secure request" browserSecureRequest - , testCase "After insecure" browserSecureRequestAfterInsecure - , testCase "Redirection" browserRedirectToSecure - ] - , testGroup "Cookies" - [ testCase "No cookie header" browserNoCookie - , testCase "One cookie" browserOneCookie - , testCase "Two cookies" browserTwoCookies - ] - , testGroup "Redirection" - [ -- See http://en.wikipedia.org/wiki/List_of_HTTP_status_codes#3xx_Redirection - -- 300 Multiple Choices: client has to handle this - testCase "300" (browserReturnsRedirect 300) - -- 301 Moved Permanently: should follow - , testCase "301" (browserFollowsRedirect 301) - -- 302 Found: should follow - , testCase "302" (browserFollowsRedirect 302) - -- 303 See Other: should follow (directly for GETs) - , testCase "303" (browserFollowsRedirect 303) - -- 304 Not Modified: maybe Browser could 
do something intelligent based on - -- being given locally cached content and sending If-Modified-Since, but it - -- doesn't at the moment - , testCase "304" (browserReturnsRedirect 304) - -- 305 Use Proxy: test harness doesn't have a proxy (yet) - -- 306 Switch Proxy: obsolete - -- 307 Temporary Redirect: should follow - , testCase "307" (browserFollowsRedirect 307) - -- 308 Resume Incomplete: no support for Resumable HTTP so client has to handle this - , testCase "308" (browserReturnsRedirect 308) - ] - , testGroup "Authentication" - [ testCase "Basic" browserBasicAuth - , testCase "Digest" browserDigestAuth - ] - ] - -port80Tests = - testGroup "Multiple servers" - [ testCase "Alternate server" browserAlt - , testCase "Both servers" browserBoth - , testCase "Both servers (reversed)" browserBothReversed - , testCase "Two requests - alternate server" browserTwoRequestsAlt - , testCase "Two requests - both servers" browserTwoRequestsBoth - ] - -data InetFamily = IPv4 | IPv6 - -familyToLocalhost :: InetFamily -> String -familyToLocalhost IPv4 = "127.0.0.1" -familyToLocalhost IPv6 = "[::1]" - -urlRoot :: InetFamily -> String -> Int -> String -urlRoot fam userpw 80 = "http://" ++ userpw ++ familyToLocalhost fam -urlRoot fam userpw n = "http://" ++ userpw ++ familyToLocalhost fam ++ ":" ++ show n - -secureRoot :: InetFamily -> String -> Int -> String -secureRoot fam userpw 443 = "https://" ++ userpw ++ familyToLocalhost fam -secureRoot fam userpw n = "https://" ++ userpw ++ familyToLocalhost fam ++ ":" ++ show n - -type ServerAddress = String -> String - -httpAddress, httpsAddress :: InetFamily -> String -> Int -> ServerAddress -httpAddress fam userpw port p = urlRoot fam userpw port ++ p -httpsAddress fam userpw port p = secureRoot fam userpw port ++ p - -main :: IO () -main = do - args <- getArgs - - let servers = - [ ("httpd-shed", Httpd.shed, IPv4) -#ifdef WARP_TESTS - , ("warp.v6", Httpd.warp True, IPv6) - , ("warp.v4", Httpd.warp False, IPv4) -#endif - ] - 
basePortNum, altPortNum :: Int - basePortNum = 5812 - altPortNum = 80 - numberedServers = zip [basePortNum..] servers - - let setupNormalTests = do - flip mapM numberedServers $ \(portNum, (serverName, server, family)) -> do - let ?testUrl = httpAddress family "" portNum - ?userpwUrl = httpAddress family "test:password@" portNum - ?baduserpwUrl = httpAddress family "test:wrongpwd@" portNum - ?secureTestUrl = httpsAddress family "" portNum - _ <- forkIO $ server portNum processRequest - return $ testGroup serverName [basicTests, browserTests] - - let setupAltTests = do - let (portNum, (_, server,family)) = head numberedServers - let ?testUrl = httpAddress family "" portNum - ?altTestUrl = httpAddress family "" altPortNum - _ <- forkIO $ server altPortNum altProcessRequest - return port80Tests - - case args of - ["server"] -> do -- run only the harness servers for diagnostic/debug purposes - -- halt on any keypress - _ <- setupNormalTests - _ <- setupAltTests - _ <- getChar - return () - ("--withport80":args) -> do - normalTests <- setupNormalTests - altTests <- setupAltTests - _ <- threadDelay 1000000 -- Give the server time to start :-( - defaultMainWithArgs (UnitTests.unitTests ++ normalTests ++ [altTests]) args - args -> do -- run the test harness as normal - normalTests <- setupNormalTests - _ <- threadDelay 1000000 -- Give the server time to start :-( - defaultMainWithArgs (UnitTests.unitTests ++ normalTests) args diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar4=/test/UnitTests.hs cabal-install-1.22-1.22.9.0/=unpacked-tar4=/test/UnitTests.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar4=/test/UnitTests.hs 2014-12-18 21:12:40.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar4=/test/UnitTests.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,32 +0,0 @@ -module UnitTests ( unitTests ) where - -import Network.HTTP.Base -import Network.URI - -import Data.Maybe ( fromJust ) - -import Test.Framework ( testGroup ) -import Test.Framework.Providers.HUnit 
-import Test.HUnit - -parseIPv4Address :: Assertion -parseIPv4Address = - assertEqual "127.0.0.1 address is recognised" - (Just (URIAuthority {user = Nothing, password = Nothing, host = "127.0.0.1", port = Just 5313})) - (parseURIAuthority (uriToAuthorityString (fromJust (parseURI "http://127.0.0.1:5313/foo")))) - - -parseIPv6Address :: Assertion -parseIPv6Address = - assertEqual "::1 address" - (Just (URIAuthority {user = Nothing, password = Nothing, host = "::1", port = Just 5313})) - (parseURIAuthority (uriToAuthorityString (fromJust (parseURI "http://[::1]:5313/foo")))) - -unitTests = - [testGroup "Unit tests" - [ testGroup "URI parsing" - [ testCase "Parse IPv4 address" parseIPv4Address - , testCase "Parse IPv6 address" parseIPv6Address - ] - ] - ] diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar5=/CHANGELOG.markdown cabal-install-1.22-1.22.9.0/=unpacked-tar5=/CHANGELOG.markdown --- cabal-install-1.22-1.22.6.0/=unpacked-tar5=/CHANGELOG.markdown 2014-06-02 02:22:37.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar5=/CHANGELOG.markdown 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ -2.2.1 -------- -* Provide MINIMAL pragmas for `MonadState`, `MonadWriter`, `MonadReader` -* Added a cyclic definition of `ask` in terms of `reader` for consistency with `get`/`put` vs. `state` and `tell` vs. `writer` -* Fix deprecation warnings caused by `transformers` 0.4 deprecating `ErrorT`. -* Added `Control.Monad.Except` in the style of the other `mtl` re-export modules - -2.2.0.1 -------- -* Fixed a bug caused by the change in how `transformers` 0.4 exports its data types. We will now export `runFooT` for each transformer again! - -2.2 ---- -* `transformers` 0.4 support -* Added instances for `ExceptT` -* Added `modify'` to `Control.Monad.State.*` - -2.1.3.1 -------- -* Avoid importing `Control.Monad.Instances` on GHC 7.8 to build without deprecation warnings. 
- -2.1.3 ------ -* Removed the now-irrelevant `Error` constraint from the `MonadError` instance for `Either e`. diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/Cont/Class.hs cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/Cont/Class.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/Cont/Class.hs 2014-06-02 02:22:37.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/Cont/Class.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,134 +0,0 @@ -{- | -Module : Control.Monad.Cont.Class -Copyright : (c) The University of Glasgow 2001, - (c) Jeff Newbern 2003-2007, - (c) Andriy Palamarchuk 2007 -License : BSD-style (see the file LICENSE) - -Maintainer : libraries@haskell.org -Stability : experimental -Portability : portable - -[Computation type:] Computations which can be interrupted and resumed. - -[Binding strategy:] Binding a function to a monadic value creates -a new continuation which uses the function as the continuation of the monadic -computation. - -[Useful for:] Complex control structures, error handling, -and creating co-routines. - -[Zero and plus:] None. - -[Example type:] @'Cont' r a@ - -The Continuation monad represents computations in continuation-passing style -(CPS). -In continuation-passing style function result is not returned, -but instead is passed to another function, -received as a parameter (continuation). -Computations are built up from sequences -of nested continuations, terminated by a final continuation (often @id@) -which produces the final result. -Since continuations are functions which represent the future of a computation, -manipulation of the continuation functions can achieve complex manipulations -of the future of the computation, -such as interrupting a computation in the middle, aborting a portion -of a computation, restarting a computation, and interleaving execution of -computations. -The Continuation monad adapts CPS to the structure of a monad. 
- -Before using the Continuation monad, be sure that you have -a firm understanding of continuation-passing style -and that continuations represent the best solution to your particular -design problem. -Many algorithms which require continuations in other languages do not require -them in Haskell, due to Haskell's lazy semantics. -Abuse of the Continuation monad can produce code that is impossible -to understand and maintain. --} - -module Control.Monad.Cont.Class ( - MonadCont(..), - ) where - -import Control.Monad.Trans.Cont (ContT) -import qualified Control.Monad.Trans.Cont as ContT -import Control.Monad.Trans.Error as Error -import Control.Monad.Trans.Except as Except -import Control.Monad.Trans.Identity as Identity -import Control.Monad.Trans.List as List -import Control.Monad.Trans.Maybe as Maybe -import Control.Monad.Trans.Reader as Reader -import Control.Monad.Trans.RWS.Lazy as LazyRWS -import Control.Monad.Trans.RWS.Strict as StrictRWS -import Control.Monad.Trans.State.Lazy as LazyState -import Control.Monad.Trans.State.Strict as StrictState -import Control.Monad.Trans.Writer.Lazy as LazyWriter -import Control.Monad.Trans.Writer.Strict as StrictWriter - -import Control.Monad -import Data.Monoid - -class Monad m => MonadCont m where - {- | @callCC@ (call-with-current-continuation) - calls a function with the current continuation as its argument. - Provides an escape continuation mechanism for use with Continuation monads. - Escape continuations allow to abort the current computation and return - a value immediately. - They achieve a similar effect to 'Control.Monad.Error.throwError' - and 'Control.Monad.Error.catchError' - within an 'Control.Monad.Error.Error' monad. - Advantage of this function over calling @return@ is that it makes - the continuation explicit, - allowing more flexibility and better control - (see examples in "Control.Monad.Cont"). - - The standard idiom used with @callCC@ is to provide a lambda-expression - to name the continuation. 
Then calling the named continuation anywhere - within its scope will escape from the computation, - even if it is many layers deep within nested computations. - -} - callCC :: ((a -> m b) -> m a) -> m a - -instance MonadCont (ContT r m) where - callCC = ContT.callCC - --- --------------------------------------------------------------------------- --- Instances for other mtl transformers - -instance (Error e, MonadCont m) => MonadCont (ErrorT e m) where - callCC = Error.liftCallCC callCC - -instance MonadCont m => MonadCont (ExceptT e m) where - callCC = Except.liftCallCC callCC - -instance MonadCont m => MonadCont (IdentityT m) where - callCC = Identity.liftCallCC callCC - -instance MonadCont m => MonadCont (ListT m) where - callCC = List.liftCallCC callCC - -instance MonadCont m => MonadCont (MaybeT m) where - callCC = Maybe.liftCallCC callCC - -instance MonadCont m => MonadCont (ReaderT r m) where - callCC = Reader.liftCallCC callCC - -instance (Monoid w, MonadCont m) => MonadCont (LazyRWS.RWST r w s m) where - callCC = LazyRWS.liftCallCC' callCC - -instance (Monoid w, MonadCont m) => MonadCont (StrictRWS.RWST r w s m) where - callCC = StrictRWS.liftCallCC' callCC - -instance MonadCont m => MonadCont (LazyState.StateT s m) where - callCC = LazyState.liftCallCC' callCC - -instance MonadCont m => MonadCont (StrictState.StateT s m) where - callCC = StrictState.liftCallCC' callCC - -instance (Monoid w, MonadCont m) => MonadCont (LazyWriter.WriterT w m) where - callCC = LazyWriter.liftCallCC callCC - -instance (Monoid w, MonadCont m) => MonadCont (StrictWriter.WriterT w m) where - callCC = StrictWriter.liftCallCC callCC diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/Cont.hs cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/Cont.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/Cont.hs 2014-06-02 02:22:37.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/Cont.hs 1970-01-01 00:00:00.000000000 +0000 
@@ -1,169 +0,0 @@ -{- | -Module : Control.Monad.Cont -Copyright : (c) The University of Glasgow 2001, - (c) Jeff Newbern 2003-2007, - (c) Andriy Palamarchuk 2007 -License : BSD-style (see the file LICENSE) - -Maintainer : libraries@haskell.org -Stability : experimental -Portability : portable - -[Computation type:] Computations which can be interrupted and resumed. - -[Binding strategy:] Binding a function to a monadic value creates -a new continuation which uses the function as the continuation of the monadic -computation. - -[Useful for:] Complex control structures, error handling, -and creating co-routines. - -[Zero and plus:] None. - -[Example type:] @'Cont' r a@ - -The Continuation monad represents computations in continuation-passing style -(CPS). -In continuation-passing style function result is not returned, -but instead is passed to another function, -received as a parameter (continuation). -Computations are built up from sequences -of nested continuations, terminated by a final continuation (often @id@) -which produces the final result. -Since continuations are functions which represent the future of a computation, -manipulation of the continuation functions can achieve complex manipulations -of the future of the computation, -such as interrupting a computation in the middle, aborting a portion -of a computation, restarting a computation, and interleaving execution of -computations. -The Continuation monad adapts CPS to the structure of a monad. - -Before using the Continuation monad, be sure that you have -a firm understanding of continuation-passing style -and that continuations represent the best solution to your particular -design problem. -Many algorithms which require continuations in other languages do not require -them in Haskell, due to Haskell's lazy semantics. -Abuse of the Continuation monad can produce code that is impossible -to understand and maintain. 
--} - -module Control.Monad.Cont ( - -- * MonadCont class - MonadCont(..), - -- * The Cont monad - Cont, - cont, - runCont, - mapCont, - withCont, - -- * The ContT monad transformer - ContT(ContT), - runContT, - mapContT, - withContT, - module Control.Monad, - module Control.Monad.Trans, - -- * Example 1: Simple Continuation Usage - -- $simpleContExample - - -- * Example 2: Using @callCC@ - -- $callCCExample - - -- * Example 3: Using @ContT@ Monad Transformer - -- $ContTExample - ) where - -import Control.Monad.Cont.Class - -import Control.Monad.Trans -import Control.Monad.Trans.Cont - -import Control.Monad - -{- $simpleContExample -Calculating length of a list continuation-style: - ->calculateLength :: [a] -> Cont r Int ->calculateLength l = return (length l) - -Here we use @calculateLength@ by making it to pass its result to @print@: - ->main = do -> runCont (calculateLength "123") print -> -- result: 3 - -It is possible to chain 'Cont' blocks with @>>=@. - ->double :: Int -> Cont r Int ->double n = return (n * 2) -> ->main = do -> runCont (calculateLength "123" >>= double) print -> -- result: 6 --} - -{- $callCCExample -This example gives a taste of how escape continuations work, shows a typical -pattern for their usage. - ->-- Returns a string depending on the length of the name parameter. ->-- If the provided string is empty, returns an error. ->-- Otherwise, returns a welcome message. ->whatsYourName :: String -> String ->whatsYourName name = -> (`runCont` id) $ do -- 1 -> response <- callCC $ \exit -> do -- 2 -> validateName name exit -- 3 -> return $ "Welcome, " ++ name ++ "!" -- 4 -> return response -- 5 -> ->validateName name exit = do -> when (null name) (exit "You forgot to tell me your name!") - -Here is what this example does: - -(1) Runs an anonymous 'Cont' block and extracts value from it with -@(\`runCont\` id)@. Here @id@ is the continuation, passed to the @Cont@ block. 
- -(1) Binds @response@ to the result of the following 'Control.Monad.Cont.Class.callCC' block, -binds @exit@ to the continuation. - -(1) Validates @name@. -This approach illustrates advantage of using 'Control.Monad.Cont.Class.callCC' over @return@. -We pass the continuation to @validateName@, -and interrupt execution of the @Cont@ block from /inside/ of @validateName@. - -(1) Returns the welcome message from the 'Control.Monad.Cont.Class.callCC' block. -This line is not executed if @validateName@ fails. - -(1) Returns from the @Cont@ block. --} - -{-$ContTExample -'ContT' can be used to add continuation handling to other monads. -Here is an example how to combine it with @IO@ monad: - ->import Control.Monad.Cont ->import System.IO -> ->main = do -> hSetBuffering stdout NoBuffering -> runContT (callCC askString) reportResult -> ->askString :: (String -> ContT () IO String) -> ContT () IO String ->askString next = do -> liftIO $ putStrLn "Please enter a string" -> s <- liftIO $ getLine -> next s -> ->reportResult :: String -> IO () ->reportResult s = do -> putStrLn ("You entered: " ++ s) - -Action @askString@ requests user to enter a string, -and passes it to the continuation. -@askString@ takes as a parameter a continuation taking a string parameter, -and returning @IO ()@. -Compare its signature to 'runContT' definition. 
--} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/Error/Class.hs cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/Error/Class.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/Error/Class.hs 2014-06-02 02:22:37.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/Error/Class.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,164 +0,0 @@ -{-# LANGUAGE CPP #-} -{-# LANGUAGE UndecidableInstances #-} - -{- | -Module : Control.Monad.Error.Class -Copyright : (c) Michael Weber 2001, - (c) Jeff Newbern 2003-2006, - (c) Andriy Palamarchuk 2006 - (c) Edward Kmett 2012 -License : BSD-style (see the file LICENSE) - -Maintainer : libraries@haskell.org -Stability : experimental -Portability : non-portable (multi-parameter type classes) - -[Computation type:] Computations which may fail or throw exceptions. - -[Binding strategy:] Failure records information about the cause\/location -of the failure. Failure values bypass the bound function, -other values are used as inputs to the bound function. - -[Useful for:] Building computations from sequences of functions that may fail -or using exception handling to structure error handling. - -[Zero and plus:] Zero is represented by an empty error and the plus operation -executes its second argument if the first fails. - -[Example type:] @'Either' 'String' a@ - -The Error monad (also called the Exception monad). 
--} - -{- - Rendered by Michael Weber , - inspired by the Haskell Monad Template Library from - Andy Gill () --} -module Control.Monad.Error.Class ( - Error(..), - MonadError(..), - ) where - -import Control.Monad.Trans.Except (Except, ExceptT) -import Control.Monad.Trans.Error (Error(..), ErrorT) -import qualified Control.Monad.Trans.Except as ExceptT (throwE, catchE) -import qualified Control.Monad.Trans.Error as ErrorT (throwError, catchError) -import Control.Monad.Trans.Identity as Identity -import Control.Monad.Trans.List as List -import Control.Monad.Trans.Maybe as Maybe -import Control.Monad.Trans.Reader as Reader -import Control.Monad.Trans.RWS.Lazy as LazyRWS -import Control.Monad.Trans.RWS.Strict as StrictRWS -import Control.Monad.Trans.State.Lazy as LazyState -import Control.Monad.Trans.State.Strict as StrictState -import Control.Monad.Trans.Writer.Lazy as LazyWriter -import Control.Monad.Trans.Writer.Strict as StrictWriter - -import Control.Monad.Trans.Class (lift) -import Control.Exception (IOException, catch, ioError) -import Control.Monad - -#if defined(__GLASGOW_HASKELL__) && __GLASGOW_HASKELL__ < 707 -import Control.Monad.Instances () -#endif - -import Data.Monoid -import Prelude (Either(..), (.), IO) - -{- | -The strategy of combining computations that can throw exceptions -by bypassing bound functions -from the point an exception is thrown to the point that it is handled. - -Is parameterized over the type of error information and -the monad type constructor. -It is common to use @'Data.Either' String@ as the monad type constructor -for an error monad in which error descriptions take the form of strings. -In that case and many other common cases the resulting monad is already defined -as an instance of the 'MonadError' class. -You can also define your own error type and\/or use a monad type constructor -other than @'Either' 'String'@ or @'Either' 'IOError'@. 
-In these cases you will have to explicitly define instances of the 'Error' -and\/or 'MonadError' classes. --} -class (Monad m) => MonadError e m | m -> e where - -- | Is used within a monadic computation to begin exception processing. - throwError :: e -> m a - - {- | - A handler function to handle previous errors and return to normal execution. - A common idiom is: - - > do { action1; action2; action3 } `catchError` handler - - where the @action@ functions can call 'throwError'. - Note that @handler@ and the do-block must have the same return type. - -} - catchError :: m a -> (e -> m a) -> m a - -instance MonadError IOException IO where - throwError = ioError - catchError = catch - --- --------------------------------------------------------------------------- --- Our parameterizable error monad - -instance MonadError e (Either e) where - throwError = Left - Left l `catchError` h = h l - Right r `catchError` _ = Right r - -instance (Monad m, Error e) => MonadError e (ErrorT e m) where - throwError = ErrorT.throwError - catchError = ErrorT.catchError - -instance Monad m => MonadError e (ExceptT e m) where - throwError = ExceptT.throwE - catchError = ExceptT.catchE - --- --------------------------------------------------------------------------- --- Instances for other mtl transformers --- --- All of these instances need UndecidableInstances, --- because they do not satisfy the coverage condition. - -instance MonadError e m => MonadError e (IdentityT m) where - throwError = lift . throwError - catchError = Identity.liftCatch catchError - -instance MonadError e m => MonadError e (ListT m) where - throwError = lift . throwError - catchError = List.liftCatch catchError - -instance MonadError e m => MonadError e (MaybeT m) where - throwError = lift . throwError - catchError = Maybe.liftCatch catchError - -instance MonadError e m => MonadError e (ReaderT r m) where - throwError = lift . 
throwError - catchError = Reader.liftCatch catchError - -instance (Monoid w, MonadError e m) => MonadError e (LazyRWS.RWST r w s m) where - throwError = lift . throwError - catchError = LazyRWS.liftCatch catchError - -instance (Monoid w, MonadError e m) => MonadError e (StrictRWS.RWST r w s m) where - throwError = lift . throwError - catchError = StrictRWS.liftCatch catchError - -instance MonadError e m => MonadError e (LazyState.StateT s m) where - throwError = lift . throwError - catchError = LazyState.liftCatch catchError - -instance MonadError e m => MonadError e (StrictState.StateT s m) where - throwError = lift . throwError - catchError = StrictState.liftCatch catchError - -instance (Monoid w, MonadError e m) => MonadError e (LazyWriter.WriterT w m) where - throwError = lift . throwError - catchError = LazyWriter.liftCatch catchError - -instance (Monoid w, MonadError e m) => MonadError e (StrictWriter.WriterT w m) where - throwError = lift . throwError - catchError = StrictWriter.liftCatch catchError diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/Error.hs cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/Error.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/Error.hs 2014-06-02 02:22:37.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/Error.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,151 +0,0 @@ -{-# LANGUAGE CPP #-} -{- | -Module : Control.Monad.Error -Copyright : (c) Michael Weber 2001, - (c) Jeff Newbern 2003-2006, - (c) Andriy Palamarchuk 2006 -License : BSD-style (see the file LICENSE) - -Maintainer : libraries@haskell.org -Stability : experimental -Portability : non-portable (multi-parameter type classes) - -[Computation type:] Computations which may fail or throw exceptions. - -[Binding strategy:] Failure records information about the cause\/location -of the failure. Failure values bypass the bound function, -other values are used as inputs to the bound function. 
- -[Useful for:] Building computations from sequences of functions that may fail -or using exception handling to structure error handling. - -[Zero and plus:] Zero is represented by an empty error and the plus operation -executes its second argument if the first fails. - -[Example type:] @'Data.Either' String a@ - -The Error monad (also called the Exception monad). --} - -{- - Rendered by Michael Weber , - inspired by the Haskell Monad Template Library from - Andy Gill () --} -module Control.Monad.Error - {-# DEPRECATED "Use Control.Monad.Except instead" #-} ( - -- * Monads with error handling - MonadError(..), - Error(..), - -- * The ErrorT monad transformer - ErrorT(ErrorT), - runErrorT, - mapErrorT, - module Control.Monad, - module Control.Monad.Fix, - module Control.Monad.Trans, - -- * Example 1: Custom Error Data Type - -- $customErrorExample - - -- * Example 2: Using ErrorT Monad Transformer - -- $ErrorTExample - ) where - -import Control.Monad.Error.Class -import Control.Monad.Trans -import Control.Monad.Trans.Error (ErrorT(ErrorT), runErrorT, mapErrorT) - -import Control.Monad -import Control.Monad.Fix - -#if defined(__GLASGOW_HASKELL__) && __GLASGOW_HASKELL__ < 707 -import Control.Monad.Instances () -#endif - -{- $customErrorExample -Here is an example that demonstrates the use of a custom 'Error' data type with -the 'throwError' and 'catchError' exception mechanism from 'MonadError'. -The example throws an exception if the user enters an empty string -or a string longer than 5 characters. Otherwise it prints length of the string. - ->-- This is the type to represent length calculation error. ->data LengthError = EmptyString -- Entered string was empty. -> | StringTooLong Int -- A string is longer than 5 characters. -> -- Records a length of the string. -> | OtherError String -- Other error, stores the problem description. -> ->-- We make LengthError an instance of the Error class ->-- to be able to throw it as an exception. 
->instance Error LengthError where -> noMsg = OtherError "A String Error!" -> strMsg s = OtherError s -> ->-- Converts LengthError to a readable message. ->instance Show LengthError where -> show EmptyString = "The string was empty!" -> show (StringTooLong len) = -> "The length of the string (" ++ (show len) ++ ") is bigger than 5!" -> show (OtherError msg) = msg -> ->-- For our monad type constructor, we use Either LengthError ->-- which represents failure using Left LengthError ->-- or a successful result of type a using Right a. ->type LengthMonad = Either LengthError -> ->main = do -> putStrLn "Please enter a string:" -> s <- getLine -> reportResult (calculateLength s) -> ->-- Wraps length calculation to catch the errors. ->-- Returns either length of the string or an error. ->calculateLength :: String -> LengthMonad Int ->calculateLength s = (calculateLengthOrFail s) `catchError` Left -> ->-- Attempts to calculate length and throws an error if the provided string is ->-- empty or longer than 5 characters. ->-- The processing is done in Either monad. ->calculateLengthOrFail :: String -> LengthMonad Int ->calculateLengthOrFail [] = throwError EmptyString ->calculateLengthOrFail s | len > 5 = throwError (StringTooLong len) -> | otherwise = return len -> where len = length s -> ->-- Prints result of the string length calculation. ->reportResult :: LengthMonad Int -> IO () ->reportResult (Right len) = putStrLn ("The length of the string is " ++ (show len)) ->reportResult (Left e) = putStrLn ("Length calculation failed with error: " ++ (show e)) --} - -{- $ErrorTExample -@'ErrorT'@ monad transformer can be used to add error handling to another monad. -Here is an example how to combine it with an @IO@ monad: - ->import Control.Monad.Error -> ->-- An IO monad which can return String failure. ->-- It is convenient to define the monad type of the combined monad, ->-- especially if we combine more monad transformers. 
->type LengthMonad = ErrorT String IO -> ->main = do -> -- runErrorT removes the ErrorT wrapper -> r <- runErrorT calculateLength -> reportResult r -> ->-- Asks user for a non-empty string and returns its length. ->-- Throws an error if user enters an empty string. ->calculateLength :: LengthMonad Int ->calculateLength = do -> -- all the IO operations have to be lifted to the IO monad in the monad stack -> liftIO $ putStrLn "Please enter a non-empty string: " -> s <- liftIO getLine -> if null s -> then throwError "The string was empty!" -> else return $ length s -> ->-- Prints result of the string length calculation. ->reportResult :: Either String Int -> IO () ->reportResult (Right len) = putStrLn ("The length of the string is " ++ (show len)) ->reportResult (Left e) = putStrLn ("Length calculation failed with error: " ++ (show e)) --} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/Except.hs cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/Except.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/Except.hs 2014-06-02 02:22:37.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/Except.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,153 +0,0 @@ -{-# LANGUAGE CPP #-} -{- | -Module : Control.Monad.Error -Copyright : (c) Michael Weber 2001, - (c) Jeff Newbern 2003-2006, - (c) Andriy Palamarchuk 2006 -License : BSD-style (see the file LICENSE) - -Maintainer : libraries@haskell.org -Stability : experimental -Portability : non-portable (multi-parameter type classes) - -[Computation type:] Computations which may fail or throw exceptions. - -[Binding strategy:] Failure records information about the cause\/location -of the failure. Failure values bypass the bound function, -other values are used as inputs to the bound function. - -[Useful for:] Building computations from sequences of functions that may fail -or using exception handling to structure error handling. 
- -[Example type:] @'Data.Either' String a@ - -The Error monad (also called the Exception monad). --} - -{- - Rendered by Michael Weber , - inspired by the Haskell Monad Template Library from - Andy Gill () --} -module Control.Monad.Except - ( - -- * Monads with error handling - MonadError(..), - -- * The ErrorT monad transformer - ExceptT(ExceptT), - Except, - - runExceptT, - mapExceptT, - withExceptT, - runExcept, - mapExcept, - withExcept, - - module Control.Monad, - module Control.Monad.Fix, - module Control.Monad.Trans, - -- * Example 1: Custom Error Data Type - -- $customErrorExample - - -- * Example 2: Using ExceptT Monad Transformer - -- $ExceptTExample - ) where - -import Control.Monad.Error.Class -import Control.Monad.Trans -import Control.Monad.Trans.Except - ( ExceptT(ExceptT), Except, except - , runExcept, runExceptT - , mapExcept, mapExceptT - , withExcept, withExceptT - ) - -import Control.Monad -import Control.Monad.Fix - -#if defined(__GLASGOW_HASKELL__) && __GLASGOW_HASKELL__ < 707 -import Control.Monad.Instances () -#endif - -{- $customErrorExample -Here is an example that demonstrates the use of a custom error data type with -the 'throwError' and 'catchError' exception mechanism from 'MonadError'. -The example throws an exception if the user enters an empty string -or a string longer than 5 characters. Otherwise it prints length of the string. - ->-- This is the type to represent length calculation error. ->data LengthError = EmptyString -- Entered string was empty. -> | StringTooLong Int -- A string is longer than 5 characters. -> -- Records a length of the string. -> | OtherError String -- Other error, stores the problem description. -> ->-- Converts LengthError to a readable message. ->instance Show LengthError where -> show EmptyString = "The string was empty!" -> show (StringTooLong len) = -> "The length of the string (" ++ (show len) ++ ") is bigger than 5!" 
-> show (OtherError msg) = msg -> ->-- For our monad type constructor, we use Either LengthError ->-- which represents failure using Left LengthError ->-- or a successful result of type a using Right a. ->type LengthMonad = Either LengthError -> ->main = do -> putStrLn "Please enter a string:" -> s <- getLine -> reportResult (calculateLength s) -> ->-- Wraps length calculation to catch the errors. ->-- Returns either length of the string or an error. ->calculateLength :: String -> LengthMonad Int ->calculateLength s = (calculateLengthOrFail s) `catchError` Left -> ->-- Attempts to calculate length and throws an error if the provided string is ->-- empty or longer than 5 characters. ->-- The processing is done in Either monad. ->calculateLengthOrFail :: String -> LengthMonad Int ->calculateLengthOrFail [] = throwError EmptyString ->calculateLengthOrFail s | len > 5 = throwError (StringTooLong len) -> | otherwise = return len -> where len = length s -> ->-- Prints result of the string length calculation. ->reportResult :: LengthMonad Int -> IO () ->reportResult (Right len) = putStrLn ("The length of the string is " ++ (show len)) ->reportResult (Left e) = putStrLn ("Length calculation failed with error: " ++ (show e)) --} - -{- $ExceptTExample -@'ExceptT'@ monad transformer can be used to add error handling to another monad. -Here is an example how to combine it with an @IO@ monad: - ->import Control.Monad.Except -> ->-- An IO monad which can return String failure. ->-- It is convenient to define the monad type of the combined monad, ->-- especially if we combine more monad transformers. ->type LengthMonad = ExceptT String IO -> ->main = do -> -- runExceptT removes the ExceptT wrapper -> r <- runExceptT calculateLength -> reportResult r -> ->-- Asks user for a non-empty string and returns its length. ->-- Throws an error if user enters an empty string. 
->calculateLength :: LengthMonad Int ->calculateLength = do -> -- all the IO operations have to be lifted to the IO monad in the monad stack -> liftIO $ putStrLn "Please enter a non-empty string: " -> s <- liftIO getLine -> if null s -> then throwError "The string was empty!" -> else return $ length s -> ->-- Prints result of the string length calculation. ->reportResult :: Either String Int -> IO () ->reportResult (Right len) = putStrLn ("The length of the string is " ++ (show len)) ->reportResult (Left e) = putStrLn ("Length calculation failed with error: " ++ (show e)) --} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/Identity.hs cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/Identity.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/Identity.hs 2014-06-02 02:22:37.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/Identity.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,44 +0,0 @@ -{- | -Module : Control.Monad.Identity -Copyright : (c) Andy Gill 2001, - (c) Oregon Graduate Institute of Science and Technology 2001, - (c) Jeff Newbern 2003-2006, - (c) Andriy Palamarchuk 2006 -License : BSD-style (see the file LICENSE) - -Maintainer : libraries@haskell.org -Stability : experimental -Portability : portable - -[Computation type:] Simple function application. - -[Binding strategy:] The bound function is applied to the input value. -@'Identity' x >>= f == 'Identity' (f x)@ - -[Useful for:] Monads can be derived from monad transformers applied to the -'Identity' monad. - -[Zero and plus:] None. - -[Example type:] @'Identity' a@ - -The @Identity@ monad is a monad that does not embody any computational strategy. -It simply applies the bound function to its input without any modification. -Computationally, there is no reason to use the @Identity@ monad -instead of the much simpler act of simply applying functions to their arguments. 
-The purpose of the @Identity@ monad is its fundamental role in the theory -of monad transformers. -Any monad transformer applied to the @Identity@ monad yields a non-transformer -version of that monad. --} - -module Control.Monad.Identity ( - module Data.Functor.Identity, - - module Control.Monad, - module Control.Monad.Fix, - ) where - -import Control.Monad -import Control.Monad.Fix -import Data.Functor.Identity diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/List.hs cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/List.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/List.hs 2014-06-02 02:22:37.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/List.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Control.Monad.List --- Copyright : (c) Andy Gill 2001, --- (c) Oregon Graduate Institute of Science and Technology, 2001 --- License : BSD-style (see the file LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : portable --- --- The List monad. 
--- ------------------------------------------------------------------------------ - -module Control.Monad.List ( - ListT(..), - mapListT, - module Control.Monad, - module Control.Monad.Trans, - ) where - -import Control.Monad -import Control.Monad.Trans -import Control.Monad.Trans.List diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/Reader/Class.hs cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/Reader/Class.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/Reader/Class.hs 2014-06-02 02:22:37.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/Reader/Class.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,175 +0,0 @@ -{-# LANGUAGE CPP #-} -{-# LANGUAGE UndecidableInstances #-} --- Search for UndecidableInstances to see why this is needed -{- | -Module : Control.Monad.Reader.Class -Copyright : (c) Andy Gill 2001, - (c) Oregon Graduate Institute of Science and Technology 2001, - (c) Jeff Newbern 2003-2007, - (c) Andriy Palamarchuk 2007 -License : BSD-style (see the file LICENSE) - -Maintainer : libraries@haskell.org -Stability : experimental -Portability : non-portable (multi-param classes, functional dependencies) - -[Computation type:] Computations which read values from a shared environment. - -[Binding strategy:] Monad values are functions from the environment to a value. -The bound function is applied to the bound value, and both have access -to the shared environment. - -[Useful for:] Maintaining variable bindings, or other shared environment. - -[Zero and plus:] None. - -[Example type:] @'Reader' [(String,Value)] a@ - -The 'Reader' monad (also called the Environment monad). -Represents a computation, which can read values from -a shared environment, pass values from function to function, -and execute sub-computations in a modified environment. -Using 'Reader' monad for such computations is often clearer and easier -than using the 'Control.Monad.State.State' monad. 
- - Inspired by the paper - /Functional Programming with Overloading and Higher-Order Polymorphism/, - Mark P Jones () - Advanced School of Functional Programming, 1995. --} - -module Control.Monad.Reader.Class ( - MonadReader(..), - asks, - ) where - -import Control.Monad.Trans.Cont as Cont -import Control.Monad.Trans.Except -import Control.Monad.Trans.Error -import Control.Monad.Trans.Identity -import Control.Monad.Trans.List -import Control.Monad.Trans.Maybe -import Control.Monad.Trans.Reader (ReaderT) -import qualified Control.Monad.Trans.Reader as ReaderT (ask, local, reader) -import qualified Control.Monad.Trans.RWS.Lazy as LazyRWS (RWST, ask, local, reader) -import qualified Control.Monad.Trans.RWS.Strict as StrictRWS (RWST, ask, local, reader) -import Control.Monad.Trans.State.Lazy as Lazy -import Control.Monad.Trans.State.Strict as Strict -import Control.Monad.Trans.Writer.Lazy as Lazy -import Control.Monad.Trans.Writer.Strict as Strict - -import Control.Monad.Trans.Class (lift) -import Control.Monad -import Data.Monoid - --- ---------------------------------------------------------------------------- --- class MonadReader --- asks for the internal (non-mutable) state. - --- | See examples in "Control.Monad.Reader". --- Note, the partially applied function type @(->) r@ is a simple reader monad. --- See the @instance@ declaration below. -class Monad m => MonadReader r m | m -> r where -#if defined(__GLASGOW_HASKELL__) && __GLASGOW_HASKELL__ >= 707 - {-# MINIMAL (ask | reader), local #-} -#endif - -- | Retrieves the monad environment. - ask :: m r - ask = reader id - - -- | Executes a computation in a modified environment. - local :: (r -> r) -- ^ The function to modify the environment. - -> m a -- ^ @Reader@ to run in the modified environment. - -> m a - - -- | Retrieves a function of the current environment. - reader :: (r -> a) -- ^ The selector function to apply to the environment. 
- -> m a - reader f = do - r <- ask - return (f r) - --- | Retrieves a function of the current environment. -asks :: MonadReader r m - => (r -> a) -- ^ The selector function to apply to the environment. - -> m a -asks = reader - --- ---------------------------------------------------------------------------- --- The partially applied function type is a simple reader monad - -instance MonadReader r ((->) r) where - ask = id - local f m = m . f - reader = id - -instance Monad m => MonadReader r (ReaderT r m) where - ask = ReaderT.ask - local = ReaderT.local - reader = ReaderT.reader - -instance (Monad m, Monoid w) => MonadReader r (LazyRWS.RWST r w s m) where - ask = LazyRWS.ask - local = LazyRWS.local - reader = LazyRWS.reader - -instance (Monad m, Monoid w) => MonadReader r (StrictRWS.RWST r w s m) where - ask = StrictRWS.ask - local = StrictRWS.local - reader = StrictRWS.reader - --- --------------------------------------------------------------------------- --- Instances for other mtl transformers --- --- All of these instances need UndecidableInstances, --- because they do not satisfy the coverage condition. - -instance MonadReader r' m => MonadReader r' (ContT r m) where - ask = lift ask - local = Cont.liftLocal ask local - reader = lift . reader - -instance (Error e, MonadReader r m) => MonadReader r (ErrorT e m) where - ask = lift ask - local = mapErrorT . local - reader = lift . reader - -instance MonadReader r m => MonadReader r (ExceptT e m) where - ask = lift ask - local = mapExceptT . local - reader = lift . reader - -instance MonadReader r m => MonadReader r (IdentityT m) where - ask = lift ask - local = mapIdentityT . local - reader = lift . reader - -instance MonadReader r m => MonadReader r (ListT m) where - ask = lift ask - local = mapListT . local - reader = lift . reader - -instance MonadReader r m => MonadReader r (MaybeT m) where - ask = lift ask - local = mapMaybeT . local - reader = lift . 
reader - -instance MonadReader r m => MonadReader r (Lazy.StateT s m) where - ask = lift ask - local = Lazy.mapStateT . local - reader = lift . reader - -instance MonadReader r m => MonadReader r (Strict.StateT s m) where - ask = lift ask - local = Strict.mapStateT . local - reader = lift . reader - -instance (Monoid w, MonadReader r m) => MonadReader r (Lazy.WriterT w m) where - ask = lift ask - local = Lazy.mapWriterT . local - reader = lift . reader - -instance (Monoid w, MonadReader r m) => MonadReader r (Strict.WriterT w m) where - ask = lift ask - local = Strict.mapWriterT . local - reader = lift . reader diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/Reader.hs cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/Reader.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/Reader.hs 2014-06-02 02:22:37.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/Reader.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,144 +0,0 @@ -{- | -Module : Control.Monad.Reader -Copyright : (c) Andy Gill 2001, - (c) Oregon Graduate Institute of Science and Technology 2001, - (c) Jeff Newbern 2003-2007, - (c) Andriy Palamarchuk 2007 -License : BSD-style (see the file LICENSE) - -Maintainer : libraries@haskell.org -Stability : experimental -Portability : non-portable (multi-param classes, functional dependencies) - -[Computation type:] Computations which read values from a shared environment. - -[Binding strategy:] Monad values are functions from the environment to a value. -The bound function is applied to the bound value, and both have access -to the shared environment. - -[Useful for:] Maintaining variable bindings, or other shared environment. - -[Zero and plus:] None. - -[Example type:] @'Reader' [(String,Value)] a@ - -The 'Reader' monad (also called the Environment monad). 
-Represents a computation, which can read values from -a shared environment, pass values from function to function, -and execute sub-computations in a modified environment. -Using 'Reader' monad for such computations is often clearer and easier -than using the 'Control.Monad.State.State' monad. - - Inspired by the paper - /Functional Programming with Overloading and Higher-Order Polymorphism/, - Mark P Jones () - Advanced School of Functional Programming, 1995. --} - -module Control.Monad.Reader ( - -- * MonadReader class - MonadReader(..), - asks, - -- * The Reader monad - Reader, - runReader, - mapReader, - withReader, - -- * The ReaderT monad transformer - ReaderT(ReaderT), - runReaderT, - mapReaderT, - withReaderT, - module Control.Monad, - module Control.Monad.Fix, - module Control.Monad.Trans, - -- * Example 1: Simple Reader Usage - -- $simpleReaderExample - - -- * Example 2: Modifying Reader Content With @local@ - -- $localExample - - -- * Example 3: @ReaderT@ Monad Transformer - -- $ReaderTExample - ) where - -import Control.Monad.Reader.Class - -import Control.Monad.Trans.Reader ( - Reader, runReader, mapReader, withReader, - ReaderT(ReaderT), runReaderT, mapReaderT, withReaderT) -import Control.Monad.Trans - -import Control.Monad -import Control.Monad.Fix - -{- $simpleReaderExample - -In this example the @Reader@ monad provides access to variable bindings. -Bindings are a @Map@ of integer variables. -The variable @count@ contains number of variables in the bindings. -You can see how to run a Reader monad and retrieve data from it -with 'runReader', how to access the Reader data with 'ask' and 'asks'. - -> type Bindings = Map String Int; -> ->-- Returns True if the "count" variable contains correct bindings size. ->isCountCorrect :: Bindings -> Bool ->isCountCorrect bindings = runReader calc_isCountCorrect bindings -> ->-- The Reader monad, which implements this complicated check. 
->calc_isCountCorrect :: Reader Bindings Bool ->calc_isCountCorrect = do -> count <- asks (lookupVar "count") -> bindings <- ask -> return (count == (Map.size bindings)) -> ->-- The selector function to use with 'asks'. ->-- Returns value of the variable with specified name. ->lookupVar :: String -> Bindings -> Int ->lookupVar name bindings = fromJust (Map.lookup name bindings) -> ->sampleBindings = Map.fromList [("count",3), ("1",1), ("b",2)] -> ->main = do -> putStr $ "Count is correct for bindings " ++ (show sampleBindings) ++ ": "; -> putStrLn $ show (isCountCorrect sampleBindings); --} - -{- $localExample - -Shows how to modify Reader content with 'local'. - ->calculateContentLen :: Reader String Int ->calculateContentLen = do -> content <- ask -> return (length content); -> ->-- Calls calculateContentLen after adding a prefix to the Reader content. ->calculateModifiedContentLen :: Reader String Int ->calculateModifiedContentLen = local ("Prefix " ++) calculateContentLen -> ->main = do -> let s = "12345"; -> let modifiedLen = runReader calculateModifiedContentLen s -> let len = runReader calculateContentLen s -> putStrLn $ "Modified 's' length: " ++ (show modifiedLen) -> putStrLn $ "Original 's' length: " ++ (show len) --} - -{- $ReaderTExample - -Now you are thinking: 'Wow, what a great monad! I wish I could use -Reader functionality in MyFavoriteComplexMonad!'. Don't worry. -This can be easy done with the 'ReaderT' monad transformer. -This example shows how to combine @ReaderT@ with the IO monad. - ->-- The Reader/IO combined monad, where Reader stores a string. 
->printReaderContent :: ReaderT String IO () ->printReaderContent = do -> content <- ask -> liftIO $ putStrLn ("The Reader Content: " ++ content) -> ->main = do -> runReaderT printReaderContent "Some Content" --} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/RWS/Class.hs cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/RWS/Class.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/RWS/Class.hs 2014-06-02 02:22:37.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/RWS/Class.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,60 +0,0 @@ -{-# LANGUAGE UndecidableInstances #-} --- Search for UndecidableInstances to see why this is needed - ------------------------------------------------------------------------------ --- | --- Module : Control.Monad.RWS.Class --- Copyright : (c) Andy Gill 2001, --- (c) Oregon Graduate Institute of Science and Technology, 2001 --- License : BSD-style (see the file LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (multi-param classes, functional dependencies) --- --- Declaration of the MonadRWS class. --- --- Inspired by the paper --- /Functional Programming with Overloading and Higher-Order Polymorphism/, --- Mark P Jones () --- Advanced School of Functional Programming, 1995. 
------------------------------------------------------------------------------ - -module Control.Monad.RWS.Class ( - MonadRWS, - module Control.Monad.Reader.Class, - module Control.Monad.State.Class, - module Control.Monad.Writer.Class, - ) where - -import Control.Monad.Reader.Class -import Control.Monad.State.Class -import Control.Monad.Writer.Class - -import Control.Monad.Trans.Class -import Control.Monad.Trans.Error(Error, ErrorT) -import Control.Monad.Trans.Except(ExceptT) -import Control.Monad.Trans.Maybe(MaybeT) -import Control.Monad.Trans.Identity(IdentityT) -import Control.Monad.Trans.RWS.Lazy as Lazy (RWST) -import qualified Control.Monad.Trans.RWS.Strict as Strict (RWST) - -import Data.Monoid - -class (Monoid w, MonadReader r m, MonadWriter w m, MonadState s m) - => MonadRWS r w s m | m -> r, m -> w, m -> s - -instance (Monoid w, Monad m) => MonadRWS r w s (Lazy.RWST r w s m) - -instance (Monoid w, Monad m) => MonadRWS r w s (Strict.RWST r w s m) - ---------------------------------------------------------------------------- --- Instances for other mtl transformers --- --- All of these instances need UndecidableInstances, --- because they do not satisfy the coverage condition. 
- -instance MonadRWS r w s m => MonadRWS r w s (ExceptT e m) -instance (Error e, MonadRWS r w s m) => MonadRWS r w s (ErrorT e m) -instance MonadRWS r w s m => MonadRWS r w s (IdentityT m) -instance MonadRWS r w s m => MonadRWS r w s (MaybeT m) diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/RWS/Lazy.hs cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/RWS/Lazy.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/RWS/Lazy.hs 2014-06-02 02:22:37.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/RWS/Lazy.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,53 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Control.Monad.RWS.Lazy --- Copyright : (c) Andy Gill 2001, --- (c) Oregon Graduate Institute of Science and Technology, 2001 --- License : BSD-style (see the file LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (multi-param classes, functional dependencies) --- --- Lazy RWS monad. --- --- Inspired by the paper --- /Functional Programming with Overloading and Higher-Order Polymorphism/, --- Mark P Jones () --- Advanced School of Functional Programming, 1995. 
------------------------------------------------------------------------------ - -module Control.Monad.RWS.Lazy ( - -- * The RWS monad - RWS, - rws, - runRWS, - evalRWS, - execRWS, - mapRWS, - withRWS, - -- * The RWST monad transformer - RWST(RWST), - runRWST, - evalRWST, - execRWST, - mapRWST, - withRWST, - -- * Lazy Reader-writer-state monads - module Control.Monad.RWS.Class, - module Control.Monad, - module Control.Monad.Fix, - module Control.Monad.Trans, - module Data.Monoid, - ) where - -import Control.Monad.RWS.Class - -import Control.Monad.Trans -import Control.Monad.Trans.RWS.Lazy ( - RWS, rws, runRWS, evalRWS, execRWS, mapRWS, withRWS, - RWST(RWST), runRWST, evalRWST, execRWST, mapRWST, withRWST) - -import Control.Monad -import Control.Monad.Fix -import Data.Monoid diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/RWS/Strict.hs cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/RWS/Strict.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/RWS/Strict.hs 2014-06-02 02:22:37.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/RWS/Strict.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,53 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Control.Monad.RWS.Strict --- Copyright : (c) Andy Gill 2001, --- (c) Oregon Graduate Institute of Science and Technology, 2001 --- License : BSD-style (see the file LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (multi-param classes, functional dependencies) --- --- Strict RWS monad. --- --- Inspired by the paper --- /Functional Programming with Overloading and Higher-Order Polymorphism/, --- Mark P Jones () --- Advanced School of Functional Programming, 1995. 
------------------------------------------------------------------------------ - -module Control.Monad.RWS.Strict ( - -- * The RWS monad - RWS, - rws, - runRWS, - evalRWS, - execRWS, - mapRWS, - withRWS, - -- * The RWST monad transformer - RWST(RWST), - runRWST, - evalRWST, - execRWST, - mapRWST, - withRWST, - -- * Strict Reader-writer-state monads - module Control.Monad.RWS.Class, - module Control.Monad, - module Control.Monad.Fix, - module Control.Monad.Trans, - module Data.Monoid, - ) where - -import Control.Monad.RWS.Class - -import Control.Monad.Trans -import Control.Monad.Trans.RWS.Strict ( - RWS, rws, runRWS, evalRWS, execRWS, mapRWS, withRWS, - RWST(RWST), runRWST, evalRWST, execRWST, mapRWST, withRWST) - -import Control.Monad -import Control.Monad.Fix -import Data.Monoid diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/RWS.hs cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/RWS.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/RWS.hs 2014-06-02 02:22:37.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/RWS.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Control.Monad.RWS --- Copyright : (c) Andy Gill 2001, --- (c) Oregon Graduate Institute of Science and Technology, 2001 --- License : BSD-style (see the file LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (multi-param classes, functional dependencies) --- --- Declaration of the MonadRWS class. --- --- Inspired by the paper --- /Functional Programming with Overloading and Higher-Order Polymorphism/, --- Mark P Jones () --- Advanced School of Functional Programming, 1995. 
------------------------------------------------------------------------------ - -module Control.Monad.RWS ( - module Control.Monad.RWS.Lazy - ) where - -import Control.Monad.RWS.Lazy diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/State/Class.hs cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/State/Class.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/State/Class.hs 2014-06-02 02:22:37.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/State/Class.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,168 +0,0 @@ -{-# LANGUAGE CPP #-} -{-# LANGUAGE UndecidableInstances #-} --- Search for UndecidableInstances to see why this is needed - ------------------------------------------------------------------------------ --- | --- Module : Control.Monad.State.Class --- Copyright : (c) Andy Gill 2001, --- (c) Oregon Graduate Institute of Science and Technology, 2001 --- License : BSD-style (see the file LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (multi-param classes, functional dependencies) --- --- MonadState class. --- --- This module is inspired by the paper --- /Functional Programming with Overloading and Higher-Order Polymorphism/, --- Mark P Jones () --- Advanced School of Functional Programming, 1995. 
- ------------------------------------------------------------------------------ - -module Control.Monad.State.Class ( - MonadState(..), - modify, - modify', - gets - ) where - -import Control.Monad.Trans.Cont -import Control.Monad.Trans.Error -import Control.Monad.Trans.Except -import Control.Monad.Trans.Identity -import Control.Monad.Trans.List -import Control.Monad.Trans.Maybe -import Control.Monad.Trans.Reader -import qualified Control.Monad.Trans.RWS.Lazy as LazyRWS (RWST, get, put, state) -import qualified Control.Monad.Trans.RWS.Strict as StrictRWS (RWST, get, put, state) -import qualified Control.Monad.Trans.State.Lazy as Lazy (StateT, get, put, state) -import qualified Control.Monad.Trans.State.Strict as Strict (StateT, get, put, state) -import Control.Monad.Trans.Writer.Lazy as Lazy -import Control.Monad.Trans.Writer.Strict as Strict - -import Control.Monad.Trans.Class (lift) -import Control.Monad -import Data.Monoid - --- --------------------------------------------------------------------------- - --- | Minimal definition is either both of @get@ and @put@ or just @state@ -class Monad m => MonadState s m | m -> s where - -- | Return the state from the internals of the monad. - get :: m s - get = state (\s -> (s, s)) - - -- | Replace the state inside the monad. - put :: s -> m () - put s = state (\_ -> ((), s)) - - -- | Embed a simple state action into the monad. - state :: (s -> (a, s)) -> m a - state f = do - s <- get - let ~(a, s') = f s - put s' - return a -#if defined(__GLASGOW_HASKELL__) && __GLASGOW_HASKELL__ >= 707 - {-# MINIMAL state | get, put #-} -#endif - --- | Monadic state transformer. --- --- Maps an old state to a new state inside a state monad. --- The old state is thrown away. --- --- > Main> :t modify ((+1) :: Int -> Int) --- > modify (...) :: (MonadState Int a) => a () --- --- This says that @modify (+1)@ acts over any --- Monad that is a member of the @MonadState@ class, --- with an @Int@ state. 
-modify :: MonadState s m => (s -> s) -> m () -modify f = state (\s -> ((), f s)) - --- | A variant of 'modify' in which the computation is strict in the --- new state. -modify' :: MonadState s m => (s -> s) -> m () -modify' f = state (\s -> let s' = f s in s' `seq` ((), s')) - --- | Gets specific component of the state, using a projection function --- supplied. -gets :: MonadState s m => (s -> a) -> m a -gets f = do - s <- get - return (f s) - -instance Monad m => MonadState s (Lazy.StateT s m) where - get = Lazy.get - put = Lazy.put - state = Lazy.state - -instance Monad m => MonadState s (Strict.StateT s m) where - get = Strict.get - put = Strict.put - state = Strict.state - -instance (Monad m, Monoid w) => MonadState s (LazyRWS.RWST r w s m) where - get = LazyRWS.get - put = LazyRWS.put - state = LazyRWS.state - -instance (Monad m, Monoid w) => MonadState s (StrictRWS.RWST r w s m) where - get = StrictRWS.get - put = StrictRWS.put - state = StrictRWS.state - --- --------------------------------------------------------------------------- --- Instances for other mtl transformers --- --- All of these instances need UndecidableInstances, --- because they do not satisfy the coverage condition. - -instance MonadState s m => MonadState s (ContT r m) where - get = lift get - put = lift . put - state = lift . state - -instance (Error e, MonadState s m) => MonadState s (ErrorT e m) where - get = lift get - put = lift . put - state = lift . state - -instance MonadState s m => MonadState s (ExceptT e m) where - get = lift get - put = lift . put - state = lift . state - -instance MonadState s m => MonadState s (IdentityT m) where - get = lift get - put = lift . put - state = lift . state - -instance MonadState s m => MonadState s (ListT m) where - get = lift get - put = lift . put - state = lift . state - -instance MonadState s m => MonadState s (MaybeT m) where - get = lift get - put = lift . put - state = lift . 
state - -instance MonadState s m => MonadState s (ReaderT r m) where - get = lift get - put = lift . put - state = lift . state - -instance (Monoid w, MonadState s m) => MonadState s (Lazy.WriterT w m) where - get = lift get - put = lift . put - state = lift . state - -instance (Monoid w, MonadState s m) => MonadState s (Strict.WriterT w m) where - get = lift get - put = lift . put - state = lift . state diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/State/Lazy.hs cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/State/Lazy.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/State/Lazy.hs 2014-06-02 02:22:37.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/State/Lazy.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,130 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Control.Monad.State.Lazy --- Copyright : (c) Andy Gill 2001, --- (c) Oregon Graduate Institute of Science and Technology, 2001 --- License : BSD-style (see the file LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (multi-param classes, functional dependencies) --- --- Lazy state monads. --- --- This module is inspired by the paper --- /Functional Programming with Overloading and Higher-Order Polymorphism/, --- Mark P Jones () --- Advanced School of Functional Programming, 1995. 
- ------------------------------------------------------------------------------ - -module Control.Monad.State.Lazy ( - -- * MonadState class - MonadState(..), - modify, - modify', - gets, - -- * The State monad - State, - runState, - evalState, - execState, - mapState, - withState, - -- * The StateT monad transformer - StateT(StateT), - runStateT, - evalStateT, - execStateT, - mapStateT, - withStateT, - module Control.Monad, - module Control.Monad.Fix, - module Control.Monad.Trans, - -- * Examples - -- $examples - ) where - -import Control.Monad.State.Class - -import Control.Monad.Trans -import Control.Monad.Trans.State.Lazy - (State, runState, evalState, execState, mapState, withState, - StateT(StateT), runStateT, evalStateT, execStateT, mapStateT, withStateT) - -import Control.Monad -import Control.Monad.Fix - --- --------------------------------------------------------------------------- --- $examples --- A function to increment a counter. Taken from the paper --- /Generalising Monads to Arrows/, John --- Hughes (), November 1998: --- --- > tick :: State Int Int --- > tick = do n <- get --- > put (n+1) --- > return n --- --- Add one to the given number using the state monad: --- --- > plusOne :: Int -> Int --- > plusOne n = execState tick n --- --- A contrived addition example. Works only with positive numbers: --- --- > plus :: Int -> Int -> Int --- > plus n x = execState (sequence $ replicate n tick) x --- --- An example from /The Craft of Functional Programming/, Simon --- Thompson (), --- Addison-Wesley 1999: \"Given an arbitrary tree, transform it to a --- tree of integers in which the original elements are replaced by --- natural numbers, starting from 0. 
The same element has to be --- replaced by the same number at every occurrence, and when we meet --- an as-yet-unvisited element we have to find a \'new\' number to match --- it with:\" --- --- > data Tree a = Nil | Node a (Tree a) (Tree a) deriving (Show, Eq) --- > type Table a = [a] --- --- > numberTree :: Eq a => Tree a -> State (Table a) (Tree Int) --- > numberTree Nil = return Nil --- > numberTree (Node x t1 t2) --- > = do num <- numberNode x --- > nt1 <- numberTree t1 --- > nt2 <- numberTree t2 --- > return (Node num nt1 nt2) --- > where --- > numberNode :: Eq a => a -> State (Table a) Int --- > numberNode x --- > = do table <- get --- > (newTable, newPos) <- return (nNode x table) --- > put newTable --- > return newPos --- > nNode:: (Eq a) => a -> Table a -> (Table a, Int) --- > nNode x table --- > = case (findIndexInList (== x) table) of --- > Nothing -> (table ++ [x], length table) --- > Just i -> (table, i) --- > findIndexInList :: (a -> Bool) -> [a] -> Maybe Int --- > findIndexInList = findIndexInListHelp 0 --- > findIndexInListHelp _ _ [] = Nothing --- > findIndexInListHelp count f (h:t) --- > = if (f h) --- > then Just count --- > else findIndexInListHelp (count+1) f t --- --- numTree applies numberTree with an initial state: --- --- > numTree :: (Eq a) => Tree a -> Tree Int --- > numTree t = evalState (numberTree t) [] --- --- > testTree = Node "Zero" (Node "One" (Node "Two" Nil Nil) (Node "One" (Node "Zero" Nil Nil) Nil)) Nil --- > numTree testTree => Node 0 (Node 1 (Node 2 Nil Nil) (Node 1 (Node 0 Nil Nil) Nil)) Nil --- --- sumTree is a little helper function that does not use the State monad: --- --- > sumTree :: (Num a) => Tree a -> a --- > sumTree Nil = 0 --- > sumTree (Node e t1 t2) = e + (sumTree t1) + (sumTree t2) diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/State/Strict.hs cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/State/Strict.hs --- 
cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/State/Strict.hs 2014-06-02 02:22:37.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/State/Strict.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,130 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Control.Monad.State.Strict --- Copyright : (c) Andy Gill 2001, --- (c) Oregon Graduate Institute of Science and Technology, 2001 --- License : BSD-style (see the file LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (multi-param classes, functional dependencies) --- --- Strict state monads. --- --- This module is inspired by the paper --- /Functional Programming with Overloading and Higher-Order Polymorphism/, --- Mark P Jones () --- Advanced School of Functional Programming, 1995. - ------------------------------------------------------------------------------ - -module Control.Monad.State.Strict ( - -- * MonadState class - MonadState(..), - modify, - modify', - gets, - -- * The State monad - State, - runState, - evalState, - execState, - mapState, - withState, - -- * The StateT monad transformer - StateT(StateT), - runStateT, - evalStateT, - execStateT, - mapStateT, - withStateT, - module Control.Monad, - module Control.Monad.Fix, - module Control.Monad.Trans, - -- * Examples - -- $examples - ) where - -import Control.Monad.State.Class - -import Control.Monad.Trans -import Control.Monad.Trans.State.Strict - (State, runState, evalState, execState, mapState, withState, - StateT(StateT), runStateT, evalStateT, execStateT, mapStateT, withStateT) - -import Control.Monad -import Control.Monad.Fix - --- --------------------------------------------------------------------------- --- $examples --- A function to increment a counter. 
Taken from the paper --- /Generalising Monads to Arrows/, John --- Hughes (), November 1998: --- --- > tick :: State Int Int --- > tick = do n <- get --- > put (n+1) --- > return n --- --- Add one to the given number using the state monad: --- --- > plusOne :: Int -> Int --- > plusOne n = execState tick n --- --- A contrived addition example. Works only with positive numbers: --- --- > plus :: Int -> Int -> Int --- > plus n x = execState (sequence $ replicate n tick) x --- --- An example from /The Craft of Functional Programming/, Simon --- Thompson (), --- Addison-Wesley 1999: \"Given an arbitrary tree, transform it to a --- tree of integers in which the original elements are replaced by --- natural numbers, starting from 0. The same element has to be --- replaced by the same number at every occurrence, and when we meet --- an as-yet-unvisited element we have to find a \'new\' number to match --- it with:\" --- --- > data Tree a = Nil | Node a (Tree a) (Tree a) deriving (Show, Eq) --- > type Table a = [a] --- --- > numberTree :: Eq a => Tree a -> State (Table a) (Tree Int) --- > numberTree Nil = return Nil --- > numberTree (Node x t1 t2) --- > = do num <- numberNode x --- > nt1 <- numberTree t1 --- > nt2 <- numberTree t2 --- > return (Node num nt1 nt2) --- > where --- > numberNode :: Eq a => a -> State (Table a) Int --- > numberNode x --- > = do table <- get --- > (newTable, newPos) <- return (nNode x table) --- > put newTable --- > return newPos --- > nNode:: (Eq a) => a -> Table a -> (Table a, Int) --- > nNode x table --- > = case (findIndexInList (== x) table) of --- > Nothing -> (table ++ [x], length table) --- > Just i -> (table, i) --- > findIndexInList :: (a -> Bool) -> [a] -> Maybe Int --- > findIndexInList = findIndexInListHelp 0 --- > findIndexInListHelp _ _ [] = Nothing --- > findIndexInListHelp count f (h:t) --- > = if (f h) --- > then Just count --- > else findIndexInListHelp (count+1) f t --- --- numTree applies numberTree with an initial state: --- 
--- > numTree :: (Eq a) => Tree a -> Tree Int --- > numTree t = evalState (numberTree t) [] --- --- > testTree = Node "Zero" (Node "One" (Node "Two" Nil Nil) (Node "One" (Node "Zero" Nil Nil) Nil)) Nil --- > numTree testTree => Node 0 (Node 1 (Node 2 Nil Nil) (Node 1 (Node 0 Nil Nil) Nil)) Nil --- --- sumTree is a little helper function that does not use the State monad: --- --- > sumTree :: (Num a) => Tree a -> a --- > sumTree Nil = 0 --- > sumTree (Node e t1 t2) = e + (sumTree t1) + (sumTree t2) diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/State.hs cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/State.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/State.hs 2014-06-02 02:22:37.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/State.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Control.Monad.State --- Copyright : (c) Andy Gill 2001, --- (c) Oregon Graduate Institute of Science and Technology, 2001 --- License : BSD-style (see the file LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (multi-param classes, functional dependencies) --- --- State monads. --- --- This module is inspired by the paper --- /Functional Programming with Overloading and Higher-Order Polymorphism/, --- Mark P Jones () --- Advanced School of Functional Programming, 1995. 
- ------------------------------------------------------------------------------ - -module Control.Monad.State ( - module Control.Monad.State.Lazy - ) where - -import Control.Monad.State.Lazy diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/Trans.hs cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/Trans.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/Trans.hs 2014-06-02 02:22:37.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/Trans.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,34 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Control.Monad.Trans --- Copyright : (c) Andy Gill 2001, --- (c) Oregon Graduate Institute of Science and Technology, 2001 --- License : BSD-style (see the file LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : portable --- --- Classes for monad transformers. --- --- A monad transformer makes new monad out of an existing monad, such --- that computations of the old monad may be embedded in the new one. --- To construct a monad with a desired set of features, one typically --- starts with a base monad, such as @Identity@, @[]@ or 'IO', and --- applies a sequence of monad transformers. --- --- Most monad transformer modules include the special case of applying the --- transformer to @Identity@. For example, @State s@ is an abbreviation --- for @StateT s Identity@. --- --- Each monad transformer also comes with an operation @run@/XXX/ to --- unwrap the transformer, exposing a computation of the inner monad. 
------------------------------------------------------------------------------ - -module Control.Monad.Trans ( - module Control.Monad.Trans.Class, - module Control.Monad.IO.Class - ) where - -import Control.Monad.IO.Class -import Control.Monad.Trans.Class diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/Writer/Class.hs cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/Writer/Class.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/Writer/Class.hs 2014-06-02 02:22:37.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/Writer/Class.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,173 +0,0 @@ -{-# LANGUAGE CPP #-} -{-# LANGUAGE UndecidableInstances #-} --- Search for UndecidableInstances to see why this is needed - ------------------------------------------------------------------------------ --- | --- Module : Control.Monad.Writer.Class --- Copyright : (c) Andy Gill 2001, --- (c) Oregon Graduate Institute of Science and Technology, 2001 --- License : BSD-style (see the file LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (multi-param classes, functional dependencies) --- --- The MonadWriter class. --- --- Inspired by the paper --- /Functional Programming with Overloading and Higher-Order Polymorphism/, --- Mark P Jones () --- Advanced School of Functional Programming, 1995. 
------------------------------------------------------------------------------ - -module Control.Monad.Writer.Class ( - MonadWriter(..), - listens, - censor, - ) where - -import Control.Monad.Trans.Error as Error -import Control.Monad.Trans.Except as Except -import Control.Monad.Trans.Identity as Identity -import Control.Monad.Trans.Maybe as Maybe -import Control.Monad.Trans.Reader -import qualified Control.Monad.Trans.RWS.Lazy as LazyRWS ( - RWST, writer, tell, listen, pass) -import qualified Control.Monad.Trans.RWS.Strict as StrictRWS ( - RWST, writer, tell, listen, pass) -import Control.Monad.Trans.State.Lazy as Lazy -import Control.Monad.Trans.State.Strict as Strict -import qualified Control.Monad.Trans.Writer.Lazy as Lazy ( - WriterT, writer, tell, listen, pass) -import qualified Control.Monad.Trans.Writer.Strict as Strict ( - WriterT, writer, tell, listen, pass) - -import Control.Monad.Trans.Class (lift) -import Control.Monad -import Data.Monoid - --- --------------------------------------------------------------------------- --- MonadWriter class --- --- tell is like tell on the MUD's it shouts to monad --- what you want to be heard. The monad carries this 'packet' --- upwards, merging it if needed (hence the Monoid requirement). --- --- listen listens to a monad acting, and returns what the monad "said". --- --- pass lets you provide a writer transformer which changes internals of --- the written object. - -class (Monoid w, Monad m) => MonadWriter w m | m -> w where -#if defined(__GLASGOW_HASKELL__) && __GLASGOW_HASKELL__ >= 707 - {-# MINIMAL (writer | tell), listen, pass #-} -#endif - -- | @'writer' (a,w)@ embeds a simple writer action. - writer :: (a,w) -> m a - writer ~(a, w) = do - tell w - return a - - -- | @'tell' w@ is an action that produces the output @w@. - tell :: w -> m () - tell w = writer ((),w) - - -- | @'listen' m@ is an action that executes the action @m@ and adds - -- its output to the value of the computation. 
- listen :: m a -> m (a, w) - -- | @'pass' m@ is an action that executes the action @m@, which - -- returns a value and a function, and returns the value, applying - -- the function to the output. - pass :: m (a, w -> w) -> m a - --- | @'listens' f m@ is an action that executes the action @m@ and adds --- the result of applying @f@ to the output to the value of the computation. --- --- * @'listens' f m = 'liftM' (id *** f) ('listen' m)@ -listens :: MonadWriter w m => (w -> b) -> m a -> m (a, b) -listens f m = do - ~(a, w) <- listen m - return (a, f w) - --- | @'censor' f m@ is an action that executes the action @m@ and --- applies the function @f@ to its output, leaving the return value --- unchanged. --- --- * @'censor' f m = 'pass' ('liftM' (\\x -> (x,f)) m)@ -censor :: MonadWriter w m => (w -> w) -> m a -> m a -censor f m = pass $ do - a <- m - return (a, f) - -instance (Monoid w, Monad m) => MonadWriter w (Lazy.WriterT w m) where - writer = Lazy.writer - tell = Lazy.tell - listen = Lazy.listen - pass = Lazy.pass - -instance (Monoid w, Monad m) => MonadWriter w (Strict.WriterT w m) where - writer = Strict.writer - tell = Strict.tell - listen = Strict.listen - pass = Strict.pass - -instance (Monoid w, Monad m) => MonadWriter w (LazyRWS.RWST r w s m) where - writer = LazyRWS.writer - tell = LazyRWS.tell - listen = LazyRWS.listen - pass = LazyRWS.pass - -instance (Monoid w, Monad m) => MonadWriter w (StrictRWS.RWST r w s m) where - writer = StrictRWS.writer - tell = StrictRWS.tell - listen = StrictRWS.listen - pass = StrictRWS.pass - --- --------------------------------------------------------------------------- --- Instances for other mtl transformers --- --- All of these instances need UndecidableInstances, --- because they do not satisfy the coverage condition. - -instance (Error e, MonadWriter w m) => MonadWriter w (ErrorT e m) where - writer = lift . writer - tell = lift . 
tell - listen = Error.liftListen listen - pass = Error.liftPass pass - -instance MonadWriter w m => MonadWriter w (ExceptT e m) where - writer = lift . writer - tell = lift . tell - listen = Except.liftListen listen - pass = Except.liftPass pass - -instance MonadWriter w m => MonadWriter w (IdentityT m) where - writer = lift . writer - tell = lift . tell - listen = Identity.mapIdentityT listen - pass = Identity.mapIdentityT pass - -instance MonadWriter w m => MonadWriter w (MaybeT m) where - writer = lift . writer - tell = lift . tell - listen = Maybe.liftListen listen - pass = Maybe.liftPass pass - -instance MonadWriter w m => MonadWriter w (ReaderT r m) where - writer = lift . writer - tell = lift . tell - listen = mapReaderT listen - pass = mapReaderT pass - -instance MonadWriter w m => MonadWriter w (Lazy.StateT s m) where - writer = lift . writer - tell = lift . tell - listen = Lazy.liftListen listen - pass = Lazy.liftPass pass - -instance MonadWriter w m => MonadWriter w (Strict.StateT s m) where - writer = lift . writer - tell = lift . tell - listen = Strict.liftListen listen - pass = Strict.liftPass pass diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/Writer/Lazy.hs cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/Writer/Lazy.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/Writer/Lazy.hs 2014-06-02 02:22:37.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/Writer/Lazy.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,50 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Control.Monad.Writer.Lazy --- Copyright : (c) Andy Gill 2001, --- (c) Oregon Graduate Institute of Science and Technology, 2001 --- License : BSD-style (see the file LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (multi-param classes, functional dependencies) --- --- Lazy writer monads. 
--- --- Inspired by the paper --- /Functional Programming with Overloading and Higher-Order Polymorphism/, --- Mark P Jones () --- Advanced School of Functional Programming, 1995. ------------------------------------------------------------------------------ - -module Control.Monad.Writer.Lazy ( - -- * MonadWriter class - MonadWriter(..), - listens, - censor, - -- * The Writer monad - Writer, - runWriter, - execWriter, - mapWriter, - -- * The WriterT monad transformer - WriterT(WriterT), - runWriterT, - execWriterT, - mapWriterT, - module Control.Monad, - module Control.Monad.Fix, - module Control.Monad.Trans, - module Data.Monoid, - ) where - -import Control.Monad.Writer.Class - -import Control.Monad.Trans -import Control.Monad.Trans.Writer.Lazy ( - Writer, runWriter, execWriter, mapWriter, - WriterT(WriterT), runWriterT, execWriterT, mapWriterT) - -import Control.Monad -import Control.Monad.Fix -import Data.Monoid diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/Writer/Strict.hs cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/Writer/Strict.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/Writer/Strict.hs 2014-06-02 02:22:37.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/Writer/Strict.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,49 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Control.Monad.Writer.Strict --- Copyright : (c) Andy Gill 2001, --- (c) Oregon Graduate Institute of Science and Technology, 2001 --- License : BSD-style (see the file LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (multi-param classes, functional dependencies) --- --- Strict writer monads. --- --- Inspired by the paper --- /Functional Programming with Overloading and Higher-Order Polymorphism/, --- Mark P Jones () --- Advanced School of Functional Programming, 1995. 
------------------------------------------------------------------------------ - -module Control.Monad.Writer.Strict ( - -- * MonadWriter class - MonadWriter(..), - listens, - censor, - -- * The Writer monad - Writer, - runWriter, - execWriter, - mapWriter, - -- * The WriterT monad transformer - WriterT(..), - execWriterT, - mapWriterT, - module Control.Monad, - module Control.Monad.Fix, - module Control.Monad.Trans, - module Data.Monoid, - ) where - -import Control.Monad.Writer.Class - -import Control.Monad.Trans -import Control.Monad.Trans.Writer.Strict ( - Writer, runWriter, execWriter, mapWriter, - WriterT(..), execWriterT, mapWriterT) - -import Control.Monad -import Control.Monad.Fix -import Data.Monoid diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/Writer.hs cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/Writer.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Control/Monad/Writer.hs 2014-06-02 02:22:37.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Control/Monad/Writer.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Control.Monad.Writer --- Copyright : (c) Andy Gill 2001, --- (c) Oregon Graduate Institute of Science and Technology, 2001 --- License : BSD-style (see the file LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (multi-param classes, functional dependencies) --- --- The MonadWriter class. --- --- Inspired by the paper --- /Functional Programming with Overloading and Higher-Order Polymorphism/, --- Mark P Jones () --- Advanced School of Functional Programming, 1995. 
------------------------------------------------------------------------------ - -module Control.Monad.Writer ( - module Control.Monad.Writer.Lazy - ) where - -import Control.Monad.Writer.Lazy diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar5=/LICENSE cabal-install-1.22-1.22.9.0/=unpacked-tar5=/LICENSE --- cabal-install-1.22-1.22.6.0/=unpacked-tar5=/LICENSE 2014-06-02 02:22:37.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar5=/LICENSE 1970-01-01 00:00:00.000000000 +0000 @@ -1,31 +0,0 @@ -The Glasgow Haskell Compiler License - -Copyright 2004, The University Court of the University of Glasgow. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -- Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. - -- Redistributions in binary form must reproduce the above copyright notice, -this list of conditions and the following disclaimer in the documentation -and/or other materials provided with the distribution. - -- Neither name of the University nor the names of its contributors may be -used to endorse or promote products derived from this software without -specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY COURT OF THE UNIVERSITY OF -GLASGOW AND THE CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, -INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND -FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE -UNIVERSITY COURT OF THE UNIVERSITY OF GLASGOW OR THE CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -DAMAGE. diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar5=/mtl.cabal cabal-install-1.22-1.22.9.0/=unpacked-tar5=/mtl.cabal --- cabal-install-1.22-1.22.6.0/=unpacked-tar5=/mtl.cabal 2014-06-02 02:22:37.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar5=/mtl.cabal 1970-01-01 00:00:00.000000000 +0000 @@ -1,54 +0,0 @@ -name: mtl -version: 2.2.1 -cabal-version: >= 1.6 -license: BSD3 -license-file: LICENSE -author: Andy Gill -maintainer: Edward Kmett -category: Control -synopsis: Monad classes, using functional dependencies -homepage: http://github.com/ekmett/mtl -bug-reports: http://github.com/ekmett/mtl/issues -description: - Monad classes using functional dependencies, with instances - for various monad transformers, inspired by the paper - /Functional Programming with Overloading and Higher-Order Polymorphism/, - by Mark P Jones, in /Advanced School of Functional Programming/, 1995 - (). 
-build-type: Simple -extra-source-files: CHANGELOG.markdown - -source-repository head - type: git - location: git://github.com/ekmett/mtl.git - -Library - exposed-modules: - Control.Monad.Cont - Control.Monad.Cont.Class - Control.Monad.Error - Control.Monad.Error.Class - Control.Monad.Except - Control.Monad.Identity - Control.Monad.List - Control.Monad.RWS - Control.Monad.RWS.Class - Control.Monad.RWS.Lazy - Control.Monad.RWS.Strict - Control.Monad.Reader - Control.Monad.Reader.Class - Control.Monad.State - Control.Monad.State.Class - Control.Monad.State.Lazy - Control.Monad.State.Strict - Control.Monad.Trans - Control.Monad.Writer - Control.Monad.Writer.Class - Control.Monad.Writer.Lazy - Control.Monad.Writer.Strict - build-depends: base < 6, transformers == 0.4.* - extensions: - MultiParamTypeClasses - FunctionalDependencies - FlexibleInstances - ghc-options: -Wall -fno-warn-unused-imports -fno-warn-warnings-deprecations diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Setup.hs cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Setup.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar5=/Setup.hs 2014-06-02 02:22:37.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar5=/Setup.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -import Distribution.Simple -main = defaultMain diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/adler32.c cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/adler32.c --- cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/adler32.c 2014-11-18 11:13:10.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/adler32.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,169 +0,0 @@ -/* adler32.c -- compute the Adler-32 checksum of a data stream - * Copyright (C) 1995-2007 Mark Adler - * For conditions of distribution and use, see copyright notice in zlib.h - */ - -/* @(#) $Id$ */ - -#include "zutil.h" - -#define local static - -local uLong adler32_combine_(uLong adler1, uLong adler2, z_off64_t len2); - -#define BASE 65521UL 
/* largest prime smaller than 65536 */ -#define NMAX 5552 -/* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 */ - -#define DO1(buf,i) {adler += (buf)[i]; sum2 += adler;} -#define DO2(buf,i) DO1(buf,i); DO1(buf,i+1); -#define DO4(buf,i) DO2(buf,i); DO2(buf,i+2); -#define DO8(buf,i) DO4(buf,i); DO4(buf,i+4); -#define DO16(buf) DO8(buf,0); DO8(buf,8); - -/* use NO_DIVIDE if your processor does not do division in hardware */ -#ifdef NO_DIVIDE -# define MOD(a) \ - do { \ - if (a >= (BASE << 16)) a -= (BASE << 16); \ - if (a >= (BASE << 15)) a -= (BASE << 15); \ - if (a >= (BASE << 14)) a -= (BASE << 14); \ - if (a >= (BASE << 13)) a -= (BASE << 13); \ - if (a >= (BASE << 12)) a -= (BASE << 12); \ - if (a >= (BASE << 11)) a -= (BASE << 11); \ - if (a >= (BASE << 10)) a -= (BASE << 10); \ - if (a >= (BASE << 9)) a -= (BASE << 9); \ - if (a >= (BASE << 8)) a -= (BASE << 8); \ - if (a >= (BASE << 7)) a -= (BASE << 7); \ - if (a >= (BASE << 6)) a -= (BASE << 6); \ - if (a >= (BASE << 5)) a -= (BASE << 5); \ - if (a >= (BASE << 4)) a -= (BASE << 4); \ - if (a >= (BASE << 3)) a -= (BASE << 3); \ - if (a >= (BASE << 2)) a -= (BASE << 2); \ - if (a >= (BASE << 1)) a -= (BASE << 1); \ - if (a >= BASE) a -= BASE; \ - } while (0) -# define MOD4(a) \ - do { \ - if (a >= (BASE << 4)) a -= (BASE << 4); \ - if (a >= (BASE << 3)) a -= (BASE << 3); \ - if (a >= (BASE << 2)) a -= (BASE << 2); \ - if (a >= (BASE << 1)) a -= (BASE << 1); \ - if (a >= BASE) a -= BASE; \ - } while (0) -#else -# define MOD(a) a %= BASE -# define MOD4(a) a %= BASE -#endif - -/* ========================================================================= */ -uLong ZEXPORT adler32(adler, buf, len) - uLong adler; - const Bytef *buf; - uInt len; -{ - unsigned long sum2; - unsigned n; - - /* split Adler-32 into component sums */ - sum2 = (adler >> 16) & 0xffff; - adler &= 0xffff; - - /* in case user likes doing a byte at a time, keep it fast */ - if (len == 1) { - adler += buf[0]; - if (adler >= 
BASE) - adler -= BASE; - sum2 += adler; - if (sum2 >= BASE) - sum2 -= BASE; - return adler | (sum2 << 16); - } - - /* initial Adler-32 value (deferred check for len == 1 speed) */ - if (buf == Z_NULL) - return 1L; - - /* in case short lengths are provided, keep it somewhat fast */ - if (len < 16) { - while (len--) { - adler += *buf++; - sum2 += adler; - } - if (adler >= BASE) - adler -= BASE; - MOD4(sum2); /* only added so many BASE's */ - return adler | (sum2 << 16); - } - - /* do length NMAX blocks -- requires just one modulo operation */ - while (len >= NMAX) { - len -= NMAX; - n = NMAX / 16; /* NMAX is divisible by 16 */ - do { - DO16(buf); /* 16 sums unrolled */ - buf += 16; - } while (--n); - MOD(adler); - MOD(sum2); - } - - /* do remaining bytes (less than NMAX, still just one modulo) */ - if (len) { /* avoid modulos if none remaining */ - while (len >= 16) { - len -= 16; - DO16(buf); - buf += 16; - } - while (len--) { - adler += *buf++; - sum2 += adler; - } - MOD(adler); - MOD(sum2); - } - - /* return recombined sums */ - return adler | (sum2 << 16); -} - -/* ========================================================================= */ -local uLong adler32_combine_(adler1, adler2, len2) - uLong adler1; - uLong adler2; - z_off64_t len2; -{ - unsigned long sum1; - unsigned long sum2; - unsigned rem; - - /* the derivation of this formula is left as an exercise for the reader */ - rem = (unsigned)(len2 % BASE); - sum1 = adler1 & 0xffff; - sum2 = rem * sum1; - MOD(sum2); - sum1 += (adler2 & 0xffff) + BASE - 1; - sum2 += ((adler1 >> 16) & 0xffff) + ((adler2 >> 16) & 0xffff) + BASE - rem; - if (sum1 >= BASE) sum1 -= BASE; - if (sum1 >= BASE) sum1 -= BASE; - if (sum2 >= (BASE << 1)) sum2 -= (BASE << 1); - if (sum2 >= BASE) sum2 -= BASE; - return sum1 | (sum2 << 16); -} - -/* ========================================================================= */ -uLong ZEXPORT adler32_combine(adler1, adler2, len2) - uLong adler1; - uLong adler2; - z_off_t len2; -{ - return 
adler32_combine_(adler1, adler2, len2); -} - -uLong ZEXPORT adler32_combine64(adler1, adler2, len2) - uLong adler1; - uLong adler2; - z_off64_t len2; -{ - return adler32_combine_(adler1, adler2, len2); -} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/compress.c cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/compress.c --- cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/compress.c 2014-11-18 11:13:10.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/compress.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,80 +0,0 @@ -/* compress.c -- compress a memory buffer - * Copyright (C) 1995-2005 Jean-loup Gailly. - * For conditions of distribution and use, see copyright notice in zlib.h - */ - -/* @(#) $Id$ */ - -#define ZLIB_INTERNAL -#include "zlib.h" - -/* =========================================================================== - Compresses the source buffer into the destination buffer. The level - parameter has the same meaning as in deflateInit. sourceLen is the byte - length of the source buffer. Upon entry, destLen is the total size of the - destination buffer, which must be at least 0.1% larger than sourceLen plus - 12 bytes. Upon exit, destLen is the actual size of the compressed buffer. - - compress2 returns Z_OK if success, Z_MEM_ERROR if there was not enough - memory, Z_BUF_ERROR if there was not enough room in the output buffer, - Z_STREAM_ERROR if the level parameter is invalid. 
-*/ -int ZEXPORT compress2 (dest, destLen, source, sourceLen, level) - Bytef *dest; - uLongf *destLen; - const Bytef *source; - uLong sourceLen; - int level; -{ - z_stream stream; - int err; - - stream.next_in = (Bytef*)source; - stream.avail_in = (uInt)sourceLen; -#ifdef MAXSEG_64K - /* Check for source > 64K on 16-bit machine: */ - if ((uLong)stream.avail_in != sourceLen) return Z_BUF_ERROR; -#endif - stream.next_out = dest; - stream.avail_out = (uInt)*destLen; - if ((uLong)stream.avail_out != *destLen) return Z_BUF_ERROR; - - stream.zalloc = (alloc_func)0; - stream.zfree = (free_func)0; - stream.opaque = (voidpf)0; - - err = deflateInit(&stream, level); - if (err != Z_OK) return err; - - err = deflate(&stream, Z_FINISH); - if (err != Z_STREAM_END) { - deflateEnd(&stream); - return err == Z_OK ? Z_BUF_ERROR : err; - } - *destLen = stream.total_out; - - err = deflateEnd(&stream); - return err; -} - -/* =========================================================================== - */ -int ZEXPORT compress (dest, destLen, source, sourceLen) - Bytef *dest; - uLongf *destLen; - const Bytef *source; - uLong sourceLen; -{ - return compress2(dest, destLen, source, sourceLen, Z_DEFAULT_COMPRESSION); -} - -/* =========================================================================== - If the default memLevel or windowBits for deflateInit() is changed, then - this function needs to be updated. 
- */ -uLong ZEXPORT compressBound (sourceLen) - uLong sourceLen; -{ - return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) + - (sourceLen >> 25) + 13; -} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/crc32.c cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/crc32.c --- cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/crc32.c 2014-11-18 11:13:10.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/crc32.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,442 +0,0 @@ -/* crc32.c -- compute the CRC-32 of a data stream - * Copyright (C) 1995-2006, 2010 Mark Adler - * For conditions of distribution and use, see copyright notice in zlib.h - * - * Thanks to Rodney Brown for his contribution of faster - * CRC methods: exclusive-oring 32 bits of data at a time, and pre-computing - * tables for updating the shift register in one step with three exclusive-ors - * instead of four steps with four exclusive-ors. This results in about a - * factor of two increase in speed on a Power PC G4 (PPC7455) using gcc -O3. - */ - -/* @(#) $Id$ */ - -/* - Note on the use of DYNAMIC_CRC_TABLE: there is no mutex or semaphore - protection on the static variables used to control the first-use generation - of the crc tables. Therefore, if you #define DYNAMIC_CRC_TABLE, you should - first call get_crc_table() to initialize the tables before allowing more than - one thread to use crc32(). - */ - -#ifdef MAKECRCH -# include -# ifndef DYNAMIC_CRC_TABLE -# define DYNAMIC_CRC_TABLE -# endif /* !DYNAMIC_CRC_TABLE */ -#endif /* MAKECRCH */ - -#include "zutil.h" /* for STDC and FAR definitions */ - -#define local static - -/* Find a four-byte integer type for crc32_little() and crc32_big(). 
*/ -#ifndef NOBYFOUR -# ifdef STDC /* need ANSI C limits.h to determine sizes */ -# include -# define BYFOUR -# if (UINT_MAX == 0xffffffffUL) - typedef unsigned int u4; -# else -# if (ULONG_MAX == 0xffffffffUL) - typedef unsigned long u4; -# else -# if (USHRT_MAX == 0xffffffffUL) - typedef unsigned short u4; -# else -# undef BYFOUR /* can't find a four-byte integer type! */ -# endif -# endif -# endif -# endif /* STDC */ -#endif /* !NOBYFOUR */ - -/* Definitions for doing the crc four data bytes at a time. */ -#ifdef BYFOUR -# define REV(w) ((((w)>>24)&0xff)+(((w)>>8)&0xff00)+ \ - (((w)&0xff00)<<8)+(((w)&0xff)<<24)) - local unsigned long crc32_little OF((unsigned long, - const unsigned char FAR *, unsigned)); - local unsigned long crc32_big OF((unsigned long, - const unsigned char FAR *, unsigned)); -# define TBLS 8 -#else -# define TBLS 1 -#endif /* BYFOUR */ - -/* Local functions for crc concatenation */ -local unsigned long gf2_matrix_times OF((unsigned long *mat, - unsigned long vec)); -local void gf2_matrix_square OF((unsigned long *square, unsigned long *mat)); -local uLong crc32_combine_(uLong crc1, uLong crc2, z_off64_t len2); - - -#ifdef DYNAMIC_CRC_TABLE - -local volatile int crc_table_empty = 1; -local unsigned long FAR crc_table[TBLS][256]; -local void make_crc_table OF((void)); -#ifdef MAKECRCH - local void write_table OF((FILE *, const unsigned long FAR *)); -#endif /* MAKECRCH */ -/* - Generate tables for a byte-wise 32-bit CRC calculation on the polynomial: - x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x+1. - - Polynomials over GF(2) are represented in binary, one bit per coefficient, - with the lowest powers in the most significant bit. Then adding polynomials - is just exclusive-or, and multiplying a polynomial by x is a right shift by - one. 
If we call the above polynomial p, and represent a byte as the - polynomial q, also with the lowest power in the most significant bit (so the - byte 0xb1 is the polynomial x^7+x^3+x+1), then the CRC is (q*x^32) mod p, - where a mod b means the remainder after dividing a by b. - - This calculation is done using the shift-register method of multiplying and - taking the remainder. The register is initialized to zero, and for each - incoming bit, x^32 is added mod p to the register if the bit is a one (where - x^32 mod p is p+x^32 = x^26+...+1), and the register is multiplied mod p by - x (which is shifting right by one and adding x^32 mod p if the bit shifted - out is a one). We start with the highest power (least significant bit) of - q and repeat for all eight bits of q. - - The first table is simply the CRC of all possible eight bit values. This is - all the information needed to generate CRCs on data a byte at a time for all - combinations of CRC register values and incoming bytes. The remaining tables - allow for word-at-a-time CRC calculation for both big-endian and little- - endian machines, where a word is four bytes. 
-*/ -local void make_crc_table() -{ - unsigned long c; - int n, k; - unsigned long poly; /* polynomial exclusive-or pattern */ - /* terms of polynomial defining this crc (except x^32): */ - static volatile int first = 1; /* flag to limit concurrent making */ - static const unsigned char p[] = {0,1,2,4,5,7,8,10,11,12,16,22,23,26}; - - /* See if another task is already doing this (not thread-safe, but better - than nothing -- significantly reduces duration of vulnerability in - case the advice about DYNAMIC_CRC_TABLE is ignored) */ - if (first) { - first = 0; - - /* make exclusive-or pattern from polynomial (0xedb88320UL) */ - poly = 0UL; - for (n = 0; n < sizeof(p)/sizeof(unsigned char); n++) - poly |= 1UL << (31 - p[n]); - - /* generate a crc for every 8-bit value */ - for (n = 0; n < 256; n++) { - c = (unsigned long)n; - for (k = 0; k < 8; k++) - c = c & 1 ? poly ^ (c >> 1) : c >> 1; - crc_table[0][n] = c; - } - -#ifdef BYFOUR - /* generate crc for each value followed by one, two, and three zeros, - and then the byte reversal of those as well as the first table */ - for (n = 0; n < 256; n++) { - c = crc_table[0][n]; - crc_table[4][n] = REV(c); - for (k = 1; k < 4; k++) { - c = crc_table[0][c & 0xff] ^ (c >> 8); - crc_table[k][n] = c; - crc_table[k + 4][n] = REV(c); - } - } -#endif /* BYFOUR */ - - crc_table_empty = 0; - } - else { /* not first */ - /* wait for the other guy to finish (not efficient, but rare) */ - while (crc_table_empty) - ; - } - -#ifdef MAKECRCH - /* write out CRC tables to crc32.h */ - { - FILE *out; - - out = fopen("crc32.h", "w"); - if (out == NULL) return; - fprintf(out, "/* crc32.h -- tables for rapid CRC calculation\n"); - fprintf(out, " * Generated automatically by crc32.c\n */\n\n"); - fprintf(out, "local const unsigned long FAR "); - fprintf(out, "crc_table[TBLS][256] =\n{\n {\n"); - write_table(out, crc_table[0]); -# ifdef BYFOUR - fprintf(out, "#ifdef BYFOUR\n"); - for (k = 1; k < 8; k++) { - fprintf(out, " },\n {\n"); - 
write_table(out, crc_table[k]); - } - fprintf(out, "#endif\n"); -# endif /* BYFOUR */ - fprintf(out, " }\n};\n"); - fclose(out); - } -#endif /* MAKECRCH */ -} - -#ifdef MAKECRCH -local void write_table(out, table) - FILE *out; - const unsigned long FAR *table; -{ - int n; - - for (n = 0; n < 256; n++) - fprintf(out, "%s0x%08lxUL%s", n % 5 ? "" : " ", table[n], - n == 255 ? "\n" : (n % 5 == 4 ? ",\n" : ", ")); -} -#endif /* MAKECRCH */ - -#else /* !DYNAMIC_CRC_TABLE */ -/* ======================================================================== - * Tables of CRC-32s of all single-byte values, made by make_crc_table(). - */ -#include "crc32.h" -#endif /* DYNAMIC_CRC_TABLE */ - -/* ========================================================================= - * This function can be used by asm versions of crc32() - */ -const unsigned long FAR * ZEXPORT get_crc_table() -{ -#ifdef DYNAMIC_CRC_TABLE - if (crc_table_empty) - make_crc_table(); -#endif /* DYNAMIC_CRC_TABLE */ - return (const unsigned long FAR *)crc_table; -} - -/* ========================================================================= */ -#define DO1 crc = crc_table[0][((int)crc ^ (*buf++)) & 0xff] ^ (crc >> 8) -#define DO8 DO1; DO1; DO1; DO1; DO1; DO1; DO1; DO1 - -/* ========================================================================= */ -unsigned long ZEXPORT crc32(crc, buf, len) - unsigned long crc; - const unsigned char FAR *buf; - uInt len; -{ - if (buf == Z_NULL) return 0UL; - -#ifdef DYNAMIC_CRC_TABLE - if (crc_table_empty) - make_crc_table(); -#endif /* DYNAMIC_CRC_TABLE */ - -#ifdef BYFOUR - if (sizeof(void *) == sizeof(ptrdiff_t)) { - u4 endian; - - endian = 1; - if (*((unsigned char *)(&endian))) - return crc32_little(crc, buf, len); - else - return crc32_big(crc, buf, len); - } -#endif /* BYFOUR */ - crc = crc ^ 0xffffffffUL; - while (len >= 8) { - DO8; - len -= 8; - } - if (len) do { - DO1; - } while (--len); - return crc ^ 0xffffffffUL; -} - -#ifdef BYFOUR - -/* 
========================================================================= */ -#define DOLIT4 c ^= *buf4++; \ - c = crc_table[3][c & 0xff] ^ crc_table[2][(c >> 8) & 0xff] ^ \ - crc_table[1][(c >> 16) & 0xff] ^ crc_table[0][c >> 24] -#define DOLIT32 DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4 - -/* ========================================================================= */ -local unsigned long crc32_little(crc, buf, len) - unsigned long crc; - const unsigned char FAR *buf; - unsigned len; -{ - register u4 c; - register const u4 FAR *buf4; - - c = (u4)crc; - c = ~c; - while (len && ((ptrdiff_t)buf & 3)) { - c = crc_table[0][(c ^ *buf++) & 0xff] ^ (c >> 8); - len--; - } - - buf4 = (const u4 FAR *)(const void FAR *)buf; - while (len >= 32) { - DOLIT32; - len -= 32; - } - while (len >= 4) { - DOLIT4; - len -= 4; - } - buf = (const unsigned char FAR *)buf4; - - if (len) do { - c = crc_table[0][(c ^ *buf++) & 0xff] ^ (c >> 8); - } while (--len); - c = ~c; - return (unsigned long)c; -} - -/* ========================================================================= */ -#define DOBIG4 c ^= *++buf4; \ - c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \ - crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24] -#define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4 - -/* ========================================================================= */ -local unsigned long crc32_big(crc, buf, len) - unsigned long crc; - const unsigned char FAR *buf; - unsigned len; -{ - register u4 c; - register const u4 FAR *buf4; - - c = REV((u4)crc); - c = ~c; - while (len && ((ptrdiff_t)buf & 3)) { - c = crc_table[4][(c >> 24) ^ *buf++] ^ (c << 8); - len--; - } - - buf4 = (const u4 FAR *)(const void FAR *)buf; - buf4--; - while (len >= 32) { - DOBIG32; - len -= 32; - } - while (len >= 4) { - DOBIG4; - len -= 4; - } - buf4++; - buf = (const unsigned char FAR *)buf4; - - if (len) do { - c = crc_table[4][(c >> 24) ^ *buf++] ^ (c << 8); - } 
while (--len); - c = ~c; - return (unsigned long)(REV(c)); -} - -#endif /* BYFOUR */ - -#define GF2_DIM 32 /* dimension of GF(2) vectors (length of CRC) */ - -/* ========================================================================= */ -local unsigned long gf2_matrix_times(mat, vec) - unsigned long *mat; - unsigned long vec; -{ - unsigned long sum; - - sum = 0; - while (vec) { - if (vec & 1) - sum ^= *mat; - vec >>= 1; - mat++; - } - return sum; -} - -/* ========================================================================= */ -local void gf2_matrix_square(square, mat) - unsigned long *square; - unsigned long *mat; -{ - int n; - - for (n = 0; n < GF2_DIM; n++) - square[n] = gf2_matrix_times(mat, mat[n]); -} - -/* ========================================================================= */ -local uLong crc32_combine_(crc1, crc2, len2) - uLong crc1; - uLong crc2; - z_off64_t len2; -{ - int n; - unsigned long row; - unsigned long even[GF2_DIM]; /* even-power-of-two zeros operator */ - unsigned long odd[GF2_DIM]; /* odd-power-of-two zeros operator */ - - /* degenerate case (also disallow negative lengths) */ - if (len2 <= 0) - return crc1; - - /* put operator for one zero bit in odd */ - odd[0] = 0xedb88320UL; /* CRC-32 polynomial */ - row = 1; - for (n = 1; n < GF2_DIM; n++) { - odd[n] = row; - row <<= 1; - } - - /* put operator for two zero bits in even */ - gf2_matrix_square(even, odd); - - /* put operator for four zero bits in odd */ - gf2_matrix_square(odd, even); - - /* apply len2 zeros to crc1 (first square will put the operator for one - zero byte, eight zero bits, in even) */ - do { - /* apply zeros operator for this bit of len2 */ - gf2_matrix_square(even, odd); - if (len2 & 1) - crc1 = gf2_matrix_times(even, crc1); - len2 >>= 1; - - /* if no more bits set, then done */ - if (len2 == 0) - break; - - /* another iteration of the loop with odd and even swapped */ - gf2_matrix_square(odd, even); - if (len2 & 1) - crc1 = gf2_matrix_times(odd, crc1); - len2 
>>= 1; - - /* if no more bits set, then done */ - } while (len2 != 0); - - /* return combined crc */ - crc1 ^= crc2; - return crc1; -} - -/* ========================================================================= */ -uLong ZEXPORT crc32_combine(crc1, crc2, len2) - uLong crc1; - uLong crc2; - z_off_t len2; -{ - return crc32_combine_(crc1, crc2, len2); -} - -uLong ZEXPORT crc32_combine64(crc1, crc2, len2) - uLong crc1; - uLong crc2; - z_off64_t len2; -{ - return crc32_combine_(crc1, crc2, len2); -} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/crc32.h cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/crc32.h --- cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/crc32.h 2014-11-18 11:13:10.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/crc32.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,441 +0,0 @@ -/* crc32.h -- tables for rapid CRC calculation - * Generated automatically by crc32.c - */ - -local const unsigned long FAR crc_table[TBLS][256] = -{ - { - 0x00000000UL, 0x77073096UL, 0xee0e612cUL, 0x990951baUL, 0x076dc419UL, - 0x706af48fUL, 0xe963a535UL, 0x9e6495a3UL, 0x0edb8832UL, 0x79dcb8a4UL, - 0xe0d5e91eUL, 0x97d2d988UL, 0x09b64c2bUL, 0x7eb17cbdUL, 0xe7b82d07UL, - 0x90bf1d91UL, 0x1db71064UL, 0x6ab020f2UL, 0xf3b97148UL, 0x84be41deUL, - 0x1adad47dUL, 0x6ddde4ebUL, 0xf4d4b551UL, 0x83d385c7UL, 0x136c9856UL, - 0x646ba8c0UL, 0xfd62f97aUL, 0x8a65c9ecUL, 0x14015c4fUL, 0x63066cd9UL, - 0xfa0f3d63UL, 0x8d080df5UL, 0x3b6e20c8UL, 0x4c69105eUL, 0xd56041e4UL, - 0xa2677172UL, 0x3c03e4d1UL, 0x4b04d447UL, 0xd20d85fdUL, 0xa50ab56bUL, - 0x35b5a8faUL, 0x42b2986cUL, 0xdbbbc9d6UL, 0xacbcf940UL, 0x32d86ce3UL, - 0x45df5c75UL, 0xdcd60dcfUL, 0xabd13d59UL, 0x26d930acUL, 0x51de003aUL, - 0xc8d75180UL, 0xbfd06116UL, 0x21b4f4b5UL, 0x56b3c423UL, 0xcfba9599UL, - 0xb8bda50fUL, 0x2802b89eUL, 0x5f058808UL, 0xc60cd9b2UL, 0xb10be924UL, - 0x2f6f7c87UL, 0x58684c11UL, 0xc1611dabUL, 0xb6662d3dUL, 0x76dc4190UL, - 0x01db7106UL, 0x98d220bcUL, 0xefd5102aUL, 0x71b18589UL, 
0x06b6b51fUL, - 0x9fbfe4a5UL, 0xe8b8d433UL, 0x7807c9a2UL, 0x0f00f934UL, 0x9609a88eUL, - 0xe10e9818UL, 0x7f6a0dbbUL, 0x086d3d2dUL, 0x91646c97UL, 0xe6635c01UL, - 0x6b6b51f4UL, 0x1c6c6162UL, 0x856530d8UL, 0xf262004eUL, 0x6c0695edUL, - 0x1b01a57bUL, 0x8208f4c1UL, 0xf50fc457UL, 0x65b0d9c6UL, 0x12b7e950UL, - 0x8bbeb8eaUL, 0xfcb9887cUL, 0x62dd1ddfUL, 0x15da2d49UL, 0x8cd37cf3UL, - 0xfbd44c65UL, 0x4db26158UL, 0x3ab551ceUL, 0xa3bc0074UL, 0xd4bb30e2UL, - 0x4adfa541UL, 0x3dd895d7UL, 0xa4d1c46dUL, 0xd3d6f4fbUL, 0x4369e96aUL, - 0x346ed9fcUL, 0xad678846UL, 0xda60b8d0UL, 0x44042d73UL, 0x33031de5UL, - 0xaa0a4c5fUL, 0xdd0d7cc9UL, 0x5005713cUL, 0x270241aaUL, 0xbe0b1010UL, - 0xc90c2086UL, 0x5768b525UL, 0x206f85b3UL, 0xb966d409UL, 0xce61e49fUL, - 0x5edef90eUL, 0x29d9c998UL, 0xb0d09822UL, 0xc7d7a8b4UL, 0x59b33d17UL, - 0x2eb40d81UL, 0xb7bd5c3bUL, 0xc0ba6cadUL, 0xedb88320UL, 0x9abfb3b6UL, - 0x03b6e20cUL, 0x74b1d29aUL, 0xead54739UL, 0x9dd277afUL, 0x04db2615UL, - 0x73dc1683UL, 0xe3630b12UL, 0x94643b84UL, 0x0d6d6a3eUL, 0x7a6a5aa8UL, - 0xe40ecf0bUL, 0x9309ff9dUL, 0x0a00ae27UL, 0x7d079eb1UL, 0xf00f9344UL, - 0x8708a3d2UL, 0x1e01f268UL, 0x6906c2feUL, 0xf762575dUL, 0x806567cbUL, - 0x196c3671UL, 0x6e6b06e7UL, 0xfed41b76UL, 0x89d32be0UL, 0x10da7a5aUL, - 0x67dd4accUL, 0xf9b9df6fUL, 0x8ebeeff9UL, 0x17b7be43UL, 0x60b08ed5UL, - 0xd6d6a3e8UL, 0xa1d1937eUL, 0x38d8c2c4UL, 0x4fdff252UL, 0xd1bb67f1UL, - 0xa6bc5767UL, 0x3fb506ddUL, 0x48b2364bUL, 0xd80d2bdaUL, 0xaf0a1b4cUL, - 0x36034af6UL, 0x41047a60UL, 0xdf60efc3UL, 0xa867df55UL, 0x316e8eefUL, - 0x4669be79UL, 0xcb61b38cUL, 0xbc66831aUL, 0x256fd2a0UL, 0x5268e236UL, - 0xcc0c7795UL, 0xbb0b4703UL, 0x220216b9UL, 0x5505262fUL, 0xc5ba3bbeUL, - 0xb2bd0b28UL, 0x2bb45a92UL, 0x5cb36a04UL, 0xc2d7ffa7UL, 0xb5d0cf31UL, - 0x2cd99e8bUL, 0x5bdeae1dUL, 0x9b64c2b0UL, 0xec63f226UL, 0x756aa39cUL, - 0x026d930aUL, 0x9c0906a9UL, 0xeb0e363fUL, 0x72076785UL, 0x05005713UL, - 0x95bf4a82UL, 0xe2b87a14UL, 0x7bb12baeUL, 0x0cb61b38UL, 0x92d28e9bUL, - 0xe5d5be0dUL, 0x7cdcefb7UL, 
0x0bdbdf21UL, 0x86d3d2d4UL, 0xf1d4e242UL, - 0x68ddb3f8UL, 0x1fda836eUL, 0x81be16cdUL, 0xf6b9265bUL, 0x6fb077e1UL, - 0x18b74777UL, 0x88085ae6UL, 0xff0f6a70UL, 0x66063bcaUL, 0x11010b5cUL, - 0x8f659effUL, 0xf862ae69UL, 0x616bffd3UL, 0x166ccf45UL, 0xa00ae278UL, - 0xd70dd2eeUL, 0x4e048354UL, 0x3903b3c2UL, 0xa7672661UL, 0xd06016f7UL, - 0x4969474dUL, 0x3e6e77dbUL, 0xaed16a4aUL, 0xd9d65adcUL, 0x40df0b66UL, - 0x37d83bf0UL, 0xa9bcae53UL, 0xdebb9ec5UL, 0x47b2cf7fUL, 0x30b5ffe9UL, - 0xbdbdf21cUL, 0xcabac28aUL, 0x53b39330UL, 0x24b4a3a6UL, 0xbad03605UL, - 0xcdd70693UL, 0x54de5729UL, 0x23d967bfUL, 0xb3667a2eUL, 0xc4614ab8UL, - 0x5d681b02UL, 0x2a6f2b94UL, 0xb40bbe37UL, 0xc30c8ea1UL, 0x5a05df1bUL, - 0x2d02ef8dUL -#ifdef BYFOUR - }, - { - 0x00000000UL, 0x191b3141UL, 0x32366282UL, 0x2b2d53c3UL, 0x646cc504UL, - 0x7d77f445UL, 0x565aa786UL, 0x4f4196c7UL, 0xc8d98a08UL, 0xd1c2bb49UL, - 0xfaefe88aUL, 0xe3f4d9cbUL, 0xacb54f0cUL, 0xb5ae7e4dUL, 0x9e832d8eUL, - 0x87981ccfUL, 0x4ac21251UL, 0x53d92310UL, 0x78f470d3UL, 0x61ef4192UL, - 0x2eaed755UL, 0x37b5e614UL, 0x1c98b5d7UL, 0x05838496UL, 0x821b9859UL, - 0x9b00a918UL, 0xb02dfadbUL, 0xa936cb9aUL, 0xe6775d5dUL, 0xff6c6c1cUL, - 0xd4413fdfUL, 0xcd5a0e9eUL, 0x958424a2UL, 0x8c9f15e3UL, 0xa7b24620UL, - 0xbea97761UL, 0xf1e8e1a6UL, 0xe8f3d0e7UL, 0xc3de8324UL, 0xdac5b265UL, - 0x5d5daeaaUL, 0x44469febUL, 0x6f6bcc28UL, 0x7670fd69UL, 0x39316baeUL, - 0x202a5aefUL, 0x0b07092cUL, 0x121c386dUL, 0xdf4636f3UL, 0xc65d07b2UL, - 0xed705471UL, 0xf46b6530UL, 0xbb2af3f7UL, 0xa231c2b6UL, 0x891c9175UL, - 0x9007a034UL, 0x179fbcfbUL, 0x0e848dbaUL, 0x25a9de79UL, 0x3cb2ef38UL, - 0x73f379ffUL, 0x6ae848beUL, 0x41c51b7dUL, 0x58de2a3cUL, 0xf0794f05UL, - 0xe9627e44UL, 0xc24f2d87UL, 0xdb541cc6UL, 0x94158a01UL, 0x8d0ebb40UL, - 0xa623e883UL, 0xbf38d9c2UL, 0x38a0c50dUL, 0x21bbf44cUL, 0x0a96a78fUL, - 0x138d96ceUL, 0x5ccc0009UL, 0x45d73148UL, 0x6efa628bUL, 0x77e153caUL, - 0xbabb5d54UL, 0xa3a06c15UL, 0x888d3fd6UL, 0x91960e97UL, 0xded79850UL, - 0xc7cca911UL, 0xece1fad2UL, 0xf5facb93UL, 
0x7262d75cUL, 0x6b79e61dUL, - 0x4054b5deUL, 0x594f849fUL, 0x160e1258UL, 0x0f152319UL, 0x243870daUL, - 0x3d23419bUL, 0x65fd6ba7UL, 0x7ce65ae6UL, 0x57cb0925UL, 0x4ed03864UL, - 0x0191aea3UL, 0x188a9fe2UL, 0x33a7cc21UL, 0x2abcfd60UL, 0xad24e1afUL, - 0xb43fd0eeUL, 0x9f12832dUL, 0x8609b26cUL, 0xc94824abUL, 0xd05315eaUL, - 0xfb7e4629UL, 0xe2657768UL, 0x2f3f79f6UL, 0x362448b7UL, 0x1d091b74UL, - 0x04122a35UL, 0x4b53bcf2UL, 0x52488db3UL, 0x7965de70UL, 0x607eef31UL, - 0xe7e6f3feUL, 0xfefdc2bfUL, 0xd5d0917cUL, 0xcccba03dUL, 0x838a36faUL, - 0x9a9107bbUL, 0xb1bc5478UL, 0xa8a76539UL, 0x3b83984bUL, 0x2298a90aUL, - 0x09b5fac9UL, 0x10aecb88UL, 0x5fef5d4fUL, 0x46f46c0eUL, 0x6dd93fcdUL, - 0x74c20e8cUL, 0xf35a1243UL, 0xea412302UL, 0xc16c70c1UL, 0xd8774180UL, - 0x9736d747UL, 0x8e2de606UL, 0xa500b5c5UL, 0xbc1b8484UL, 0x71418a1aUL, - 0x685abb5bUL, 0x4377e898UL, 0x5a6cd9d9UL, 0x152d4f1eUL, 0x0c367e5fUL, - 0x271b2d9cUL, 0x3e001cddUL, 0xb9980012UL, 0xa0833153UL, 0x8bae6290UL, - 0x92b553d1UL, 0xddf4c516UL, 0xc4eff457UL, 0xefc2a794UL, 0xf6d996d5UL, - 0xae07bce9UL, 0xb71c8da8UL, 0x9c31de6bUL, 0x852aef2aUL, 0xca6b79edUL, - 0xd37048acUL, 0xf85d1b6fUL, 0xe1462a2eUL, 0x66de36e1UL, 0x7fc507a0UL, - 0x54e85463UL, 0x4df36522UL, 0x02b2f3e5UL, 0x1ba9c2a4UL, 0x30849167UL, - 0x299fa026UL, 0xe4c5aeb8UL, 0xfdde9ff9UL, 0xd6f3cc3aUL, 0xcfe8fd7bUL, - 0x80a96bbcUL, 0x99b25afdUL, 0xb29f093eUL, 0xab84387fUL, 0x2c1c24b0UL, - 0x350715f1UL, 0x1e2a4632UL, 0x07317773UL, 0x4870e1b4UL, 0x516bd0f5UL, - 0x7a468336UL, 0x635db277UL, 0xcbfad74eUL, 0xd2e1e60fUL, 0xf9ccb5ccUL, - 0xe0d7848dUL, 0xaf96124aUL, 0xb68d230bUL, 0x9da070c8UL, 0x84bb4189UL, - 0x03235d46UL, 0x1a386c07UL, 0x31153fc4UL, 0x280e0e85UL, 0x674f9842UL, - 0x7e54a903UL, 0x5579fac0UL, 0x4c62cb81UL, 0x8138c51fUL, 0x9823f45eUL, - 0xb30ea79dUL, 0xaa1596dcUL, 0xe554001bUL, 0xfc4f315aUL, 0xd7626299UL, - 0xce7953d8UL, 0x49e14f17UL, 0x50fa7e56UL, 0x7bd72d95UL, 0x62cc1cd4UL, - 0x2d8d8a13UL, 0x3496bb52UL, 0x1fbbe891UL, 0x06a0d9d0UL, 0x5e7ef3ecUL, - 0x4765c2adUL, 
0x6c48916eUL, 0x7553a02fUL, 0x3a1236e8UL, 0x230907a9UL, - 0x0824546aUL, 0x113f652bUL, 0x96a779e4UL, 0x8fbc48a5UL, 0xa4911b66UL, - 0xbd8a2a27UL, 0xf2cbbce0UL, 0xebd08da1UL, 0xc0fdde62UL, 0xd9e6ef23UL, - 0x14bce1bdUL, 0x0da7d0fcUL, 0x268a833fUL, 0x3f91b27eUL, 0x70d024b9UL, - 0x69cb15f8UL, 0x42e6463bUL, 0x5bfd777aUL, 0xdc656bb5UL, 0xc57e5af4UL, - 0xee530937UL, 0xf7483876UL, 0xb809aeb1UL, 0xa1129ff0UL, 0x8a3fcc33UL, - 0x9324fd72UL - }, - { - 0x00000000UL, 0x01c26a37UL, 0x0384d46eUL, 0x0246be59UL, 0x0709a8dcUL, - 0x06cbc2ebUL, 0x048d7cb2UL, 0x054f1685UL, 0x0e1351b8UL, 0x0fd13b8fUL, - 0x0d9785d6UL, 0x0c55efe1UL, 0x091af964UL, 0x08d89353UL, 0x0a9e2d0aUL, - 0x0b5c473dUL, 0x1c26a370UL, 0x1de4c947UL, 0x1fa2771eUL, 0x1e601d29UL, - 0x1b2f0bacUL, 0x1aed619bUL, 0x18abdfc2UL, 0x1969b5f5UL, 0x1235f2c8UL, - 0x13f798ffUL, 0x11b126a6UL, 0x10734c91UL, 0x153c5a14UL, 0x14fe3023UL, - 0x16b88e7aUL, 0x177ae44dUL, 0x384d46e0UL, 0x398f2cd7UL, 0x3bc9928eUL, - 0x3a0bf8b9UL, 0x3f44ee3cUL, 0x3e86840bUL, 0x3cc03a52UL, 0x3d025065UL, - 0x365e1758UL, 0x379c7d6fUL, 0x35dac336UL, 0x3418a901UL, 0x3157bf84UL, - 0x3095d5b3UL, 0x32d36beaUL, 0x331101ddUL, 0x246be590UL, 0x25a98fa7UL, - 0x27ef31feUL, 0x262d5bc9UL, 0x23624d4cUL, 0x22a0277bUL, 0x20e69922UL, - 0x2124f315UL, 0x2a78b428UL, 0x2bbade1fUL, 0x29fc6046UL, 0x283e0a71UL, - 0x2d711cf4UL, 0x2cb376c3UL, 0x2ef5c89aUL, 0x2f37a2adUL, 0x709a8dc0UL, - 0x7158e7f7UL, 0x731e59aeUL, 0x72dc3399UL, 0x7793251cUL, 0x76514f2bUL, - 0x7417f172UL, 0x75d59b45UL, 0x7e89dc78UL, 0x7f4bb64fUL, 0x7d0d0816UL, - 0x7ccf6221UL, 0x798074a4UL, 0x78421e93UL, 0x7a04a0caUL, 0x7bc6cafdUL, - 0x6cbc2eb0UL, 0x6d7e4487UL, 0x6f38fadeUL, 0x6efa90e9UL, 0x6bb5866cUL, - 0x6a77ec5bUL, 0x68315202UL, 0x69f33835UL, 0x62af7f08UL, 0x636d153fUL, - 0x612bab66UL, 0x60e9c151UL, 0x65a6d7d4UL, 0x6464bde3UL, 0x662203baUL, - 0x67e0698dUL, 0x48d7cb20UL, 0x4915a117UL, 0x4b531f4eUL, 0x4a917579UL, - 0x4fde63fcUL, 0x4e1c09cbUL, 0x4c5ab792UL, 0x4d98dda5UL, 0x46c49a98UL, - 0x4706f0afUL, 0x45404ef6UL, 0x448224c1UL, 
0x41cd3244UL, 0x400f5873UL, - 0x4249e62aUL, 0x438b8c1dUL, 0x54f16850UL, 0x55330267UL, 0x5775bc3eUL, - 0x56b7d609UL, 0x53f8c08cUL, 0x523aaabbUL, 0x507c14e2UL, 0x51be7ed5UL, - 0x5ae239e8UL, 0x5b2053dfUL, 0x5966ed86UL, 0x58a487b1UL, 0x5deb9134UL, - 0x5c29fb03UL, 0x5e6f455aUL, 0x5fad2f6dUL, 0xe1351b80UL, 0xe0f771b7UL, - 0xe2b1cfeeUL, 0xe373a5d9UL, 0xe63cb35cUL, 0xe7fed96bUL, 0xe5b86732UL, - 0xe47a0d05UL, 0xef264a38UL, 0xeee4200fUL, 0xeca29e56UL, 0xed60f461UL, - 0xe82fe2e4UL, 0xe9ed88d3UL, 0xebab368aUL, 0xea695cbdUL, 0xfd13b8f0UL, - 0xfcd1d2c7UL, 0xfe976c9eUL, 0xff5506a9UL, 0xfa1a102cUL, 0xfbd87a1bUL, - 0xf99ec442UL, 0xf85cae75UL, 0xf300e948UL, 0xf2c2837fUL, 0xf0843d26UL, - 0xf1465711UL, 0xf4094194UL, 0xf5cb2ba3UL, 0xf78d95faUL, 0xf64fffcdUL, - 0xd9785d60UL, 0xd8ba3757UL, 0xdafc890eUL, 0xdb3ee339UL, 0xde71f5bcUL, - 0xdfb39f8bUL, 0xddf521d2UL, 0xdc374be5UL, 0xd76b0cd8UL, 0xd6a966efUL, - 0xd4efd8b6UL, 0xd52db281UL, 0xd062a404UL, 0xd1a0ce33UL, 0xd3e6706aUL, - 0xd2241a5dUL, 0xc55efe10UL, 0xc49c9427UL, 0xc6da2a7eUL, 0xc7184049UL, - 0xc25756ccUL, 0xc3953cfbUL, 0xc1d382a2UL, 0xc011e895UL, 0xcb4dafa8UL, - 0xca8fc59fUL, 0xc8c97bc6UL, 0xc90b11f1UL, 0xcc440774UL, 0xcd866d43UL, - 0xcfc0d31aUL, 0xce02b92dUL, 0x91af9640UL, 0x906dfc77UL, 0x922b422eUL, - 0x93e92819UL, 0x96a63e9cUL, 0x976454abUL, 0x9522eaf2UL, 0x94e080c5UL, - 0x9fbcc7f8UL, 0x9e7eadcfUL, 0x9c381396UL, 0x9dfa79a1UL, 0x98b56f24UL, - 0x99770513UL, 0x9b31bb4aUL, 0x9af3d17dUL, 0x8d893530UL, 0x8c4b5f07UL, - 0x8e0de15eUL, 0x8fcf8b69UL, 0x8a809decUL, 0x8b42f7dbUL, 0x89044982UL, - 0x88c623b5UL, 0x839a6488UL, 0x82580ebfUL, 0x801eb0e6UL, 0x81dcdad1UL, - 0x8493cc54UL, 0x8551a663UL, 0x8717183aUL, 0x86d5720dUL, 0xa9e2d0a0UL, - 0xa820ba97UL, 0xaa6604ceUL, 0xaba46ef9UL, 0xaeeb787cUL, 0xaf29124bUL, - 0xad6fac12UL, 0xacadc625UL, 0xa7f18118UL, 0xa633eb2fUL, 0xa4755576UL, - 0xa5b73f41UL, 0xa0f829c4UL, 0xa13a43f3UL, 0xa37cfdaaUL, 0xa2be979dUL, - 0xb5c473d0UL, 0xb40619e7UL, 0xb640a7beUL, 0xb782cd89UL, 0xb2cddb0cUL, - 0xb30fb13bUL, 
0xb1490f62UL, 0xb08b6555UL, 0xbbd72268UL, 0xba15485fUL, - 0xb853f606UL, 0xb9919c31UL, 0xbcde8ab4UL, 0xbd1ce083UL, 0xbf5a5edaUL, - 0xbe9834edUL - }, - { - 0x00000000UL, 0xb8bc6765UL, 0xaa09c88bUL, 0x12b5afeeUL, 0x8f629757UL, - 0x37def032UL, 0x256b5fdcUL, 0x9dd738b9UL, 0xc5b428efUL, 0x7d084f8aUL, - 0x6fbde064UL, 0xd7018701UL, 0x4ad6bfb8UL, 0xf26ad8ddUL, 0xe0df7733UL, - 0x58631056UL, 0x5019579fUL, 0xe8a530faUL, 0xfa109f14UL, 0x42acf871UL, - 0xdf7bc0c8UL, 0x67c7a7adUL, 0x75720843UL, 0xcdce6f26UL, 0x95ad7f70UL, - 0x2d111815UL, 0x3fa4b7fbUL, 0x8718d09eUL, 0x1acfe827UL, 0xa2738f42UL, - 0xb0c620acUL, 0x087a47c9UL, 0xa032af3eUL, 0x188ec85bUL, 0x0a3b67b5UL, - 0xb28700d0UL, 0x2f503869UL, 0x97ec5f0cUL, 0x8559f0e2UL, 0x3de59787UL, - 0x658687d1UL, 0xdd3ae0b4UL, 0xcf8f4f5aUL, 0x7733283fUL, 0xeae41086UL, - 0x525877e3UL, 0x40edd80dUL, 0xf851bf68UL, 0xf02bf8a1UL, 0x48979fc4UL, - 0x5a22302aUL, 0xe29e574fUL, 0x7f496ff6UL, 0xc7f50893UL, 0xd540a77dUL, - 0x6dfcc018UL, 0x359fd04eUL, 0x8d23b72bUL, 0x9f9618c5UL, 0x272a7fa0UL, - 0xbafd4719UL, 0x0241207cUL, 0x10f48f92UL, 0xa848e8f7UL, 0x9b14583dUL, - 0x23a83f58UL, 0x311d90b6UL, 0x89a1f7d3UL, 0x1476cf6aUL, 0xaccaa80fUL, - 0xbe7f07e1UL, 0x06c36084UL, 0x5ea070d2UL, 0xe61c17b7UL, 0xf4a9b859UL, - 0x4c15df3cUL, 0xd1c2e785UL, 0x697e80e0UL, 0x7bcb2f0eUL, 0xc377486bUL, - 0xcb0d0fa2UL, 0x73b168c7UL, 0x6104c729UL, 0xd9b8a04cUL, 0x446f98f5UL, - 0xfcd3ff90UL, 0xee66507eUL, 0x56da371bUL, 0x0eb9274dUL, 0xb6054028UL, - 0xa4b0efc6UL, 0x1c0c88a3UL, 0x81dbb01aUL, 0x3967d77fUL, 0x2bd27891UL, - 0x936e1ff4UL, 0x3b26f703UL, 0x839a9066UL, 0x912f3f88UL, 0x299358edUL, - 0xb4446054UL, 0x0cf80731UL, 0x1e4da8dfUL, 0xa6f1cfbaUL, 0xfe92dfecUL, - 0x462eb889UL, 0x549b1767UL, 0xec277002UL, 0x71f048bbUL, 0xc94c2fdeUL, - 0xdbf98030UL, 0x6345e755UL, 0x6b3fa09cUL, 0xd383c7f9UL, 0xc1366817UL, - 0x798a0f72UL, 0xe45d37cbUL, 0x5ce150aeUL, 0x4e54ff40UL, 0xf6e89825UL, - 0xae8b8873UL, 0x1637ef16UL, 0x048240f8UL, 0xbc3e279dUL, 0x21e91f24UL, - 0x99557841UL, 0x8be0d7afUL, 0x335cb0caUL, 
0xed59b63bUL, 0x55e5d15eUL, - 0x47507eb0UL, 0xffec19d5UL, 0x623b216cUL, 0xda874609UL, 0xc832e9e7UL, - 0x708e8e82UL, 0x28ed9ed4UL, 0x9051f9b1UL, 0x82e4565fUL, 0x3a58313aUL, - 0xa78f0983UL, 0x1f336ee6UL, 0x0d86c108UL, 0xb53aa66dUL, 0xbd40e1a4UL, - 0x05fc86c1UL, 0x1749292fUL, 0xaff54e4aUL, 0x322276f3UL, 0x8a9e1196UL, - 0x982bbe78UL, 0x2097d91dUL, 0x78f4c94bUL, 0xc048ae2eUL, 0xd2fd01c0UL, - 0x6a4166a5UL, 0xf7965e1cUL, 0x4f2a3979UL, 0x5d9f9697UL, 0xe523f1f2UL, - 0x4d6b1905UL, 0xf5d77e60UL, 0xe762d18eUL, 0x5fdeb6ebUL, 0xc2098e52UL, - 0x7ab5e937UL, 0x680046d9UL, 0xd0bc21bcUL, 0x88df31eaUL, 0x3063568fUL, - 0x22d6f961UL, 0x9a6a9e04UL, 0x07bda6bdUL, 0xbf01c1d8UL, 0xadb46e36UL, - 0x15080953UL, 0x1d724e9aUL, 0xa5ce29ffUL, 0xb77b8611UL, 0x0fc7e174UL, - 0x9210d9cdUL, 0x2aacbea8UL, 0x38191146UL, 0x80a57623UL, 0xd8c66675UL, - 0x607a0110UL, 0x72cfaefeUL, 0xca73c99bUL, 0x57a4f122UL, 0xef189647UL, - 0xfdad39a9UL, 0x45115eccUL, 0x764dee06UL, 0xcef18963UL, 0xdc44268dUL, - 0x64f841e8UL, 0xf92f7951UL, 0x41931e34UL, 0x5326b1daUL, 0xeb9ad6bfUL, - 0xb3f9c6e9UL, 0x0b45a18cUL, 0x19f00e62UL, 0xa14c6907UL, 0x3c9b51beUL, - 0x842736dbUL, 0x96929935UL, 0x2e2efe50UL, 0x2654b999UL, 0x9ee8defcUL, - 0x8c5d7112UL, 0x34e11677UL, 0xa9362eceUL, 0x118a49abUL, 0x033fe645UL, - 0xbb838120UL, 0xe3e09176UL, 0x5b5cf613UL, 0x49e959fdUL, 0xf1553e98UL, - 0x6c820621UL, 0xd43e6144UL, 0xc68bceaaUL, 0x7e37a9cfUL, 0xd67f4138UL, - 0x6ec3265dUL, 0x7c7689b3UL, 0xc4caeed6UL, 0x591dd66fUL, 0xe1a1b10aUL, - 0xf3141ee4UL, 0x4ba87981UL, 0x13cb69d7UL, 0xab770eb2UL, 0xb9c2a15cUL, - 0x017ec639UL, 0x9ca9fe80UL, 0x241599e5UL, 0x36a0360bUL, 0x8e1c516eUL, - 0x866616a7UL, 0x3eda71c2UL, 0x2c6fde2cUL, 0x94d3b949UL, 0x090481f0UL, - 0xb1b8e695UL, 0xa30d497bUL, 0x1bb12e1eUL, 0x43d23e48UL, 0xfb6e592dUL, - 0xe9dbf6c3UL, 0x516791a6UL, 0xccb0a91fUL, 0x740cce7aUL, 0x66b96194UL, - 0xde0506f1UL - }, - { - 0x00000000UL, 0x96300777UL, 0x2c610eeeUL, 0xba510999UL, 0x19c46d07UL, - 0x8ff46a70UL, 0x35a563e9UL, 0xa395649eUL, 0x3288db0eUL, 0xa4b8dc79UL, - 
0x1ee9d5e0UL, 0x88d9d297UL, 0x2b4cb609UL, 0xbd7cb17eUL, 0x072db8e7UL, - 0x911dbf90UL, 0x6410b71dUL, 0xf220b06aUL, 0x4871b9f3UL, 0xde41be84UL, - 0x7dd4da1aUL, 0xebe4dd6dUL, 0x51b5d4f4UL, 0xc785d383UL, 0x56986c13UL, - 0xc0a86b64UL, 0x7af962fdUL, 0xecc9658aUL, 0x4f5c0114UL, 0xd96c0663UL, - 0x633d0ffaUL, 0xf50d088dUL, 0xc8206e3bUL, 0x5e10694cUL, 0xe44160d5UL, - 0x727167a2UL, 0xd1e4033cUL, 0x47d4044bUL, 0xfd850dd2UL, 0x6bb50aa5UL, - 0xfaa8b535UL, 0x6c98b242UL, 0xd6c9bbdbUL, 0x40f9bcacUL, 0xe36cd832UL, - 0x755cdf45UL, 0xcf0dd6dcUL, 0x593dd1abUL, 0xac30d926UL, 0x3a00de51UL, - 0x8051d7c8UL, 0x1661d0bfUL, 0xb5f4b421UL, 0x23c4b356UL, 0x9995bacfUL, - 0x0fa5bdb8UL, 0x9eb80228UL, 0x0888055fUL, 0xb2d90cc6UL, 0x24e90bb1UL, - 0x877c6f2fUL, 0x114c6858UL, 0xab1d61c1UL, 0x3d2d66b6UL, 0x9041dc76UL, - 0x0671db01UL, 0xbc20d298UL, 0x2a10d5efUL, 0x8985b171UL, 0x1fb5b606UL, - 0xa5e4bf9fUL, 0x33d4b8e8UL, 0xa2c90778UL, 0x34f9000fUL, 0x8ea80996UL, - 0x18980ee1UL, 0xbb0d6a7fUL, 0x2d3d6d08UL, 0x976c6491UL, 0x015c63e6UL, - 0xf4516b6bUL, 0x62616c1cUL, 0xd8306585UL, 0x4e0062f2UL, 0xed95066cUL, - 0x7ba5011bUL, 0xc1f40882UL, 0x57c40ff5UL, 0xc6d9b065UL, 0x50e9b712UL, - 0xeab8be8bUL, 0x7c88b9fcUL, 0xdf1ddd62UL, 0x492dda15UL, 0xf37cd38cUL, - 0x654cd4fbUL, 0x5861b24dUL, 0xce51b53aUL, 0x7400bca3UL, 0xe230bbd4UL, - 0x41a5df4aUL, 0xd795d83dUL, 0x6dc4d1a4UL, 0xfbf4d6d3UL, 0x6ae96943UL, - 0xfcd96e34UL, 0x468867adUL, 0xd0b860daUL, 0x732d0444UL, 0xe51d0333UL, - 0x5f4c0aaaUL, 0xc97c0dddUL, 0x3c710550UL, 0xaa410227UL, 0x10100bbeUL, - 0x86200cc9UL, 0x25b56857UL, 0xb3856f20UL, 0x09d466b9UL, 0x9fe461ceUL, - 0x0ef9de5eUL, 0x98c9d929UL, 0x2298d0b0UL, 0xb4a8d7c7UL, 0x173db359UL, - 0x810db42eUL, 0x3b5cbdb7UL, 0xad6cbac0UL, 0x2083b8edUL, 0xb6b3bf9aUL, - 0x0ce2b603UL, 0x9ad2b174UL, 0x3947d5eaUL, 0xaf77d29dUL, 0x1526db04UL, - 0x8316dc73UL, 0x120b63e3UL, 0x843b6494UL, 0x3e6a6d0dUL, 0xa85a6a7aUL, - 0x0bcf0ee4UL, 0x9dff0993UL, 0x27ae000aUL, 0xb19e077dUL, 0x44930ff0UL, - 0xd2a30887UL, 0x68f2011eUL, 0xfec20669UL, 0x5d5762f7UL, 
0xcb676580UL, - 0x71366c19UL, 0xe7066b6eUL, 0x761bd4feUL, 0xe02bd389UL, 0x5a7ada10UL, - 0xcc4add67UL, 0x6fdfb9f9UL, 0xf9efbe8eUL, 0x43beb717UL, 0xd58eb060UL, - 0xe8a3d6d6UL, 0x7e93d1a1UL, 0xc4c2d838UL, 0x52f2df4fUL, 0xf167bbd1UL, - 0x6757bca6UL, 0xdd06b53fUL, 0x4b36b248UL, 0xda2b0dd8UL, 0x4c1b0aafUL, - 0xf64a0336UL, 0x607a0441UL, 0xc3ef60dfUL, 0x55df67a8UL, 0xef8e6e31UL, - 0x79be6946UL, 0x8cb361cbUL, 0x1a8366bcUL, 0xa0d26f25UL, 0x36e26852UL, - 0x95770cccUL, 0x03470bbbUL, 0xb9160222UL, 0x2f260555UL, 0xbe3bbac5UL, - 0x280bbdb2UL, 0x925ab42bUL, 0x046ab35cUL, 0xa7ffd7c2UL, 0x31cfd0b5UL, - 0x8b9ed92cUL, 0x1daede5bUL, 0xb0c2649bUL, 0x26f263ecUL, 0x9ca36a75UL, - 0x0a936d02UL, 0xa906099cUL, 0x3f360eebUL, 0x85670772UL, 0x13570005UL, - 0x824abf95UL, 0x147ab8e2UL, 0xae2bb17bUL, 0x381bb60cUL, 0x9b8ed292UL, - 0x0dbed5e5UL, 0xb7efdc7cUL, 0x21dfdb0bUL, 0xd4d2d386UL, 0x42e2d4f1UL, - 0xf8b3dd68UL, 0x6e83da1fUL, 0xcd16be81UL, 0x5b26b9f6UL, 0xe177b06fUL, - 0x7747b718UL, 0xe65a0888UL, 0x706a0fffUL, 0xca3b0666UL, 0x5c0b0111UL, - 0xff9e658fUL, 0x69ae62f8UL, 0xd3ff6b61UL, 0x45cf6c16UL, 0x78e20aa0UL, - 0xeed20dd7UL, 0x5483044eUL, 0xc2b30339UL, 0x612667a7UL, 0xf71660d0UL, - 0x4d476949UL, 0xdb776e3eUL, 0x4a6ad1aeUL, 0xdc5ad6d9UL, 0x660bdf40UL, - 0xf03bd837UL, 0x53aebca9UL, 0xc59ebbdeUL, 0x7fcfb247UL, 0xe9ffb530UL, - 0x1cf2bdbdUL, 0x8ac2bacaUL, 0x3093b353UL, 0xa6a3b424UL, 0x0536d0baUL, - 0x9306d7cdUL, 0x2957de54UL, 0xbf67d923UL, 0x2e7a66b3UL, 0xb84a61c4UL, - 0x021b685dUL, 0x942b6f2aUL, 0x37be0bb4UL, 0xa18e0cc3UL, 0x1bdf055aUL, - 0x8def022dUL - }, - { - 0x00000000UL, 0x41311b19UL, 0x82623632UL, 0xc3532d2bUL, 0x04c56c64UL, - 0x45f4777dUL, 0x86a75a56UL, 0xc796414fUL, 0x088ad9c8UL, 0x49bbc2d1UL, - 0x8ae8effaUL, 0xcbd9f4e3UL, 0x0c4fb5acUL, 0x4d7eaeb5UL, 0x8e2d839eUL, - 0xcf1c9887UL, 0x5112c24aUL, 0x1023d953UL, 0xd370f478UL, 0x9241ef61UL, - 0x55d7ae2eUL, 0x14e6b537UL, 0xd7b5981cUL, 0x96848305UL, 0x59981b82UL, - 0x18a9009bUL, 0xdbfa2db0UL, 0x9acb36a9UL, 0x5d5d77e6UL, 0x1c6c6cffUL, - 0xdf3f41d4UL, 
0x9e0e5acdUL, 0xa2248495UL, 0xe3159f8cUL, 0x2046b2a7UL, - 0x6177a9beUL, 0xa6e1e8f1UL, 0xe7d0f3e8UL, 0x2483dec3UL, 0x65b2c5daUL, - 0xaaae5d5dUL, 0xeb9f4644UL, 0x28cc6b6fUL, 0x69fd7076UL, 0xae6b3139UL, - 0xef5a2a20UL, 0x2c09070bUL, 0x6d381c12UL, 0xf33646dfUL, 0xb2075dc6UL, - 0x715470edUL, 0x30656bf4UL, 0xf7f32abbUL, 0xb6c231a2UL, 0x75911c89UL, - 0x34a00790UL, 0xfbbc9f17UL, 0xba8d840eUL, 0x79dea925UL, 0x38efb23cUL, - 0xff79f373UL, 0xbe48e86aUL, 0x7d1bc541UL, 0x3c2ade58UL, 0x054f79f0UL, - 0x447e62e9UL, 0x872d4fc2UL, 0xc61c54dbUL, 0x018a1594UL, 0x40bb0e8dUL, - 0x83e823a6UL, 0xc2d938bfUL, 0x0dc5a038UL, 0x4cf4bb21UL, 0x8fa7960aUL, - 0xce968d13UL, 0x0900cc5cUL, 0x4831d745UL, 0x8b62fa6eUL, 0xca53e177UL, - 0x545dbbbaUL, 0x156ca0a3UL, 0xd63f8d88UL, 0x970e9691UL, 0x5098d7deUL, - 0x11a9ccc7UL, 0xd2fae1ecUL, 0x93cbfaf5UL, 0x5cd76272UL, 0x1de6796bUL, - 0xdeb55440UL, 0x9f844f59UL, 0x58120e16UL, 0x1923150fUL, 0xda703824UL, - 0x9b41233dUL, 0xa76bfd65UL, 0xe65ae67cUL, 0x2509cb57UL, 0x6438d04eUL, - 0xa3ae9101UL, 0xe29f8a18UL, 0x21cca733UL, 0x60fdbc2aUL, 0xafe124adUL, - 0xeed03fb4UL, 0x2d83129fUL, 0x6cb20986UL, 0xab2448c9UL, 0xea1553d0UL, - 0x29467efbUL, 0x687765e2UL, 0xf6793f2fUL, 0xb7482436UL, 0x741b091dUL, - 0x352a1204UL, 0xf2bc534bUL, 0xb38d4852UL, 0x70de6579UL, 0x31ef7e60UL, - 0xfef3e6e7UL, 0xbfc2fdfeUL, 0x7c91d0d5UL, 0x3da0cbccUL, 0xfa368a83UL, - 0xbb07919aUL, 0x7854bcb1UL, 0x3965a7a8UL, 0x4b98833bUL, 0x0aa99822UL, - 0xc9fab509UL, 0x88cbae10UL, 0x4f5def5fUL, 0x0e6cf446UL, 0xcd3fd96dUL, - 0x8c0ec274UL, 0x43125af3UL, 0x022341eaUL, 0xc1706cc1UL, 0x804177d8UL, - 0x47d73697UL, 0x06e62d8eUL, 0xc5b500a5UL, 0x84841bbcUL, 0x1a8a4171UL, - 0x5bbb5a68UL, 0x98e87743UL, 0xd9d96c5aUL, 0x1e4f2d15UL, 0x5f7e360cUL, - 0x9c2d1b27UL, 0xdd1c003eUL, 0x120098b9UL, 0x533183a0UL, 0x9062ae8bUL, - 0xd153b592UL, 0x16c5f4ddUL, 0x57f4efc4UL, 0x94a7c2efUL, 0xd596d9f6UL, - 0xe9bc07aeUL, 0xa88d1cb7UL, 0x6bde319cUL, 0x2aef2a85UL, 0xed796bcaUL, - 0xac4870d3UL, 0x6f1b5df8UL, 0x2e2a46e1UL, 0xe136de66UL, 0xa007c57fUL, 
- 0x6354e854UL, 0x2265f34dUL, 0xe5f3b202UL, 0xa4c2a91bUL, 0x67918430UL, - 0x26a09f29UL, 0xb8aec5e4UL, 0xf99fdefdUL, 0x3accf3d6UL, 0x7bfde8cfUL, - 0xbc6ba980UL, 0xfd5ab299UL, 0x3e099fb2UL, 0x7f3884abUL, 0xb0241c2cUL, - 0xf1150735UL, 0x32462a1eUL, 0x73773107UL, 0xb4e17048UL, 0xf5d06b51UL, - 0x3683467aUL, 0x77b25d63UL, 0x4ed7facbUL, 0x0fe6e1d2UL, 0xccb5ccf9UL, - 0x8d84d7e0UL, 0x4a1296afUL, 0x0b238db6UL, 0xc870a09dUL, 0x8941bb84UL, - 0x465d2303UL, 0x076c381aUL, 0xc43f1531UL, 0x850e0e28UL, 0x42984f67UL, - 0x03a9547eUL, 0xc0fa7955UL, 0x81cb624cUL, 0x1fc53881UL, 0x5ef42398UL, - 0x9da70eb3UL, 0xdc9615aaUL, 0x1b0054e5UL, 0x5a314ffcUL, 0x996262d7UL, - 0xd85379ceUL, 0x174fe149UL, 0x567efa50UL, 0x952dd77bUL, 0xd41ccc62UL, - 0x138a8d2dUL, 0x52bb9634UL, 0x91e8bb1fUL, 0xd0d9a006UL, 0xecf37e5eUL, - 0xadc26547UL, 0x6e91486cUL, 0x2fa05375UL, 0xe836123aUL, 0xa9070923UL, - 0x6a542408UL, 0x2b653f11UL, 0xe479a796UL, 0xa548bc8fUL, 0x661b91a4UL, - 0x272a8abdUL, 0xe0bccbf2UL, 0xa18dd0ebUL, 0x62defdc0UL, 0x23efe6d9UL, - 0xbde1bc14UL, 0xfcd0a70dUL, 0x3f838a26UL, 0x7eb2913fUL, 0xb924d070UL, - 0xf815cb69UL, 0x3b46e642UL, 0x7a77fd5bUL, 0xb56b65dcUL, 0xf45a7ec5UL, - 0x370953eeUL, 0x763848f7UL, 0xb1ae09b8UL, 0xf09f12a1UL, 0x33cc3f8aUL, - 0x72fd2493UL - }, - { - 0x00000000UL, 0x376ac201UL, 0x6ed48403UL, 0x59be4602UL, 0xdca80907UL, - 0xebc2cb06UL, 0xb27c8d04UL, 0x85164f05UL, 0xb851130eUL, 0x8f3bd10fUL, - 0xd685970dUL, 0xe1ef550cUL, 0x64f91a09UL, 0x5393d808UL, 0x0a2d9e0aUL, - 0x3d475c0bUL, 0x70a3261cUL, 0x47c9e41dUL, 0x1e77a21fUL, 0x291d601eUL, - 0xac0b2f1bUL, 0x9b61ed1aUL, 0xc2dfab18UL, 0xf5b56919UL, 0xc8f23512UL, - 0xff98f713UL, 0xa626b111UL, 0x914c7310UL, 0x145a3c15UL, 0x2330fe14UL, - 0x7a8eb816UL, 0x4de47a17UL, 0xe0464d38UL, 0xd72c8f39UL, 0x8e92c93bUL, - 0xb9f80b3aUL, 0x3cee443fUL, 0x0b84863eUL, 0x523ac03cUL, 0x6550023dUL, - 0x58175e36UL, 0x6f7d9c37UL, 0x36c3da35UL, 0x01a91834UL, 0x84bf5731UL, - 0xb3d59530UL, 0xea6bd332UL, 0xdd011133UL, 0x90e56b24UL, 0xa78fa925UL, - 0xfe31ef27UL, 0xc95b2d26UL, 
0x4c4d6223UL, 0x7b27a022UL, 0x2299e620UL, - 0x15f32421UL, 0x28b4782aUL, 0x1fdeba2bUL, 0x4660fc29UL, 0x710a3e28UL, - 0xf41c712dUL, 0xc376b32cUL, 0x9ac8f52eUL, 0xada2372fUL, 0xc08d9a70UL, - 0xf7e75871UL, 0xae591e73UL, 0x9933dc72UL, 0x1c259377UL, 0x2b4f5176UL, - 0x72f11774UL, 0x459bd575UL, 0x78dc897eUL, 0x4fb64b7fUL, 0x16080d7dUL, - 0x2162cf7cUL, 0xa4748079UL, 0x931e4278UL, 0xcaa0047aUL, 0xfdcac67bUL, - 0xb02ebc6cUL, 0x87447e6dUL, 0xdefa386fUL, 0xe990fa6eUL, 0x6c86b56bUL, - 0x5bec776aUL, 0x02523168UL, 0x3538f369UL, 0x087faf62UL, 0x3f156d63UL, - 0x66ab2b61UL, 0x51c1e960UL, 0xd4d7a665UL, 0xe3bd6464UL, 0xba032266UL, - 0x8d69e067UL, 0x20cbd748UL, 0x17a11549UL, 0x4e1f534bUL, 0x7975914aUL, - 0xfc63de4fUL, 0xcb091c4eUL, 0x92b75a4cUL, 0xa5dd984dUL, 0x989ac446UL, - 0xaff00647UL, 0xf64e4045UL, 0xc1248244UL, 0x4432cd41UL, 0x73580f40UL, - 0x2ae64942UL, 0x1d8c8b43UL, 0x5068f154UL, 0x67023355UL, 0x3ebc7557UL, - 0x09d6b756UL, 0x8cc0f853UL, 0xbbaa3a52UL, 0xe2147c50UL, 0xd57ebe51UL, - 0xe839e25aUL, 0xdf53205bUL, 0x86ed6659UL, 0xb187a458UL, 0x3491eb5dUL, - 0x03fb295cUL, 0x5a456f5eUL, 0x6d2fad5fUL, 0x801b35e1UL, 0xb771f7e0UL, - 0xeecfb1e2UL, 0xd9a573e3UL, 0x5cb33ce6UL, 0x6bd9fee7UL, 0x3267b8e5UL, - 0x050d7ae4UL, 0x384a26efUL, 0x0f20e4eeUL, 0x569ea2ecUL, 0x61f460edUL, - 0xe4e22fe8UL, 0xd388ede9UL, 0x8a36abebUL, 0xbd5c69eaUL, 0xf0b813fdUL, - 0xc7d2d1fcUL, 0x9e6c97feUL, 0xa90655ffUL, 0x2c101afaUL, 0x1b7ad8fbUL, - 0x42c49ef9UL, 0x75ae5cf8UL, 0x48e900f3UL, 0x7f83c2f2UL, 0x263d84f0UL, - 0x115746f1UL, 0x944109f4UL, 0xa32bcbf5UL, 0xfa958df7UL, 0xcdff4ff6UL, - 0x605d78d9UL, 0x5737bad8UL, 0x0e89fcdaUL, 0x39e33edbUL, 0xbcf571deUL, - 0x8b9fb3dfUL, 0xd221f5ddUL, 0xe54b37dcUL, 0xd80c6bd7UL, 0xef66a9d6UL, - 0xb6d8efd4UL, 0x81b22dd5UL, 0x04a462d0UL, 0x33cea0d1UL, 0x6a70e6d3UL, - 0x5d1a24d2UL, 0x10fe5ec5UL, 0x27949cc4UL, 0x7e2adac6UL, 0x494018c7UL, - 0xcc5657c2UL, 0xfb3c95c3UL, 0xa282d3c1UL, 0x95e811c0UL, 0xa8af4dcbUL, - 0x9fc58fcaUL, 0xc67bc9c8UL, 0xf1110bc9UL, 0x740744ccUL, 0x436d86cdUL, - 
0x1ad3c0cfUL, 0x2db902ceUL, 0x4096af91UL, 0x77fc6d90UL, 0x2e422b92UL, - 0x1928e993UL, 0x9c3ea696UL, 0xab546497UL, 0xf2ea2295UL, 0xc580e094UL, - 0xf8c7bc9fUL, 0xcfad7e9eUL, 0x9613389cUL, 0xa179fa9dUL, 0x246fb598UL, - 0x13057799UL, 0x4abb319bUL, 0x7dd1f39aUL, 0x3035898dUL, 0x075f4b8cUL, - 0x5ee10d8eUL, 0x698bcf8fUL, 0xec9d808aUL, 0xdbf7428bUL, 0x82490489UL, - 0xb523c688UL, 0x88649a83UL, 0xbf0e5882UL, 0xe6b01e80UL, 0xd1dadc81UL, - 0x54cc9384UL, 0x63a65185UL, 0x3a181787UL, 0x0d72d586UL, 0xa0d0e2a9UL, - 0x97ba20a8UL, 0xce0466aaUL, 0xf96ea4abUL, 0x7c78ebaeUL, 0x4b1229afUL, - 0x12ac6fadUL, 0x25c6adacUL, 0x1881f1a7UL, 0x2feb33a6UL, 0x765575a4UL, - 0x413fb7a5UL, 0xc429f8a0UL, 0xf3433aa1UL, 0xaafd7ca3UL, 0x9d97bea2UL, - 0xd073c4b5UL, 0xe71906b4UL, 0xbea740b6UL, 0x89cd82b7UL, 0x0cdbcdb2UL, - 0x3bb10fb3UL, 0x620f49b1UL, 0x55658bb0UL, 0x6822d7bbUL, 0x5f4815baUL, - 0x06f653b8UL, 0x319c91b9UL, 0xb48adebcUL, 0x83e01cbdUL, 0xda5e5abfUL, - 0xed3498beUL - }, - { - 0x00000000UL, 0x6567bcb8UL, 0x8bc809aaUL, 0xeeafb512UL, 0x5797628fUL, - 0x32f0de37UL, 0xdc5f6b25UL, 0xb938d79dUL, 0xef28b4c5UL, 0x8a4f087dUL, - 0x64e0bd6fUL, 0x018701d7UL, 0xb8bfd64aUL, 0xddd86af2UL, 0x3377dfe0UL, - 0x56106358UL, 0x9f571950UL, 0xfa30a5e8UL, 0x149f10faUL, 0x71f8ac42UL, - 0xc8c07bdfUL, 0xada7c767UL, 0x43087275UL, 0x266fcecdUL, 0x707fad95UL, - 0x1518112dUL, 0xfbb7a43fUL, 0x9ed01887UL, 0x27e8cf1aUL, 0x428f73a2UL, - 0xac20c6b0UL, 0xc9477a08UL, 0x3eaf32a0UL, 0x5bc88e18UL, 0xb5673b0aUL, - 0xd00087b2UL, 0x6938502fUL, 0x0c5fec97UL, 0xe2f05985UL, 0x8797e53dUL, - 0xd1878665UL, 0xb4e03addUL, 0x5a4f8fcfUL, 0x3f283377UL, 0x8610e4eaUL, - 0xe3775852UL, 0x0dd8ed40UL, 0x68bf51f8UL, 0xa1f82bf0UL, 0xc49f9748UL, - 0x2a30225aUL, 0x4f579ee2UL, 0xf66f497fUL, 0x9308f5c7UL, 0x7da740d5UL, - 0x18c0fc6dUL, 0x4ed09f35UL, 0x2bb7238dUL, 0xc518969fUL, 0xa07f2a27UL, - 0x1947fdbaUL, 0x7c204102UL, 0x928ff410UL, 0xf7e848a8UL, 0x3d58149bUL, - 0x583fa823UL, 0xb6901d31UL, 0xd3f7a189UL, 0x6acf7614UL, 0x0fa8caacUL, - 0xe1077fbeUL, 0x8460c306UL, 
0xd270a05eUL, 0xb7171ce6UL, 0x59b8a9f4UL, - 0x3cdf154cUL, 0x85e7c2d1UL, 0xe0807e69UL, 0x0e2fcb7bUL, 0x6b4877c3UL, - 0xa20f0dcbUL, 0xc768b173UL, 0x29c70461UL, 0x4ca0b8d9UL, 0xf5986f44UL, - 0x90ffd3fcUL, 0x7e5066eeUL, 0x1b37da56UL, 0x4d27b90eUL, 0x284005b6UL, - 0xc6efb0a4UL, 0xa3880c1cUL, 0x1ab0db81UL, 0x7fd76739UL, 0x9178d22bUL, - 0xf41f6e93UL, 0x03f7263bUL, 0x66909a83UL, 0x883f2f91UL, 0xed589329UL, - 0x546044b4UL, 0x3107f80cUL, 0xdfa84d1eUL, 0xbacff1a6UL, 0xecdf92feUL, - 0x89b82e46UL, 0x67179b54UL, 0x027027ecUL, 0xbb48f071UL, 0xde2f4cc9UL, - 0x3080f9dbUL, 0x55e74563UL, 0x9ca03f6bUL, 0xf9c783d3UL, 0x176836c1UL, - 0x720f8a79UL, 0xcb375de4UL, 0xae50e15cUL, 0x40ff544eUL, 0x2598e8f6UL, - 0x73888baeUL, 0x16ef3716UL, 0xf8408204UL, 0x9d273ebcUL, 0x241fe921UL, - 0x41785599UL, 0xafd7e08bUL, 0xcab05c33UL, 0x3bb659edUL, 0x5ed1e555UL, - 0xb07e5047UL, 0xd519ecffUL, 0x6c213b62UL, 0x094687daUL, 0xe7e932c8UL, - 0x828e8e70UL, 0xd49eed28UL, 0xb1f95190UL, 0x5f56e482UL, 0x3a31583aUL, - 0x83098fa7UL, 0xe66e331fUL, 0x08c1860dUL, 0x6da63ab5UL, 0xa4e140bdUL, - 0xc186fc05UL, 0x2f294917UL, 0x4a4ef5afUL, 0xf3762232UL, 0x96119e8aUL, - 0x78be2b98UL, 0x1dd99720UL, 0x4bc9f478UL, 0x2eae48c0UL, 0xc001fdd2UL, - 0xa566416aUL, 0x1c5e96f7UL, 0x79392a4fUL, 0x97969f5dUL, 0xf2f123e5UL, - 0x05196b4dUL, 0x607ed7f5UL, 0x8ed162e7UL, 0xebb6de5fUL, 0x528e09c2UL, - 0x37e9b57aUL, 0xd9460068UL, 0xbc21bcd0UL, 0xea31df88UL, 0x8f566330UL, - 0x61f9d622UL, 0x049e6a9aUL, 0xbda6bd07UL, 0xd8c101bfUL, 0x366eb4adUL, - 0x53090815UL, 0x9a4e721dUL, 0xff29cea5UL, 0x11867bb7UL, 0x74e1c70fUL, - 0xcdd91092UL, 0xa8beac2aUL, 0x46111938UL, 0x2376a580UL, 0x7566c6d8UL, - 0x10017a60UL, 0xfeaecf72UL, 0x9bc973caUL, 0x22f1a457UL, 0x479618efUL, - 0xa939adfdUL, 0xcc5e1145UL, 0x06ee4d76UL, 0x6389f1ceUL, 0x8d2644dcUL, - 0xe841f864UL, 0x51792ff9UL, 0x341e9341UL, 0xdab12653UL, 0xbfd69aebUL, - 0xe9c6f9b3UL, 0x8ca1450bUL, 0x620ef019UL, 0x07694ca1UL, 0xbe519b3cUL, - 0xdb362784UL, 0x35999296UL, 0x50fe2e2eUL, 0x99b95426UL, 0xfcdee89eUL, - 
0x12715d8cUL, 0x7716e134UL, 0xce2e36a9UL, 0xab498a11UL, 0x45e63f03UL, - 0x208183bbUL, 0x7691e0e3UL, 0x13f65c5bUL, 0xfd59e949UL, 0x983e55f1UL, - 0x2106826cUL, 0x44613ed4UL, 0xaace8bc6UL, 0xcfa9377eUL, 0x38417fd6UL, - 0x5d26c36eUL, 0xb389767cUL, 0xd6eecac4UL, 0x6fd61d59UL, 0x0ab1a1e1UL, - 0xe41e14f3UL, 0x8179a84bUL, 0xd769cb13UL, 0xb20e77abUL, 0x5ca1c2b9UL, - 0x39c67e01UL, 0x80fea99cUL, 0xe5991524UL, 0x0b36a036UL, 0x6e511c8eUL, - 0xa7166686UL, 0xc271da3eUL, 0x2cde6f2cUL, 0x49b9d394UL, 0xf0810409UL, - 0x95e6b8b1UL, 0x7b490da3UL, 0x1e2eb11bUL, 0x483ed243UL, 0x2d596efbUL, - 0xc3f6dbe9UL, 0xa6916751UL, 0x1fa9b0ccUL, 0x7ace0c74UL, 0x9461b966UL, - 0xf10605deUL -#endif - } -}; diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/deflate.c cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/deflate.c --- cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/deflate.c 2014-11-18 11:13:10.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/deflate.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,1834 +0,0 @@ -/* deflate.c -- compress data using the deflation algorithm - * Copyright (C) 1995-2010 Jean-loup Gailly and Mark Adler - * For conditions of distribution and use, see copyright notice in zlib.h - */ - -/* - * ALGORITHM - * - * The "deflation" process depends on being able to identify portions - * of the input text which are identical to earlier input (within a - * sliding window trailing behind the input currently being processed). - * - * The most straightforward technique turns out to be the fastest for - * most input files: try all possible matches and select the longest. - * The key feature of this algorithm is that insertions into the string - * dictionary are very simple and thus fast, and deletions are avoided - * completely. Insertions are performed at each input character, whereas - * string matches are performed only when the previous match ends. 
So it - * is preferable to spend more time in matches to allow very fast string - * insertions and avoid deletions. The matching algorithm for small - * strings is inspired from that of Rabin & Karp. A brute force approach - * is used to find longer strings when a small match has been found. - * A similar algorithm is used in comic (by Jan-Mark Wams) and freeze - * (by Leonid Broukhis). - * A previous version of this file used a more sophisticated algorithm - * (by Fiala and Greene) which is guaranteed to run in linear amortized - * time, but has a larger average cost, uses more memory and is patented. - * However the F&G algorithm may be faster for some highly redundant - * files if the parameter max_chain_length (described below) is too large. - * - * ACKNOWLEDGEMENTS - * - * The idea of lazy evaluation of matches is due to Jan-Mark Wams, and - * I found it in 'freeze' written by Leonid Broukhis. - * Thanks to many people for bug reports and testing. - * - * REFERENCES - * - * Deutsch, L.P.,"DEFLATE Compressed Data Format Specification". - * Available in http://www.ietf.org/rfc/rfc1951.txt - * - * A description of the Rabin and Karp algorithm is given in the book - * "Algorithms" by R. Sedgewick, Addison-Wesley, p252. - * - * Fiala,E.R., and Greene,D.H. - * Data Compression with Finite Windows, Comm.ACM, 32,4 (1989) 490-595 - * - */ - -/* @(#) $Id$ */ - -#include "deflate.h" - -const char deflate_copyright[] = - " deflate 1.2.5 Copyright 1995-2010 Jean-loup Gailly and Mark Adler "; -/* - If you use the zlib library in a product, an acknowledgment is welcome - in the documentation of your product. If for some reason you cannot - include such an acknowledgment, I would appreciate that you keep this - copyright string in the executable of your product. - */ - -/* =========================================================================== - * Function prototypes. 
- */ -typedef enum { - need_more, /* block not completed, need more input or more output */ - block_done, /* block flush performed */ - finish_started, /* finish started, need only more output at next deflate */ - finish_done /* finish done, accept no more input or output */ -} block_state; - -typedef block_state (*compress_func) OF((deflate_state *s, int flush)); -/* Compression function. Returns the block state after the call. */ - -local void fill_window OF((deflate_state *s)); -local block_state deflate_stored OF((deflate_state *s, int flush)); -local block_state deflate_fast OF((deflate_state *s, int flush)); -#ifndef FASTEST -local block_state deflate_slow OF((deflate_state *s, int flush)); -#endif -local block_state deflate_rle OF((deflate_state *s, int flush)); -local block_state deflate_huff OF((deflate_state *s, int flush)); -local void lm_init OF((deflate_state *s)); -local void putShortMSB OF((deflate_state *s, uInt b)); -local void flush_pending OF((z_streamp strm)); -local int read_buf OF((z_streamp strm, Bytef *buf, unsigned size)); -#ifdef ASMV - void match_init OF((void)); /* asm code initialization */ - uInt longest_match OF((deflate_state *s, IPos cur_match)); -#else -local uInt longest_match OF((deflate_state *s, IPos cur_match)); -#endif - -#ifdef DEBUG -local void check_match OF((deflate_state *s, IPos start, IPos match, - int length)); -#endif - -/* =========================================================================== - * Local data - */ - -#define NIL 0 -/* Tail of hash chains */ - -#ifndef TOO_FAR -# define TOO_FAR 4096 -#endif -/* Matches of length 3 are discarded if their distance exceeds TOO_FAR */ - -/* Values for max_lazy_match, good_match and max_chain_length, depending on - * the desired pack level (0..9). The values given below have been tuned to - * exclude worst case performance for pathological files. Better values may be - * found for specific files. 
- */ -typedef struct config_s { - ush good_length; /* reduce lazy search above this match length */ - ush max_lazy; /* do not perform lazy search above this match length */ - ush nice_length; /* quit search above this match length */ - ush max_chain; - compress_func func; -} config; - -#ifdef FASTEST -local const config configuration_table[2] = { -/* good lazy nice chain */ -/* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */ -/* 1 */ {4, 4, 8, 4, deflate_fast}}; /* max speed, no lazy matches */ -#else -local const config configuration_table[10] = { -/* good lazy nice chain */ -/* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */ -/* 1 */ {4, 4, 8, 4, deflate_fast}, /* max speed, no lazy matches */ -/* 2 */ {4, 5, 16, 8, deflate_fast}, -/* 3 */ {4, 6, 32, 32, deflate_fast}, - -/* 4 */ {4, 4, 16, 16, deflate_slow}, /* lazy matches */ -/* 5 */ {8, 16, 32, 32, deflate_slow}, -/* 6 */ {8, 16, 128, 128, deflate_slow}, -/* 7 */ {8, 32, 128, 256, deflate_slow}, -/* 8 */ {32, 128, 258, 1024, deflate_slow}, -/* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* max compression */ -#endif - -/* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4 - * For deflate_fast() (levels <= 3) good is ignored and lazy has a different - * meaning. - */ - -#define EQUAL 0 -/* result of memcmp for equal strings */ - -#ifndef NO_DUMMY_DECL -struct static_tree_desc_s {int dummy;}; /* for buggy compilers */ -#endif - -/* =========================================================================== - * Update a hash value with the given input byte - * IN assertion: all calls to to UPDATE_HASH are made with consecutive - * input characters, so that a running hash key can be computed from the - * previous key instead of complete recalculation each time. 
- */ -#define UPDATE_HASH(s,h,c) (h = (((h)<hash_shift) ^ (c)) & s->hash_mask) - - -/* =========================================================================== - * Insert string str in the dictionary and set match_head to the previous head - * of the hash chain (the most recent string with same hash key). Return - * the previous length of the hash chain. - * If this file is compiled with -DFASTEST, the compression level is forced - * to 1, and no hash chains are maintained. - * IN assertion: all calls to to INSERT_STRING are made with consecutive - * input characters and the first MIN_MATCH bytes of str are valid - * (except for the last MIN_MATCH-1 bytes of the input file). - */ -#ifdef FASTEST -#define INSERT_STRING(s, str, match_head) \ - (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \ - match_head = s->head[s->ins_h], \ - s->head[s->ins_h] = (Pos)(str)) -#else -#define INSERT_STRING(s, str, match_head) \ - (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \ - match_head = s->prev[(str) & s->w_mask] = s->head[s->ins_h], \ - s->head[s->ins_h] = (Pos)(str)) -#endif - -/* =========================================================================== - * Initialize the hash table (avoiding 64K overflow for 16 bit systems). - * prev[] will be initialized on the fly. 
- */ -#define CLEAR_HASH(s) \ - s->head[s->hash_size-1] = NIL; \ - zmemzero((Bytef *)s->head, (unsigned)(s->hash_size-1)*sizeof(*s->head)); - -/* ========================================================================= */ -int ZEXPORT deflateInit_(strm, level, version, stream_size) - z_streamp strm; - int level; - const char *version; - int stream_size; -{ - return deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL, - Z_DEFAULT_STRATEGY, version, stream_size); - /* To do: ignore strm->next_in if we use it as window */ -} - -/* ========================================================================= */ -int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy, - version, stream_size) - z_streamp strm; - int level; - int method; - int windowBits; - int memLevel; - int strategy; - const char *version; - int stream_size; -{ - deflate_state *s; - int wrap = 1; - static const char my_version[] = ZLIB_VERSION; - - ushf *overlay; - /* We overlay pending_buf and d_buf+l_buf. This works since the average - * output size for (length,distance) codes is <= 24 bits. 
- */ - - if (version == Z_NULL || version[0] != my_version[0] || - stream_size != sizeof(z_stream)) { - return Z_VERSION_ERROR; - } - if (strm == Z_NULL) return Z_STREAM_ERROR; - - strm->msg = Z_NULL; - if (strm->zalloc == (alloc_func)0) { - strm->zalloc = zcalloc; - strm->opaque = (voidpf)0; - } - if (strm->zfree == (free_func)0) strm->zfree = zcfree; - -#ifdef FASTEST - if (level != 0) level = 1; -#else - if (level == Z_DEFAULT_COMPRESSION) level = 6; -#endif - - if (windowBits < 0) { /* suppress zlib wrapper */ - wrap = 0; - windowBits = -windowBits; - } -#ifdef GZIP - else if (windowBits > 15) { - wrap = 2; /* write gzip wrapper instead */ - windowBits -= 16; - } -#endif - if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED || - windowBits < 8 || windowBits > 15 || level < 0 || level > 9 || - strategy < 0 || strategy > Z_FIXED) { - return Z_STREAM_ERROR; - } - if (windowBits == 8) windowBits = 9; /* until 256-byte window bug fixed */ - s = (deflate_state *) ZALLOC(strm, 1, sizeof(deflate_state)); - if (s == Z_NULL) return Z_MEM_ERROR; - strm->state = (struct internal_state FAR *)s; - s->strm = strm; - - s->wrap = wrap; - s->gzhead = Z_NULL; - s->w_bits = windowBits; - s->w_size = 1 << s->w_bits; - s->w_mask = s->w_size - 1; - - s->hash_bits = memLevel + 7; - s->hash_size = 1 << s->hash_bits; - s->hash_mask = s->hash_size - 1; - s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH); - - s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte)); - s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos)); - s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos)); - - s->high_water = 0; /* nothing written to s->window yet */ - - s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */ - - overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2); - s->pending_buf = (uchf *) overlay; - s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L); - - if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL || - 
s->pending_buf == Z_NULL) { - s->status = FINISH_STATE; - strm->msg = (char*)ERR_MSG(Z_MEM_ERROR); - deflateEnd (strm); - return Z_MEM_ERROR; - } - s->d_buf = overlay + s->lit_bufsize/sizeof(ush); - s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize; - - s->level = level; - s->strategy = strategy; - s->method = (Byte)method; - - return deflateReset(strm); -} - -/* ========================================================================= */ -int ZEXPORT deflateSetDictionary (strm, dictionary, dictLength) - z_streamp strm; - const Bytef *dictionary; - uInt dictLength; -{ - deflate_state *s; - uInt length = dictLength; - uInt n; - IPos hash_head = 0; - - if (strm == Z_NULL || strm->state == Z_NULL || dictionary == Z_NULL || - strm->state->wrap == 2 || - (strm->state->wrap == 1 && strm->state->status != INIT_STATE)) - return Z_STREAM_ERROR; - - s = strm->state; - if (s->wrap) - strm->adler = adler32(strm->adler, dictionary, dictLength); - - if (length < MIN_MATCH) return Z_OK; - if (length > s->w_size) { - length = s->w_size; - dictionary += dictLength - length; /* use the tail of the dictionary */ - } - zmemcpy(s->window, dictionary, length); - s->strstart = length; - s->block_start = (long)length; - - /* Insert all strings in the hash table (except for the last two bytes). - * s->lookahead stays null, so s->ins_h will be recomputed at the next - * call of fill_window. 
- */ - s->ins_h = s->window[0]; - UPDATE_HASH(s, s->ins_h, s->window[1]); - for (n = 0; n <= length - MIN_MATCH; n++) { - INSERT_STRING(s, n, hash_head); - } - if (hash_head) hash_head = 0; /* to make compiler happy */ - return Z_OK; -} - -/* ========================================================================= */ -int ZEXPORT deflateReset (strm) - z_streamp strm; -{ - deflate_state *s; - - if (strm == Z_NULL || strm->state == Z_NULL || - strm->zalloc == (alloc_func)0 || strm->zfree == (free_func)0) { - return Z_STREAM_ERROR; - } - - strm->total_in = strm->total_out = 0; - strm->msg = Z_NULL; /* use zfree if we ever allocate msg dynamically */ - strm->data_type = Z_UNKNOWN; - - s = (deflate_state *)strm->state; - s->pending = 0; - s->pending_out = s->pending_buf; - - if (s->wrap < 0) { - s->wrap = -s->wrap; /* was made negative by deflate(..., Z_FINISH); */ - } - s->status = s->wrap ? INIT_STATE : BUSY_STATE; - strm->adler = -#ifdef GZIP - s->wrap == 2 ? crc32(0L, Z_NULL, 0) : -#endif - adler32(0L, Z_NULL, 0); - s->last_flush = Z_NO_FLUSH; - - _tr_init(s); - lm_init(s); - - return Z_OK; -} - -/* ========================================================================= */ -int ZEXPORT deflateSetHeader (strm, head) - z_streamp strm; - gz_headerp head; -{ - if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; - if (strm->state->wrap != 2) return Z_STREAM_ERROR; - strm->state->gzhead = head; - return Z_OK; -} - -/* ========================================================================= */ -int ZEXPORT deflatePrime (strm, bits, value) - z_streamp strm; - int bits; - int value; -{ - if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; - strm->state->bi_valid = bits; - strm->state->bi_buf = (ush)(value & ((1 << bits) - 1)); - return Z_OK; -} - -/* ========================================================================= */ -int ZEXPORT deflateParams(strm, level, strategy) - z_streamp strm; - int level; - int strategy; -{ - deflate_state 
*s; - compress_func func; - int err = Z_OK; - - if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; - s = strm->state; - -#ifdef FASTEST - if (level != 0) level = 1; -#else - if (level == Z_DEFAULT_COMPRESSION) level = 6; -#endif - if (level < 0 || level > 9 || strategy < 0 || strategy > Z_FIXED) { - return Z_STREAM_ERROR; - } - func = configuration_table[s->level].func; - - if ((strategy != s->strategy || func != configuration_table[level].func) && - strm->total_in != 0) { - /* Flush the last buffer: */ - err = deflate(strm, Z_BLOCK); - } - if (s->level != level) { - s->level = level; - s->max_lazy_match = configuration_table[level].max_lazy; - s->good_match = configuration_table[level].good_length; - s->nice_match = configuration_table[level].nice_length; - s->max_chain_length = configuration_table[level].max_chain; - } - s->strategy = strategy; - return err; -} - -/* ========================================================================= */ -int ZEXPORT deflateTune(strm, good_length, max_lazy, nice_length, max_chain) - z_streamp strm; - int good_length; - int max_lazy; - int nice_length; - int max_chain; -{ - deflate_state *s; - - if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; - s = strm->state; - s->good_match = good_length; - s->max_lazy_match = max_lazy; - s->nice_match = nice_length; - s->max_chain_length = max_chain; - return Z_OK; -} - -/* ========================================================================= - * For the default windowBits of 15 and memLevel of 8, this function returns - * a close to exact, as well as small, upper bound on the compressed size. - * They are coded as constants here for a reason--if the #define's are - * changed, then this function needs to be changed as well. The return - * value for 15 and 8 only works for those exact settings. 
- * - * For any setting other than those defaults for windowBits and memLevel, - * the value returned is a conservative worst case for the maximum expansion - * resulting from using fixed blocks instead of stored blocks, which deflate - * can emit on compressed data for some combinations of the parameters. - * - * This function could be more sophisticated to provide closer upper bounds for - * every combination of windowBits and memLevel. But even the conservative - * upper bound of about 14% expansion does not seem onerous for output buffer - * allocation. - */ -uLong ZEXPORT deflateBound(strm, sourceLen) - z_streamp strm; - uLong sourceLen; -{ - deflate_state *s; - uLong complen, wraplen; - Bytef *str; - - /* conservative upper bound for compressed data */ - complen = sourceLen + - ((sourceLen + 7) >> 3) + ((sourceLen + 63) >> 6) + 5; - - /* if can't get parameters, return conservative bound plus zlib wrapper */ - if (strm == Z_NULL || strm->state == Z_NULL) - return complen + 6; - - /* compute wrapper length */ - s = strm->state; - switch (s->wrap) { - case 0: /* raw deflate */ - wraplen = 0; - break; - case 1: /* zlib wrapper */ - wraplen = 6 + (s->strstart ? 
4 : 0); - break; - case 2: /* gzip wrapper */ - wraplen = 18; - if (s->gzhead != Z_NULL) { /* user-supplied gzip header */ - if (s->gzhead->extra != Z_NULL) - wraplen += 2 + s->gzhead->extra_len; - str = s->gzhead->name; - if (str != Z_NULL) - do { - wraplen++; - } while (*str++); - str = s->gzhead->comment; - if (str != Z_NULL) - do { - wraplen++; - } while (*str++); - if (s->gzhead->hcrc) - wraplen += 2; - } - break; - default: /* for compiler happiness */ - wraplen = 6; - } - - /* if not default parameters, return conservative bound */ - if (s->w_bits != 15 || s->hash_bits != 8 + 7) - return complen + wraplen; - - /* default settings: return tight bound for that case */ - return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) + - (sourceLen >> 25) + 13 - 6 + wraplen; -} - -/* ========================================================================= - * Put a short in the pending buffer. The 16-bit value is put in MSB order. - * IN assertion: the stream state is correct and there is enough room in - * pending_buf. - */ -local void putShortMSB (s, b) - deflate_state *s; - uInt b; -{ - put_byte(s, (Byte)(b >> 8)); - put_byte(s, (Byte)(b & 0xff)); -} - -/* ========================================================================= - * Flush as much pending output as possible. All deflate() output goes - * through this function so some applications may wish to modify it - * to avoid allocating a large strm->next_out buffer and copying into it. - * (See also read_buf()). 
- */ -local void flush_pending(strm) - z_streamp strm; -{ - unsigned len = strm->state->pending; - - if (len > strm->avail_out) len = strm->avail_out; - if (len == 0) return; - - zmemcpy(strm->next_out, strm->state->pending_out, len); - strm->next_out += len; - strm->state->pending_out += len; - strm->total_out += len; - strm->avail_out -= len; - strm->state->pending -= len; - if (strm->state->pending == 0) { - strm->state->pending_out = strm->state->pending_buf; - } -} - -/* ========================================================================= */ -int ZEXPORT deflate (strm, flush) - z_streamp strm; - int flush; -{ - int old_flush; /* value of flush param for previous deflate call */ - deflate_state *s; - - if (strm == Z_NULL || strm->state == Z_NULL || - flush > Z_BLOCK || flush < 0) { - return Z_STREAM_ERROR; - } - s = strm->state; - - if (strm->next_out == Z_NULL || - (strm->next_in == Z_NULL && strm->avail_in != 0) || - (s->status == FINISH_STATE && flush != Z_FINISH)) { - ERR_RETURN(strm, Z_STREAM_ERROR); - } - if (strm->avail_out == 0) ERR_RETURN(strm, Z_BUF_ERROR); - - s->strm = strm; /* just in case */ - old_flush = s->last_flush; - s->last_flush = flush; - - /* Write the header */ - if (s->status == INIT_STATE) { -#ifdef GZIP - if (s->wrap == 2) { - strm->adler = crc32(0L, Z_NULL, 0); - put_byte(s, 31); - put_byte(s, 139); - put_byte(s, 8); - if (s->gzhead == Z_NULL) { - put_byte(s, 0); - put_byte(s, 0); - put_byte(s, 0); - put_byte(s, 0); - put_byte(s, 0); - put_byte(s, s->level == 9 ? 2 : - (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ? - 4 : 0)); - put_byte(s, OS_CODE); - s->status = BUSY_STATE; - } - else { - put_byte(s, (s->gzhead->text ? 1 : 0) + - (s->gzhead->hcrc ? 2 : 0) + - (s->gzhead->extra == Z_NULL ? 0 : 4) + - (s->gzhead->name == Z_NULL ? 0 : 8) + - (s->gzhead->comment == Z_NULL ? 
0 : 16) - ); - put_byte(s, (Byte)(s->gzhead->time & 0xff)); - put_byte(s, (Byte)((s->gzhead->time >> 8) & 0xff)); - put_byte(s, (Byte)((s->gzhead->time >> 16) & 0xff)); - put_byte(s, (Byte)((s->gzhead->time >> 24) & 0xff)); - put_byte(s, s->level == 9 ? 2 : - (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ? - 4 : 0)); - put_byte(s, s->gzhead->os & 0xff); - if (s->gzhead->extra != Z_NULL) { - put_byte(s, s->gzhead->extra_len & 0xff); - put_byte(s, (s->gzhead->extra_len >> 8) & 0xff); - } - if (s->gzhead->hcrc) - strm->adler = crc32(strm->adler, s->pending_buf, - s->pending); - s->gzindex = 0; - s->status = EXTRA_STATE; - } - } - else -#endif - { - uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8; - uInt level_flags; - - if (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2) - level_flags = 0; - else if (s->level < 6) - level_flags = 1; - else if (s->level == 6) - level_flags = 2; - else - level_flags = 3; - header |= (level_flags << 6); - if (s->strstart != 0) header |= PRESET_DICT; - header += 31 - (header % 31); - - s->status = BUSY_STATE; - putShortMSB(s, header); - - /* Save the adler32 of the preset dictionary: */ - if (s->strstart != 0) { - putShortMSB(s, (uInt)(strm->adler >> 16)); - putShortMSB(s, (uInt)(strm->adler & 0xffff)); - } - strm->adler = adler32(0L, Z_NULL, 0); - } - } -#ifdef GZIP - if (s->status == EXTRA_STATE) { - if (s->gzhead->extra != Z_NULL) { - uInt beg = s->pending; /* start of bytes to update crc */ - - while (s->gzindex < (s->gzhead->extra_len & 0xffff)) { - if (s->pending == s->pending_buf_size) { - if (s->gzhead->hcrc && s->pending > beg) - strm->adler = crc32(strm->adler, s->pending_buf + beg, - s->pending - beg); - flush_pending(strm); - beg = s->pending; - if (s->pending == s->pending_buf_size) - break; - } - put_byte(s, s->gzhead->extra[s->gzindex]); - s->gzindex++; - } - if (s->gzhead->hcrc && s->pending > beg) - strm->adler = crc32(strm->adler, s->pending_buf + beg, - s->pending - beg); - if (s->gzindex == s->gzhead->extra_len) { 
- s->gzindex = 0; - s->status = NAME_STATE; - } - } - else - s->status = NAME_STATE; - } - if (s->status == NAME_STATE) { - if (s->gzhead->name != Z_NULL) { - uInt beg = s->pending; /* start of bytes to update crc */ - int val; - - do { - if (s->pending == s->pending_buf_size) { - if (s->gzhead->hcrc && s->pending > beg) - strm->adler = crc32(strm->adler, s->pending_buf + beg, - s->pending - beg); - flush_pending(strm); - beg = s->pending; - if (s->pending == s->pending_buf_size) { - val = 1; - break; - } - } - val = s->gzhead->name[s->gzindex++]; - put_byte(s, val); - } while (val != 0); - if (s->gzhead->hcrc && s->pending > beg) - strm->adler = crc32(strm->adler, s->pending_buf + beg, - s->pending - beg); - if (val == 0) { - s->gzindex = 0; - s->status = COMMENT_STATE; - } - } - else - s->status = COMMENT_STATE; - } - if (s->status == COMMENT_STATE) { - if (s->gzhead->comment != Z_NULL) { - uInt beg = s->pending; /* start of bytes to update crc */ - int val; - - do { - if (s->pending == s->pending_buf_size) { - if (s->gzhead->hcrc && s->pending > beg) - strm->adler = crc32(strm->adler, s->pending_buf + beg, - s->pending - beg); - flush_pending(strm); - beg = s->pending; - if (s->pending == s->pending_buf_size) { - val = 1; - break; - } - } - val = s->gzhead->comment[s->gzindex++]; - put_byte(s, val); - } while (val != 0); - if (s->gzhead->hcrc && s->pending > beg) - strm->adler = crc32(strm->adler, s->pending_buf + beg, - s->pending - beg); - if (val == 0) - s->status = HCRC_STATE; - } - else - s->status = HCRC_STATE; - } - if (s->status == HCRC_STATE) { - if (s->gzhead->hcrc) { - if (s->pending + 2 > s->pending_buf_size) - flush_pending(strm); - if (s->pending + 2 <= s->pending_buf_size) { - put_byte(s, (Byte)(strm->adler & 0xff)); - put_byte(s, (Byte)((strm->adler >> 8) & 0xff)); - strm->adler = crc32(0L, Z_NULL, 0); - s->status = BUSY_STATE; - } - } - else - s->status = BUSY_STATE; - } -#endif - - /* Flush as much pending output as possible */ - if (s->pending 
!= 0) { - flush_pending(strm); - if (strm->avail_out == 0) { - /* Since avail_out is 0, deflate will be called again with - * more output space, but possibly with both pending and - * avail_in equal to zero. There won't be anything to do, - * but this is not an error situation so make sure we - * return OK instead of BUF_ERROR at next call of deflate: - */ - s->last_flush = -1; - return Z_OK; - } - - /* Make sure there is something to do and avoid duplicate consecutive - * flushes. For repeated and useless calls with Z_FINISH, we keep - * returning Z_STREAM_END instead of Z_BUF_ERROR. - */ - } else if (strm->avail_in == 0 && flush <= old_flush && - flush != Z_FINISH) { - ERR_RETURN(strm, Z_BUF_ERROR); - } - - /* User must not provide more input after the first FINISH: */ - if (s->status == FINISH_STATE && strm->avail_in != 0) { - ERR_RETURN(strm, Z_BUF_ERROR); - } - - /* Start a new block or continue the current one. - */ - if (strm->avail_in != 0 || s->lookahead != 0 || - (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) { - block_state bstate; - - bstate = s->strategy == Z_HUFFMAN_ONLY ? deflate_huff(s, flush) : - (s->strategy == Z_RLE ? deflate_rle(s, flush) : - (*(configuration_table[s->level].func))(s, flush)); - - if (bstate == finish_started || bstate == finish_done) { - s->status = FINISH_STATE; - } - if (bstate == need_more || bstate == finish_started) { - if (strm->avail_out == 0) { - s->last_flush = -1; /* avoid BUF_ERROR next call, see above */ - } - return Z_OK; - /* If flush != Z_NO_FLUSH && avail_out == 0, the next call - * of deflate should use the same flush parameter to make sure - * that the flush is complete. So we don't have to output an - * empty block here, this will be done at next call. This also - * ensures that for a very small output buffer, we emit at most - * one empty block. 
- */ - } - if (bstate == block_done) { - if (flush == Z_PARTIAL_FLUSH) { - _tr_align(s); - } else if (flush != Z_BLOCK) { /* FULL_FLUSH or SYNC_FLUSH */ - _tr_stored_block(s, (char*)0, 0L, 0); - /* For a full flush, this empty block will be recognized - * as a special marker by inflate_sync(). - */ - if (flush == Z_FULL_FLUSH) { - CLEAR_HASH(s); /* forget history */ - if (s->lookahead == 0) { - s->strstart = 0; - s->block_start = 0L; - } - } - } - flush_pending(strm); - if (strm->avail_out == 0) { - s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */ - return Z_OK; - } - } - } - Assert(strm->avail_out > 0, "bug2"); - - if (flush != Z_FINISH) return Z_OK; - if (s->wrap <= 0) return Z_STREAM_END; - - /* Write the trailer */ -#ifdef GZIP - if (s->wrap == 2) { - put_byte(s, (Byte)(strm->adler & 0xff)); - put_byte(s, (Byte)((strm->adler >> 8) & 0xff)); - put_byte(s, (Byte)((strm->adler >> 16) & 0xff)); - put_byte(s, (Byte)((strm->adler >> 24) & 0xff)); - put_byte(s, (Byte)(strm->total_in & 0xff)); - put_byte(s, (Byte)((strm->total_in >> 8) & 0xff)); - put_byte(s, (Byte)((strm->total_in >> 16) & 0xff)); - put_byte(s, (Byte)((strm->total_in >> 24) & 0xff)); - } - else -#endif - { - putShortMSB(s, (uInt)(strm->adler >> 16)); - putShortMSB(s, (uInt)(strm->adler & 0xffff)); - } - flush_pending(strm); - /* If avail_out is zero, the application will call deflate again - * to flush the rest. - */ - if (s->wrap > 0) s->wrap = -s->wrap; /* write the trailer only once! */ - return s->pending != 0 ? 
Z_OK : Z_STREAM_END; -} - -/* ========================================================================= */ -int ZEXPORT deflateEnd (strm) - z_streamp strm; -{ - int status; - - if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; - - status = strm->state->status; - if (status != INIT_STATE && - status != EXTRA_STATE && - status != NAME_STATE && - status != COMMENT_STATE && - status != HCRC_STATE && - status != BUSY_STATE && - status != FINISH_STATE) { - return Z_STREAM_ERROR; - } - - /* Deallocate in reverse order of allocations: */ - TRY_FREE(strm, strm->state->pending_buf); - TRY_FREE(strm, strm->state->head); - TRY_FREE(strm, strm->state->prev); - TRY_FREE(strm, strm->state->window); - - ZFREE(strm, strm->state); - strm->state = Z_NULL; - - return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK; -} - -/* ========================================================================= - * Copy the source state to the destination state. - * To simplify the source, this is not supported for 16-bit MSDOS (which - * doesn't have enough memory anyway to duplicate compression states). 
- */ -int ZEXPORT deflateCopy (dest, source) - z_streamp dest; - z_streamp source; -{ -#ifdef MAXSEG_64K - return Z_STREAM_ERROR; -#else - deflate_state *ds; - deflate_state *ss; - ushf *overlay; - - - if (source == Z_NULL || dest == Z_NULL || source->state == Z_NULL) { - return Z_STREAM_ERROR; - } - - ss = source->state; - - zmemcpy(dest, source, sizeof(z_stream)); - - ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state)); - if (ds == Z_NULL) return Z_MEM_ERROR; - dest->state = (struct internal_state FAR *) ds; - zmemcpy(ds, ss, sizeof(deflate_state)); - ds->strm = dest; - - ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte)); - ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos)); - ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos)); - overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2); - ds->pending_buf = (uchf *) overlay; - - if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL || - ds->pending_buf == Z_NULL) { - deflateEnd (dest); - return Z_MEM_ERROR; - } - /* following zmemcpy do not work for 16-bit MSDOS */ - zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte)); - zmemcpy(ds->prev, ss->prev, ds->w_size * sizeof(Pos)); - zmemcpy(ds->head, ss->head, ds->hash_size * sizeof(Pos)); - zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size); - - ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf); - ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush); - ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize; - - ds->l_desc.dyn_tree = ds->dyn_ltree; - ds->d_desc.dyn_tree = ds->dyn_dtree; - ds->bl_desc.dyn_tree = ds->bl_tree; - - return Z_OK; -#endif /* MAXSEG_64K */ -} - -/* =========================================================================== - * Read a new buffer from the current input stream, update the adler32 - * and total number of bytes read. 
All deflate() input goes through - * this function so some applications may wish to modify it to avoid - * allocating a large strm->next_in buffer and copying from it. - * (See also flush_pending()). - */ -local int read_buf(strm, buf, size) - z_streamp strm; - Bytef *buf; - unsigned size; -{ - unsigned len = strm->avail_in; - - if (len > size) len = size; - if (len == 0) return 0; - - strm->avail_in -= len; - - if (strm->state->wrap == 1) { - strm->adler = adler32(strm->adler, strm->next_in, len); - } -#ifdef GZIP - else if (strm->state->wrap == 2) { - strm->adler = crc32(strm->adler, strm->next_in, len); - } -#endif - zmemcpy(buf, strm->next_in, len); - strm->next_in += len; - strm->total_in += len; - - return (int)len; -} - -/* =========================================================================== - * Initialize the "longest match" routines for a new zlib stream - */ -local void lm_init (s) - deflate_state *s; -{ - s->window_size = (ulg)2L*s->w_size; - - CLEAR_HASH(s); - - /* Set the default configuration parameters: - */ - s->max_lazy_match = configuration_table[s->level].max_lazy; - s->good_match = configuration_table[s->level].good_length; - s->nice_match = configuration_table[s->level].nice_length; - s->max_chain_length = configuration_table[s->level].max_chain; - - s->strstart = 0; - s->block_start = 0L; - s->lookahead = 0; - s->match_length = s->prev_length = MIN_MATCH-1; - s->match_available = 0; - s->ins_h = 0; -#ifndef FASTEST -#ifdef ASMV - match_init(); /* initialize the asm code */ -#endif -#endif -} - -#ifndef FASTEST -/* =========================================================================== - * Set match_start to the longest match starting at the given string and - * return its length. Matches shorter or equal to prev_length are discarded, - * in which case the result is equal to prev_length and match_start is - * garbage. 
- * IN assertions: cur_match is the head of the hash chain for the current - * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1 - * OUT assertion: the match length is not greater than s->lookahead. - */ -#ifndef ASMV -/* For 80x86 and 680x0, an optimized version will be provided in match.asm or - * match.S. The code will be functionally equivalent. - */ -local uInt longest_match(s, cur_match) - deflate_state *s; - IPos cur_match; /* current match */ -{ - unsigned chain_length = s->max_chain_length;/* max hash chain length */ - register Bytef *scan = s->window + s->strstart; /* current string */ - register Bytef *match; /* matched string */ - register int len; /* length of current match */ - int best_len = s->prev_length; /* best match length so far */ - int nice_match = s->nice_match; /* stop if match long enough */ - IPos limit = s->strstart > (IPos)MAX_DIST(s) ? - s->strstart - (IPos)MAX_DIST(s) : NIL; - /* Stop when cur_match becomes <= limit. To simplify the code, - * we prevent matches with the string of window index 0. - */ - Posf *prev = s->prev; - uInt wmask = s->w_mask; - -#ifdef UNALIGNED_OK - /* Compare two bytes at a time. Note: this is not always beneficial. - * Try with and without -DUNALIGNED_OK to check. - */ - register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1; - register ush scan_start = *(ushf*)scan; - register ush scan_end = *(ushf*)(scan+best_len-1); -#else - register Bytef *strend = s->window + s->strstart + MAX_MATCH; - register Byte scan_end1 = scan[best_len-1]; - register Byte scan_end = scan[best_len]; -#endif - - /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16. - * It is easy to get rid of this optimization if necessary. 
- */ - Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever"); - - /* Do not waste too much time if we already have a good match: */ - if (s->prev_length >= s->good_match) { - chain_length >>= 2; - } - /* Do not look for matches beyond the end of the input. This is necessary - * to make deflate deterministic. - */ - if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead; - - Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead"); - - do { - Assert(cur_match < s->strstart, "no future"); - match = s->window + cur_match; - - /* Skip to next match if the match length cannot increase - * or if the match length is less than 2. Note that the checks below - * for insufficient lookahead only occur occasionally for performance - * reasons. Therefore uninitialized memory will be accessed, and - * conditional jumps will be made that depend on those values. - * However the length of the match is limited to the lookahead, so - * the output of deflate is not affected by the uninitialized values. - */ -#if (defined(UNALIGNED_OK) && MAX_MATCH == 258) - /* This code assumes sizeof(unsigned short) == 2. Do not use - * UNALIGNED_OK if your compiler uses a different size. - */ - if (*(ushf*)(match+best_len-1) != scan_end || - *(ushf*)match != scan_start) continue; - - /* It is not necessary to compare scan[2] and match[2] since they are - * always equal when the other bytes match, given that the hash keys - * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at - * strstart+3, +5, ... up to strstart+257. We check for insufficient - * lookahead only every 4th comparison; the 128th check will be made - * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is - * necessary to put more guard bytes at the end of the window, or - * to check more often for insufficient lookahead. 
- */ - Assert(scan[2] == match[2], "scan[2]?"); - scan++, match++; - do { - } while (*(ushf*)(scan+=2) == *(ushf*)(match+=2) && - *(ushf*)(scan+=2) == *(ushf*)(match+=2) && - *(ushf*)(scan+=2) == *(ushf*)(match+=2) && - *(ushf*)(scan+=2) == *(ushf*)(match+=2) && - scan < strend); - /* The funny "do {}" generates better code on most compilers */ - - /* Here, scan <= window+strstart+257 */ - Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); - if (*scan == *match) scan++; - - len = (MAX_MATCH - 1) - (int)(strend-scan); - scan = strend - (MAX_MATCH-1); - -#else /* UNALIGNED_OK */ - - if (match[best_len] != scan_end || - match[best_len-1] != scan_end1 || - *match != *scan || - *++match != scan[1]) continue; - - /* The check at best_len-1 can be removed because it will be made - * again later. (This heuristic is not always a win.) - * It is not necessary to compare scan[2] and match[2] since they - * are always equal when the other bytes match, given that - * the hash keys are equal and that HASH_BITS >= 8. - */ - scan += 2, match++; - Assert(*scan == *match, "match[2]?"); - - /* We check for insufficient lookahead only every 8th comparison; - * the 256th check will be made at strstart+258. 
- */ - do { - } while (*++scan == *++match && *++scan == *++match && - *++scan == *++match && *++scan == *++match && - *++scan == *++match && *++scan == *++match && - *++scan == *++match && *++scan == *++match && - scan < strend); - - Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); - - len = MAX_MATCH - (int)(strend - scan); - scan = strend - MAX_MATCH; - -#endif /* UNALIGNED_OK */ - - if (len > best_len) { - s->match_start = cur_match; - best_len = len; - if (len >= nice_match) break; -#ifdef UNALIGNED_OK - scan_end = *(ushf*)(scan+best_len-1); -#else - scan_end1 = scan[best_len-1]; - scan_end = scan[best_len]; -#endif - } - } while ((cur_match = prev[cur_match & wmask]) > limit - && --chain_length != 0); - - if ((uInt)best_len <= s->lookahead) return (uInt)best_len; - return s->lookahead; -} -#endif /* ASMV */ - -#else /* FASTEST */ - -/* --------------------------------------------------------------------------- - * Optimized version for FASTEST only - */ -local uInt longest_match(s, cur_match) - deflate_state *s; - IPos cur_match; /* current match */ -{ - register Bytef *scan = s->window + s->strstart; /* current string */ - register Bytef *match; /* matched string */ - register int len; /* length of current match */ - register Bytef *strend = s->window + s->strstart + MAX_MATCH; - - /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16. - * It is easy to get rid of this optimization if necessary. - */ - Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever"); - - Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead"); - - Assert(cur_match < s->strstart, "no future"); - - match = s->window + cur_match; - - /* Return failure if the match length is less than 2: - */ - if (match[0] != scan[0] || match[1] != scan[1]) return MIN_MATCH-1; - - /* The check at best_len-1 can be removed because it will be made - * again later. (This heuristic is not always a win.) 
- * It is not necessary to compare scan[2] and match[2] since they - * are always equal when the other bytes match, given that - * the hash keys are equal and that HASH_BITS >= 8. - */ - scan += 2, match += 2; - Assert(*scan == *match, "match[2]?"); - - /* We check for insufficient lookahead only every 8th comparison; - * the 256th check will be made at strstart+258. - */ - do { - } while (*++scan == *++match && *++scan == *++match && - *++scan == *++match && *++scan == *++match && - *++scan == *++match && *++scan == *++match && - *++scan == *++match && *++scan == *++match && - scan < strend); - - Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); - - len = MAX_MATCH - (int)(strend - scan); - - if (len < MIN_MATCH) return MIN_MATCH - 1; - - s->match_start = cur_match; - return (uInt)len <= s->lookahead ? (uInt)len : s->lookahead; -} - -#endif /* FASTEST */ - -#ifdef DEBUG -/* =========================================================================== - * Check that the match at match_start is indeed a match. - */ -local void check_match(s, start, match, length) - deflate_state *s; - IPos start, match; - int length; -{ - /* check that the match is indeed a match */ - if (zmemcmp(s->window + match, - s->window + start, length) != EQUAL) { - fprintf(stderr, " start %u, match %u, length %d\n", - start, match, length); - do { - fprintf(stderr, "%c%c", s->window[match++], s->window[start++]); - } while (--length != 0); - z_error("invalid match"); - } - if (z_verbose > 1) { - fprintf(stderr,"\\[%d,%d]", start-match, length); - do { putc(s->window[start++], stderr); } while (--length != 0); - } -} -#else -# define check_match(s, start, match, length) -#endif /* DEBUG */ - -/* =========================================================================== - * Fill the window when the lookahead becomes insufficient. - * Updates strstart and lookahead. 
- * - * IN assertion: lookahead < MIN_LOOKAHEAD - * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD - * At least one byte has been read, or avail_in == 0; reads are - * performed for at least two bytes (required for the zip translate_eol - * option -- not supported here). - */ -local void fill_window(s) - deflate_state *s; -{ - register unsigned n, m; - register Posf *p; - unsigned more; /* Amount of free space at the end of the window. */ - uInt wsize = s->w_size; - - do { - more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart); - - /* Deal with !@#$% 64K limit: */ - if (sizeof(int) <= 2) { - if (more == 0 && s->strstart == 0 && s->lookahead == 0) { - more = wsize; - - } else if (more == (unsigned)(-1)) { - /* Very unlikely, but possible on 16 bit machine if - * strstart == 0 && lookahead == 1 (input done a byte at time) - */ - more--; - } - } - - /* If the window is almost full and there is insufficient lookahead, - * move the upper half to the lower one to make room in the upper half. - */ - if (s->strstart >= wsize+MAX_DIST(s)) { - - zmemcpy(s->window, s->window+wsize, (unsigned)wsize); - s->match_start -= wsize; - s->strstart -= wsize; /* we now have strstart >= MAX_DIST */ - s->block_start -= (long) wsize; - - /* Slide the hash table (could be avoided with 32 bit values - at the expense of memory usage). We slide even when level == 0 - to keep the hash table consistent if we switch back to level > 0 - later. (Using level 0 permanently is not an optimal usage of - zlib, so we don't care about this pathological case.) - */ - n = s->hash_size; - p = &s->head[n]; - do { - m = *--p; - *p = (Pos)(m >= wsize ? m-wsize : NIL); - } while (--n); - - n = wsize; -#ifndef FASTEST - p = &s->prev[n]; - do { - m = *--p; - *p = (Pos)(m >= wsize ? m-wsize : NIL); - /* If n is not on any hash chain, prev[n] is garbage but - * its value will never be used. 
- */ - } while (--n); -#endif - more += wsize; - } - if (s->strm->avail_in == 0) return; - - /* If there was no sliding: - * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 && - * more == window_size - lookahead - strstart - * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1) - * => more >= window_size - 2*WSIZE + 2 - * In the BIG_MEM or MMAP case (not yet supported), - * window_size == input_size + MIN_LOOKAHEAD && - * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD. - * Otherwise, window_size == 2*WSIZE so more >= 2. - * If there was sliding, more >= WSIZE. So in all cases, more >= 2. - */ - Assert(more >= 2, "more < 2"); - - n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more); - s->lookahead += n; - - /* Initialize the hash value now that we have some input: */ - if (s->lookahead >= MIN_MATCH) { - s->ins_h = s->window[s->strstart]; - UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]); -#if MIN_MATCH != 3 - Call UPDATE_HASH() MIN_MATCH-3 more times -#endif - } - /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage, - * but this is not important since only literal bytes will be emitted. - */ - - } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0); - - /* If the WIN_INIT bytes after the end of the current data have never been - * written, then zero those bytes in order to avoid memory check reports of - * the use of uninitialized (or uninitialised as Julian writes) bytes by - * the longest match routines. Update the high water mark for the next - * time through here. WIN_INIT is set to MAX_MATCH since the longest match - * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead. - */ - if (s->high_water < s->window_size) { - ulg curr = s->strstart + (ulg)(s->lookahead); - ulg init; - - if (s->high_water < curr) { - /* Previous high water mark below current data -- zero WIN_INIT - * bytes or up to end of window, whichever is less. 
- */ - init = s->window_size - curr; - if (init > WIN_INIT) - init = WIN_INIT; - zmemzero(s->window + curr, (unsigned)init); - s->high_water = curr + init; - } - else if (s->high_water < (ulg)curr + WIN_INIT) { - /* High water mark at or above current data, but below current data - * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up - * to end of window, whichever is less. - */ - init = (ulg)curr + WIN_INIT - s->high_water; - if (init > s->window_size - s->high_water) - init = s->window_size - s->high_water; - zmemzero(s->window + s->high_water, (unsigned)init); - s->high_water += init; - } - } -} - -/* =========================================================================== - * Flush the current block, with given end-of-file flag. - * IN assertion: strstart is set to the end of the current match. - */ -#define FLUSH_BLOCK_ONLY(s, last) { \ - _tr_flush_block(s, (s->block_start >= 0L ? \ - (charf *)&s->window[(unsigned)s->block_start] : \ - (charf *)Z_NULL), \ - (ulg)((long)s->strstart - s->block_start), \ - (last)); \ - s->block_start = s->strstart; \ - flush_pending(s->strm); \ - Tracev((stderr,"[FLUSH]")); \ -} - -/* Same but force premature exit if necessary. */ -#define FLUSH_BLOCK(s, last) { \ - FLUSH_BLOCK_ONLY(s, last); \ - if (s->strm->avail_out == 0) return (last) ? finish_started : need_more; \ -} - -/* =========================================================================== - * Copy without compression as much as possible from the input stream, return - * the current block state. - * This function does not insert new strings in the dictionary since - * uncompressible data is probably not useful. This function is used - * only for the level=0 compression option. - * NOTE: this function should be optimized to avoid extra copying from - * window to pending_buf. 
- */ -local block_state deflate_stored(s, flush) - deflate_state *s; - int flush; -{ - /* Stored blocks are limited to 0xffff bytes, pending_buf is limited - * to pending_buf_size, and each stored block has a 5 byte header: - */ - ulg max_block_size = 0xffff; - ulg max_start; - - if (max_block_size > s->pending_buf_size - 5) { - max_block_size = s->pending_buf_size - 5; - } - - /* Copy as much as possible from input to output: */ - for (;;) { - /* Fill the window as much as possible: */ - if (s->lookahead <= 1) { - - Assert(s->strstart < s->w_size+MAX_DIST(s) || - s->block_start >= (long)s->w_size, "slide too late"); - - fill_window(s); - if (s->lookahead == 0 && flush == Z_NO_FLUSH) return need_more; - - if (s->lookahead == 0) break; /* flush the current block */ - } - Assert(s->block_start >= 0L, "block gone"); - - s->strstart += s->lookahead; - s->lookahead = 0; - - /* Emit a stored block if pending_buf will be full: */ - max_start = s->block_start + max_block_size; - if (s->strstart == 0 || (ulg)s->strstart >= max_start) { - /* strstart == 0 is possible when wraparound on 16-bit machine */ - s->lookahead = (uInt)(s->strstart - max_start); - s->strstart = (uInt)max_start; - FLUSH_BLOCK(s, 0); - } - /* Flush if we may have to slide, otherwise block_start may become - * negative and the data will be gone: - */ - if (s->strstart - (uInt)s->block_start >= MAX_DIST(s)) { - FLUSH_BLOCK(s, 0); - } - } - FLUSH_BLOCK(s, flush == Z_FINISH); - return flush == Z_FINISH ? finish_done : block_done; -} - -/* =========================================================================== - * Compress as much as possible from the input stream, return the current - * block state. - * This function does not perform lazy evaluation of matches and inserts - * new strings in the dictionary only for unmatched strings or for short - * matches. It is used only for the fast compression options. 
- */ -local block_state deflate_fast(s, flush) - deflate_state *s; - int flush; -{ - IPos hash_head; /* head of the hash chain */ - int bflush; /* set if current block must be flushed */ - - for (;;) { - /* Make sure that we always have enough lookahead, except - * at the end of the input file. We need MAX_MATCH bytes - * for the next match, plus MIN_MATCH bytes to insert the - * string following the next match. - */ - if (s->lookahead < MIN_LOOKAHEAD) { - fill_window(s); - if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) { - return need_more; - } - if (s->lookahead == 0) break; /* flush the current block */ - } - - /* Insert the string window[strstart .. strstart+2] in the - * dictionary, and set hash_head to the head of the hash chain: - */ - hash_head = NIL; - if (s->lookahead >= MIN_MATCH) { - INSERT_STRING(s, s->strstart, hash_head); - } - - /* Find the longest match, discarding those <= prev_length. - * At this point we have always match_length < MIN_MATCH - */ - if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) { - /* To simplify the code, we prevent matches with the string - * of window index 0 (in particular we have to avoid a match - * of the string with itself at the start of the input file). - */ - s->match_length = longest_match (s, hash_head); - /* longest_match() sets match_start */ - } - if (s->match_length >= MIN_MATCH) { - check_match(s, s->strstart, s->match_start, s->match_length); - - _tr_tally_dist(s, s->strstart - s->match_start, - s->match_length - MIN_MATCH, bflush); - - s->lookahead -= s->match_length; - - /* Insert new strings in the hash table only if the match length - * is not too large. This saves time but degrades compression. 
- */ -#ifndef FASTEST - if (s->match_length <= s->max_insert_length && - s->lookahead >= MIN_MATCH) { - s->match_length--; /* string at strstart already in table */ - do { - s->strstart++; - INSERT_STRING(s, s->strstart, hash_head); - /* strstart never exceeds WSIZE-MAX_MATCH, so there are - * always MIN_MATCH bytes ahead. - */ - } while (--s->match_length != 0); - s->strstart++; - } else -#endif - { - s->strstart += s->match_length; - s->match_length = 0; - s->ins_h = s->window[s->strstart]; - UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]); -#if MIN_MATCH != 3 - Call UPDATE_HASH() MIN_MATCH-3 more times -#endif - /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not - * matter since it will be recomputed at next deflate call. - */ - } - } else { - /* No match, output a literal byte */ - Tracevv((stderr,"%c", s->window[s->strstart])); - _tr_tally_lit (s, s->window[s->strstart], bflush); - s->lookahead--; - s->strstart++; - } - if (bflush) FLUSH_BLOCK(s, 0); - } - FLUSH_BLOCK(s, flush == Z_FINISH); - return flush == Z_FINISH ? finish_done : block_done; -} - -#ifndef FASTEST -/* =========================================================================== - * Same as above, but achieves better compression. We use a lazy - * evaluation for matches: a match is finally adopted only if there is - * no better match at the next window position. - */ -local block_state deflate_slow(s, flush) - deflate_state *s; - int flush; -{ - IPos hash_head; /* head of hash chain */ - int bflush; /* set if current block must be flushed */ - - /* Process the input block. */ - for (;;) { - /* Make sure that we always have enough lookahead, except - * at the end of the input file. We need MAX_MATCH bytes - * for the next match, plus MIN_MATCH bytes to insert the - * string following the next match. 
- */ - if (s->lookahead < MIN_LOOKAHEAD) { - fill_window(s); - if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) { - return need_more; - } - if (s->lookahead == 0) break; /* flush the current block */ - } - - /* Insert the string window[strstart .. strstart+2] in the - * dictionary, and set hash_head to the head of the hash chain: - */ - hash_head = NIL; - if (s->lookahead >= MIN_MATCH) { - INSERT_STRING(s, s->strstart, hash_head); - } - - /* Find the longest match, discarding those <= prev_length. - */ - s->prev_length = s->match_length, s->prev_match = s->match_start; - s->match_length = MIN_MATCH-1; - - if (hash_head != NIL && s->prev_length < s->max_lazy_match && - s->strstart - hash_head <= MAX_DIST(s)) { - /* To simplify the code, we prevent matches with the string - * of window index 0 (in particular we have to avoid a match - * of the string with itself at the start of the input file). - */ - s->match_length = longest_match (s, hash_head); - /* longest_match() sets match_start */ - - if (s->match_length <= 5 && (s->strategy == Z_FILTERED -#if TOO_FAR <= 32767 - || (s->match_length == MIN_MATCH && - s->strstart - s->match_start > TOO_FAR) -#endif - )) { - - /* If prev_match is also MIN_MATCH, match_start is garbage - * but we will ignore the current match anyway. - */ - s->match_length = MIN_MATCH-1; - } - } - /* If there was a match at the previous step and the current - * match is not better, output the previous match: - */ - if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) { - uInt max_insert = s->strstart + s->lookahead - MIN_MATCH; - /* Do not insert strings in hash table beyond this. */ - - check_match(s, s->strstart-1, s->prev_match, s->prev_length); - - _tr_tally_dist(s, s->strstart -1 - s->prev_match, - s->prev_length - MIN_MATCH, bflush); - - /* Insert in hash table all strings up to the end of the match. - * strstart-1 and strstart are already inserted. 
If there is not - * enough lookahead, the last two strings are not inserted in - * the hash table. - */ - s->lookahead -= s->prev_length-1; - s->prev_length -= 2; - do { - if (++s->strstart <= max_insert) { - INSERT_STRING(s, s->strstart, hash_head); - } - } while (--s->prev_length != 0); - s->match_available = 0; - s->match_length = MIN_MATCH-1; - s->strstart++; - - if (bflush) FLUSH_BLOCK(s, 0); - - } else if (s->match_available) { - /* If there was no match at the previous position, output a - * single literal. If there was a match but the current match - * is longer, truncate the previous match to a single literal. - */ - Tracevv((stderr,"%c", s->window[s->strstart-1])); - _tr_tally_lit(s, s->window[s->strstart-1], bflush); - if (bflush) { - FLUSH_BLOCK_ONLY(s, 0); - } - s->strstart++; - s->lookahead--; - if (s->strm->avail_out == 0) return need_more; - } else { - /* There is no previous match to compare with, wait for - * the next step to decide. - */ - s->match_available = 1; - s->strstart++; - s->lookahead--; - } - } - Assert (flush != Z_NO_FLUSH, "no flush?"); - if (s->match_available) { - Tracevv((stderr,"%c", s->window[s->strstart-1])); - _tr_tally_lit(s, s->window[s->strstart-1], bflush); - s->match_available = 0; - } - FLUSH_BLOCK(s, flush == Z_FINISH); - return flush == Z_FINISH ? finish_done : block_done; -} -#endif /* FASTEST */ - -/* =========================================================================== - * For Z_RLE, simply look for runs of bytes, generate matches only of distance - * one. Do not maintain a hash table. (It will be regenerated if this run of - * deflate switches away from Z_RLE.) 
- */ -local block_state deflate_rle(s, flush) - deflate_state *s; - int flush; -{ - int bflush; /* set if current block must be flushed */ - uInt prev; /* byte at distance one to match */ - Bytef *scan, *strend; /* scan goes up to strend for length of run */ - - for (;;) { - /* Make sure that we always have enough lookahead, except - * at the end of the input file. We need MAX_MATCH bytes - * for the longest encodable run. - */ - if (s->lookahead < MAX_MATCH) { - fill_window(s); - if (s->lookahead < MAX_MATCH && flush == Z_NO_FLUSH) { - return need_more; - } - if (s->lookahead == 0) break; /* flush the current block */ - } - - /* See how many times the previous byte repeats */ - s->match_length = 0; - if (s->lookahead >= MIN_MATCH && s->strstart > 0) { - scan = s->window + s->strstart - 1; - prev = *scan; - if (prev == *++scan && prev == *++scan && prev == *++scan) { - strend = s->window + s->strstart + MAX_MATCH; - do { - } while (prev == *++scan && prev == *++scan && - prev == *++scan && prev == *++scan && - prev == *++scan && prev == *++scan && - prev == *++scan && prev == *++scan && - scan < strend); - s->match_length = MAX_MATCH - (int)(strend - scan); - if (s->match_length > s->lookahead) - s->match_length = s->lookahead; - } - } - - /* Emit match if have run of MIN_MATCH or longer, else emit literal */ - if (s->match_length >= MIN_MATCH) { - check_match(s, s->strstart, s->strstart - 1, s->match_length); - - _tr_tally_dist(s, 1, s->match_length - MIN_MATCH, bflush); - - s->lookahead -= s->match_length; - s->strstart += s->match_length; - s->match_length = 0; - } else { - /* No match, output a literal byte */ - Tracevv((stderr,"%c", s->window[s->strstart])); - _tr_tally_lit (s, s->window[s->strstart], bflush); - s->lookahead--; - s->strstart++; - } - if (bflush) FLUSH_BLOCK(s, 0); - } - FLUSH_BLOCK(s, flush == Z_FINISH); - return flush == Z_FINISH ? 
finish_done : block_done; -} - -/* =========================================================================== - * For Z_HUFFMAN_ONLY, do not look for matches. Do not maintain a hash table. - * (It will be regenerated if this run of deflate switches away from Huffman.) - */ -local block_state deflate_huff(s, flush) - deflate_state *s; - int flush; -{ - int bflush; /* set if current block must be flushed */ - - for (;;) { - /* Make sure that we have a literal to write. */ - if (s->lookahead == 0) { - fill_window(s); - if (s->lookahead == 0) { - if (flush == Z_NO_FLUSH) - return need_more; - break; /* flush the current block */ - } - } - - /* Output a literal byte */ - s->match_length = 0; - Tracevv((stderr,"%c", s->window[s->strstart])); - _tr_tally_lit (s, s->window[s->strstart], bflush); - s->lookahead--; - s->strstart++; - if (bflush) FLUSH_BLOCK(s, 0); - } - FLUSH_BLOCK(s, flush == Z_FINISH); - return flush == Z_FINISH ? finish_done : block_done; -} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/deflate.h cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/deflate.h --- cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/deflate.h 2014-11-18 11:13:10.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/deflate.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,342 +0,0 @@ -/* deflate.h -- internal compression state - * Copyright (C) 1995-2010 Jean-loup Gailly - * For conditions of distribution and use, see copyright notice in zlib.h - */ - -/* WARNING: this file should *not* be used by applications. It is - part of the implementation of the compression library and is - subject to change. Applications should only use zlib.h. - */ - -/* @(#) $Id$ */ - -#ifndef DEFLATE_H -#define DEFLATE_H - -#include "zutil.h" - -/* define NO_GZIP when compiling if you want to disable gzip header and - trailer creation by deflate(). NO_GZIP would be used to avoid linking in - the crc code when it is not needed. 
For shared libraries, gzip encoding - should be left enabled. */ -#ifndef NO_GZIP -# define GZIP -#endif - -/* =========================================================================== - * Internal compression state. - */ - -#define LENGTH_CODES 29 -/* number of length codes, not counting the special END_BLOCK code */ - -#define LITERALS 256 -/* number of literal bytes 0..255 */ - -#define L_CODES (LITERALS+1+LENGTH_CODES) -/* number of Literal or Length codes, including the END_BLOCK code */ - -#define D_CODES 30 -/* number of distance codes */ - -#define BL_CODES 19 -/* number of codes used to transfer the bit lengths */ - -#define HEAP_SIZE (2*L_CODES+1) -/* maximum heap size */ - -#define MAX_BITS 15 -/* All codes must not exceed MAX_BITS bits */ - -#define INIT_STATE 42 -#define EXTRA_STATE 69 -#define NAME_STATE 73 -#define COMMENT_STATE 91 -#define HCRC_STATE 103 -#define BUSY_STATE 113 -#define FINISH_STATE 666 -/* Stream status */ - - -/* Data structure describing a single value and its code string. */ -typedef struct ct_data_s { - union { - ush freq; /* frequency count */ - ush code; /* bit string */ - } fc; - union { - ush dad; /* father node in Huffman tree */ - ush len; /* length of bit string */ - } dl; -} FAR ct_data; - -#define Freq fc.freq -#define Code fc.code -#define Dad dl.dad -#define Len dl.len - -typedef struct static_tree_desc_s static_tree_desc; - -typedef struct tree_desc_s { - ct_data *dyn_tree; /* the dynamic tree */ - int max_code; /* largest code with non zero frequency */ - static_tree_desc *stat_desc; /* the corresponding static tree */ -} FAR tree_desc; - -typedef ush Pos; -typedef Pos FAR Posf; -typedef unsigned IPos; - -/* A Pos is an index in the character window. We use short instead of int to - * save space in the various tables. IPos is used only for parameter passing. 
- */ - -typedef struct internal_state { - z_streamp strm; /* pointer back to this zlib stream */ - int status; /* as the name implies */ - Bytef *pending_buf; /* output still pending */ - ulg pending_buf_size; /* size of pending_buf */ - Bytef *pending_out; /* next pending byte to output to the stream */ - uInt pending; /* nb of bytes in the pending buffer */ - int wrap; /* bit 0 true for zlib, bit 1 true for gzip */ - gz_headerp gzhead; /* gzip header information to write */ - uInt gzindex; /* where in extra, name, or comment */ - Byte method; /* STORED (for zip only) or DEFLATED */ - int last_flush; /* value of flush param for previous deflate call */ - - /* used by deflate.c: */ - - uInt w_size; /* LZ77 window size (32K by default) */ - uInt w_bits; /* log2(w_size) (8..16) */ - uInt w_mask; /* w_size - 1 */ - - Bytef *window; - /* Sliding window. Input bytes are read into the second half of the window, - * and move to the first half later to keep a dictionary of at least wSize - * bytes. With this organization, matches are limited to a distance of - * wSize-MAX_MATCH bytes, but this ensures that IO is always - * performed with a length multiple of the block size. Also, it limits - * the window size to 64K, which is quite useful on MSDOS. - * To do: use the user input buffer as sliding window. - */ - - ulg window_size; - /* Actual size of window: 2*wSize, except when the user input buffer - * is directly used as sliding window. - */ - - Posf *prev; - /* Link to older string with same hash index. To limit the size of this - * array to 64K, this link is maintained only for the last 32K strings. - * An index in this array is thus a window index modulo 32K. - */ - - Posf *head; /* Heads of the hash chains or NIL. 
*/ - - uInt ins_h; /* hash index of string to be inserted */ - uInt hash_size; /* number of elements in hash table */ - uInt hash_bits; /* log2(hash_size) */ - uInt hash_mask; /* hash_size-1 */ - - uInt hash_shift; - /* Number of bits by which ins_h must be shifted at each input - * step. It must be such that after MIN_MATCH steps, the oldest - * byte no longer takes part in the hash key, that is: - * hash_shift * MIN_MATCH >= hash_bits - */ - - long block_start; - /* Window position at the beginning of the current output block. Gets - * negative when the window is moved backwards. - */ - - uInt match_length; /* length of best match */ - IPos prev_match; /* previous match */ - int match_available; /* set if previous match exists */ - uInt strstart; /* start of string to insert */ - uInt match_start; /* start of matching string */ - uInt lookahead; /* number of valid bytes ahead in window */ - - uInt prev_length; - /* Length of the best match at previous step. Matches not greater than this - * are discarded. This is used in the lazy match evaluation. - */ - - uInt max_chain_length; - /* To speed up deflation, hash chains are never searched beyond this - * length. A higher limit improves compression ratio but degrades the - * speed. - */ - - uInt max_lazy_match; - /* Attempt to find a better match only when the current match is strictly - * smaller than this value. This mechanism is used only for compression - * levels >= 4. - */ -# define max_insert_length max_lazy_match - /* Insert new strings in the hash table only if the match length is not - * greater than this length. This saves time but degrades compression. - * max_insert_length is used only for compression levels <= 3. 
- */ - - int level; /* compression level (1..9) */ - int strategy; /* favor or force Huffman coding*/ - - uInt good_match; - /* Use a faster search when the previous match is longer than this */ - - int nice_match; /* Stop searching when current match exceeds this */ - - /* used by trees.c: */ - /* Didn't use ct_data typedef below to supress compiler warning */ - struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */ - struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */ - struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */ - - struct tree_desc_s l_desc; /* desc. for literal tree */ - struct tree_desc_s d_desc; /* desc. for distance tree */ - struct tree_desc_s bl_desc; /* desc. for bit length tree */ - - ush bl_count[MAX_BITS+1]; - /* number of codes at each bit length for an optimal tree */ - - int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */ - int heap_len; /* number of elements in the heap */ - int heap_max; /* element of largest frequency */ - /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used. - * The same heap array is used to build all trees. - */ - - uch depth[2*L_CODES+1]; - /* Depth of each subtree used as tie breaker for trees of equal frequency - */ - - uchf *l_buf; /* buffer for literals or lengths */ - - uInt lit_bufsize; - /* Size of match buffer for literals/lengths. There are 4 reasons for - * limiting lit_bufsize to 64K: - * - frequencies can be kept in 16 bit counters - * - if compression is not successful for the first block, all input - * data is still in the window so we can still emit a stored block even - * when input comes from standard input. (This can also be done for - * all blocks if lit_bufsize is not greater than 32K.) - * - if compression is not successful for a file smaller than 64K, we can - * even emit a stored file instead of a stored block (saving 5 bytes). - * This is applicable only for zip (not gzip or zlib). 
- * - creating new Huffman trees less frequently may not provide fast - * adaptation to changes in the input data statistics. (Take for - * example a binary file with poorly compressible code followed by - * a highly compressible string table.) Smaller buffer sizes give - * fast adaptation but have of course the overhead of transmitting - * trees more frequently. - * - I can't count above 4 - */ - - uInt last_lit; /* running index in l_buf */ - - ushf *d_buf; - /* Buffer for distances. To simplify the code, d_buf and l_buf have - * the same number of elements. To use different lengths, an extra flag - * array would be necessary. - */ - - ulg opt_len; /* bit length of current block with optimal trees */ - ulg static_len; /* bit length of current block with static trees */ - uInt matches; /* number of string matches in current block */ - int last_eob_len; /* bit length of EOB code for last block */ - -#ifdef DEBUG - ulg compressed_len; /* total bit length of compressed file mod 2^32 */ - ulg bits_sent; /* bit length of compressed data sent mod 2^32 */ -#endif - - ush bi_buf; - /* Output buffer. bits are inserted starting at the bottom (least - * significant bits). - */ - int bi_valid; - /* Number of valid bits in bi_buf. All bits above the last valid bit - * are always zero. - */ - - ulg high_water; - /* High water mark offset in window for initialized bytes -- bytes above - * this are set to zero in order to avoid memory check warnings when - * longest match routines access bytes past the input. This is then - * updated to the new high water mark. - */ - -} FAR deflate_state; - -/* Output a byte on the stream. - * IN assertion: there is enough room in pending_buf. - */ -#define put_byte(s, c) {s->pending_buf[s->pending++] = (c);} - - -#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1) -/* Minimum amount of lookahead, except at the end of the input file. - * See deflate.c for comments about the MIN_MATCH+1. 
- */ - -#define MAX_DIST(s) ((s)->w_size-MIN_LOOKAHEAD) -/* In order to simplify the code, particularly on 16 bit machines, match - * distances are limited to MAX_DIST instead of WSIZE. - */ - -#define WIN_INIT MAX_MATCH -/* Number of bytes after end of data in window to initialize in order to avoid - memory checker errors from longest match routines */ - - /* in trees.c */ -void ZLIB_INTERNAL _tr_init OF((deflate_state *s)); -int ZLIB_INTERNAL _tr_tally OF((deflate_state *s, unsigned dist, unsigned lc)); -void ZLIB_INTERNAL _tr_flush_block OF((deflate_state *s, charf *buf, - ulg stored_len, int last)); -void ZLIB_INTERNAL _tr_align OF((deflate_state *s)); -void ZLIB_INTERNAL _tr_stored_block OF((deflate_state *s, charf *buf, - ulg stored_len, int last)); - -#define d_code(dist) \ - ((dist) < 256 ? _dist_code[dist] : _dist_code[256+((dist)>>7)]) -/* Mapping from a distance to a distance code. dist is the distance - 1 and - * must not have side effects. _dist_code[256] and _dist_code[257] are never - * used. 
- */ - -#ifndef DEBUG -/* Inline versions of _tr_tally for speed: */ - -#if defined(GEN_TREES_H) || !defined(STDC) - extern uch ZLIB_INTERNAL _length_code[]; - extern uch ZLIB_INTERNAL _dist_code[]; -#else - extern const uch ZLIB_INTERNAL _length_code[]; - extern const uch ZLIB_INTERNAL _dist_code[]; -#endif - -# define _tr_tally_lit(s, c, flush) \ - { uch cc = (c); \ - s->d_buf[s->last_lit] = 0; \ - s->l_buf[s->last_lit++] = cc; \ - s->dyn_ltree[cc].Freq++; \ - flush = (s->last_lit == s->lit_bufsize-1); \ - } -# define _tr_tally_dist(s, distance, length, flush) \ - { uch len = (length); \ - ush dist = (distance); \ - s->d_buf[s->last_lit] = dist; \ - s->l_buf[s->last_lit++] = len; \ - dist--; \ - s->dyn_ltree[_length_code[len]+LITERALS+1].Freq++; \ - s->dyn_dtree[d_code(dist)].Freq++; \ - flush = (s->last_lit == s->lit_bufsize-1); \ - } -#else -# define _tr_tally_lit(s, c, flush) flush = _tr_tally(s, 0, c) -# define _tr_tally_dist(s, distance, length, flush) \ - flush = _tr_tally(s, distance, length) -#endif - -#endif /* DEFLATE_H */ diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/infback.c cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/infback.c --- cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/infback.c 2014-11-18 11:13:10.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/infback.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,632 +0,0 @@ -/* infback.c -- inflate using a call-back interface - * Copyright (C) 1995-2009 Mark Adler - * For conditions of distribution and use, see copyright notice in zlib.h - */ - -/* - This code is largely copied from inflate.c. Normally either infback.o or - inflate.o would be linked into an application--not both. The interface - with inffast.c is retained so that optimized assembler-coded versions of - inflate_fast() can be used with either inflate.c or infback.c. 
- */ - -#include "zutil.h" -#include "inftrees.h" -#include "inflate.h" -#include "inffast.h" - -/* function prototypes */ -local void fixedtables OF((struct inflate_state FAR *state)); - -/* - strm provides memory allocation functions in zalloc and zfree, or - Z_NULL to use the library memory allocation functions. - - windowBits is in the range 8..15, and window is a user-supplied - window and output buffer that is 2**windowBits bytes. - */ -int ZEXPORT inflateBackInit_(strm, windowBits, window, version, stream_size) -z_streamp strm; -int windowBits; -unsigned char FAR *window; -const char *version; -int stream_size; -{ - struct inflate_state FAR *state; - - if (version == Z_NULL || version[0] != ZLIB_VERSION[0] || - stream_size != (int)(sizeof(z_stream))) - return Z_VERSION_ERROR; - if (strm == Z_NULL || window == Z_NULL || - windowBits < 8 || windowBits > 15) - return Z_STREAM_ERROR; - strm->msg = Z_NULL; /* in case we return an error */ - if (strm->zalloc == (alloc_func)0) { - strm->zalloc = zcalloc; - strm->opaque = (voidpf)0; - } - if (strm->zfree == (free_func)0) strm->zfree = zcfree; - state = (struct inflate_state FAR *)ZALLOC(strm, 1, - sizeof(struct inflate_state)); - if (state == Z_NULL) return Z_MEM_ERROR; - Tracev((stderr, "inflate: allocated\n")); - strm->state = (struct internal_state FAR *)state; - state->dmax = 32768U; - state->wbits = windowBits; - state->wsize = 1U << windowBits; - state->window = window; - state->wnext = 0; - state->whave = 0; - return Z_OK; -} - -/* - Return state with length and distance decoding tables and index sizes set to - fixed code decoding. Normally this returns fixed tables from inffixed.h. - If BUILDFIXED is defined, then instead this routine builds the tables the - first time it's called, and returns those tables the first time and - thereafter. This reduces the size of the code by about 2K bytes, in - exchange for a little execution time. 
However, BUILDFIXED should not be - used for threaded applications, since the rewriting of the tables and virgin - may not be thread-safe. - */ -local void fixedtables(state) -struct inflate_state FAR *state; -{ -#ifdef BUILDFIXED - static int virgin = 1; - static code *lenfix, *distfix; - static code fixed[544]; - - /* build fixed huffman tables if first call (may not be thread safe) */ - if (virgin) { - unsigned sym, bits; - static code *next; - - /* literal/length table */ - sym = 0; - while (sym < 144) state->lens[sym++] = 8; - while (sym < 256) state->lens[sym++] = 9; - while (sym < 280) state->lens[sym++] = 7; - while (sym < 288) state->lens[sym++] = 8; - next = fixed; - lenfix = next; - bits = 9; - inflate_table(LENS, state->lens, 288, &(next), &(bits), state->work); - - /* distance table */ - sym = 0; - while (sym < 32) state->lens[sym++] = 5; - distfix = next; - bits = 5; - inflate_table(DISTS, state->lens, 32, &(next), &(bits), state->work); - - /* do this just once */ - virgin = 0; - } -#else /* !BUILDFIXED */ -# include "inffixed.h" -#endif /* BUILDFIXED */ - state->lencode = lenfix; - state->lenbits = 9; - state->distcode = distfix; - state->distbits = 5; -} - -/* Macros for inflateBack(): */ - -/* Load returned state from inflate_fast() */ -#define LOAD() \ - do { \ - put = strm->next_out; \ - left = strm->avail_out; \ - next = strm->next_in; \ - have = strm->avail_in; \ - hold = state->hold; \ - bits = state->bits; \ - } while (0) - -/* Set state from registers for inflate_fast() */ -#define RESTORE() \ - do { \ - strm->next_out = put; \ - strm->avail_out = left; \ - strm->next_in = next; \ - strm->avail_in = have; \ - state->hold = hold; \ - state->bits = bits; \ - } while (0) - -/* Clear the input bit accumulator */ -#define INITBITS() \ - do { \ - hold = 0; \ - bits = 0; \ - } while (0) - -/* Assure that some input is available. If input is requested, but denied, - then return a Z_BUF_ERROR from inflateBack(). 
*/ -#define PULL() \ - do { \ - if (have == 0) { \ - have = in(in_desc, &next); \ - if (have == 0) { \ - next = Z_NULL; \ - ret = Z_BUF_ERROR; \ - goto inf_leave; \ - } \ - } \ - } while (0) - -/* Get a byte of input into the bit accumulator, or return from inflateBack() - with an error if there is no input available. */ -#define PULLBYTE() \ - do { \ - PULL(); \ - have--; \ - hold += (unsigned long)(*next++) << bits; \ - bits += 8; \ - } while (0) - -/* Assure that there are at least n bits in the bit accumulator. If there is - not enough available input to do that, then return from inflateBack() with - an error. */ -#define NEEDBITS(n) \ - do { \ - while (bits < (unsigned)(n)) \ - PULLBYTE(); \ - } while (0) - -/* Return the low n bits of the bit accumulator (n < 16) */ -#define BITS(n) \ - ((unsigned)hold & ((1U << (n)) - 1)) - -/* Remove n bits from the bit accumulator */ -#define DROPBITS(n) \ - do { \ - hold >>= (n); \ - bits -= (unsigned)(n); \ - } while (0) - -/* Remove zero to seven bits as needed to go to a byte boundary */ -#define BYTEBITS() \ - do { \ - hold >>= bits & 7; \ - bits -= bits & 7; \ - } while (0) - -/* Assure that some output space is available, by writing out the window - if it's full. If the write fails, return from inflateBack() with a - Z_BUF_ERROR. */ -#define ROOM() \ - do { \ - if (left == 0) { \ - put = state->window; \ - left = state->wsize; \ - state->whave = left; \ - if (out(out_desc, put, left)) { \ - ret = Z_BUF_ERROR; \ - goto inf_leave; \ - } \ - } \ - } while (0) - -/* - strm provides the memory allocation functions and window buffer on input, - and provides information on the unused input on return. For Z_DATA_ERROR - returns, strm will also provide an error message. - - in() and out() are the call-back input and output functions. When - inflateBack() needs more input, it calls in(). 
When inflateBack() has - filled the window with output, or when it completes with data in the - window, it calls out() to write out the data. The application must not - change the provided input until in() is called again or inflateBack() - returns. The application must not change the window/output buffer until - inflateBack() returns. - - in() and out() are called with a descriptor parameter provided in the - inflateBack() call. This parameter can be a structure that provides the - information required to do the read or write, as well as accumulated - information on the input and output such as totals and check values. - - in() should return zero on failure. out() should return non-zero on - failure. If either in() or out() fails, than inflateBack() returns a - Z_BUF_ERROR. strm->next_in can be checked for Z_NULL to see whether it - was in() or out() that caused in the error. Otherwise, inflateBack() - returns Z_STREAM_END on success, Z_DATA_ERROR for an deflate format - error, or Z_MEM_ERROR if it could not allocate memory for the state. - inflateBack() can also return Z_STREAM_ERROR if the input parameters - are not correct, i.e. strm is Z_NULL or the state was not initialized. 
- */ -int ZEXPORT inflateBack(strm, in, in_desc, out, out_desc) -z_streamp strm; -in_func in; -void FAR *in_desc; -out_func out; -void FAR *out_desc; -{ - struct inflate_state FAR *state; - unsigned char FAR *next; /* next input */ - unsigned char FAR *put; /* next output */ - unsigned have, left; /* available input and output */ - unsigned long hold; /* bit buffer */ - unsigned bits; /* bits in bit buffer */ - unsigned copy; /* number of stored or match bytes to copy */ - unsigned char FAR *from; /* where to copy match bytes from */ - code here; /* current decoding table entry */ - code last; /* parent table entry */ - unsigned len; /* length to copy for repeats, bits to drop */ - int ret; /* return code */ - static const unsigned short order[19] = /* permutation of code lengths */ - {16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; - - /* Check that the strm exists and that the state was initialized */ - if (strm == Z_NULL || strm->state == Z_NULL) - return Z_STREAM_ERROR; - state = (struct inflate_state FAR *)strm->state; - - /* Reset the state */ - strm->msg = Z_NULL; - state->mode = TYPE; - state->last = 0; - state->whave = 0; - next = strm->next_in; - have = next != Z_NULL ? strm->avail_in : 0; - hold = 0; - bits = 0; - put = state->window; - left = state->wsize; - - /* Inflate until end of block marked as last */ - for (;;) - switch (state->mode) { - case TYPE: - /* determine and dispatch block type */ - if (state->last) { - BYTEBITS(); - state->mode = DONE; - break; - } - NEEDBITS(3); - state->last = BITS(1); - DROPBITS(1); - switch (BITS(2)) { - case 0: /* stored block */ - Tracev((stderr, "inflate: stored block%s\n", - state->last ? " (last)" : "")); - state->mode = STORED; - break; - case 1: /* fixed block */ - fixedtables(state); - Tracev((stderr, "inflate: fixed codes block%s\n", - state->last ? 
" (last)" : "")); - state->mode = LEN; /* decode codes */ - break; - case 2: /* dynamic block */ - Tracev((stderr, "inflate: dynamic codes block%s\n", - state->last ? " (last)" : "")); - state->mode = TABLE; - break; - case 3: - strm->msg = (char *)"invalid block type"; - state->mode = BAD; - } - DROPBITS(2); - break; - - case STORED: - /* get and verify stored block length */ - BYTEBITS(); /* go to byte boundary */ - NEEDBITS(32); - if ((hold & 0xffff) != ((hold >> 16) ^ 0xffff)) { - strm->msg = (char *)"invalid stored block lengths"; - state->mode = BAD; - break; - } - state->length = (unsigned)hold & 0xffff; - Tracev((stderr, "inflate: stored length %u\n", - state->length)); - INITBITS(); - - /* copy stored block from input to output */ - while (state->length != 0) { - copy = state->length; - PULL(); - ROOM(); - if (copy > have) copy = have; - if (copy > left) copy = left; - zmemcpy(put, next, copy); - have -= copy; - next += copy; - left -= copy; - put += copy; - state->length -= copy; - } - Tracev((stderr, "inflate: stored end\n")); - state->mode = TYPE; - break; - - case TABLE: - /* get dynamic table entries descriptor */ - NEEDBITS(14); - state->nlen = BITS(5) + 257; - DROPBITS(5); - state->ndist = BITS(5) + 1; - DROPBITS(5); - state->ncode = BITS(4) + 4; - DROPBITS(4); -#ifndef PKZIP_BUG_WORKAROUND - if (state->nlen > 286 || state->ndist > 30) { - strm->msg = (char *)"too many length or distance symbols"; - state->mode = BAD; - break; - } -#endif - Tracev((stderr, "inflate: table sizes ok\n")); - - /* get code length code lengths (not a typo) */ - state->have = 0; - while (state->have < state->ncode) { - NEEDBITS(3); - state->lens[order[state->have++]] = (unsigned short)BITS(3); - DROPBITS(3); - } - while (state->have < 19) - state->lens[order[state->have++]] = 0; - state->next = state->codes; - state->lencode = (code const FAR *)(state->next); - state->lenbits = 7; - ret = inflate_table(CODES, state->lens, 19, &(state->next), - &(state->lenbits), 
state->work); - if (ret) { - strm->msg = (char *)"invalid code lengths set"; - state->mode = BAD; - break; - } - Tracev((stderr, "inflate: code lengths ok\n")); - - /* get length and distance code code lengths */ - state->have = 0; - while (state->have < state->nlen + state->ndist) { - for (;;) { - here = state->lencode[BITS(state->lenbits)]; - if ((unsigned)(here.bits) <= bits) break; - PULLBYTE(); - } - if (here.val < 16) { - NEEDBITS(here.bits); - DROPBITS(here.bits); - state->lens[state->have++] = here.val; - } - else { - if (here.val == 16) { - NEEDBITS(here.bits + 2); - DROPBITS(here.bits); - if (state->have == 0) { - strm->msg = (char *)"invalid bit length repeat"; - state->mode = BAD; - break; - } - len = (unsigned)(state->lens[state->have - 1]); - copy = 3 + BITS(2); - DROPBITS(2); - } - else if (here.val == 17) { - NEEDBITS(here.bits + 3); - DROPBITS(here.bits); - len = 0; - copy = 3 + BITS(3); - DROPBITS(3); - } - else { - NEEDBITS(here.bits + 7); - DROPBITS(here.bits); - len = 0; - copy = 11 + BITS(7); - DROPBITS(7); - } - if (state->have + copy > state->nlen + state->ndist) { - strm->msg = (char *)"invalid bit length repeat"; - state->mode = BAD; - break; - } - while (copy--) - state->lens[state->have++] = (unsigned short)len; - } - } - - /* handle error breaks in while */ - if (state->mode == BAD) break; - - /* check for end-of-block code (better have one) */ - if (state->lens[256] == 0) { - strm->msg = (char *)"invalid code -- missing end-of-block"; - state->mode = BAD; - break; - } - - /* build code tables -- note: do not change the lenbits or distbits - values here (9 and 6) without reading the comments in inftrees.h - concerning the ENOUGH constants, which depend on those values */ - state->next = state->codes; - state->lencode = (code const FAR *)(state->next); - state->lenbits = 9; - ret = inflate_table(LENS, state->lens, state->nlen, &(state->next), - &(state->lenbits), state->work); - if (ret) { - strm->msg = (char *)"invalid literal/lengths 
set"; - state->mode = BAD; - break; - } - state->distcode = (code const FAR *)(state->next); - state->distbits = 6; - ret = inflate_table(DISTS, state->lens + state->nlen, state->ndist, - &(state->next), &(state->distbits), state->work); - if (ret) { - strm->msg = (char *)"invalid distances set"; - state->mode = BAD; - break; - } - Tracev((stderr, "inflate: codes ok\n")); - state->mode = LEN; - - case LEN: - /* use inflate_fast() if we have enough input and output */ - if (have >= 6 && left >= 258) { - RESTORE(); - if (state->whave < state->wsize) - state->whave = state->wsize - left; - inflate_fast(strm, state->wsize); - LOAD(); - break; - } - - /* get a literal, length, or end-of-block code */ - for (;;) { - here = state->lencode[BITS(state->lenbits)]; - if ((unsigned)(here.bits) <= bits) break; - PULLBYTE(); - } - if (here.op && (here.op & 0xf0) == 0) { - last = here; - for (;;) { - here = state->lencode[last.val + - (BITS(last.bits + last.op) >> last.bits)]; - if ((unsigned)(last.bits + here.bits) <= bits) break; - PULLBYTE(); - } - DROPBITS(last.bits); - } - DROPBITS(here.bits); - state->length = (unsigned)here.val; - - /* process literal */ - if (here.op == 0) { - Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? 
- "inflate: literal '%c'\n" : - "inflate: literal 0x%02x\n", here.val)); - ROOM(); - *put++ = (unsigned char)(state->length); - left--; - state->mode = LEN; - break; - } - - /* process end of block */ - if (here.op & 32) { - Tracevv((stderr, "inflate: end of block\n")); - state->mode = TYPE; - break; - } - - /* invalid code */ - if (here.op & 64) { - strm->msg = (char *)"invalid literal/length code"; - state->mode = BAD; - break; - } - - /* length code -- get extra bits, if any */ - state->extra = (unsigned)(here.op) & 15; - if (state->extra != 0) { - NEEDBITS(state->extra); - state->length += BITS(state->extra); - DROPBITS(state->extra); - } - Tracevv((stderr, "inflate: length %u\n", state->length)); - - /* get distance code */ - for (;;) { - here = state->distcode[BITS(state->distbits)]; - if ((unsigned)(here.bits) <= bits) break; - PULLBYTE(); - } - if ((here.op & 0xf0) == 0) { - last = here; - for (;;) { - here = state->distcode[last.val + - (BITS(last.bits + last.op) >> last.bits)]; - if ((unsigned)(last.bits + here.bits) <= bits) break; - PULLBYTE(); - } - DROPBITS(last.bits); - } - DROPBITS(here.bits); - if (here.op & 64) { - strm->msg = (char *)"invalid distance code"; - state->mode = BAD; - break; - } - state->offset = (unsigned)here.val; - - /* get distance extra bits, if any */ - state->extra = (unsigned)(here.op) & 15; - if (state->extra != 0) { - NEEDBITS(state->extra); - state->offset += BITS(state->extra); - DROPBITS(state->extra); - } - if (state->offset > state->wsize - (state->whave < state->wsize ? 
- left : 0)) { - strm->msg = (char *)"invalid distance too far back"; - state->mode = BAD; - break; - } - Tracevv((stderr, "inflate: distance %u\n", state->offset)); - - /* copy match from window to output */ - do { - ROOM(); - copy = state->wsize - state->offset; - if (copy < left) { - from = put + copy; - copy = left - copy; - } - else { - from = put - state->offset; - copy = left; - } - if (copy > state->length) copy = state->length; - state->length -= copy; - left -= copy; - do { - *put++ = *from++; - } while (--copy); - } while (state->length != 0); - break; - - case DONE: - /* inflate stream terminated properly -- write leftover output */ - ret = Z_STREAM_END; - if (left < state->wsize) { - if (out(out_desc, state->window, state->wsize - left)) - ret = Z_BUF_ERROR; - } - goto inf_leave; - - case BAD: - ret = Z_DATA_ERROR; - goto inf_leave; - - default: /* can't happen, but makes compilers happy */ - ret = Z_STREAM_ERROR; - goto inf_leave; - } - - /* Return unused input */ - inf_leave: - strm->next_in = next; - strm->avail_in = have; - return ret; -} - -int ZEXPORT inflateBackEnd(strm) -z_streamp strm; -{ - if (strm == Z_NULL || strm->state == Z_NULL || strm->zfree == (free_func)0) - return Z_STREAM_ERROR; - ZFREE(strm, strm->state); - strm->state = Z_NULL; - Tracev((stderr, "inflate: end\n")); - return Z_OK; -} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/inffast.c cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/inffast.c --- cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/inffast.c 2014-11-18 11:13:10.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/inffast.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,340 +0,0 @@ -/* inffast.c -- fast decoding - * Copyright (C) 1995-2008, 2010 Mark Adler - * For conditions of distribution and use, see copyright notice in zlib.h - */ - -#include "zutil.h" -#include "inftrees.h" -#include "inflate.h" -#include "inffast.h" - -#ifndef ASMINF - -/* Allow machine dependent optimization for 
post-increment or pre-increment. - Based on testing to date, - Pre-increment preferred for: - - PowerPC G3 (Adler) - - MIPS R5000 (Randers-Pehrson) - Post-increment preferred for: - - none - No measurable difference: - - Pentium III (Anderson) - - M68060 (Nikl) - */ -#ifdef POSTINC -# define OFF 0 -# define PUP(a) *(a)++ -#else -# define OFF 1 -# define PUP(a) *++(a) -#endif - -/* - Decode literal, length, and distance codes and write out the resulting - literal and match bytes until either not enough input or output is - available, an end-of-block is encountered, or a data error is encountered. - When large enough input and output buffers are supplied to inflate(), for - example, a 16K input buffer and a 64K output buffer, more than 95% of the - inflate execution time is spent in this routine. - - Entry assumptions: - - state->mode == LEN - strm->avail_in >= 6 - strm->avail_out >= 258 - start >= strm->avail_out - state->bits < 8 - - On return, state->mode is one of: - - LEN -- ran out of enough output space or enough available input - TYPE -- reached end of block code, inflate() to interpret next block - BAD -- error in block data - - Notes: - - - The maximum input bits used by a length/distance pair is 15 bits for the - length code, 5 bits for the length extra, 15 bits for the distance code, - and 13 bits for the distance extra. This totals 48 bits, or six bytes. - Therefore if strm->avail_in >= 6, then there is enough input to avoid - checking for available input while decoding. - - - The maximum bytes that a single length/distance pair can output is 258 - bytes, which is the maximum length that can be coded. inflate_fast() - requires strm->avail_out >= 258 for each loop to avoid checking for - output space. 
- */ -void ZLIB_INTERNAL inflate_fast(strm, start) -z_streamp strm; -unsigned start; /* inflate()'s starting value for strm->avail_out */ -{ - struct inflate_state FAR *state; - unsigned char FAR *in; /* local strm->next_in */ - unsigned char FAR *last; /* while in < last, enough input available */ - unsigned char FAR *out; /* local strm->next_out */ - unsigned char FAR *beg; /* inflate()'s initial strm->next_out */ - unsigned char FAR *end; /* while out < end, enough space available */ -#ifdef INFLATE_STRICT - unsigned dmax; /* maximum distance from zlib header */ -#endif - unsigned wsize; /* window size or zero if not using window */ - unsigned whave; /* valid bytes in the window */ - unsigned wnext; /* window write index */ - unsigned char FAR *window; /* allocated sliding window, if wsize != 0 */ - unsigned long hold; /* local strm->hold */ - unsigned bits; /* local strm->bits */ - code const FAR *lcode; /* local strm->lencode */ - code const FAR *dcode; /* local strm->distcode */ - unsigned lmask; /* mask for first level of length codes */ - unsigned dmask; /* mask for first level of distance codes */ - code here; /* retrieved table entry */ - unsigned op; /* code bits, operation, extra bits, or */ - /* window position, window bytes to copy */ - unsigned len; /* match length, unused bytes */ - unsigned dist; /* match distance */ - unsigned char FAR *from; /* where to copy match from */ - - /* copy state to local variables */ - state = (struct inflate_state FAR *)strm->state; - in = strm->next_in - OFF; - last = in + (strm->avail_in - 5); - out = strm->next_out - OFF; - beg = out - (start - strm->avail_out); - end = out + (strm->avail_out - 257); -#ifdef INFLATE_STRICT - dmax = state->dmax; -#endif - wsize = state->wsize; - whave = state->whave; - wnext = state->wnext; - window = state->window; - hold = state->hold; - bits = state->bits; - lcode = state->lencode; - dcode = state->distcode; - lmask = (1U << state->lenbits) - 1; - dmask = (1U << state->distbits) 
- 1; - - /* decode literals and length/distances until end-of-block or not enough - input data or output space */ - do { - if (bits < 15) { - hold += (unsigned long)(PUP(in)) << bits; - bits += 8; - hold += (unsigned long)(PUP(in)) << bits; - bits += 8; - } - here = lcode[hold & lmask]; - dolen: - op = (unsigned)(here.bits); - hold >>= op; - bits -= op; - op = (unsigned)(here.op); - if (op == 0) { /* literal */ - Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? - "inflate: literal '%c'\n" : - "inflate: literal 0x%02x\n", here.val)); - PUP(out) = (unsigned char)(here.val); - } - else if (op & 16) { /* length base */ - len = (unsigned)(here.val); - op &= 15; /* number of extra bits */ - if (op) { - if (bits < op) { - hold += (unsigned long)(PUP(in)) << bits; - bits += 8; - } - len += (unsigned)hold & ((1U << op) - 1); - hold >>= op; - bits -= op; - } - Tracevv((stderr, "inflate: length %u\n", len)); - if (bits < 15) { - hold += (unsigned long)(PUP(in)) << bits; - bits += 8; - hold += (unsigned long)(PUP(in)) << bits; - bits += 8; - } - here = dcode[hold & dmask]; - dodist: - op = (unsigned)(here.bits); - hold >>= op; - bits -= op; - op = (unsigned)(here.op); - if (op & 16) { /* distance base */ - dist = (unsigned)(here.val); - op &= 15; /* number of extra bits */ - if (bits < op) { - hold += (unsigned long)(PUP(in)) << bits; - bits += 8; - if (bits < op) { - hold += (unsigned long)(PUP(in)) << bits; - bits += 8; - } - } - dist += (unsigned)hold & ((1U << op) - 1); -#ifdef INFLATE_STRICT - if (dist > dmax) { - strm->msg = (char *)"invalid distance too far back"; - state->mode = BAD; - break; - } -#endif - hold >>= op; - bits -= op; - Tracevv((stderr, "inflate: distance %u\n", dist)); - op = (unsigned)(out - beg); /* max distance in output */ - if (dist > op) { /* see if copy from window */ - op = dist - op; /* distance back in window */ - if (op > whave) { - if (state->sane) { - strm->msg = - (char *)"invalid distance too far back"; - state->mode = BAD; - break; 
- } -#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR - if (len <= op - whave) { - do { - PUP(out) = 0; - } while (--len); - continue; - } - len -= op - whave; - do { - PUP(out) = 0; - } while (--op > whave); - if (op == 0) { - from = out - dist; - do { - PUP(out) = PUP(from); - } while (--len); - continue; - } -#endif - } - from = window - OFF; - if (wnext == 0) { /* very common case */ - from += wsize - op; - if (op < len) { /* some from window */ - len -= op; - do { - PUP(out) = PUP(from); - } while (--op); - from = out - dist; /* rest from output */ - } - } - else if (wnext < op) { /* wrap around window */ - from += wsize + wnext - op; - op -= wnext; - if (op < len) { /* some from end of window */ - len -= op; - do { - PUP(out) = PUP(from); - } while (--op); - from = window - OFF; - if (wnext < len) { /* some from start of window */ - op = wnext; - len -= op; - do { - PUP(out) = PUP(from); - } while (--op); - from = out - dist; /* rest from output */ - } - } - } - else { /* contiguous in window */ - from += wnext - op; - if (op < len) { /* some from window */ - len -= op; - do { - PUP(out) = PUP(from); - } while (--op); - from = out - dist; /* rest from output */ - } - } - while (len > 2) { - PUP(out) = PUP(from); - PUP(out) = PUP(from); - PUP(out) = PUP(from); - len -= 3; - } - if (len) { - PUP(out) = PUP(from); - if (len > 1) - PUP(out) = PUP(from); - } - } - else { - from = out - dist; /* copy direct from output */ - do { /* minimum length is three */ - PUP(out) = PUP(from); - PUP(out) = PUP(from); - PUP(out) = PUP(from); - len -= 3; - } while (len > 2); - if (len) { - PUP(out) = PUP(from); - if (len > 1) - PUP(out) = PUP(from); - } - } - } - else if ((op & 64) == 0) { /* 2nd level distance code */ - here = dcode[here.val + (hold & ((1U << op) - 1))]; - goto dodist; - } - else { - strm->msg = (char *)"invalid distance code"; - state->mode = BAD; - break; - } - } - else if ((op & 64) == 0) { /* 2nd level length code */ - here = lcode[here.val + (hold & ((1U 
<< op) - 1))]; - goto dolen; - } - else if (op & 32) { /* end-of-block */ - Tracevv((stderr, "inflate: end of block\n")); - state->mode = TYPE; - break; - } - else { - strm->msg = (char *)"invalid literal/length code"; - state->mode = BAD; - break; - } - } while (in < last && out < end); - - /* return unused bytes (on entry, bits < 8, so in won't go too far back) */ - len = bits >> 3; - in -= len; - bits -= len << 3; - hold &= (1U << bits) - 1; - - /* update state and return */ - strm->next_in = in + OFF; - strm->next_out = out + OFF; - strm->avail_in = (unsigned)(in < last ? 5 + (last - in) : 5 - (in - last)); - strm->avail_out = (unsigned)(out < end ? - 257 + (end - out) : 257 - (out - end)); - state->hold = hold; - state->bits = bits; - return; -} - -/* - inflate_fast() speedups that turned out slower (on a PowerPC G3 750CXe): - - Using bit fields for code structure - - Different op definition to avoid & for extra bits (do & for table bits) - - Three separate decoding do-loops for direct, window, and wnext == 0 - - Special case for distance > 1 copies to do overlapped load and store copy - - Explicit branch predictions (based on measured branch probabilities) - - Deferring match copy and interspersed it with decoding subsequent codes - - Swapping literal/length else - - Swapping window/direct else - - Larger unrolled copy loops (three is about right) - - Moving len -= 3 statement into middle of loop - */ - -#endif /* !ASMINF */ diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/inffast.h cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/inffast.h --- cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/inffast.h 2014-11-18 11:13:10.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/inffast.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,11 +0,0 @@ -/* inffast.h -- header to use inffast.c - * Copyright (C) 1995-2003, 2010 Mark Adler - * For conditions of distribution and use, see copyright notice in zlib.h - */ - -/* WARNING: this file should 
*not* be used by applications. It is - part of the implementation of the compression library and is - subject to change. Applications should only use zlib.h. - */ - -void ZLIB_INTERNAL inflate_fast OF((z_streamp strm, unsigned start)); diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/inffixed.h cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/inffixed.h --- cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/inffixed.h 2014-11-18 11:13:10.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/inffixed.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,94 +0,0 @@ - /* inffixed.h -- table for decoding fixed codes - * Generated automatically by makefixed(). - */ - - /* WARNING: this file should *not* be used by applications. It - is part of the implementation of the compression library and - is subject to change. Applications should only use zlib.h. - */ - - static const code lenfix[512] = { - {96,7,0},{0,8,80},{0,8,16},{20,8,115},{18,7,31},{0,8,112},{0,8,48}, - {0,9,192},{16,7,10},{0,8,96},{0,8,32},{0,9,160},{0,8,0},{0,8,128}, - {0,8,64},{0,9,224},{16,7,6},{0,8,88},{0,8,24},{0,9,144},{19,7,59}, - {0,8,120},{0,8,56},{0,9,208},{17,7,17},{0,8,104},{0,8,40},{0,9,176}, - {0,8,8},{0,8,136},{0,8,72},{0,9,240},{16,7,4},{0,8,84},{0,8,20}, - {21,8,227},{19,7,43},{0,8,116},{0,8,52},{0,9,200},{17,7,13},{0,8,100}, - {0,8,36},{0,9,168},{0,8,4},{0,8,132},{0,8,68},{0,9,232},{16,7,8}, - {0,8,92},{0,8,28},{0,9,152},{20,7,83},{0,8,124},{0,8,60},{0,9,216}, - {18,7,23},{0,8,108},{0,8,44},{0,9,184},{0,8,12},{0,8,140},{0,8,76}, - {0,9,248},{16,7,3},{0,8,82},{0,8,18},{21,8,163},{19,7,35},{0,8,114}, - {0,8,50},{0,9,196},{17,7,11},{0,8,98},{0,8,34},{0,9,164},{0,8,2}, - {0,8,130},{0,8,66},{0,9,228},{16,7,7},{0,8,90},{0,8,26},{0,9,148}, - {20,7,67},{0,8,122},{0,8,58},{0,9,212},{18,7,19},{0,8,106},{0,8,42}, - {0,9,180},{0,8,10},{0,8,138},{0,8,74},{0,9,244},{16,7,5},{0,8,86}, - {0,8,22},{64,8,0},{19,7,51},{0,8,118},{0,8,54},{0,9,204},{17,7,15}, - 
{0,8,102},{0,8,38},{0,9,172},{0,8,6},{0,8,134},{0,8,70},{0,9,236}, - {16,7,9},{0,8,94},{0,8,30},{0,9,156},{20,7,99},{0,8,126},{0,8,62}, - {0,9,220},{18,7,27},{0,8,110},{0,8,46},{0,9,188},{0,8,14},{0,8,142}, - {0,8,78},{0,9,252},{96,7,0},{0,8,81},{0,8,17},{21,8,131},{18,7,31}, - {0,8,113},{0,8,49},{0,9,194},{16,7,10},{0,8,97},{0,8,33},{0,9,162}, - {0,8,1},{0,8,129},{0,8,65},{0,9,226},{16,7,6},{0,8,89},{0,8,25}, - {0,9,146},{19,7,59},{0,8,121},{0,8,57},{0,9,210},{17,7,17},{0,8,105}, - {0,8,41},{0,9,178},{0,8,9},{0,8,137},{0,8,73},{0,9,242},{16,7,4}, - {0,8,85},{0,8,21},{16,8,258},{19,7,43},{0,8,117},{0,8,53},{0,9,202}, - {17,7,13},{0,8,101},{0,8,37},{0,9,170},{0,8,5},{0,8,133},{0,8,69}, - {0,9,234},{16,7,8},{0,8,93},{0,8,29},{0,9,154},{20,7,83},{0,8,125}, - {0,8,61},{0,9,218},{18,7,23},{0,8,109},{0,8,45},{0,9,186},{0,8,13}, - {0,8,141},{0,8,77},{0,9,250},{16,7,3},{0,8,83},{0,8,19},{21,8,195}, - {19,7,35},{0,8,115},{0,8,51},{0,9,198},{17,7,11},{0,8,99},{0,8,35}, - {0,9,166},{0,8,3},{0,8,131},{0,8,67},{0,9,230},{16,7,7},{0,8,91}, - {0,8,27},{0,9,150},{20,7,67},{0,8,123},{0,8,59},{0,9,214},{18,7,19}, - {0,8,107},{0,8,43},{0,9,182},{0,8,11},{0,8,139},{0,8,75},{0,9,246}, - {16,7,5},{0,8,87},{0,8,23},{64,8,0},{19,7,51},{0,8,119},{0,8,55}, - {0,9,206},{17,7,15},{0,8,103},{0,8,39},{0,9,174},{0,8,7},{0,8,135}, - {0,8,71},{0,9,238},{16,7,9},{0,8,95},{0,8,31},{0,9,158},{20,7,99}, - {0,8,127},{0,8,63},{0,9,222},{18,7,27},{0,8,111},{0,8,47},{0,9,190}, - {0,8,15},{0,8,143},{0,8,79},{0,9,254},{96,7,0},{0,8,80},{0,8,16}, - {20,8,115},{18,7,31},{0,8,112},{0,8,48},{0,9,193},{16,7,10},{0,8,96}, - {0,8,32},{0,9,161},{0,8,0},{0,8,128},{0,8,64},{0,9,225},{16,7,6}, - {0,8,88},{0,8,24},{0,9,145},{19,7,59},{0,8,120},{0,8,56},{0,9,209}, - {17,7,17},{0,8,104},{0,8,40},{0,9,177},{0,8,8},{0,8,136},{0,8,72}, - {0,9,241},{16,7,4},{0,8,84},{0,8,20},{21,8,227},{19,7,43},{0,8,116}, - {0,8,52},{0,9,201},{17,7,13},{0,8,100},{0,8,36},{0,9,169},{0,8,4}, - 
{0,8,132},{0,8,68},{0,9,233},{16,7,8},{0,8,92},{0,8,28},{0,9,153}, - {20,7,83},{0,8,124},{0,8,60},{0,9,217},{18,7,23},{0,8,108},{0,8,44}, - {0,9,185},{0,8,12},{0,8,140},{0,8,76},{0,9,249},{16,7,3},{0,8,82}, - {0,8,18},{21,8,163},{19,7,35},{0,8,114},{0,8,50},{0,9,197},{17,7,11}, - {0,8,98},{0,8,34},{0,9,165},{0,8,2},{0,8,130},{0,8,66},{0,9,229}, - {16,7,7},{0,8,90},{0,8,26},{0,9,149},{20,7,67},{0,8,122},{0,8,58}, - {0,9,213},{18,7,19},{0,8,106},{0,8,42},{0,9,181},{0,8,10},{0,8,138}, - {0,8,74},{0,9,245},{16,7,5},{0,8,86},{0,8,22},{64,8,0},{19,7,51}, - {0,8,118},{0,8,54},{0,9,205},{17,7,15},{0,8,102},{0,8,38},{0,9,173}, - {0,8,6},{0,8,134},{0,8,70},{0,9,237},{16,7,9},{0,8,94},{0,8,30}, - {0,9,157},{20,7,99},{0,8,126},{0,8,62},{0,9,221},{18,7,27},{0,8,110}, - {0,8,46},{0,9,189},{0,8,14},{0,8,142},{0,8,78},{0,9,253},{96,7,0}, - {0,8,81},{0,8,17},{21,8,131},{18,7,31},{0,8,113},{0,8,49},{0,9,195}, - {16,7,10},{0,8,97},{0,8,33},{0,9,163},{0,8,1},{0,8,129},{0,8,65}, - {0,9,227},{16,7,6},{0,8,89},{0,8,25},{0,9,147},{19,7,59},{0,8,121}, - {0,8,57},{0,9,211},{17,7,17},{0,8,105},{0,8,41},{0,9,179},{0,8,9}, - {0,8,137},{0,8,73},{0,9,243},{16,7,4},{0,8,85},{0,8,21},{16,8,258}, - {19,7,43},{0,8,117},{0,8,53},{0,9,203},{17,7,13},{0,8,101},{0,8,37}, - {0,9,171},{0,8,5},{0,8,133},{0,8,69},{0,9,235},{16,7,8},{0,8,93}, - {0,8,29},{0,9,155},{20,7,83},{0,8,125},{0,8,61},{0,9,219},{18,7,23}, - {0,8,109},{0,8,45},{0,9,187},{0,8,13},{0,8,141},{0,8,77},{0,9,251}, - {16,7,3},{0,8,83},{0,8,19},{21,8,195},{19,7,35},{0,8,115},{0,8,51}, - {0,9,199},{17,7,11},{0,8,99},{0,8,35},{0,9,167},{0,8,3},{0,8,131}, - {0,8,67},{0,9,231},{16,7,7},{0,8,91},{0,8,27},{0,9,151},{20,7,67}, - {0,8,123},{0,8,59},{0,9,215},{18,7,19},{0,8,107},{0,8,43},{0,9,183}, - {0,8,11},{0,8,139},{0,8,75},{0,9,247},{16,7,5},{0,8,87},{0,8,23}, - {64,8,0},{19,7,51},{0,8,119},{0,8,55},{0,9,207},{17,7,15},{0,8,103}, - {0,8,39},{0,9,175},{0,8,7},{0,8,135},{0,8,71},{0,9,239},{16,7,9}, - 
{0,8,95},{0,8,31},{0,9,159},{20,7,99},{0,8,127},{0,8,63},{0,9,223}, - {18,7,27},{0,8,111},{0,8,47},{0,9,191},{0,8,15},{0,8,143},{0,8,79}, - {0,9,255} - }; - - static const code distfix[32] = { - {16,5,1},{23,5,257},{19,5,17},{27,5,4097},{17,5,5},{25,5,1025}, - {21,5,65},{29,5,16385},{16,5,3},{24,5,513},{20,5,33},{28,5,8193}, - {18,5,9},{26,5,2049},{22,5,129},{64,5,0},{16,5,2},{23,5,385}, - {19,5,25},{27,5,6145},{17,5,7},{25,5,1537},{21,5,97},{29,5,24577}, - {16,5,4},{24,5,769},{20,5,49},{28,5,12289},{18,5,13},{26,5,3073}, - {22,5,193},{64,5,0} - }; diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/inflate.c cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/inflate.c --- cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/inflate.c 2014-11-18 11:13:10.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/inflate.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,1480 +0,0 @@ -/* inflate.c -- zlib decompression - * Copyright (C) 1995-2010 Mark Adler - * For conditions of distribution and use, see copyright notice in zlib.h - */ - -/* - * Change history: - * - * 1.2.beta0 24 Nov 2002 - * - First version -- complete rewrite of inflate to simplify code, avoid - * creation of window when not needed, minimize use of window when it is - * needed, make inffast.c even faster, implement gzip decoding, and to - * improve code readability and style over the previous zlib inflate code - * - * 1.2.beta1 25 Nov 2002 - * - Use pointers for available input and output checking in inffast.c - * - Remove input and output counters in inffast.c - * - Change inffast.c entry and loop from avail_in >= 7 to >= 6 - * - Remove unnecessary second byte pull from length extra in inffast.c - * - Unroll direct copy to three copies per loop in inffast.c - * - * 1.2.beta2 4 Dec 2002 - * - Change external routine names to reduce potential conflicts - * - Correct filename to inffixed.h for fixed tables in inflate.c - * - Make hbuf[] unsigned char to match parameter type in inflate.c - 
* - Change strm->next_out[-state->offset] to *(strm->next_out - state->offset) - * to avoid negation problem on Alphas (64 bit) in inflate.c - * - * 1.2.beta3 22 Dec 2002 - * - Add comments on state->bits assertion in inffast.c - * - Add comments on op field in inftrees.h - * - Fix bug in reuse of allocated window after inflateReset() - * - Remove bit fields--back to byte structure for speed - * - Remove distance extra == 0 check in inflate_fast()--only helps for lengths - * - Change post-increments to pre-increments in inflate_fast(), PPC biased? - * - Add compile time option, POSTINC, to use post-increments instead (Intel?) - * - Make MATCH copy in inflate() much faster for when inflate_fast() not used - * - Use local copies of stream next and avail values, as well as local bit - * buffer and bit count in inflate()--for speed when inflate_fast() not used - * - * 1.2.beta4 1 Jan 2003 - * - Split ptr - 257 statements in inflate_table() to avoid compiler warnings - * - Move a comment on output buffer sizes from inffast.c to inflate.c - * - Add comments in inffast.c to introduce the inflate_fast() routine - * - Rearrange window copies in inflate_fast() for speed and simplification - * - Unroll last copy for window match in inflate_fast() - * - Use local copies of window variables in inflate_fast() for speed - * - Pull out common wnext == 0 case for speed in inflate_fast() - * - Make op and len in inflate_fast() unsigned for consistency - * - Add FAR to lcode and dcode declarations in inflate_fast() - * - Simplified bad distance check in inflate_fast() - * - Added inflateBackInit(), inflateBack(), and inflateBackEnd() in new - * source file infback.c to provide a call-back interface to inflate for - * programs like gzip and unzip -- uses window as output buffer to avoid - * window copying - * - * 1.2.beta5 1 Jan 2003 - * - Improved inflateBack() interface to allow the caller to provide initial - * input in strm. 
- * - Fixed stored blocks bug in inflateBack() - * - * 1.2.beta6 4 Jan 2003 - * - Added comments in inffast.c on effectiveness of POSTINC - * - Typecasting all around to reduce compiler warnings - * - Changed loops from while (1) or do {} while (1) to for (;;), again to - * make compilers happy - * - Changed type of window in inflateBackInit() to unsigned char * - * - * 1.2.beta7 27 Jan 2003 - * - Changed many types to unsigned or unsigned short to avoid warnings - * - Added inflateCopy() function - * - * 1.2.0 9 Mar 2003 - * - Changed inflateBack() interface to provide separate opaque descriptors - * for the in() and out() functions - * - Changed inflateBack() argument and in_func typedef to swap the length - * and buffer address return values for the input function - * - Check next_in and next_out for Z_NULL on entry to inflate() - * - * The history for versions after 1.2.0 are in ChangeLog in zlib distribution. - */ - -#include "zutil.h" -#include "inftrees.h" -#include "inflate.h" -#include "inffast.h" - -#ifdef MAKEFIXED -# ifndef BUILDFIXED -# define BUILDFIXED -# endif -#endif - -/* function prototypes */ -local void fixedtables OF((struct inflate_state FAR *state)); -local int updatewindow OF((z_streamp strm, unsigned out)); -#ifdef BUILDFIXED - void makefixed OF((void)); -#endif -local unsigned syncsearch OF((unsigned FAR *have, unsigned char FAR *buf, - unsigned len)); - -int ZEXPORT inflateReset(strm) -z_streamp strm; -{ - struct inflate_state FAR *state; - - if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; - state = (struct inflate_state FAR *)strm->state; - strm->total_in = strm->total_out = state->total = 0; - strm->msg = Z_NULL; - strm->adler = 1; /* to support ill-conceived Java test suite */ - state->mode = HEAD; - state->last = 0; - state->havedict = 0; - state->dmax = 32768U; - state->head = Z_NULL; - state->wsize = 0; - state->whave = 0; - state->wnext = 0; - state->hold = 0; - state->bits = 0; - state->lencode = 
state->distcode = state->next = state->codes; - state->sane = 1; - state->back = -1; - Tracev((stderr, "inflate: reset\n")); - return Z_OK; -} - -int ZEXPORT inflateReset2(strm, windowBits) -z_streamp strm; -int windowBits; -{ - int wrap; - struct inflate_state FAR *state; - - /* get the state */ - if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; - state = (struct inflate_state FAR *)strm->state; - - /* extract wrap request from windowBits parameter */ - if (windowBits < 0) { - wrap = 0; - windowBits = -windowBits; - } - else { - wrap = (windowBits >> 4) + 1; -#ifdef GUNZIP - if (windowBits < 48) - windowBits &= 15; -#endif - } - - /* set number of window bits, free window if different */ - if (windowBits && (windowBits < 8 || windowBits > 15)) - return Z_STREAM_ERROR; - if (state->window != Z_NULL && state->wbits != (unsigned)windowBits) { - ZFREE(strm, state->window); - state->window = Z_NULL; - } - - /* update state and reset the rest of it */ - state->wrap = wrap; - state->wbits = (unsigned)windowBits; - return inflateReset(strm); -} - -int ZEXPORT inflateInit2_(strm, windowBits, version, stream_size) -z_streamp strm; -int windowBits; -const char *version; -int stream_size; -{ - int ret; - struct inflate_state FAR *state; - - if (version == Z_NULL || version[0] != ZLIB_VERSION[0] || - stream_size != (int)(sizeof(z_stream))) - return Z_VERSION_ERROR; - if (strm == Z_NULL) return Z_STREAM_ERROR; - strm->msg = Z_NULL; /* in case we return an error */ - if (strm->zalloc == (alloc_func)0) { - strm->zalloc = zcalloc; - strm->opaque = (voidpf)0; - } - if (strm->zfree == (free_func)0) strm->zfree = zcfree; - state = (struct inflate_state FAR *) - ZALLOC(strm, 1, sizeof(struct inflate_state)); - if (state == Z_NULL) return Z_MEM_ERROR; - Tracev((stderr, "inflate: allocated\n")); - strm->state = (struct internal_state FAR *)state; - state->window = Z_NULL; - ret = inflateReset2(strm, windowBits); - if (ret != Z_OK) { - ZFREE(strm, state); - strm->state 
= Z_NULL; - } - return ret; -} - -int ZEXPORT inflateInit_(strm, version, stream_size) -z_streamp strm; -const char *version; -int stream_size; -{ - return inflateInit2_(strm, DEF_WBITS, version, stream_size); -} - -int ZEXPORT inflatePrime(strm, bits, value) -z_streamp strm; -int bits; -int value; -{ - struct inflate_state FAR *state; - - if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; - state = (struct inflate_state FAR *)strm->state; - if (bits < 0) { - state->hold = 0; - state->bits = 0; - return Z_OK; - } - if (bits > 16 || state->bits + bits > 32) return Z_STREAM_ERROR; - value &= (1L << bits) - 1; - state->hold += value << state->bits; - state->bits += bits; - return Z_OK; -} - -/* - Return state with length and distance decoding tables and index sizes set to - fixed code decoding. Normally this returns fixed tables from inffixed.h. - If BUILDFIXED is defined, then instead this routine builds the tables the - first time it's called, and returns those tables the first time and - thereafter. This reduces the size of the code by about 2K bytes, in - exchange for a little execution time. However, BUILDFIXED should not be - used for threaded applications, since the rewriting of the tables and virgin - may not be thread-safe. 
- */ -local void fixedtables(state) -struct inflate_state FAR *state; -{ -#ifdef BUILDFIXED - static int virgin = 1; - static code *lenfix, *distfix; - static code fixed[544]; - - /* build fixed huffman tables if first call (may not be thread safe) */ - if (virgin) { - unsigned sym, bits; - static code *next; - - /* literal/length table */ - sym = 0; - while (sym < 144) state->lens[sym++] = 8; - while (sym < 256) state->lens[sym++] = 9; - while (sym < 280) state->lens[sym++] = 7; - while (sym < 288) state->lens[sym++] = 8; - next = fixed; - lenfix = next; - bits = 9; - inflate_table(LENS, state->lens, 288, &(next), &(bits), state->work); - - /* distance table */ - sym = 0; - while (sym < 32) state->lens[sym++] = 5; - distfix = next; - bits = 5; - inflate_table(DISTS, state->lens, 32, &(next), &(bits), state->work); - - /* do this just once */ - virgin = 0; - } -#else /* !BUILDFIXED */ -# include "inffixed.h" -#endif /* BUILDFIXED */ - state->lencode = lenfix; - state->lenbits = 9; - state->distcode = distfix; - state->distbits = 5; -} - -#ifdef MAKEFIXED -#include - -/* - Write out the inffixed.h that is #include'd above. Defining MAKEFIXED also - defines BUILDFIXED, so the tables are built on the fly. makefixed() writes - those tables to stdout, which would be piped to inffixed.h. A small program - can simply call makefixed to do this: - - void makefixed(void); - - int main(void) - { - makefixed(); - return 0; - } - - Then that can be linked with zlib built with MAKEFIXED defined and run: - - a.out > inffixed.h - */ -void makefixed() -{ - unsigned low, size; - struct inflate_state state; - - fixedtables(&state); - puts(" /* inffixed.h -- table for decoding fixed codes"); - puts(" * Generated automatically by makefixed()."); - puts(" */"); - puts(""); - puts(" /* WARNING: this file should *not* be used by applications."); - puts(" It is part of the implementation of this library and is"); - puts(" subject to change. 
Applications should only use zlib.h."); - puts(" */"); - puts(""); - size = 1U << 9; - printf(" static const code lenfix[%u] = {", size); - low = 0; - for (;;) { - if ((low % 7) == 0) printf("\n "); - printf("{%u,%u,%d}", state.lencode[low].op, state.lencode[low].bits, - state.lencode[low].val); - if (++low == size) break; - putchar(','); - } - puts("\n };"); - size = 1U << 5; - printf("\n static const code distfix[%u] = {", size); - low = 0; - for (;;) { - if ((low % 6) == 0) printf("\n "); - printf("{%u,%u,%d}", state.distcode[low].op, state.distcode[low].bits, - state.distcode[low].val); - if (++low == size) break; - putchar(','); - } - puts("\n };"); -} -#endif /* MAKEFIXED */ - -/* - Update the window with the last wsize (normally 32K) bytes written before - returning. If window does not exist yet, create it. This is only called - when a window is already in use, or when output has been written during this - inflate call, but the end of the deflate stream has not been reached yet. - It is also called to create a window for dictionary data when a dictionary - is loaded. - - Providing output buffers larger than 32K to inflate() should provide a speed - advantage, since only the last 32K of output is copied to the sliding window - upon return from inflate(), and since all distances after the first 32K of - output will fall in the output data, making match copies simpler and faster. - The advantage may be dependent on the size of the processor's data caches. 
- */ -local int updatewindow(strm, out) -z_streamp strm; -unsigned out; -{ - struct inflate_state FAR *state; - unsigned copy, dist; - - state = (struct inflate_state FAR *)strm->state; - - /* if it hasn't been done already, allocate space for the window */ - if (state->window == Z_NULL) { - state->window = (unsigned char FAR *) - ZALLOC(strm, 1U << state->wbits, - sizeof(unsigned char)); - if (state->window == Z_NULL) return 1; - } - - /* if window not in use yet, initialize */ - if (state->wsize == 0) { - state->wsize = 1U << state->wbits; - state->wnext = 0; - state->whave = 0; - } - - /* copy state->wsize or less output bytes into the circular window */ - copy = out - strm->avail_out; - if (copy >= state->wsize) { - zmemcpy(state->window, strm->next_out - state->wsize, state->wsize); - state->wnext = 0; - state->whave = state->wsize; - } - else { - dist = state->wsize - state->wnext; - if (dist > copy) dist = copy; - zmemcpy(state->window + state->wnext, strm->next_out - copy, dist); - copy -= dist; - if (copy) { - zmemcpy(state->window, strm->next_out - copy, copy); - state->wnext = copy; - state->whave = state->wsize; - } - else { - state->wnext += dist; - if (state->wnext == state->wsize) state->wnext = 0; - if (state->whave < state->wsize) state->whave += dist; - } - } - return 0; -} - -/* Macros for inflate(): */ - -/* check function to use adler32() for zlib or crc32() for gzip */ -#ifdef GUNZIP -# define UPDATE(check, buf, len) \ - (state->flags ? 
crc32(check, buf, len) : adler32(check, buf, len)) -#else -# define UPDATE(check, buf, len) adler32(check, buf, len) -#endif - -/* check macros for header crc */ -#ifdef GUNZIP -# define CRC2(check, word) \ - do { \ - hbuf[0] = (unsigned char)(word); \ - hbuf[1] = (unsigned char)((word) >> 8); \ - check = crc32(check, hbuf, 2); \ - } while (0) - -# define CRC4(check, word) \ - do { \ - hbuf[0] = (unsigned char)(word); \ - hbuf[1] = (unsigned char)((word) >> 8); \ - hbuf[2] = (unsigned char)((word) >> 16); \ - hbuf[3] = (unsigned char)((word) >> 24); \ - check = crc32(check, hbuf, 4); \ - } while (0) -#endif - -/* Load registers with state in inflate() for speed */ -#define LOAD() \ - do { \ - put = strm->next_out; \ - left = strm->avail_out; \ - next = strm->next_in; \ - have = strm->avail_in; \ - hold = state->hold; \ - bits = state->bits; \ - } while (0) - -/* Restore state from registers in inflate() */ -#define RESTORE() \ - do { \ - strm->next_out = put; \ - strm->avail_out = left; \ - strm->next_in = next; \ - strm->avail_in = have; \ - state->hold = hold; \ - state->bits = bits; \ - } while (0) - -/* Clear the input bit accumulator */ -#define INITBITS() \ - do { \ - hold = 0; \ - bits = 0; \ - } while (0) - -/* Get a byte of input into the bit accumulator, or return from inflate() - if there is no input available. */ -#define PULLBYTE() \ - do { \ - if (have == 0) goto inf_leave; \ - have--; \ - hold += (unsigned long)(*next++) << bits; \ - bits += 8; \ - } while (0) - -/* Assure that there are at least n bits in the bit accumulator. If there is - not enough available input to do that, then return from inflate(). 
*/ -#define NEEDBITS(n) \ - do { \ - while (bits < (unsigned)(n)) \ - PULLBYTE(); \ - } while (0) - -/* Return the low n bits of the bit accumulator (n < 16) */ -#define BITS(n) \ - ((unsigned)hold & ((1U << (n)) - 1)) - -/* Remove n bits from the bit accumulator */ -#define DROPBITS(n) \ - do { \ - hold >>= (n); \ - bits -= (unsigned)(n); \ - } while (0) - -/* Remove zero to seven bits as needed to go to a byte boundary */ -#define BYTEBITS() \ - do { \ - hold >>= bits & 7; \ - bits -= bits & 7; \ - } while (0) - -/* Reverse the bytes in a 32-bit value */ -#define REVERSE(q) \ - ((((q) >> 24) & 0xff) + (((q) >> 8) & 0xff00) + \ - (((q) & 0xff00) << 8) + (((q) & 0xff) << 24)) - -/* - inflate() uses a state machine to process as much input data and generate as - much output data as possible before returning. The state machine is - structured roughly as follows: - - for (;;) switch (state) { - ... - case STATEn: - if (not enough input data or output space to make progress) - return; - ... make progress ... - state = STATEm; - break; - ... - } - - so when inflate() is called again, the same case is attempted again, and - if the appropriate resources are provided, the machine proceeds to the - next state. The NEEDBITS() macro is usually the way the state evaluates - whether it can proceed or should return. NEEDBITS() does the return if - the requested bits are not available. The typical use of the BITS macros - is: - - NEEDBITS(n); - ... do something with BITS(n) ... - DROPBITS(n); - - where NEEDBITS(n) either returns from inflate() if there isn't enough - input left to load n bits into the accumulator, or it continues. BITS(n) - gives the low n bits in the accumulator. When done, DROPBITS(n) drops - the low n bits off the accumulator. INITBITS() clears the accumulator - and sets the number of available bits to zero. BYTEBITS() discards just - enough bits to put the accumulator on a byte boundary. 
After BYTEBITS() - and a NEEDBITS(8), then BITS(8) would return the next byte in the stream. - - NEEDBITS(n) uses PULLBYTE() to get an available byte of input, or to return - if there is no input available. The decoding of variable length codes uses - PULLBYTE() directly in order to pull just enough bytes to decode the next - code, and no more. - - Some states loop until they get enough input, making sure that enough - state information is maintained to continue the loop where it left off - if NEEDBITS() returns in the loop. For example, want, need, and keep - would all have to actually be part of the saved state in case NEEDBITS() - returns: - - case STATEw: - while (want < need) { - NEEDBITS(n); - keep[want++] = BITS(n); - DROPBITS(n); - } - state = STATEx; - case STATEx: - - As shown above, if the next state is also the next case, then the break - is omitted. - - A state may also return if there is not enough output space available to - complete that state. Those states are copying stored data, writing a - literal byte, and copying a matching string. - - When returning, a "goto inf_leave" is used to update the total counters, - update the check value, and determine whether any progress has been made - during that inflate() call in order to return the proper return code. - Progress is defined as a change in either strm->avail_in or strm->avail_out. - When there is a window, goto inf_leave will update the window with the last - output written. If a goto inf_leave occurs in the middle of decompression - and there is no window currently, goto inf_leave will create one and copy - output to the window for the next call of inflate(). - - In this implementation, the flush parameter of inflate() only affects the - return code (per zlib.h). inflate() always writes as much as possible to - strm->next_out, given the space available and the provided input--the effect - documented in zlib.h of Z_SYNC_FLUSH. 
Furthermore, inflate() always defers - the allocation of and copying into a sliding window until necessary, which - provides the effect documented in zlib.h for Z_FINISH when the entire input - stream available. So the only thing the flush parameter actually does is: - when flush is set to Z_FINISH, inflate() cannot return Z_OK. Instead it - will return Z_BUF_ERROR if it has not reached the end of the stream. - */ - -int ZEXPORT inflate(strm, flush) -z_streamp strm; -int flush; -{ - struct inflate_state FAR *state; - unsigned char FAR *next; /* next input */ - unsigned char FAR *put; /* next output */ - unsigned have, left; /* available input and output */ - unsigned long hold; /* bit buffer */ - unsigned bits; /* bits in bit buffer */ - unsigned in, out; /* save starting available input and output */ - unsigned copy; /* number of stored or match bytes to copy */ - unsigned char FAR *from; /* where to copy match bytes from */ - code here; /* current decoding table entry */ - code last; /* parent table entry */ - unsigned len; /* length to copy for repeats, bits to drop */ - int ret; /* return code */ -#ifdef GUNZIP - unsigned char hbuf[4]; /* buffer for gzip header crc calculation */ -#endif - static const unsigned short order[19] = /* permutation of code lengths */ - {16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; - - if (strm == Z_NULL || strm->state == Z_NULL || strm->next_out == Z_NULL || - (strm->next_in == Z_NULL && strm->avail_in != 0)) - return Z_STREAM_ERROR; - - state = (struct inflate_state FAR *)strm->state; - if (state->mode == TYPE) state->mode = TYPEDO; /* skip check */ - LOAD(); - in = have; - out = left; - ret = Z_OK; - for (;;) - switch (state->mode) { - case HEAD: - if (state->wrap == 0) { - state->mode = TYPEDO; - break; - } - NEEDBITS(16); -#ifdef GUNZIP - if ((state->wrap & 2) && hold == 0x8b1f) { /* gzip header */ - state->check = crc32(0L, Z_NULL, 0); - CRC2(state->check, hold); - INITBITS(); - state->mode = FLAGS; - 
break; - } - state->flags = 0; /* expect zlib header */ - if (state->head != Z_NULL) - state->head->done = -1; - if (!(state->wrap & 1) || /* check if zlib header allowed */ -#else - if ( -#endif - ((BITS(8) << 8) + (hold >> 8)) % 31) { - strm->msg = (char *)"incorrect header check"; - state->mode = BAD; - break; - } - if (BITS(4) != Z_DEFLATED) { - strm->msg = (char *)"unknown compression method"; - state->mode = BAD; - break; - } - DROPBITS(4); - len = BITS(4) + 8; - if (state->wbits == 0) - state->wbits = len; - else if (len > state->wbits) { - strm->msg = (char *)"invalid window size"; - state->mode = BAD; - break; - } - state->dmax = 1U << len; - Tracev((stderr, "inflate: zlib header ok\n")); - strm->adler = state->check = adler32(0L, Z_NULL, 0); - state->mode = hold & 0x200 ? DICTID : TYPE; - INITBITS(); - break; -#ifdef GUNZIP - case FLAGS: - NEEDBITS(16); - state->flags = (int)(hold); - if ((state->flags & 0xff) != Z_DEFLATED) { - strm->msg = (char *)"unknown compression method"; - state->mode = BAD; - break; - } - if (state->flags & 0xe000) { - strm->msg = (char *)"unknown header flags set"; - state->mode = BAD; - break; - } - if (state->head != Z_NULL) - state->head->text = (int)((hold >> 8) & 1); - if (state->flags & 0x0200) CRC2(state->check, hold); - INITBITS(); - state->mode = TIME; - case TIME: - NEEDBITS(32); - if (state->head != Z_NULL) - state->head->time = hold; - if (state->flags & 0x0200) CRC4(state->check, hold); - INITBITS(); - state->mode = OS; - case OS: - NEEDBITS(16); - if (state->head != Z_NULL) { - state->head->xflags = (int)(hold & 0xff); - state->head->os = (int)(hold >> 8); - } - if (state->flags & 0x0200) CRC2(state->check, hold); - INITBITS(); - state->mode = EXLEN; - case EXLEN: - if (state->flags & 0x0400) { - NEEDBITS(16); - state->length = (unsigned)(hold); - if (state->head != Z_NULL) - state->head->extra_len = (unsigned)hold; - if (state->flags & 0x0200) CRC2(state->check, hold); - INITBITS(); - } - else if (state->head != 
Z_NULL) - state->head->extra = Z_NULL; - state->mode = EXTRA; - case EXTRA: - if (state->flags & 0x0400) { - copy = state->length; - if (copy > have) copy = have; - if (copy) { - if (state->head != Z_NULL && - state->head->extra != Z_NULL) { - len = state->head->extra_len - state->length; - zmemcpy(state->head->extra + len, next, - len + copy > state->head->extra_max ? - state->head->extra_max - len : copy); - } - if (state->flags & 0x0200) - state->check = crc32(state->check, next, copy); - have -= copy; - next += copy; - state->length -= copy; - } - if (state->length) goto inf_leave; - } - state->length = 0; - state->mode = NAME; - case NAME: - if (state->flags & 0x0800) { - if (have == 0) goto inf_leave; - copy = 0; - do { - len = (unsigned)(next[copy++]); - if (state->head != Z_NULL && - state->head->name != Z_NULL && - state->length < state->head->name_max) - state->head->name[state->length++] = len; - } while (len && copy < have); - if (state->flags & 0x0200) - state->check = crc32(state->check, next, copy); - have -= copy; - next += copy; - if (len) goto inf_leave; - } - else if (state->head != Z_NULL) - state->head->name = Z_NULL; - state->length = 0; - state->mode = COMMENT; - case COMMENT: - if (state->flags & 0x1000) { - if (have == 0) goto inf_leave; - copy = 0; - do { - len = (unsigned)(next[copy++]); - if (state->head != Z_NULL && - state->head->comment != Z_NULL && - state->length < state->head->comm_max) - state->head->comment[state->length++] = len; - } while (len && copy < have); - if (state->flags & 0x0200) - state->check = crc32(state->check, next, copy); - have -= copy; - next += copy; - if (len) goto inf_leave; - } - else if (state->head != Z_NULL) - state->head->comment = Z_NULL; - state->mode = HCRC; - case HCRC: - if (state->flags & 0x0200) { - NEEDBITS(16); - if (hold != (state->check & 0xffff)) { - strm->msg = (char *)"header crc mismatch"; - state->mode = BAD; - break; - } - INITBITS(); - } - if (state->head != Z_NULL) { - 
state->head->hcrc = (int)((state->flags >> 9) & 1); - state->head->done = 1; - } - strm->adler = state->check = crc32(0L, Z_NULL, 0); - state->mode = TYPE; - break; -#endif - case DICTID: - NEEDBITS(32); - strm->adler = state->check = REVERSE(hold); - INITBITS(); - state->mode = DICT; - case DICT: - if (state->havedict == 0) { - RESTORE(); - return Z_NEED_DICT; - } - strm->adler = state->check = adler32(0L, Z_NULL, 0); - state->mode = TYPE; - case TYPE: - if (flush == Z_BLOCK || flush == Z_TREES) goto inf_leave; - case TYPEDO: - if (state->last) { - BYTEBITS(); - state->mode = CHECK; - break; - } - NEEDBITS(3); - state->last = BITS(1); - DROPBITS(1); - switch (BITS(2)) { - case 0: /* stored block */ - Tracev((stderr, "inflate: stored block%s\n", - state->last ? " (last)" : "")); - state->mode = STORED; - break; - case 1: /* fixed block */ - fixedtables(state); - Tracev((stderr, "inflate: fixed codes block%s\n", - state->last ? " (last)" : "")); - state->mode = LEN_; /* decode codes */ - if (flush == Z_TREES) { - DROPBITS(2); - goto inf_leave; - } - break; - case 2: /* dynamic block */ - Tracev((stderr, "inflate: dynamic codes block%s\n", - state->last ? 
" (last)" : "")); - state->mode = TABLE; - break; - case 3: - strm->msg = (char *)"invalid block type"; - state->mode = BAD; - } - DROPBITS(2); - break; - case STORED: - BYTEBITS(); /* go to byte boundary */ - NEEDBITS(32); - if ((hold & 0xffff) != ((hold >> 16) ^ 0xffff)) { - strm->msg = (char *)"invalid stored block lengths"; - state->mode = BAD; - break; - } - state->length = (unsigned)hold & 0xffff; - Tracev((stderr, "inflate: stored length %u\n", - state->length)); - INITBITS(); - state->mode = COPY_; - if (flush == Z_TREES) goto inf_leave; - case COPY_: - state->mode = COPY; - case COPY: - copy = state->length; - if (copy) { - if (copy > have) copy = have; - if (copy > left) copy = left; - if (copy == 0) goto inf_leave; - zmemcpy(put, next, copy); - have -= copy; - next += copy; - left -= copy; - put += copy; - state->length -= copy; - break; - } - Tracev((stderr, "inflate: stored end\n")); - state->mode = TYPE; - break; - case TABLE: - NEEDBITS(14); - state->nlen = BITS(5) + 257; - DROPBITS(5); - state->ndist = BITS(5) + 1; - DROPBITS(5); - state->ncode = BITS(4) + 4; - DROPBITS(4); -#ifndef PKZIP_BUG_WORKAROUND - if (state->nlen > 286 || state->ndist > 30) { - strm->msg = (char *)"too many length or distance symbols"; - state->mode = BAD; - break; - } -#endif - Tracev((stderr, "inflate: table sizes ok\n")); - state->have = 0; - state->mode = LENLENS; - case LENLENS: - while (state->have < state->ncode) { - NEEDBITS(3); - state->lens[order[state->have++]] = (unsigned short)BITS(3); - DROPBITS(3); - } - while (state->have < 19) - state->lens[order[state->have++]] = 0; - state->next = state->codes; - state->lencode = (code const FAR *)(state->next); - state->lenbits = 7; - ret = inflate_table(CODES, state->lens, 19, &(state->next), - &(state->lenbits), state->work); - if (ret) { - strm->msg = (char *)"invalid code lengths set"; - state->mode = BAD; - break; - } - Tracev((stderr, "inflate: code lengths ok\n")); - state->have = 0; - state->mode = CODELENS; - 
case CODELENS: - while (state->have < state->nlen + state->ndist) { - for (;;) { - here = state->lencode[BITS(state->lenbits)]; - if ((unsigned)(here.bits) <= bits) break; - PULLBYTE(); - } - if (here.val < 16) { - NEEDBITS(here.bits); - DROPBITS(here.bits); - state->lens[state->have++] = here.val; - } - else { - if (here.val == 16) { - NEEDBITS(here.bits + 2); - DROPBITS(here.bits); - if (state->have == 0) { - strm->msg = (char *)"invalid bit length repeat"; - state->mode = BAD; - break; - } - len = state->lens[state->have - 1]; - copy = 3 + BITS(2); - DROPBITS(2); - } - else if (here.val == 17) { - NEEDBITS(here.bits + 3); - DROPBITS(here.bits); - len = 0; - copy = 3 + BITS(3); - DROPBITS(3); - } - else { - NEEDBITS(here.bits + 7); - DROPBITS(here.bits); - len = 0; - copy = 11 + BITS(7); - DROPBITS(7); - } - if (state->have + copy > state->nlen + state->ndist) { - strm->msg = (char *)"invalid bit length repeat"; - state->mode = BAD; - break; - } - while (copy--) - state->lens[state->have++] = (unsigned short)len; - } - } - - /* handle error breaks in while */ - if (state->mode == BAD) break; - - /* check for end-of-block code (better have one) */ - if (state->lens[256] == 0) { - strm->msg = (char *)"invalid code -- missing end-of-block"; - state->mode = BAD; - break; - } - - /* build code tables -- note: do not change the lenbits or distbits - values here (9 and 6) without reading the comments in inftrees.h - concerning the ENOUGH constants, which depend on those values */ - state->next = state->codes; - state->lencode = (code const FAR *)(state->next); - state->lenbits = 9; - ret = inflate_table(LENS, state->lens, state->nlen, &(state->next), - &(state->lenbits), state->work); - if (ret) { - strm->msg = (char *)"invalid literal/lengths set"; - state->mode = BAD; - break; - } - state->distcode = (code const FAR *)(state->next); - state->distbits = 6; - ret = inflate_table(DISTS, state->lens + state->nlen, state->ndist, - &(state->next), &(state->distbits), 
state->work); - if (ret) { - strm->msg = (char *)"invalid distances set"; - state->mode = BAD; - break; - } - Tracev((stderr, "inflate: codes ok\n")); - state->mode = LEN_; - if (flush == Z_TREES) goto inf_leave; - case LEN_: - state->mode = LEN; - case LEN: - if (have >= 6 && left >= 258) { - RESTORE(); - inflate_fast(strm, out); - LOAD(); - if (state->mode == TYPE) - state->back = -1; - break; - } - state->back = 0; - for (;;) { - here = state->lencode[BITS(state->lenbits)]; - if ((unsigned)(here.bits) <= bits) break; - PULLBYTE(); - } - if (here.op && (here.op & 0xf0) == 0) { - last = here; - for (;;) { - here = state->lencode[last.val + - (BITS(last.bits + last.op) >> last.bits)]; - if ((unsigned)(last.bits + here.bits) <= bits) break; - PULLBYTE(); - } - DROPBITS(last.bits); - state->back += last.bits; - } - DROPBITS(here.bits); - state->back += here.bits; - state->length = (unsigned)here.val; - if ((int)(here.op) == 0) { - Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? - "inflate: literal '%c'\n" : - "inflate: literal 0x%02x\n", here.val)); - state->mode = LIT; - break; - } - if (here.op & 32) { - Tracevv((stderr, "inflate: end of block\n")); - state->back = -1; - state->mode = TYPE; - break; - } - if (here.op & 64) { - strm->msg = (char *)"invalid literal/length code"; - state->mode = BAD; - break; - } - state->extra = (unsigned)(here.op) & 15; - state->mode = LENEXT; - case LENEXT: - if (state->extra) { - NEEDBITS(state->extra); - state->length += BITS(state->extra); - DROPBITS(state->extra); - state->back += state->extra; - } - Tracevv((stderr, "inflate: length %u\n", state->length)); - state->was = state->length; - state->mode = DIST; - case DIST: - for (;;) { - here = state->distcode[BITS(state->distbits)]; - if ((unsigned)(here.bits) <= bits) break; - PULLBYTE(); - } - if ((here.op & 0xf0) == 0) { - last = here; - for (;;) { - here = state->distcode[last.val + - (BITS(last.bits + last.op) >> last.bits)]; - if ((unsigned)(last.bits + here.bits) 
<= bits) break; - PULLBYTE(); - } - DROPBITS(last.bits); - state->back += last.bits; - } - DROPBITS(here.bits); - state->back += here.bits; - if (here.op & 64) { - strm->msg = (char *)"invalid distance code"; - state->mode = BAD; - break; - } - state->offset = (unsigned)here.val; - state->extra = (unsigned)(here.op) & 15; - state->mode = DISTEXT; - case DISTEXT: - if (state->extra) { - NEEDBITS(state->extra); - state->offset += BITS(state->extra); - DROPBITS(state->extra); - state->back += state->extra; - } -#ifdef INFLATE_STRICT - if (state->offset > state->dmax) { - strm->msg = (char *)"invalid distance too far back"; - state->mode = BAD; - break; - } -#endif - Tracevv((stderr, "inflate: distance %u\n", state->offset)); - state->mode = MATCH; - case MATCH: - if (left == 0) goto inf_leave; - copy = out - left; - if (state->offset > copy) { /* copy from window */ - copy = state->offset - copy; - if (copy > state->whave) { - if (state->sane) { - strm->msg = (char *)"invalid distance too far back"; - state->mode = BAD; - break; - } -#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR - Trace((stderr, "inflate.c too far\n")); - copy -= state->whave; - if (copy > state->length) copy = state->length; - if (copy > left) copy = left; - left -= copy; - state->length -= copy; - do { - *put++ = 0; - } while (--copy); - if (state->length == 0) state->mode = LEN; - break; -#endif - } - if (copy > state->wnext) { - copy -= state->wnext; - from = state->window + (state->wsize - copy); - } - else - from = state->window + (state->wnext - copy); - if (copy > state->length) copy = state->length; - } - else { /* copy from output */ - from = put - state->offset; - copy = state->length; - } - if (copy > left) copy = left; - left -= copy; - state->length -= copy; - do { - *put++ = *from++; - } while (--copy); - if (state->length == 0) state->mode = LEN; - break; - case LIT: - if (left == 0) goto inf_leave; - *put++ = (unsigned char)(state->length); - left--; - state->mode = LEN; - break; 
- case CHECK: - if (state->wrap) { - NEEDBITS(32); - out -= left; - strm->total_out += out; - state->total += out; - if (out) - strm->adler = state->check = - UPDATE(state->check, put - out, out); - out = left; - if (( -#ifdef GUNZIP - state->flags ? hold : -#endif - REVERSE(hold)) != state->check) { - strm->msg = (char *)"incorrect data check"; - state->mode = BAD; - break; - } - INITBITS(); - Tracev((stderr, "inflate: check matches trailer\n")); - } -#ifdef GUNZIP - state->mode = LENGTH; - case LENGTH: - if (state->wrap && state->flags) { - NEEDBITS(32); - if (hold != (state->total & 0xffffffffUL)) { - strm->msg = (char *)"incorrect length check"; - state->mode = BAD; - break; - } - INITBITS(); - Tracev((stderr, "inflate: length matches trailer\n")); - } -#endif - state->mode = DONE; - case DONE: - ret = Z_STREAM_END; - goto inf_leave; - case BAD: - ret = Z_DATA_ERROR; - goto inf_leave; - case MEM: - return Z_MEM_ERROR; - case SYNC: - default: - return Z_STREAM_ERROR; - } - - /* - Return from inflate(), updating the total counts and the check value. - If there was no progress during the inflate() call, return a buffer - error. Call updatewindow() to create and/or update the window state. - Note: a memory error from inflate() is non-recoverable. - */ - inf_leave: - RESTORE(); - if (state->wsize || (state->mode < CHECK && out != strm->avail_out)) - if (updatewindow(strm, out)) { - state->mode = MEM; - return Z_MEM_ERROR; - } - in -= strm->avail_in; - out -= strm->avail_out; - strm->total_in += in; - strm->total_out += out; - state->total += out; - if (state->wrap && out) - strm->adler = state->check = - UPDATE(state->check, strm->next_out - out, out); - strm->data_type = state->bits + (state->last ? 64 : 0) + - (state->mode == TYPE ? 128 : 0) + - (state->mode == LEN_ || state->mode == COPY_ ? 
256 : 0); - if (((in == 0 && out == 0) || flush == Z_FINISH) && ret == Z_OK) - ret = Z_BUF_ERROR; - return ret; -} - -int ZEXPORT inflateEnd(strm) -z_streamp strm; -{ - struct inflate_state FAR *state; - if (strm == Z_NULL || strm->state == Z_NULL || strm->zfree == (free_func)0) - return Z_STREAM_ERROR; - state = (struct inflate_state FAR *)strm->state; - if (state->window != Z_NULL) ZFREE(strm, state->window); - ZFREE(strm, strm->state); - strm->state = Z_NULL; - Tracev((stderr, "inflate: end\n")); - return Z_OK; -} - -int ZEXPORT inflateSetDictionary(strm, dictionary, dictLength) -z_streamp strm; -const Bytef *dictionary; -uInt dictLength; -{ - struct inflate_state FAR *state; - unsigned long id; - - /* check state */ - if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; - state = (struct inflate_state FAR *)strm->state; - if (state->wrap != 0 && state->mode != DICT) - return Z_STREAM_ERROR; - - /* check for correct dictionary id */ - if (state->mode == DICT) { - id = adler32(0L, Z_NULL, 0); - id = adler32(id, dictionary, dictLength); - if (id != state->check) - return Z_DATA_ERROR; - } - - /* copy dictionary to window */ - if (updatewindow(strm, strm->avail_out)) { - state->mode = MEM; - return Z_MEM_ERROR; - } - if (dictLength > state->wsize) { - zmemcpy(state->window, dictionary + dictLength - state->wsize, - state->wsize); - state->whave = state->wsize; - } - else { - zmemcpy(state->window + state->wsize - dictLength, dictionary, - dictLength); - state->whave = dictLength; - } - state->havedict = 1; - Tracev((stderr, "inflate: dictionary set\n")); - return Z_OK; -} - -int ZEXPORT inflateGetHeader(strm, head) -z_streamp strm; -gz_headerp head; -{ - struct inflate_state FAR *state; - - /* check state */ - if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; - state = (struct inflate_state FAR *)strm->state; - if ((state->wrap & 2) == 0) return Z_STREAM_ERROR; - - /* save header structure */ - state->head = head; - head->done = 0; 
- return Z_OK; -} - -/* - Search buf[0..len-1] for the pattern: 0, 0, 0xff, 0xff. Return when found - or when out of input. When called, *have is the number of pattern bytes - found in order so far, in 0..3. On return *have is updated to the new - state. If on return *have equals four, then the pattern was found and the - return value is how many bytes were read including the last byte of the - pattern. If *have is less than four, then the pattern has not been found - yet and the return value is len. In the latter case, syncsearch() can be - called again with more data and the *have state. *have is initialized to - zero for the first call. - */ -local unsigned syncsearch(have, buf, len) -unsigned FAR *have; -unsigned char FAR *buf; -unsigned len; -{ - unsigned got; - unsigned next; - - got = *have; - next = 0; - while (next < len && got < 4) { - if ((int)(buf[next]) == (got < 2 ? 0 : 0xff)) - got++; - else if (buf[next]) - got = 0; - else - got = 4 - got; - next++; - } - *have = got; - return next; -} - -int ZEXPORT inflateSync(strm) -z_streamp strm; -{ - unsigned len; /* number of bytes to look at or looked at */ - unsigned long in, out; /* temporary to save total_in and total_out */ - unsigned char buf[4]; /* to restore bit buffer to byte string */ - struct inflate_state FAR *state; - - /* check parameters */ - if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; - state = (struct inflate_state FAR *)strm->state; - if (strm->avail_in == 0 && state->bits < 8) return Z_BUF_ERROR; - - /* if first time, start search in bit buffer */ - if (state->mode != SYNC) { - state->mode = SYNC; - state->hold <<= state->bits & 7; - state->bits -= state->bits & 7; - len = 0; - while (state->bits >= 8) { - buf[len++] = (unsigned char)(state->hold); - state->hold >>= 8; - state->bits -= 8; - } - state->have = 0; - syncsearch(&(state->have), buf, len); - } - - /* search available input */ - len = syncsearch(&(state->have), strm->next_in, strm->avail_in); - 
strm->avail_in -= len; - strm->next_in += len; - strm->total_in += len; - - /* return no joy or set up to restart inflate() on a new block */ - if (state->have != 4) return Z_DATA_ERROR; - in = strm->total_in; out = strm->total_out; - inflateReset(strm); - strm->total_in = in; strm->total_out = out; - state->mode = TYPE; - return Z_OK; -} - -/* - Returns true if inflate is currently at the end of a block generated by - Z_SYNC_FLUSH or Z_FULL_FLUSH. This function is used by one PPP - implementation to provide an additional safety check. PPP uses - Z_SYNC_FLUSH but removes the length bytes of the resulting empty stored - block. When decompressing, PPP checks that at the end of input packet, - inflate is waiting for these length bytes. - */ -int ZEXPORT inflateSyncPoint(strm) -z_streamp strm; -{ - struct inflate_state FAR *state; - - if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; - state = (struct inflate_state FAR *)strm->state; - return state->mode == STORED && state->bits == 0; -} - -int ZEXPORT inflateCopy(dest, source) -z_streamp dest; -z_streamp source; -{ - struct inflate_state FAR *state; - struct inflate_state FAR *copy; - unsigned char FAR *window; - unsigned wsize; - - /* check input */ - if (dest == Z_NULL || source == Z_NULL || source->state == Z_NULL || - source->zalloc == (alloc_func)0 || source->zfree == (free_func)0) - return Z_STREAM_ERROR; - state = (struct inflate_state FAR *)source->state; - - /* allocate space */ - copy = (struct inflate_state FAR *) - ZALLOC(source, 1, sizeof(struct inflate_state)); - if (copy == Z_NULL) return Z_MEM_ERROR; - window = Z_NULL; - if (state->window != Z_NULL) { - window = (unsigned char FAR *) - ZALLOC(source, 1U << state->wbits, sizeof(unsigned char)); - if (window == Z_NULL) { - ZFREE(source, copy); - return Z_MEM_ERROR; - } - } - - /* copy state */ - zmemcpy(dest, source, sizeof(z_stream)); - zmemcpy(copy, state, sizeof(struct inflate_state)); - if (state->lencode >= state->codes && - 
state->lencode <= state->codes + ENOUGH - 1) { - copy->lencode = copy->codes + (state->lencode - state->codes); - copy->distcode = copy->codes + (state->distcode - state->codes); - } - copy->next = copy->codes + (state->next - state->codes); - if (window != Z_NULL) { - wsize = 1U << state->wbits; - zmemcpy(window, state->window, wsize); - } - copy->window = window; - dest->state = (struct internal_state FAR *)copy; - return Z_OK; -} - -int ZEXPORT inflateUndermine(strm, subvert) -z_streamp strm; -int subvert; -{ - struct inflate_state FAR *state; - - if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; - state = (struct inflate_state FAR *)strm->state; - state->sane = !subvert; -#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR - return Z_OK; -#else - state->sane = 1; - return Z_DATA_ERROR; -#endif -} - -long ZEXPORT inflateMark(strm) -z_streamp strm; -{ - struct inflate_state FAR *state; - - if (strm == Z_NULL || strm->state == Z_NULL) return -1L << 16; - state = (struct inflate_state FAR *)strm->state; - return ((long)(state->back) << 16) + - (state->mode == COPY ? state->length : - (state->mode == MATCH ? state->was - state->length : 0)); -} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/inflate.h cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/inflate.h --- cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/inflate.h 2014-11-18 11:13:10.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/inflate.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,122 +0,0 @@ -/* inflate.h -- internal inflate state definition - * Copyright (C) 1995-2009 Mark Adler - * For conditions of distribution and use, see copyright notice in zlib.h - */ - -/* WARNING: this file should *not* be used by applications. It is - part of the implementation of the compression library and is - subject to change. Applications should only use zlib.h. - */ - -/* define NO_GZIP when compiling if you want to disable gzip header and - trailer decoding by inflate(). 
NO_GZIP would be used to avoid linking in - the crc code when it is not needed. For shared libraries, gzip decoding - should be left enabled. */ -#ifndef NO_GZIP -# define GUNZIP -#endif - -/* Possible inflate modes between inflate() calls */ -typedef enum { - HEAD, /* i: waiting for magic header */ - FLAGS, /* i: waiting for method and flags (gzip) */ - TIME, /* i: waiting for modification time (gzip) */ - OS, /* i: waiting for extra flags and operating system (gzip) */ - EXLEN, /* i: waiting for extra length (gzip) */ - EXTRA, /* i: waiting for extra bytes (gzip) */ - NAME, /* i: waiting for end of file name (gzip) */ - COMMENT, /* i: waiting for end of comment (gzip) */ - HCRC, /* i: waiting for header crc (gzip) */ - DICTID, /* i: waiting for dictionary check value */ - DICT, /* waiting for inflateSetDictionary() call */ - TYPE, /* i: waiting for type bits, including last-flag bit */ - TYPEDO, /* i: same, but skip check to exit inflate on new block */ - STORED, /* i: waiting for stored size (length and complement) */ - COPY_, /* i/o: same as COPY below, but only first time in */ - COPY, /* i/o: waiting for input or output to copy stored block */ - TABLE, /* i: waiting for dynamic block table lengths */ - LENLENS, /* i: waiting for code length code lengths */ - CODELENS, /* i: waiting for length/lit and distance code lengths */ - LEN_, /* i: same as LEN below, but only first time in */ - LEN, /* i: waiting for length/lit/eob code */ - LENEXT, /* i: waiting for length extra bits */ - DIST, /* i: waiting for distance code */ - DISTEXT, /* i: waiting for distance extra bits */ - MATCH, /* o: waiting for output space to copy string */ - LIT, /* o: waiting for output space to write literal */ - CHECK, /* i: waiting for 32-bit check value */ - LENGTH, /* i: waiting for 32-bit length (gzip) */ - DONE, /* finished check, done -- remain here until reset */ - BAD, /* got a data error -- remain here until reset */ - MEM, /* got an inflate() memory error -- remain here 
until reset */ - SYNC /* looking for synchronization bytes to restart inflate() */ -} inflate_mode; - -/* - State transitions between above modes - - - (most modes can go to BAD or MEM on error -- not shown for clarity) - - Process header: - HEAD -> (gzip) or (zlib) or (raw) - (gzip) -> FLAGS -> TIME -> OS -> EXLEN -> EXTRA -> NAME -> COMMENT -> - HCRC -> TYPE - (zlib) -> DICTID or TYPE - DICTID -> DICT -> TYPE - (raw) -> TYPEDO - Read deflate blocks: - TYPE -> TYPEDO -> STORED or TABLE or LEN_ or CHECK - STORED -> COPY_ -> COPY -> TYPE - TABLE -> LENLENS -> CODELENS -> LEN_ - LEN_ -> LEN - Read deflate codes in fixed or dynamic block: - LEN -> LENEXT or LIT or TYPE - LENEXT -> DIST -> DISTEXT -> MATCH -> LEN - LIT -> LEN - Process trailer: - CHECK -> LENGTH -> DONE - */ - -/* state maintained between inflate() calls. Approximately 10K bytes. */ -struct inflate_state { - inflate_mode mode; /* current inflate mode */ - int last; /* true if processing last block */ - int wrap; /* bit 0 true for zlib, bit 1 true for gzip */ - int havedict; /* true if dictionary provided */ - int flags; /* gzip header method and flags (0 if zlib) */ - unsigned dmax; /* zlib header max distance (INFLATE_STRICT) */ - unsigned long check; /* protected copy of check value */ - unsigned long total; /* protected copy of output count */ - gz_headerp head; /* where to save gzip header information */ - /* sliding window */ - unsigned wbits; /* log base 2 of requested window size */ - unsigned wsize; /* window size or zero if not using window */ - unsigned whave; /* valid bytes in the window */ - unsigned wnext; /* window write index */ - unsigned char FAR *window; /* allocated sliding window, if needed */ - /* bit accumulator */ - unsigned long hold; /* input bit accumulator */ - unsigned bits; /* number of bits in "in" */ - /* for string and stored block copying */ - unsigned length; /* literal or length of data to copy */ - unsigned offset; /* distance back to copy string from */ - /* for 
table and code decoding */ - unsigned extra; /* extra bits needed */ - /* fixed and dynamic code tables */ - code const FAR *lencode; /* starting table for length/literal codes */ - code const FAR *distcode; /* starting table for distance codes */ - unsigned lenbits; /* index bits for lencode */ - unsigned distbits; /* index bits for distcode */ - /* dynamic table building */ - unsigned ncode; /* number of code length code lengths */ - unsigned nlen; /* number of length code lengths */ - unsigned ndist; /* number of distance code lengths */ - unsigned have; /* number of code lengths in lens[] */ - code FAR *next; /* next available space in codes[] */ - unsigned short lens[320]; /* temporary storage for code lengths */ - unsigned short work[288]; /* work area for code table building */ - code codes[ENOUGH]; /* space for code tables */ - int sane; /* if false, allow invalid distance too far */ - int back; /* bits back of last unprocessed length/lit */ - unsigned was; /* initial length of match */ -}; diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/inftrees.c cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/inftrees.c --- cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/inftrees.c 2014-11-18 11:13:10.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/inftrees.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,330 +0,0 @@ -/* inftrees.c -- generate Huffman trees for efficient decoding - * Copyright (C) 1995-2010 Mark Adler - * For conditions of distribution and use, see copyright notice in zlib.h - */ - -#include "zutil.h" -#include "inftrees.h" - -#define MAXBITS 15 - -const char inflate_copyright[] = - " inflate 1.2.5 Copyright 1995-2010 Mark Adler "; -/* - If you use the zlib library in a product, an acknowledgment is welcome - in the documentation of your product. If for some reason you cannot - include such an acknowledgment, I would appreciate that you keep this - copyright string in the executable of your product. 
- */ - -/* - Build a set of tables to decode the provided canonical Huffman code. - The code lengths are lens[0..codes-1]. The result starts at *table, - whose indices are 0..2^bits-1. work is a writable array of at least - lens shorts, which is used as a work area. type is the type of code - to be generated, CODES, LENS, or DISTS. On return, zero is success, - -1 is an invalid code, and +1 means that ENOUGH isn't enough. table - on return points to the next available entry's address. bits is the - requested root table index bits, and on return it is the actual root - table index bits. It will differ if the request is greater than the - longest code or if it is less than the shortest code. - */ -int ZLIB_INTERNAL inflate_table(type, lens, codes, table, bits, work) -codetype type; -unsigned short FAR *lens; -unsigned codes; -code FAR * FAR *table; -unsigned FAR *bits; -unsigned short FAR *work; -{ - unsigned len; /* a code's length in bits */ - unsigned sym; /* index of code symbols */ - unsigned min, max; /* minimum and maximum code lengths */ - unsigned root; /* number of index bits for root table */ - unsigned curr; /* number of index bits for current table */ - unsigned drop; /* code bits to drop for sub-table */ - int left; /* number of prefix codes available */ - unsigned used; /* code entries in table used */ - unsigned huff; /* Huffman code */ - unsigned incr; /* for incrementing code, index */ - unsigned fill; /* index for replicating entries */ - unsigned low; /* low bits for current root entry */ - unsigned mask; /* mask for low root bits */ - code here; /* table entry for duplication */ - code FAR *next; /* next available space in table */ - const unsigned short FAR *base; /* base value table to use */ - const unsigned short FAR *extra; /* extra bits table to use */ - int end; /* use base and extra for symbol > end */ - unsigned short count[MAXBITS+1]; /* number of codes of each length */ - unsigned short offs[MAXBITS+1]; /* offsets in table for each 
length */ - static const unsigned short lbase[31] = { /* Length codes 257..285 base */ - 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, - 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; - static const unsigned short lext[31] = { /* Length codes 257..285 extra */ - 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18, - 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 73, 195}; - static const unsigned short dbase[32] = { /* Distance codes 0..29 base */ - 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, - 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, - 8193, 12289, 16385, 24577, 0, 0}; - static const unsigned short dext[32] = { /* Distance codes 0..29 extra */ - 16, 16, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, - 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, - 28, 28, 29, 29, 64, 64}; - - /* - Process a set of code lengths to create a canonical Huffman code. The - code lengths are lens[0..codes-1]. Each length corresponds to the - symbols 0..codes-1. The Huffman code is generated by first sorting the - symbols by length from short to long, and retaining the symbol order - for codes with equal lengths. Then the code starts with all zero bits - for the first code of the shortest length, and the codes are integer - increments for the same length, and zeros are appended as the length - increases. For the deflate format, these bits are stored backwards - from their more natural integer increment ordering, and so when the - decoding tables are built in the large loop below, the integer codes - are incremented backwards. - - This routine assumes, but does not check, that all of the entries in - lens[] are in the range 0..MAXBITS. The caller must assure this. - 1..MAXBITS is interpreted as that code length. zero means that that - symbol does not occur in this code. 
- - The codes are sorted by computing a count of codes for each length, - creating from that a table of starting indices for each length in the - sorted table, and then entering the symbols in order in the sorted - table. The sorted table is work[], with that space being provided by - the caller. - - The length counts are used for other purposes as well, i.e. finding - the minimum and maximum length codes, determining if there are any - codes at all, checking for a valid set of lengths, and looking ahead - at length counts to determine sub-table sizes when building the - decoding tables. - */ - - /* accumulate lengths for codes (assumes lens[] all in 0..MAXBITS) */ - for (len = 0; len <= MAXBITS; len++) - count[len] = 0; - for (sym = 0; sym < codes; sym++) - count[lens[sym]]++; - - /* bound code lengths, force root to be within code lengths */ - root = *bits; - for (max = MAXBITS; max >= 1; max--) - if (count[max] != 0) break; - if (root > max) root = max; - if (max == 0) { /* no symbols to code at all */ - here.op = (unsigned char)64; /* invalid code marker */ - here.bits = (unsigned char)1; - here.val = (unsigned short)0; - *(*table)++ = here; /* make a table to force an error */ - *(*table)++ = here; - *bits = 1; - return 0; /* no symbols, but wait for decoding to report error */ - } - for (min = 1; min < max; min++) - if (count[min] != 0) break; - if (root < min) root = min; - - /* check for an over-subscribed or incomplete set of lengths */ - left = 1; - for (len = 1; len <= MAXBITS; len++) { - left <<= 1; - left -= count[len]; - if (left < 0) return -1; /* over-subscribed */ - } - if (left > 0 && (type == CODES || max != 1)) - return -1; /* incomplete set */ - - /* generate offsets into symbol table for each length for sorting */ - offs[1] = 0; - for (len = 1; len < MAXBITS; len++) - offs[len + 1] = offs[len] + count[len]; - - /* sort symbols by length, by symbol order within each length */ - for (sym = 0; sym < codes; sym++) - if (lens[sym] != 0) 
work[offs[lens[sym]]++] = (unsigned short)sym; - - /* - Create and fill in decoding tables. In this loop, the table being - filled is at next and has curr index bits. The code being used is huff - with length len. That code is converted to an index by dropping drop - bits off of the bottom. For codes where len is less than drop + curr, - those top drop + curr - len bits are incremented through all values to - fill the table with replicated entries. - - root is the number of index bits for the root table. When len exceeds - root, sub-tables are created pointed to by the root entry with an index - of the low root bits of huff. This is saved in low to check for when a - new sub-table should be started. drop is zero when the root table is - being filled, and drop is root when sub-tables are being filled. - - When a new sub-table is needed, it is necessary to look ahead in the - code lengths to determine what size sub-table is needed. The length - counts are used for this, and so count[] is decremented as codes are - entered in the tables. - - used keeps track of how many table entries have been allocated from the - provided *table space. It is checked for LENS and DIST tables against - the constants ENOUGH_LENS and ENOUGH_DISTS to guard against changes in - the initial root table size constants. See the comments in inftrees.h - for more information. - - sym increments through all symbols, and the loop terminates when - all codes of length max, i.e. all codes, have been processed. This - routine permits incomplete codes, so another loop after this one fills - in the rest of the decoding tables with invalid code markers. 
- */ - - /* set up for code type */ - switch (type) { - case CODES: - base = extra = work; /* dummy value--not used */ - end = 19; - break; - case LENS: - base = lbase; - base -= 257; - extra = lext; - extra -= 257; - end = 256; - break; - default: /* DISTS */ - base = dbase; - extra = dext; - end = -1; - } - - /* initialize state for loop */ - huff = 0; /* starting code */ - sym = 0; /* starting code symbol */ - len = min; /* starting code length */ - next = *table; /* current table to fill in */ - curr = root; /* current table index bits */ - drop = 0; /* current bits to drop from code for index */ - low = (unsigned)(-1); /* trigger new sub-table when len > root */ - used = 1U << root; /* use root table entries */ - mask = used - 1; /* mask for comparing low */ - - /* check available table space */ - if ((type == LENS && used >= ENOUGH_LENS) || - (type == DISTS && used >= ENOUGH_DISTS)) - return 1; - - /* process all codes and make table entries */ - for (;;) { - /* create table entry */ - here.bits = (unsigned char)(len - drop); - if ((int)(work[sym]) < end) { - here.op = (unsigned char)0; - here.val = work[sym]; - } - else if ((int)(work[sym]) > end) { - here.op = (unsigned char)(extra[work[sym]]); - here.val = base[work[sym]]; - } - else { - here.op = (unsigned char)(32 + 64); /* end of block */ - here.val = 0; - } - - /* replicate for those indices with low len bits equal to huff */ - incr = 1U << (len - drop); - fill = 1U << curr; - min = fill; /* save offset to next table */ - do { - fill -= incr; - next[(huff >> drop) + fill] = here; - } while (fill != 0); - - /* backwards increment the len-bit code huff */ - incr = 1U << (len - 1); - while (huff & incr) - incr >>= 1; - if (incr != 0) { - huff &= incr - 1; - huff += incr; - } - else - huff = 0; - - /* go to next symbol, update count, len */ - sym++; - if (--(count[len]) == 0) { - if (len == max) break; - len = lens[work[sym]]; - } - - /* create new sub-table if needed */ - if (len > root && (huff & mask) 
!= low) { - /* if first time, transition to sub-tables */ - if (drop == 0) - drop = root; - - /* increment past last table */ - next += min; /* here min is 1 << curr */ - - /* determine length of next table */ - curr = len - drop; - left = (int)(1 << curr); - while (curr + drop < max) { - left -= count[curr + drop]; - if (left <= 0) break; - curr++; - left <<= 1; - } - - /* check for enough space */ - used += 1U << curr; - if ((type == LENS && used >= ENOUGH_LENS) || - (type == DISTS && used >= ENOUGH_DISTS)) - return 1; - - /* point entry in root table to sub-table */ - low = huff & mask; - (*table)[low].op = (unsigned char)curr; - (*table)[low].bits = (unsigned char)root; - (*table)[low].val = (unsigned short)(next - *table); - } - } - - /* - Fill in rest of table for incomplete codes. This loop is similar to the - loop above in incrementing huff for table indices. It is assumed that - len is equal to curr + drop, so there is no loop needed to increment - through high index bits. When the current sub-table is filled, the loop - drops back to the root table to fill in any remaining entries there. 
- */ - here.op = (unsigned char)64; /* invalid code marker */ - here.bits = (unsigned char)(len - drop); - here.val = (unsigned short)0; - while (huff != 0) { - /* when done with sub-table, drop back to root table */ - if (drop != 0 && (huff & mask) != low) { - drop = 0; - len = root; - next = *table; - here.bits = (unsigned char)len; - } - - /* put invalid code marker in table */ - next[huff >> drop] = here; - - /* backwards increment the len-bit code huff */ - incr = 1U << (len - 1); - while (huff & incr) - incr >>= 1; - if (incr != 0) { - huff &= incr - 1; - huff += incr; - } - else - huff = 0; - } - - /* set return parameters */ - *table += used; - *bits = root; - return 0; -} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/inftrees.h cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/inftrees.h --- cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/inftrees.h 2014-11-18 11:13:10.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/inftrees.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,62 +0,0 @@ -/* inftrees.h -- header to use inftrees.c - * Copyright (C) 1995-2005, 2010 Mark Adler - * For conditions of distribution and use, see copyright notice in zlib.h - */ - -/* WARNING: this file should *not* be used by applications. It is - part of the implementation of the compression library and is - subject to change. Applications should only use zlib.h. - */ - -/* Structure for decoding tables. Each entry provides either the - information needed to do the operation requested by the code that - indexed that table entry, or it provides a pointer to another - table that indexes more bits of the code. op indicates whether - the entry is a pointer to another table, a literal, a length or - distance, an end-of-block, or an invalid code. For a table - pointer, the low four bits of op is the number of index bits of - that table. For a length or distance, the low four bits of op - is the number of extra bits to get after the code. 
bits is - the number of bits in this code or part of the code to drop off - of the bit buffer. val is the actual byte to output in the case - of a literal, the base length or distance, or the offset from - the current table to the next table. Each entry is four bytes. */ -typedef struct { - unsigned char op; /* operation, extra bits, table bits */ - unsigned char bits; /* bits in this part of the code */ - unsigned short val; /* offset in table or code value */ -} code; - -/* op values as set by inflate_table(): - 00000000 - literal - 0000tttt - table link, tttt != 0 is the number of table index bits - 0001eeee - length or distance, eeee is the number of extra bits - 01100000 - end of block - 01000000 - invalid code - */ - -/* Maximum size of the dynamic table. The maximum number of code structures is - 1444, which is the sum of 852 for literal/length codes and 592 for distance - codes. These values were found by exhaustive searches using the program - examples/enough.c found in the zlib distribtution. The arguments to that - program are the number of symbols, the initial root table size, and the - maximum bit length of a code. "enough 286 9 15" for literal/length codes - returns returns 852, and "enough 30 6 15" for distance codes returns 592. - The initial root table size (9 or 6) is found in the fifth argument of the - inflate_table() calls in inflate.c and infback.c. If the root table size is - changed, then these maximum sizes would be need to be recalculated and - updated. 
*/ -#define ENOUGH_LENS 852 -#define ENOUGH_DISTS 592 -#define ENOUGH (ENOUGH_LENS+ENOUGH_DISTS) - -/* Type of code to build for inflate_table() */ -typedef enum { - CODES, - LENS, - DISTS -} codetype; - -int ZLIB_INTERNAL inflate_table OF((codetype type, unsigned short FAR *lens, - unsigned codes, code FAR * FAR *table, - unsigned FAR *bits, unsigned short FAR *work)); diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/trees.c cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/trees.c --- cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/trees.c 2014-11-18 11:13:10.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/trees.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,1244 +0,0 @@ -/* trees.c -- output deflated data using Huffman coding - * Copyright (C) 1995-2010 Jean-loup Gailly - * detect_data_type() function provided freely by Cosmin Truta, 2006 - * For conditions of distribution and use, see copyright notice in zlib.h - */ - -/* - * ALGORITHM - * - * The "deflation" process uses several Huffman trees. The more - * common source values are represented by shorter bit sequences. - * - * Each code tree is stored in a compressed form which is itself - * a Huffman encoding of the lengths of all the code strings (in - * ascending order by source values). The actual code strings are - * reconstructed from the lengths in the inflate process, as described - * in the deflate specification. - * - * REFERENCES - * - * Deutsch, L.P.,"'Deflate' Compressed Data Format Specification". - * Available in ftp.uu.net:/pub/archiving/zip/doc/deflate-1.1.doc - * - * Storer, James A. - * Data Compression: Methods and Theory, pp. 49-50. - * Computer Science Press, 1988. ISBN 0-7167-8156-5. - * - * Sedgewick, R. - * Algorithms, p290. - * Addison-Wesley, 1983. ISBN 0-201-06672-6. 
- */ - -/* @(#) $Id$ */ - -/* #define GEN_TREES_H */ - -#include "deflate.h" - -#ifdef DEBUG -# include -#endif - -/* =========================================================================== - * Constants - */ - -#define MAX_BL_BITS 7 -/* Bit length codes must not exceed MAX_BL_BITS bits */ - -#define END_BLOCK 256 -/* end of block literal code */ - -#define REP_3_6 16 -/* repeat previous bit length 3-6 times (2 bits of repeat count) */ - -#define REPZ_3_10 17 -/* repeat a zero length 3-10 times (3 bits of repeat count) */ - -#define REPZ_11_138 18 -/* repeat a zero length 11-138 times (7 bits of repeat count) */ - -local const int extra_lbits[LENGTH_CODES] /* extra bits for each length code */ - = {0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0}; - -local const int extra_dbits[D_CODES] /* extra bits for each distance code */ - = {0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13}; - -local const int extra_blbits[BL_CODES]/* extra bits for each bit length code */ - = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7}; - -local const uch bl_order[BL_CODES] - = {16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15}; -/* The lengths of the bit length codes are sent in order of decreasing - * probability, to avoid transmitting the lengths for unused bit length codes. - */ - -#define Buf_size (8 * 2*sizeof(char)) -/* Number of bits used within bi_buf. (bi_buf might be implemented on - * more than 16 bits on some systems.) - */ - -/* =========================================================================== - * Local data. These are initialized only once. - */ - -#define DIST_CODE_LEN 512 /* see definition of array dist_code below */ - -#if defined(GEN_TREES_H) || !defined(STDC) -/* non ANSI compilers may not accept trees.h */ - -local ct_data static_ltree[L_CODES+2]; -/* The static literal tree. Since the bit lengths are imposed, there is no - * need for the L_CODES extra codes used during heap construction. 
However - * The codes 286 and 287 are needed to build a canonical tree (see _tr_init - * below). - */ - -local ct_data static_dtree[D_CODES]; -/* The static distance tree. (Actually a trivial tree since all codes use - * 5 bits.) - */ - -uch _dist_code[DIST_CODE_LEN]; -/* Distance codes. The first 256 values correspond to the distances - * 3 .. 258, the last 256 values correspond to the top 8 bits of - * the 15 bit distances. - */ - -uch _length_code[MAX_MATCH-MIN_MATCH+1]; -/* length code for each normalized match length (0 == MIN_MATCH) */ - -local int base_length[LENGTH_CODES]; -/* First normalized length for each code (0 = MIN_MATCH) */ - -local int base_dist[D_CODES]; -/* First normalized distance for each code (0 = distance of 1) */ - -#else -# include "trees.h" -#endif /* GEN_TREES_H */ - -struct static_tree_desc_s { - const ct_data *static_tree; /* static tree or NULL */ - const intf *extra_bits; /* extra bits for each code or NULL */ - int extra_base; /* base index for extra_bits */ - int elems; /* max number of elements in the tree */ - int max_length; /* max bit length for the codes */ -}; - -local static_tree_desc static_l_desc = -{static_ltree, extra_lbits, LITERALS+1, L_CODES, MAX_BITS}; - -local static_tree_desc static_d_desc = -{static_dtree, extra_dbits, 0, D_CODES, MAX_BITS}; - -local static_tree_desc static_bl_desc = -{(const ct_data *)0, extra_blbits, 0, BL_CODES, MAX_BL_BITS}; - -/* =========================================================================== - * Local (static) routines in this file. 
- */ - -local void tr_static_init OF((void)); -local void init_block OF((deflate_state *s)); -local void pqdownheap OF((deflate_state *s, ct_data *tree, int k)); -local void gen_bitlen OF((deflate_state *s, tree_desc *desc)); -local void gen_codes OF((ct_data *tree, int max_code, ushf *bl_count)); -local void build_tree OF((deflate_state *s, tree_desc *desc)); -local void scan_tree OF((deflate_state *s, ct_data *tree, int max_code)); -local void send_tree OF((deflate_state *s, ct_data *tree, int max_code)); -local int build_bl_tree OF((deflate_state *s)); -local void send_all_trees OF((deflate_state *s, int lcodes, int dcodes, - int blcodes)); -local void compress_block OF((deflate_state *s, ct_data *ltree, - ct_data *dtree)); -local int detect_data_type OF((deflate_state *s)); -local unsigned bi_reverse OF((unsigned value, int length)); -local void bi_windup OF((deflate_state *s)); -local void bi_flush OF((deflate_state *s)); -local void copy_block OF((deflate_state *s, charf *buf, unsigned len, - int header)); - -#ifdef GEN_TREES_H -local void gen_trees_header OF((void)); -#endif - -#ifndef DEBUG -# define send_code(s, c, tree) send_bits(s, tree[c].Code, tree[c].Len) - /* Send a code of the given tree. c and tree must not have side effects */ - -#else /* DEBUG */ -# define send_code(s, c, tree) \ - { if (z_verbose>2) fprintf(stderr,"\ncd %3d ",(c)); \ - send_bits(s, tree[c].Code, tree[c].Len); } -#endif - -/* =========================================================================== - * Output a short LSB first on the stream. - * IN assertion: there is enough room in pendingBuf. - */ -#define put_short(s, w) { \ - put_byte(s, (uch)((w) & 0xff)); \ - put_byte(s, (uch)((ush)(w) >> 8)); \ -} - -/* =========================================================================== - * Send a value on a given number of bits. - * IN assertion: length <= 16 and value fits in length bits. 
- */ -#ifdef DEBUG -local void send_bits OF((deflate_state *s, int value, int length)); - -local void send_bits(s, value, length) - deflate_state *s; - int value; /* value to send */ - int length; /* number of bits */ -{ - Tracevv((stderr," l %2d v %4x ", length, value)); - Assert(length > 0 && length <= 15, "invalid length"); - s->bits_sent += (ulg)length; - - /* If not enough room in bi_buf, use (valid) bits from bi_buf and - * (16 - bi_valid) bits from value, leaving (width - (16-bi_valid)) - * unused bits in value. - */ - if (s->bi_valid > (int)Buf_size - length) { - s->bi_buf |= (ush)value << s->bi_valid; - put_short(s, s->bi_buf); - s->bi_buf = (ush)value >> (Buf_size - s->bi_valid); - s->bi_valid += length - Buf_size; - } else { - s->bi_buf |= (ush)value << s->bi_valid; - s->bi_valid += length; - } -} -#else /* !DEBUG */ - -#define send_bits(s, value, length) \ -{ int len = length;\ - if (s->bi_valid > (int)Buf_size - len) {\ - int val = value;\ - s->bi_buf |= (ush)val << s->bi_valid;\ - put_short(s, s->bi_buf);\ - s->bi_buf = (ush)val >> (Buf_size - s->bi_valid);\ - s->bi_valid += len - Buf_size;\ - } else {\ - s->bi_buf |= (ush)(value) << s->bi_valid;\ - s->bi_valid += len;\ - }\ -} -#endif /* DEBUG */ - - -/* the arguments must not have side effects */ - -/* =========================================================================== - * Initialize the various 'constant' tables. 
- */ -local void tr_static_init() -{ -#if defined(GEN_TREES_H) || !defined(STDC) - static int static_init_done = 0; - int n; /* iterates over tree elements */ - int bits; /* bit counter */ - int length; /* length value */ - int code; /* code value */ - int dist; /* distance index */ - ush bl_count[MAX_BITS+1]; - /* number of codes at each bit length for an optimal tree */ - - if (static_init_done) return; - - /* For some embedded targets, global variables are not initialized: */ -#ifdef NO_INIT_GLOBAL_POINTERS - static_l_desc.static_tree = static_ltree; - static_l_desc.extra_bits = extra_lbits; - static_d_desc.static_tree = static_dtree; - static_d_desc.extra_bits = extra_dbits; - static_bl_desc.extra_bits = extra_blbits; -#endif - - /* Initialize the mapping length (0..255) -> length code (0..28) */ - length = 0; - for (code = 0; code < LENGTH_CODES-1; code++) { - base_length[code] = length; - for (n = 0; n < (1< dist code (0..29) */ - dist = 0; - for (code = 0 ; code < 16; code++) { - base_dist[code] = dist; - for (n = 0; n < (1<>= 7; /* from now on, all distances are divided by 128 */ - for ( ; code < D_CODES; code++) { - base_dist[code] = dist << 7; - for (n = 0; n < (1<<(extra_dbits[code]-7)); n++) { - _dist_code[256 + dist++] = (uch)code; - } - } - Assert (dist == 256, "tr_static_init: 256+dist != 512"); - - /* Construct the codes of the static literal tree */ - for (bits = 0; bits <= MAX_BITS; bits++) bl_count[bits] = 0; - n = 0; - while (n <= 143) static_ltree[n++].Len = 8, bl_count[8]++; - while (n <= 255) static_ltree[n++].Len = 9, bl_count[9]++; - while (n <= 279) static_ltree[n++].Len = 7, bl_count[7]++; - while (n <= 287) static_ltree[n++].Len = 8, bl_count[8]++; - /* Codes 286 and 287 do not exist, but we must include them in the - * tree construction to get a canonical Huffman tree (longest code - * all ones) - */ - gen_codes((ct_data *)static_ltree, L_CODES+1, bl_count); - - /* The static distance tree is trivial: */ - for (n = 0; n < D_CODES; n++) 
{ - static_dtree[n].Len = 5; - static_dtree[n].Code = bi_reverse((unsigned)n, 5); - } - static_init_done = 1; - -# ifdef GEN_TREES_H - gen_trees_header(); -# endif -#endif /* defined(GEN_TREES_H) || !defined(STDC) */ -} - -/* =========================================================================== - * Genererate the file trees.h describing the static trees. - */ -#ifdef GEN_TREES_H -# ifndef DEBUG -# include -# endif - -# define SEPARATOR(i, last, width) \ - ((i) == (last)? "\n};\n\n" : \ - ((i) % (width) == (width)-1 ? ",\n" : ", ")) - -void gen_trees_header() -{ - FILE *header = fopen("trees.h", "w"); - int i; - - Assert (header != NULL, "Can't open trees.h"); - fprintf(header, - "/* header created automatically with -DGEN_TREES_H */\n\n"); - - fprintf(header, "local const ct_data static_ltree[L_CODES+2] = {\n"); - for (i = 0; i < L_CODES+2; i++) { - fprintf(header, "{{%3u},{%3u}}%s", static_ltree[i].Code, - static_ltree[i].Len, SEPARATOR(i, L_CODES+1, 5)); - } - - fprintf(header, "local const ct_data static_dtree[D_CODES] = {\n"); - for (i = 0; i < D_CODES; i++) { - fprintf(header, "{{%2u},{%2u}}%s", static_dtree[i].Code, - static_dtree[i].Len, SEPARATOR(i, D_CODES-1, 5)); - } - - fprintf(header, "const uch ZLIB_INTERNAL _dist_code[DIST_CODE_LEN] = {\n"); - for (i = 0; i < DIST_CODE_LEN; i++) { - fprintf(header, "%2u%s", _dist_code[i], - SEPARATOR(i, DIST_CODE_LEN-1, 20)); - } - - fprintf(header, - "const uch ZLIB_INTERNAL _length_code[MAX_MATCH-MIN_MATCH+1]= {\n"); - for (i = 0; i < MAX_MATCH-MIN_MATCH+1; i++) { - fprintf(header, "%2u%s", _length_code[i], - SEPARATOR(i, MAX_MATCH-MIN_MATCH, 20)); - } - - fprintf(header, "local const int base_length[LENGTH_CODES] = {\n"); - for (i = 0; i < LENGTH_CODES; i++) { - fprintf(header, "%1u%s", base_length[i], - SEPARATOR(i, LENGTH_CODES-1, 20)); - } - - fprintf(header, "local const int base_dist[D_CODES] = {\n"); - for (i = 0; i < D_CODES; i++) { - fprintf(header, "%5u%s", base_dist[i], - SEPARATOR(i, D_CODES-1, 
10)); - } - - fclose(header); -} -#endif /* GEN_TREES_H */ - -/* =========================================================================== - * Initialize the tree data structures for a new zlib stream. - */ -void ZLIB_INTERNAL _tr_init(s) - deflate_state *s; -{ - tr_static_init(); - - s->l_desc.dyn_tree = s->dyn_ltree; - s->l_desc.stat_desc = &static_l_desc; - - s->d_desc.dyn_tree = s->dyn_dtree; - s->d_desc.stat_desc = &static_d_desc; - - s->bl_desc.dyn_tree = s->bl_tree; - s->bl_desc.stat_desc = &static_bl_desc; - - s->bi_buf = 0; - s->bi_valid = 0; - s->last_eob_len = 8; /* enough lookahead for inflate */ -#ifdef DEBUG - s->compressed_len = 0L; - s->bits_sent = 0L; -#endif - - /* Initialize the first block of the first file: */ - init_block(s); -} - -/* =========================================================================== - * Initialize a new block. - */ -local void init_block(s) - deflate_state *s; -{ - int n; /* iterates over tree elements */ - - /* Initialize the trees. */ - for (n = 0; n < L_CODES; n++) s->dyn_ltree[n].Freq = 0; - for (n = 0; n < D_CODES; n++) s->dyn_dtree[n].Freq = 0; - for (n = 0; n < BL_CODES; n++) s->bl_tree[n].Freq = 0; - - s->dyn_ltree[END_BLOCK].Freq = 1; - s->opt_len = s->static_len = 0L; - s->last_lit = s->matches = 0; -} - -#define SMALLEST 1 -/* Index within the heap array of least frequent node in the Huffman tree */ - - -/* =========================================================================== - * Remove the smallest element from the heap and recreate the heap with - * one less element. Updates heap and heap_len. - */ -#define pqremove(s, tree, top) \ -{\ - top = s->heap[SMALLEST]; \ - s->heap[SMALLEST] = s->heap[s->heap_len--]; \ - pqdownheap(s, tree, SMALLEST); \ -} - -/* =========================================================================== - * Compares to subtrees, using the tree depth as tie breaker when - * the subtrees have equal frequency. This minimizes the worst case length. 
- */ -#define smaller(tree, n, m, depth) \ - (tree[n].Freq < tree[m].Freq || \ - (tree[n].Freq == tree[m].Freq && depth[n] <= depth[m])) - -/* =========================================================================== - * Restore the heap property by moving down the tree starting at node k, - * exchanging a node with the smallest of its two sons if necessary, stopping - * when the heap property is re-established (each father smaller than its - * two sons). - */ -local void pqdownheap(s, tree, k) - deflate_state *s; - ct_data *tree; /* the tree to restore */ - int k; /* node to move down */ -{ - int v = s->heap[k]; - int j = k << 1; /* left son of k */ - while (j <= s->heap_len) { - /* Set j to the smallest of the two sons: */ - if (j < s->heap_len && - smaller(tree, s->heap[j+1], s->heap[j], s->depth)) { - j++; - } - /* Exit if v is smaller than both sons */ - if (smaller(tree, v, s->heap[j], s->depth)) break; - - /* Exchange v with the smallest son */ - s->heap[k] = s->heap[j]; k = j; - - /* And continue down the tree, setting j to the left son of k */ - j <<= 1; - } - s->heap[k] = v; -} - -/* =========================================================================== - * Compute the optimal bit lengths for a tree and update the total bit length - * for the current block. - * IN assertion: the fields freq and dad are set, heap[heap_max] and - * above are the tree nodes sorted by increasing frequency. - * OUT assertions: the field len is set to the optimal bit length, the - * array bl_count contains the frequencies for each bit length. - * The length opt_len is updated; static_len is also updated if stree is - * not null. 
- */ -local void gen_bitlen(s, desc) - deflate_state *s; - tree_desc *desc; /* the tree descriptor */ -{ - ct_data *tree = desc->dyn_tree; - int max_code = desc->max_code; - const ct_data *stree = desc->stat_desc->static_tree; - const intf *extra = desc->stat_desc->extra_bits; - int base = desc->stat_desc->extra_base; - int max_length = desc->stat_desc->max_length; - int h; /* heap index */ - int n, m; /* iterate over the tree elements */ - int bits; /* bit length */ - int xbits; /* extra bits */ - ush f; /* frequency */ - int overflow = 0; /* number of elements with bit length too large */ - - for (bits = 0; bits <= MAX_BITS; bits++) s->bl_count[bits] = 0; - - /* In a first pass, compute the optimal bit lengths (which may - * overflow in the case of the bit length tree). - */ - tree[s->heap[s->heap_max]].Len = 0; /* root of the heap */ - - for (h = s->heap_max+1; h < HEAP_SIZE; h++) { - n = s->heap[h]; - bits = tree[tree[n].Dad].Len + 1; - if (bits > max_length) bits = max_length, overflow++; - tree[n].Len = (ush)bits; - /* We overwrite tree[n].Dad which is no longer needed */ - - if (n > max_code) continue; /* not a leaf node */ - - s->bl_count[bits]++; - xbits = 0; - if (n >= base) xbits = extra[n-base]; - f = tree[n].Freq; - s->opt_len += (ulg)f * (bits + xbits); - if (stree) s->static_len += (ulg)f * (stree[n].Len + xbits); - } - if (overflow == 0) return; - - Trace((stderr,"\nbit length overflow\n")); - /* This happens for example on obj2 and pic of the Calgary corpus */ - - /* Find the first bit length which could increase: */ - do { - bits = max_length-1; - while (s->bl_count[bits] == 0) bits--; - s->bl_count[bits]--; /* move one leaf down the tree */ - s->bl_count[bits+1] += 2; /* move one overflow item as its brother */ - s->bl_count[max_length]--; - /* The brother of the overflow item also moves one step up, - * but this does not affect bl_count[max_length] - */ - overflow -= 2; - } while (overflow > 0); - - /* Now recompute all bit lengths, scanning in 
increasing frequency. - * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all - * lengths instead of fixing only the wrong ones. This idea is taken - * from 'ar' written by Haruhiko Okumura.) - */ - for (bits = max_length; bits != 0; bits--) { - n = s->bl_count[bits]; - while (n != 0) { - m = s->heap[--h]; - if (m > max_code) continue; - if ((unsigned) tree[m].Len != (unsigned) bits) { - Trace((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits)); - s->opt_len += ((long)bits - (long)tree[m].Len) - *(long)tree[m].Freq; - tree[m].Len = (ush)bits; - } - n--; - } - } -} - -/* =========================================================================== - * Generate the codes for a given tree and bit counts (which need not be - * optimal). - * IN assertion: the array bl_count contains the bit length statistics for - * the given tree and the field len is set for all tree elements. - * OUT assertion: the field code is set for all tree elements of non - * zero code length. - */ -local void gen_codes (tree, max_code, bl_count) - ct_data *tree; /* the tree to decorate */ - int max_code; /* largest code with non zero frequency */ - ushf *bl_count; /* number of codes at each bit length */ -{ - ush next_code[MAX_BITS+1]; /* next code value for each bit length */ - ush code = 0; /* running code value */ - int bits; /* bit index */ - int n; /* code index */ - - /* The distribution counts are first used to generate the code values - * without bit reversal. - */ - for (bits = 1; bits <= MAX_BITS; bits++) { - next_code[bits] = code = (code + bl_count[bits-1]) << 1; - } - /* Check that the bit counts in bl_count are consistent. The last code - * must be all ones. 
- */ - Assert (code + bl_count[MAX_BITS]-1 == (1<dyn_tree; - const ct_data *stree = desc->stat_desc->static_tree; - int elems = desc->stat_desc->elems; - int n, m; /* iterate over heap elements */ - int max_code = -1; /* largest code with non zero frequency */ - int node; /* new node being created */ - - /* Construct the initial heap, with least frequent element in - * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1]. - * heap[0] is not used. - */ - s->heap_len = 0, s->heap_max = HEAP_SIZE; - - for (n = 0; n < elems; n++) { - if (tree[n].Freq != 0) { - s->heap[++(s->heap_len)] = max_code = n; - s->depth[n] = 0; - } else { - tree[n].Len = 0; - } - } - - /* The pkzip format requires that at least one distance code exists, - * and that at least one bit should be sent even if there is only one - * possible code. So to avoid special checks later on we force at least - * two codes of non zero frequency. - */ - while (s->heap_len < 2) { - node = s->heap[++(s->heap_len)] = (max_code < 2 ? ++max_code : 0); - tree[node].Freq = 1; - s->depth[node] = 0; - s->opt_len--; if (stree) s->static_len -= stree[node].Len; - /* node is 0 or 1 so it does not have extra bits */ - } - desc->max_code = max_code; - - /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree, - * establish sub-heaps of increasing lengths: - */ - for (n = s->heap_len/2; n >= 1; n--) pqdownheap(s, tree, n); - - /* Construct the Huffman tree by repeatedly combining the least two - * frequent nodes. - */ - node = elems; /* next internal node of the tree */ - do { - pqremove(s, tree, n); /* n = node of least frequency */ - m = s->heap[SMALLEST]; /* m = node of next least frequency */ - - s->heap[--(s->heap_max)] = n; /* keep the nodes sorted by frequency */ - s->heap[--(s->heap_max)] = m; - - /* Create a new node father of n and m */ - tree[node].Freq = tree[n].Freq + tree[m].Freq; - s->depth[node] = (uch)((s->depth[n] >= s->depth[m] ? 
- s->depth[n] : s->depth[m]) + 1); - tree[n].Dad = tree[m].Dad = (ush)node; -#ifdef DUMP_BL_TREE - if (tree == s->bl_tree) { - fprintf(stderr,"\nnode %d(%d), sons %d(%d) %d(%d)", - node, tree[node].Freq, n, tree[n].Freq, m, tree[m].Freq); - } -#endif - /* and insert the new node in the heap */ - s->heap[SMALLEST] = node++; - pqdownheap(s, tree, SMALLEST); - - } while (s->heap_len >= 2); - - s->heap[--(s->heap_max)] = s->heap[SMALLEST]; - - /* At this point, the fields freq and dad are set. We can now - * generate the bit lengths. - */ - gen_bitlen(s, (tree_desc *)desc); - - /* The field len is now set, we can generate the bit codes */ - gen_codes ((ct_data *)tree, max_code, s->bl_count); -} - -/* =========================================================================== - * Scan a literal or distance tree to determine the frequencies of the codes - * in the bit length tree. - */ -local void scan_tree (s, tree, max_code) - deflate_state *s; - ct_data *tree; /* the tree to be scanned */ - int max_code; /* and its largest code of non zero frequency */ -{ - int n; /* iterates over all tree elements */ - int prevlen = -1; /* last emitted length */ - int curlen; /* length of current code */ - int nextlen = tree[0].Len; /* length of next code */ - int count = 0; /* repeat count of the current code */ - int max_count = 7; /* max repeat count */ - int min_count = 4; /* min repeat count */ - - if (nextlen == 0) max_count = 138, min_count = 3; - tree[max_code+1].Len = (ush)0xffff; /* guard */ - - for (n = 0; n <= max_code; n++) { - curlen = nextlen; nextlen = tree[n+1].Len; - if (++count < max_count && curlen == nextlen) { - continue; - } else if (count < min_count) { - s->bl_tree[curlen].Freq += count; - } else if (curlen != 0) { - if (curlen != prevlen) s->bl_tree[curlen].Freq++; - s->bl_tree[REP_3_6].Freq++; - } else if (count <= 10) { - s->bl_tree[REPZ_3_10].Freq++; - } else { - s->bl_tree[REPZ_11_138].Freq++; - } - count = 0; prevlen = curlen; - if (nextlen == 0) { - 
max_count = 138, min_count = 3; - } else if (curlen == nextlen) { - max_count = 6, min_count = 3; - } else { - max_count = 7, min_count = 4; - } - } -} - -/* =========================================================================== - * Send a literal or distance tree in compressed form, using the codes in - * bl_tree. - */ -local void send_tree (s, tree, max_code) - deflate_state *s; - ct_data *tree; /* the tree to be scanned */ - int max_code; /* and its largest code of non zero frequency */ -{ - int n; /* iterates over all tree elements */ - int prevlen = -1; /* last emitted length */ - int curlen; /* length of current code */ - int nextlen = tree[0].Len; /* length of next code */ - int count = 0; /* repeat count of the current code */ - int max_count = 7; /* max repeat count */ - int min_count = 4; /* min repeat count */ - - /* tree[max_code+1].Len = -1; */ /* guard already set */ - if (nextlen == 0) max_count = 138, min_count = 3; - - for (n = 0; n <= max_code; n++) { - curlen = nextlen; nextlen = tree[n+1].Len; - if (++count < max_count && curlen == nextlen) { - continue; - } else if (count < min_count) { - do { send_code(s, curlen, s->bl_tree); } while (--count != 0); - - } else if (curlen != 0) { - if (curlen != prevlen) { - send_code(s, curlen, s->bl_tree); count--; - } - Assert(count >= 3 && count <= 6, " 3_6?"); - send_code(s, REP_3_6, s->bl_tree); send_bits(s, count-3, 2); - - } else if (count <= 10) { - send_code(s, REPZ_3_10, s->bl_tree); send_bits(s, count-3, 3); - - } else { - send_code(s, REPZ_11_138, s->bl_tree); send_bits(s, count-11, 7); - } - count = 0; prevlen = curlen; - if (nextlen == 0) { - max_count = 138, min_count = 3; - } else if (curlen == nextlen) { - max_count = 6, min_count = 3; - } else { - max_count = 7, min_count = 4; - } - } -} - -/* =========================================================================== - * Construct the Huffman tree for the bit lengths and return the index in - * bl_order of the last bit length code to 
send. - */ -local int build_bl_tree(s) - deflate_state *s; -{ - int max_blindex; /* index of last bit length code of non zero freq */ - - /* Determine the bit length frequencies for literal and distance trees */ - scan_tree(s, (ct_data *)s->dyn_ltree, s->l_desc.max_code); - scan_tree(s, (ct_data *)s->dyn_dtree, s->d_desc.max_code); - - /* Build the bit length tree: */ - build_tree(s, (tree_desc *)(&(s->bl_desc))); - /* opt_len now includes the length of the tree representations, except - * the lengths of the bit lengths codes and the 5+5+4 bits for the counts. - */ - - /* Determine the number of bit length codes to send. The pkzip format - * requires that at least 4 bit length codes be sent. (appnote.txt says - * 3 but the actual value used is 4.) - */ - for (max_blindex = BL_CODES-1; max_blindex >= 3; max_blindex--) { - if (s->bl_tree[bl_order[max_blindex]].Len != 0) break; - } - /* Update opt_len to include the bit length tree and counts */ - s->opt_len += 3*(max_blindex+1) + 5+5+4; - Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld", - s->opt_len, s->static_len)); - - return max_blindex; -} - -/* =========================================================================== - * Send the header for a block using dynamic Huffman trees: the counts, the - * lengths of the bit length codes, the literal tree and the distance tree. - * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4. 
- */ -local void send_all_trees(s, lcodes, dcodes, blcodes) - deflate_state *s; - int lcodes, dcodes, blcodes; /* number of codes for each tree */ -{ - int rank; /* index in bl_order */ - - Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes"); - Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES, - "too many codes"); - Tracev((stderr, "\nbl counts: ")); - send_bits(s, lcodes-257, 5); /* not +255 as stated in appnote.txt */ - send_bits(s, dcodes-1, 5); - send_bits(s, blcodes-4, 4); /* not -3 as stated in appnote.txt */ - for (rank = 0; rank < blcodes; rank++) { - Tracev((stderr, "\nbl code %2d ", bl_order[rank])); - send_bits(s, s->bl_tree[bl_order[rank]].Len, 3); - } - Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent)); - - send_tree(s, (ct_data *)s->dyn_ltree, lcodes-1); /* literal tree */ - Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent)); - - send_tree(s, (ct_data *)s->dyn_dtree, dcodes-1); /* distance tree */ - Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent)); -} - -/* =========================================================================== - * Send a stored block - */ -void ZLIB_INTERNAL _tr_stored_block(s, buf, stored_len, last) - deflate_state *s; - charf *buf; /* input block */ - ulg stored_len; /* length of input block */ - int last; /* one if this is the last block for a file */ -{ - send_bits(s, (STORED_BLOCK<<1)+last, 3); /* send block type */ -#ifdef DEBUG - s->compressed_len = (s->compressed_len + 3 + 7) & (ulg)~7L; - s->compressed_len += (stored_len + 4) << 3; -#endif - copy_block(s, buf, (unsigned)stored_len, 1); /* with header */ -} - -/* =========================================================================== - * Send one empty static block to give enough lookahead for inflate. - * This takes 10 bits, of which 7 may remain in the bit buffer. - * The current inflate code requires 9 bits of lookahead. 
If the - * last two codes for the previous block (real code plus EOB) were coded - * on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode - * the last real code. In this case we send two empty static blocks instead - * of one. (There are no problems if the previous block is stored or fixed.) - * To simplify the code, we assume the worst case of last real code encoded - * on one bit only. - */ -void ZLIB_INTERNAL _tr_align(s) - deflate_state *s; -{ - send_bits(s, STATIC_TREES<<1, 3); - send_code(s, END_BLOCK, static_ltree); -#ifdef DEBUG - s->compressed_len += 10L; /* 3 for block type, 7 for EOB */ -#endif - bi_flush(s); - /* Of the 10 bits for the empty block, we have already sent - * (10 - bi_valid) bits. The lookahead for the last real code (before - * the EOB of the previous block) was thus at least one plus the length - * of the EOB plus what we have just sent of the empty static block. - */ - if (1 + s->last_eob_len + 10 - s->bi_valid < 9) { - send_bits(s, STATIC_TREES<<1, 3); - send_code(s, END_BLOCK, static_ltree); -#ifdef DEBUG - s->compressed_len += 10L; -#endif - bi_flush(s); - } - s->last_eob_len = 7; -} - -/* =========================================================================== - * Determine the best encoding for the current block: dynamic trees, static - * trees or store, and output the encoded block to the zip file. 
- */ -void ZLIB_INTERNAL _tr_flush_block(s, buf, stored_len, last) - deflate_state *s; - charf *buf; /* input block, or NULL if too old */ - ulg stored_len; /* length of input block */ - int last; /* one if this is the last block for a file */ -{ - ulg opt_lenb, static_lenb; /* opt_len and static_len in bytes */ - int max_blindex = 0; /* index of last bit length code of non zero freq */ - - /* Build the Huffman trees unless a stored block is forced */ - if (s->level > 0) { - - /* Check if the file is binary or text */ - if (s->strm->data_type == Z_UNKNOWN) - s->strm->data_type = detect_data_type(s); - - /* Construct the literal and distance trees */ - build_tree(s, (tree_desc *)(&(s->l_desc))); - Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len, - s->static_len)); - - build_tree(s, (tree_desc *)(&(s->d_desc))); - Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len, - s->static_len)); - /* At this point, opt_len and static_len are the total bit lengths of - * the compressed block data, excluding the tree representations. - */ - - /* Build the bit length tree for the above two trees, and get the index - * in bl_order of the last bit length code to send. - */ - max_blindex = build_bl_tree(s); - - /* Determine the best encoding. Compute the block lengths in bytes. */ - opt_lenb = (s->opt_len+3+7)>>3; - static_lenb = (s->static_len+3+7)>>3; - - Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ", - opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len, - s->last_lit)); - - if (static_lenb <= opt_lenb) opt_lenb = static_lenb; - - } else { - Assert(buf != (char*)0, "lost buf"); - opt_lenb = static_lenb = stored_len + 5; /* force a stored block */ - } - -#ifdef FORCE_STORED - if (buf != (char*)0) { /* force stored block */ -#else - if (stored_len+4 <= opt_lenb && buf != (char*)0) { - /* 4: two words for the lengths */ -#endif - /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE. 
- * Otherwise we can't have processed more than WSIZE input bytes since - * the last block flush, because compression would have been - * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to - * transform a block into a stored block. - */ - _tr_stored_block(s, buf, stored_len, last); - -#ifdef FORCE_STATIC - } else if (static_lenb >= 0) { /* force static trees */ -#else - } else if (s->strategy == Z_FIXED || static_lenb == opt_lenb) { -#endif - send_bits(s, (STATIC_TREES<<1)+last, 3); - compress_block(s, (ct_data *)static_ltree, (ct_data *)static_dtree); -#ifdef DEBUG - s->compressed_len += 3 + s->static_len; -#endif - } else { - send_bits(s, (DYN_TREES<<1)+last, 3); - send_all_trees(s, s->l_desc.max_code+1, s->d_desc.max_code+1, - max_blindex+1); - compress_block(s, (ct_data *)s->dyn_ltree, (ct_data *)s->dyn_dtree); -#ifdef DEBUG - s->compressed_len += 3 + s->opt_len; -#endif - } - Assert (s->compressed_len == s->bits_sent, "bad compressed size"); - /* The above check is made mod 2^32, for files larger than 512 MB - * and uLong implemented on 32 bits. - */ - init_block(s); - - if (last) { - bi_windup(s); -#ifdef DEBUG - s->compressed_len += 7; /* align on byte boundary */ -#endif - } - Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3, - s->compressed_len-7*last)); -} - -/* =========================================================================== - * Save the match info and tally the frequency counts. Return true if - * the current block must be flushed. 
- */ -int ZLIB_INTERNAL _tr_tally (s, dist, lc) - deflate_state *s; - unsigned dist; /* distance of matched string */ - unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */ -{ - s->d_buf[s->last_lit] = (ush)dist; - s->l_buf[s->last_lit++] = (uch)lc; - if (dist == 0) { - /* lc is the unmatched char */ - s->dyn_ltree[lc].Freq++; - } else { - s->matches++; - /* Here, lc is the match length - MIN_MATCH */ - dist--; /* dist = match distance - 1 */ - Assert((ush)dist < (ush)MAX_DIST(s) && - (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) && - (ush)d_code(dist) < (ush)D_CODES, "_tr_tally: bad match"); - - s->dyn_ltree[_length_code[lc]+LITERALS+1].Freq++; - s->dyn_dtree[d_code(dist)].Freq++; - } - -#ifdef TRUNCATE_BLOCK - /* Try to guess if it is profitable to stop the current block here */ - if ((s->last_lit & 0x1fff) == 0 && s->level > 2) { - /* Compute an upper bound for the compressed length */ - ulg out_length = (ulg)s->last_lit*8L; - ulg in_length = (ulg)((long)s->strstart - s->block_start); - int dcode; - for (dcode = 0; dcode < D_CODES; dcode++) { - out_length += (ulg)s->dyn_dtree[dcode].Freq * - (5L+extra_dbits[dcode]); - } - out_length >>= 3; - Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ", - s->last_lit, in_length, out_length, - 100L - out_length*100L/in_length)); - if (s->matches < s->last_lit/2 && out_length < in_length/2) return 1; - } -#endif - return (s->last_lit == s->lit_bufsize-1); - /* We avoid equality with lit_bufsize because of wraparound at 64K - * on 16 bit machines and because stored blocks are restricted to - * 64K-1 bytes. 
- */ -} - -/* =========================================================================== - * Send the block data compressed using the given Huffman trees - */ -local void compress_block(s, ltree, dtree) - deflate_state *s; - ct_data *ltree; /* literal tree */ - ct_data *dtree; /* distance tree */ -{ - unsigned dist; /* distance of matched string */ - int lc; /* match length or unmatched char (if dist == 0) */ - unsigned lx = 0; /* running index in l_buf */ - unsigned code; /* the code to send */ - int extra; /* number of extra bits to send */ - - if (s->last_lit != 0) do { - dist = s->d_buf[lx]; - lc = s->l_buf[lx++]; - if (dist == 0) { - send_code(s, lc, ltree); /* send a literal byte */ - Tracecv(isgraph(lc), (stderr," '%c' ", lc)); - } else { - /* Here, lc is the match length - MIN_MATCH */ - code = _length_code[lc]; - send_code(s, code+LITERALS+1, ltree); /* send the length code */ - extra = extra_lbits[code]; - if (extra != 0) { - lc -= base_length[code]; - send_bits(s, lc, extra); /* send the extra length bits */ - } - dist--; /* dist is now the match distance - 1 */ - code = d_code(dist); - Assert (code < D_CODES, "bad d_code"); - - send_code(s, code, dtree); /* send the distance code */ - extra = extra_dbits[code]; - if (extra != 0) { - dist -= base_dist[code]; - send_bits(s, dist, extra); /* send the extra distance bits */ - } - } /* literal or match pair ? */ - - /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */ - Assert((uInt)(s->pending) < s->lit_bufsize + 2*lx, - "pendingBuf overflow"); - - } while (lx < s->last_lit); - - send_code(s, END_BLOCK, ltree); - s->last_eob_len = ltree[END_BLOCK].Len; -} - -/* =========================================================================== - * Check if the data type is TEXT or BINARY, using the following algorithm: - * - TEXT if the two conditions below are satisfied: - * a) There are no non-portable control characters belonging to the - * "black list" (0..6, 14..25, 28..31). 
- * b) There is at least one printable character belonging to the - * "white list" (9 {TAB}, 10 {LF}, 13 {CR}, 32..255). - * - BINARY otherwise. - * - The following partially-portable control characters form a - * "gray list" that is ignored in this detection algorithm: - * (7 {BEL}, 8 {BS}, 11 {VT}, 12 {FF}, 26 {SUB}, 27 {ESC}). - * IN assertion: the fields Freq of dyn_ltree are set. - */ -local int detect_data_type(s) - deflate_state *s; -{ - /* black_mask is the bit mask of black-listed bytes - * set bits 0..6, 14..25, and 28..31 - * 0xf3ffc07f = binary 11110011111111111100000001111111 - */ - unsigned long black_mask = 0xf3ffc07fUL; - int n; - - /* Check for non-textual ("black-listed") bytes. */ - for (n = 0; n <= 31; n++, black_mask >>= 1) - if ((black_mask & 1) && (s->dyn_ltree[n].Freq != 0)) - return Z_BINARY; - - /* Check for textual ("white-listed") bytes. */ - if (s->dyn_ltree[9].Freq != 0 || s->dyn_ltree[10].Freq != 0 - || s->dyn_ltree[13].Freq != 0) - return Z_TEXT; - for (n = 32; n < LITERALS; n++) - if (s->dyn_ltree[n].Freq != 0) - return Z_TEXT; - - /* There are no "black-listed" or "white-listed" bytes: - * this stream either is empty or has tolerated ("gray-listed") bytes only. - */ - return Z_BINARY; -} - -/* =========================================================================== - * Reverse the first len bits of a code, using straightforward code (a faster - * method would use a table) - * IN assertion: 1 <= len <= 15 - */ -local unsigned bi_reverse(code, len) - unsigned code; /* the value to invert */ - int len; /* its bit length */ -{ - register unsigned res = 0; - do { - res |= code & 1; - code >>= 1, res <<= 1; - } while (--len > 0); - return res >> 1; -} - -/* =========================================================================== - * Flush the bit buffer, keeping at most 7 bits in it. 
- */ -local void bi_flush(s) - deflate_state *s; -{ - if (s->bi_valid == 16) { - put_short(s, s->bi_buf); - s->bi_buf = 0; - s->bi_valid = 0; - } else if (s->bi_valid >= 8) { - put_byte(s, (Byte)s->bi_buf); - s->bi_buf >>= 8; - s->bi_valid -= 8; - } -} - -/* =========================================================================== - * Flush the bit buffer and align the output on a byte boundary - */ -local void bi_windup(s) - deflate_state *s; -{ - if (s->bi_valid > 8) { - put_short(s, s->bi_buf); - } else if (s->bi_valid > 0) { - put_byte(s, (Byte)s->bi_buf); - } - s->bi_buf = 0; - s->bi_valid = 0; -#ifdef DEBUG - s->bits_sent = (s->bits_sent+7) & ~7; -#endif -} - -/* =========================================================================== - * Copy a stored block, storing first the length and its - * one's complement if requested. - */ -local void copy_block(s, buf, len, header) - deflate_state *s; - charf *buf; /* the input data */ - unsigned len; /* its length */ - int header; /* true if block header must be written */ -{ - bi_windup(s); /* align on byte boundary */ - s->last_eob_len = 8; /* enough lookahead for inflate */ - - if (header) { - put_short(s, (ush)len); - put_short(s, (ush)~len); -#ifdef DEBUG - s->bits_sent += 2*16; -#endif - } -#ifdef DEBUG - s->bits_sent += (ulg)len<<3; -#endif - while (len--) { - put_byte(s, *buf++); - } -} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/trees.h cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/trees.h --- cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/trees.h 2014-11-18 11:13:10.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/trees.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,128 +0,0 @@ -/* header created automatically with -DGEN_TREES_H */ - -local const ct_data static_ltree[L_CODES+2] = { -{{ 12},{ 8}}, {{140},{ 8}}, {{ 76},{ 8}}, {{204},{ 8}}, {{ 44},{ 8}}, -{{172},{ 8}}, {{108},{ 8}}, {{236},{ 8}}, {{ 28},{ 8}}, {{156},{ 8}}, -{{ 92},{ 8}}, {{220},{ 8}}, {{ 60},{ 
8}}, {{188},{ 8}}, {{124},{ 8}}, -{{252},{ 8}}, {{ 2},{ 8}}, {{130},{ 8}}, {{ 66},{ 8}}, {{194},{ 8}}, -{{ 34},{ 8}}, {{162},{ 8}}, {{ 98},{ 8}}, {{226},{ 8}}, {{ 18},{ 8}}, -{{146},{ 8}}, {{ 82},{ 8}}, {{210},{ 8}}, {{ 50},{ 8}}, {{178},{ 8}}, -{{114},{ 8}}, {{242},{ 8}}, {{ 10},{ 8}}, {{138},{ 8}}, {{ 74},{ 8}}, -{{202},{ 8}}, {{ 42},{ 8}}, {{170},{ 8}}, {{106},{ 8}}, {{234},{ 8}}, -{{ 26},{ 8}}, {{154},{ 8}}, {{ 90},{ 8}}, {{218},{ 8}}, {{ 58},{ 8}}, -{{186},{ 8}}, {{122},{ 8}}, {{250},{ 8}}, {{ 6},{ 8}}, {{134},{ 8}}, -{{ 70},{ 8}}, {{198},{ 8}}, {{ 38},{ 8}}, {{166},{ 8}}, {{102},{ 8}}, -{{230},{ 8}}, {{ 22},{ 8}}, {{150},{ 8}}, {{ 86},{ 8}}, {{214},{ 8}}, -{{ 54},{ 8}}, {{182},{ 8}}, {{118},{ 8}}, {{246},{ 8}}, {{ 14},{ 8}}, -{{142},{ 8}}, {{ 78},{ 8}}, {{206},{ 8}}, {{ 46},{ 8}}, {{174},{ 8}}, -{{110},{ 8}}, {{238},{ 8}}, {{ 30},{ 8}}, {{158},{ 8}}, {{ 94},{ 8}}, -{{222},{ 8}}, {{ 62},{ 8}}, {{190},{ 8}}, {{126},{ 8}}, {{254},{ 8}}, -{{ 1},{ 8}}, {{129},{ 8}}, {{ 65},{ 8}}, {{193},{ 8}}, {{ 33},{ 8}}, -{{161},{ 8}}, {{ 97},{ 8}}, {{225},{ 8}}, {{ 17},{ 8}}, {{145},{ 8}}, -{{ 81},{ 8}}, {{209},{ 8}}, {{ 49},{ 8}}, {{177},{ 8}}, {{113},{ 8}}, -{{241},{ 8}}, {{ 9},{ 8}}, {{137},{ 8}}, {{ 73},{ 8}}, {{201},{ 8}}, -{{ 41},{ 8}}, {{169},{ 8}}, {{105},{ 8}}, {{233},{ 8}}, {{ 25},{ 8}}, -{{153},{ 8}}, {{ 89},{ 8}}, {{217},{ 8}}, {{ 57},{ 8}}, {{185},{ 8}}, -{{121},{ 8}}, {{249},{ 8}}, {{ 5},{ 8}}, {{133},{ 8}}, {{ 69},{ 8}}, -{{197},{ 8}}, {{ 37},{ 8}}, {{165},{ 8}}, {{101},{ 8}}, {{229},{ 8}}, -{{ 21},{ 8}}, {{149},{ 8}}, {{ 85},{ 8}}, {{213},{ 8}}, {{ 53},{ 8}}, -{{181},{ 8}}, {{117},{ 8}}, {{245},{ 8}}, {{ 13},{ 8}}, {{141},{ 8}}, -{{ 77},{ 8}}, {{205},{ 8}}, {{ 45},{ 8}}, {{173},{ 8}}, {{109},{ 8}}, -{{237},{ 8}}, {{ 29},{ 8}}, {{157},{ 8}}, {{ 93},{ 8}}, {{221},{ 8}}, -{{ 61},{ 8}}, {{189},{ 8}}, {{125},{ 8}}, {{253},{ 8}}, {{ 19},{ 9}}, -{{275},{ 9}}, {{147},{ 9}}, {{403},{ 9}}, {{ 83},{ 9}}, {{339},{ 9}}, -{{211},{ 9}}, {{467},{ 9}}, {{ 51},{ 9}}, {{307},{ 
9}}, {{179},{ 9}}, -{{435},{ 9}}, {{115},{ 9}}, {{371},{ 9}}, {{243},{ 9}}, {{499},{ 9}}, -{{ 11},{ 9}}, {{267},{ 9}}, {{139},{ 9}}, {{395},{ 9}}, {{ 75},{ 9}}, -{{331},{ 9}}, {{203},{ 9}}, {{459},{ 9}}, {{ 43},{ 9}}, {{299},{ 9}}, -{{171},{ 9}}, {{427},{ 9}}, {{107},{ 9}}, {{363},{ 9}}, {{235},{ 9}}, -{{491},{ 9}}, {{ 27},{ 9}}, {{283},{ 9}}, {{155},{ 9}}, {{411},{ 9}}, -{{ 91},{ 9}}, {{347},{ 9}}, {{219},{ 9}}, {{475},{ 9}}, {{ 59},{ 9}}, -{{315},{ 9}}, {{187},{ 9}}, {{443},{ 9}}, {{123},{ 9}}, {{379},{ 9}}, -{{251},{ 9}}, {{507},{ 9}}, {{ 7},{ 9}}, {{263},{ 9}}, {{135},{ 9}}, -{{391},{ 9}}, {{ 71},{ 9}}, {{327},{ 9}}, {{199},{ 9}}, {{455},{ 9}}, -{{ 39},{ 9}}, {{295},{ 9}}, {{167},{ 9}}, {{423},{ 9}}, {{103},{ 9}}, -{{359},{ 9}}, {{231},{ 9}}, {{487},{ 9}}, {{ 23},{ 9}}, {{279},{ 9}}, -{{151},{ 9}}, {{407},{ 9}}, {{ 87},{ 9}}, {{343},{ 9}}, {{215},{ 9}}, -{{471},{ 9}}, {{ 55},{ 9}}, {{311},{ 9}}, {{183},{ 9}}, {{439},{ 9}}, -{{119},{ 9}}, {{375},{ 9}}, {{247},{ 9}}, {{503},{ 9}}, {{ 15},{ 9}}, -{{271},{ 9}}, {{143},{ 9}}, {{399},{ 9}}, {{ 79},{ 9}}, {{335},{ 9}}, -{{207},{ 9}}, {{463},{ 9}}, {{ 47},{ 9}}, {{303},{ 9}}, {{175},{ 9}}, -{{431},{ 9}}, {{111},{ 9}}, {{367},{ 9}}, {{239},{ 9}}, {{495},{ 9}}, -{{ 31},{ 9}}, {{287},{ 9}}, {{159},{ 9}}, {{415},{ 9}}, {{ 95},{ 9}}, -{{351},{ 9}}, {{223},{ 9}}, {{479},{ 9}}, {{ 63},{ 9}}, {{319},{ 9}}, -{{191},{ 9}}, {{447},{ 9}}, {{127},{ 9}}, {{383},{ 9}}, {{255},{ 9}}, -{{511},{ 9}}, {{ 0},{ 7}}, {{ 64},{ 7}}, {{ 32},{ 7}}, {{ 96},{ 7}}, -{{ 16},{ 7}}, {{ 80},{ 7}}, {{ 48},{ 7}}, {{112},{ 7}}, {{ 8},{ 7}}, -{{ 72},{ 7}}, {{ 40},{ 7}}, {{104},{ 7}}, {{ 24},{ 7}}, {{ 88},{ 7}}, -{{ 56},{ 7}}, {{120},{ 7}}, {{ 4},{ 7}}, {{ 68},{ 7}}, {{ 36},{ 7}}, -{{100},{ 7}}, {{ 20},{ 7}}, {{ 84},{ 7}}, {{ 52},{ 7}}, {{116},{ 7}}, -{{ 3},{ 8}}, {{131},{ 8}}, {{ 67},{ 8}}, {{195},{ 8}}, {{ 35},{ 8}}, -{{163},{ 8}}, {{ 99},{ 8}}, {{227},{ 8}} -}; - -local const ct_data static_dtree[D_CODES] = { -{{ 0},{ 5}}, {{16},{ 5}}, {{ 8},{ 5}}, 
{{24},{ 5}}, {{ 4},{ 5}}, -{{20},{ 5}}, {{12},{ 5}}, {{28},{ 5}}, {{ 2},{ 5}}, {{18},{ 5}}, -{{10},{ 5}}, {{26},{ 5}}, {{ 6},{ 5}}, {{22},{ 5}}, {{14},{ 5}}, -{{30},{ 5}}, {{ 1},{ 5}}, {{17},{ 5}}, {{ 9},{ 5}}, {{25},{ 5}}, -{{ 5},{ 5}}, {{21},{ 5}}, {{13},{ 5}}, {{29},{ 5}}, {{ 3},{ 5}}, -{{19},{ 5}}, {{11},{ 5}}, {{27},{ 5}}, {{ 7},{ 5}}, {{23},{ 5}} -}; - -const uch ZLIB_INTERNAL _dist_code[DIST_CODE_LEN] = { - 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, - 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, -10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, -11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, -12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, -13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, -13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, -14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, -14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, -14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, -15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, -15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, -15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 0, 0, 16, 17, -18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, -23, 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, -24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, -26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, -26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, -27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, -27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 
28, -28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, -28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, -28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, -29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, -29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, -29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29 -}; - -const uch ZLIB_INTERNAL _length_code[MAX_MATCH-MIN_MATCH+1]= { - 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, -13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, -17, 17, 17, 17, 17, 17, 17, 17, 18, 18, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19, -19, 19, 19, 19, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, -21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 22, 22, 22, 22, -22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23, -23, 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, -24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, -25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, -25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, -26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, -26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, -27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28 -}; - -local const int base_length[LENGTH_CODES] = { -0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, -64, 80, 96, 112, 128, 160, 192, 224, 0 -}; - -local const int base_dist[D_CODES] = { - 0, 1, 2, 3, 4, 6, 8, 12, 16, 24, - 32, 48, 64, 96, 128, 192, 256, 384, 512, 768, - 1024, 1536, 2048, 3072, 4096, 6144, 8192, 12288, 16384, 24576 -}; - diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/uncompr.c 
cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/uncompr.c --- cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/uncompr.c 2014-11-18 11:13:10.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/uncompr.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,59 +0,0 @@ -/* uncompr.c -- decompress a memory buffer - * Copyright (C) 1995-2003, 2010 Jean-loup Gailly. - * For conditions of distribution and use, see copyright notice in zlib.h - */ - -/* @(#) $Id$ */ - -#define ZLIB_INTERNAL -#include "zlib.h" - -/* =========================================================================== - Decompresses the source buffer into the destination buffer. sourceLen is - the byte length of the source buffer. Upon entry, destLen is the total - size of the destination buffer, which must be large enough to hold the - entire uncompressed data. (The size of the uncompressed data must have - been saved previously by the compressor and transmitted to the decompressor - by some mechanism outside the scope of this compression library.) - Upon exit, destLen is the actual size of the compressed buffer. - - uncompress returns Z_OK if success, Z_MEM_ERROR if there was not - enough memory, Z_BUF_ERROR if there was not enough room in the output - buffer, or Z_DATA_ERROR if the input data was corrupted. 
-*/ -int ZEXPORT uncompress (dest, destLen, source, sourceLen) - Bytef *dest; - uLongf *destLen; - const Bytef *source; - uLong sourceLen; -{ - z_stream stream; - int err; - - stream.next_in = (Bytef*)source; - stream.avail_in = (uInt)sourceLen; - /* Check for source > 64K on 16-bit machine: */ - if ((uLong)stream.avail_in != sourceLen) return Z_BUF_ERROR; - - stream.next_out = dest; - stream.avail_out = (uInt)*destLen; - if ((uLong)stream.avail_out != *destLen) return Z_BUF_ERROR; - - stream.zalloc = (alloc_func)0; - stream.zfree = (free_func)0; - - err = inflateInit(&stream); - if (err != Z_OK) return err; - - err = inflate(&stream, Z_FINISH); - if (err != Z_STREAM_END) { - inflateEnd(&stream); - if (err == Z_NEED_DICT || (err == Z_BUF_ERROR && stream.avail_in == 0)) - return Z_DATA_ERROR; - return err; - } - *destLen = stream.total_out; - - err = inflateEnd(&stream); - return err; -} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/zconf.h cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/zconf.h --- cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/zconf.h 2014-11-18 11:13:10.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/zconf.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,428 +0,0 @@ -/* zconf.h -- configuration of the zlib compression library - * Copyright (C) 1995-2010 Jean-loup Gailly. - * For conditions of distribution and use, see copyright notice in zlib.h - */ - -/* @(#) $Id$ */ - -#ifndef ZCONF_H -#define ZCONF_H - -/* - * If you *really* need a unique prefix for all types and library functions, - * compile with -DZ_PREFIX. The "standard" zlib should be compiled without it. - * Even better than compiling with -DZ_PREFIX would be to use configure to set - * this permanently in zconf.h using "./configure --zprefix". 
- */ -#ifdef Z_PREFIX /* may be set to #if 1 by ./configure */ - -/* all linked symbols */ -# define _dist_code z__dist_code -# define _length_code z__length_code -# define _tr_align z__tr_align -# define _tr_flush_block z__tr_flush_block -# define _tr_init z__tr_init -# define _tr_stored_block z__tr_stored_block -# define _tr_tally z__tr_tally -# define adler32 z_adler32 -# define adler32_combine z_adler32_combine -# define adler32_combine64 z_adler32_combine64 -# define compress z_compress -# define compress2 z_compress2 -# define compressBound z_compressBound -# define crc32 z_crc32 -# define crc32_combine z_crc32_combine -# define crc32_combine64 z_crc32_combine64 -# define deflate z_deflate -# define deflateBound z_deflateBound -# define deflateCopy z_deflateCopy -# define deflateEnd z_deflateEnd -# define deflateInit2_ z_deflateInit2_ -# define deflateInit_ z_deflateInit_ -# define deflateParams z_deflateParams -# define deflatePrime z_deflatePrime -# define deflateReset z_deflateReset -# define deflateSetDictionary z_deflateSetDictionary -# define deflateSetHeader z_deflateSetHeader -# define deflateTune z_deflateTune -# define deflate_copyright z_deflate_copyright -# define get_crc_table z_get_crc_table -# define gz_error z_gz_error -# define gz_intmax z_gz_intmax -# define gz_strwinerror z_gz_strwinerror -# define gzbuffer z_gzbuffer -# define gzclearerr z_gzclearerr -# define gzclose z_gzclose -# define gzclose_r z_gzclose_r -# define gzclose_w z_gzclose_w -# define gzdirect z_gzdirect -# define gzdopen z_gzdopen -# define gzeof z_gzeof -# define gzerror z_gzerror -# define gzflush z_gzflush -# define gzgetc z_gzgetc -# define gzgets z_gzgets -# define gzoffset z_gzoffset -# define gzoffset64 z_gzoffset64 -# define gzopen z_gzopen -# define gzopen64 z_gzopen64 -# define gzprintf z_gzprintf -# define gzputc z_gzputc -# define gzputs z_gzputs -# define gzread z_gzread -# define gzrewind z_gzrewind -# define gzseek z_gzseek -# define gzseek64 z_gzseek64 -# 
define gzsetparams z_gzsetparams -# define gztell z_gztell -# define gztell64 z_gztell64 -# define gzungetc z_gzungetc -# define gzwrite z_gzwrite -# define inflate z_inflate -# define inflateBack z_inflateBack -# define inflateBackEnd z_inflateBackEnd -# define inflateBackInit_ z_inflateBackInit_ -# define inflateCopy z_inflateCopy -# define inflateEnd z_inflateEnd -# define inflateGetHeader z_inflateGetHeader -# define inflateInit2_ z_inflateInit2_ -# define inflateInit_ z_inflateInit_ -# define inflateMark z_inflateMark -# define inflatePrime z_inflatePrime -# define inflateReset z_inflateReset -# define inflateReset2 z_inflateReset2 -# define inflateSetDictionary z_inflateSetDictionary -# define inflateSync z_inflateSync -# define inflateSyncPoint z_inflateSyncPoint -# define inflateUndermine z_inflateUndermine -# define inflate_copyright z_inflate_copyright -# define inflate_fast z_inflate_fast -# define inflate_table z_inflate_table -# define uncompress z_uncompress -# define zError z_zError -# define zcalloc z_zcalloc -# define zcfree z_zcfree -# define zlibCompileFlags z_zlibCompileFlags -# define zlibVersion z_zlibVersion - -/* all zlib typedefs in zlib.h and zconf.h */ -# define Byte z_Byte -# define Bytef z_Bytef -# define alloc_func z_alloc_func -# define charf z_charf -# define free_func z_free_func -# define gzFile z_gzFile -# define gz_header z_gz_header -# define gz_headerp z_gz_headerp -# define in_func z_in_func -# define intf z_intf -# define out_func z_out_func -# define uInt z_uInt -# define uIntf z_uIntf -# define uLong z_uLong -# define uLongf z_uLongf -# define voidp z_voidp -# define voidpc z_voidpc -# define voidpf z_voidpf - -/* all zlib structs in zlib.h and zconf.h */ -# define gz_header_s z_gz_header_s -# define internal_state z_internal_state - -#endif - -#if defined(__MSDOS__) && !defined(MSDOS) -# define MSDOS -#endif -#if (defined(OS_2) || defined(__OS2__)) && !defined(OS2) -# define OS2 -#endif -#if defined(_WINDOWS) && 
!defined(WINDOWS) -# define WINDOWS -#endif -#if defined(_WIN32) || defined(_WIN32_WCE) || defined(__WIN32__) -# ifndef WIN32 -# define WIN32 -# endif -#endif -#if (defined(MSDOS) || defined(OS2) || defined(WINDOWS)) && !defined(WIN32) -# if !defined(__GNUC__) && !defined(__FLAT__) && !defined(__386__) -# ifndef SYS16BIT -# define SYS16BIT -# endif -# endif -#endif - -/* - * Compile with -DMAXSEG_64K if the alloc function cannot allocate more - * than 64k bytes at a time (needed on systems with 16-bit int). - */ -#ifdef SYS16BIT -# define MAXSEG_64K -#endif -#ifdef MSDOS -# define UNALIGNED_OK -#endif - -#ifdef __STDC_VERSION__ -# ifndef STDC -# define STDC -# endif -# if __STDC_VERSION__ >= 199901L -# ifndef STDC99 -# define STDC99 -# endif -# endif -#endif -#if !defined(STDC) && (defined(__STDC__) || defined(__cplusplus)) -# define STDC -#endif -#if !defined(STDC) && (defined(__GNUC__) || defined(__BORLANDC__)) -# define STDC -#endif -#if !defined(STDC) && (defined(MSDOS) || defined(WINDOWS) || defined(WIN32)) -# define STDC -#endif -#if !defined(STDC) && (defined(OS2) || defined(__HOS_AIX__)) -# define STDC -#endif - -#if defined(__OS400__) && !defined(STDC) /* iSeries (formerly AS/400). */ -# define STDC -#endif - -#ifndef STDC -# ifndef const /* cannot use !defined(STDC) && !defined(const) on Mac */ -# define const /* note: need a more gentle solution here */ -# endif -#endif - -/* Some Mac compilers merge all .h files incorrectly: */ -#if defined(__MWERKS__)||defined(applec)||defined(THINK_C)||defined(__SC__) -# define NO_DUMMY_DECL -#endif - -/* Maximum value for memLevel in deflateInit2 */ -#ifndef MAX_MEM_LEVEL -# ifdef MAXSEG_64K -# define MAX_MEM_LEVEL 8 -# else -# define MAX_MEM_LEVEL 9 -# endif -#endif - -/* Maximum value for windowBits in deflateInit2 and inflateInit2. - * WARNING: reducing MAX_WBITS makes minigzip unable to extract .gz files - * created by gzip. (Files created by minigzip can still be extracted by - * gzip.) 
- */ -#ifndef MAX_WBITS -# define MAX_WBITS 15 /* 32K LZ77 window */ -#endif - -/* The memory requirements for deflate are (in bytes): - (1 << (windowBits+2)) + (1 << (memLevel+9)) - that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values) - plus a few kilobytes for small objects. For example, if you want to reduce - the default memory requirements from 256K to 128K, compile with - make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7" - Of course this will generally degrade compression (there's no free lunch). - - The memory requirements for inflate are (in bytes) 1 << windowBits - that is, 32K for windowBits=15 (default value) plus a few kilobytes - for small objects. -*/ - - /* Type declarations */ - -#ifndef OF /* function prototypes */ -# ifdef STDC -# define OF(args) args -# else -# define OF(args) () -# endif -#endif - -/* The following definitions for FAR are needed only for MSDOS mixed - * model programming (small or medium model with some far allocations). - * This was tested only with MSC; for other MSDOS compilers you may have - * to define NO_MEMCPY in zutil.h. If you don't need the mixed model, - * just define FAR to be empty. - */ -#ifdef SYS16BIT -# if defined(M_I86SM) || defined(M_I86MM) - /* MSC small or medium model */ -# define SMALL_MEDIUM -# ifdef _MSC_VER -# define FAR _far -# else -# define FAR far -# endif -# endif -# if (defined(__SMALL__) || defined(__MEDIUM__)) - /* Turbo C small or medium model */ -# define SMALL_MEDIUM -# ifdef __BORLANDC__ -# define FAR _far -# else -# define FAR far -# endif -# endif -#endif - -#if defined(WINDOWS) || defined(WIN32) - /* If building or using zlib as a DLL, define ZLIB_DLL. - * This is not mandatory, but it offers a little performance increase. 
- */ -# ifdef ZLIB_DLL -# if defined(WIN32) && (!defined(__BORLANDC__) || (__BORLANDC__ >= 0x500)) -# ifdef ZLIB_INTERNAL -# define ZEXTERN extern __declspec(dllexport) -# else -# define ZEXTERN extern __declspec(dllimport) -# endif -# endif -# endif /* ZLIB_DLL */ - /* If building or using zlib with the WINAPI/WINAPIV calling convention, - * define ZLIB_WINAPI. - * Caution: the standard ZLIB1.DLL is NOT compiled using ZLIB_WINAPI. - */ -# ifdef ZLIB_WINAPI -# ifdef FAR -# undef FAR -# endif -# include - /* No need for _export, use ZLIB.DEF instead. */ - /* For complete Windows compatibility, use WINAPI, not __stdcall. */ -# define ZEXPORT WINAPI -# ifdef WIN32 -# define ZEXPORTVA WINAPIV -# else -# define ZEXPORTVA FAR CDECL -# endif -# endif -#endif - -#if defined (__BEOS__) -# ifdef ZLIB_DLL -# ifdef ZLIB_INTERNAL -# define ZEXPORT __declspec(dllexport) -# define ZEXPORTVA __declspec(dllexport) -# else -# define ZEXPORT __declspec(dllimport) -# define ZEXPORTVA __declspec(dllimport) -# endif -# endif -#endif - -#ifndef ZEXTERN -# define ZEXTERN extern -#endif -#ifndef ZEXPORT -# define ZEXPORT -#endif -#ifndef ZEXPORTVA -# define ZEXPORTVA -#endif - -#ifndef FAR -# define FAR -#endif - -#if !defined(__MACTYPES__) -typedef unsigned char Byte; /* 8 bits */ -#endif -typedef unsigned int uInt; /* 16 bits or more */ -typedef unsigned long uLong; /* 32 bits or more */ - -#ifdef SMALL_MEDIUM - /* Borland C/C++ and some old MSC versions ignore FAR inside typedef */ -# define Bytef Byte FAR -#else - typedef Byte FAR Bytef; -#endif -typedef char FAR charf; -typedef int FAR intf; -typedef uInt FAR uIntf; -typedef uLong FAR uLongf; - -#ifdef STDC - typedef void const *voidpc; - typedef void FAR *voidpf; - typedef void *voidp; -#else - typedef Byte const *voidpc; - typedef Byte FAR *voidpf; - typedef Byte *voidp; -#endif - -#ifdef HAVE_UNISTD_H /* may be set to #if 1 by ./configure */ -# define Z_HAVE_UNISTD_H -#endif - -#ifdef STDC -# include /* for off_t */ -#endif - -/* a 
little trick to accommodate both "#define _LARGEFILE64_SOURCE" and - * "#define _LARGEFILE64_SOURCE 1" as requesting 64-bit operations, (even - * though the former does not conform to the LFS document), but considering - * both "#undef _LARGEFILE64_SOURCE" and "#define _LARGEFILE64_SOURCE 0" as - * equivalently requesting no 64-bit operations - */ -#if -_LARGEFILE64_SOURCE - -1 == 1 -# undef _LARGEFILE64_SOURCE -#endif - -#if defined(Z_HAVE_UNISTD_H) || defined(_LARGEFILE64_SOURCE) -# include /* for SEEK_* and off_t */ -# ifdef VMS -# include /* for off_t */ -# endif -# ifndef z_off_t -# define z_off_t off_t -# endif -#endif - -#ifndef SEEK_SET -# define SEEK_SET 0 /* Seek from beginning of file. */ -# define SEEK_CUR 1 /* Seek from current position. */ -# define SEEK_END 2 /* Set file pointer to EOF plus "offset" */ -#endif - -#ifndef z_off_t -# define z_off_t long -#endif - -#if defined(_LARGEFILE64_SOURCE) && _LFS64_LARGEFILE-0 -# define z_off64_t off64_t -#else -# define z_off64_t z_off_t -#endif - -#if defined(__OS400__) -# define NO_vsnprintf -#endif - -#if defined(__MVS__) -# define NO_vsnprintf -#endif - -/* MVS linker does not support external names larger than 8 bytes */ -#if defined(__MVS__) - #pragma map(deflateInit_,"DEIN") - #pragma map(deflateInit2_,"DEIN2") - #pragma map(deflateEnd,"DEEND") - #pragma map(deflateBound,"DEBND") - #pragma map(inflateInit_,"ININ") - #pragma map(inflateInit2_,"ININ2") - #pragma map(inflateEnd,"INEND") - #pragma map(inflateSync,"INSY") - #pragma map(inflateSetDictionary,"INSEDI") - #pragma map(compressBound,"CMBND") - #pragma map(inflate_table,"INTABL") - #pragma map(inflate_fast,"INFA") - #pragma map(inflate_copyright,"INCOPY") -#endif - -#endif /* ZCONF_H */ diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/zlib.h cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/zlib.h --- cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/zlib.h 2014-11-18 11:13:10.000000000 +0000 +++ 
cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/zlib.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,1613 +0,0 @@ -/* zlib.h -- interface of the 'zlib' general purpose compression library - version 1.2.5, April 19th, 2010 - - Copyright (C) 1995-2010 Jean-loup Gailly and Mark Adler - - This software is provided 'as-is', without any express or implied - warranty. In no event will the authors be held liable for any damages - arising from the use of this software. - - Permission is granted to anyone to use this software for any purpose, - including commercial applications, and to alter it and redistribute it - freely, subject to the following restrictions: - - 1. The origin of this software must not be misrepresented; you must not - claim that you wrote the original software. If you use this software - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - 2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original software. - 3. This notice may not be removed or altered from any source distribution. - - Jean-loup Gailly Mark Adler - jloup@gzip.org madler@alumni.caltech.edu - - - The data format used by the zlib library is described by RFCs (Request for - Comments) 1950 to 1952 in the files http://www.ietf.org/rfc/rfc1950.txt - (zlib format), rfc1951.txt (deflate format) and rfc1952.txt (gzip format). -*/ - -#ifndef ZLIB_H -#define ZLIB_H - -#include "zconf.h" - -#ifdef __cplusplus -extern "C" { -#endif - -#define ZLIB_VERSION "1.2.5" -#define ZLIB_VERNUM 0x1250 -#define ZLIB_VER_MAJOR 1 -#define ZLIB_VER_MINOR 2 -#define ZLIB_VER_REVISION 5 -#define ZLIB_VER_SUBREVISION 0 - -/* - The 'zlib' compression library provides in-memory compression and - decompression functions, including integrity checks of the uncompressed data. 
- This version of the library supports only one compression method (deflation) - but other algorithms will be added later and will have the same stream - interface. - - Compression can be done in a single step if the buffers are large enough, - or can be done by repeated calls of the compression function. In the latter - case, the application must provide more input and/or consume the output - (providing more output space) before each call. - - The compressed data format used by default by the in-memory functions is - the zlib format, which is a zlib wrapper documented in RFC 1950, wrapped - around a deflate stream, which is itself documented in RFC 1951. - - The library also supports reading and writing files in gzip (.gz) format - with an interface similar to that of stdio using the functions that start - with "gz". The gzip format is different from the zlib format. gzip is a - gzip wrapper, documented in RFC 1952, wrapped around a deflate stream. - - This library can optionally read and write gzip streams in memory as well. - - The zlib format was designed to be compact and fast for use in memory - and on communications channels. The gzip format was designed for single- - file compression on file systems, has a larger header than zlib to maintain - directory information, and uses a different, slower check method than zlib. - - The library does not install any signal handler. The decoder checks - the consistency of the compressed data, so the library should never crash - even in case of corrupted input. 
-*/ - -typedef voidpf (*alloc_func) OF((voidpf opaque, uInt items, uInt size)); -typedef void (*free_func) OF((voidpf opaque, voidpf address)); - -struct internal_state; - -typedef struct z_stream_s { - Bytef *next_in; /* next input byte */ - uInt avail_in; /* number of bytes available at next_in */ - uLong total_in; /* total nb of input bytes read so far */ - - Bytef *next_out; /* next output byte should be put there */ - uInt avail_out; /* remaining free space at next_out */ - uLong total_out; /* total nb of bytes output so far */ - - char *msg; /* last error message, NULL if no error */ - struct internal_state FAR *state; /* not visible by applications */ - - alloc_func zalloc; /* used to allocate the internal state */ - free_func zfree; /* used to free the internal state */ - voidpf opaque; /* private data object passed to zalloc and zfree */ - - int data_type; /* best guess about the data type: binary or text */ - uLong adler; /* adler32 value of the uncompressed data */ - uLong reserved; /* reserved for future use */ -} z_stream; - -typedef z_stream FAR *z_streamp; - -/* - gzip header information passed to and from zlib routines. See RFC 1952 - for more details on the meanings of these fields. 
-*/ -typedef struct gz_header_s { - int text; /* true if compressed data believed to be text */ - uLong time; /* modification time */ - int xflags; /* extra flags (not used when writing a gzip file) */ - int os; /* operating system */ - Bytef *extra; /* pointer to extra field or Z_NULL if none */ - uInt extra_len; /* extra field length (valid if extra != Z_NULL) */ - uInt extra_max; /* space at extra (only when reading header) */ - Bytef *name; /* pointer to zero-terminated file name or Z_NULL */ - uInt name_max; /* space at name (only when reading header) */ - Bytef *comment; /* pointer to zero-terminated comment or Z_NULL */ - uInt comm_max; /* space at comment (only when reading header) */ - int hcrc; /* true if there was or will be a header crc */ - int done; /* true when done reading gzip header (not used - when writing a gzip file) */ -} gz_header; - -typedef gz_header FAR *gz_headerp; - -/* - The application must update next_in and avail_in when avail_in has dropped - to zero. It must update next_out and avail_out when avail_out has dropped - to zero. The application must initialize zalloc, zfree and opaque before - calling the init function. All other fields are set by the compression - library and must not be updated by the application. - - The opaque value provided by the application will be passed as the first - parameter for calls of zalloc and zfree. This can be useful for custom - memory management. The compression library attaches no meaning to the - opaque value. - - zalloc must return Z_NULL if there is not enough memory for the object. - If zlib is used in a multi-threaded application, zalloc and zfree must be - thread safe. - - On 16-bit systems, the functions zalloc and zfree must be able to allocate - exactly 65536 bytes, but will not be required to allocate more than this if - the symbol MAXSEG_64K is defined (see zconf.h). 
WARNING: On MSDOS, pointers - returned by zalloc for objects of exactly 65536 bytes *must* have their - offset normalized to zero. The default allocation function provided by this - library ensures this (see zutil.c). To reduce memory requirements and avoid - any allocation of 64K objects, at the expense of compression ratio, compile - the library with -DMAX_WBITS=14 (see zconf.h). - - The fields total_in and total_out can be used for statistics or progress - reports. After compression, total_in holds the total size of the - uncompressed data and may be saved for use in the decompressor (particularly - if the decompressor wants to decompress everything in a single step). -*/ - - /* constants */ - -#define Z_NO_FLUSH 0 -#define Z_PARTIAL_FLUSH 1 -#define Z_SYNC_FLUSH 2 -#define Z_FULL_FLUSH 3 -#define Z_FINISH 4 -#define Z_BLOCK 5 -#define Z_TREES 6 -/* Allowed flush values; see deflate() and inflate() below for details */ - -#define Z_OK 0 -#define Z_STREAM_END 1 -#define Z_NEED_DICT 2 -#define Z_ERRNO (-1) -#define Z_STREAM_ERROR (-2) -#define Z_DATA_ERROR (-3) -#define Z_MEM_ERROR (-4) -#define Z_BUF_ERROR (-5) -#define Z_VERSION_ERROR (-6) -/* Return codes for the compression/decompression functions. Negative values - * are errors, positive values are used for special but normal events. 
- */ - -#define Z_NO_COMPRESSION 0 -#define Z_BEST_SPEED 1 -#define Z_BEST_COMPRESSION 9 -#define Z_DEFAULT_COMPRESSION (-1) -/* compression levels */ - -#define Z_FILTERED 1 -#define Z_HUFFMAN_ONLY 2 -#define Z_RLE 3 -#define Z_FIXED 4 -#define Z_DEFAULT_STRATEGY 0 -/* compression strategy; see deflateInit2() below for details */ - -#define Z_BINARY 0 -#define Z_TEXT 1 -#define Z_ASCII Z_TEXT /* for compatibility with 1.2.2 and earlier */ -#define Z_UNKNOWN 2 -/* Possible values of the data_type field (though see inflate()) */ - -#define Z_DEFLATED 8 -/* The deflate compression method (the only one supported in this version) */ - -#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */ - -#define zlib_version zlibVersion() -/* for compatibility with versions < 1.0.2 */ - - - /* basic functions */ - -ZEXTERN const char * ZEXPORT zlibVersion OF((void)); -/* The application can compare zlibVersion and ZLIB_VERSION for consistency. - If the first character differs, the library code actually used is not - compatible with the zlib.h header file used by the application. This check - is automatically made by deflateInit and inflateInit. - */ - -/* -ZEXTERN int ZEXPORT deflateInit OF((z_streamp strm, int level)); - - Initializes the internal stream state for compression. The fields - zalloc, zfree and opaque must be initialized before by the caller. If - zalloc and zfree are set to Z_NULL, deflateInit updates them to use default - allocation functions. - - The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9: - 1 gives best speed, 9 gives best compression, 0 gives no compression at all - (the input data is simply copied a block at a time). Z_DEFAULT_COMPRESSION - requests a default compromise between speed and compression (currently - equivalent to level 6). 
- - deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough - memory, Z_STREAM_ERROR if level is not a valid compression level, or - Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible - with the version assumed by the caller (ZLIB_VERSION). msg is set to null - if there is no error message. deflateInit does not perform any compression: - this will be done by deflate(). -*/ - - -ZEXTERN int ZEXPORT deflate OF((z_streamp strm, int flush)); -/* - deflate compresses as much data as possible, and stops when the input - buffer becomes empty or the output buffer becomes full. It may introduce - some output latency (reading input without producing any output) except when - forced to flush. - - The detailed semantics are as follows. deflate performs one or both of the - following actions: - - - Compress more input starting at next_in and update next_in and avail_in - accordingly. If not all input can be processed (because there is not - enough room in the output buffer), next_in and avail_in are updated and - processing will resume at this point for the next call of deflate(). - - - Provide more output starting at next_out and update next_out and avail_out - accordingly. This action is forced if the parameter flush is non zero. - Forcing flush frequently degrades the compression ratio, so this parameter - should be set only when necessary (in interactive applications). Some - output may be provided even if flush is not set. - - Before the call of deflate(), the application should ensure that at least - one of the actions is possible, by providing more input and/or consuming more - output, and updating avail_in or avail_out accordingly; avail_out should - never be zero before the call. The application can consume the compressed - output when it wants, for example when the output buffer is full (avail_out - == 0), or after each call of deflate(). 
If deflate returns Z_OK and with - zero avail_out, it must be called again after making room in the output - buffer because there might be more output pending. - - Normally the parameter flush is set to Z_NO_FLUSH, which allows deflate to - decide how much data to accumulate before producing output, in order to - maximize compression. - - If the parameter flush is set to Z_SYNC_FLUSH, all pending output is - flushed to the output buffer and the output is aligned on a byte boundary, so - that the decompressor can get all input data available so far. (In - particular avail_in is zero after the call if enough output space has been - provided before the call.) Flushing may degrade compression for some - compression algorithms and so it should be used only when necessary. This - completes the current deflate block and follows it with an empty stored block - that is three bits plus filler bits to the next byte, followed by four bytes - (00 00 ff ff). - - If flush is set to Z_PARTIAL_FLUSH, all pending output is flushed to the - output buffer, but the output is not aligned to a byte boundary. All of the - input data so far will be available to the decompressor, as for Z_SYNC_FLUSH. - This completes the current deflate block and follows it with an empty fixed - codes block that is 10 bits long. This assures that enough bytes are output - in order for the decompressor to finish the block before the empty fixed code - block. - - If flush is set to Z_BLOCK, a deflate block is completed and emitted, as - for Z_SYNC_FLUSH, but the output is not aligned on a byte boundary, and up to - seven bits of the current block are held to be written as the next byte after - the next deflate block is completed. In this case, the decompressor may not - be provided enough bits at this point in order to complete decompression of - the data provided so far to the compressor. It may need to wait for the next - block to be emitted. 
This is for advanced applications that need to control - the emission of deflate blocks. - - If flush is set to Z_FULL_FLUSH, all output is flushed as with - Z_SYNC_FLUSH, and the compression state is reset so that decompression can - restart from this point if previous compressed data has been damaged or if - random access is desired. Using Z_FULL_FLUSH too often can seriously degrade - compression. - - If deflate returns with avail_out == 0, this function must be called again - with the same value of the flush parameter and more output space (updated - avail_out), until the flush is complete (deflate returns with non-zero - avail_out). In the case of a Z_FULL_FLUSH or Z_SYNC_FLUSH, make sure that - avail_out is greater than six to avoid repeated flush markers due to - avail_out == 0 on return. - - If the parameter flush is set to Z_FINISH, pending input is processed, - pending output is flushed and deflate returns with Z_STREAM_END if there was - enough output space; if deflate returns with Z_OK, this function must be - called again with Z_FINISH and more output space (updated avail_out) but no - more input data, until it returns with Z_STREAM_END or an error. After - deflate has returned Z_STREAM_END, the only possible operations on the stream - are deflateReset or deflateEnd. - - Z_FINISH can be used immediately after deflateInit if all the compression - is to be done in a single step. In this case, avail_out must be at least the - value returned by deflateBound (see below). If deflate does not return - Z_STREAM_END, then it must be called again as described above. - - deflate() sets strm->adler to the adler32 checksum of all input read - so far (that is, total_in bytes). - - deflate() may update strm->data_type if it can make a good guess about - the input data type (Z_BINARY or Z_TEXT). In doubt, the data is considered - binary. This field is only for information purposes and does not affect the - compression algorithm in any manner. 
- - deflate() returns Z_OK if some progress has been made (more input - processed or more output produced), Z_STREAM_END if all input has been - consumed and all output has been produced (only when flush is set to - Z_FINISH), Z_STREAM_ERROR if the stream state was inconsistent (for example - if next_in or next_out was Z_NULL), Z_BUF_ERROR if no progress is possible - (for example avail_in or avail_out was zero). Note that Z_BUF_ERROR is not - fatal, and deflate() can be called again with more input and more output - space to continue compressing. -*/ - - -ZEXTERN int ZEXPORT deflateEnd OF((z_streamp strm)); -/* - All dynamically allocated data structures for this stream are freed. - This function discards any unprocessed input and does not flush any pending - output. - - deflateEnd returns Z_OK if success, Z_STREAM_ERROR if the - stream state was inconsistent, Z_DATA_ERROR if the stream was freed - prematurely (some input or output was discarded). In the error case, msg - may be set but then points to a static string (which must not be - deallocated). -*/ - - -/* -ZEXTERN int ZEXPORT inflateInit OF((z_streamp strm)); - - Initializes the internal stream state for decompression. The fields - next_in, avail_in, zalloc, zfree and opaque must be initialized before by - the caller. If next_in is not Z_NULL and avail_in is large enough (the - exact value depends on the compression method), inflateInit determines the - compression method from the zlib header and allocates all data structures - accordingly; otherwise the allocation will be deferred to the first call of - inflate. If zalloc and zfree are set to Z_NULL, inflateInit updates them to - use default allocation functions. - - inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough - memory, Z_VERSION_ERROR if the zlib library version is incompatible with the - version assumed by the caller, or Z_STREAM_ERROR if the parameters are - invalid, such as a null pointer to the structure. 
msg is set to null if - there is no error message. inflateInit does not perform any decompression - apart from possibly reading the zlib header if present: actual decompression - will be done by inflate(). (So next_in and avail_in may be modified, but - next_out and avail_out are unused and unchanged.) The current implementation - of inflateInit() does not process any header information -- that is deferred - until inflate() is called. -*/ - - -ZEXTERN int ZEXPORT inflate OF((z_streamp strm, int flush)); -/* - inflate decompresses as much data as possible, and stops when the input - buffer becomes empty or the output buffer becomes full. It may introduce - some output latency (reading input without producing any output) except when - forced to flush. - - The detailed semantics are as follows. inflate performs one or both of the - following actions: - - - Decompress more input starting at next_in and update next_in and avail_in - accordingly. If not all input can be processed (because there is not - enough room in the output buffer), next_in is updated and processing will - resume at this point for the next call of inflate(). - - - Provide more output starting at next_out and update next_out and avail_out - accordingly. inflate() provides as much output as possible, until there is - no more input data or no more space in the output buffer (see below about - the flush parameter). - - Before the call of inflate(), the application should ensure that at least - one of the actions is possible, by providing more input and/or consuming more - output, and updating the next_* and avail_* values accordingly. The - application can consume the uncompressed output when it wants, for example - when the output buffer is full (avail_out == 0), or after each call of - inflate(). If inflate returns Z_OK and with zero avail_out, it must be - called again after making room in the output buffer because there might be - more output pending. 
- - The flush parameter of inflate() can be Z_NO_FLUSH, Z_SYNC_FLUSH, Z_FINISH, - Z_BLOCK, or Z_TREES. Z_SYNC_FLUSH requests that inflate() flush as much - output as possible to the output buffer. Z_BLOCK requests that inflate() - stop if and when it gets to the next deflate block boundary. When decoding - the zlib or gzip format, this will cause inflate() to return immediately - after the header and before the first block. When doing a raw inflate, - inflate() will go ahead and process the first block, and will return when it - gets to the end of that block, or when it runs out of data. - - The Z_BLOCK option assists in appending to or combining deflate streams. - Also to assist in this, on return inflate() will set strm->data_type to the - number of unused bits in the last byte taken from strm->next_in, plus 64 if - inflate() is currently decoding the last block in the deflate stream, plus - 128 if inflate() returned immediately after decoding an end-of-block code or - decoding the complete header up to just before the first byte of the deflate - stream. The end-of-block will not be indicated until all of the uncompressed - data from that block has been written to strm->next_out. The number of - unused bits may in general be greater than seven, except when bit 7 of - data_type is set, in which case the number of unused bits will be less than - eight. data_type is set as noted here every time inflate() returns for all - flush options, and so can be used to determine the amount of currently - consumed input in bits. - - The Z_TREES option behaves as Z_BLOCK does, but it also returns when the - end of each deflate block header is reached, before any actual data in that - block is decoded. This allows the caller to determine the length of the - deflate block header for later use in random access within a deflate block. - 256 is added to the value of strm->data_type when inflate() returns - immediately after reaching the end of the deflate block header. 
- - inflate() should normally be called until it returns Z_STREAM_END or an - error. However if all decompression is to be performed in a single step (a - single call of inflate), the parameter flush should be set to Z_FINISH. In - this case all pending input is processed and all pending output is flushed; - avail_out must be large enough to hold all the uncompressed data. (The size - of the uncompressed data may have been saved by the compressor for this - purpose.) The next operation on this stream must be inflateEnd to deallocate - the decompression state. The use of Z_FINISH is never required, but can be - used to inform inflate that a faster approach may be used for the single - inflate() call. - - In this implementation, inflate() always flushes as much output as - possible to the output buffer, and always uses the faster approach on the - first call. So the only effect of the flush parameter in this implementation - is on the return value of inflate(), as noted below, or when it returns early - because Z_BLOCK or Z_TREES is used. - - If a preset dictionary is needed after this call (see inflateSetDictionary - below), inflate sets strm->adler to the adler32 checksum of the dictionary - chosen by the compressor and returns Z_NEED_DICT; otherwise it sets - strm->adler to the adler32 checksum of all output produced so far (that is, - total_out bytes) and returns Z_OK, Z_STREAM_END or an error code as described - below. At the end of the stream, inflate() checks that its computed adler32 - checksum is equal to that saved by the compressor and returns Z_STREAM_END - only if the checksum is correct. - - inflate() can decompress and check either zlib-wrapped or gzip-wrapped - deflate data. The header type is detected automatically, if requested when - initializing with inflateInit2(). 
Any information contained in the gzip - header is not retained, so applications that need that information should - instead use raw inflate, see inflateInit2() below, or inflateBack() and - perform their own processing of the gzip header and trailer. - - inflate() returns Z_OK if some progress has been made (more input processed - or more output produced), Z_STREAM_END if the end of the compressed data has - been reached and all uncompressed output has been produced, Z_NEED_DICT if a - preset dictionary is needed at this point, Z_DATA_ERROR if the input data was - corrupted (input stream not conforming to the zlib format or incorrect check - value), Z_STREAM_ERROR if the stream structure was inconsistent (for example - next_in or next_out was Z_NULL), Z_MEM_ERROR if there was not enough memory, - Z_BUF_ERROR if no progress is possible or if there was not enough room in the - output buffer when Z_FINISH is used. Note that Z_BUF_ERROR is not fatal, and - inflate() can be called again with more input and more output space to - continue decompressing. If Z_DATA_ERROR is returned, the application may - then call inflateSync() to look for a good compression block if a partial - recovery of the data is desired. -*/ - - -ZEXTERN int ZEXPORT inflateEnd OF((z_streamp strm)); -/* - All dynamically allocated data structures for this stream are freed. - This function discards any unprocessed input and does not flush any pending - output. - - inflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state - was inconsistent. In the error case, msg may be set but then points to a - static string (which must not be deallocated). -*/ - - - /* Advanced functions */ - -/* - The following functions are needed only in some special applications. -*/ - -/* -ZEXTERN int ZEXPORT deflateInit2 OF((z_streamp strm, - int level, - int method, - int windowBits, - int memLevel, - int strategy)); - - This is another version of deflateInit with more compression options. 
The - fields next_in, zalloc, zfree and opaque must be initialized before by the - caller. - - The method parameter is the compression method. It must be Z_DEFLATED in - this version of the library. - - The windowBits parameter is the base two logarithm of the window size - (the size of the history buffer). It should be in the range 8..15 for this - version of the library. Larger values of this parameter result in better - compression at the expense of memory usage. The default value is 15 if - deflateInit is used instead. - - windowBits can also be -8..-15 for raw deflate. In this case, -windowBits - determines the window size. deflate() will then generate raw deflate data - with no zlib header or trailer, and will not compute an adler32 check value. - - windowBits can also be greater than 15 for optional gzip encoding. Add - 16 to windowBits to write a simple gzip header and trailer around the - compressed data instead of a zlib wrapper. The gzip header will have no - file name, no extra data, no comment, no modification time (set to zero), no - header crc, and the operating system will be set to 255 (unknown). If a - gzip stream is being written, strm->adler is a crc32 instead of an adler32. - - The memLevel parameter specifies how much memory should be allocated - for the internal compression state. memLevel=1 uses minimum memory but is - slow and reduces compression ratio; memLevel=9 uses maximum memory for - optimal speed. The default value is 8. See zconf.h for total memory usage - as a function of windowBits and memLevel. - - The strategy parameter is used to tune the compression algorithm. Use the - value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a - filter (or predictor), Z_HUFFMAN_ONLY to force Huffman encoding only (no - string match), or Z_RLE to limit match distances to one (run-length - encoding). Filtered data consists mostly of small values with a somewhat - random distribution. 
In this case, the compression algorithm is tuned to - compress them better. The effect of Z_FILTERED is to force more Huffman - coding and less string matching; it is somewhat intermediate between - Z_DEFAULT_STRATEGY and Z_HUFFMAN_ONLY. Z_RLE is designed to be almost as - fast as Z_HUFFMAN_ONLY, but give better compression for PNG image data. The - strategy parameter only affects the compression ratio but not the - correctness of the compressed output even if it is not set appropriately. - Z_FIXED prevents the use of dynamic Huffman codes, allowing for a simpler - decoder for special applications. - - deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough - memory, Z_STREAM_ERROR if any parameter is invalid (such as an invalid - method), or Z_VERSION_ERROR if the zlib library version (zlib_version) is - incompatible with the version assumed by the caller (ZLIB_VERSION). msg is - set to null if there is no error message. deflateInit2 does not perform any - compression: this will be done by deflate(). -*/ - -ZEXTERN int ZEXPORT deflateSetDictionary OF((z_streamp strm, - const Bytef *dictionary, - uInt dictLength)); -/* - Initializes the compression dictionary from the given byte sequence - without producing any compressed output. This function must be called - immediately after deflateInit, deflateInit2 or deflateReset, before any call - of deflate. The compressor and decompressor must use exactly the same - dictionary (see inflateSetDictionary). - - The dictionary should consist of strings (byte sequences) that are likely - to be encountered later in the data to be compressed, with the most commonly - used strings preferably put towards the end of the dictionary. Using a - dictionary is most useful when the data to be compressed is short and can be - predicted with good accuracy; the data can then be compressed better than - with the default empty dictionary. 
- - Depending on the size of the compression data structures selected by - deflateInit or deflateInit2, a part of the dictionary may in effect be - discarded, for example if the dictionary is larger than the window size - provided in deflateInit or deflateInit2. Thus the strings most likely to be - useful should be put at the end of the dictionary, not at the front. In - addition, the current implementation of deflate will use at most the window - size minus 262 bytes of the provided dictionary. - - Upon return of this function, strm->adler is set to the adler32 value - of the dictionary; the decompressor may later use this value to determine - which dictionary has been used by the compressor. (The adler32 value - applies to the whole dictionary even if only a subset of the dictionary is - actually used by the compressor.) If a raw deflate was requested, then the - adler32 value is not computed and strm->adler is not set. - - deflateSetDictionary returns Z_OK if success, or Z_STREAM_ERROR if a - parameter is invalid (e.g. dictionary being Z_NULL) or the stream state is - inconsistent (for example if deflate has already been called for this stream - or if the compression method is bsort). deflateSetDictionary does not - perform any compression: this will be done by deflate(). -*/ - -ZEXTERN int ZEXPORT deflateCopy OF((z_streamp dest, - z_streamp source)); -/* - Sets the destination stream as a complete copy of the source stream. - - This function can be useful when several compression strategies will be - tried, for example when there are several ways of pre-processing the input - data with a filter. The streams that will be discarded should then be freed - by calling deflateEnd. Note that deflateCopy duplicates the internal - compression state which can be quite large, so this strategy is slow and can - consume lots of memory. 
- - deflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not - enough memory, Z_STREAM_ERROR if the source stream state was inconsistent - (such as zalloc being Z_NULL). msg is left unchanged in both source and - destination. -*/ - -ZEXTERN int ZEXPORT deflateReset OF((z_streamp strm)); -/* - This function is equivalent to deflateEnd followed by deflateInit, - but does not free and reallocate all the internal compression state. The - stream will keep the same compression level and any other attributes that - may have been set by deflateInit2. - - deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source - stream state was inconsistent (such as zalloc or state being Z_NULL). -*/ - -ZEXTERN int ZEXPORT deflateParams OF((z_streamp strm, - int level, - int strategy)); -/* - Dynamically update the compression level and compression strategy. The - interpretation of level and strategy is as in deflateInit2. This can be - used to switch between compression and straight copy of the input data, or - to switch to a different kind of input data requiring a different strategy. - If the compression level is changed, the input available so far is - compressed with the old level (and may be flushed); the new level will take - effect only at the next call of deflate(). - - Before the call of deflateParams, the stream state must be set as for - a call of deflate(), since the currently available input may have to be - compressed and flushed. In particular, strm->avail_out must be non-zero. - - deflateParams returns Z_OK if success, Z_STREAM_ERROR if the source - stream state was inconsistent or if a parameter was invalid, Z_BUF_ERROR if - strm->avail_out was zero. -*/ - -ZEXTERN int ZEXPORT deflateTune OF((z_streamp strm, - int good_length, - int max_lazy, - int nice_length, - int max_chain)); -/* - Fine tune deflate's internal compression parameters. 
This should only be - used by someone who understands the algorithm used by zlib's deflate for - searching for the best matching string, and even then only by the most - fanatic optimizer trying to squeeze out the last compressed bit for their - specific input data. Read the deflate.c source code for the meaning of the - max_lazy, good_length, nice_length, and max_chain parameters. - - deflateTune() can be called after deflateInit() or deflateInit2(), and - returns Z_OK on success, or Z_STREAM_ERROR for an invalid deflate stream. - */ - -ZEXTERN uLong ZEXPORT deflateBound OF((z_streamp strm, - uLong sourceLen)); -/* - deflateBound() returns an upper bound on the compressed size after - deflation of sourceLen bytes. It must be called after deflateInit() or - deflateInit2(), and after deflateSetHeader(), if used. This would be used - to allocate an output buffer for deflation in a single pass, and so would be - called before deflate(). -*/ - -ZEXTERN int ZEXPORT deflatePrime OF((z_streamp strm, - int bits, - int value)); -/* - deflatePrime() inserts bits in the deflate output stream. The intent - is that this function is used to start off the deflate output with the bits - leftover from a previous deflate stream when appending to it. As such, this - function can only be used for raw deflate, and must be used before the first - deflate() call after a deflateInit2() or deflateReset(). bits must be less - than or equal to 16, and that many of the least significant bits of value - will be inserted in the output. - - deflatePrime returns Z_OK if success, or Z_STREAM_ERROR if the source - stream state was inconsistent. -*/ - -ZEXTERN int ZEXPORT deflateSetHeader OF((z_streamp strm, - gz_headerp head)); -/* - deflateSetHeader() provides gzip header information for when a gzip - stream is requested by deflateInit2(). deflateSetHeader() may be called - after deflateInit2() or deflateReset() and before the first call of - deflate(). 
The text, time, os, extra field, name, and comment information - in the provided gz_header structure are written to the gzip header (xflag is - ignored -- the extra flags are set according to the compression level). The - caller must assure that, if not Z_NULL, name and comment are terminated with - a zero byte, and that if extra is not Z_NULL, that extra_len bytes are - available there. If hcrc is true, a gzip header crc is included. Note that - the current versions of the command-line version of gzip (up through version - 1.3.x) do not support header crc's, and will report that it is a "multi-part - gzip file" and give up. - - If deflateSetHeader is not used, the default gzip header has text false, - the time set to zero, and os set to 255, with no extra, name, or comment - fields. The gzip header is returned to the default state by deflateReset(). - - deflateSetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source - stream state was inconsistent. -*/ - -/* -ZEXTERN int ZEXPORT inflateInit2 OF((z_streamp strm, - int windowBits)); - - This is another version of inflateInit with an extra parameter. The - fields next_in, avail_in, zalloc, zfree and opaque must be initialized - before by the caller. - - The windowBits parameter is the base two logarithm of the maximum window - size (the size of the history buffer). It should be in the range 8..15 for - this version of the library. The default value is 15 if inflateInit is used - instead. windowBits must be greater than or equal to the windowBits value - provided to deflateInit2() while compressing, or it must be equal to 15 if - deflateInit2() was not used. If a compressed stream with a larger window - size is given as input, inflate() will return with the error code - Z_DATA_ERROR instead of trying to allocate a larger window. - - windowBits can also be zero to request that inflate use the window size in - the zlib header of the compressed stream. - - windowBits can also be -8..-15 for raw inflate. 
In this case, -windowBits - determines the window size. inflate() will then process raw deflate data, - not looking for a zlib or gzip header, not generating a check value, and not - looking for any check values for comparison at the end of the stream. This - is for use with other formats that use the deflate compressed data format - such as zip. Those formats provide their own check values. If a custom - format is developed using the raw deflate format for compressed data, it is - recommended that a check value such as an adler32 or a crc32 be applied to - the uncompressed data as is done in the zlib, gzip, and zip formats. For - most applications, the zlib format should be used as is. Note that comments - above on the use in deflateInit2() applies to the magnitude of windowBits. - - windowBits can also be greater than 15 for optional gzip decoding. Add - 32 to windowBits to enable zlib and gzip decoding with automatic header - detection, or add 16 to decode only the gzip format (the zlib format will - return a Z_DATA_ERROR). If a gzip stream is being decoded, strm->adler is a - crc32 instead of an adler32. - - inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough - memory, Z_VERSION_ERROR if the zlib library version is incompatible with the - version assumed by the caller, or Z_STREAM_ERROR if the parameters are - invalid, such as a null pointer to the structure. msg is set to null if - there is no error message. inflateInit2 does not perform any decompression - apart from possibly reading the zlib header if present: actual decompression - will be done by inflate(). (So next_in and avail_in may be modified, but - next_out and avail_out are unused and unchanged.) The current implementation - of inflateInit2() does not process any header information -- that is - deferred until inflate() is called. 
-*/ - -ZEXTERN int ZEXPORT inflateSetDictionary OF((z_streamp strm, - const Bytef *dictionary, - uInt dictLength)); -/* - Initializes the decompression dictionary from the given uncompressed byte - sequence. This function must be called immediately after a call of inflate, - if that call returned Z_NEED_DICT. The dictionary chosen by the compressor - can be determined from the adler32 value returned by that call of inflate. - The compressor and decompressor must use exactly the same dictionary (see - deflateSetDictionary). For raw inflate, this function can be called - immediately after inflateInit2() or inflateReset() and before any call of - inflate() to set the dictionary. The application must insure that the - dictionary that was used for compression is provided. - - inflateSetDictionary returns Z_OK if success, Z_STREAM_ERROR if a - parameter is invalid (e.g. dictionary being Z_NULL) or the stream state is - inconsistent, Z_DATA_ERROR if the given dictionary doesn't match the - expected one (incorrect adler32 value). inflateSetDictionary does not - perform any decompression: this will be done by subsequent calls of - inflate(). -*/ - -ZEXTERN int ZEXPORT inflateSync OF((z_streamp strm)); -/* - Skips invalid compressed data until a full flush point (see above the - description of deflate with Z_FULL_FLUSH) can be found, or until all - available input is skipped. No output is provided. - - inflateSync returns Z_OK if a full flush point has been found, Z_BUF_ERROR - if no more input was provided, Z_DATA_ERROR if no flush point has been - found, or Z_STREAM_ERROR if the stream structure was inconsistent. In the - success case, the application may save the current current value of total_in - which indicates where valid compressed data was found. In the error case, - the application may repeatedly call inflateSync, providing more input each - time, until success or end of the input data. 
-*/ - -ZEXTERN int ZEXPORT inflateCopy OF((z_streamp dest, - z_streamp source)); -/* - Sets the destination stream as a complete copy of the source stream. - - This function can be useful when randomly accessing a large stream. The - first pass through the stream can periodically record the inflate state, - allowing restarting inflate at those points when randomly accessing the - stream. - - inflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not - enough memory, Z_STREAM_ERROR if the source stream state was inconsistent - (such as zalloc being Z_NULL). msg is left unchanged in both source and - destination. -*/ - -ZEXTERN int ZEXPORT inflateReset OF((z_streamp strm)); -/* - This function is equivalent to inflateEnd followed by inflateInit, - but does not free and reallocate all the internal decompression state. The - stream will keep attributes that may have been set by inflateInit2. - - inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source - stream state was inconsistent (such as zalloc or state being Z_NULL). -*/ - -ZEXTERN int ZEXPORT inflateReset2 OF((z_streamp strm, - int windowBits)); -/* - This function is the same as inflateReset, but it also permits changing - the wrap and window size requests. The windowBits parameter is interpreted - the same as it is for inflateInit2. - - inflateReset2 returns Z_OK if success, or Z_STREAM_ERROR if the source - stream state was inconsistent (such as zalloc or state being Z_NULL), or if - the windowBits parameter is invalid. -*/ - -ZEXTERN int ZEXPORT inflatePrime OF((z_streamp strm, - int bits, - int value)); -/* - This function inserts bits in the inflate input stream. The intent is - that this function is used to start inflating at a bit position in the - middle of a byte. The provided bits will be used before any bytes are used - from next_in. This function should only be used with raw inflate, and - should be used before the first inflate() call after inflateInit2() or - inflateReset(). 
bits must be less than or equal to 16, and that many of the - least significant bits of value will be inserted in the input. - - If bits is negative, then the input stream bit buffer is emptied. Then - inflatePrime() can be called again to put bits in the buffer. This is used - to clear out bits leftover after feeding inflate a block description prior - to feeding inflate codes. - - inflatePrime returns Z_OK if success, or Z_STREAM_ERROR if the source - stream state was inconsistent. -*/ - -ZEXTERN long ZEXPORT inflateMark OF((z_streamp strm)); -/* - This function returns two values, one in the lower 16 bits of the return - value, and the other in the remaining upper bits, obtained by shifting the - return value down 16 bits. If the upper value is -1 and the lower value is - zero, then inflate() is currently decoding information outside of a block. - If the upper value is -1 and the lower value is non-zero, then inflate is in - the middle of a stored block, with the lower value equaling the number of - bytes from the input remaining to copy. If the upper value is not -1, then - it is the number of bits back from the current bit position in the input of - the code (literal or length/distance pair) currently being processed. In - that case the lower value is the number of bytes already emitted for that - code. - - A code is being processed if inflate is waiting for more input to complete - decoding of the code, or if it has completed decoding but is waiting for - more output space to write the literal or match data. - - inflateMark() is used to mark locations in the input data for random - access, which may be at bit positions, and to note those cases where the - output of a code may span boundaries of random access blocks. The current - location in the input stream can be determined from avail_in and data_type - as noted in the description for the Z_BLOCK flush parameter for inflate. 
- - inflateMark returns the value noted above or -1 << 16 if the provided - source stream state was inconsistent. -*/ - -ZEXTERN int ZEXPORT inflateGetHeader OF((z_streamp strm, - gz_headerp head)); -/* - inflateGetHeader() requests that gzip header information be stored in the - provided gz_header structure. inflateGetHeader() may be called after - inflateInit2() or inflateReset(), and before the first call of inflate(). - As inflate() processes the gzip stream, head->done is zero until the header - is completed, at which time head->done is set to one. If a zlib stream is - being decoded, then head->done is set to -1 to indicate that there will be - no gzip header information forthcoming. Note that Z_BLOCK or Z_TREES can be - used to force inflate() to return immediately after header processing is - complete and before any actual data is decompressed. - - The text, time, xflags, and os fields are filled in with the gzip header - contents. hcrc is set to true if there is a header CRC. (The header CRC - was valid if done is set to one.) If extra is not Z_NULL, then extra_max - contains the maximum number of bytes to write to extra. Once done is true, - extra_len contains the actual extra field length, and extra contains the - extra field, or that field truncated if extra_max is less than extra_len. - If name is not Z_NULL, then up to name_max characters are written there, - terminated with a zero unless the length is greater than name_max. If - comment is not Z_NULL, then up to comm_max characters are written there, - terminated with a zero unless the length is greater than comm_max. When any - of extra, name, or comment are not Z_NULL and the respective field is not - present in the header, then that field is set to Z_NULL to signal its - absence. This allows the use of deflateSetHeader() with the returned - structure to duplicate the header. 
However if those fields are set to - allocated memory, then the application will need to save those pointers - elsewhere so that they can be eventually freed. - - If inflateGetHeader is not used, then the header information is simply - discarded. The header is always checked for validity, including the header - CRC if present. inflateReset() will reset the process to discard the header - information. The application would need to call inflateGetHeader() again to - retrieve the header from the next gzip stream. - - inflateGetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source - stream state was inconsistent. -*/ - -/* -ZEXTERN int ZEXPORT inflateBackInit OF((z_streamp strm, int windowBits, - unsigned char FAR *window)); - - Initialize the internal stream state for decompression using inflateBack() - calls. The fields zalloc, zfree and opaque in strm must be initialized - before the call. If zalloc and zfree are Z_NULL, then the default library- - derived memory allocation routines are used. windowBits is the base two - logarithm of the window size, in the range 8..15. window is a caller - supplied buffer of that size. Except for special applications where it is - assured that deflate was used with small window sizes, windowBits must be 15 - and a 32K byte window must be supplied to be able to decompress general - deflate streams. - - See inflateBack() for the usage of these routines. - - inflateBackInit will return Z_OK on success, Z_STREAM_ERROR if any of - the paramaters are invalid, Z_MEM_ERROR if the internal state could not be - allocated, or Z_VERSION_ERROR if the version of the library does not match - the version of the header file. 
-*/ - -typedef unsigned (*in_func) OF((void FAR *, unsigned char FAR * FAR *)); -typedef int (*out_func) OF((void FAR *, unsigned char FAR *, unsigned)); - -ZEXTERN int ZEXPORT inflateBack OF((z_streamp strm, - in_func in, void FAR *in_desc, - out_func out, void FAR *out_desc)); -/* - inflateBack() does a raw inflate with a single call using a call-back - interface for input and output. This is more efficient than inflate() for - file i/o applications in that it avoids copying between the output and the - sliding window by simply making the window itself the output buffer. This - function trusts the application to not change the output buffer passed by - the output function, at least until inflateBack() returns. - - inflateBackInit() must be called first to allocate the internal state - and to initialize the state with the user-provided window buffer. - inflateBack() may then be used multiple times to inflate a complete, raw - deflate stream with each call. inflateBackEnd() is then called to free the - allocated state. - - A raw deflate stream is one with no zlib or gzip header or trailer. - This routine would normally be used in a utility that reads zip or gzip - files and writes out uncompressed files. The utility would decode the - header and process the trailer on its own, hence this routine expects only - the raw deflate stream to decompress. This is different from the normal - behavior of inflate(), which expects either a zlib or gzip header and - trailer around the deflate stream. - - inflateBack() uses two subroutines supplied by the caller that are then - called by inflateBack() for input and output. inflateBack() calls those - routines until it reads a complete deflate stream and writes out all of the - uncompressed data, or until it encounters an error. The function's - parameters and return types are defined above in the in_func and out_func - typedefs. 
inflateBack() will call in(in_desc, &buf) which should return the - number of bytes of provided input, and a pointer to that input in buf. If - there is no input available, in() must return zero--buf is ignored in that - case--and inflateBack() will return a buffer error. inflateBack() will call - out(out_desc, buf, len) to write the uncompressed data buf[0..len-1]. out() - should return zero on success, or non-zero on failure. If out() returns - non-zero, inflateBack() will return with an error. Neither in() nor out() - are permitted to change the contents of the window provided to - inflateBackInit(), which is also the buffer that out() uses to write from. - The length written by out() will be at most the window size. Any non-zero - amount of input may be provided by in(). - - For convenience, inflateBack() can be provided input on the first call by - setting strm->next_in and strm->avail_in. If that input is exhausted, then - in() will be called. Therefore strm->next_in must be initialized before - calling inflateBack(). If strm->next_in is Z_NULL, then in() will be called - immediately for input. If strm->next_in is not Z_NULL, then strm->avail_in - must also be initialized, and then if strm->avail_in is not zero, input will - initially be taken from strm->next_in[0 .. strm->avail_in - 1]. - - The in_desc and out_desc parameters of inflateBack() is passed as the - first parameter of in() and out() respectively when they are called. These - descriptors can be optionally used to pass any information that the caller- - supplied in() and out() functions need to do their job. - - On return, inflateBack() will set strm->next_in and strm->avail_in to - pass back any unused input that was provided by the last in() call. 
The - return values of inflateBack() can be Z_STREAM_END on success, Z_BUF_ERROR - if in() or out() returned an error, Z_DATA_ERROR if there was a format error - in the deflate stream (in which case strm->msg is set to indicate the nature - of the error), or Z_STREAM_ERROR if the stream was not properly initialized. - In the case of Z_BUF_ERROR, an input or output error can be distinguished - using strm->next_in which will be Z_NULL only if in() returned an error. If - strm->next_in is not Z_NULL, then the Z_BUF_ERROR was due to out() returning - non-zero. (in() will always be called before out(), so strm->next_in is - assured to be defined if out() returns non-zero.) Note that inflateBack() - cannot return Z_OK. -*/ - -ZEXTERN int ZEXPORT inflateBackEnd OF((z_streamp strm)); -/* - All memory allocated by inflateBackInit() is freed. - - inflateBackEnd() returns Z_OK on success, or Z_STREAM_ERROR if the stream - state was inconsistent. -*/ - -ZEXTERN uLong ZEXPORT zlibCompileFlags OF((void)); -/* Return flags indicating compile-time options. 
- - Type sizes, two bits each, 00 = 16 bits, 01 = 32, 10 = 64, 11 = other: - 1.0: size of uInt - 3.2: size of uLong - 5.4: size of voidpf (pointer) - 7.6: size of z_off_t - - Compiler, assembler, and debug options: - 8: DEBUG - 9: ASMV or ASMINF -- use ASM code - 10: ZLIB_WINAPI -- exported functions use the WINAPI calling convention - 11: 0 (reserved) - - One-time table building (smaller code, but not thread-safe if true): - 12: BUILDFIXED -- build static block decoding tables when needed - 13: DYNAMIC_CRC_TABLE -- build CRC calculation tables when needed - 14,15: 0 (reserved) - - Library content (indicates missing functionality): - 16: NO_GZCOMPRESS -- gz* functions cannot compress (to avoid linking - deflate code when not needed) - 17: NO_GZIP -- deflate can't write gzip streams, and inflate can't detect - and decode gzip streams (to avoid linking crc code) - 18-19: 0 (reserved) - - Operation variations (changes in library functionality): - 20: PKZIP_BUG_WORKAROUND -- slightly more permissive inflate - 21: FASTEST -- deflate algorithm with only one, lowest compression level - 22,23: 0 (reserved) - - The sprintf variant used by gzprintf (zero is best): - 24: 0 = vs*, 1 = s* -- 1 means limited to 20 arguments after the format - 25: 0 = *nprintf, 1 = *printf -- 1 means gzprintf() not secure! - 26: 0 = returns value, 1 = void -- 1 means inferred string length returned - - Remainder: - 27-31: 0 (reserved) - */ - - - /* utility functions */ - -/* - The following utility functions are implemented on top of the basic - stream-oriented functions. To simplify the interface, some default options - are assumed (compression level and memory usage, standard memory allocation - functions). The source code of these utility functions can be modified if - you need special options. -*/ - -ZEXTERN int ZEXPORT compress OF((Bytef *dest, uLongf *destLen, - const Bytef *source, uLong sourceLen)); -/* - Compresses the source buffer into the destination buffer. 
sourceLen is - the byte length of the source buffer. Upon entry, destLen is the total size - of the destination buffer, which must be at least the value returned by - compressBound(sourceLen). Upon exit, destLen is the actual size of the - compressed buffer. - - compress returns Z_OK if success, Z_MEM_ERROR if there was not - enough memory, Z_BUF_ERROR if there was not enough room in the output - buffer. -*/ - -ZEXTERN int ZEXPORT compress2 OF((Bytef *dest, uLongf *destLen, - const Bytef *source, uLong sourceLen, - int level)); -/* - Compresses the source buffer into the destination buffer. The level - parameter has the same meaning as in deflateInit. sourceLen is the byte - length of the source buffer. Upon entry, destLen is the total size of the - destination buffer, which must be at least the value returned by - compressBound(sourceLen). Upon exit, destLen is the actual size of the - compressed buffer. - - compress2 returns Z_OK if success, Z_MEM_ERROR if there was not enough - memory, Z_BUF_ERROR if there was not enough room in the output buffer, - Z_STREAM_ERROR if the level parameter is invalid. -*/ - -ZEXTERN uLong ZEXPORT compressBound OF((uLong sourceLen)); -/* - compressBound() returns an upper bound on the compressed size after - compress() or compress2() on sourceLen bytes. It would be used before a - compress() or compress2() call to allocate the destination buffer. -*/ - -ZEXTERN int ZEXPORT uncompress OF((Bytef *dest, uLongf *destLen, - const Bytef *source, uLong sourceLen)); -/* - Decompresses the source buffer into the destination buffer. sourceLen is - the byte length of the source buffer. Upon entry, destLen is the total size - of the destination buffer, which must be large enough to hold the entire - uncompressed data. (The size of the uncompressed data must have been saved - previously by the compressor and transmitted to the decompressor by some - mechanism outside the scope of this compression library.) 
Upon exit, destLen - is the actual size of the uncompressed buffer. - - uncompress returns Z_OK if success, Z_MEM_ERROR if there was not - enough memory, Z_BUF_ERROR if there was not enough room in the output - buffer, or Z_DATA_ERROR if the input data was corrupted or incomplete. -*/ - - - /* gzip file access functions */ - -/* - This library supports reading and writing files in gzip (.gz) format with - an interface similar to that of stdio, using the functions that start with - "gz". The gzip format is different from the zlib format. gzip is a gzip - wrapper, documented in RFC 1952, wrapped around a deflate stream. -*/ - -typedef voidp gzFile; /* opaque gzip file descriptor */ - -/* -ZEXTERN gzFile ZEXPORT gzopen OF((const char *path, const char *mode)); - - Opens a gzip (.gz) file for reading or writing. The mode parameter is as - in fopen ("rb" or "wb") but can also include a compression level ("wb9") or - a strategy: 'f' for filtered data as in "wb6f", 'h' for Huffman-only - compression as in "wb1h", 'R' for run-length encoding as in "wb1R", or 'F' - for fixed code compression as in "wb9F". (See the description of - deflateInit2 for more information about the strategy parameter.) Also "a" - can be used instead of "w" to request that the gzip stream that will be - written be appended to the file. "+" will result in an error, since reading - and writing to the same gzip file is not supported. - - gzopen can be used to read a file which is not in gzip format; in this - case gzread will directly read from the file without decompression. - - gzopen returns NULL if the file could not be opened, if there was - insufficient memory to allocate the gzFile state, or if an invalid mode was - specified (an 'r', 'w', or 'a' was not provided, or '+' was provided). - errno can be checked to determine if the reason gzopen failed was that the - file could not be opened. 
-*/ - -ZEXTERN gzFile ZEXPORT gzdopen OF((int fd, const char *mode)); -/* - gzdopen associates a gzFile with the file descriptor fd. File descriptors - are obtained from calls like open, dup, creat, pipe or fileno (if the file - has been previously opened with fopen). The mode parameter is as in gzopen. - - The next call of gzclose on the returned gzFile will also close the file - descriptor fd, just like fclose(fdopen(fd, mode)) closes the file descriptor - fd. If you want to keep fd open, use fd = dup(fd_keep); gz = gzdopen(fd, - mode);. The duplicated descriptor should be saved to avoid a leak, since - gzdopen does not close fd if it fails. - - gzdopen returns NULL if there was insufficient memory to allocate the - gzFile state, if an invalid mode was specified (an 'r', 'w', or 'a' was not - provided, or '+' was provided), or if fd is -1. The file descriptor is not - used until the next gz* read, write, seek, or close operation, so gzdopen - will not detect if fd is invalid (unless fd is -1). -*/ - -ZEXTERN int ZEXPORT gzbuffer OF((gzFile file, unsigned size)); -/* - Set the internal buffer size used by this library's functions. The - default buffer size is 8192 bytes. This function must be called after - gzopen() or gzdopen(), and before any other calls that read or write the - file. The buffer memory allocation is always deferred to the first read or - write. Two buffers are allocated, either both of the specified size when - writing, or one of the specified size and the other twice that size when - reading. A larger buffer size of, for example, 64K or 128K bytes will - noticeably increase the speed of decompression (reading). - - The new buffer size also affects the maximum length for gzprintf(). - - gzbuffer() returns 0 on success, or -1 on failure, such as being called - too late. -*/ - -ZEXTERN int ZEXPORT gzsetparams OF((gzFile file, int level, int strategy)); -/* - Dynamically update the compression level or strategy. 
See the description - of deflateInit2 for the meaning of these parameters. - - gzsetparams returns Z_OK if success, or Z_STREAM_ERROR if the file was not - opened for writing. -*/ - -ZEXTERN int ZEXPORT gzread OF((gzFile file, voidp buf, unsigned len)); -/* - Reads the given number of uncompressed bytes from the compressed file. If - the input file was not in gzip format, gzread copies the given number of - bytes into the buffer. - - After reaching the end of a gzip stream in the input, gzread will continue - to read, looking for another gzip stream, or failing that, reading the rest - of the input file directly without decompression. The entire input file - will be read if gzread is called until it returns less than the requested - len. - - gzread returns the number of uncompressed bytes actually read, less than - len for end of file, or -1 for error. -*/ - -ZEXTERN int ZEXPORT gzwrite OF((gzFile file, - voidpc buf, unsigned len)); -/* - Writes the given number of uncompressed bytes into the compressed file. - gzwrite returns the number of uncompressed bytes written or 0 in case of - error. -*/ - -ZEXTERN int ZEXPORTVA gzprintf OF((gzFile file, const char *format, ...)); -/* - Converts, formats, and writes the arguments to the compressed file under - control of the format string, as in fprintf. gzprintf returns the number of - uncompressed bytes actually written, or 0 in case of error. The number of - uncompressed bytes written is limited to 8191, or one less than the buffer - size given to gzbuffer(). The caller should assure that this limit is not - exceeded. If it is exceeded, then gzprintf() will return an error (0) with - nothing written. In this case, there may also be a buffer overflow with - unpredictable consequences, which is possible only if zlib was compiled with - the insecure functions sprintf() or vsprintf() because the secure snprintf() - or vsnprintf() functions were not available. This can be determined using - zlibCompileFlags(). 
-*/ - -ZEXTERN int ZEXPORT gzputs OF((gzFile file, const char *s)); -/* - Writes the given null-terminated string to the compressed file, excluding - the terminating null character. - - gzputs returns the number of characters written, or -1 in case of error. -*/ - -ZEXTERN char * ZEXPORT gzgets OF((gzFile file, char *buf, int len)); -/* - Reads bytes from the compressed file until len-1 characters are read, or a - newline character is read and transferred to buf, or an end-of-file - condition is encountered. If any characters are read or if len == 1, the - string is terminated with a null character. If no characters are read due - to an end-of-file or len < 1, then the buffer is left untouched. - - gzgets returns buf which is a null-terminated string, or it returns NULL - for end-of-file or in case of error. If there was an error, the contents at - buf are indeterminate. -*/ - -ZEXTERN int ZEXPORT gzputc OF((gzFile file, int c)); -/* - Writes c, converted to an unsigned char, into the compressed file. gzputc - returns the value that was written, or -1 in case of error. -*/ - -ZEXTERN int ZEXPORT gzgetc OF((gzFile file)); -/* - Reads one byte from the compressed file. gzgetc returns this byte or -1 - in case of end of file or error. -*/ - -ZEXTERN int ZEXPORT gzungetc OF((int c, gzFile file)); -/* - Push one character back onto the stream to be read as the first character - on the next read. At least one character of push-back is allowed. - gzungetc() returns the character pushed, or -1 on failure. gzungetc() will - fail if c is -1, and may fail if a character has been pushed but not read - yet. If gzungetc is used immediately after gzopen or gzdopen, at least the - output buffer size of pushed characters is allowed. (See gzbuffer above.) - The pushed character will be discarded if the stream is repositioned with - gzseek() or gzrewind(). -*/ - -ZEXTERN int ZEXPORT gzflush OF((gzFile file, int flush)); -/* - Flushes all pending output into the compressed file. 
The parameter flush - is as in the deflate() function. The return value is the zlib error number - (see function gzerror below). gzflush is only permitted when writing. - - If the flush parameter is Z_FINISH, the remaining data is written and the - gzip stream is completed in the output. If gzwrite() is called again, a new - gzip stream will be started in the output. gzread() is able to read such - concatented gzip streams. - - gzflush should be called only when strictly necessary because it will - degrade compression if called too often. -*/ - -/* -ZEXTERN z_off_t ZEXPORT gzseek OF((gzFile file, - z_off_t offset, int whence)); - - Sets the starting position for the next gzread or gzwrite on the given - compressed file. The offset represents a number of bytes in the - uncompressed data stream. The whence parameter is defined as in lseek(2); - the value SEEK_END is not supported. - - If the file is opened for reading, this function is emulated but can be - extremely slow. If the file is opened for writing, only forward seeks are - supported; gzseek then compresses a sequence of zeroes up to the new - starting position. - - gzseek returns the resulting offset location as measured in bytes from - the beginning of the uncompressed stream, or -1 in case of error, in - particular if the file is opened for writing and the new starting position - would be before the current position. -*/ - -ZEXTERN int ZEXPORT gzrewind OF((gzFile file)); -/* - Rewinds the given file. This function is supported only for reading. - - gzrewind(file) is equivalent to (int)gzseek(file, 0L, SEEK_SET) -*/ - -/* -ZEXTERN z_off_t ZEXPORT gztell OF((gzFile file)); - - Returns the starting position for the next gzread or gzwrite on the given - compressed file. This position represents a number of bytes in the - uncompressed data stream, and is zero when starting, even if appending or - reading a gzip stream from the middle of a file using gzdopen(). 
- - gztell(file) is equivalent to gzseek(file, 0L, SEEK_CUR) -*/ - -/* -ZEXTERN z_off_t ZEXPORT gzoffset OF((gzFile file)); - - Returns the current offset in the file being read or written. This offset - includes the count of bytes that precede the gzip stream, for example when - appending or when using gzdopen() for reading. When reading, the offset - does not include as yet unused buffered input. This information can be used - for a progress indicator. On error, gzoffset() returns -1. -*/ - -ZEXTERN int ZEXPORT gzeof OF((gzFile file)); -/* - Returns true (1) if the end-of-file indicator has been set while reading, - false (0) otherwise. Note that the end-of-file indicator is set only if the - read tried to go past the end of the input, but came up short. Therefore, - just like feof(), gzeof() may return false even if there is no more data to - read, in the event that the last read request was for the exact number of - bytes remaining in the input file. This will happen if the input file size - is an exact multiple of the buffer size. - - If gzeof() returns true, then the read functions will return no more data, - unless the end-of-file indicator is reset by gzclearerr() and the input file - has grown since the previous end of file was detected. -*/ - -ZEXTERN int ZEXPORT gzdirect OF((gzFile file)); -/* - Returns true (1) if file is being copied directly while reading, or false - (0) if file is a gzip stream being decompressed. This state can change from - false to true while reading the input file if the end of a gzip stream is - reached, but is followed by data that is not another gzip stream. - - If the input file is empty, gzdirect() will return true, since the input - does not contain a gzip stream. - - If gzdirect() is used immediately after gzopen() or gzdopen() it will - cause buffers to be allocated to allow reading the file to determine if it - is a gzip file. Therefore if gzbuffer() is used, it should be called before - gzdirect(). 
-*/ - -ZEXTERN int ZEXPORT gzclose OF((gzFile file)); -/* - Flushes all pending output if necessary, closes the compressed file and - deallocates the (de)compression state. Note that once file is closed, you - cannot call gzerror with file, since its structures have been deallocated. - gzclose must not be called more than once on the same file, just as free - must not be called more than once on the same allocation. - - gzclose will return Z_STREAM_ERROR if file is not valid, Z_ERRNO on a - file operation error, or Z_OK on success. -*/ - -ZEXTERN int ZEXPORT gzclose_r OF((gzFile file)); -ZEXTERN int ZEXPORT gzclose_w OF((gzFile file)); -/* - Same as gzclose(), but gzclose_r() is only for use when reading, and - gzclose_w() is only for use when writing or appending. The advantage to - using these instead of gzclose() is that they avoid linking in zlib - compression or decompression code that is not used when only reading or only - writing respectively. If gzclose() is used, then both compression and - decompression code will be included the application when linking to a static - zlib library. -*/ - -ZEXTERN const char * ZEXPORT gzerror OF((gzFile file, int *errnum)); -/* - Returns the error message for the last error which occurred on the given - compressed file. errnum is set to zlib error number. If an error occurred - in the file system and not in the compression library, errnum is set to - Z_ERRNO and the application may consult errno to get the exact error code. - - The application must not modify the returned string. Future calls to - this function may invalidate the previously returned string. If file is - closed, then the string previously returned by gzerror will no longer be - available. - - gzerror() should be used to distinguish errors from end-of-file for those - functions above that do not distinguish those cases in their return values. -*/ - -ZEXTERN void ZEXPORT gzclearerr OF((gzFile file)); -/* - Clears the error and end-of-file flags for file. 
This is analogous to the - clearerr() function in stdio. This is useful for continuing to read a gzip - file that is being written concurrently. -*/ - - - /* checksum functions */ - -/* - These functions are not related to compression but are exported - anyway because they might be useful in applications using the compression - library. -*/ - -ZEXTERN uLong ZEXPORT adler32 OF((uLong adler, const Bytef *buf, uInt len)); -/* - Update a running Adler-32 checksum with the bytes buf[0..len-1] and - return the updated checksum. If buf is Z_NULL, this function returns the - required initial value for the checksum. - - An Adler-32 checksum is almost as reliable as a CRC32 but can be computed - much faster. - - Usage example: - - uLong adler = adler32(0L, Z_NULL, 0); - - while (read_buffer(buffer, length) != EOF) { - adler = adler32(adler, buffer, length); - } - if (adler != original_adler) error(); -*/ - -/* -ZEXTERN uLong ZEXPORT adler32_combine OF((uLong adler1, uLong adler2, - z_off_t len2)); - - Combine two Adler-32 checksums into one. For two sequences of bytes, seq1 - and seq2 with lengths len1 and len2, Adler-32 checksums were calculated for - each, adler1 and adler2. adler32_combine() returns the Adler-32 checksum of - seq1 and seq2 concatenated, requiring only adler1, adler2, and len2. -*/ - -ZEXTERN uLong ZEXPORT crc32 OF((uLong crc, const Bytef *buf, uInt len)); -/* - Update a running CRC-32 with the bytes buf[0..len-1] and return the - updated CRC-32. If buf is Z_NULL, this function returns the required - initial value for the for the crc. Pre- and post-conditioning (one's - complement) is performed within this function so it shouldn't be done by the - application. 
- - Usage example: - - uLong crc = crc32(0L, Z_NULL, 0); - - while (read_buffer(buffer, length) != EOF) { - crc = crc32(crc, buffer, length); - } - if (crc != original_crc) error(); -*/ - -/* -ZEXTERN uLong ZEXPORT crc32_combine OF((uLong crc1, uLong crc2, z_off_t len2)); - - Combine two CRC-32 check values into one. For two sequences of bytes, - seq1 and seq2 with lengths len1 and len2, CRC-32 check values were - calculated for each, crc1 and crc2. crc32_combine() returns the CRC-32 - check value of seq1 and seq2 concatenated, requiring only crc1, crc2, and - len2. -*/ - - - /* various hacks, don't look :) */ - -/* deflateInit and inflateInit are macros to allow checking the zlib version - * and the compiler's view of z_stream: - */ -ZEXTERN int ZEXPORT deflateInit_ OF((z_streamp strm, int level, - const char *version, int stream_size)); -ZEXTERN int ZEXPORT inflateInit_ OF((z_streamp strm, - const char *version, int stream_size)); -ZEXTERN int ZEXPORT deflateInit2_ OF((z_streamp strm, int level, int method, - int windowBits, int memLevel, - int strategy, const char *version, - int stream_size)); -ZEXTERN int ZEXPORT inflateInit2_ OF((z_streamp strm, int windowBits, - const char *version, int stream_size)); -ZEXTERN int ZEXPORT inflateBackInit_ OF((z_streamp strm, int windowBits, - unsigned char FAR *window, - const char *version, - int stream_size)); -#define deflateInit(strm, level) \ - deflateInit_((strm), (level), ZLIB_VERSION, sizeof(z_stream)) -#define inflateInit(strm) \ - inflateInit_((strm), ZLIB_VERSION, sizeof(z_stream)) -#define deflateInit2(strm, level, method, windowBits, memLevel, strategy) \ - deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\ - (strategy), ZLIB_VERSION, sizeof(z_stream)) -#define inflateInit2(strm, windowBits) \ - inflateInit2_((strm), (windowBits), ZLIB_VERSION, sizeof(z_stream)) -#define inflateBackInit(strm, windowBits, window) \ - inflateBackInit_((strm), (windowBits), (window), \ - ZLIB_VERSION, sizeof(z_stream)) 
- -/* provide 64-bit offset functions if _LARGEFILE64_SOURCE defined, and/or - * change the regular functions to 64 bits if _FILE_OFFSET_BITS is 64 (if - * both are true, the application gets the *64 functions, and the regular - * functions are changed to 64 bits) -- in case these are set on systems - * without large file support, _LFS64_LARGEFILE must also be true - */ -#if defined(_LARGEFILE64_SOURCE) && _LFS64_LARGEFILE-0 - ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *)); - ZEXTERN z_off64_t ZEXPORT gzseek64 OF((gzFile, z_off64_t, int)); - ZEXTERN z_off64_t ZEXPORT gztell64 OF((gzFile)); - ZEXTERN z_off64_t ZEXPORT gzoffset64 OF((gzFile)); - ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off64_t)); - ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off64_t)); -#endif - -#if !defined(ZLIB_INTERNAL) && _FILE_OFFSET_BITS-0 == 64 && _LFS64_LARGEFILE-0 -# define gzopen gzopen64 -# define gzseek gzseek64 -# define gztell gztell64 -# define gzoffset gzoffset64 -# define adler32_combine adler32_combine64 -# define crc32_combine crc32_combine64 -# ifdef _LARGEFILE64_SOURCE - ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *)); - ZEXTERN z_off_t ZEXPORT gzseek64 OF((gzFile, z_off_t, int)); - ZEXTERN z_off_t ZEXPORT gztell64 OF((gzFile)); - ZEXTERN z_off_t ZEXPORT gzoffset64 OF((gzFile)); - ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off_t)); - ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off_t)); -# endif -#else - ZEXTERN gzFile ZEXPORT gzopen OF((const char *, const char *)); - ZEXTERN z_off_t ZEXPORT gzseek OF((gzFile, z_off_t, int)); - ZEXTERN z_off_t ZEXPORT gztell OF((gzFile)); - ZEXTERN z_off_t ZEXPORT gzoffset OF((gzFile)); - ZEXTERN uLong ZEXPORT adler32_combine OF((uLong, uLong, z_off_t)); - ZEXTERN uLong ZEXPORT crc32_combine OF((uLong, uLong, z_off_t)); -#endif - -/* hack for buggy compilers */ -#if !defined(ZUTIL_H) && !defined(NO_DUMMY_DECL) - struct internal_state {int dummy;}; 
-#endif - -/* undocumented functions */ -ZEXTERN const char * ZEXPORT zError OF((int)); -ZEXTERN int ZEXPORT inflateSyncPoint OF((z_streamp)); -ZEXTERN const uLongf * ZEXPORT get_crc_table OF((void)); -ZEXTERN int ZEXPORT inflateUndermine OF((z_streamp, int)); - -#ifdef __cplusplus -} -#endif - -#endif /* ZLIB_H */ diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/zutil.c cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/zutil.c --- cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/zutil.c 2014-11-18 11:13:10.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/zutil.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,318 +0,0 @@ -/* zutil.c -- target dependent utility functions for the compression library - * Copyright (C) 1995-2005, 2010 Jean-loup Gailly. - * For conditions of distribution and use, see copyright notice in zlib.h - */ - -/* @(#) $Id$ */ - -#include "zutil.h" - -#ifndef NO_DUMMY_DECL -struct internal_state {int dummy;}; /* for buggy compilers */ -#endif - -const char * const z_errmsg[10] = { -"need dictionary", /* Z_NEED_DICT 2 */ -"stream end", /* Z_STREAM_END 1 */ -"", /* Z_OK 0 */ -"file error", /* Z_ERRNO (-1) */ -"stream error", /* Z_STREAM_ERROR (-2) */ -"data error", /* Z_DATA_ERROR (-3) */ -"insufficient memory", /* Z_MEM_ERROR (-4) */ -"buffer error", /* Z_BUF_ERROR (-5) */ -"incompatible version",/* Z_VERSION_ERROR (-6) */ -""}; - - -const char * ZEXPORT zlibVersion() -{ - return ZLIB_VERSION; -} - -uLong ZEXPORT zlibCompileFlags() -{ - uLong flags; - - flags = 0; - switch ((int)(sizeof(uInt))) { - case 2: break; - case 4: flags += 1; break; - case 8: flags += 2; break; - default: flags += 3; - } - switch ((int)(sizeof(uLong))) { - case 2: break; - case 4: flags += 1 << 2; break; - case 8: flags += 2 << 2; break; - default: flags += 3 << 2; - } - switch ((int)(sizeof(voidpf))) { - case 2: break; - case 4: flags += 1 << 4; break; - case 8: flags += 2 << 4; break; - default: flags += 3 << 4; - } - switch 
((int)(sizeof(z_off_t))) { - case 2: break; - case 4: flags += 1 << 6; break; - case 8: flags += 2 << 6; break; - default: flags += 3 << 6; - } -#ifdef DEBUG - flags += 1 << 8; -#endif -#if defined(ASMV) || defined(ASMINF) - flags += 1 << 9; -#endif -#ifdef ZLIB_WINAPI - flags += 1 << 10; -#endif -#ifdef BUILDFIXED - flags += 1 << 12; -#endif -#ifdef DYNAMIC_CRC_TABLE - flags += 1 << 13; -#endif -#ifdef NO_GZCOMPRESS - flags += 1L << 16; -#endif -#ifdef NO_GZIP - flags += 1L << 17; -#endif -#ifdef PKZIP_BUG_WORKAROUND - flags += 1L << 20; -#endif -#ifdef FASTEST - flags += 1L << 21; -#endif -#ifdef STDC -# ifdef NO_vsnprintf - flags += 1L << 25; -# ifdef HAS_vsprintf_void - flags += 1L << 26; -# endif -# else -# ifdef HAS_vsnprintf_void - flags += 1L << 26; -# endif -# endif -#else - flags += 1L << 24; -# ifdef NO_snprintf - flags += 1L << 25; -# ifdef HAS_sprintf_void - flags += 1L << 26; -# endif -# else -# ifdef HAS_snprintf_void - flags += 1L << 26; -# endif -# endif -#endif - return flags; -} - -#ifdef DEBUG - -# ifndef verbose -# define verbose 0 -# endif -int ZLIB_INTERNAL z_verbose = verbose; - -void ZLIB_INTERNAL z_error (m) - char *m; -{ - fprintf(stderr, "%s\n", m); - exit(1); -} -#endif - -/* exported to allow conversion of error code to string for compress() and - * uncompress() - */ -const char * ZEXPORT zError(err) - int err; -{ - return ERR_MSG(err); -} - -#if defined(_WIN32_WCE) - /* The Microsoft C Run-Time Library for Windows CE doesn't have - * errno. We define it as a global variable to simplify porting. - * Its value is always 0 and should not be used. - */ - int errno = 0; -#endif - -#ifndef HAVE_MEMCPY - -void ZLIB_INTERNAL zmemcpy(dest, source, len) - Bytef* dest; - const Bytef* source; - uInt len; -{ - if (len == 0) return; - do { - *dest++ = *source++; /* ??? 
to be unrolled */ - } while (--len != 0); -} - -int ZLIB_INTERNAL zmemcmp(s1, s2, len) - const Bytef* s1; - const Bytef* s2; - uInt len; -{ - uInt j; - - for (j = 0; j < len; j++) { - if (s1[j] != s2[j]) return 2*(s1[j] > s2[j])-1; - } - return 0; -} - -void ZLIB_INTERNAL zmemzero(dest, len) - Bytef* dest; - uInt len; -{ - if (len == 0) return; - do { - *dest++ = 0; /* ??? to be unrolled */ - } while (--len != 0); -} -#endif - - -#ifdef SYS16BIT - -#ifdef __TURBOC__ -/* Turbo C in 16-bit mode */ - -# define MY_ZCALLOC - -/* Turbo C malloc() does not allow dynamic allocation of 64K bytes - * and farmalloc(64K) returns a pointer with an offset of 8, so we - * must fix the pointer. Warning: the pointer must be put back to its - * original form in order to free it, use zcfree(). - */ - -#define MAX_PTR 10 -/* 10*64K = 640K */ - -local int next_ptr = 0; - -typedef struct ptr_table_s { - voidpf org_ptr; - voidpf new_ptr; -} ptr_table; - -local ptr_table table[MAX_PTR]; -/* This table is used to remember the original form of pointers - * to large buffers (64K). Such pointers are normalized with a zero offset. - * Since MSDOS is not a preemptive multitasking OS, this table is not - * protected from concurrent access. This hack doesn't work anyway on - * a protected system like OS/2. Use Microsoft C instead. - */ - -voidpf ZLIB_INTERNAL zcalloc (voidpf opaque, unsigned items, unsigned size) -{ - voidpf buf = opaque; /* just to make some compilers happy */ - ulg bsize = (ulg)items*size; - - /* If we allocate less than 65520 bytes, we assume that farmalloc - * will return a usable pointer which doesn't have to be normalized. 
- */ - if (bsize < 65520L) { - buf = farmalloc(bsize); - if (*(ush*)&buf != 0) return buf; - } else { - buf = farmalloc(bsize + 16L); - } - if (buf == NULL || next_ptr >= MAX_PTR) return NULL; - table[next_ptr].org_ptr = buf; - - /* Normalize the pointer to seg:0 */ - *((ush*)&buf+1) += ((ush)((uch*)buf-0) + 15) >> 4; - *(ush*)&buf = 0; - table[next_ptr++].new_ptr = buf; - return buf; -} - -void ZLIB_INTERNAL zcfree (voidpf opaque, voidpf ptr) -{ - int n; - if (*(ush*)&ptr != 0) { /* object < 64K */ - farfree(ptr); - return; - } - /* Find the original pointer */ - for (n = 0; n < next_ptr; n++) { - if (ptr != table[n].new_ptr) continue; - - farfree(table[n].org_ptr); - while (++n < next_ptr) { - table[n-1] = table[n]; - } - next_ptr--; - return; - } - ptr = opaque; /* just to make some compilers happy */ - Assert(0, "zcfree: ptr not found"); -} - -#endif /* __TURBOC__ */ - - -#ifdef M_I86 -/* Microsoft C in 16-bit mode */ - -# define MY_ZCALLOC - -#if (!defined(_MSC_VER) || (_MSC_VER <= 600)) -# define _halloc halloc -# define _hfree hfree -#endif - -voidpf ZLIB_INTERNAL zcalloc (voidpf opaque, uInt items, uInt size) -{ - if (opaque) opaque = 0; /* to make compiler happy */ - return _halloc((long)items, size); -} - -void ZLIB_INTERNAL zcfree (voidpf opaque, voidpf ptr) -{ - if (opaque) opaque = 0; /* to make compiler happy */ - _hfree(ptr); -} - -#endif /* M_I86 */ - -#endif /* SYS16BIT */ - - -#ifndef MY_ZCALLOC /* Any system without a special alloc function */ - -#ifndef STDC -extern voidp malloc OF((uInt size)); -extern voidp calloc OF((uInt items, uInt size)); -extern void free OF((voidpf ptr)); -#endif - -voidpf ZLIB_INTERNAL zcalloc (opaque, items, size) - voidpf opaque; - unsigned items; - unsigned size; -{ - if (opaque) items += size - size; /* make compiler happy */ - return sizeof(uInt) > 2 ? 
(voidpf)malloc(items * size) : - (voidpf)calloc(items, size); -} - -void ZLIB_INTERNAL zcfree (opaque, ptr) - voidpf opaque; - voidpf ptr; -{ - free(ptr); - if (opaque) return; /* make compiler happy */ -} - -#endif /* MY_ZCALLOC */ diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/zutil.h cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/zutil.h --- cabal-install-1.22-1.22.6.0/=unpacked-tar6=/cbits/zutil.h 2014-11-18 11:13:10.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar6=/cbits/zutil.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,274 +0,0 @@ -/* zutil.h -- internal interface and configuration of the compression library - * Copyright (C) 1995-2010 Jean-loup Gailly. - * For conditions of distribution and use, see copyright notice in zlib.h - */ - -/* WARNING: this file should *not* be used by applications. It is - part of the implementation of the compression library and is - subject to change. Applications should only use zlib.h. - */ - -/* @(#) $Id$ */ - -#ifndef ZUTIL_H -#define ZUTIL_H - -#if ((__GNUC__-0) * 10 + __GNUC_MINOR__-0 >= 33) && !defined(NO_VIZ) -# define ZLIB_INTERNAL __attribute__((visibility ("hidden"))) -#else -# define ZLIB_INTERNAL -#endif - -#include "zlib.h" - -#ifdef STDC -# if !(defined(_WIN32_WCE) && defined(_MSC_VER)) -# include -# endif -# include -# include -#endif - -#ifndef local -# define local static -#endif -/* compile with -Dlocal if your debugger can't find static symbols */ - -typedef unsigned char uch; -typedef uch FAR uchf; -typedef unsigned short ush; -typedef ush FAR ushf; -typedef unsigned long ulg; - -extern const char * const z_errmsg[10]; /* indexed by 2-zlib_error */ -/* (size given to avoid silly warnings with Visual C++) */ - -#define ERR_MSG(err) z_errmsg[Z_NEED_DICT-(err)] - -#define ERR_RETURN(strm,err) \ - return (strm->msg = (char*)ERR_MSG(err), (err)) -/* To be used only when the state is known to be valid */ - - /* common constants */ - -#ifndef DEF_WBITS -# define DEF_WBITS MAX_WBITS 
-#endif -/* default windowBits for decompression. MAX_WBITS is for compression only */ - -#if MAX_MEM_LEVEL >= 8 -# define DEF_MEM_LEVEL 8 -#else -# define DEF_MEM_LEVEL MAX_MEM_LEVEL -#endif -/* default memLevel */ - -#define STORED_BLOCK 0 -#define STATIC_TREES 1 -#define DYN_TREES 2 -/* The three kinds of block type */ - -#define MIN_MATCH 3 -#define MAX_MATCH 258 -/* The minimum and maximum match lengths */ - -#define PRESET_DICT 0x20 /* preset dictionary flag in zlib header */ - - /* target dependencies */ - -#if defined(MSDOS) || (defined(WINDOWS) && !defined(WIN32)) -# define OS_CODE 0x00 -# if defined(__TURBOC__) || defined(__BORLANDC__) -# if (__STDC__ == 1) && (defined(__LARGE__) || defined(__COMPACT__)) - /* Allow compilation with ANSI keywords only enabled */ - void _Cdecl farfree( void *block ); - void *_Cdecl farmalloc( unsigned long nbytes ); -# else -# include -# endif -# else /* MSC or DJGPP */ -# include -# endif -#endif - -#ifdef AMIGA -# define OS_CODE 0x01 -#endif - -#if defined(VAXC) || defined(VMS) -# define OS_CODE 0x02 -# define F_OPEN(name, mode) \ - fopen((name), (mode), "mbc=60", "ctx=stm", "rfm=fix", "mrs=512") -#endif - -#if defined(ATARI) || defined(atarist) -# define OS_CODE 0x05 -#endif - -#ifdef OS2 -# define OS_CODE 0x06 -# ifdef M_I86 -# include -# endif -#endif - -#if defined(MACOS) || defined(TARGET_OS_MAC) -# define OS_CODE 0x07 -# if defined(__MWERKS__) && __dest_os != __be_os && __dest_os != __win32_os -# include /* for fdopen */ -# else -# ifndef fdopen -# define fdopen(fd,mode) NULL /* No fdopen() */ -# endif -# endif -#endif - -#ifdef TOPS20 -# define OS_CODE 0x0a -#endif - -#ifdef WIN32 -# ifndef __CYGWIN__ /* Cygwin is Unix, not Win32 */ -# define OS_CODE 0x0b -# endif -#endif - -#ifdef __50SERIES /* Prime/PRIMOS */ -# define OS_CODE 0x0f -#endif - -#if defined(_BEOS_) || defined(RISCOS) -# define fdopen(fd,mode) NULL /* No fdopen() */ -#endif - -#if (defined(_MSC_VER) && (_MSC_VER > 600)) && !defined __INTERIX -# if 
defined(_WIN32_WCE) -# define fdopen(fd,mode) NULL /* No fdopen() */ -# ifndef _PTRDIFF_T_DEFINED - typedef int ptrdiff_t; -# define _PTRDIFF_T_DEFINED -# endif -# else -# define fdopen(fd,type) _fdopen(fd,type) -# endif -#endif - -#if defined(__BORLANDC__) - #pragma warn -8004 - #pragma warn -8008 - #pragma warn -8066 -#endif - -/* provide prototypes for these when building zlib without LFS */ -#if !defined(_LARGEFILE64_SOURCE) || _LFS64_LARGEFILE-0 == 0 - ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off_t)); - ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off_t)); -#endif - - /* common defaults */ - -#ifndef OS_CODE -# define OS_CODE 0x03 /* assume Unix */ -#endif - -#ifndef F_OPEN -# define F_OPEN(name, mode) fopen((name), (mode)) -#endif - - /* functions */ - -#if defined(STDC99) || (defined(__TURBOC__) && __TURBOC__ >= 0x550) -# ifndef HAVE_VSNPRINTF -# define HAVE_VSNPRINTF -# endif -#endif -#if defined(__CYGWIN__) -# ifndef HAVE_VSNPRINTF -# define HAVE_VSNPRINTF -# endif -#endif -#ifndef HAVE_VSNPRINTF -# ifdef MSDOS - /* vsnprintf may exist on some MS-DOS compilers (DJGPP?), - but for now we just assume it doesn't. */ -# define NO_vsnprintf -# endif -# ifdef __TURBOC__ -# define NO_vsnprintf -# endif -# ifdef WIN32 - /* In Win32, vsnprintf is available as the "non-ANSI" _vsnprintf. */ -# if !defined(vsnprintf) && !defined(NO_vsnprintf) -# if !defined(_MSC_VER) || ( defined(_MSC_VER) && _MSC_VER < 1500 ) -# define vsnprintf _vsnprintf -# endif -# endif -# endif -# ifdef __SASC -# define NO_vsnprintf -# endif -#endif -#ifdef VMS -# define NO_vsnprintf -#endif - -#if defined(pyr) -# define NO_MEMCPY -#endif -#if defined(SMALL_MEDIUM) && !defined(_MSC_VER) && !defined(__SC__) - /* Use our own functions for small and medium model with MSC <= 5.0. - * You may have to use the same strategy for Borland C (untested). - * The __SC__ check is for Symantec. 
- */ -# define NO_MEMCPY -#endif -#if defined(STDC) && !defined(HAVE_MEMCPY) && !defined(NO_MEMCPY) -# define HAVE_MEMCPY -#endif -#ifdef HAVE_MEMCPY -# ifdef SMALL_MEDIUM /* MSDOS small or medium model */ -# define zmemcpy _fmemcpy -# define zmemcmp _fmemcmp -# define zmemzero(dest, len) _fmemset(dest, 0, len) -# else -# define zmemcpy memcpy -# define zmemcmp memcmp -# define zmemzero(dest, len) memset(dest, 0, len) -# endif -#else - void ZLIB_INTERNAL zmemcpy OF((Bytef* dest, const Bytef* source, uInt len)); - int ZLIB_INTERNAL zmemcmp OF((const Bytef* s1, const Bytef* s2, uInt len)); - void ZLIB_INTERNAL zmemzero OF((Bytef* dest, uInt len)); -#endif - -/* Diagnostic functions */ -#ifdef DEBUG -# include - extern int ZLIB_INTERNAL z_verbose; - extern void ZLIB_INTERNAL z_error OF((char *m)); -# define Assert(cond,msg) {if(!(cond)) z_error(msg);} -# define Trace(x) {if (z_verbose>=0) fprintf x ;} -# define Tracev(x) {if (z_verbose>0) fprintf x ;} -# define Tracevv(x) {if (z_verbose>1) fprintf x ;} -# define Tracec(c,x) {if (z_verbose>0 && (c)) fprintf x ;} -# define Tracecv(c,x) {if (z_verbose>1 && (c)) fprintf x ;} -#else -# define Assert(cond,msg) -# define Trace(x) -# define Tracev(x) -# define Tracevv(x) -# define Tracec(c,x) -# define Tracecv(c,x) -#endif - - -voidpf ZLIB_INTERNAL zcalloc OF((voidpf opaque, unsigned items, - unsigned size)); -void ZLIB_INTERNAL zcfree OF((voidpf opaque, voidpf ptr)); - -#define ZALLOC(strm, items, size) \ - (*((strm)->zalloc))((strm)->opaque, (items), (size)) -#define ZFREE(strm, addr) (*((strm)->zfree))((strm)->opaque, (voidpf)(addr)) -#define TRY_FREE(s, p) {if (p) ZFREE(s, p);} - -#endif /* ZUTIL_H */ diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar6=/Codec/Compression/GZip.hs cabal-install-1.22-1.22.9.0/=unpacked-tar6=/Codec/Compression/GZip.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar6=/Codec/Compression/GZip.hs 2014-11-18 11:13:10.000000000 +0000 +++ 
cabal-install-1.22-1.22.9.0/=unpacked-tar6=/Codec/Compression/GZip.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,130 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Copyright : (c) 2006-2008 Duncan Coutts --- License : BSD-style --- --- Maintainer : duncan@haskell.org --- Stability : provisional --- Portability : portable (H98 + FFI) --- --- Compression and decompression of data streams in the gzip format. --- --- The format is described in detail in RFC #1952: --- --- --- See also the zlib home page: --- ------------------------------------------------------------------------------ -module Codec.Compression.GZip ( - - -- | This module provides pure functions for compressing and decompressing - -- streams of data in the gzip format and represented by lazy 'ByteString's. - -- This makes it easy to use either in memory or with disk or network IO. - -- - -- For example a simple gzip compression program is just: - -- - -- > import qualified Data.ByteString.Lazy as ByteString - -- > import qualified Codec.Compression.GZip as GZip - -- > - -- > main = ByteString.interact GZip.compress - -- - -- Or you could lazily read in and decompress a @.gz@ file using: - -- - -- > content <- fmap GZip.decompress (readFile file) - -- - - -- * Simple compression and decompression - compress, - decompress, - - -- * Extended api with control over compression parameters - compressWith, - decompressWith, - - CompressParams(..), defaultCompressParams, - DecompressParams(..), defaultDecompressParams, - - -- ** The compression parameter types - CompressionLevel(..), - defaultCompression, - noCompression, - bestSpeed, - bestCompression, - compressionLevel, - Method(..), - deflateMethod, - WindowBits(..), - defaultWindowBits, - windowBits, - MemoryLevel(..), - defaultMemoryLevel, - minMemoryLevel, - maxMemoryLevel, - memoryLevel, - CompressionStrategy(..), - defaultStrategy, - filteredStrategy, - huffmanOnlyStrategy, - - ) where - -import 
Data.ByteString.Lazy (ByteString) - -import qualified Codec.Compression.Zlib.Internal as Internal -import Codec.Compression.Zlib.Internal hiding (compress, decompress) - - --- | Decompress a stream of data in the gzip format. --- --- There are a number of errors that can occur. In each case an exception will --- be thrown. The possible error conditions are: --- --- * if the stream does not start with a valid gzip header --- --- * if the compressed stream is corrupted --- --- * if the compressed stream ends permaturely --- --- Note that the decompression is performed /lazily/. Errors in the data stream --- may not be detected until the end of the stream is demanded (since it is --- only at the end that the final checksum can be checked). If this is --- important to you, you must make sure to consume the whole decompressed --- stream before doing any IO action that depends on it. --- -decompress :: ByteString -> ByteString -decompress = decompressWith defaultDecompressParams - - --- | Like 'decompress' but with the ability to specify various decompression --- parameters. Typical usage: --- --- > decompressWith defaultCompressParams { ... } --- -decompressWith :: DecompressParams -> ByteString -> ByteString -decompressWith = Internal.decompress gzipFormat - - --- | Compress a stream of data into the gzip format. --- --- This uses the default compression parameters. In partiular it uses the --- default compression level which favours a higher compression ratio over --- compression speed, though it does not use the maximum compression level. --- --- Use 'compressWith' to adjust the compression level or other compression --- parameters. --- -compress :: ByteString -> ByteString -compress = compressWith defaultCompressParams - - --- | Like 'compress' but with the ability to specify various compression --- parameters. Typical usage: --- --- > compressWith defaultCompressParams { ... 
} --- --- In particular you can set the compression level: --- --- > compressWith defaultCompressParams { compressLevel = BestCompression } --- -compressWith :: CompressParams -> ByteString -> ByteString -compressWith = Internal.compress gzipFormat diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar6=/Codec/Compression/Zlib/Internal.hs cabal-install-1.22-1.22.9.0/=unpacked-tar6=/Codec/Compression/Zlib/Internal.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar6=/Codec/Compression/Zlib/Internal.hs 2014-11-18 11:13:10.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar6=/Codec/Compression/Zlib/Internal.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,469 +0,0 @@ -{-# LANGUAGE CPP #-} ------------------------------------------------------------------------------ --- | --- Copyright : (c) 2006-2008 Duncan Coutts --- License : BSD-style --- --- Maintainer : duncan@haskell.org --- Stability : provisional --- Portability : portable (H98 + FFI) --- --- Pure stream based interface to lower level zlib wrapper --- ------------------------------------------------------------------------------ -module Codec.Compression.Zlib.Internal ( - - -- * Compression - compress, - CompressParams(..), - defaultCompressParams, - - -- * Decompression - decompress, - DecompressParams(..), - defaultDecompressParams, - - -- * The compression parameter types - Stream.Format(..), - Stream.gzipFormat, - Stream.zlibFormat, - Stream.rawFormat, - Stream.gzipOrZlibFormat, - Stream.CompressionLevel(..), - Stream.defaultCompression, - Stream.noCompression, - Stream.bestSpeed, - Stream.bestCompression, - Stream.compressionLevel, - Stream.Method(..), - Stream.deflateMethod, - Stream.WindowBits(..), - Stream.defaultWindowBits, - Stream.windowBits, - Stream.MemoryLevel(..), - Stream.defaultMemoryLevel, - Stream.minMemoryLevel, - Stream.maxMemoryLevel, - Stream.memoryLevel, - Stream.CompressionStrategy(..), - Stream.defaultStrategy, - Stream.filteredStrategy, - Stream.huffmanOnlyStrategy, - - -- * 
Low-level API to get explicit error reports - decompressWithErrors, - DecompressStream(..), - DecompressError(..), - foldDecompressStream, - fromDecompressStream, - ) where - -import Prelude hiding (length) -import Control.Monad (when) -import Control.Exception (assert) -import qualified Data.ByteString.Lazy as L -import qualified Data.ByteString.Lazy.Internal as L -import qualified Data.ByteString.Internal as S - -import qualified Codec.Compression.Zlib.Stream as Stream -import Codec.Compression.Zlib.Stream (Stream) - --- | The full set of parameters for compression. The defaults are --- 'defaultCompressParams'. --- --- The 'compressBufferSize' is the size of the first output buffer containing --- the compressed data. If you know an approximate upper bound on the size of --- the compressed data then setting this parameter can save memory. The default --- compression output buffer size is @16k@. If your extimate is wrong it does --- not matter too much, the default buffer size will be used for the remaining --- chunks. --- -data CompressParams = CompressParams { - compressLevel :: !Stream.CompressionLevel, - compressMethod :: !Stream.Method, - compressWindowBits :: !Stream.WindowBits, - compressMemoryLevel :: !Stream.MemoryLevel, - compressStrategy :: !Stream.CompressionStrategy, - compressBufferSize :: !Int, - compressDictionary :: Maybe S.ByteString -} - --- | The full set of parameters for decompression. The defaults are --- 'defaultDecompressParams'. --- --- The 'decompressBufferSize' is the size of the first output buffer, --- containing the uncompressed data. If you know an exact or approximate upper --- bound on the size of the decompressed data then setting this parameter can --- save memory. The default decompression output buffer size is @32k@. If your --- extimate is wrong it does not matter too much, the default buffer size will --- be used for the remaining chunks. 
--- --- One particular use case for setting the 'decompressBufferSize' is if you --- know the exact size of the decompressed data and want to produce a strict --- 'Data.ByteString.ByteString'. The compression and deccompression functions --- use lazy 'Data.ByteString.Lazy.ByteString's but if you set the --- 'decompressBufferSize' correctly then you can generate a lazy --- 'Data.ByteString.Lazy.ByteString' with exactly one chunk, which can be --- converted to a strict 'Data.ByteString.ByteString' in @O(1)@ time using --- @'Data.ByteString.concat' . 'Data.ByteString.Lazy.toChunks'@. --- -data DecompressParams = DecompressParams { - decompressWindowBits :: !Stream.WindowBits, - decompressBufferSize :: !Int, - decompressDictionary :: Maybe S.ByteString -} - --- | The default set of parameters for compression. This is typically used with --- the @compressWith@ function with specific parameters overridden. --- -defaultCompressParams :: CompressParams -defaultCompressParams = CompressParams { - compressLevel = Stream.defaultCompression, - compressMethod = Stream.deflateMethod, - compressWindowBits = Stream.defaultWindowBits, - compressMemoryLevel = Stream.defaultMemoryLevel, - compressStrategy = Stream.defaultStrategy, - compressBufferSize = defaultCompressBufferSize, - compressDictionary = Nothing -} - --- | The default set of parameters for decompression. This is typically used with --- the @compressWith@ function with specific parameters overridden. --- -defaultDecompressParams :: DecompressParams -defaultDecompressParams = DecompressParams { - decompressWindowBits = Stream.defaultWindowBits, - decompressBufferSize = defaultDecompressBufferSize, - decompressDictionary = Nothing -} - --- | The default chunk sizes for the output of compression and decompression --- are 16k and 32k respectively (less a small accounting overhead). 
--- -defaultCompressBufferSize, defaultDecompressBufferSize :: Int -defaultCompressBufferSize = 16 * 1024 - L.chunkOverhead -defaultDecompressBufferSize = 32 * 1024 - L.chunkOverhead - --- | A sequence of chunks of data produced from decompression. --- --- The difference from a simple list is that it contains a representation of --- errors as data rather than as exceptions. This allows you to handle error --- conditions explicitly. --- -data DecompressStream = StreamEnd - | StreamChunk S.ByteString DecompressStream - - -- | An error code and a human readable error message. - | StreamError DecompressError String - --- | The possible error cases when decompressing a stream. --- -data DecompressError = - -- | The compressed data stream ended prematurely. This may happen if the - -- input data stream was truncated. - TruncatedInput - - -- | It is possible to do zlib compression with a custom dictionary. This - -- allows slightly higher compression ratios for short files. However such - -- compressed streams require the same dictionary when decompressing. This - -- error is for when we encounter a compressed stream that needs a - -- dictionary, and it's not provided. - | DictionaryRequired - - -- | If the compressed data stream is corrupted in any way then you will - -- get this error, for example if the input data just isn't a compressed - -- zlib data stream. In particular if the data checksum turns out to be - -- wrong then you will get all the decompressed data but this error at the - -- end, instead of the normal sucessful 'StreamEnd'. - | DataError - --- | Fold an 'DecompressionStream'. Just like 'foldr' but with an extra error --- case. 
For example to convert to a list and translate the errors into --- exceptions: --- --- > foldDecompressStream (:) [] (\code msg -> error msg) --- -foldDecompressStream :: (S.ByteString -> a -> a) -> a - -> (DecompressError -> String -> a) - -> DecompressStream -> a -foldDecompressStream chunk end err = fold - where - fold StreamEnd = end - fold (StreamChunk bs stream) = chunk bs (fold stream) - fold (StreamError code msg) = err code msg - --- | Convert a 'DecompressStream' to a lazy 'ByteString'. If any decompression --- errors are encountered then they are thrown as exceptions. --- --- This is a special case of 'foldDecompressStream'. --- -fromDecompressStream :: DecompressStream -> L.ByteString -fromDecompressStream = - foldDecompressStream L.Chunk L.Empty - (\_code msg -> error ("Codec.Compression.Zlib: " ++ msg)) - ---TODO: throw DecompressError as an Exception class type and document that it --- does this. - --- | Compress a data stream. --- --- There are no expected error conditions. All input data streams are valid. It --- is possible for unexpected errors to occur, such as running out of memory, --- or finding the wrong version of the zlib C library, these are thrown as --- exceptions. 
--- -compress - :: Stream.Format - -> CompressParams - -> L.ByteString - -> L.ByteString -compress format - (CompressParams compLevel method bits memLevel strategy initChunkSize mdict) - input = - L.fromChunks $ Stream.run $ do - Stream.deflateInit format compLevel method bits memLevel strategy - setDictionary mdict - case L.toChunks input of - [] -> fillBuffers 20 [] --gzip header is 20 bytes, others even smaller - S.PS inFPtr offset length : chunks -> do - Stream.pushInputBuffer inFPtr offset length - r <- fillBuffers initChunkSize chunks - return r - - where - -- we flick between two states: - -- * where one or other buffer is empty - -- - in which case we refill one or both - -- * where both buffers are non-empty - -- - in which case we compress until a buffer is empty - - fillBuffers :: Int - -> [S.ByteString] - -> Stream [S.ByteString] - fillBuffers outChunkSize inChunks = do -#ifdef DEBUG - Stream.consistencyCheck -#endif - - -- in this state there are two possabilities: - -- * no outbut buffer space is available - -- - in which case we must make more available - -- * no input buffer is available - -- - in which case we must supply more - inputBufferEmpty <- Stream.inputBufferEmpty - outputBufferFull <- Stream.outputBufferFull - - assert (inputBufferEmpty || outputBufferFull) $ return () - - when outputBufferFull $ do - outFPtr <- Stream.unsafeLiftIO (S.mallocByteString outChunkSize) - Stream.pushOutputBuffer outFPtr 0 outChunkSize - - if inputBufferEmpty - then case inChunks of - [] -> drainBuffers [] - S.PS inFPtr offset length : inChunks' -> do - Stream.pushInputBuffer inFPtr offset length - drainBuffers inChunks' - else drainBuffers inChunks - - - drainBuffers :: - [S.ByteString] - -> Stream [S.ByteString] - drainBuffers inChunks = do - - inputBufferEmpty' <- Stream.inputBufferEmpty - outputBufferFull' <- Stream.outputBufferFull - assert(not outputBufferFull' - && (null inChunks || not inputBufferEmpty')) $ return () - -- this invariant guarantees we can 
always make forward progress - -- and that therefore a BufferError is impossible - - let flush = if null inChunks then Stream.Finish else Stream.NoFlush - status <- Stream.deflate flush - - case status of - Stream.Ok -> do - outputBufferFull <- Stream.outputBufferFull - if outputBufferFull - then do (outFPtr, offset, length) <- Stream.popOutputBuffer - outChunks <- Stream.unsafeInterleave - (fillBuffers defaultCompressBufferSize inChunks) - return (S.PS outFPtr offset length : outChunks) - else do fillBuffers defaultCompressBufferSize inChunks - - Stream.StreamEnd -> do - inputBufferEmpty <- Stream.inputBufferEmpty - assert inputBufferEmpty $ return () - outputBufferBytesAvailable <- Stream.outputBufferBytesAvailable - if outputBufferBytesAvailable > 0 - then do (outFPtr, offset, length) <- Stream.popOutputBuffer - Stream.finalise - return [S.PS outFPtr offset length] - else do Stream.finalise - return [] - - Stream.Error code msg -> case code of - Stream.BufferError -> fail "BufferError should be impossible!" - Stream.NeedDict _ -> fail "NeedDict is impossible!" - _ -> fail msg - - -- Set the custom dictionary, if we were provided with one - -- and if the format supports it (zlib and raw, not gzip). - setDictionary :: Maybe S.ByteString -> Stream () - setDictionary (Just dict) - | Stream.formatSupportsDictionary format = do - status <- Stream.deflateSetDictionary dict - case status of - Stream.Ok -> return () - Stream.Error _ msg -> fail msg - _ -> fail "error when setting deflate dictionary" - setDictionary _ = return () - - --- | Decompress a data stream. --- --- It will throw an exception if any error is encountered in the input data. If --- you need more control over error handling then use 'decompressWithErrors'. --- -decompress - :: Stream.Format - -> DecompressParams - -> L.ByteString - -> L.ByteString -decompress format params = fromDecompressStream - . 
decompressWithErrors format params - --- | Like 'decompress' but returns a 'DecompressStream' data structure that --- contains an explicit representation of the error conditions that one may --- encounter when decompressing. --- --- Note that in addition to errors in the input data, it is possible for other --- unexpected errors to occur, such as out of memory, or finding the wrong --- version of the zlib C library, these are still thrown as exceptions (because --- representing them as data would make this function impure). --- -decompressWithErrors - :: Stream.Format - -> DecompressParams - -> L.ByteString - -> DecompressStream -decompressWithErrors format (DecompressParams bits initChunkSize mdict) input = - Stream.run $ do - Stream.inflateInit format bits - case L.toChunks input of - [] -> fillBuffers 4 [] --always an error anyway - S.PS inFPtr offset length : chunks -> do - Stream.pushInputBuffer inFPtr offset length - fillBuffers initChunkSize chunks - - where - -- we flick between two states: - -- * where one or other buffer is empty - -- - in which case we refill one or both - -- * where both buffers are non-empty - -- - in which case we compress until a buffer is empty - - fillBuffers :: Int - -> [S.ByteString] - -> Stream DecompressStream - fillBuffers outChunkSize inChunks = do -#ifdef DEBUG - Stream.consistencyCheck -#endif - - -- in this state there are two possabilities: - -- * no outbut buffer space is available - -- - in which case we must make more available - -- * no input buffer is available - -- - in which case we must supply more - inputBufferEmpty <- Stream.inputBufferEmpty - outputBufferFull <- Stream.outputBufferFull - - assert (inputBufferEmpty || outputBufferFull) $ return () - - when outputBufferFull $ do - outFPtr <- Stream.unsafeLiftIO (S.mallocByteString outChunkSize) - Stream.pushOutputBuffer outFPtr 0 outChunkSize - - if inputBufferEmpty - then case inChunks of - [] -> drainBuffers [] - S.PS inFPtr offset length : inChunks' -> do - 
Stream.pushInputBuffer inFPtr offset length - drainBuffers inChunks' - else drainBuffers inChunks - - - drainBuffers :: - [S.ByteString] - -> Stream DecompressStream - drainBuffers inChunks = do - - inputBufferEmpty' <- Stream.inputBufferEmpty - outputBufferFull' <- Stream.outputBufferFull - assert(not outputBufferFull' - && (null inChunks || not inputBufferEmpty')) $ return () - -- this invariant guarantees we can always make forward progress or at - -- least if a BufferError does occur that it must be due to a premature EOF - - status <- Stream.inflate Stream.NoFlush - - case status of - Stream.Ok -> do - outputBufferFull <- Stream.outputBufferFull - if outputBufferFull - then do (outFPtr, offset, length) <- Stream.popOutputBuffer - outChunks <- Stream.unsafeInterleave - (fillBuffers defaultDecompressBufferSize inChunks) - return $ StreamChunk (S.PS outFPtr offset length) outChunks - else do fillBuffers defaultDecompressBufferSize inChunks - - Stream.StreamEnd -> inChunks `seq` finish StreamEnd - -- The decompressor tells us we're done, but that doesn't mean we have - -- consumed all the input (there could be trailing data). But more - -- subtle than that, the decompressor will actually never demand the - -- tail of the input (in the usual case where it's empty) because - -- the zlib and gzip formats know their own length. So we force the - -- tail of the input here because this can be important for closing - -- file handles etc. - - Stream.Error code msg -> case code of - Stream.BufferError -> finish (StreamError TruncatedInput msg') - where msg' = "premature end of compressed stream" - Stream.NeedDict adler -> do - err <- setDictionary adler mdict - case err of - Just streamErr -> finish streamErr - Nothing -> drainBuffers inChunks - Stream.DataError -> finish (StreamError DataError msg) - _ -> fail msg - - -- Note even if we end with an error we still try to flush the last chunk if - -- there is one. The user just has to decide what they want to trust. 
- finish end = do - -- Note that there may be input bytes still available if the stream - -- is embeded in some other data stream. Here we just silently discard - -- any trailing data. - outputBufferBytesAvailable <- Stream.outputBufferBytesAvailable - if outputBufferBytesAvailable > 0 - then do (outFPtr, offset, length) <- Stream.popOutputBuffer - Stream.finalise - return (StreamChunk (S.PS outFPtr offset length) end) - else do Stream.finalise - return end - - setDictionary :: Stream.DictionaryHash -> Maybe S.ByteString - -> Stream (Maybe DecompressStream) - setDictionary _adler Nothing = - return $ Just (StreamError DictionaryRequired "custom dictionary needed") - setDictionary _adler (Just dict) = do - status <- Stream.inflateSetDictionary dict - case status of - Stream.Ok -> return Nothing - Stream.Error Stream.StreamError _ -> - return $ Just (StreamError DictionaryRequired "provided dictionary not valid") - Stream.Error Stream.DataError _ -> - return $ Just (StreamError DictionaryRequired "given dictionary does not match the expected one") - _ -> fail "error when setting inflate dictionary" diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar6=/Codec/Compression/Zlib/Raw.hs cabal-install-1.22-1.22.9.0/=unpacked-tar6=/Codec/Compression/Zlib/Raw.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar6=/Codec/Compression/Zlib/Raw.hs 2014-11-18 11:13:10.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar6=/Codec/Compression/Zlib/Raw.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,70 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Copyright : (c) 2006-2008 Duncan Coutts --- License : BSD-style --- --- Maintainer : duncan@haskell.org --- Stability : provisional --- Portability : portable (H98 + FFI) --- --- Compression and decompression of data streams in the raw deflate format. 
--- --- The format is described in detail in RFC #1951: --- --- --- See also the zlib home page: --- ------------------------------------------------------------------------------ -module Codec.Compression.Zlib.Raw ( - - -- * Simple compression and decompression - compress, - decompress, - - -- * Extended api with control over compression parameters - compressWith, - decompressWith, - - CompressParams(..), defaultCompressParams, - DecompressParams(..), defaultDecompressParams, - - -- ** The compression parameter types - CompressionLevel(..), - defaultCompression, - noCompression, - bestSpeed, - bestCompression, - compressionLevel, - Method(..), - deflateMethod, - WindowBits(..), - defaultWindowBits, - windowBits, - MemoryLevel(..), - defaultMemoryLevel, - minMemoryLevel, - maxMemoryLevel, - memoryLevel, - CompressionStrategy(..), - defaultStrategy, - filteredStrategy, - huffmanOnlyStrategy, - - ) where - -import Data.ByteString.Lazy (ByteString) - -import qualified Codec.Compression.Zlib.Internal as Internal -import Codec.Compression.Zlib.Internal hiding (compress, decompress) - -decompress :: ByteString -> ByteString -decompress = decompressWith defaultDecompressParams - -decompressWith :: DecompressParams -> ByteString -> ByteString -decompressWith = Internal.decompress rawFormat - -compress :: ByteString -> ByteString -compress = compressWith defaultCompressParams - -compressWith :: CompressParams -> ByteString -> ByteString -compressWith = Internal.compress rawFormat diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar6=/Codec/Compression/Zlib/Stream.hsc cabal-install-1.22-1.22.9.0/=unpacked-tar6=/Codec/Compression/Zlib/Stream.hsc --- cabal-install-1.22-1.22.6.0/=unpacked-tar6=/Codec/Compression/Zlib/Stream.hsc 2014-11-18 11:13:10.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar6=/Codec/Compression/Zlib/Stream.hsc 1970-01-01 00:00:00.000000000 +0000 @@ -1,979 +0,0 @@ -{-# LANGUAGE ForeignFunctionInterface #-} 
------------------------------------------------------------------------------ --- | --- Copyright : (c) 2006-2008 Duncan Coutts --- License : BSD-style --- --- Maintainer : duncan@haskell.org --- Stability : provisional --- Portability : portable (H98 + FFI) --- --- Zlib wrapper layer --- ------------------------------------------------------------------------------ -module Codec.Compression.Zlib.Stream ( - - -- * The Zlib state monad - Stream, - run, - unsafeInterleave, - unsafeLiftIO, - finalise, - - -- * Initialisation - deflateInit, - inflateInit, - - -- ** Initialisation parameters - Format(..), - gzipFormat, - zlibFormat, - rawFormat, - gzipOrZlibFormat, - formatSupportsDictionary, - CompressionLevel(..), - defaultCompression, - noCompression, - bestSpeed, - bestCompression, - compressionLevel, - Method(..), - deflateMethod, - WindowBits(..), - defaultWindowBits, - windowBits, - MemoryLevel(..), - defaultMemoryLevel, - minMemoryLevel, - maxMemoryLevel, - memoryLevel, - CompressionStrategy(..), - defaultStrategy, - filteredStrategy, - huffmanOnlyStrategy, - - -- * The buisness - deflate, - inflate, - Status(..), - Flush(..), - ErrorCode(..), - - -- * Buffer management - -- ** Input buffer - pushInputBuffer, - inputBufferEmpty, - - -- ** Output buffer - pushOutputBuffer, - popOutputBuffer, - outputBufferBytesAvailable, - outputBufferSpaceRemaining, - outputBufferFull, - - -- ** Dictionary - deflateSetDictionary, - inflateSetDictionary, - - -- ** Dictionary hashes - DictionaryHash, - dictionaryHash, - zeroDictionaryHash, - -#ifdef DEBUG - -- * Debugging - consistencyCheck, - dump, - trace, -#endif - - ) where - -import Foreign - ( Word8, Ptr, nullPtr, plusPtr, peekByteOff, pokeByteOff, mallocBytes - , ForeignPtr, FinalizerPtr, newForeignPtr_, addForeignPtrFinalizer - , withForeignPtr, touchForeignPtr ) -#if __GLASGOW_HASKELL__ >= 702 -import Foreign.ForeignPtr.Unsafe ( unsafeForeignPtrToPtr ) -import System.IO.Unsafe ( unsafePerformIO ) -#else -import Foreign ( 
unsafeForeignPtrToPtr, unsafePerformIO ) -#endif -#ifdef __GLASGOW_HASKELL__ -import Foreign - ( finalizeForeignPtr ) -#endif -import Foreign.C -import Data.ByteString.Internal (nullForeignPtr) -import qualified Data.ByteString.Unsafe as B -import Data.ByteString (ByteString) -import System.IO.Unsafe (unsafeInterleaveIO) -import Control.Applicative (Applicative(..)) -import Control.Monad (ap,liftM) -import Control.Exception (assert) -#ifdef DEBUG -import System.IO (hPutStrLn, stderr) -#endif - -import Prelude hiding (length) - -#include "zlib.h" - - -pushInputBuffer :: ForeignPtr Word8 -> Int -> Int -> Stream () -pushInputBuffer inBuf' offset length = do - - -- must not push a new input buffer if the last one is not used up - inAvail <- getInAvail - assert (inAvail == 0) $ return () - - -- Now that we're setting a new input buffer, we can be sure that zlib no - -- longer has a reference to the old one. Therefore this is the last point - -- at which the old buffer had to be retained. It's safe to release now. - inBuf <- getInBuf - unsafeLiftIO $ touchForeignPtr inBuf - - -- now set the available input buffer ptr and length - setInBuf inBuf' - setInAvail length - setInNext (unsafeForeignPtrToPtr inBuf' `plusPtr` offset) - -- Note the 'unsafe'. We are passing the raw ptr inside inBuf' to zlib. - -- To make this safe we need to hold on to the ForeignPtr for at least as - -- long as zlib is using the underlying raw ptr. - - -inputBufferEmpty :: Stream Bool -inputBufferEmpty = getInAvail >>= return . (==0) - - -pushOutputBuffer :: ForeignPtr Word8 -> Int -> Int -> Stream () -pushOutputBuffer outBuf' offset length = do - - --must not push a new buffer if there is still data in the old one - outAvail <- getOutAvail - assert (outAvail == 0) $ return () - -- Note that there may still be free space in the output buffer, that's ok, - -- you might not want to bother completely filling the output buffer say if - -- there's only a few free bytes left. 
- - outBuf <- getOutBuf - unsafeLiftIO $ touchForeignPtr outBuf - - -- now set the available input buffer ptr and length - setOutBuf outBuf' - setOutFree length - setOutNext (unsafeForeignPtrToPtr outBuf' `plusPtr` offset) - - setOutOffset offset - setOutAvail 0 - - --- get that part of the output buffer that is currently full --- (might be 0, use outputBufferBytesAvailable to check) --- this may leave some space remaining in the buffer, use --- outputBufferSpaceRemaining to check. -popOutputBuffer :: Stream (ForeignPtr Word8, Int, Int) -popOutputBuffer = do - - outBuf <- getOutBuf - outOffset <- getOutOffset - outAvail <- getOutAvail - - -- there really should be something to pop, otherwise it's silly - assert (outAvail > 0) $ return () - - setOutOffset (outOffset + outAvail) - setOutAvail 0 - - return (outBuf, outOffset, outAvail) - - --- this is the number of bytes available in the output buffer -outputBufferBytesAvailable :: Stream Int -outputBufferBytesAvailable = getOutAvail - - --- you needen't get all the output immediately, you can continue until --- there is no more output space available, this tells you that amount -outputBufferSpaceRemaining :: Stream Int -outputBufferSpaceRemaining = getOutFree - - --- you only need to supply a new buffer when there is no more output buffer --- space remaining -outputBufferFull :: Stream Bool -outputBufferFull = liftM (==0) outputBufferSpaceRemaining - - --- you can only run this when the output buffer is not empty --- you can run it when the input buffer is empty but it doesn't do anything --- after running deflate either the output buffer will be full --- or the input buffer will be empty (or both) -deflate :: Flush -> Stream Status -deflate flush = do - - outFree <- getOutFree - - -- deflate needs free space in the output buffer - assert (outFree > 0) $ return () - - result <- deflate_ flush - outFree' <- getOutFree - - -- number of bytes of extra output there is available as a result of - -- the call to deflate: - 
let outExtra = outFree - outFree' - - outAvail <- getOutAvail - setOutAvail (outAvail + outExtra) - return result - - -inflate :: Flush -> Stream Status -inflate flush = do - - outFree <- getOutFree - - -- inflate needs free space in the output buffer - assert (outFree > 0) $ return () - - result <- inflate_ flush - outFree' <- getOutFree - - -- number of bytes of extra output there is available as a result of - -- the call to inflate: - let outExtra = outFree - outFree' - - outAvail <- getOutAvail - setOutAvail (outAvail + outExtra) - return result - -deflateSetDictionary :: ByteString -> Stream Status -deflateSetDictionary dict = do - err <- withStreamState $ \zstream -> - B.unsafeUseAsCStringLen dict $ \(ptr, len) -> - c_deflateSetDictionary zstream ptr (fromIntegral len) - toStatus err - -inflateSetDictionary :: ByteString -> Stream Status -inflateSetDictionary dict = do - err <- withStreamState $ \zstream -> do - B.unsafeUseAsCStringLen dict $ \(ptr, len) -> - c_inflateSetDictionary zstream ptr (fromIntegral len) - toStatus err - --- | A hash of a custom compression dictionary. These hashes are used by --- zlib as dictionary identifiers. --- (The particular hash function used is Adler32.) --- -newtype DictionaryHash = DictHash CULong - deriving (Eq, Ord, Read, Show) - --- | Update a running 'DictionaryHash'. You can generate a 'DictionaryHash' --- from one or more 'ByteString's by starting from 'zeroDictionaryHash', e.g. --- --- > dictionaryHash zeroDictionaryHash :: ByteString -> DictionaryHash --- --- or --- --- > foldl' dictionaryHash zeroDictionaryHash :: [ByteString] -> DictionaryHash --- -dictionaryHash :: DictionaryHash -> ByteString -> DictionaryHash -dictionaryHash (DictHash adler) dict = - unsafePerformIO $ - B.unsafeUseAsCStringLen dict $ \(ptr, len) -> - liftM DictHash $ c_adler32 adler ptr (fromIntegral len) - --- | A zero 'DictionaryHash' to use as the initial value with 'dictionaryHash'. 
--- -zeroDictionaryHash :: DictionaryHash -zeroDictionaryHash = DictHash 0 - ----------------------------- --- Stream monad --- - -newtype Stream a = Z { - unZ :: ForeignPtr StreamState - -> ForeignPtr Word8 - -> ForeignPtr Word8 - -> Int -> Int - -> IO (ForeignPtr Word8 - ,ForeignPtr Word8 - ,Int, Int, a) - } - -instance Functor Stream where - fmap = liftM - -instance Applicative Stream where - pure = return - (<*>) = ap - -instance Monad Stream where - (>>=) = thenZ --- m >>= f = (m `thenZ` \a -> consistencyCheck `thenZ_` returnZ a) `thenZ` f - (>>) = thenZ_ - return = returnZ - fail = (finalise >>) . failZ - -returnZ :: a -> Stream a -returnZ a = Z $ \_ inBuf outBuf outOffset outLength -> - return (inBuf, outBuf, outOffset, outLength, a) -{-# INLINE returnZ #-} - -thenZ :: Stream a -> (a -> Stream b) -> Stream b -thenZ (Z m) f = - Z $ \stream inBuf outBuf outOffset outLength -> - m stream inBuf outBuf outOffset outLength >>= - \(inBuf', outBuf', outOffset', outLength', a) -> - unZ (f a) stream inBuf' outBuf' outOffset' outLength' -{-# INLINE thenZ #-} - -thenZ_ :: Stream a -> Stream b -> Stream b -thenZ_ (Z m) f = - Z $ \stream inBuf outBuf outOffset outLength -> - m stream inBuf outBuf outOffset outLength >>= - \(inBuf', outBuf', outOffset', outLength', _) -> - unZ f stream inBuf' outBuf' outOffset' outLength' -{-# INLINE thenZ_ #-} - -failZ :: String -> Stream a -failZ msg = Z (\_ _ _ _ _ -> fail ("Codec.Compression.Zlib: " ++ msg)) - -{-# NOINLINE run #-} -run :: Stream a -> a -run (Z m) = unsafePerformIO $ do - ptr <- mallocBytes (#{const sizeof(z_stream)}) - #{poke z_stream, msg} ptr nullPtr - #{poke z_stream, zalloc} ptr nullPtr - #{poke z_stream, zfree} ptr nullPtr - #{poke z_stream, opaque} ptr nullPtr - #{poke z_stream, next_in} ptr nullPtr - #{poke z_stream, next_out} ptr nullPtr - #{poke z_stream, avail_in} ptr (0 :: CUInt) - #{poke z_stream, avail_out} ptr (0 :: CUInt) - stream <- newForeignPtr_ ptr - (_,_,_,_,a) <- m stream nullForeignPtr 
nullForeignPtr 0 0 - return a - --- This is marked as unsafe because run uses unsafePerformIO so anything --- lifted here will end up being unsafePerformIO'd. -unsafeLiftIO :: IO a -> Stream a -unsafeLiftIO m = Z $ \_stream inBuf outBuf outOffset outLength -> do - a <- m - return (inBuf, outBuf, outOffset, outLength, a) - --- It's unsafe because we discard the values here, so if you mutate anything --- between running this and forcing the result then you'll get an inconsistent --- stream state. -unsafeInterleave :: Stream a -> Stream a -unsafeInterleave (Z m) = Z $ \stream inBuf outBuf outOffset outLength -> do - res <- unsafeInterleaveIO (m stream inBuf outBuf outOffset outLength) - let select (_,_,_,_,a) = a - return (inBuf, outBuf, outOffset, outLength, select res) - -getStreamState :: Stream (ForeignPtr StreamState) -getStreamState = Z $ \stream inBuf outBuf outOffset outLength -> do - return (inBuf, outBuf, outOffset, outLength, stream) - -getInBuf :: Stream (ForeignPtr Word8) -getInBuf = Z $ \_stream inBuf outBuf outOffset outLength -> do - return (inBuf, outBuf, outOffset, outLength, inBuf) - -getOutBuf :: Stream (ForeignPtr Word8) -getOutBuf = Z $ \_stream inBuf outBuf outOffset outLength -> do - return (inBuf, outBuf, outOffset, outLength, outBuf) - -getOutOffset :: Stream Int -getOutOffset = Z $ \_stream inBuf outBuf outOffset outLength -> do - return (inBuf, outBuf, outOffset, outLength, outOffset) - -getOutAvail :: Stream Int -getOutAvail = Z $ \_stream inBuf outBuf outOffset outLength -> do - return (inBuf, outBuf, outOffset, outLength, outLength) - -setInBuf :: ForeignPtr Word8 -> Stream () -setInBuf inBuf = Z $ \_stream _ outBuf outOffset outLength -> do - return (inBuf, outBuf, outOffset, outLength, ()) - -setOutBuf :: ForeignPtr Word8 -> Stream () -setOutBuf outBuf = Z $ \_stream inBuf _ outOffset outLength -> do - return (inBuf, outBuf, outOffset, outLength, ()) - -setOutOffset :: Int -> Stream () -setOutOffset outOffset = Z $ \_stream inBuf 
outBuf _ outLength -> do - return (inBuf, outBuf, outOffset, outLength, ()) - -setOutAvail :: Int -> Stream () -setOutAvail outLength = Z $ \_stream inBuf outBuf outOffset _ -> do - return (inBuf, outBuf, outOffset, outLength, ()) - ----------------------------- --- Debug stuff --- - -#ifdef DEBUG -trace :: String -> Stream () -trace = unsafeLiftIO . hPutStrLn stderr - -dump :: Stream () -dump = do - inNext <- getInNext - inAvail <- getInAvail - - outNext <- getOutNext - outFree <- getOutFree - outAvail <- getOutAvail - outOffset <- getOutOffset - - unsafeLiftIO $ hPutStrLn stderr $ - "Stream {\n" ++ - " inNext = " ++ show inNext ++ ",\n" ++ - " inAvail = " ++ show inAvail ++ ",\n" ++ - "\n" ++ - " outNext = " ++ show outNext ++ ",\n" ++ - " outFree = " ++ show outFree ++ ",\n" ++ - " outAvail = " ++ show outAvail ++ ",\n" ++ - " outOffset = " ++ show outOffset ++ "\n" ++ - "}" - - consistencyCheck - -consistencyCheck :: Stream () -consistencyCheck = do - - outBuf <- getOutBuf - outOffset <- getOutOffset - outAvail <- getOutAvail - outNext <- getOutNext - - let outBufPtr = unsafeForeignPtrToPtr outBuf - - assert (outBufPtr `plusPtr` (outOffset + outAvail) == outNext) $ return () -#endif - - ----------------------------- --- zlib wrapper layer --- - -data Status = - Ok - | StreamEnd - | Error ErrorCode String - -data ErrorCode = - NeedDict DictionaryHash - | FileError - | StreamError - | DataError - | MemoryError - | BufferError -- ^ No progress was possible or there was not enough room in - -- the output buffer when 'Finish' is used. Note that - -- 'BuferError' is not fatal, and 'inflate' can be called - -- again with more input and more output space to continue. 
- | VersionError - | Unexpected - -toStatus :: CInt -> Stream Status -toStatus errno = case errno of - (#{const Z_OK}) -> return Ok - (#{const Z_STREAM_END}) -> return StreamEnd - (#{const Z_NEED_DICT}) -> do - adler <- withStreamPtr (#{peek z_stream, adler}) - err (NeedDict (DictHash adler)) "custom dictionary needed" - (#{const Z_BUF_ERROR}) -> err BufferError "buffer error" - (#{const Z_ERRNO}) -> err FileError "file error" - (#{const Z_STREAM_ERROR}) -> err StreamError "stream error" - (#{const Z_DATA_ERROR}) -> err DataError "data error" - (#{const Z_MEM_ERROR}) -> err MemoryError "insufficient memory" - (#{const Z_VERSION_ERROR}) -> err VersionError "incompatible zlib version" - other -> return $ Error Unexpected - ("unexpected zlib status: " ++ show other) - where - err errCode altMsg = liftM (Error errCode) $ do - msgPtr <- withStreamPtr (#{peek z_stream, msg}) - if msgPtr /= nullPtr - then unsafeLiftIO (peekCAString msgPtr) - else return altMsg - -failIfError :: CInt -> Stream () -failIfError errno = toStatus errno >>= \status -> case status of - (Error _ msg) -> fail msg - _ -> return () - - -data Flush = - NoFlush - | SyncFlush - | FullFlush - | Finish --- | Block -- only available in zlib 1.2 and later, uncomment if you need it. - -fromFlush :: Flush -> CInt -fromFlush NoFlush = #{const Z_NO_FLUSH} -fromFlush SyncFlush = #{const Z_SYNC_FLUSH} -fromFlush FullFlush = #{const Z_FULL_FLUSH} -fromFlush Finish = #{const Z_FINISH} --- fromFlush Block = #{const Z_BLOCK} - - --- | The format used for compression or decompression. There are three --- variations. --- -data Format = GZip | Zlib | Raw | GZipOrZlib - deriving Eq - -{-# DEPRECATED GZip "Use gzipFormat. Format constructors will be hidden in version 0.7" #-} -{-# DEPRECATED Zlib "Use zlibFormat. Format constructors will be hidden in version 0.7" #-} -{-# DEPRECATED Raw "Use rawFormat. Format constructors will be hidden in version 0.7" #-} -{-# DEPRECATED GZipOrZlib "Use gzipOrZlibFormat. 
Format constructors will be hidden in version 0.7" #-} - --- | The gzip format uses a header with a checksum and some optional meta-data --- about the compressed file. It is intended primarily for compressing --- individual files but is also sometimes used for network protocols such as --- HTTP. The format is described in detail in RFC #1952 --- --- -gzipFormat :: Format -gzipFormat = GZip - --- | The zlib format uses a minimal header with a checksum but no other --- meta-data. It is especially designed for use in network protocols. The --- format is described in detail in RFC #1950 --- --- -zlibFormat :: Format -zlibFormat = Zlib - --- | The \'raw\' format is just the compressed data stream without any --- additional header, meta-data or data-integrity checksum. The format is --- described in detail in RFC #1951 --- -rawFormat :: Format -rawFormat = Raw - --- | This is not a format as such. It enabled zlib or gzip decoding with --- automatic header detection. This only makes sense for decompression. --- -gzipOrZlibFormat :: Format -gzipOrZlibFormat = GZipOrZlib - -formatSupportsDictionary :: Format -> Bool -formatSupportsDictionary Zlib = True -formatSupportsDictionary Raw = True -formatSupportsDictionary _ = False - --- | The compression method --- -data Method = Deflated - -{-# DEPRECATED Deflated "Use deflateMethod. Method constructors will be hidden in version 0.7" #-} - --- | \'Deflate\' is the only method supported in this version of zlib. --- Indeed it is likely to be the only method that ever will be supported. --- -deflateMethod :: Method -deflateMethod = Deflated - -fromMethod :: Method -> CInt -fromMethod Deflated = #{const Z_DEFLATED} - - --- | The compression level parameter controls the amount of compression. This --- is a trade-off between the amount of compression and the time required to do --- the compression. 
--- -data CompressionLevel = - DefaultCompression - | NoCompression - | BestSpeed - | BestCompression - | CompressionLevel Int - -{-# DEPRECATED DefaultCompression "Use defaultCompression. CompressionLevel constructors will be hidden in version 0.7" #-} -{-# DEPRECATED NoCompression "Use noCompression. CompressionLevel constructors will be hidden in version 0.7" #-} -{-# DEPRECATED BestSpeed "Use bestSpeed. CompressionLevel constructors will be hidden in version 0.7" #-} -{-# DEPRECATED BestCompression "Use bestCompression. CompressionLevel constructors will be hidden in version 0.7" #-} ---FIXME: cannot deprecate constructor named the same as the type -{- DEPRECATED CompressionLevel "Use compressionLevel. CompressionLevel constructors will be hidden in version 0.7" -} - --- | The default compression level is 6 (that is, biased towards higher --- compression at expense of speed). -defaultCompression :: CompressionLevel -defaultCompression = DefaultCompression - --- | No compression, just a block copy. -noCompression :: CompressionLevel -noCompression = CompressionLevel 0 - --- | The fastest compression method (less compression) -bestSpeed :: CompressionLevel -bestSpeed = CompressionLevel 1 - --- | The slowest compression method (best compression). -bestCompression :: CompressionLevel -bestCompression = CompressionLevel 9 - --- | A specific compression level between 0 and 9. -compressionLevel :: Int -> CompressionLevel -compressionLevel n - | n >= 0 && n <= 9 = CompressionLevel n - | otherwise = error "CompressionLevel must be in the range 0..9" - -fromCompressionLevel :: CompressionLevel -> CInt -fromCompressionLevel DefaultCompression = -1 -fromCompressionLevel NoCompression = 0 -fromCompressionLevel BestSpeed = 1 -fromCompressionLevel BestCompression = 9 -fromCompressionLevel (CompressionLevel n) - | n >= 0 && n <= 9 = fromIntegral n - | otherwise = error "CompressLevel must be in the range 1..9" - - --- | This specifies the size of the compression window. 
Larger values of this --- parameter result in better compression at the expense of higher memory --- usage. --- --- The compression window size is the value of the the window bits raised to --- the power 2. The window bits must be in the range @8..15@ which corresponds --- to compression window sizes of 256b to 32Kb. The default is 15 which is also --- the maximum size. --- --- The total amount of memory used depends on the window bits and the --- 'MemoryLevel'. See the 'MemoryLevel' for the details. --- -data WindowBits = WindowBits Int - | DefaultWindowBits -- This constructor must be last to make - -- the Ord instance work. The Ord instance - -- is defined with and used by the tests. - -- It makse sense because the default value - -- is is also the max value at 15. - -{-# DEPRECATED DefaultWindowBits "Use defaultWindowBits. WindowBits constructors will be hidden in version 0.7" #-} ---FIXME: cannot deprecate constructor named the same as the type -{- DEPRECATED WindowBits "Use windowBits. WindowBits constructors will be hidden in version 0.7" -} - --- | The default 'WindowBits' is 15 which is also the maximum size. --- -defaultWindowBits :: WindowBits -defaultWindowBits = WindowBits 15 - --- | A specific compression window size, specified in bits in the range @8..15@ --- -windowBits :: Int -> WindowBits -windowBits n - | n >= 8 && n <= 15 = WindowBits n - | otherwise = error "WindowBits must be in the range 8..15" - -fromWindowBits :: Format -> WindowBits-> CInt -fromWindowBits format bits = (formatModifier format) (checkWindowBits bits) - where checkWindowBits DefaultWindowBits = 15 - checkWindowBits (WindowBits n) - | n >= 8 && n <= 15 = fromIntegral n - | otherwise = error "WindowBits must be in the range 8..15" - formatModifier Zlib = id - formatModifier GZip = (+16) - formatModifier GZipOrZlib = (+32) - formatModifier Raw = negate - - --- | The 'MemoryLevel' parameter specifies how much memory should be allocated --- for the internal compression state. 
It is a tradoff between memory usage, --- compression ratio and compression speed. Using more memory allows faster --- compression and a better compression ratio. --- --- The total amount of memory used for compression depends on the 'WindowBits' --- and the 'MemoryLevel'. For decompression it depends only on the --- 'WindowBits'. The totals are given by the functions: --- --- > compressTotal windowBits memLevel = 4 * 2^windowBits + 512 * 2^memLevel --- > decompressTotal windowBits = 2^windowBits --- --- For example, for compression with the default @windowBits = 15@ and --- @memLevel = 8@ uses @256Kb@. So for example a network server with 100 --- concurrent compressed streams would use @25Mb@. The memory per stream can be --- halved (at the cost of somewhat degraded and slower compressionby) by --- reducing the @windowBits@ and @memLevel@ by one. --- --- Decompression takes less memory, the default @windowBits = 15@ corresponds --- to just @32Kb@. --- -data MemoryLevel = - DefaultMemoryLevel - | MinMemoryLevel - | MaxMemoryLevel - | MemoryLevel Int - -{-# DEPRECATED DefaultMemoryLevel "Use defaultMemoryLevel. MemoryLevel constructors will be hidden in version 0.7" #-} -{-# DEPRECATED MinMemoryLevel "Use minMemoryLevel. MemoryLevel constructors will be hidden in version 0.7" #-} -{-# DEPRECATED MaxMemoryLevel "Use maxMemoryLevel. MemoryLevel constructors will be hidden in version 0.7" #-} ---FIXME: cannot deprecate constructor named the same as the type -{- DEPRECATED MemoryLevel "Use memoryLevel. MemoryLevel constructors will be hidden in version 0.7" -} - --- | The default memory level. (Equivalent to @'memoryLevel' 8@) --- -defaultMemoryLevel :: MemoryLevel -defaultMemoryLevel = MemoryLevel 8 - --- | Use minimum memory. This is slow and reduces the compression ratio. --- (Equivalent to @'memoryLevel' 1@) --- -minMemoryLevel :: MemoryLevel -minMemoryLevel = MemoryLevel 1 - --- | Use maximum memory for optimal compression speed. 
--- (Equivalent to @'memoryLevel' 9@) --- -maxMemoryLevel :: MemoryLevel -maxMemoryLevel = MemoryLevel 9 - --- | A specific level in the range @1..9@ --- -memoryLevel :: Int -> MemoryLevel -memoryLevel n - | n >= 1 && n <= 9 = MemoryLevel n - | otherwise = error "MemoryLevel must be in the range 1..9" - -fromMemoryLevel :: MemoryLevel -> CInt -fromMemoryLevel DefaultMemoryLevel = 8 -fromMemoryLevel MinMemoryLevel = 1 -fromMemoryLevel MaxMemoryLevel = 9 -fromMemoryLevel (MemoryLevel n) - | n >= 1 && n <= 9 = fromIntegral n - | otherwise = error "MemoryLevel must be in the range 1..9" - - --- | The strategy parameter is used to tune the compression algorithm. --- --- The strategy parameter only affects the compression ratio but not the --- correctness of the compressed output even if it is not set appropriately. --- -data CompressionStrategy = - DefaultStrategy - | Filtered - | HuffmanOnly - -{- --- -- only available in zlib 1.2 and later, uncomment if you need it. - | RLE -- ^ Use 'RLE' to limit match distances to one (run-length - -- encoding). 'RLE' is designed to be almost as fast as - -- 'HuffmanOnly', but give better compression for PNG - -- image data. - | Fixed -- ^ 'Fixed' prevents the use of dynamic Huffman codes, - -- allowing for a simpler decoder for special applications. --} - -{-# DEPRECATED DefaultStrategy "Use defaultStrategy. CompressionStrategy constructors will be hidden in version 0.7" #-} -{-# DEPRECATED Filtered "Use filteredStrategy. CompressionStrategy constructors will be hidden in version 0.7" #-} -{-# DEPRECATED HuffmanOnly "Use huffmanOnlyStrategy. CompressionStrategy constructors will be hidden in version 0.7" #-} - --- | Use this default compression strategy for normal data. --- -defaultStrategy :: CompressionStrategy -defaultStrategy = DefaultStrategy - --- | Use the filtered compression strategy for data produced by a filter (or --- predictor). Filtered data consists mostly of small values with a somewhat --- random distribution. 
In this case, the compression algorithm is tuned to --- compress them better. The effect of this strategy is to force more Huffman --- coding and less string matching; it is somewhat intermediate between --- 'defaultCompressionStrategy' and 'huffmanOnlyCompressionStrategy'. --- -filteredStrategy :: CompressionStrategy -filteredStrategy = Filtered - --- | Use the Huffman-only compression strategy to force Huffman encoding only --- (no string match). --- -huffmanOnlyStrategy :: CompressionStrategy -huffmanOnlyStrategy = HuffmanOnly - - -fromCompressionStrategy :: CompressionStrategy -> CInt -fromCompressionStrategy DefaultStrategy = #{const Z_DEFAULT_STRATEGY} -fromCompressionStrategy Filtered = #{const Z_FILTERED} -fromCompressionStrategy HuffmanOnly = #{const Z_HUFFMAN_ONLY} ---fromCompressionStrategy RLE = #{const Z_RLE} ---fromCompressionStrategy Fixed = #{const Z_FIXED} - - -withStreamPtr :: (Ptr StreamState -> IO a) -> Stream a -withStreamPtr f = do - stream <- getStreamState - unsafeLiftIO (withForeignPtr stream f) - -withStreamState :: (StreamState -> IO a) -> Stream a -withStreamState f = do - stream <- getStreamState - unsafeLiftIO (withForeignPtr stream (f . 
StreamState)) - -setInAvail :: Int -> Stream () -setInAvail val = withStreamPtr $ \ptr -> - #{poke z_stream, avail_in} ptr (fromIntegral val :: CUInt) - -getInAvail :: Stream Int -getInAvail = liftM (fromIntegral :: CUInt -> Int) $ - withStreamPtr (#{peek z_stream, avail_in}) - -setInNext :: Ptr Word8 -> Stream () -setInNext val = withStreamPtr (\ptr -> #{poke z_stream, next_in} ptr val) - -#ifdef DEBUG -getInNext :: Stream (Ptr Word8) -getInNext = withStreamPtr (#{peek z_stream, next_in}) -#endif - -setOutFree :: Int -> Stream () -setOutFree val = withStreamPtr $ \ptr -> - #{poke z_stream, avail_out} ptr (fromIntegral val :: CUInt) - -getOutFree :: Stream Int -getOutFree = liftM (fromIntegral :: CUInt -> Int) $ - withStreamPtr (#{peek z_stream, avail_out}) - -setOutNext :: Ptr Word8 -> Stream () -setOutNext val = withStreamPtr (\ptr -> #{poke z_stream, next_out} ptr val) - -#ifdef DEBUG -getOutNext :: Stream (Ptr Word8) -getOutNext = withStreamPtr (#{peek z_stream, next_out}) -#endif - -inflateInit :: Format -> WindowBits -> Stream () -inflateInit format bits = do - checkFormatSupported format - err <- withStreamState $ \zstream -> - c_inflateInit2 zstream (fromIntegral (fromWindowBits format bits)) - failIfError err - getStreamState >>= unsafeLiftIO . addForeignPtrFinalizer c_inflateEnd - -deflateInit :: Format - -> CompressionLevel - -> Method - -> WindowBits - -> MemoryLevel - -> CompressionStrategy - -> Stream () -deflateInit format compLevel method bits memLevel strategy = do - checkFormatSupported format - err <- withStreamState $ \zstream -> - c_deflateInit2 zstream - (fromCompressionLevel compLevel) - (fromMethod method) - (fromWindowBits format bits) - (fromMemoryLevel memLevel) - (fromCompressionStrategy strategy) - failIfError err - getStreamState >>= unsafeLiftIO . 
addForeignPtrFinalizer c_deflateEnd - -inflate_ :: Flush -> Stream Status -inflate_ flush = do - err <- withStreamState $ \zstream -> - c_inflate zstream (fromFlush flush) - toStatus err - -deflate_ :: Flush -> Stream Status -deflate_ flush = do - err <- withStreamState $ \zstream -> - c_deflate zstream (fromFlush flush) - toStatus err - --- | This never needs to be used as the stream's resources will be released --- automatically when no longer needed, however this can be used to release --- them early. Only use this when you can guarantee that the stream will no --- longer be needed, for example if an error occurs or if the stream ends. --- -finalise :: Stream () -#ifdef __GLASGOW_HASKELL__ ---TODO: finalizeForeignPtr is ghc-only -finalise = getStreamState >>= unsafeLiftIO . finalizeForeignPtr -#else -finalise = return () -#endif - -checkFormatSupported :: Format -> Stream () -checkFormatSupported format = do - version <- unsafeLiftIO (peekCAString =<< c_zlibVersion) - case version of - ('1':'.':'1':'.':_) - | format == GZip - || format == GZipOrZlib - -> fail $ "version 1.1.x of the zlib C library does not support the" - ++ " 'gzip' format via the in-memory api, only the 'raw' and " - ++ " 'zlib' formats." - _ -> return () - ----------------------- --- The foreign imports - -newtype StreamState = StreamState (Ptr StreamState) - --- inflateInit2 and deflateInit2 are actually defined as macros in zlib.h --- They are defined in terms of inflateInit2_ and deflateInit2_ passing two --- additional arguments used to detect compatability problems. They pass the --- version of zlib as a char * and the size of the z_stream struct. --- If we compile via C then we can avoid this hassle however thats not really --- kosher since the Haskell FFI is defined at the C ABI level, not the C --- language level. There is no requirement to compile via C and pick up C --- headers. 
So it's much better if we can make it work properly and that'd --- also allow compiling via ghc's ncg which is a good thing since the C --- backend is not going to be around forever. --- --- So we define c_inflateInit2 and c_deflateInit2 here as wrappers around --- their _ counterparts and pass the extra args. - -foreign import ccall unsafe "zlib.h inflateInit2_" - c_inflateInit2_ :: StreamState -> CInt -> Ptr CChar -> CInt -> IO CInt - -c_inflateInit2 :: StreamState -> CInt -> IO CInt -c_inflateInit2 z n = - withCAString #{const_str ZLIB_VERSION} $ \versionStr -> - c_inflateInit2_ z n versionStr (#{const sizeof(z_stream)} :: CInt) - -foreign import ccall unsafe "zlib.h inflate" - c_inflate :: StreamState -> CInt -> IO CInt - -foreign import ccall unsafe "zlib.h &inflateEnd" - c_inflateEnd :: FinalizerPtr StreamState - - -foreign import ccall unsafe "zlib.h deflateInit2_" - c_deflateInit2_ :: StreamState - -> CInt -> CInt -> CInt -> CInt -> CInt - -> Ptr CChar -> CInt - -> IO CInt - -c_deflateInit2 :: StreamState - -> CInt -> CInt -> CInt -> CInt -> CInt -> IO CInt -c_deflateInit2 z a b c d e = - withCAString #{const_str ZLIB_VERSION} $ \versionStr -> - c_deflateInit2_ z a b c d e versionStr (#{const sizeof(z_stream)} :: CInt) - -foreign import ccall unsafe "zlib.h deflateSetDictionary" - c_deflateSetDictionary :: StreamState - -> Ptr CChar - -> CUInt - -> IO CInt - -foreign import ccall unsafe "zlib.h inflateSetDictionary" - c_inflateSetDictionary :: StreamState - -> Ptr CChar - -> CUInt - -> IO CInt - -foreign import ccall unsafe "zlib.h deflate" - c_deflate :: StreamState -> CInt -> IO CInt - -foreign import ccall unsafe "zlib.h &deflateEnd" - c_deflateEnd :: FinalizerPtr StreamState - -foreign import ccall unsafe "zlib.h zlibVersion" - c_zlibVersion :: IO CString - -foreign import ccall unsafe "zlib.h adler32" - c_adler32 :: CULong - -> Ptr CChar - -> CUInt - -> IO CULong diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar6=/Codec/Compression/Zlib.hs 
cabal-install-1.22-1.22.9.0/=unpacked-tar6=/Codec/Compression/Zlib.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar6=/Codec/Compression/Zlib.hs 2014-11-18 11:13:10.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar6=/Codec/Compression/Zlib.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,118 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Copyright : (c) 2006-2008 Duncan Coutts --- License : BSD-style --- --- Maintainer : duncan@haskell.org --- Stability : provisional --- Portability : portable (H98 + FFI) --- --- Compression and decompression of data streams in the zlib format. --- --- The format is described in detail in RFC #1950: --- --- --- See also the zlib home page: --- ------------------------------------------------------------------------------ -module Codec.Compression.Zlib ( - - -- | This module provides pure functions for compressing and decompressing - -- streams of data in the zlib format and represented by lazy 'ByteString's. - -- This makes it easy to use either in memory or with disk or network IO. 
- - -- * Simple compression and decompression - compress, - decompress, - - -- * Extended api with control over compression parameters - compressWith, - decompressWith, - - CompressParams(..), defaultCompressParams, - DecompressParams(..), defaultDecompressParams, - - -- ** The compression parameter types - CompressionLevel(..), - defaultCompression, - noCompression, - bestSpeed, - bestCompression, - compressionLevel, - Method(..), - deflateMethod, - WindowBits(..), - defaultWindowBits, - windowBits, - MemoryLevel(..), - defaultMemoryLevel, - minMemoryLevel, - maxMemoryLevel, - memoryLevel, - CompressionStrategy(..), - defaultStrategy, - filteredStrategy, - huffmanOnlyStrategy, - - ) where - -import Data.ByteString.Lazy (ByteString) - -import qualified Codec.Compression.Zlib.Internal as Internal -import Codec.Compression.Zlib.Internal hiding (compress, decompress) - - --- | Decompress a stream of data in the zlib format. --- --- There are a number of errors that can occur. In each case an exception will --- be thrown. The possible error conditions are: --- --- * if the stream does not start with a valid gzip header --- --- * if the compressed stream is corrupted --- --- * if the compressed stream ends permaturely --- --- Note that the decompression is performed /lazily/. Errors in the data stream --- may not be detected until the end of the stream is demanded (since it is --- only at the end that the final checksum can be checked). If this is --- important to you, you must make sure to consume the whole decompressed --- stream before doing any IO action that depends on it. --- -decompress :: ByteString -> ByteString -decompress = decompressWith defaultDecompressParams - - --- | Like 'decompress' but with the ability to specify various decompression --- parameters. Typical usage: --- --- > decompressWith defaultCompressParams { ... 
} --- -decompressWith :: DecompressParams -> ByteString -> ByteString -decompressWith = Internal.decompress zlibFormat - - --- | Compress a stream of data into the zlib format. --- --- This uses the default compression parameters. In partiular it uses the --- default compression level which favours a higher compression ratio over --- compression speed, though it does not use the maximum compression level. --- --- Use 'compressWith' to adjust the compression level or other compression --- parameters. --- -compress :: ByteString -> ByteString -compress = compressWith defaultCompressParams - - --- | Like 'compress' but with the ability to specify various compression --- parameters. Typical usage: --- --- > compressWith defaultCompressParams { ... } --- --- In particular you can set the compression level: --- --- > compressWith defaultCompressParams { compressLevel = BestCompression } --- -compressWith :: CompressParams -> ByteString -> ByteString -compressWith = Internal.compress zlibFormat diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar6=/examples/gunzip.hs cabal-install-1.22-1.22.9.0/=unpacked-tar6=/examples/gunzip.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar6=/examples/gunzip.hs 2014-11-18 11:13:10.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar6=/examples/gunzip.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -module Main where - -import qualified Data.ByteString.Lazy as B -import qualified Codec.Compression.GZip as GZip - -main = B.interact GZip.decompress diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar6=/examples/gzip.hs cabal-install-1.22-1.22.9.0/=unpacked-tar6=/examples/gzip.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar6=/examples/gzip.hs 2014-11-18 11:13:10.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar6=/examples/gzip.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,8 +0,0 @@ -module Main where - -import qualified Data.ByteString.Lazy as B -import qualified Codec.Compression.GZip as GZip - -main = B.interact $ 
GZip.compressWith GZip.defaultCompressParams { - GZip.compressLevel = GZip.BestCompression - } diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar6=/LICENSE cabal-install-1.22-1.22.9.0/=unpacked-tar6=/LICENSE --- cabal-install-1.22-1.22.6.0/=unpacked-tar6=/LICENSE 2014-11-18 11:13:10.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar6=/LICENSE 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ -Copyright (c) 2006-2008, Duncan Coutts -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. -3. This clause is intentionally left blank. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. 
diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar6=/Setup.hs cabal-install-1.22-1.22.9.0/=unpacked-tar6=/Setup.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar6=/Setup.hs 2014-11-18 11:13:10.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar6=/Setup.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -import Distribution.Simple -main = defaultMain diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar6=/zlib.cabal cabal-install-1.22-1.22.9.0/=unpacked-tar6=/zlib.cabal --- cabal-install-1.22-1.22.6.0/=unpacked-tar6=/zlib.cabal 2014-11-18 11:13:10.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar6=/zlib.cabal 1970-01-01 00:00:00.000000000 +0000 @@ -1,53 +0,0 @@ -name: zlib -version: 0.5.4.2 -copyright: (c) 2006-2012 Duncan Coutts -license: BSD3 -license-file: LICENSE -author: Duncan Coutts -maintainer: Duncan Coutts -category: Codec -synopsis: Compression and decompression in the gzip and zlib formats -description: This package provides a pure interface for compressing and - decompressing streams of data represented as lazy - 'ByteString's. It uses the zlib C library so it has high - performance. It supports the \"zlib\", \"gzip\" and \"raw\" - compression formats. - . - It provides a convenient high level API suitable for most - tasks and for the few cases where more control is needed it - provides access to the full zlib feature set. 
-build-type: Simple -cabal-version: >= 1.8 -extra-source-files: cbits/crc32.h cbits/inffast.h cbits/inflate.h - cbits/trees.h cbits/deflate.h cbits/inffixed.h - cbits/inftrees.h cbits/zutil.h - -- demo programs: - examples/gzip.hs examples/gunzip.hs - -source-repository head - type: darcs - location: http://code.haskell.org/zlib/ - -library - exposed-modules: Codec.Compression.GZip, - Codec.Compression.Zlib, - Codec.Compression.Zlib.Raw, - Codec.Compression.Zlib.Internal - other-modules: Codec.Compression.Zlib.Stream - extensions: CPP, ForeignFunctionInterface - build-depends: base >= 3 && < 5, - bytestring >= 0.9 && < 0.12 - includes: zlib.h - ghc-options: -Wall - if !os(windows) - -- Normally we use the the standard system zlib: - extra-libraries: z - else - -- However for the benefit of users of Windows (which does not have zlib - -- by default) we bundle a complete copy of the C sources of zlib-1.2.5 - c-sources: cbits/adler32.c cbits/compress.c cbits/crc32.c - cbits/deflate.c cbits/infback.c - cbits/inffast.c cbits/inflate.c cbits/inftrees.c - cbits/trees.c cbits/uncompr.c cbits/zutil.c - include-dirs: cbits - install-includes: zlib.h zconf.h diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar7=/LICENSE cabal-install-1.22-1.22.9.0/=unpacked-tar7=/LICENSE --- cabal-install-1.22-1.22.6.0/=unpacked-tar7=/LICENSE 2014-09-02 18:17:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar7=/LICENSE 1970-01-01 00:00:00.000000000 +0000 @@ -1,29 +0,0 @@ -Copyright (c) 2002-2010, The University Court of the University of Glasgow. -Copyright (c) 2007-2010, Johan Tibell - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -- Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. 
- -- Redistributions in binary form must reproduce the above copyright notice, -this list of conditions and the following disclaimer in the documentation -and/or other materials provided with the distribution. - -- Neither name of the University nor the names of its contributors may be -used to endorse or promote products derived from this software without -specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY COURT OF THE UNIVERSITY OF -GLASGOW AND THE CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, -INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND -FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -UNIVERSITY COURT OF THE UNIVERSITY OF GLASGOW OR THE CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -DAMAGE. diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar7=/Network/URI.hs cabal-install-1.22-1.22.9.0/=unpacked-tar7=/Network/URI.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar7=/Network/URI.hs 2014-09-02 18:17:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar7=/Network/URI.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,1349 +0,0 @@ -{-# LANGUAGE CPP #-} --------------------------------------------------------------------------------- --- | --- Module : Network.URI --- Copyright : (c) 2004, Graham Klyne --- License : BSD-style (see end of this file) --- --- Maintainer : Graham Klyne --- Stability : provisional --- Portability : portable --- --- This module defines functions for handling URIs. 
It presents substantially the --- same interface as the older GHC Network.URI module, but is implemented using --- Parsec rather than a Regex library that is not available with Hugs. The internal --- representation of URI has been changed so that URI strings are more --- completely preserved when round-tripping to a URI value and back. --- --- In addition, four methods are provided for parsing different --- kinds of URI string (as noted in RFC3986): --- 'parseURI', --- 'parseURIReference', --- 'parseRelativeReference' and --- 'parseAbsoluteURI'. --- --- Further, four methods are provided for classifying different --- kinds of URI string (as noted in RFC3986): --- 'isURI', --- 'isURIReference', --- 'isRelativeReference' and --- 'isAbsoluteURI'. --- --- The long-standing official reference for URI handling was RFC2396 [1], --- as updated by RFC 2732 [2], but this was replaced by a new specification, --- RFC3986 [3] in January 2005. This latter specification has been used --- as the primary reference for constructing the URI parser implemented --- here, and it is intended that there is a direct relationship between --- the syntax definition in that document and this parser implementation. --- --- RFC 1808 [4] contains a number of test cases for relative URI handling. --- Dan Connolly's Python module @uripath.py@ [5] also contains useful details --- and test cases. --- --- Some of the code has been copied from the previous GHC implementation, --- but the parser is replaced with one that performs more complete --- syntax checking of the URI itself, according to RFC3986 [3]. --- --- References --- --- (1) --- --- (2) --- --- (3) --- --- (4) --- --- (5) --- --------------------------------------------------------------------------------- - -module Network.URI - ( - -- * The URI type - URI(..) - , URIAuth(..) 
- , nullURI - - -- * Parsing - , parseURI - , parseURIReference - , parseRelativeReference - , parseAbsoluteURI - - -- * Test for strings containing various kinds of URI - , isURI - , isURIReference - , isRelativeReference - , isAbsoluteURI - , isIPv6address - , isIPv4address - - -- * Predicates - , uriIsAbsolute - , uriIsRelative - - -- * Relative URIs - , relativeTo - , nonStrictRelativeTo - , relativeFrom - - -- * Operations on URI strings - -- | Support for putting strings into URI-friendly - -- escaped format and getting them back again. - -- This can't be done transparently in all cases, because certain - -- characters have different meanings in different kinds of URI. - -- The URI spec [3], section 2.4, indicates that all URI components - -- should be escaped before they are assembled as a URI: - -- \"Once produced, a URI is always in its percent-encoded form\" - , uriToString - , isReserved, isUnreserved - , isAllowedInURI, isUnescapedInURI - , isUnescapedInURIComponent - , escapeURIChar - , escapeURIString - , unEscapeString - - -- * URI Normalization functions - , normalizeCase - , normalizeEscape - , normalizePathSegments - - -- * Deprecated functions - , parseabsoluteURI - , escapeString - , reserved, unreserved - , scheme, authority, path, query, fragment - ) where - -import Text.ParserCombinators.Parsec - ( GenParser, ParseError - , parse, (<|>), (), try - , option, many, many1, count, notFollowedBy - , char, satisfy, oneOf, string, eof - , unexpected - ) - -import Control.Monad (MonadPlus(..)) -import Data.Char (ord, chr, isHexDigit, toLower, toUpper, digitToInt) -import Data.Bits ((.|.),(.&.),shiftL,shiftR) -import Debug.Trace (trace) -import Numeric (showIntAtBase) - -import Data.Typeable (Typeable) -#if MIN_VERSION_base(4,0,0) -import Data.Data (Data) -#else -import Data.Generics (Data) -#endif - ------------------------------------------------------------- --- The URI datatype ------------------------------------------------------------- - --- 
|Represents a general universal resource identifier using --- its component parts. --- --- For example, for the URI --- --- > foo://anonymous@www.haskell.org:42/ghc?query#frag --- --- the components are: --- -data URI = URI - { uriScheme :: String -- ^ @foo:@ - , uriAuthority :: Maybe URIAuth -- ^ @\/\/anonymous\@www.haskell.org:42@ - , uriPath :: String -- ^ @\/ghc@ - , uriQuery :: String -- ^ @?query@ - , uriFragment :: String -- ^ @#frag@ - } deriving (Eq, Ord, Typeable, Data) - --- |Type for authority value within a URI -data URIAuth = URIAuth - { uriUserInfo :: String -- ^ @anonymous\@@ - , uriRegName :: String -- ^ @www.haskell.org@ - , uriPort :: String -- ^ @:42@ - } deriving (Eq, Ord, Show, Typeable, Data) - --- |Blank URI -nullURI :: URI -nullURI = URI - { uriScheme = "" - , uriAuthority = Nothing - , uriPath = "" - , uriQuery = "" - , uriFragment = "" - } - --- URI as instance of Show. Note that for security reasons, the default --- behaviour is to suppress any userinfo field (see RFC3986, section 7.5). --- This can be overridden by using uriToString directly with first --- argument @id@ (noting that this returns a ShowS value rather than a string). 
--- --- [[[Another design would be to embed the userinfo mapping function in --- the URIAuth value, with the default value suppressing userinfo formatting, --- but providing a function to return a new URI value with userinfo --- data exposed by show.]]] --- -instance Show URI where - showsPrec _ = uriToString defaultUserInfoMap - -defaultUserInfoMap :: String -> String -defaultUserInfoMap uinf = user++newpass - where - (user,pass) = break (==':') uinf - newpass = if null pass || (pass == "@") - || (pass == ":@") - then pass - else ":...@" - -testDefaultUserInfoMap :: [Bool] -testDefaultUserInfoMap = - [ defaultUserInfoMap "" == "" - , defaultUserInfoMap "@" == "@" - , defaultUserInfoMap "user@" == "user@" - , defaultUserInfoMap "user:@" == "user:@" - , defaultUserInfoMap "user:anonymous@" == "user:...@" - , defaultUserInfoMap "user:pass@" == "user:...@" - , defaultUserInfoMap "user:pass" == "user:...@" - , defaultUserInfoMap "user:anonymous" == "user:...@" - ] - ------------------------------------------------------------- --- Parse a URI ------------------------------------------------------------- - --- |Turn a string containing a URI into a 'URI'. --- Returns 'Nothing' if the string is not a valid URI; --- (an absolute URI with optional fragment identifier). --- --- NOTE: this is different from the previous network.URI, --- whose @parseURI@ function works like 'parseURIReference' --- in this module. --- -parseURI :: String -> Maybe URI -parseURI = parseURIAny uri - --- |Parse a URI reference to a 'URI' value. --- Returns 'Nothing' if the string is not a valid URI reference. --- (an absolute or relative URI with optional fragment identifier). --- -parseURIReference :: String -> Maybe URI -parseURIReference = parseURIAny uriReference - --- |Parse a relative URI to a 'URI' value. --- Returns 'Nothing' if the string is not a valid relative URI. --- (a relative URI with optional fragment identifier). 
--- -parseRelativeReference :: String -> Maybe URI -parseRelativeReference = parseURIAny relativeRef - --- |Parse an absolute URI to a 'URI' value. --- Returns 'Nothing' if the string is not a valid absolute URI. --- (an absolute URI without a fragment identifier). --- -parseAbsoluteURI :: String -> Maybe URI -parseAbsoluteURI = parseURIAny absoluteURI - --- |Test if string contains a valid URI --- (an absolute URI with optional fragment identifier). --- -isURI :: String -> Bool -isURI = isValidParse uri - --- |Test if string contains a valid URI reference --- (an absolute or relative URI with optional fragment identifier). --- -isURIReference :: String -> Bool -isURIReference = isValidParse uriReference - --- |Test if string contains a valid relative URI --- (a relative URI with optional fragment identifier). --- -isRelativeReference :: String -> Bool -isRelativeReference = isValidParse relativeRef - --- |Test if string contains a valid absolute URI --- (an absolute URI without a fragment identifier). 
--- -isAbsoluteURI :: String -> Bool -isAbsoluteURI = isValidParse absoluteURI - --- |Test if string contains a valid IPv6 address --- -isIPv6address :: String -> Bool -isIPv6address = isValidParse ipv6address - --- |Test if string contains a valid IPv4 address --- -isIPv4address :: String -> Bool -isIPv4address = isValidParse ipv4address - --- |Test function: parse and reconstruct a URI reference --- -testURIReference :: String -> String -testURIReference uristr = show (parseAll uriReference "" uristr) - --- Helper function for turning a string into a URI --- -parseURIAny :: URIParser URI -> String -> Maybe URI -parseURIAny parser uristr = case parseAll parser "" uristr of - Left _ -> Nothing - Right u -> Just u - --- Helper function to test a string match to a parser --- -isValidParse :: URIParser a -> String -> Bool -isValidParse parser uristr = case parseAll parser "" uristr of - -- Left e -> error (show e) - Left _ -> False - Right _ -> True - -parseAll :: URIParser a -> String -> String -> Either ParseError a -parseAll parser filename uristr = parse newparser filename uristr - where - newparser = - do { res <- parser - ; eof - ; return res - } - ------------------------------------------------------------- --- Predicates ------------------------------------------------------------- - -uriIsAbsolute :: URI -> Bool -uriIsAbsolute (URI {uriScheme = scheme}) = scheme /= "" - -uriIsRelative :: URI -> Bool -uriIsRelative = not . uriIsAbsolute - ------------------------------------------------------------- --- URI parser body based on Parsec elements and combinators ------------------------------------------------------------- - --- Parser parser type. 
--- Currently -type URIParser a = GenParser Char () a - --- RFC3986, section 2.1 --- --- Parse and return a 'pct-encoded' sequence --- -escaped :: URIParser String -escaped = - do { char '%' - ; h1 <- hexDigitChar - ; h2 <- hexDigitChar - ; return $ ['%',h1,h2] - } - --- RFC3986, section 2.2 --- --- |Returns 'True' if the character is a \"reserved\" character in a --- URI. To include a literal instance of one of these characters in a --- component of a URI, it must be escaped. --- -isReserved :: Char -> Bool -isReserved c = isGenDelims c || isSubDelims c - -isGenDelims :: Char -> Bool -isGenDelims c = c `elem` ":/?#[]@" - -isSubDelims :: Char -> Bool -isSubDelims c = c `elem` "!$&'()*+,;=" - -genDelims :: URIParser String -genDelims = do { c <- satisfy isGenDelims ; return [c] } - -subDelims :: URIParser String -subDelims = do { c <- satisfy isSubDelims ; return [c] } - --- RFC3986, section 2.3 --- --- |Returns 'True' if the character is an \"unreserved\" character in --- a URI. These characters do not need to be escaped in a URI. The --- only characters allowed in a URI are either \"reserved\", --- \"unreserved\", or an escape sequence (@%@ followed by two hex digits). --- -isUnreserved :: Char -> Bool -isUnreserved c = isAlphaNumChar c || (c `elem` "-_.~") - -unreservedChar :: URIParser String -unreservedChar = do { c <- satisfy isUnreserved ; return [c] } - --- RFC3986, section 3 --- --- URI = scheme ":" hier-part [ "?" query ] [ "#" fragment ] --- --- hier-part = "//" authority path-abempty --- / path-abs --- / path-rootless --- / path-empty - -uri :: URIParser URI -uri = - do { us <- try uscheme - -- ; ua <- option Nothing ( do { try (string "//") ; uauthority } ) - -- ; up <- upath - ; (ua,up) <- hierPart - ; uq <- option "" ( do { char '?' 
; uquery } ) - ; uf <- option "" ( do { char '#' ; ufragment } ) - ; return $ URI - { uriScheme = us - , uriAuthority = ua - , uriPath = up - , uriQuery = uq - , uriFragment = uf - } - } - -hierPart :: URIParser ((Maybe URIAuth),String) -hierPart = - do { try (string "//") - ; ua <- uauthority - ; up <- pathAbEmpty - ; return (ua,up) - } - <|> do { up <- pathAbs - ; return (Nothing,up) - } - <|> do { up <- pathRootLess - ; return (Nothing,up) - } - <|> do { return (Nothing,"") - } - --- RFC3986, section 3.1 - -uscheme :: URIParser String -uscheme = - do { s <- oneThenMany alphaChar (satisfy isSchemeChar) - ; char ':' - ; return $ s++":" - } - --- RFC3986, section 3.2 - -uauthority :: URIParser (Maybe URIAuth) -uauthority = - do { uu <- option "" (try userinfo) - ; uh <- host - ; up <- option "" port - ; return $ Just $ URIAuth - { uriUserInfo = uu - , uriRegName = uh - , uriPort = up - } - } - --- RFC3986, section 3.2.1 - -userinfo :: URIParser String -userinfo = - do { uu <- many (uchar ";:&=+$,") - ; char '@' - ; return (concat uu ++"@") - } - --- RFC3986, section 3.2.2 - -host :: URIParser String -host = ipLiteral <|> try ipv4address <|> regName - -ipLiteral :: URIParser String -ipLiteral = - do { char '[' - ; ua <- ( ipv6address <|> ipvFuture ) - ; char ']' - ; return $ "[" ++ ua ++ "]" - } - "IP address literal" - -ipvFuture :: URIParser String -ipvFuture = - do { char 'v' - ; h <- hexDigitChar - ; char '.' 
- ; a <- many1 (satisfy isIpvFutureChar) - ; return $ 'v':h:'.':a - } - -isIpvFutureChar :: Char -> Bool -isIpvFutureChar c = isUnreserved c || isSubDelims c || (c==';') - -ipv6address :: URIParser String -ipv6address = - try ( do - { a2 <- count 6 h4c - ; a3 <- ls32 - ; return $ concat a2 ++ a3 - } ) - <|> try ( do - { string "::" - ; a2 <- count 5 h4c - ; a3 <- ls32 - ; return $ "::" ++ concat a2 ++ a3 - } ) - <|> try ( do - { a1 <- opt_n_h4c_h4 0 - ; string "::" - ; a2 <- count 4 h4c - ; a3 <- ls32 - ; return $ a1 ++ "::" ++ concat a2 ++ a3 - } ) - <|> try ( do - { a1 <- opt_n_h4c_h4 1 - ; string "::" - ; a2 <- count 3 h4c - ; a3 <- ls32 - ; return $ a1 ++ "::" ++ concat a2 ++ a3 - } ) - <|> try ( do - { a1 <- opt_n_h4c_h4 2 - ; string "::" - ; a2 <- count 2 h4c - ; a3 <- ls32 - ; return $ a1 ++ "::" ++ concat a2 ++ a3 - } ) - <|> try ( do - { a1 <- opt_n_h4c_h4 3 - ; string "::" - ; a2 <- h4c - ; a3 <- ls32 - ; return $ a1 ++ "::" ++ a2 ++ a3 - } ) - <|> try ( do - { a1 <- opt_n_h4c_h4 4 - ; string "::" - ; a3 <- ls32 - ; return $ a1 ++ "::" ++ a3 - } ) - <|> try ( do - { a1 <- opt_n_h4c_h4 5 - ; string "::" - ; a3 <- h4 - ; return $ a1 ++ "::" ++ a3 - } ) - <|> try ( do - { a1 <- opt_n_h4c_h4 6 - ; string "::" - ; return $ a1 ++ "::" - } ) - "IPv6 address" - -opt_n_h4c_h4 :: Int -> URIParser String -opt_n_h4c_h4 n = option "" $ - do { a1 <- countMinMax 0 n h4c - ; a2 <- h4 - ; return $ concat a1 ++ a2 - } - -ls32 :: URIParser String -ls32 = try ( do - { a1 <- h4c - ; a2 <- h4 - ; return (a1++a2) - } ) - <|> ipv4address - -h4c :: URIParser String -h4c = try $ - do { a1 <- h4 - ; char ':' - ; notFollowedBy (char ':') - ; return $ a1 ++ ":" - } - -h4 :: URIParser String -h4 = countMinMax 1 4 hexDigitChar - -ipv4address :: URIParser String -ipv4address = - do { a1 <- decOctet ; char '.' - ; a2 <- decOctet ; char '.' - ; a3 <- decOctet ; char '.' 
- ; a4 <- decOctet - ; notFollowedBy regName - ; return $ a1++"."++a2++"."++a3++"."++a4 - } - "IPv4 Address" - -decOctet :: URIParser String -decOctet = - do { a1 <- countMinMax 1 3 digitChar - ; if (read a1 :: Integer) > 255 then - fail "Decimal octet value too large" - else - return a1 - } - -regName :: URIParser String -regName = - do { ss <- countMinMax 0 255 ( unreservedChar <|> escaped <|> subDelims ) - ; return $ concat ss - } - "Registered name" - --- RFC3986, section 3.2.3 - -port :: URIParser String -port = - do { char ':' - ; p <- many digitChar - ; return (':':p) - } - --- --- RFC3986, section 3.3 --- --- path = path-abempty ; begins with "/" or is empty --- / path-abs ; begins with "/" but not "//" --- / path-noscheme ; begins with a non-colon segment --- / path-rootless ; begins with a segment --- / path-empty ; zero characters --- --- path-abempty = *( "/" segment ) --- path-abs = "/" [ segment-nz *( "/" segment ) ] --- path-noscheme = segment-nzc *( "/" segment ) --- path-rootless = segment-nz *( "/" segment ) --- path-empty = 0 --- --- segment = *pchar --- segment-nz = 1*pchar --- segment-nzc = 1*( unreserved / pct-encoded / sub-delims / "@" ) --- --- pchar = unreserved / pct-encoded / sub-delims / ":" / "@" - -{- -upath :: URIParser String -upath = pathAbEmpty - <|> pathAbs - <|> pathNoScheme - <|> pathRootLess - <|> pathEmpty --} - -pathAbEmpty :: URIParser String -pathAbEmpty = - do { ss <- many slashSegment - ; return $ concat ss - } - -pathAbs :: URIParser String -pathAbs = - do { char '/' - ; ss <- option "" pathRootLess - ; return $ '/':ss - } - -pathNoScheme :: URIParser String -pathNoScheme = - do { s1 <- segmentNzc - ; ss <- many slashSegment - ; return $ concat (s1:ss) - } - -pathRootLess :: URIParser String -pathRootLess = - do { s1 <- segmentNz - ; ss <- many slashSegment - ; return $ concat (s1:ss) - } - -slashSegment :: URIParser String -slashSegment = - do { char '/' - ; s <- segment - ; return ('/':s) - } - -segment :: URIParser 
String -segment = - do { ps <- many pchar - ; return $ concat ps - } - -segmentNz :: URIParser String -segmentNz = - do { ps <- many1 pchar - ; return $ concat ps - } - -segmentNzc :: URIParser String -segmentNzc = - do { ps <- many1 (uchar "@") - ; return $ concat ps - } - -pchar :: URIParser String -pchar = uchar ":@" - --- helper function for pchar and friends -uchar :: String -> URIParser String -uchar extras = - unreservedChar - <|> escaped - <|> subDelims - <|> do { c <- oneOf extras ; return [c] } - --- RFC3986, section 3.4 - -uquery :: URIParser String -uquery = - do { ss <- many $ uchar (":@"++"/?") - ; return $ '?':concat ss - } - --- RFC3986, section 3.5 - -ufragment :: URIParser String -ufragment = - do { ss <- many $ uchar (":@"++"/?") - ; return $ '#':concat ss - } - --- Reference, Relative and Absolute URI forms --- --- RFC3986, section 4.1 - -uriReference :: URIParser URI -uriReference = uri <|> relativeRef - --- RFC3986, section 4.2 --- --- relative-URI = relative-part [ "?" query ] [ "#" fragment ] --- --- relative-part = "//" authority path-abempty --- / path-abs --- / path-noscheme --- / path-empty - -relativeRef :: URIParser URI -relativeRef = - do { notMatching uscheme - -- ; ua <- option Nothing ( do { try (string "//") ; uauthority } ) - -- ; up <- upath - ; (ua,up) <- relativePart - ; uq <- option "" ( do { char '?' 
; uquery } ) - ; uf <- option "" ( do { char '#' ; ufragment } ) - ; return $ URI - { uriScheme = "" - , uriAuthority = ua - , uriPath = up - , uriQuery = uq - , uriFragment = uf - } - } - -relativePart :: URIParser ((Maybe URIAuth),String) -relativePart = - do { try (string "//") - ; ua <- uauthority - ; up <- pathAbEmpty - ; return (ua,up) - } - <|> do { up <- pathAbs - ; return (Nothing,up) - } - <|> do { up <- pathNoScheme - ; return (Nothing,up) - } - <|> do { return (Nothing,"") - } - --- RFC3986, section 4.3 - -absoluteURI :: URIParser URI -absoluteURI = - do { us <- uscheme - -- ; ua <- option Nothing ( do { try (string "//") ; uauthority } ) - -- ; up <- upath - ; (ua,up) <- hierPart - ; uq <- option "" ( do { char '?' ; uquery } ) - ; return $ URI - { uriScheme = us - , uriAuthority = ua - , uriPath = up - , uriQuery = uq - , uriFragment = "" - } - } - --- Imports from RFC 2234 - - -- NOTE: can't use isAlphaNum etc. because these deal with ISO 8859 - -- (and possibly Unicode!) chars. - -- [[[Above was a comment originally in GHC Network/URI.hs: - -- when IRIs are introduced then most codepoints above 128(?) should - -- be treated as unreserved, and higher codepoints for letters should - -- certainly be allowed. - -- ]]] - -isAlphaChar :: Char -> Bool -isAlphaChar c = (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') - -isDigitChar :: Char -> Bool -isDigitChar c = (c >= '0' && c <= '9') - -isAlphaNumChar :: Char -> Bool -isAlphaNumChar c = isAlphaChar c || isDigitChar c - -isHexDigitChar :: Char -> Bool -isHexDigitChar c = isHexDigit c - -isSchemeChar :: Char -> Bool -isSchemeChar c = (isAlphaNumChar c) || (c `elem` "+-.") - -alphaChar :: URIParser Char -alphaChar = satisfy isAlphaChar -- or: Parsec.letter ? - -digitChar :: URIParser Char -digitChar = satisfy isDigitChar -- or: Parsec.digit ? 
- -alphaNumChar :: URIParser Char -alphaNumChar = satisfy isAlphaNumChar - -hexDigitChar :: URIParser Char -hexDigitChar = satisfy isHexDigitChar -- or: Parsec.hexDigit ? - --- Additional parser combinators for common patterns - -oneThenMany :: GenParser t s a -> GenParser t s a -> GenParser t s [a] -oneThenMany p1 pr = - do { a1 <- p1 - ; ar <- many pr - ; return (a1:ar) - } - -countMinMax :: Int -> Int -> GenParser t s a -> GenParser t s [a] -countMinMax m n p | m > 0 = - do { a1 <- p - ; ar <- countMinMax (m-1) (n-1) p - ; return (a1:ar) - } -countMinMax _ n _ | n <= 0 = return [] -countMinMax _ n p = option [] $ - do { a1 <- p - ; ar <- countMinMax 0 (n-1) p - ; return (a1:ar) - } - -notMatching :: Show a => GenParser tok st a -> GenParser tok st () -notMatching p = do { a <- try p ; unexpected (show a) } <|> return () - ------------------------------------------------------------- --- Reconstruct a URI string ------------------------------------------------------------- --- --- |Turn a 'URI' into a string. --- --- Uses a supplied function to map the userinfo part of the URI. --- --- The Show instance for URI uses a mapping that hides any password --- that may be present in the URI. Use this function with argument @id@ --- to preserve the password in the formatted output. --- -uriToString :: (String->String) -> URI -> ShowS -uriToString userinfomap URI { uriScheme=myscheme - , uriAuthority=myauthority - , uriPath=mypath - , uriQuery=myquery - , uriFragment=myfragment - } = - (myscheme++) . (uriAuthToString userinfomap myauthority) - . (mypath++) . (myquery++) . (myfragment++) - -uriAuthToString :: (String->String) -> (Maybe URIAuth) -> ShowS -uriAuthToString _ Nothing = id -- shows "" -uriAuthToString userinfomap - (Just URIAuth { uriUserInfo = myuinfo - , uriRegName = myregname - , uriPort = myport - } ) = - ("//"++) . (if null myuinfo then id else ((userinfomap myuinfo)++)) - . (myregname++) - . 
(myport++) - ------------------------------------------------------------- --- Character classes ------------------------------------------------------------- - --- | Returns 'True' if the character is allowed in a URI. --- -isAllowedInURI :: Char -> Bool -isAllowedInURI c = isReserved c || isUnreserved c || c == '%' -- escape char - --- | Returns 'True' if the character is allowed unescaped in a URI. --- -isUnescapedInURI :: Char -> Bool -isUnescapedInURI c = isReserved c || isUnreserved c - --- | Returns 'True' if the character is allowed unescaped in a URI component. --- -isUnescapedInURIComponent :: Char -> Bool -isUnescapedInURIComponent c = not (isReserved c || not (isUnescapedInURI c)) - ------------------------------------------------------------- --- Escape sequence handling ------------------------------------------------------------- - --- |Escape character if supplied predicate is not satisfied, --- otherwise return character as singleton string. --- -escapeURIChar :: (Char->Bool) -> Char -> String -escapeURIChar p c - | p c = [c] - | otherwise = concatMap (\i -> '%' : myShowHex i "") (utf8EncodeChar c) - where - myShowHex :: Int -> ShowS - myShowHex n r = case showIntAtBase 16 (toChrHex) n r of - [] -> "00" - [x] -> ['0',x] - cs -> cs - toChrHex d - | d < 10 = chr (ord '0' + fromIntegral d) - | otherwise = chr (ord 'A' + fromIntegral (d - 10)) - --- From http://hackage.haskell.org/package/utf8-string --- by Eric Mertens, BSD3 --- Returns [Int] for use with showIntAtBase -utf8EncodeChar :: Char -> [Int] -utf8EncodeChar = map fromIntegral . go . ord - where - go oc - | oc <= 0x7f = [oc] - - | oc <= 0x7ff = [ 0xc0 + (oc `shiftR` 6) - , 0x80 + oc .&. 0x3f - ] - - | oc <= 0xffff = [ 0xe0 + (oc `shiftR` 12) - , 0x80 + ((oc `shiftR` 6) .&. 0x3f) - , 0x80 + oc .&. 0x3f - ] - | otherwise = [ 0xf0 + (oc `shiftR` 18) - , 0x80 + ((oc `shiftR` 12) .&. 0x3f) - , 0x80 + ((oc `shiftR` 6) .&. 0x3f) - , 0x80 + oc .&. 
0x3f - ] - --- |Can be used to make a string valid for use in a URI. --- -escapeURIString - :: (Char->Bool) -- ^ a predicate which returns 'False' - -- if the character should be escaped - -> String -- ^ the string to process - -> String -- ^ the resulting URI string -escapeURIString p s = concatMap (escapeURIChar p) s - --- |Turns all instances of escaped characters in the string back --- into literal characters. --- -unEscapeString :: String -> String -unEscapeString [] = "" -unEscapeString s@(c:cs) = case unEscapeByte s of - Just (byte, rest) -> unEscapeUtf8 byte rest - Nothing -> c : unEscapeString cs - -unEscapeByte :: String -> Maybe (Int, String) -unEscapeByte ('%':x1:x2:s) | isHexDigit x1 && isHexDigit x2 = - Just (digitToInt x1 * 16 + digitToInt x2, s) -unEscapeByte _ = Nothing - --- Adapted from http://hackage.haskell.org/package/utf8-string --- by Eric Mertens, BSD3 -unEscapeUtf8 :: Int -> String -> String -unEscapeUtf8 c rest - | c < 0x80 = chr c : unEscapeString rest - | c < 0xc0 = replacement_character : unEscapeString rest - | c < 0xe0 = multi1 - | c < 0xf0 = multi_byte 2 0xf 0x800 - | c < 0xf8 = multi_byte 3 0x7 0x10000 - | c < 0xfc = multi_byte 4 0x3 0x200000 - | c < 0xfe = multi_byte 5 0x1 0x4000000 - | otherwise = replacement_character : unEscapeString rest - where - replacement_character = '\xfffd' - multi1 = case unEscapeByte rest of - Just (c1, ds) | c1 .&. 0xc0 == 0x80 -> - let d = ((fromEnum c .&. 0x1f) `shiftL` 6) .|. fromEnum (c1 .&. 0x3f) - in if d >= 0x000080 then toEnum d : unEscapeString ds - else replacement_character : unEscapeString ds - _ -> replacement_character : unEscapeString rest - - multi_byte i mask overlong = - aux i rest (unEscapeByte rest) (c .&. mask) - where - aux 0 rs _ acc - | overlong <= acc && acc <= 0x10ffff && - (acc < 0xd800 || 0xdfff < acc) && - (acc < 0xfffe || 0xffff < acc) = chr acc : unEscapeString rs - | otherwise = replacement_character : unEscapeString rs - - aux n _ (Just (r, rs)) acc - | r .&. 
0xc0 == 0x80 = aux (n-1) rs (unEscapeByte rs) - $! shiftL acc 6 .|. (r .&. 0x3f) - - aux _ rs _ _ = replacement_character : unEscapeString rs - ------------------------------------------------------------- --- Resolving a relative URI relative to a base URI ------------------------------------------------------------- - --- |Returns a new 'URI' which represents the value of the --- first 'URI' interpreted as relative to the second 'URI'. --- For example: --- --- > "foo" `relativeTo` "http://bar.org/" = "http://bar.org/foo" --- > "http:foo" `nonStrictRelativeTo` "http://bar.org/" = "http://bar.org/foo" --- --- Algorithm from RFC3986 [3], section 5.2.2 --- - -nonStrictRelativeTo :: URI -> URI -> URI -nonStrictRelativeTo ref base = relativeTo ref' base - where - ref' = if uriScheme ref == uriScheme base - then ref { uriScheme="" } - else ref - -isDefined :: ( MonadPlus m, Eq (m a) ) => m a -> Bool -isDefined a = a /= mzero - --- | Returns a new 'URI' which represents the value of the first 'URI' --- interpreted as relative to the second 'URI'. 
--- --- Algorithm from RFC3986 [3], section 5.2 -relativeTo :: URI -> URI -> URI -relativeTo ref base - | isDefined ( uriScheme ref ) = - just_segments ref - | isDefined ( uriAuthority ref ) = - just_segments ref { uriScheme = uriScheme base } - | isDefined ( uriPath ref ) = - if (head (uriPath ref) == '/') then - just_segments ref - { uriScheme = uriScheme base - , uriAuthority = uriAuthority base - } - else - just_segments ref - { uriScheme = uriScheme base - , uriAuthority = uriAuthority base - , uriPath = mergePaths base ref - } - | isDefined ( uriQuery ref ) = - just_segments ref - { uriScheme = uriScheme base - , uriAuthority = uriAuthority base - , uriPath = uriPath base - } - | otherwise = - just_segments ref - { uriScheme = uriScheme base - , uriAuthority = uriAuthority base - , uriPath = uriPath base - , uriQuery = uriQuery base - } - where - just_segments u = - u { uriPath = removeDotSegments (uriPath u) } - mergePaths b r - | isDefined (uriAuthority b) && null pb = '/':pr - | otherwise = dropLast pb ++ pr - where - pb = uriPath b - pr = uriPath r - dropLast = fst . splitLast -- reverse . dropWhile (/='/') . reverse - --- Remove dot segments, but protect leading '/' character -removeDotSegments :: String -> String -removeDotSegments ('/':ps) = '/':elimDots ps [] -removeDotSegments ps = elimDots ps [] - --- Second arg accumulates segments processed so far in reverse order -elimDots :: String -> [String] -> String --- elimDots ps rs | traceVal "\nps " ps $ traceVal "rs " rs $ False = error "" -elimDots [] [] = "" -elimDots [] rs = concat (reverse rs) -elimDots ( '.':'/':ps) rs = elimDots ps rs -elimDots ( '.':[] ) rs = elimDots [] rs -elimDots ( '.':'.':'/':ps) rs = elimDots ps (drop 1 rs) -elimDots ( '.':'.':[] ) rs = elimDots [] (drop 1 rs) -elimDots ps rs = elimDots ps1 (r:rs) - where - (r,ps1) = nextSegment ps - --- Returns the next segment and the rest of the path from a path string. --- Each segment ends with the next '/' or the end of string. 
--- -nextSegment :: String -> (String,String) -nextSegment ps = - case break (=='/') ps of - (r,'/':ps1) -> (r++"/",ps1) - (r,_) -> (r,[]) - --- Split last (name) segment from path, returning (path,name) -splitLast :: String -> (String,String) -splitLast p = (reverse revpath,reverse revname) - where - (revname,revpath) = break (=='/') $ reverse p - ------------------------------------------------------------- --- Finding a URI relative to a base URI ------------------------------------------------------------- - --- |Returns a new 'URI' which represents the relative location of --- the first 'URI' with respect to the second 'URI'. Thus, the --- values supplied are expected to be absolute URIs, and the result --- returned may be a relative URI. --- --- Example: --- --- > "http://example.com/Root/sub1/name2#frag" --- > `relativeFrom` "http://example.com/Root/sub2/name2#frag" --- > == "../sub1/name2#frag" --- --- There is no single correct implementation of this function, --- but any acceptable implementation must satisfy the following: --- --- > (uabs `relativeFrom` ubase) `relativeTo` ubase == uabs --- --- For any valid absolute URI. --- (cf. 
--- ) --- -relativeFrom :: URI -> URI -> URI -relativeFrom uabs base - | diff uriScheme uabs base = uabs - | diff uriAuthority uabs base = uabs { uriScheme = "" } - | diff uriPath uabs base = uabs - { uriScheme = "" - , uriAuthority = Nothing - , uriPath = relPathFrom (removeBodyDotSegments $ uriPath uabs) - (removeBodyDotSegments $ uriPath base) - } - | diff uriQuery uabs base = uabs - { uriScheme = "" - , uriAuthority = Nothing - , uriPath = "" - } - | otherwise = uabs -- Always carry fragment from uabs - { uriScheme = "" - , uriAuthority = Nothing - , uriPath = "" - , uriQuery = "" - } - where - diff :: Eq b => (a -> b) -> a -> a -> Bool - diff sel u1 u2 = sel u1 /= sel u2 - -- Remove dot segments except the final segment - removeBodyDotSegments p = removeDotSegments p1 ++ p2 - where - (p1,p2) = splitLast p - -relPathFrom :: String -> String -> String -relPathFrom [] _ = "/" -relPathFrom pabs [] = pabs -relPathFrom pabs base = -- Construct a relative path segments - if sa1 == sb1 -- if the paths share a leading segment - then if (sa1 == "/") -- other than a leading '/' - then if (sa2 == sb2) - then relPathFrom1 ra2 rb2 - else pabs - else relPathFrom1 ra1 rb1 - else pabs - where - (sa1,ra1) = nextSegment pabs - (sb1,rb1) = nextSegment base - (sa2,ra2) = nextSegment ra1 - (sb2,rb2) = nextSegment rb1 - --- relPathFrom1 strips off trailing names from the supplied paths, --- and calls difPathFrom to find the relative path from base to --- target -relPathFrom1 :: String -> String -> String -relPathFrom1 pabs base = relName - where - (sa,na) = splitLast pabs - (sb,nb) = splitLast base - rp = relSegsFrom sa sb - relName = if null rp then - if (na == nb) then "" - else if protect na then "./"++na - else na - else - rp++na - -- Precede name with some path if it is null or contains a ':' - protect s = null s || ':' `elem` s - --- relSegsFrom discards any common leading segments from both paths, --- then invokes difSegsFrom to calculate a relative path from the end --- of 
the base path to the end of the target path. --- The final name is handled separately, so this deals only with --- "directory" segtments. --- -relSegsFrom :: String -> String -> String -{- -relSegsFrom sabs base - | traceVal "\nrelSegsFrom\nsabs " sabs $ traceVal "base " base $ - False = error "" --} -relSegsFrom [] [] = "" -- paths are identical -relSegsFrom sabs base = - if sa1 == sb1 - then relSegsFrom ra1 rb1 - else difSegsFrom sabs base - where - (sa1,ra1) = nextSegment sabs - (sb1,rb1) = nextSegment base - --- difSegsFrom calculates a path difference from base to target, --- not including the final name at the end of the path --- (i.e. results always ends with '/') --- --- This function operates under the invariant that the supplied --- value of sabs is the desired path relative to the beginning of --- base. Thus, when base is empty, the desired path has been found. --- -difSegsFrom :: String -> String -> String -{- -difSegsFrom sabs base - | traceVal "\ndifSegsFrom\nsabs " sabs $ traceVal "base " base $ - False = error "" --} -difSegsFrom sabs "" = sabs -difSegsFrom sabs base = difSegsFrom ("../"++sabs) (snd $ nextSegment base) - ------------------------------------------------------------- --- Other normalization functions ------------------------------------------------------------- - --- |Case normalization; cf. RFC3986 section 6.2.2.1 --- NOTE: authority case normalization is not performed --- -normalizeCase :: String -> String -normalizeCase uristr = ncScheme uristr - where - ncScheme (':':cs) = ':':ncEscape cs - ncScheme (c:cs) | isSchemeChar c = toLower c:ncScheme cs - ncScheme _ = ncEscape uristr -- no scheme present - ncEscape ('%':h1:h2:cs) = '%':toUpper h1:toUpper h2:ncEscape cs - ncEscape (c:cs) = c:ncEscape cs - ncEscape [] = [] - --- |Encoding normalization; cf. 
RFC3986 section 6.2.2.2 --- -normalizeEscape :: String -> String -normalizeEscape ('%':h1:h2:cs) - | isHexDigit h1 && isHexDigit h2 && isUnreserved escval = - escval:normalizeEscape cs - where - escval = chr (digitToInt h1*16+digitToInt h2) -normalizeEscape (c:cs) = c:normalizeEscape cs -normalizeEscape [] = [] - --- |Path segment normalization; cf. RFC3986 section 6.2.2.4 --- -normalizePathSegments :: String -> String -normalizePathSegments uristr = normstr juri - where - juri = parseURI uristr - normstr Nothing = uristr - normstr (Just u) = show (normuri u) - normuri u = u { uriPath = removeDotSegments (uriPath u) } - ------------------------------------------------------------- --- Local trace helper functions ------------------------------------------------------------- - -traceShow :: Show a => String -> a -> a -traceShow msg x = trace (msg ++ show x) x - -traceVal :: Show a => String -> a -> b -> b -traceVal msg x y = trace (msg ++ show x) y - ------------------------------------------------------------- --- Deprecated functions ------------------------------------------------------------- - -{-# DEPRECATED parseabsoluteURI "use parseAbsoluteURI" #-} -parseabsoluteURI :: String -> Maybe URI -parseabsoluteURI = parseAbsoluteURI - -{-# DEPRECATED escapeString "use escapeURIString, and note the flipped arguments" #-} -escapeString :: String -> (Char->Bool) -> String -escapeString = flip escapeURIString - -{-# DEPRECATED reserved "use isReserved" #-} -reserved :: Char -> Bool -reserved = isReserved - -{-# DEPRECATED unreserved "use isUnreserved" #-} -unreserved :: Char -> Bool -unreserved = isUnreserved - --- Additional component access functions for backward compatibility - -{-# DEPRECATED scheme "use uriScheme" #-} -scheme :: URI -> String -scheme = orNull init . uriScheme - -{-# DEPRECATED authority "use uriAuthority, and note changed functionality" #-} -authority :: URI -> String -authority = dropss . ($"") . uriAuthToString id . 
uriAuthority - where - -- Old-style authority component does not include leading '//' - dropss ('/':'/':s) = s - dropss s = s - -{-# DEPRECATED path "use uriPath" #-} -path :: URI -> String -path = uriPath - -{-# DEPRECATED query "use uriQuery, and note changed functionality" #-} -query :: URI -> String -query = orNull tail . uriQuery - -{-# DEPRECATED fragment "use uriFragment, and note changed functionality" #-} -fragment :: URI -> String -fragment = orNull tail . uriFragment - -orNull :: ([a]->[a]) -> [a] -> [a] -orNull _ [] = [] -orNull f as = f as - --------------------------------------------------------------------------------- --- --- Copyright (c) 2004, G. KLYNE. All rights reserved. --- Distributed as free software under the following license. --- --- Redistribution and use in source and binary forms, with or without --- modification, are permitted provided that the following conditions --- are met: --- --- - Redistributions of source code must retain the above copyright notice, --- this list of conditions and the following disclaimer. --- --- - Redistributions in binary form must reproduce the above copyright --- notice, this list of conditions and the following disclaimer in the --- documentation and/or other materials provided with the distribution. --- --- - Neither name of the copyright holders nor the names of its --- contributors may be used to endorse or promote products derived from --- this software without specific prior written permission. --- --- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND THE CONTRIBUTORS --- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT --- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR --- A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT --- HOLDERS OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, --- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, --- BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS --- OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND --- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR --- TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE --- USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --- --------------------------------------------------------------------------------- diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar7=/network-uri.cabal cabal-install-1.22-1.22.9.0/=unpacked-tar7=/network-uri.cabal --- cabal-install-1.22-1.22.6.0/=unpacked-tar7=/network-uri.cabal 2014-09-02 18:17:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar7=/network-uri.cabal 1970-01-01 00:00:00.000000000 +0000 @@ -1,62 +0,0 @@ -name: network-uri -version: 2.6.0.1 -synopsis: URI manipulation -description: - This package provides an URI manipulation inteface. - . - In network-2.6 the @Network.URI@ module was split off from the - network package into this package. If you're using the @Network.URI@ - module you can automatically get it from the right package by adding - this to your .cabal file: - . - > flag network-uri - > description: Get Network.URI from the network-uri package - > default: True - > - > library - > -- ... - > if flag(network-uri) - > build-depends: network-uri >= 2.6, network >= 2.6 - > else - > build-depends: network-uri < 2.6, network < 2.6 - . - That is, get the module from either network < 2.6 or from - network-uri >= 2.6. 
-homepage: https://github.com/haskell/network-uri -bug-reports: https://github.com/haskell/network-uri/issues -license: BSD3 -license-file: LICENSE -maintainer: johan.tibell@gmail.com -category: Network -build-type: Simple -cabal-version: >=1.10 - -library - exposed-modules: - Network.URI - build-depends: - base >= 3 && < 5, - parsec >= 3.0 && < 3.2 - default-extensions: CPP, DeriveDataTypeable - ghc-options: -Wall -fwarn-tabs - default-language: Haskell98 - -test-suite uri - hs-source-dirs: tests - main-is: uri001.hs - type: exitcode-stdio-1.0 - - build-depends: - base < 5, - HUnit, - network, - test-framework, - test-framework-hunit, - test-framework-quickcheck2 - - ghc-options: -Wall -fwarn-tabs - default-language: Haskell98 - -source-repository head - type: git - location: git://github.com/haskell/network-uri.git \ No newline at end of file diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar7=/Setup.hs cabal-install-1.22-1.22.9.0/=unpacked-tar7=/Setup.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar7=/Setup.hs 2014-09-02 18:17:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar7=/Setup.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -import Distribution.Simple -main = defaultMain diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar7=/tests/uri001.hs cabal-install-1.22-1.22.9.0/=unpacked-tar7=/tests/uri001.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar7=/tests/uri001.hs 2014-09-02 18:17:26.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar7=/tests/uri001.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,1431 +0,0 @@ -{-# OPTIONS_GHC -fno-warn-missing-signatures #-} --------------------------------------------------------------------------------- --- $Id: URITest.hs,v 1.8 2005/07/19 22:01:27 gklyne Exp $ --- --- Copyright (c) 2004, G. KLYNE. All rights reserved. --- See end of this file for licence information. 
--------------------------------------------------------------------------------- --- | --- Module : URITest --- Copyright : (c) 2004, Graham Klyne --- License : BSD-style (see end of this file) --- --- Maintainer : Graham Klyne --- Stability : provisional --- Portability : H98 --- --- This Module contains test cases for module URI. --- --- To run this test without using Cabal to build the package --- (2013-01-05, instructions tested on MacOS): --- 1. Install Haskell platform --- 2. cabal install test-framework --- 3. cabal install test-framework-hunit --- 4. ghc -XDeriveDataTypeable -D"MIN_VERSION_base(x,y,z)=1" ../Network/URI.hs uri001.hs --- 5. ./uri001 --- --- Previous build instructions: --- Using GHC, I compile with this command line: --- ghc --make -fglasgow-exts --- -i..\;C:\Dev\Haskell\Lib\HUnit;C:\Dev\Haskell\Lib\Parsec --- -o URITest.exe URITest -main-is URITest.main --- The -i line may need changing for alternative installations. --- --------------------------------------------------------------------------------- - -module Main where - -import Network.URI - ( URI(..), URIAuth(..) 
- , nullURI - , parseURI, parseURIReference, parseRelativeReference, parseAbsoluteURI - , parseAbsoluteURI - , isURI, isURIReference, isRelativeReference, isAbsoluteURI - , uriIsAbsolute, uriIsRelative - , relativeTo, nonStrictRelativeTo - , relativeFrom - , uriToString - , isUnescapedInURIComponent - , isUnescapedInURI, escapeURIString, unEscapeString - , normalizeCase, normalizeEscape, normalizePathSegments - ) - -import Test.HUnit - -import Data.Maybe (fromJust) -import System.IO (openFile, IOMode(WriteMode), hClose) -import qualified Test.Framework as TF -import qualified Test.Framework.Providers.HUnit as TF -import qualified Test.Framework.Providers.QuickCheck2 as TF - --- Test supplied string for valid URI reference syntax --- isValidURIRef :: String -> Bool --- Test supplied string for valid absolute URI reference syntax --- isAbsoluteURIRef :: String -> Bool --- Test supplied string for valid absolute URI syntax --- isAbsoluteURI :: String -> Bool - -data URIType = AbsId -- URI form (absolute, no fragment) - | AbsRf -- Absolute URI reference - | RelRf -- Relative URI reference - | InvRf -- Invalid URI reference -isValidT :: URIType -> Bool -isValidT InvRf = False -isValidT _ = True - -isAbsRfT :: URIType -> Bool -isAbsRfT AbsId = True -isAbsRfT AbsRf = True -isAbsRfT _ = False - -isRelRfT :: URIType -> Bool -isRelRfT RelRf = True -isRelRfT _ = False - -isAbsIdT :: URIType -> Bool -isAbsIdT AbsId = True -isAbsIdT _ = False - -testEq :: (Eq a, Show a) => String -> a -> a -> Assertion -testEq lab a1 a2 = assertEqual lab a1 a2 - -testURIRef :: URIType -> String -> Assertion -testURIRef t u = sequence_ - [ testEq ("test_isURIReference:"++u) (isValidT t) (isURIReference u) - , testEq ("test_isRelativeReference:"++u) (isRelRfT t) (isRelativeReference u) - , testEq ("test_isAbsoluteURI:"++u) (isAbsIdT t) (isAbsoluteURI u) - ] - -testURIRefComponents :: String -> (Maybe URI) -> String -> Assertion -testURIRefComponents _lab uv us = - testEq 
("testURIRefComponents:"++us) uv (parseURIReference us) - - -testURIRef001 = testURIRef AbsRf "http://example.org/aaa/bbb#ccc" -testURIRef002 = testURIRef AbsId "mailto:local@domain.org" -testURIRef003 = testURIRef AbsRf "mailto:local@domain.org#frag" -testURIRef004 = testURIRef AbsRf "HTTP://EXAMPLE.ORG/AAA/BBB#CCC" -testURIRef005 = testURIRef RelRf "//example.org/aaa/bbb#ccc" -testURIRef006 = testURIRef RelRf "/aaa/bbb#ccc" -testURIRef007 = testURIRef RelRf "bbb#ccc" -testURIRef008 = testURIRef RelRf "#ccc" -testURIRef009 = testURIRef RelRf "#" -testURIRef010 = testURIRef RelRf "/" --- escapes -testURIRef011 = testURIRef AbsRf "http://example.org/aaa%2fbbb#ccc" -testURIRef012 = testURIRef AbsRf "http://example.org/aaa%2Fbbb#ccc" -testURIRef013 = testURIRef RelRf "%2F" -testURIRef014 = testURIRef RelRf "aaa%2Fbbb" --- ports -testURIRef015 = testURIRef AbsRf "http://example.org:80/aaa/bbb#ccc" -testURIRef016 = testURIRef AbsRf "http://example.org:/aaa/bbb#ccc" -testURIRef017 = testURIRef AbsRf "http://example.org./aaa/bbb#ccc" -testURIRef018 = testURIRef AbsRf "http://example.123./aaa/bbb#ccc" --- bare authority -testURIRef019 = testURIRef AbsId "http://example.org" --- IPv6 literals (from RFC2732): -testURIRef021 = testURIRef AbsId "http://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:80/index.html" -testURIRef022 = testURIRef AbsId "http://[1080:0:0:0:8:800:200C:417A]/index.html" -testURIRef023 = testURIRef AbsId "http://[3ffe:2a00:100:7031::1]" -testURIRef024 = testURIRef AbsId "http://[1080::8:800:200C:417A]/foo" -testURIRef025 = testURIRef AbsId "http://[::192.9.5.5]/ipng" -testURIRef026 = testURIRef AbsId "http://[::FFFF:129.144.52.38]:80/index.html" -testURIRef027 = testURIRef AbsId "http://[2010:836B:4179::836B:4179]" -testURIRef028 = testURIRef RelRf "//[2010:836B:4179::836B:4179]" -testURIRef029 = testURIRef InvRf "[2010:836B:4179::836B:4179]" --- RFC2396 test cases -testURIRef031 = testURIRef RelRf "./aaa" -testURIRef032 = testURIRef RelRf "../aaa" 
-testURIRef033 = testURIRef AbsId "g:h" -testURIRef034 = testURIRef RelRf "g" -testURIRef035 = testURIRef RelRf "./g" -testURIRef036 = testURIRef RelRf "g/" -testURIRef037 = testURIRef RelRf "/g" -testURIRef038 = testURIRef RelRf "//g" -testURIRef039 = testURIRef RelRf "?y" -testURIRef040 = testURIRef RelRf "g?y" -testURIRef041 = testURIRef RelRf "#s" -testURIRef042 = testURIRef RelRf "g#s" -testURIRef043 = testURIRef RelRf "g?y#s" -testURIRef044 = testURIRef RelRf ";x" -testURIRef045 = testURIRef RelRf "g;x" -testURIRef046 = testURIRef RelRf "g;x?y#s" -testURIRef047 = testURIRef RelRf "." -testURIRef048 = testURIRef RelRf "./" -testURIRef049 = testURIRef RelRf ".." -testURIRef050 = testURIRef RelRf "../" -testURIRef051 = testURIRef RelRf "../g" -testURIRef052 = testURIRef RelRf "../.." -testURIRef053 = testURIRef RelRf "../../" -testURIRef054 = testURIRef RelRf "../../g" -testURIRef055 = testURIRef RelRf "../../../g" -testURIRef056 = testURIRef RelRf "../../../../g" -testURIRef057 = testURIRef RelRf "/./g" -testURIRef058 = testURIRef RelRf "/../g" -testURIRef059 = testURIRef RelRf "g." -testURIRef060 = testURIRef RelRf ".g" -testURIRef061 = testURIRef RelRf "g.." -testURIRef062 = testURIRef RelRf "..g" -testURIRef063 = testURIRef RelRf "./../g" -testURIRef064 = testURIRef RelRf "./g/." 
-testURIRef065 = testURIRef RelRf "g/./h" -testURIRef066 = testURIRef RelRf "g/../h" -testURIRef067 = testURIRef RelRf "g;x=1/./y" -testURIRef068 = testURIRef RelRf "g;x=1/../y" -testURIRef069 = testURIRef RelRf "g?y/./x" -testURIRef070 = testURIRef RelRf "g?y/../x" -testURIRef071 = testURIRef RelRf "g#s/./x" -testURIRef072 = testURIRef RelRf "g#s/../x" -testURIRef073 = testURIRef RelRf "" -testURIRef074 = testURIRef RelRf "A'C" -testURIRef075 = testURIRef RelRf "A$C" -testURIRef076 = testURIRef RelRf "A@C" -testURIRef077 = testURIRef RelRf "A,C" --- Invalid -testURIRef080 = testURIRef InvRf "http://foo.org:80Path/More" -testURIRef081 = testURIRef InvRf "::" -testURIRef082 = testURIRef InvRf " " -testURIRef083 = testURIRef InvRf "%" -testURIRef084 = testURIRef InvRf "A%Z" -testURIRef085 = testURIRef InvRf "%ZZ" -testURIRef086 = testURIRef InvRf "%AZ" -testURIRef087 = testURIRef InvRf "A C" --- testURIRef088 = -- (case removed) --- testURIRef089 = -- (case removed) -testURIRef090 = testURIRef InvRf "A\"C" -testURIRef091 = testURIRef InvRf "A`C" -testURIRef092 = testURIRef InvRf "AC" -testURIRef094 = testURIRef InvRf "A^C" -testURIRef095 = testURIRef InvRf "A\\C" -testURIRef096 = testURIRef InvRf "A{C" -testURIRef097 = testURIRef InvRf "A|C" -testURIRef098 = testURIRef InvRf "A}C" --- From RFC2396: --- rel_segment = 1*( unreserved | escaped | --- ";" | "@" | "&" | "=" | "+" | "$" | "," ) --- unreserved = alphanum | mark --- mark = "-" | "_" | "." | "!" | "~" | "*" | "'" | --- "(" | ")" --- Note RFC 2732 allows '[', ']' ONLY for reserved purpose of IPv6 literals, --- or does it? 
-testURIRef101 = testURIRef InvRf "A[C" -testURIRef102 = testURIRef InvRf "A]C" -testURIRef103 = testURIRef InvRf "A[**]C" -testURIRef104 = testURIRef InvRf "http://[xyz]/" -testURIRef105 = testURIRef InvRf "http://]/" -testURIRef106 = testURIRef InvRf "http://example.org/[2010:836B:4179::836B:4179]" -testURIRef107 = testURIRef InvRf "http://example.org/abc#[2010:836B:4179::836B:4179]" -testURIRef108 = testURIRef InvRf "http://example.org/xxx/[qwerty]#a[b]" --- Random other things that crop up -testURIRef111 = testURIRef AbsRf "http://example/Andrȷ" -testURIRef112 = testURIRef AbsId "file:///C:/DEV/Haskell/lib/HXmlToolbox-3.01/examples/" -testURIRef113 = testURIRef AbsId "http://46229EFFE16A9BD60B9F1BE88B2DB047ADDED785/demo.mp3" -testURIRef114 = testURIRef InvRf "http://example.org/xxx/qwerty#a#b" -testURIRef115 = testURIRef InvRf "dcp.tcp.pft://192.168.0.1:1002:3002?fec=1&crc=0" -testURIRef116 = testURIRef AbsId "dcp.tcp.pft://192.168.0.1:1002?fec=1&crc=0" -testURIRef117 = testURIRef AbsId "foo://" --- URIs prefixed with IPv4 addresses -testURIRef118 = testURIRef AbsId "http://192.168.0.1.example.com/" -testURIRef119 = testURIRef AbsId "http://192.168.0.1.example.com./" --- URI prefixed with 3 octets of an IPv4 address and a subdomain part with a leading digit. -testURIRef120 = testURIRef AbsId "http://192.168.0.1test.example.com/" --- URI with IPv(future) address -testURIRef121 = testURIRef AbsId "http://[v9.123.abc;456.def]/" -testURIRef122 = testEq "v.future authority" - (Just (URIAuth "" "[v9.123.abc;456.def]" ":42")) - ((maybe Nothing uriAuthority) . parseURI $ "http://[v9.123.abc;456.def]:42/") --- URI with non-ASCII characters, fail with Network.HTTP escaping code (see below) --- Currently not supported by Network.URI, but captured here for possible future reference --- when IRI support may be added. 
-testURIRef123 = testURIRef AbsId "http://example.com/test123/䡥汬漬⁗潲汤/index.html" -testURIRef124 = testURIRef AbsId "http://example.com/test124/Москва/index.html" - --- From report by Alexander Ivanov: --- should return " 䡥汬漬⁗潲汤", but returns "Hello, World" instead --- print $ urlDecode $ urlEncode " 䡥汬漬⁗潲汤" --- should return "Москва" --- print $ urlDecode $ urlEncode "Москва" - -testURIRefSuite = TF.testGroup "Test URIrefs" testURIRefList -testURIRefList = - [ TF.testCase "testURIRef001" testURIRef001 - , TF.testCase "testURIRef002" testURIRef002 - , TF.testCase "testURIRef003" testURIRef003 - , TF.testCase "testURIRef004" testURIRef004 - , TF.testCase "testURIRef005" testURIRef005 - , TF.testCase "testURIRef006" testURIRef006 - , TF.testCase "testURIRef007" testURIRef007 - , TF.testCase "testURIRef008" testURIRef008 - , TF.testCase "testURIRef009" testURIRef009 - , TF.testCase "testURIRef010" testURIRef010 - -- - , TF.testCase "testURIRef011" testURIRef011 - , TF.testCase "testURIRef012" testURIRef012 - , TF.testCase "testURIRef013" testURIRef013 - , TF.testCase "testURIRef014" testURIRef014 - , TF.testCase "testURIRef015" testURIRef015 - , TF.testCase "testURIRef016" testURIRef016 - , TF.testCase "testURIRef017" testURIRef017 - , TF.testCase "testURIRef018" testURIRef018 - -- - , TF.testCase "testURIRef019" testURIRef019 - -- - , TF.testCase "testURIRef021" testURIRef021 - , TF.testCase "testURIRef022" testURIRef022 - , TF.testCase "testURIRef023" testURIRef023 - , TF.testCase "testURIRef024" testURIRef024 - , TF.testCase "testURIRef025" testURIRef025 - , TF.testCase "testURIRef026" testURIRef026 - , TF.testCase "testURIRef027" testURIRef027 - , TF.testCase "testURIRef028" testURIRef028 - , TF.testCase "testURIRef029" testURIRef029 - -- - , TF.testCase "testURIRef031" testURIRef031 - , TF.testCase "testURIRef032" testURIRef032 - , TF.testCase "testURIRef033" testURIRef033 - , TF.testCase "testURIRef034" testURIRef034 - , TF.testCase "testURIRef035" testURIRef035 
- , TF.testCase "testURIRef036" testURIRef036 - , TF.testCase "testURIRef037" testURIRef037 - , TF.testCase "testURIRef038" testURIRef038 - , TF.testCase "testURIRef039" testURIRef039 - , TF.testCase "testURIRef040" testURIRef040 - , TF.testCase "testURIRef041" testURIRef041 - , TF.testCase "testURIRef042" testURIRef042 - , TF.testCase "testURIRef043" testURIRef043 - , TF.testCase "testURIRef044" testURIRef044 - , TF.testCase "testURIRef045" testURIRef045 - , TF.testCase "testURIRef046" testURIRef046 - , TF.testCase "testURIRef047" testURIRef047 - , TF.testCase "testURIRef048" testURIRef048 - , TF.testCase "testURIRef049" testURIRef049 - , TF.testCase "testURIRef050" testURIRef050 - , TF.testCase "testURIRef051" testURIRef051 - , TF.testCase "testURIRef052" testURIRef052 - , TF.testCase "testURIRef053" testURIRef053 - , TF.testCase "testURIRef054" testURIRef054 - , TF.testCase "testURIRef055" testURIRef055 - , TF.testCase "testURIRef056" testURIRef056 - , TF.testCase "testURIRef057" testURIRef057 - , TF.testCase "testURIRef058" testURIRef058 - , TF.testCase "testURIRef059" testURIRef059 - , TF.testCase "testURIRef060" testURIRef060 - , TF.testCase "testURIRef061" testURIRef061 - , TF.testCase "testURIRef062" testURIRef062 - , TF.testCase "testURIRef063" testURIRef063 - , TF.testCase "testURIRef064" testURIRef064 - , TF.testCase "testURIRef065" testURIRef065 - , TF.testCase "testURIRef066" testURIRef066 - , TF.testCase "testURIRef067" testURIRef067 - , TF.testCase "testURIRef068" testURIRef068 - , TF.testCase "testURIRef069" testURIRef069 - , TF.testCase "testURIRef070" testURIRef070 - , TF.testCase "testURIRef071" testURIRef071 - , TF.testCase "testURIRef072" testURIRef072 - , TF.testCase "testURIRef073" testURIRef073 - , TF.testCase "testURIRef074" testURIRef074 - , TF.testCase "testURIRef075" testURIRef075 - , TF.testCase "testURIRef076" testURIRef076 - , TF.testCase "testURIRef077" testURIRef077 - -- - , TF.testCase "testURIRef080" testURIRef080 - , TF.testCase 
"testURIRef081" testURIRef081 - , TF.testCase "testURIRef082" testURIRef082 - , TF.testCase "testURIRef083" testURIRef083 - , TF.testCase "testURIRef084" testURIRef084 - , TF.testCase "testURIRef085" testURIRef085 - , TF.testCase "testURIRef086" testURIRef086 - , TF.testCase "testURIRef087" testURIRef087 - -- testURIRef088, - -- testURIRef089, - , TF.testCase "testURIRef090" testURIRef090 - , TF.testCase "testURIRef091" testURIRef091 - , TF.testCase "testURIRef092" testURIRef092 - , TF.testCase "testURIRef093" testURIRef093 - , TF.testCase "testURIRef094" testURIRef094 - , TF.testCase "testURIRef095" testURIRef095 - , TF.testCase "testURIRef096" testURIRef096 - , TF.testCase "testURIRef097" testURIRef097 - , TF.testCase "testURIRef098" testURIRef098 - -- testURIRef099, - -- - , TF.testCase "testURIRef101" testURIRef101 - , TF.testCase "testURIRef102" testURIRef102 - , TF.testCase "testURIRef103" testURIRef103 - , TF.testCase "testURIRef104" testURIRef104 - , TF.testCase "testURIRef105" testURIRef105 - , TF.testCase "testURIRef106" testURIRef106 - , TF.testCase "testURIRef107" testURIRef107 - , TF.testCase "testURIRef108" testURIRef108 - -- - , TF.testCase "testURIRef111" testURIRef111 - , TF.testCase "testURIRef112" testURIRef112 - , TF.testCase "testURIRef113" testURIRef113 - , TF.testCase "testURIRef114" testURIRef114 - , TF.testCase "testURIRef115" testURIRef115 - , TF.testCase "testURIRef116" testURIRef116 - , TF.testCase "testURIRef117" testURIRef117 - -- - , TF.testCase "testURIRef118" testURIRef118 - , TF.testCase "testURIRef119" testURIRef119 - , TF.testCase "testURIRef120" testURIRef120 - -- - , TF.testCase "testURIRef121" testURIRef121 - , TF.testCase "testURIRef122" testURIRef122 - -- IRI test cases not currently supported - -- , TF.testCase "testURIRef123" testURIRef123 - -- , TF.testCase "testURIRef124" testURIRef124 - ] - --- test decomposition of URI into components -testComponent01 = testURIRefComponents "testComponent01" - ( Just $ URI - { 
uriScheme = "http:" - , uriAuthority = Just (URIAuth "user:pass@" "example.org" ":99") - , uriPath = "/aaa/bbb" - , uriQuery = "?qqq" - , uriFragment = "#fff" - } ) - "http://user:pass@example.org:99/aaa/bbb?qqq#fff" -testComponent02 = testURIRefComponents "testComponent02" - ( const Nothing - ( Just $ URI - { uriScheme = "http:" - , uriAuthority = Just (URIAuth "user:pass@" "example.org" ":99") - , uriPath = "aaa/bbb" - , uriQuery = "" - , uriFragment = "" - } ) - ) - "http://user:pass@example.org:99aaa/bbb" -testComponent03 = testURIRefComponents "testComponent03" - ( Just $ URI - { uriScheme = "http:" - , uriAuthority = Just (URIAuth "user:pass@" "example.org" ":99") - , uriPath = "" - , uriQuery = "?aaa/bbb" - , uriFragment = "" - } ) - "http://user:pass@example.org:99?aaa/bbb" -testComponent04 = testURIRefComponents "testComponent03" - ( Just $ URI - { uriScheme = "http:" - , uriAuthority = Just (URIAuth "user:pass@" "example.org" ":99") - , uriPath = "" - , uriQuery = "" - , uriFragment = "#aaa/bbb" - } ) - "http://user:pass@example.org:99#aaa/bbb" --- These test cases contributed by Robert Buck (mathworks.com) -testComponent11 = testURIRefComponents "testComponent03" - ( Just $ URI - { uriScheme = "about:" - , uriAuthority = Nothing - , uriPath = "" - , uriQuery = "" - , uriFragment = "" - } ) - "about:" -testComponent12 = testURIRefComponents "testComponent03" - ( Just $ URI - { uriScheme = "file:" - , uriAuthority = Just (URIAuth "" "windowsauth" "") - , uriPath = "/d$" - , uriQuery = "" - , uriFragment = "" - } ) - "file://windowsauth/d$" - -testComponentSuite = TF.testGroup "Test URIrefs" $ - [ TF.testCase "testComponent01" testComponent01 - , TF.testCase "testComponent02" testComponent02 - , TF.testCase "testComponent03" testComponent03 - , TF.testCase "testComponent04" testComponent04 - , TF.testCase "testComponent11" testComponent11 - , TF.testCase "testComponent12" testComponent12 - ] - --- Get reference relative to given base --- relativeRef :: 
String -> String -> String --- --- Get absolute URI given base and relative reference --- absoluteURI :: String -> String -> String --- --- Test cases taken from: http://www.w3.org/2000/10/swap/uripath.py --- (Thanks, Dan Connolly) --- --- NOTE: absoluteURI base (relativeRef base u) is always equivalent to u. --- cf. http://lists.w3.org/Archives/Public/uri/2003Jan/0008.html - -testRelSplit :: String -> String -> String -> String -> Assertion -testRelSplit label base uabs urel = - testEq label urel (mkrel puabs pubas) - where - mkrel (Just u1) (Just u2) = show (u1 `relativeFrom` u2) - mkrel Nothing _ = "Invalid URI: "++urel - mkrel _ Nothing = "Invalid URI: "++uabs - puabs = parseURIReference uabs - pubas = parseURIReference base - -testRelJoin :: String -> String -> String -> String -> Assertion -testRelJoin label base urel uabs = - testEq label uabs (mkabs purel pubas) - where - mkabs (Just u1) (Just u2) = show (u1 `relativeTo` u2) - mkabs Nothing _ = "Invalid URI: "++urel - mkabs _ Nothing = "Invalid URI: "++uabs - purel = parseURIReference urel - pubas = parseURIReference base - -testRelative :: String -> String -> String -> String -> Assertion -testRelative label base uabs urel = sequence_ - [ - (testRelSplit (label++"(rel)") base uabs urel), - (testRelJoin (label++"(abs)") base urel uabs) - ] - -testRelative01 = testRelative "testRelative01" - "foo:xyz" "bar:abc" "bar:abc" -testRelative02 = testRelative "testRelative02" - "http://example/x/y/z" "http://example/x/abc" "../abc" -testRelative03 = testRelative "testRelative03" - "http://example2/x/y/z" "http://example/x/abc" "//example/x/abc" - -- "http://example2/x/y/z" "http://example/x/abc" "http://example/x/abc" -testRelative04 = testRelative "testRelative04" - "http://ex/x/y/z" "http://ex/x/r" "../r" -testRelative05 = testRelative "testRelative05" - "http://ex/x/y/z" "http://ex/r" "/r" - -- "http://ex/x/y/z" "http://ex/r" "../../r" -testRelative06 = testRelative "testRelative06" - "http://ex/x/y/z" 
"http://ex/x/y/q/r" "q/r" -testRelative07 = testRelative "testRelative07" - "http://ex/x/y" "http://ex/x/q/r#s" "q/r#s" -testRelative08 = testRelative "testRelative08" - "http://ex/x/y" "http://ex/x/q/r#s/t" "q/r#s/t" -testRelative09 = testRelative "testRelative09" - "http://ex/x/y" "ftp://ex/x/q/r" "ftp://ex/x/q/r" -testRelative10 = testRelative "testRelative10" - -- "http://ex/x/y" "http://ex/x/y" "y" - "http://ex/x/y" "http://ex/x/y" "" -testRelative11 = testRelative "testRelative11" - -- "http://ex/x/y/" "http://ex/x/y/" "./" - "http://ex/x/y/" "http://ex/x/y/" "" -testRelative12 = testRelative "testRelative12" - -- "http://ex/x/y/pdq" "http://ex/x/y/pdq" "pdq" - "http://ex/x/y/pdq" "http://ex/x/y/pdq" "" -testRelative13 = testRelative "testRelative13" - "http://ex/x/y/" "http://ex/x/y/z/" "z/" -testRelative14 = testRelative "testRelative14" - -- "file:/swap/test/animal.rdf" "file:/swap/test/animal.rdf#Animal" "animal.rdf#Animal" - "file:/swap/test/animal.rdf" "file:/swap/test/animal.rdf#Animal" "#Animal" -testRelative15 = testRelative "testRelative15" - "file:/e/x/y/z" "file:/e/x/abc" "../abc" -testRelative16 = testRelative "testRelative16" - "file:/example2/x/y/z" "file:/example/x/abc" "/example/x/abc" -testRelative17 = testRelative "testRelative17" - "file:/ex/x/y/z" "file:/ex/x/r" "../r" -testRelative18 = testRelative "testRelative18" - "file:/ex/x/y/z" "file:/r" "/r" -testRelative19 = testRelative "testRelative19" - "file:/ex/x/y" "file:/ex/x/q/r" "q/r" -testRelative20 = testRelative "testRelative20" - "file:/ex/x/y" "file:/ex/x/q/r#s" "q/r#s" -testRelative21 = testRelative "testRelative21" - "file:/ex/x/y" "file:/ex/x/q/r#" "q/r#" -testRelative22 = testRelative "testRelative22" - "file:/ex/x/y" "file:/ex/x/q/r#s/t" "q/r#s/t" -testRelative23 = testRelative "testRelative23" - "file:/ex/x/y" "ftp://ex/x/q/r" "ftp://ex/x/q/r" -testRelative24 = testRelative "testRelative24" - -- "file:/ex/x/y" "file:/ex/x/y" "y" - "file:/ex/x/y" "file:/ex/x/y" "" 
-testRelative25 = testRelative "testRelative25" - -- "file:/ex/x/y/" "file:/ex/x/y/" "./" - "file:/ex/x/y/" "file:/ex/x/y/" "" -testRelative26 = testRelative "testRelative26" - -- "file:/ex/x/y/pdq" "file:/ex/x/y/pdq" "pdq" - "file:/ex/x/y/pdq" "file:/ex/x/y/pdq" "" -testRelative27 = testRelative "testRelative27" - "file:/ex/x/y/" "file:/ex/x/y/z/" "z/" -testRelative28 = testRelative "testRelative28" - "file:/devel/WWW/2000/10/swap/test/reluri-1.n3" - "file://meetings.example.com/cal#m1" "//meetings.example.com/cal#m1" - -- "file:/devel/WWW/2000/10/swap/test/reluri-1.n3" - -- "file://meetings.example.com/cal#m1" "file://meetings.example.com/cal#m1" -testRelative29 = testRelative "testRelative29" - "file:/home/connolly/w3ccvs/WWW/2000/10/swap/test/reluri-1.n3" - "file://meetings.example.com/cal#m1" "//meetings.example.com/cal#m1" - -- "file:/home/connolly/w3ccvs/WWW/2000/10/swap/test/reluri-1.n3" - -- "file://meetings.example.com/cal#m1" "file://meetings.example.com/cal#m1" -testRelative30 = testRelative "testRelative30" - "file:/some/dir/foo" "file:/some/dir/#blort" "./#blort" -testRelative31 = testRelative "testRelative31" - "file:/some/dir/foo" "file:/some/dir/#" "./#" -testRelative32 = testRelative "testRelative32" - "http://ex/x/y" "http://ex/x/q:r" "./q:r" - -- see RFC2396bis, section 5 ^^ -testRelative33 = testRelative "testRelative33" - "http://ex/x/y" "http://ex/x/p=q:r" "./p=q:r" - -- "http://ex/x/y" "http://ex/x/p=q:r" "p=q:r" -testRelative34 = testRelative "testRelative34" - "http://ex/x/y?pp/qq" "http://ex/x/y?pp/rr" "?pp/rr" -testRelative35 = testRelative "testRelative35" - "http://ex/x/y?pp/qq" "http://ex/x/y/z" "y/z" -testRelative36 = testRelative "testRelative36" - "mailto:local" - "mailto:local/qual@domain.org#frag" - "local/qual@domain.org#frag" -testRelative37 = testRelative "testRelative37" - "mailto:local/qual1@domain1.org" - "mailto:local/more/qual2@domain2.org#frag" - "more/qual2@domain2.org#frag" -testRelative38 = testRelative 
"testRelative38" - "http://ex/x/z?q" "http://ex/x/y?q" "y?q" -testRelative39 = testRelative "testRelative39" - "http://ex?p" "http://ex/x/y?q" "/x/y?q" -testRelative40 = testRelative "testRelative40" - "foo:a/b" "foo:a/c/d" "c/d" -testRelative41 = testRelative "testRelative41" - "foo:a/b" "foo:/c/d" "/c/d" -testRelative42 = testRelative "testRelative42" - "foo:a/b?c#d" "foo:a/b?c" "" -testRelative43 = testRelative "testRelative42" - "foo:a" "foo:b/c" "b/c" -testRelative44 = testRelative "testRelative44" - "foo:/a/y/z" "foo:/a/b/c" "../b/c" -testRelative45 = testRelJoin "testRelative45" - "foo:a" "./b/c" "foo:b/c" -testRelative46 = testRelJoin "testRelative46" - "foo:a" "/./b/c" "foo:/b/c" -testRelative47 = testRelJoin "testRelative47" - "foo://a//b/c" "../../d" "foo://a/d" -testRelative48 = testRelJoin "testRelative48" - "foo:a" "." "foo:" -testRelative49 = testRelJoin "testRelative49" - "foo:a" ".." "foo:" - --- add escape tests -testRelative50 = testRelative "testRelative50" - "http://example/x/y%2Fz" "http://example/x/abc" "abc" -testRelative51 = testRelative "testRelative51" - "http://example/a/x/y/z" "http://example/a/x%2Fabc" "../../x%2Fabc" -testRelative52 = testRelative "testRelative52" - "http://example/a/x/y%2Fz" "http://example/a/x%2Fabc" "../x%2Fabc" -testRelative53 = testRelative "testRelative53" - "http://example/x%2Fy/z" "http://example/x%2Fy/abc" "abc" -testRelative54 = testRelative "testRelative54" - "http://ex/x/y" "http://ex/x/q%3Ar" "q%3Ar" -testRelative55 = testRelative "testRelative55" - "http://example/x/y%2Fz" "http://example/x%2Fabc" "/x%2Fabc" --- Apparently, TimBL prefers the following way to 41, 42 above --- cf. http://lists.w3.org/Archives/Public/uri/2003Feb/0028.html --- He also notes that there may be different relative fuctions --- that satisfy the basic equivalence axiom: --- cf. 
http://lists.w3.org/Archives/Public/uri/2003Jan/0008.html -testRelative56 = testRelative "testRelative56" - "http://example/x/y/z" "http://example/x%2Fabc" "/x%2Fabc" -testRelative57 = testRelative "testRelative57" - "http://example/x/y%2Fz" "http://example/x%2Fabc" "/x%2Fabc" - --- Other oddball tests - -- Check segment normalization code: -testRelative60 = testRelJoin "testRelative60" - "ftp://example/x/y" "http://example/a/b/../../c" "http://example/c" -testRelative61 = testRelJoin "testRelative61" - "ftp://example/x/y" "http://example/a/b/c/../../" "http://example/a/" -testRelative62 = testRelJoin "testRelative62" - "ftp://example/x/y" "http://example/a/b/c/./" "http://example/a/b/c/" -testRelative63 = testRelJoin "testRelative63" - "ftp://example/x/y" "http://example/a/b/c/.././" "http://example/a/b/" -testRelative64 = testRelJoin "testRelative64" - "ftp://example/x/y" "http://example/a/b/c/d/../../../../e" "http://example/e" -testRelative65 = testRelJoin "testRelative65" - "ftp://example/x/y" "http://example/a/b/c/d/../.././../../e" "http://example/e" - -- Check handling of queries and fragments with non-relative paths -testRelative70 = testRelative "testRelative70" - "mailto:local1@domain1?query1" "mailto:local2@domain2" - "local2@domain2" -testRelative71 = testRelative "testRelative71" - "mailto:local1@domain1" "mailto:local2@domain2?query2" - "local2@domain2?query2" -testRelative72 = testRelative "testRelative72" - "mailto:local1@domain1?query1" "mailto:local2@domain2?query2" - "local2@domain2?query2" -testRelative73 = testRelative "testRelative73" - "mailto:local@domain?query1" "mailto:local@domain?query2" - "?query2" -testRelative74 = testRelative "testRelative74" - "mailto:?query1" "mailto:local@domain?query2" - "local@domain?query2" -testRelative75 = testRelative "testRelative75" - "mailto:local@domain?query1" "mailto:local@domain?query2" - "?query2" -testRelative76 = testRelative "testRelative76" - "foo:bar" "http://example/a/b?c/../d" 
"http://example/a/b?c/../d" -testRelative77 = testRelative "testRelative77" - "foo:bar" "http://example/a/b#c/../d" "http://example/a/b#c/../d" -{- These (78-81) are some awkward test cases thrown up by a question on the URI list: - http://lists.w3.org/Archives/Public/uri/2005Jul/0013 - Mote that RFC 3986 discards path segents after the final '/' only when merging two - paths - otherwise the final segment in the base URI is mnaintained. This leads to - difficulty in constructinmg a reversible relativeTo/relativeFrom pair of functions. --} -testRelative78 = testRelative "testRelative78" - "http://www.example.com/data/limit/.." "http://www.example.com/data/limit/test.xml" - "test.xml" -testRelative79 = testRelative "testRelative79" - "file:/some/dir/foo" "file:/some/dir/#blort" "./#blort" -testRelative80 = testRelative "testRelative80" - "file:/some/dir/foo" "file:/some/dir/#" "./#" -testRelative81 = testRelative "testRelative81" - "file:/some/dir/.." "file:/some/dir/#blort" "./#blort" - --- testRelative base abs rel --- testRelSplit base abs rel --- testRelJoin base rel abs -testRelative91 = testRelSplit "testRelative91" - "http://example.org/base/uri" "http:this" - "this" -testRelative92 = testRelJoin "testRelative92" - "http://example.org/base/uri" "http:this" - "http:this" -testRelative93 = testRelJoin "testRelative93" - "http:base" "http:this" - "http:this" -testRelative94 = testRelJoin "testRelative94" - "f:/a" ".//g" - "f://g" -testRelative95 = testRelJoin "testRelative95" - "f://example.org/base/a" "b/c//d/e" - "f://example.org/base/b/c//d/e" -testRelative96 = testRelJoin "testRelative96" - "mid:m@example.ord/c@example.org" "m2@example.ord/c2@example.org" - "mid:m@example.ord/m2@example.ord/c2@example.org" -testRelative97 = testRelJoin "testRelative97" - "file:///C:/DEV/Haskell/lib/HXmlToolbox-3.01/examples/" "mini1.xml" - "file:///C:/DEV/Haskell/lib/HXmlToolbox-3.01/examples/mini1.xml" -testRelative98 = testRelative "testRelative98" - "foo:a/y/z" "foo:a/b/c" 
"../b/c" -testRelative99 = testRelJoin "testRelative99" - "f:/a/" "..//g" - "f://g" - - -testRelativeSuite = TF.testGroup "Test Relative URIs" testRelativeList -testRelativeList = - [ TF.testCase "testRelative01" testRelative01 - , TF.testCase "testRelative02" testRelative02 - , TF.testCase "testRelative03" testRelative03 - , TF.testCase "testRelative04" testRelative04 - , TF.testCase "testRelative05" testRelative05 - , TF.testCase "testRelative06" testRelative06 - , TF.testCase "testRelative07" testRelative07 - , TF.testCase "testRelative08" testRelative08 - , TF.testCase "testRelative09" testRelative09 - , TF.testCase "testRelative10" testRelative10 - , TF.testCase "testRelative11" testRelative11 - , TF.testCase "testRelative12" testRelative12 - , TF.testCase "testRelative13" testRelative13 - , TF.testCase "testRelative14" testRelative14 - , TF.testCase "testRelative15" testRelative15 - , TF.testCase "testRelative16" testRelative16 - , TF.testCase "testRelative17" testRelative17 - , TF.testCase "testRelative18" testRelative18 - , TF.testCase "testRelative19" testRelative19 - , TF.testCase "testRelative20" testRelative20 - , TF.testCase "testRelative21" testRelative21 - , TF.testCase "testRelative22" testRelative22 - , TF.testCase "testRelative23" testRelative23 - , TF.testCase "testRelative24" testRelative24 - , TF.testCase "testRelative25" testRelative25 - , TF.testCase "testRelative26" testRelative26 - , TF.testCase "testRelative27" testRelative27 - , TF.testCase "testRelative28" testRelative28 - , TF.testCase "testRelative29" testRelative29 - , TF.testCase "testRelative30" testRelative30 - , TF.testCase "testRelative31" testRelative31 - , TF.testCase "testRelative32" testRelative32 - , TF.testCase "testRelative33" testRelative33 - , TF.testCase "testRelative34" testRelative34 - , TF.testCase "testRelative35" testRelative35 - , TF.testCase "testRelative36" testRelative36 - , TF.testCase "testRelative37" testRelative37 - , TF.testCase "testRelative38" 
testRelative38 - , TF.testCase "testRelative39" testRelative39 - , TF.testCase "testRelative40" testRelative40 - , TF.testCase "testRelative41" testRelative41 - , TF.testCase "testRelative42" testRelative42 - , TF.testCase "testRelative43" testRelative43 - , TF.testCase "testRelative44" testRelative44 - , TF.testCase "testRelative45" testRelative45 - , TF.testCase "testRelative46" testRelative46 - , TF.testCase "testRelative47" testRelative47 - , TF.testCase "testRelative48" testRelative48 - , TF.testCase "testRelative49" testRelative49 - -- - , TF.testCase "testRelative50" testRelative50 - , TF.testCase "testRelative51" testRelative51 - , TF.testCase "testRelative52" testRelative52 - , TF.testCase "testRelative53" testRelative53 - , TF.testCase "testRelative54" testRelative54 - , TF.testCase "testRelative55" testRelative55 - , TF.testCase "testRelative56" testRelative56 - , TF.testCase "testRelative57" testRelative57 - -- - , TF.testCase "testRelative60" testRelative60 - , TF.testCase "testRelative61" testRelative61 - , TF.testCase "testRelative62" testRelative62 - , TF.testCase "testRelative63" testRelative63 - , TF.testCase "testRelative64" testRelative64 - , TF.testCase "testRelative65" testRelative65 - -- - , TF.testCase "testRelative70" testRelative70 - , TF.testCase "testRelative71" testRelative71 - , TF.testCase "testRelative72" testRelative72 - , TF.testCase "testRelative73" testRelative73 - , TF.testCase "testRelative74" testRelative74 - , TF.testCase "testRelative75" testRelative75 - , TF.testCase "testRelative76" testRelative76 - , TF.testCase "testRelative77" testRelative77 - -- Awkward cases: - , TF.testCase "testRelative78" testRelative78 - , TF.testCase "testRelative79" testRelative79 - , TF.testCase "testRelative80" testRelative80 - , TF.testCase "testRelative81" testRelative81 - -- - -- , TF.testCase "testRelative90" testRelative90 - , TF.testCase "testRelative91" testRelative91 - , TF.testCase "testRelative92" testRelative92 - , TF.testCase 
"testRelative93" testRelative93 - , TF.testCase "testRelative94" testRelative94 - , TF.testCase "testRelative95" testRelative95 - , TF.testCase "testRelative96" testRelative96 - , TF.testCase "testRelative97" testRelative97 - , TF.testCase "testRelative98" testRelative98 - , TF.testCase "testRelative99" testRelative99 - ] - --- RFC2396 relative-to-absolute URI tests - -rfcbase = "http://a/b/c/d;p?q" --- normal cases, RFC2396bis 5.4.1 -testRFC01 = testRelJoin "testRFC01" rfcbase "g:h" "g:h" -testRFC02 = testRelJoin "testRFC02" rfcbase "g" "http://a/b/c/g" -testRFC03 = testRelJoin "testRFC03" rfcbase "./g" "http://a/b/c/g" -testRFC04 = testRelJoin "testRFC04" rfcbase "g/" "http://a/b/c/g/" -testRFC05 = testRelJoin "testRFC05" rfcbase "/g" "http://a/g" -testRFC06 = testRelJoin "testRFC06" rfcbase "//g" "http://g" -testRFC07 = testRelJoin "testRFC07" rfcbase "?y" "http://a/b/c/d;p?y" -testRFC08 = testRelJoin "testRFC08" rfcbase "g?y" "http://a/b/c/g?y" -testRFC09 = testRelJoin "testRFC09" rfcbase "?q#s" "http://a/b/c/d;p?q#s" -testRFC23 = testRelJoin "testRFC10" rfcbase "#s" "http://a/b/c/d;p?q#s" -testRFC10 = testRelJoin "testRFC11" rfcbase "g#s" "http://a/b/c/g#s" -testRFC11 = testRelJoin "testRFC12" rfcbase "g?y#s" "http://a/b/c/g?y#s" -testRFC12 = testRelJoin "testRFC13" rfcbase ";x" "http://a/b/c/;x" -testRFC13 = testRelJoin "testRFC14" rfcbase "g;x" "http://a/b/c/g;x" -testRFC14 = testRelJoin "testRFC15" rfcbase "g;x?y#s" "http://a/b/c/g;x?y#s" -testRFC24 = testRelJoin "testRFC16" rfcbase "" "http://a/b/c/d;p?q" -testRFC15 = testRelJoin "testRFC17" rfcbase "." "http://a/b/c/" -testRFC16 = testRelJoin "testRFC18" rfcbase "./" "http://a/b/c/" -testRFC17 = testRelJoin "testRFC19" rfcbase ".." "http://a/b/" -testRFC18 = testRelJoin "testRFC20" rfcbase "../" "http://a/b/" -testRFC19 = testRelJoin "testRFC21" rfcbase "../g" "http://a/b/g" -testRFC20 = testRelJoin "testRFC22" rfcbase "../.." 
"http://a/" -testRFC21 = testRelJoin "testRFC23" rfcbase "../../" "http://a/" -testRFC22 = testRelJoin "testRFC24" rfcbase "../../g" "http://a/g" --- abnormal cases, RFC2396bis 5.4.2 -testRFC31 = testRelJoin "testRFC31" rfcbase "?q" rfcbase -testRFC32 = testRelJoin "testRFC32" rfcbase "../../../g" "http://a/g" -testRFC33 = testRelJoin "testRFC33" rfcbase "../../../../g" "http://a/g" -testRFC34 = testRelJoin "testRFC34" rfcbase "/./g" "http://a/g" -testRFC35 = testRelJoin "testRFC35" rfcbase "/../g" "http://a/g" -testRFC36 = testRelJoin "testRFC36" rfcbase "g." "http://a/b/c/g." -testRFC37 = testRelJoin "testRFC37" rfcbase ".g" "http://a/b/c/.g" -testRFC38 = testRelJoin "testRFC38" rfcbase "g.." "http://a/b/c/g.." -testRFC39 = testRelJoin "testRFC39" rfcbase "..g" "http://a/b/c/..g" -testRFC40 = testRelJoin "testRFC40" rfcbase "./../g" "http://a/b/g" -testRFC41 = testRelJoin "testRFC41" rfcbase "./g/." "http://a/b/c/g/" -testRFC42 = testRelJoin "testRFC42" rfcbase "g/./h" "http://a/b/c/g/h" -testRFC43 = testRelJoin "testRFC43" rfcbase "g/../h" "http://a/b/c/h" -testRFC44 = testRelJoin "testRFC44" rfcbase "g;x=1/./y" "http://a/b/c/g;x=1/y" -testRFC45 = testRelJoin "testRFC45" rfcbase "g;x=1/../y" "http://a/b/c/y" -testRFC46 = testRelJoin "testRFC46" rfcbase "g?y/./x" "http://a/b/c/g?y/./x" -testRFC47 = testRelJoin "testRFC47" rfcbase "g?y/../x" "http://a/b/c/g?y/../x" -testRFC48 = testRelJoin "testRFC48" rfcbase "g#s/./x" "http://a/b/c/g#s/./x" -testRFC49 = testRelJoin "testRFC49" rfcbase "g#s/../x" "http://a/b/c/g#s/../x" -testRFC50 = testRelJoin "testRFC50" rfcbase "http:x" "http:x" - --- Null path tests --- See RFC2396bis, section 5.2, --- "If the base URI's path component is the empty string, then a single --- slash character is copied to the buffer" -testRFC60 = testRelative "testRFC60" "http://ex" "http://ex/x/y?q" "/x/y?q" -testRFC61 = testRelJoin "testRFC61" "http://ex" "x/y?q" "http://ex/x/y?q" -testRFC62 = testRelative "testRFC62" "http://ex?p" 
"http://ex/x/y?q" "/x/y?q" -testRFC63 = testRelJoin "testRFC63" "http://ex?p" "x/y?q" "http://ex/x/y?q" -testRFC64 = testRelative "testRFC64" "http://ex#f" "http://ex/x/y?q" "/x/y?q" -testRFC65 = testRelJoin "testRFC65" "http://ex#f" "x/y?q" "http://ex/x/y?q" -testRFC66 = testRelative "testRFC66" "http://ex?p" "http://ex/x/y#g" "/x/y#g" -testRFC67 = testRelJoin "testRFC67" "http://ex?p" "x/y#g" "http://ex/x/y#g" -testRFC68 = testRelative "testRFC68" "http://ex" "http://ex/" "/" -testRFC69 = testRelJoin "testRFC69" "http://ex" "./" "http://ex/" -testRFC70 = testRelative "testRFC70" "http://ex" "http://ex/a/b" "/a/b" -testRFC71 = testRelative "testRFC71" "http://ex/a/b" "http://ex" "./" - -testRFC2396Suite = TF.testGroup "Test RFC2396 examples" testRFC2396List -testRFC2396List = - [ TF.testCase "testRFC01" testRFC01 - , TF.testCase "testRFC02" testRFC02 - , TF.testCase "testRFC03" testRFC03 - , TF.testCase "testRFC04" testRFC04 - , TF.testCase "testRFC05" testRFC05 - , TF.testCase "testRFC06" testRFC06 - , TF.testCase "testRFC07" testRFC07 - , TF.testCase "testRFC08" testRFC08 - , TF.testCase "testRFC09" testRFC09 - , TF.testCase "testRFC10" testRFC10 - , TF.testCase "testRFC11" testRFC11 - , TF.testCase "testRFC12" testRFC12 - , TF.testCase "testRFC13" testRFC13 - , TF.testCase "testRFC14" testRFC14 - , TF.testCase "testRFC15" testRFC15 - , TF.testCase "testRFC16" testRFC16 - , TF.testCase "testRFC17" testRFC17 - , TF.testCase "testRFC18" testRFC18 - , TF.testCase "testRFC19" testRFC19 - , TF.testCase "testRFC20" testRFC20 - , TF.testCase "testRFC21" testRFC21 - , TF.testCase "testRFC22" testRFC22 - , TF.testCase "testRFC23" testRFC23 - , TF.testCase "testRFC24" testRFC24 - -- testRFC30, - , TF.testCase "testRFC31" testRFC31 - , TF.testCase "testRFC32" testRFC32 - , TF.testCase "testRFC33" testRFC33 - , TF.testCase "testRFC34" testRFC34 - , TF.testCase "testRFC35" testRFC35 - , TF.testCase "testRFC36" testRFC36 - , TF.testCase "testRFC37" testRFC37 - , TF.testCase 
"testRFC38" testRFC38 - , TF.testCase "testRFC39" testRFC39 - , TF.testCase "testRFC40" testRFC40 - , TF.testCase "testRFC41" testRFC41 - , TF.testCase "testRFC42" testRFC42 - , TF.testCase "testRFC43" testRFC43 - , TF.testCase "testRFC44" testRFC44 - , TF.testCase "testRFC45" testRFC45 - , TF.testCase "testRFC46" testRFC46 - , TF.testCase "testRFC47" testRFC47 - , TF.testCase "testRFC48" testRFC48 - , TF.testCase "testRFC49" testRFC49 - , TF.testCase "testRFC50" testRFC50 - -- - , TF.testCase "testRFC60" testRFC60 - , TF.testCase "testRFC61" testRFC61 - , TF.testCase "testRFC62" testRFC62 - , TF.testCase "testRFC63" testRFC63 - , TF.testCase "testRFC64" testRFC64 - , TF.testCase "testRFC65" testRFC65 - , TF.testCase "testRFC66" testRFC66 - , TF.testCase "testRFC67" testRFC67 - , TF.testCase "testRFC68" testRFC68 - , TF.testCase "testRFC69" testRFC69 - , TF.testCase "testRFC70" testRFC70 - ] - --- And some other oddballs: -mailbase = "mailto:local/option@domain.org?notaquery#frag" -testMail01 = testRelJoin "testMail01" - mailbase "more@domain" - "mailto:local/more@domain" -testMail02 = testRelJoin "testMail02" - mailbase "#newfrag" - "mailto:local/option@domain.org?notaquery#newfrag" -testMail03 = testRelJoin "testMail03" - mailbase "l1/q1@domain" - "mailto:local/l1/q1@domain" - -testMail11 = testRelJoin "testMail11" - "mailto:local1@domain1?query1" "mailto:local2@domain2" - "mailto:local2@domain2" -testMail12 = testRelJoin "testMail12" - "mailto:local1@domain1" "mailto:local2@domain2?query2" - "mailto:local2@domain2?query2" -testMail13 = testRelJoin "testMail13" - "mailto:local1@domain1?query1" "mailto:local2@domain2?query2" - "mailto:local2@domain2?query2" -testMail14 = testRelJoin "testMail14" - "mailto:local@domain?query1" "mailto:local@domain?query2" - "mailto:local@domain?query2" -testMail15 = testRelJoin "testMail15" - "mailto:?query1" "mailto:local@domain?query2" - "mailto:local@domain?query2" -testMail16 = testRelJoin "testMail16" - 
"mailto:local@domain?query1" "?query2" - "mailto:local@domain?query2" -testInfo17 = testRelJoin "testInfo17" - "info:name/1234/../567" "name/9876/../543" - "info:name/name/543" -testInfo18 = testRelJoin "testInfo18" - "info:/name/1234/../567" "name/9876/../543" - "info:/name/name/543" - -testOddballSuite = TF.testGroup "Test oddball examples" testOddballList -testOddballList = - [ TF.testCase "testMail01" testMail01 - , TF.testCase "testMail02" testMail02 - , TF.testCase "testMail03" testMail03 - , TF.testCase "testMail11" testMail11 - , TF.testCase "testMail12" testMail12 - , TF.testCase "testMail13" testMail13 - , TF.testCase "testMail14" testMail14 - , TF.testCase "testMail15" testMail15 - , TF.testCase "testMail16" testMail16 - , TF.testCase "testInfo17" testInfo17 - ] - --- Normalization tests - --- Case normalization; cf. RFC2396bis section 6.2.2.1 --- NOTE: authority case normalization is not performed -testNormalize01 = testEq "testNormalize01" - "http://EXAMPLE.com/Root/%2A?%2B#%2C" - (normalizeCase "HTTP://EXAMPLE.com/Root/%2a?%2b#%2c") - --- Encoding normalization; cf. RFC2396bis section 6.2.2.2 -testNormalize11 = testEq "testNormalize11" - "HTTP://EXAMPLE.com/Root/~Me/" - (normalizeEscape "HTTP://EXAMPLE.com/Root/%7eMe/") -testNormalize12 = testEq "testNormalize12" - "foo:%40AZ%5b%60az%7b%2f09%3a-._~" - (normalizeEscape "foo:%40%41%5a%5b%60%61%7a%7b%2f%30%39%3a%2d%2e%5f%7e") -testNormalize13 = testEq "testNormalize13" - "foo:%3a%2f%3f%23%5b%5d%40" - (normalizeEscape "foo:%3a%2f%3f%23%5b%5d%40") - --- Path segment normalization; cf. 
RFC2396bis section 6.2.2.4 -testNormalize21 = testEq "testNormalize21" - "http://example/c" - (normalizePathSegments "http://example/a/b/../../c") -testNormalize22 = testEq "testNormalize22" - "http://example/a/" - (normalizePathSegments "http://example/a/b/c/../../") -testNormalize23 = testEq "testNormalize23" - "http://example/a/b/c/" - (normalizePathSegments "http://example/a/b/c/./") -testNormalize24 = testEq "testNormalize24" - "http://example/a/b/" - (normalizePathSegments "http://example/a/b/c/.././") -testNormalize25 = testEq "testNormalize25" - "http://example/e" - (normalizePathSegments "http://example/a/b/c/d/../../../../e") -testNormalize26 = testEq "testNormalize26" - "http://example/e" - (normalizePathSegments "http://example/a/b/c/d/../.././../../e") -testNormalize27 = testEq "testNormalize27" - "http://example/e" - (normalizePathSegments "http://example/a/b/../.././../../e") -testNormalize28 = testEq "testNormalize28" - "foo:e" - (normalizePathSegments "foo:a/b/../.././../../e") - -testNormalizeSuite = TF.testGroup "testNormalizeSuite" - [ TF.testCase "testNormalize01" testNormalize01 - , TF.testCase "testNormalize11" testNormalize11 - , TF.testCase "testNormalize12" testNormalize12 - , TF.testCase "testNormalize13" testNormalize13 - , TF.testCase "testNormalize21" testNormalize21 - , TF.testCase "testNormalize22" testNormalize22 - , TF.testCase "testNormalize23" testNormalize23 - , TF.testCase "testNormalize24" testNormalize24 - , TF.testCase "testNormalize25" testNormalize25 - , TF.testCase "testNormalize26" testNormalize26 - , TF.testCase "testNormalize27" testNormalize27 - , TF.testCase "testNormalize28" testNormalize28 - ] - --- URI formatting (show) tests - -ts02URI = URI { uriScheme = "http:" - , uriAuthority = Just (URIAuth "user:pass@" "example.org" ":99") - , uriPath = "/aaa/bbb" - , uriQuery = "?ccc" - , uriFragment = "#ddd/eee" - } - -ts04URI = URI { uriScheme = "http:" - , uriAuthority = Just (URIAuth "user:anonymous@" "example.org" 
":99") - , uriPath = "/aaa/bbb" - , uriQuery = "?ccc" - , uriFragment = "#ddd/eee" - } - -ts02str = "http://user:...@example.org:99/aaa/bbb?ccc#ddd/eee" -ts03str = "http://user:pass@example.org:99/aaa/bbb?ccc#ddd/eee" -ts04str = "http://user:...@example.org:99/aaa/bbb?ccc#ddd/eee" - -testShowURI01 = testEq "testShowURI01" "" (show nullURI) -testShowURI02 = testEq "testShowURI02" ts02str (show ts02URI) -testShowURI03 = testEq "testShowURI03" ts03str ((uriToString id ts02URI) "") -testShowURI04 = testEq "testShowURI04" ts04str (show ts04URI) - -testShowURI = TF.testGroup "testShowURI" - [ TF.testCase "testShowURI01" testShowURI01 - , TF.testCase "testShowURI02" testShowURI02 - , TF.testCase "testShowURI03" testShowURI03 - , TF.testCase "testShowURI04" testShowURI04 - ] - - --- URI escaping tests - -te01str = "http://example.org/az/09-_/.~:/?#[]@!$&'()*+,;=" -te02str = "http://example.org/a/c%/d /e" -te02esc = "http://example.org/a%3C/b%3E/c%25/d%20/e" - -testEscapeURIString01 = testEq "testEscapeURIString01" - te01str (escapeURIString isUnescapedInURI te01str) - -testEscapeURIString02 = testEq "testEscapeURIString02" - te02esc (escapeURIString isUnescapedInURI te02str) - -testEscapeURIString03 = testEq "testEscapeURIString03" - te01str (unEscapeString te01str) - -testEscapeURIString04 = testEq "testEscapeURIString04" - te02str (unEscapeString te02esc) - -testEscapeURIString05 = testEq "testEscapeURIString05" - "http%3A%2F%2Fexample.org%2Faz%2F09-_%2F.~%3A%2F%3F%23%5B%5D%40%21%24%26%27%28%29%2A%2B%2C%3B%3D" - (escapeURIString isUnescapedInURIComponent te01str) - -testEscapeURIString06 = testEq "testEscapeURIString06" - "hello%C3%B8%C2%A9%E6%97%A5%E6%9C%AC" - (escapeURIString isUnescapedInURIComponent "helloø©日本") - -propEscapeUnEscapeLoop :: String -> Bool -propEscapeUnEscapeLoop s = s == (unEscapeString $! 
escaped) - where - escaped = escapeURIString (const False) s - {-# NOINLINE escaped #-} - -testEscapeURIString = TF.testGroup "testEscapeURIString" - [ TF.testCase "testEscapeURIString01" testEscapeURIString01 - , TF.testCase "testEscapeURIString02" testEscapeURIString02 - , TF.testCase "testEscapeURIString03" testEscapeURIString03 - , TF.testCase "testEscapeURIString04" testEscapeURIString04 - , TF.testCase "testEscapeURIString05" testEscapeURIString05 - , TF.testCase "testEscapeURIString06" testEscapeURIString06 - , TF.testProperty "propEscapeUnEscapeLoop" propEscapeUnEscapeLoop - ] - --- URI string normalization tests - -tn01str = "eXAMPLE://a/b/%7bfoo%7d" -tn01nrm = "example://a/b/%7Bfoo%7D" - -tn02str = "example://a/b/%63/" -tn02nrm = "example://a/b/c/" - -tn03str = "example://a/./b/../b/c/foo" -tn03nrm = "example://a/b/c/foo" - -tn04str = "eXAMPLE://a/b/%7bfoo%7d" -- From RFC2396bis, 6.2.2 -tn04nrm = "example://a/b/%7Bfoo%7D" - -tn06str = "file:/x/..//y" -tn06nrm = "file://y" - -tn07str = "file:x/..//y/" -tn07nrm = "file:/y/" - -testNormalizeURIString01 = testEq "testNormalizeURIString01" - tn01nrm (normalizeCase tn01str) -testNormalizeURIString02 = testEq "testNormalizeURIString02" - tn02nrm (normalizeEscape tn02str) -testNormalizeURIString03 = testEq "testNormalizeURIString03" - tn03nrm (normalizePathSegments tn03str) -testNormalizeURIString04 = testEq "testNormalizeURIString04" - tn04nrm ((normalizeCase . normalizeEscape . normalizePathSegments) tn04str) -testNormalizeURIString05 = testEq "testNormalizeURIString05" - tn04nrm ((normalizePathSegments . normalizeEscape . 
normalizeCase) tn04str) -testNormalizeURIString06 = testEq "testNormalizeURIString06" - tn06nrm (normalizePathSegments tn06str) -testNormalizeURIString07 = testEq "testNormalizeURIString07" - tn07nrm (normalizePathSegments tn07str) - -testNormalizeURIString = TF.testGroup "testNormalizeURIString" - [ TF.testCase "testNormalizeURIString01" testNormalizeURIString01 - , TF.testCase "testNormalizeURIString02" testNormalizeURIString02 - , TF.testCase "testNormalizeURIString03" testNormalizeURIString03 - , TF.testCase "testNormalizeURIString04" testNormalizeURIString04 - , TF.testCase "testNormalizeURIString05" testNormalizeURIString05 - , TF.testCase "testNormalizeURIString06" testNormalizeURIString06 - , TF.testCase "testNormalizeURIString07" testNormalizeURIString07 - ] - --- Test strict vs non-strict relativeTo logic - -trbase = fromJust $ parseURIReference "http://bar.org/" - -testRelativeTo01 = testEq "testRelativeTo01" - "http://bar.org/foo" - (show $ - (fromJust $ parseURIReference "foo") `relativeTo` trbase) - -testRelativeTo02 = testEq "testRelativeTo02" - "http:foo" - (show $ - (fromJust $ parseURIReference "http:foo") `relativeTo` trbase) - -testRelativeTo03 = testEq "testRelativeTo03" - "http://bar.org/foo" - (show $ - (fromJust $ parseURIReference "http:foo") `nonStrictRelativeTo` trbase) - -testRelativeTo = TF.testGroup "testRelativeTo" - [ TF.testCase "testRelativeTo01" testRelativeTo01 - , TF.testCase "testRelativeTo02" testRelativeTo02 - , TF.testCase "testRelativeTo03" testRelativeTo03 - ] - --- Test alternative parsing functions -testAltFn01 = testEq "testAltFn01" "Just http://a.b/c#f" - (show . parseURI $ "http://a.b/c#f") -testAltFn02 = testEq "testAltFn02" "Just http://a.b/c#f" - (show . parseURIReference $ "http://a.b/c#f") -testAltFn03 = testEq "testAltFn03" "Just c/d#f" - (show . parseRelativeReference $ "c/d#f") -testAltFn04 = testEq "testAltFn04" "Nothing" - (show . 
parseRelativeReference $ "http://a.b/c#f") -testAltFn05 = testEq "testAltFn05" "Just http://a.b/c" - (show . parseAbsoluteURI $ "http://a.b/c") -testAltFn06 = testEq "testAltFn06" "Nothing" - (show . parseAbsoluteURI $ "http://a.b/c#f") -testAltFn07 = testEq "testAltFn07" "Nothing" - (show . parseAbsoluteURI $ "c/d") -testAltFn08 = testEq "testAltFn08" "Just http://a.b/c" - (show . parseAbsoluteURI $ "http://a.b/c") - -testAltFn11 = testEq "testAltFn11" True (isURI "http://a.b/c#f") -testAltFn12 = testEq "testAltFn12" True (isURIReference "http://a.b/c#f") -testAltFn13 = testEq "testAltFn13" True (isRelativeReference "c/d#f") -testAltFn14 = testEq "testAltFn14" False (isRelativeReference "http://a.b/c#f") -testAltFn15 = testEq "testAltFn15" True (isAbsoluteURI "http://a.b/c") -testAltFn16 = testEq "testAltFn16" False (isAbsoluteURI "http://a.b/c#f") -testAltFn17 = testEq "testAltFn17" False (isAbsoluteURI "c/d") - -testAltFn = TF.testGroup "testAltFn" - [ TF.testCase "testAltFn01" testAltFn01 - , TF.testCase "testAltFn02" testAltFn02 - , TF.testCase "testAltFn03" testAltFn03 - , TF.testCase "testAltFn04" testAltFn04 - , TF.testCase "testAltFn05" testAltFn05 - , TF.testCase "testAltFn06" testAltFn06 - , TF.testCase "testAltFn07" testAltFn07 - , TF.testCase "testAltFn08" testAltFn08 - , TF.testCase "testAltFn11" testAltFn11 - , TF.testCase "testAltFn12" testAltFn12 - , TF.testCase "testAltFn13" testAltFn13 - , TF.testCase "testAltFn14" testAltFn14 - , TF.testCase "testAltFn15" testAltFn15 - , TF.testCase "testAltFn16" testAltFn16 - , TF.testCase "testAltFn17" testAltFn17 - ] - -testUriIsAbsolute :: String -> Assertion -testUriIsAbsolute str = - assertBool str (uriIsAbsolute uri) - where - Just uri = parseURIReference str - -testUriIsRelative :: String -> Assertion -testUriIsRelative str = - assertBool str (uriIsRelative uri) - where - Just uri = parseURIReference str - -testIsAbsolute = TF.testGroup "testIsAbsolute" - [ TF.testCase "testIsAbsolute01" $ 
testUriIsAbsolute "http://google.com" - , TF.testCase "testIsAbsolute02" $ testUriIsAbsolute "ftp://p.x.ca/woo?hai=a" - , TF.testCase "testIsAbsolute03" $ testUriIsAbsolute "mailto:bob@example.com" - ] - -testIsRelative = TF.testGroup "testIsRelative" - [ TF.testCase "testIsRelative01" $ testUriIsRelative "//google.com" - , TF.testCase "testIsRelative02" $ testUriIsRelative "/hello" - , TF.testCase "testIsRelative03" $ testUriIsRelative "this/is/a/path" - , TF.testCase "testIsRelative04" $ testUriIsRelative "?what=that" - ] - --- Full test suite -allTests = - [ testURIRefSuite - , testComponentSuite - , testRelativeSuite - , testRFC2396Suite - , testOddballSuite - , testNormalizeSuite - , testShowURI - , testEscapeURIString - , testNormalizeURIString - , testRelativeTo - , testAltFn - , testIsAbsolute - , testIsRelative - ] - -main = TF.defaultMain allTests - -runTestFile t = do - h <- openFile "a.tmp" WriteMode - _ <- runTestText (putTextToHandle h False) t - hClose h -tf = runTestFile -tt = runTestTT - --- Miscellaneous values for hand-testing/debugging in Hugs: - -uref = testURIRefSuite -tr01 = testRelative01 -tr02 = testRelative02 -tr03 = testRelative03 -tr04 = testRelative04 -rel = testRelativeSuite -rfc = testRFC2396Suite -oddb = testOddballSuite - -(Just bu02) = parseURIReference "http://example/x/y/z" -(Just ou02) = parseURIReference "../abc" -(Just ru02) = parseURIReference "http://example/x/abc" --- fileuri = testURIReference "file:///C:/DEV/Haskell/lib/HXmlToolbox-3.01/examples/" - -cu02 = ou02 `relativeTo` bu02 - --------------------------------------------------------------------------------- --- --- Copyright (c) 2004, G. KLYNE. All rights reserved. --- Distributed as free software under the following license. 
--- --- Redistribution and use in source and binary forms, with or without --- modification, are permitted provided that the following conditions --- are met: --- --- - Redistributions of source code must retain the above copyright notice, --- this list of conditions and the following disclaimer. --- --- - Redistributions in binary form must reproduce the above copyright --- notice, this list of conditions and the following disclaimer in the --- documentation and/or other materials provided with the distribution. --- --- - Neither name of the copyright holders nor the names of its --- contributors may be used to endorse or promote products derived from --- this software without specific prior written permission. --- --- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND THE CONTRIBUTORS --- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT --- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR --- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT --- HOLDERS OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, --- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, --- BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS --- OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND --- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR --- TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE --- USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --- --------------------------------------------------------------------------------- --- $Source: /srv/cvs/cvs.haskell.org/fptools/libraries/network/tests/URITest.hs,v $ --- $Author: gklyne $ --- $Revision: 1.8 $ --- $Log: URITest.hs,v $ --- Revision 1.81 2012/08/01 aaronfriel --- Added additional test case for the "xip.io" service style URLs and absolute URLs prefixed with ipv4 addresses. 
--- --- Revision 1.8 2005/07/19 22:01:27 gklyne --- Added some additional test cases raised by discussion on URI@w3.org mailing list about 2005-07-19. The test p[roposed by this discussion exposed a subtle bug in relativeFrom not being an exact inverse of relativeTo. --- --- Revision 1.7 2005/06/06 16:31:44 gklyne --- Added two new test cases. --- --- Revision 1.6 2005/05/31 17:18:36 gklyne --- Added some additional test cases triggered by URI-list discussions. --- --- Revision 1.5 2005/04/07 11:09:37 gklyne --- Added test cases for alternate parsing functions (including deprecated 'parseabsoluteURI') --- --- Revision 1.4 2005/04/05 12:47:32 gklyne --- Added test case. --- Changed module name, now requires GHC -main-is to compile. --- All tests run OK with GHC 6.4 on MS-Windows. --- --- Revision 1.3 2004/11/05 17:29:09 gklyne --- Changed password-obscuring logic to reflect late change in revised URI --- specification (password "anonymous" is no longer a special case). --- Updated URI test module to use function 'escapeURIString'. --- (Should unEscapeString be similarly updated?) --- --- Revision 1.2 2004/10/27 13:06:55 gklyne --- Updated URI module function names per: --- http://www.haskell.org//pipermail/cvs-libraries/2004-October/002916.html --- Added test cases to give better covereage of module functions. --- --- Revision 1.1 2004/10/14 16:11:30 gklyne --- Add URI unit test to cvs.haskell.org repository --- --- Revision 1.17 2004/10/14 11:51:09 graham --- Confirm that URITest runs with GHC. --- Fix up some comments and other minor details. --- --- Revision 1.16 2004/10/14 11:45:30 graham --- Use moduke name main for GHC 6.2 --- --- Revision 1.15 2004/08/11 11:07:39 graham --- Add new test case. --- --- Revision 1.14 2004/06/30 11:35:27 graham --- Update URI code to use hierarchical libraries for Parsec and Network. --- --- Revision 1.13 2004/06/22 16:19:16 graham --- New URI test case added. 
--- --- Revision 1.12 2004/04/21 15:13:29 graham --- Add test case --- --- Revision 1.11 2004/04/21 14:54:05 graham --- Fix up some tests --- --- Revision 1.10 2004/04/20 14:54:13 graham --- Fix up test cases related to port number in authority, --- and add some more URI decomposition tests. --- --- Revision 1.9 2004/04/07 15:06:17 graham --- Add extra test case --- Revise syntax in line with changes to RFC2396bis --- --- Revision 1.8 2004/03/17 14:34:58 graham --- Add Network.HTTP files to CVS --- --- Revision 1.7 2004/03/16 14:19:38 graham --- Change licence to BSD style; add nullURI definition; new test cases. --- --- Revision 1.6 2004/02/20 12:12:00 graham --- Add URI normalization functions --- --- Revision 1.5 2004/02/19 23:19:35 graham --- Network.URI module passes all test cases --- --- Revision 1.4 2004/02/17 20:06:02 graham --- Revised URI parser to reflect latest RFC2396bis (-04) --- --- Revision 1.3 2004/02/11 14:32:14 graham --- Added work-in-progress notes. --- --- Revision 1.2 2004/02/02 14:00:39 graham --- Fix optional host name in URI. Add test cases. --- --- Revision 1.1 2004/01/27 21:13:45 graham --- New URI module and test suite added, --- implementing the GHC Network.URI interface. 
--- diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/cbits/time_iconv.c cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/cbits/time_iconv.c --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/cbits/time_iconv.c 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/cbits/time_iconv.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,35 +0,0 @@ -#include -#include -#include -#include - -int time_iconv(char *srcbuf, size_t srcbufsize) -{ - uint16_t *destbuf = NULL; - size_t destbufsize; - static uint16_t *origdestbuf; - static size_t origdestbufsize; - iconv_t ic = (iconv_t) -1; - int ret = 0; - - if (ic == (iconv_t) -1) { - ic = iconv_open("UTF-16LE", "UTF-8"); - if (ic == (iconv_t) -1) { - ret = -1; - goto done; - } - } - - destbufsize = srcbufsize * sizeof(uint16_t); - if (destbufsize > origdestbufsize) { - free(origdestbuf); - origdestbuf = destbuf = malloc(origdestbufsize = destbufsize); - } else { - destbuf = origdestbuf; - } - - iconv(ic, &srcbuf, &srcbufsize, (char**) &destbuf, &destbufsize); - - done: - return ret; -} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Builder.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Builder.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Builder.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Builder.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,75 +0,0 @@ --- | Testing the internal builder monoid --- --- Tested in this benchmark: --- --- * Concatenating many small strings using a builder --- -{-# LANGUAGE OverloadedStrings #-} -module Benchmarks.Builder - ( benchmark - ) where - -import Criterion (Benchmark, bgroup, bench, nf) -import Data.Binary.Builder as B -import Data.ByteString.Char8 () -import Data.Monoid (mconcat, mempty) -import qualified Blaze.ByteString.Builder as Blaze -import qualified 
Blaze.ByteString.Builder.Char.Utf8 as Blaze -import qualified Data.ByteString as SB -import qualified Data.ByteString.Lazy as LB -import qualified Data.Text as T -import qualified Data.Text.Lazy as LT -import qualified Data.Text.Lazy.Builder as LTB -import qualified Data.Text.Lazy.Builder.Int as Int -import Data.Int (Int64) - -benchmark :: IO Benchmark -benchmark = return $ bgroup "Builder" - [ bgroup "Comparison" - [ bench "LazyText" $ nf - (LT.length . LTB.toLazyText . mconcat . map LTB.fromText) texts - , bench "Binary" $ nf - (LB.length . B.toLazyByteString . mconcat . map B.fromByteString) - byteStrings - , bench "Blaze" $ nf - (LB.length . Blaze.toLazyByteString . mconcat . map Blaze.fromString) - strings - ] - , bgroup "Int" - [ bgroup "Decimal" - [ bgroup "Positive" . - flip map numbers $ \n -> - (bench (show (length (show n))) $ nf (LTB.toLazyText . Int.decimal) n) - , bgroup "Negative" . - flip map numbers $ \m -> - let n = negate m in - (bench (show (length (show n))) $ nf (LTB.toLazyText . Int.decimal) n) - , bench "Empty" $ nf LTB.toLazyText mempty - , bgroup "Show" . 
- flip map numbers $ \n -> - (bench (show (length (show n))) $ nf show n) - ] - ] - ] - where - numbers :: [Int64] - numbers = [ - 6, 14, 500, 9688, 10654, 620735, 5608880, 37010612, - 731223504, 5061580596, 24596952933, 711732309084, 2845910093839, - 54601756118340, 735159434806159, 3619097625502435, 95777227510267124, - 414944309510675693, 8986407456998704019 - ] - -texts :: [T.Text] -texts = take 200000 $ cycle ["foo", "λx", "由の"] -{-# NOINLINE texts #-} - --- Note that the non-ascii characters will be chopped -byteStrings :: [SB.ByteString] -byteStrings = take 200000 $ cycle ["foo", "λx", "由の"] -{-# NOINLINE byteStrings #-} - --- Note that the non-ascii characters will be chopped -strings :: [String] -strings = take 200000 $ cycle ["foo", "λx", "由の"] -{-# NOINLINE strings #-} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/DecodeUtf8.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/DecodeUtf8.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/DecodeUtf8.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/DecodeUtf8.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,67 +0,0 @@ -{-# LANGUAGE ForeignFunctionInterface #-} - --- | Test decoding of UTF-8 --- --- Tested in this benchmark: --- --- * Decoding bytes using UTF-8 --- --- In some tests: --- --- * Taking the length of the result --- --- * Taking the init of the result --- --- The latter are used for testing stream fusion. 
--- -module Benchmarks.DecodeUtf8 - ( benchmark - ) where - -import Foreign.C.Types -import Data.ByteString.Internal (ByteString(..)) -import Data.ByteString.Lazy.Internal (ByteString(..)) -import Foreign.Ptr (Ptr, plusPtr) -import Foreign.ForeignPtr (withForeignPtr) -import Data.Word (Word8) -import qualified Criterion as C -import Criterion (Benchmark, bgroup, nf, whnfIO) -import qualified Codec.Binary.UTF8.Generic as U8 -import qualified Data.ByteString as B -import qualified Data.ByteString.Lazy as BL -import qualified Data.Text as T -import qualified Data.Text.Encoding as T -import qualified Data.Text.Lazy as TL -import qualified Data.Text.Lazy.Encoding as TL - -benchmark :: String -> FilePath -> IO Benchmark -benchmark kind fp = do - bs <- B.readFile fp - lbs <- BL.readFile fp - let bench name = C.bench (name ++ "+" ++ kind) - decodeStream (Chunk b0 bs0) = case T.streamDecodeUtf8 b0 of - T.Some t0 _ f0 -> t0 : go f0 bs0 - where go f (Chunk b bs1) = case f b of - T.Some t1 _ f1 -> t1 : go f1 bs1 - go _ _ = [] - decodeStream _ = [] - return $ bgroup "DecodeUtf8" - [ bench "Strict" $ nf T.decodeUtf8 bs - , bench "Stream" $ nf decodeStream lbs - , bench "IConv" $ whnfIO $ iconv bs - , bench "StrictLength" $ nf (T.length . T.decodeUtf8) bs - , bench "StrictInitLength" $ nf (T.length . T.init . T.decodeUtf8) bs - , bench "Lazy" $ nf TL.decodeUtf8 lbs - , bench "LazyLength" $ nf (TL.length . TL.decodeUtf8) lbs - , bench "LazyInitLength" $ nf (TL.length . TL.init . TL.decodeUtf8) lbs - , bench "StrictStringUtf8" $ nf U8.toString bs - , bench "StrictStringUtf8Length" $ nf (length . U8.toString) bs - , bench "LazyStringUtf8" $ nf U8.toString lbs - , bench "LazyStringUtf8Length" $ nf (length . 
U8.toString) lbs - ] - -iconv :: B.ByteString -> IO CInt -iconv (PS fp off len) = withForeignPtr fp $ \ptr -> - time_iconv (ptr `plusPtr` off) (fromIntegral len) - -foreign import ccall unsafe time_iconv :: Ptr Word8 -> CSize -> IO CInt diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/EncodeUtf8.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/EncodeUtf8.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/EncodeUtf8.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/EncodeUtf8.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,33 +0,0 @@ --- | UTF-8 encode a text --- --- Tested in this benchmark: --- --- * Replicating a string a number of times --- --- * UTF-8 encoding it --- -module Benchmarks.EncodeUtf8 - ( benchmark - ) where - -import Criterion (Benchmark, bgroup, bench, whnf) -import qualified Data.ByteString as B -import qualified Data.ByteString.Lazy as BL -import qualified Data.Text as T -import qualified Data.Text.Encoding as T -import qualified Data.Text.Lazy as TL -import qualified Data.Text.Lazy.Encoding as TL - -benchmark :: String -> IO Benchmark -benchmark string = do - return $ bgroup "EncodeUtf8" - [ bench "Text" $ whnf (B.length . T.encodeUtf8) text - , bench "LazyText" $ whnf (BL.length . 
TL.encodeUtf8) lazyText - ] - where - -- The string in different formats - text = T.replicate k $ T.pack string - lazyText = TL.replicate (fromIntegral k) $ TL.pack string - - -- Amount - k = 100000 diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Equality.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Equality.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Equality.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Equality.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,38 +0,0 @@ --- | Compare a string with a copy of itself that is identical except --- for the last character. --- --- Tested in this benchmark: --- --- * Comparison of strings (Eq instance) --- -module Benchmarks.Equality - ( benchmark - ) where - -import Criterion (Benchmark, bgroup, bench, whnf) -import qualified Data.ByteString.Char8 as B -import qualified Data.ByteString.Lazy.Char8 as BL -import qualified Data.Text as T -import qualified Data.Text.Encoding as T -import qualified Data.Text.Lazy as TL -import qualified Data.Text.Lazy.Encoding as TL - -benchmark :: FilePath -> IO Benchmark -benchmark fp = do - b <- B.readFile fp - bl1 <- BL.readFile fp - -- A lazy bytestring is a list of chunks. When we do not explicitly create two - -- different lazy bytestrings at a different address, the bytestring library - -- will compare the chunk addresses instead of the chunk contents. This is why - -- we read the lazy bytestring twice here. 
- bl2 <- BL.readFile fp - l <- readFile fp - let t = T.decodeUtf8 b - tl = TL.decodeUtf8 bl1 - return $ bgroup "Equality" - [ bench "Text" $ whnf (== T.init t `T.snoc` '\xfffd') t - , bench "LazyText" $ whnf (== TL.init tl `TL.snoc` '\xfffd') tl - , bench "ByteString" $ whnf (== B.init b `B.snoc` '\xfffd') b - , bench "LazyByteString" $ whnf (== BL.init bl2 `BL.snoc` '\xfffd') bl1 - , bench "String" $ whnf (== init l ++ "\xfffd") l - ] diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/FileRead.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/FileRead.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/FileRead.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/FileRead.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,33 +0,0 @@ --- | Benchmarks simple file reading --- --- Tested in this benchmark: --- --- * Reading a file from the disk --- -module Benchmarks.FileRead - ( benchmark - ) where - -import Control.Applicative ((<$>)) -import Criterion (Benchmark, bgroup, bench, whnfIO) -import qualified Data.ByteString as SB -import qualified Data.ByteString.Lazy as LB -import qualified Data.Text as T -import qualified Data.Text.Encoding as T -import qualified Data.Text.IO as T -import qualified Data.Text.Lazy as LT -import qualified Data.Text.Lazy.Encoding as LT -import qualified Data.Text.Lazy.IO as LT - -benchmark :: FilePath -> IO Benchmark -benchmark p = return $ bgroup "FileRead" - [ bench "String" $ whnfIO $ length <$> readFile p - , bench "ByteString" $ whnfIO $ SB.length <$> SB.readFile p - , bench "LazyByteString" $ whnfIO $ LB.length <$> LB.readFile p - , bench "Text" $ whnfIO $ T.length <$> T.readFile p - , bench "LazyText" $ whnfIO $ LT.length <$> LT.readFile p - , bench "TextByteString" $ whnfIO $ - (T.length . T.decodeUtf8) <$> SB.readFile p - , bench "LazyTextByteString" $ whnfIO $ - (LT.length . 
LT.decodeUtf8) <$> LB.readFile p - ] diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/FoldLines.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/FoldLines.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/FoldLines.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/FoldLines.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,58 +0,0 @@ --- | Read a file line-by-line using handles, and perform a fold over the lines. --- The fold is used here to calculate the number of lines in the file. --- --- Tested in this benchmark: --- --- * Buffered, line-based IO --- -{-# LANGUAGE BangPatterns #-} -module Benchmarks.FoldLines - ( benchmark - ) where - -import Criterion (Benchmark, bgroup, bench, whnfIO) -import System.IO -import qualified Data.ByteString as B -import qualified Data.Text as T -import qualified Data.Text.IO as T - -benchmark :: FilePath -> IO Benchmark -benchmark fp = return $ bgroup "ReadLines" - [ bench "Text" $ withHandle $ foldLinesT (\n _ -> n + 1) (0 :: Int) - , bench "ByteString" $ withHandle $ foldLinesB (\n _ -> n + 1) (0 :: Int) - ] - where - withHandle f = whnfIO $ do - h <- openFile fp ReadMode - hSetBuffering h (BlockBuffering (Just 16384)) - x <- f h - hClose h - return x - --- | Text line fold --- -foldLinesT :: (a -> T.Text -> a) -> a -> Handle -> IO a -foldLinesT f z0 h = go z0 - where - go !z = do - eof <- hIsEOF h - if eof - then return z - else do - l <- T.hGetLine h - let z' = f z l in go z' -{-# INLINE foldLinesT #-} - --- | ByteString line fold --- -foldLinesB :: (a -> B.ByteString -> a) -> a -> Handle -> IO a -foldLinesB f z0 h = go z0 - where - go !z = do - eof <- hIsEOF h - if eof - then return z - else do - l <- B.hGetLine h - let z' = f z l in go z' -{-# INLINE foldLinesB #-} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Mul.hs 
cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Mul.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Mul.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Mul.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,138 +0,0 @@ -module Benchmarks.Mul (benchmark) where - -import Control.Exception (evaluate) -import Criterion.Main -import Data.Int (Int32, Int64) -import Data.Text.Internal (mul32, mul64) -import qualified Data.Vector.Unboxed as U - -oldMul :: Int64 -> Int64 -> Int64 -oldMul m n - | n == 0 = 0 - | m <= maxBound `quot` n = m * n - | otherwise = error "overflow" - -benchmark :: IO Benchmark -benchmark = do - _ <- evaluate testVector32 - _ <- evaluate testVector64 - return $ bgroup "Mul" [ - bench "oldMul" $ whnf (U.map (uncurry oldMul)) testVector64 - , bench "mul64" $ whnf (U.map (uncurry mul64)) testVector64 - , bench "*64" $ whnf (U.map (uncurry (*))) testVector64 - , bench "mul32" $ whnf (U.map (uncurry mul32)) testVector32 - , bench "*32" $ whnf (U.map (uncurry (*))) testVector32 - ] - -testVector64 :: U.Vector (Int64,Int64) -testVector64 = U.fromList [ - (0,1248868987182846646),(169004623633872,24458),(482549039517835,7614), - (372,8157063115504364),(27,107095594861148252),(3,63249878517962420), - (4363,255694473572912),(86678474,1732634806),(1572453024,1800489338), - (9384523143,77053781),(49024709555,75095046),(7,43457620410239131), - (8,8201563008844571),(387719037,1520696708),(189869238220197,1423), - (46788016849611,23063),(503077742109974359,0),(104,1502010908706487), - (30478140346,207525518),(80961140129236192,14),(4283,368012829143675), - (1028719181728108146,6),(318904,5874863049591),(56724427166898,110794), - (234539368,31369110449),(2,251729663598178612),(103291548194451219,5), - (76013,5345328755566),(1769631,2980846129318),(40898,60598477385754), - (0,98931348893227155),(573555872156917492,3),(318821187115,4476566), - 
(11152874213584,243582),(40274276,16636653248),(127,4249988676030597), - (103543712111871836,5),(71,16954462148248238),(3963027173504,216570), - (13000,503523808916753),(17038308,20018685905),(0,510350226577891549), - (175898,3875698895405),(425299191292676,5651),(17223451323664536,50), - (61755131,14247665326),(0,1018195131697569303),(36433751497238985,20), - (3473607861601050,1837),(1392342328,1733971838),(225770297367,3249655), - (14,127545244155254102),(1751488975299136,2634),(3949208,504190668767), - (153329,831454434345),(1066212122928663658,2),(351224,2663633539556), - (344565,53388869217),(35825609350446863,54),(276011553660081475,10), - (1969754174790470349,3),(35,68088438338633),(506710,3247689556438), - (11099382291,327739909),(105787303549,32824363),(210366111,14759049409), - (688893241579,3102676),(8490,70047474429581),(152085,29923000251880), - (5046974599257095,400),(4183167795,263434071),(10089728,502781960687), - (44831977765,4725378),(91,8978094664238578),(30990165721,44053350), - (1772377,149651820860),(243420621763408572,4),(32,5790357453815138), - (27980806337993771,5),(47696295759774,20848),(1745874142313778,1098), - (46869334770121,1203),(886995283,1564424789),(40679396544,76002479), - (1,672849481568486995),(337656187205,3157069),(816980552858963,6003), - (2271434085804831543,1),(0,1934521023868747186),(6266220038281,15825), - (4160,107115946987394),(524,246808621791561),(0,1952519482439636339), - (128,2865935904539691),(1044,3211982069426297),(16000511542473,88922), - (1253596745404082,2226),(27041,56836278958002),(23201,49247489754471), - (175906590497,21252392),(185163584757182295,24),(34742225226802197,150), - (2363228,250824838408),(216327527109550,45),(24,81574076994520675), - (28559899906542,15356),(10890139774837133,511),(2293,707179303654492), - (2749366833,40703233),(0,4498229704622845986),(439,4962056468281937), - (662,1453820621089921),(16336770612459631,220),(24282989393,74239137), - 
(2724564648490195,3),(743672760,124992589),(4528103,704330948891), - (6050483122491561,250),(13322953,13594265152),(181794,22268101450214), - (25957941712,75384092),(43352,7322262295009),(32838,52609059549923), - (33003585202001564,2),(103019,68430142267402),(129918230800,8742978), - (0,2114347379589080688),(2548,905723041545274),(222745067962838382,0), - (1671683850790425181,1),(455,4836932776795684),(794227702827214,6620), - (212534135175874,1365),(96432431858,29784975),(466626763743380,3484), - (29793949,53041519613),(8359,309952753409844),(3908960585331901,26), - (45185288970365760,114),(10131829775,68110174),(58039242399640479,83), - (628092278238719399,6),(1,196469106875361889),(302336625,16347502444), - (148,3748088684181047),(1,1649096568849015456),(1019866864,2349753026), - (8211344830,569363306),(65647579546873,34753),(2340190,1692053129069), - (64263301,30758930355),(48681618072372209,110),(7074794736,47640197), - (249634721521,7991792),(1162917363807215,232),(7446433349,420634045), - (63398619383,60709817),(51359004508011,14200),(131788797028647,7072), - (52079887791430043,7),(7,136277667582599838),(28582879735696,50327), - (1404582800566278,833),(469164435,15017166943),(99567079957578263,49), - (1015285971,3625801566),(321504843,4104079293),(5196954,464515406632), - (114246832260876,7468),(8149664437,487119673),(12265299,378168974869), - (37711995764,30766513),(3971137243,710996152),(483120070302,603162), - (103009942,61645547145),(8476344625340,6987),(547948761229739,1446), - (42234,18624767306301),(13486714173011,58948),(4,198309153268019840), - (9913176974,325539248),(28246225540203,116822),(2882463945582154,18), - (959,25504987505398),(3,1504372236378217710),(13505229956793,374987), - (751661959,457611342),(27375926,36219151769),(482168869,5301952074), - (1,1577425863241520640),(714116235611821,1164),(904492524250310488,0), - (5983514941763398,68),(10759472423,23540686),(72539568471529,34919), - 
(4,176090672310337473),(938702842110356453,1),(673652445,3335287382), - (3111998893666122,917),(1568013,3168419765469)] - -testVector32 :: U.Vector (Int32,Int32) -testVector32 = U.fromList [ - (39242,410),(0,100077553),(2206,9538),(509400240,1),(38048,6368), - (1789,651480),(2399,157032),(701,170017),(5241456,14),(11212,70449), - (1,227804876),(749687254,1),(74559,2954),(1158,147957),(410604456,1), - (170851,1561),(92643422,1),(6192,180509),(7,24202210),(3440,241481), - (5753677,5),(294327,1622),(252,4454673),(127684121,11),(28315800,30), - (340370905,0),(1,667887987),(592782090,1),(49023,27641),(750,290387), - (72886,3847),(0,301047933),(3050276,473),(1,788366142),(59457,15813), - (637726933,1),(1135,344317),(853616,264),(696816,493),(7038,12046), - (125219574,4),(803694088,1),(107081726,1),(39294,21699),(16361,38191), - (132561123,12),(1760,23499),(847543,484),(175687349,1),(2963,252678), - (6248,224553),(27596,4606),(5422922,121),(1542,485890),(131,583035), - (59096,4925),(3637115,132),(0,947225435),(86854,6794),(2984745,339), - (760129569,1),(1,68260595),(380835652,2),(430575,2579),(54514,7211), - (15550606,3),(9,27367402),(3007053,207),(7060988,60),(28560,27130), - (1355,21087),(10880,53059),(14563646,4),(461886361,1),(2,169260724), - (241454126,2),(406797,1),(61631630,16),(44473,5943),(63869104,12), - (950300,1528),(2113,62333),(120817,9358),(100261456,1),(426764723,1), - (119,12723684),(3,53358711),(4448071,18),(1,230278091),(238,232102), - (8,57316440),(42437979,10),(6769,19555),(48590,22006),(11500585,79), - (2808,97638),(42,26952545),(11,32104194),(23954638,1),(785427272,0), - (513,81379),(31333960,37),(897772,1009),(4,25679692),(103027993,12), - (104972702,11),(546,443401),(7,65137092),(88574269,3),(872139069,0), - (2,97417121),(378802603,0),(141071401,4),(22613,10575),(2191743,118), - (470,116119),(7062,38166),(231056,1847),(43901963,9),(2400,70640), - (63553,1555),(34,11249573),(815174,1820),(997894011,0),(98881794,2), - 
(5448,43132),(27956,9),(904926,1357),(112608626,3),(124,613021), - (282086,1966),(99,10656881),(113799,1501),(433318,2085),(442,948171), - (165380,1043),(28,14372905),(14880,50462),(2386,219918),(229,1797565), - (1174961,298),(3925,41833),(3903515,299),(15690452,111),(360860521,3), - (7440846,81),(2541026,507),(0,492448477),(6869,82469),(245,8322939), - (3503496,253),(123495298,0),(150963,2299),(33,4408482),(1,200911107), - (305,252121),(13,123369189),(215846,8181),(2440,65387),(776764401,1), - (1241172,434),(8,15493155),(81953961,6),(17884993,5),(26,6893822), - (0,502035190),(1,582451018),(2,514870139),(227,3625619),(49,12720258), - (1456769,207),(94797661,10),(234407,893),(26843,5783),(15688,24547), - (4091,86268),(4339448,151),(21360,6294),(397046497,2),(1227,205936), - (9966,21959),(160046791,1),(0,159992224),(27,24974797),(19177,29334), - (4136148,42),(21179785,53),(61256583,31),(385,344176),(7,11934915), - (1,18992566),(3488065,5),(768021,224),(36288474,7),(8624,117561), - (8,20341439),(5903,261475),(561,1007618),(1738,392327),(633049,1708)] diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Programs/BigTable.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Programs/BigTable.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Programs/BigTable.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Programs/BigTable.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,42 +0,0 @@ --- | Create a large HTML table and dump it to a handle --- --- Tested in this benchmark: --- --- * Creating a large HTML document using a builder --- --- * Writing to a handle --- -{-# LANGUAGE OverloadedStrings #-} -module Benchmarks.Programs.BigTable - ( benchmark - ) where - -import Criterion (Benchmark, bench, whnfIO) -import Data.Monoid (mappend, mconcat) -import Data.Text.Lazy.Builder (Builder, fromText, toLazyText) -import 
Data.Text.Lazy.IO (hPutStr) -import System.IO (Handle) -import qualified Data.Text as T - -benchmark :: Handle -> IO Benchmark -benchmark sink = return $ bench "BigTable" $ whnfIO $ do - hPutStr sink "Content-Type: text/html\n\n" - hPutStr sink . toLazyText . makeTable =<< rows - hPutStr sink "
" - where - -- We provide the number of rows in IO so the builder value isn't shared - -- between the benchmark samples. - rows :: IO Int - rows = return 20000 - {-# NOINLINE rows #-} - -makeTable :: Int -> Builder -makeTable n = mconcat $ replicate n $ mconcat $ map makeCol [1 .. 50] - -makeCol :: Int -> Builder -makeCol 1 = fromText "1" -makeCol 50 = fromText "50" -makeCol i = fromText "" `mappend` (fromInt i `mappend` fromText "") - -fromInt :: Int -> Builder -fromInt = fromText . T.pack . show diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Programs/Cut.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Programs/Cut.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Programs/Cut.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Programs/Cut.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,98 +0,0 @@ --- | Cut into a file, selecting certain columns (e.g. 
columns 10 to 40) --- --- Tested in this benchmark: --- --- * Reading the file --- --- * Splitting into lines --- --- * Taking a number of characters from the lines --- --- * Joining the lines --- --- * Writing back to a handle --- -module Benchmarks.Programs.Cut - ( benchmark - ) where - -import Criterion (Benchmark, bgroup, bench, whnfIO) -import System.IO (Handle, hPutStr) -import qualified Data.ByteString as B -import qualified Data.ByteString.Char8 as BC -import qualified Data.ByteString.Lazy as BL -import qualified Data.ByteString.Lazy.Char8 as BLC -import qualified Data.Text as T -import qualified Data.Text.Encoding as T -import qualified Data.Text.IO as T -import qualified Data.Text.Lazy as TL -import qualified Data.Text.Lazy.Encoding as TL -import qualified Data.Text.Lazy.IO as TL - -benchmark :: FilePath -> Handle -> Int -> Int -> IO Benchmark -benchmark p sink from to = return $ bgroup "Cut" - [ bench' "String" string - , bench' "ByteString" byteString - , bench' "LazyByteString" lazyByteString - , bench' "Text" text - , bench' "LazyText" lazyText - , bench' "TextByteString" textByteString - , bench' "LazyTextByteString" lazyTextByteString - ] - where - bench' n s = bench n $ whnfIO (s p sink from to) - -string :: FilePath -> Handle -> Int -> Int -> IO () -string fp sink from to = do - s <- readFile fp - hPutStr sink $ cut s - where - cut = unlines . map (take (to - from) . drop from) . lines - -byteString :: FilePath -> Handle -> Int -> Int -> IO () -byteString fp sink from to = do - bs <- B.readFile fp - B.hPutStr sink $ cut bs - where - cut = BC.unlines . map (B.take (to - from) . B.drop from) . BC.lines - -lazyByteString :: FilePath -> Handle -> Int -> Int -> IO () -lazyByteString fp sink from to = do - bs <- BL.readFile fp - BL.hPutStr sink $ cut bs - where - cut = BLC.unlines . map (BL.take (to' - from') . BL.drop from') . 
BLC.lines - from' = fromIntegral from - to' = fromIntegral to - -text :: FilePath -> Handle -> Int -> Int -> IO () -text fp sink from to = do - t <- T.readFile fp - T.hPutStr sink $ cut t - where - cut = T.unlines . map (T.take (to - from) . T.drop from) . T.lines - -lazyText :: FilePath -> Handle -> Int -> Int -> IO () -lazyText fp sink from to = do - t <- TL.readFile fp - TL.hPutStr sink $ cut t - where - cut = TL.unlines . map (TL.take (to' - from') . TL.drop from') . TL.lines - from' = fromIntegral from - to' = fromIntegral to - -textByteString :: FilePath -> Handle -> Int -> Int -> IO () -textByteString fp sink from to = do - t <- T.decodeUtf8 `fmap` B.readFile fp - B.hPutStr sink $ T.encodeUtf8 $ cut t - where - cut = T.unlines . map (T.take (to - from) . T.drop from) . T.lines - -lazyTextByteString :: FilePath -> Handle -> Int -> Int -> IO () -lazyTextByteString fp sink from to = do - t <- TL.decodeUtf8 `fmap` BL.readFile fp - BL.hPutStr sink $ TL.encodeUtf8 $ cut t - where - cut = TL.unlines . map (TL.take (to' - from') . TL.drop from') . TL.lines - from' = fromIntegral from - to' = fromIntegral to diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Programs/Fold.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Programs/Fold.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Programs/Fold.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Programs/Fold.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,68 +0,0 @@ --- | Benchmark which formats paragraph, like the @sort@ unix utility. 
--- --- Tested in this benchmark: --- --- * Reading the file --- --- * Splitting into paragraphs --- --- * Reformatting the paragraphs to a certain line width --- --- * Concatenating the results using the text builder --- --- * Writing back to a handle --- -{-# LANGUAGE OverloadedStrings #-} -module Benchmarks.Programs.Fold - ( benchmark - ) where - -import Data.List (foldl') -import Data.List (intersperse) -import Data.Monoid (mempty, mappend, mconcat) -import System.IO (Handle) -import Criterion (Benchmark, bench, whnfIO) -import qualified Data.Text as T -import qualified Data.Text.IO as T -import qualified Data.Text.Lazy.Builder as TLB -import qualified Data.Text.Lazy as TL -import qualified Data.Text.Lazy.IO as TL - -benchmark :: FilePath -> Handle -> IO Benchmark -benchmark i o = return $ - bench "Fold" $ whnfIO $ T.readFile i >>= TL.hPutStr o . fold 80 - --- | We represent a paragraph by a word list --- -type Paragraph = [T.Text] - --- | Fold a text --- -fold :: Int -> T.Text -> TL.Text -fold maxWidth = TLB.toLazyText . mconcat . - intersperse "\n\n" . map (foldParagraph maxWidth) . paragraphs - --- | Fold a paragraph --- -foldParagraph :: Int -> Paragraph -> TLB.Builder -foldParagraph _ [] = mempty -foldParagraph max' (w : ws) = fst $ foldl' go (TLB.fromText w, T.length w) ws - where - go (builder, width) word - | width + len + 1 <= max' = - (builder `mappend` " " `mappend` word', width + len + 1) - | otherwise = - (builder `mappend` "\n" `mappend` word', len) - where - word' = TLB.fromText word - len = T.length word - --- | Divide a text into paragraphs --- -paragraphs :: T.Text -> [Paragraph] -paragraphs = splitParagraphs . map T.words . 
T.lines - where - splitParagraphs ls = case break null ls of - ([], []) -> [] - (p, []) -> [concat p] - (p, lr) -> concat p : splitParagraphs (dropWhile null lr) diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Programs/Sort.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Programs/Sort.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Programs/Sort.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Programs/Sort.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,71 +0,0 @@ --- | This benchmark sorts the lines of a file, like the @sort@ unix utility. --- --- Tested in this benchmark: --- --- * Reading the file --- --- * Splitting into lines --- --- * Sorting the lines --- --- * Joining the lines --- --- * Writing back to a handle --- -{-# LANGUAGE OverloadedStrings #-} -module Benchmarks.Programs.Sort - ( benchmark - ) where - -import Criterion (Benchmark, bgroup, bench, whnfIO) -import Data.Monoid (mconcat) -import System.IO (Handle, hPutStr) -import qualified Data.ByteString as B -import qualified Data.ByteString.Char8 as BC -import qualified Data.ByteString.Lazy as BL -import qualified Data.ByteString.Lazy.Char8 as BLC -import qualified Data.List as L -import qualified Data.Text as T -import qualified Data.Text.Encoding as T -import qualified Data.Text.IO as T -import qualified Data.Text.Lazy as TL -import qualified Data.Text.Lazy.Builder as TLB -import qualified Data.Text.Lazy.Encoding as TL -import qualified Data.Text.Lazy.IO as TL - -benchmark :: FilePath -> Handle -> IO Benchmark -benchmark i o = return $ bgroup "Sort" - [ bench "String" $ whnfIO $ readFile i >>= hPutStr o . string - , bench "ByteString" $ whnfIO $ B.readFile i >>= B.hPutStr o . byteString - , bench "LazyByteString" $ whnfIO $ - BL.readFile i >>= BL.hPutStr o . lazyByteString - , bench "Text" $ whnfIO $ T.readFile i >>= T.hPutStr o . 
text - , bench "LazyText" $ whnfIO $ TL.readFile i >>= TL.hPutStr o . lazyText - , bench "TextByteString" $ whnfIO $ B.readFile i >>= - B.hPutStr o . T.encodeUtf8 . text . T.decodeUtf8 - , bench "LazyTextByteString" $ whnfIO $ BL.readFile i >>= - BL.hPutStr o . TL.encodeUtf8 . lazyText . TL.decodeUtf8 - , bench "TextBuilder" $ whnfIO $ B.readFile i >>= - BL.hPutStr o . TL.encodeUtf8 . textBuilder . T.decodeUtf8 - ] - -string :: String -> String -string = unlines . L.sort . lines - -byteString :: B.ByteString -> B.ByteString -byteString = BC.unlines . L.sort . BC.lines - -lazyByteString :: BL.ByteString -> BL.ByteString -lazyByteString = BLC.unlines . L.sort . BLC.lines - -text :: T.Text -> T.Text -text = T.unlines . L.sort . T.lines - -lazyText :: TL.Text -> TL.Text -lazyText = TL.unlines . L.sort . TL.lines - --- | Text variant using a builder monoid for the final concatenation --- -textBuilder :: T.Text -> TL.Text -textBuilder = TLB.toLazyText . mconcat . L.intersperse (TLB.singleton '\n') . - map TLB.fromText . L.sort . T.lines diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Programs/StripTags.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Programs/StripTags.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Programs/StripTags.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Programs/StripTags.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,53 +0,0 @@ --- | Program to replace HTML tags by whitespace --- --- This program was originally contributed by Petr Prokhorenkov. 
--- --- Tested in this benchmark: --- --- * Reading the file --- --- * Replacing text between HTML tags (<>) with whitespace --- --- * Writing back to a handle --- -{-# OPTIONS_GHC -fspec-constr-count=5 #-} -module Benchmarks.Programs.StripTags - ( benchmark - ) where - -import Criterion (Benchmark, bgroup, bench, whnfIO) -import Data.List (mapAccumL) -import System.IO (Handle, hPutStr) -import qualified Data.ByteString as B -import qualified Data.ByteString.Char8 as BC -import qualified Data.Text as T -import qualified Data.Text.Encoding as T -import qualified Data.Text.IO as T - -benchmark :: FilePath -> Handle -> IO Benchmark -benchmark i o = return $ bgroup "StripTags" - [ bench "String" $ whnfIO $ readFile i >>= hPutStr o . string - , bench "ByteString" $ whnfIO $ B.readFile i >>= B.hPutStr o . byteString - , bench "Text" $ whnfIO $ T.readFile i >>= T.hPutStr o . text - , bench "TextByteString" $ whnfIO $ - B.readFile i >>= B.hPutStr o . T.encodeUtf8 . text . T.decodeUtf8 - ] - -string :: String -> String -string = snd . mapAccumL step 0 - -text :: T.Text -> T.Text -text = snd . T.mapAccumL step 0 - -byteString :: B.ByteString -> B.ByteString -byteString = snd . BC.mapAccumL step 0 - -step :: Int -> Char -> (Int, Char) -step d c - | d > 0 || d' > 0 = (d', ' ') - | otherwise = (d', c) - where - d' = d + depth c - depth '>' = 1 - depth '<' = -1 - depth _ = 0 diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Programs/Throughput.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Programs/Throughput.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Programs/Throughput.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Programs/Throughput.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,41 +0,0 @@ --- | This benchmark simply reads and writes a file using the various string --- libraries. 
The point of it is that we can make better estimations on how --- much time the other benchmarks spend doing IO. --- --- Note that we expect ByteStrings to be a whole lot faster, since they do not --- do any actual encoding/decoding here, while String and Text do have UTF-8 --- encoding/decoding. --- --- Tested in this benchmark: --- --- * Reading the file --- --- * Replacing text between HTML tags (<>) with whitespace --- --- * Writing back to a handle --- -module Benchmarks.Programs.Throughput - ( benchmark - ) where - -import Criterion (Benchmark, bgroup, bench, whnfIO) -import System.IO (Handle, hPutStr) -import qualified Data.ByteString as B -import qualified Data.ByteString.Lazy as BL -import qualified Data.Text.Encoding as T -import qualified Data.Text.IO as T -import qualified Data.Text.Lazy.Encoding as TL -import qualified Data.Text.Lazy.IO as TL - -benchmark :: FilePath -> Handle -> IO Benchmark -benchmark fp sink = return $ bgroup "Throughput" - [ bench "String" $ whnfIO $ readFile fp >>= hPutStr sink - , bench "ByteString" $ whnfIO $ B.readFile fp >>= B.hPutStr sink - , bench "LazyByteString" $ whnfIO $ BL.readFile fp >>= BL.hPutStr sink - , bench "Text" $ whnfIO $ T.readFile fp >>= T.hPutStr sink - , bench "LazyText" $ whnfIO $ TL.readFile fp >>= TL.hPutStr sink - , bench "TextByteString" $ whnfIO $ - B.readFile fp >>= B.hPutStr sink . T.encodeUtf8 . T.decodeUtf8 - , bench "LazyTextByteString" $ whnfIO $ - BL.readFile fp >>= BL.hPutStr sink . TL.encodeUtf8 . 
TL.decodeUtf8 - ] diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Pure.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Pure.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Pure.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Pure.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,479 +0,0 @@ --- | Benchmarks various pure functions from the Text library --- --- Tested in this benchmark: --- --- * Most pure functions defined the string types --- -{-# LANGUAGE BangPatterns, CPP, GADTs, MagicHash #-} -{-# OPTIONS_GHC -fno-warn-orphans #-} -module Benchmarks.Pure - ( benchmark - ) where - -import Control.DeepSeq (NFData (..)) -import Control.Exception (evaluate) -import Criterion (Benchmark, bgroup, bench, nf) -import Data.Char (toLower, toUpper) -import Data.Monoid (mappend, mempty) -import GHC.Base (Char (..), Int (..), chr#, ord#, (+#)) -import qualified Data.ByteString.Char8 as BS -import qualified Data.ByteString.Lazy.Char8 as BL -import qualified Data.ByteString.UTF8 as UTF8 -import qualified Data.List as L -import qualified Data.Text as T -import qualified Data.Text.Encoding as T -import qualified Data.Text.Lazy as TL -import qualified Data.Text.Lazy.Builder as TB -import qualified Data.Text.Lazy.Encoding as TL - -benchmark :: String -> FilePath -> IO Benchmark -benchmark kind fp = do - -- Evaluate stuff before actually running the benchmark, we don't want to - -- count it here. 
- - -- ByteString A - bsa <- BS.readFile fp - - -- Text A/B, LazyText A/B - ta <- evaluate $ T.decodeUtf8 bsa - tb <- evaluate $ T.toUpper ta - tla <- evaluate $ TL.fromChunks (T.chunksOf 16376 ta) - tlb <- evaluate $ TL.fromChunks (T.chunksOf 16376 tb) - - -- ByteString B, LazyByteString A/B - bsb <- evaluate $ T.encodeUtf8 tb - bla <- evaluate $ BL.fromChunks (chunksOf 16376 bsa) - blb <- evaluate $ BL.fromChunks (chunksOf 16376 bsb) - - -- String A/B - sa <- evaluate $ UTF8.toString bsa - sb <- evaluate $ T.unpack tb - - -- Lengths - bsa_len <- evaluate $ BS.length bsa - ta_len <- evaluate $ T.length ta - bla_len <- evaluate $ BL.length bla - tla_len <- evaluate $ TL.length tla - sa_len <- evaluate $ L.length sa - - -- Lines - bsl <- evaluate $ BS.lines bsa - bll <- evaluate $ BL.lines bla - tl <- evaluate $ T.lines ta - tll <- evaluate $ TL.lines tla - sl <- evaluate $ L.lines sa - - return $ bgroup "Pure" - [ bgroup "append" - [ benchT $ nf (T.append tb) ta - , benchTL $ nf (TL.append tlb) tla - , benchBS $ nf (BS.append bsb) bsa - , benchBSL $ nf (BL.append blb) bla - , benchS $ nf ((++) sb) sa - ] - , bgroup "concat" - [ benchT $ nf T.concat tl - , benchTL $ nf TL.concat tll - , benchBS $ nf BS.concat bsl - , benchBSL $ nf BL.concat bll - , benchS $ nf L.concat sl - ] - , bgroup "cons" - [ benchT $ nf (T.cons c) ta - , benchTL $ nf (TL.cons c) tla - , benchBS $ nf (BS.cons c) bsa - , benchBSL $ nf (BL.cons c) bla - , benchS $ nf (c:) sa - ] - , bgroup "concatMap" - [ benchT $ nf (T.concatMap (T.replicate 3 . T.singleton)) ta - , benchTL $ nf (TL.concatMap (TL.replicate 3 . TL.singleton)) tla - , benchBS $ nf (BS.concatMap (BS.replicate 3)) bsa - , benchBSL $ nf (BL.concatMap (BL.replicate 3)) bla - , benchS $ nf (L.concatMap (L.replicate 3 . 
(:[]))) sa - ] - , bgroup "decode" - [ benchT $ nf T.decodeUtf8 bsa - , benchTL $ nf TL.decodeUtf8 bla - , benchBS $ nf BS.unpack bsa - , benchBSL $ nf BL.unpack bla - , benchS $ nf UTF8.toString bsa - ] - , bgroup "decode'" - [ benchT $ nf T.decodeUtf8' bsa - , benchTL $ nf TL.decodeUtf8' bla - ] - , bgroup "drop" - [ benchT $ nf (T.drop (ta_len `div` 3)) ta - , benchTL $ nf (TL.drop (tla_len `div` 3)) tla - , benchBS $ nf (BS.drop (bsa_len `div` 3)) bsa - , benchBSL $ nf (BL.drop (bla_len `div` 3)) bla - , benchS $ nf (L.drop (sa_len `div` 3)) sa - ] - , bgroup "encode" - [ benchT $ nf T.encodeUtf8 ta - , benchTL $ nf TL.encodeUtf8 tla - , benchBS $ nf BS.pack sa - , benchBSL $ nf BL.pack sa - , benchS $ nf UTF8.fromString sa - ] - , bgroup "filter" - [ benchT $ nf (T.filter p0) ta - , benchTL $ nf (TL.filter p0) tla - , benchBS $ nf (BS.filter p0) bsa - , benchBSL $ nf (BL.filter p0) bla - , benchS $ nf (L.filter p0) sa - ] - , bgroup "filter.filter" - [ benchT $ nf (T.filter p1 . T.filter p0) ta - , benchTL $ nf (TL.filter p1 . TL.filter p0) tla - , benchBS $ nf (BS.filter p1 . BS.filter p0) bsa - , benchBSL $ nf (BL.filter p1 . BL.filter p0) bla - , benchS $ nf (L.filter p1 . L.filter p0) sa - ] - , bgroup "foldl'" - [ benchT $ nf (T.foldl' len 0) ta - , benchTL $ nf (TL.foldl' len 0) tla - , benchBS $ nf (BS.foldl' len 0) bsa - , benchBSL $ nf (BL.foldl' len 0) bla - , benchS $ nf (L.foldl' len 0) sa - ] - , bgroup "foldr" - [ benchT $ nf (L.length . T.foldr (:) []) ta - , benchTL $ nf (L.length . TL.foldr (:) []) tla - , benchBS $ nf (L.length . BS.foldr (:) []) bsa - , benchBSL $ nf (L.length . BL.foldr (:) []) bla - , benchS $ nf (L.length . 
L.foldr (:) []) sa - ] - , bgroup "head" - [ benchT $ nf T.head ta - , benchTL $ nf TL.head tla - , benchBS $ nf BS.head bsa - , benchBSL $ nf BL.head bla - , benchS $ nf L.head sa - ] - , bgroup "init" - [ benchT $ nf T.init ta - , benchTL $ nf TL.init tla - , benchBS $ nf BS.init bsa - , benchBSL $ nf BL.init bla - , benchS $ nf L.init sa - ] - , bgroup "intercalate" - [ benchT $ nf (T.intercalate tsw) tl - , benchTL $ nf (TL.intercalate tlw) tll - , benchBS $ nf (BS.intercalate bsw) bsl - , benchBSL $ nf (BL.intercalate blw) bll - , benchS $ nf (L.intercalate lw) sl - ] - , bgroup "intersperse" - [ benchT $ nf (T.intersperse c) ta - , benchTL $ nf (TL.intersperse c) tla - , benchBS $ nf (BS.intersperse c) bsa - , benchBSL $ nf (BL.intersperse c) bla - , benchS $ nf (L.intersperse c) sa - ] - , bgroup "isInfixOf" - [ benchT $ nf (T.isInfixOf tsw) ta - , benchTL $ nf (TL.isInfixOf tlw) tla - , benchBS $ nf (BS.isInfixOf bsw) bsa - -- no isInfixOf for lazy bytestrings - , benchS $ nf (L.isInfixOf lw) sa - ] - , bgroup "last" - [ benchT $ nf T.last ta - , benchTL $ nf TL.last tla - , benchBS $ nf BS.last bsa - , benchBSL $ nf BL.last bla - , benchS $ nf L.last sa - ] - , bgroup "map" - [ benchT $ nf (T.map f) ta - , benchTL $ nf (TL.map f) tla - , benchBS $ nf (BS.map f) bsa - , benchBSL $ nf (BL.map f) bla - , benchS $ nf (L.map f) sa - ] - , bgroup "mapAccumL" - [ benchT $ nf (T.mapAccumL g 0) ta - , benchTL $ nf (TL.mapAccumL g 0) tla - , benchBS $ nf (BS.mapAccumL g 0) bsa - , benchBSL $ nf (BL.mapAccumL g 0) bla - , benchS $ nf (L.mapAccumL g 0) sa - ] - , bgroup "mapAccumR" - [ benchT $ nf (T.mapAccumR g 0) ta - , benchTL $ nf (TL.mapAccumR g 0) tla - , benchBS $ nf (BS.mapAccumR g 0) bsa - , benchBSL $ nf (BL.mapAccumR g 0) bla - , benchS $ nf (L.mapAccumR g 0) sa - ] - , bgroup "map.map" - [ benchT $ nf (T.map f . T.map f) ta - , benchTL $ nf (TL.map f . TL.map f) tla - , benchBS $ nf (BS.map f . BS.map f) bsa - , benchBSL $ nf (BL.map f . 
BL.map f) bla - , benchS $ nf (L.map f . L.map f) sa - ] - , bgroup "replicate char" - [ benchT $ nf (T.replicate bsa_len) (T.singleton c) - , benchTL $ nf (TL.replicate (fromIntegral bsa_len)) (TL.singleton c) - , benchBS $ nf (BS.replicate bsa_len) c - , benchBSL $ nf (BL.replicate (fromIntegral bsa_len)) c - , benchS $ nf (L.replicate bsa_len) c - ] - , bgroup "replicate string" - [ benchT $ nf (T.replicate (bsa_len `div` T.length tsw)) tsw - , benchTL $ nf (TL.replicate (fromIntegral bsa_len `div` TL.length tlw)) tlw - , benchS $ nf (replicat (bsa_len `div` T.length tsw)) lw - ] - , bgroup "reverse" - [ benchT $ nf T.reverse ta - , benchTL $ nf TL.reverse tla - , benchBS $ nf BS.reverse bsa - , benchBSL $ nf BL.reverse bla - , benchS $ nf L.reverse sa - ] - , bgroup "take" - [ benchT $ nf (T.take (ta_len `div` 3)) ta - , benchTL $ nf (TL.take (tla_len `div` 3)) tla - , benchBS $ nf (BS.take (bsa_len `div` 3)) bsa - , benchBSL $ nf (BL.take (bla_len `div` 3)) bla - , benchS $ nf (L.take (sa_len `div` 3)) sa - ] - , bgroup "tail" - [ benchT $ nf T.tail ta - , benchTL $ nf TL.tail tla - , benchBS $ nf BS.tail bsa - , benchBSL $ nf BL.tail bla - , benchS $ nf L.tail sa - ] - , bgroup "toLower" - [ benchT $ nf T.toLower ta - , benchTL $ nf TL.toLower tla - , benchBS $ nf (BS.map toLower) bsa - , benchBSL $ nf (BL.map toLower) bla - , benchS $ nf (L.map toLower) sa - ] - , bgroup "toUpper" - [ benchT $ nf T.toUpper ta - , benchTL $ nf TL.toUpper tla - , benchBS $ nf (BS.map toUpper) bsa - , benchBSL $ nf (BL.map toUpper) bla - , benchS $ nf (L.map toUpper) sa - ] - , bgroup "words" - [ benchT $ nf T.words ta - , benchTL $ nf TL.words tla - , benchBS $ nf BS.words bsa - , benchBSL $ nf BL.words bla - , benchS $ nf L.words sa - ] - , bgroup "zipWith" - [ benchT $ nf (T.zipWith min tb) ta - , benchTL $ nf (TL.zipWith min tlb) tla - , benchBS $ nf (BS.zipWith min bsb) bsa - , benchBSL $ nf (BL.zipWith min blb) bla - , benchS $ nf (L.zipWith min sb) sa - ] - , bgroup 
"length" - [ bgroup "cons" - [ benchT $ nf (T.length . T.cons c) ta - , benchTL $ nf (TL.length . TL.cons c) tla - , benchBS $ nf (BS.length . BS.cons c) bsa - , benchBSL $ nf (BL.length . BL.cons c) bla - , benchS $ nf (L.length . (:) c) sa - ] - , bgroup "decode" - [ benchT $ nf (T.length . T.decodeUtf8) bsa - , benchTL $ nf (TL.length . TL.decodeUtf8) bla - , benchBS $ nf (L.length . BS.unpack) bsa - , benchBSL $ nf (L.length . BL.unpack) bla - , bench "StringUTF8" $ nf (L.length . UTF8.toString) bsa - ] - , bgroup "drop" - [ benchT $ nf (T.length . T.drop (ta_len `div` 3)) ta - , benchTL $ nf (TL.length . TL.drop (tla_len `div` 3)) tla - , benchBS $ nf (BS.length . BS.drop (bsa_len `div` 3)) bsa - , benchBSL $ nf (BL.length . BL.drop (bla_len `div` 3)) bla - , benchS $ nf (L.length . L.drop (sa_len `div` 3)) sa - ] - , bgroup "filter" - [ benchT $ nf (T.length . T.filter p0) ta - , benchTL $ nf (TL.length . TL.filter p0) tla - , benchBS $ nf (BS.length . BS.filter p0) bsa - , benchBSL $ nf (BL.length . BL.filter p0) bla - , benchS $ nf (L.length . L.filter p0) sa - ] - , bgroup "filter.filter" - [ benchT $ nf (T.length . T.filter p1 . T.filter p0) ta - , benchTL $ nf (TL.length . TL.filter p1 . TL.filter p0) tla - , benchBS $ nf (BS.length . BS.filter p1 . BS.filter p0) bsa - , benchBSL $ nf (BL.length . BL.filter p1 . BL.filter p0) bla - , benchS $ nf (L.length . L.filter p1 . L.filter p0) sa - ] - , bgroup "init" - [ benchT $ nf (T.length . T.init) ta - , benchTL $ nf (TL.length . TL.init) tla - , benchBS $ nf (BS.length . BS.init) bsa - , benchBSL $ nf (BL.length . BL.init) bla - , benchS $ nf (L.length . L.init) sa - ] - , bgroup "intercalate" - [ benchT $ nf (T.length . T.intercalate tsw) tl - , benchTL $ nf (TL.length . TL.intercalate tlw) tll - , benchBS $ nf (BS.length . BS.intercalate bsw) bsl - , benchBSL $ nf (BL.length . BL.intercalate blw) bll - , benchS $ nf (L.length . L.intercalate lw) sl - ] - , bgroup "intersperse" - [ benchT $ nf (T.length . 
T.intersperse c) ta - , benchTL $ nf (TL.length . TL.intersperse c) tla - , benchBS $ nf (BS.length . BS.intersperse c) bsa - , benchBSL $ nf (BL.length . BL.intersperse c) bla - , benchS $ nf (L.length . L.intersperse c) sa - ] - , bgroup "map" - [ benchT $ nf (T.length . T.map f) ta - , benchTL $ nf (TL.length . TL.map f) tla - , benchBS $ nf (BS.length . BS.map f) bsa - , benchBSL $ nf (BL.length . BL.map f) bla - , benchS $ nf (L.length . L.map f) sa - ] - , bgroup "map.map" - [ benchT $ nf (T.length . T.map f . T.map f) ta - , benchTL $ nf (TL.length . TL.map f . TL.map f) tla - , benchBS $ nf (BS.length . BS.map f . BS.map f) bsa - , benchS $ nf (L.length . L.map f . L.map f) sa - ] - , bgroup "replicate char" - [ benchT $ nf (T.length . T.replicate bsa_len) (T.singleton c) - , benchTL $ nf (TL.length . TL.replicate (fromIntegral bsa_len)) (TL.singleton c) - , benchBS $ nf (BS.length . BS.replicate bsa_len) c - , benchBSL $ nf (BL.length . BL.replicate (fromIntegral bsa_len)) c - , benchS $ nf (L.length . L.replicate bsa_len) c - ] - , bgroup "replicate string" - [ benchT $ nf (T.length . T.replicate (bsa_len `div` T.length tsw)) tsw - , benchTL $ nf (TL.length . TL.replicate (fromIntegral bsa_len `div` TL.length tlw)) tlw - , benchS $ nf (L.length . replicat (bsa_len `div` T.length tsw)) lw - ] - , bgroup "take" - [ benchT $ nf (T.length . T.take (ta_len `div` 3)) ta - , benchTL $ nf (TL.length . TL.take (tla_len `div` 3)) tla - , benchBS $ nf (BS.length . BS.take (bsa_len `div` 3)) bsa - , benchBSL $ nf (BL.length . BL.take (bla_len `div` 3)) bla - , benchS $ nf (L.length . L.take (sa_len `div` 3)) sa - ] - , bgroup "tail" - [ benchT $ nf (T.length . T.tail) ta - , benchTL $ nf (TL.length . TL.tail) tla - , benchBS $ nf (BS.length . BS.tail) bsa - , benchBSL $ nf (BL.length . BL.tail) bla - , benchS $ nf (L.length . L.tail) sa - ] - , bgroup "toLower" - [ benchT $ nf (T.length . T.toLower) ta - , benchTL $ nf (TL.length . 
TL.toLower) tla - , benchBS $ nf (BS.length . BS.map toLower) bsa - , benchBSL $ nf (BL.length . BL.map toLower) bla - , benchS $ nf (L.length . L.map toLower) sa - ] - , bgroup "toUpper" - [ benchT $ nf (T.length . T.toUpper) ta - , benchTL $ nf (TL.length . TL.toUpper) tla - , benchBS $ nf (BS.length . BS.map toUpper) bsa - , benchBSL $ nf (BL.length . BL.map toUpper) bla - , benchS $ nf (L.length . L.map toUpper) sa - ] - , bgroup "words" - [ benchT $ nf (L.length . T.words) ta - , benchTL $ nf (L.length . TL.words) tla - , benchBS $ nf (L.length . BS.words) bsa - , benchBSL $ nf (L.length . BL.words) bla - , benchS $ nf (L.length . L.words) sa - ] - , bgroup "zipWith" - [ benchT $ nf (T.length . T.zipWith min tb) ta - , benchTL $ nf (TL.length . TL.zipWith min tlb) tla - , benchBS $ nf (L.length . BS.zipWith min bsb) bsa - , benchBSL $ nf (L.length . BL.zipWith min blb) bla - , benchS $ nf (L.length . L.zipWith min sb) sa - ] - ] - , bgroup "Builder" - [ bench "mappend char" $ nf (TL.length . TB.toLazyText . mappendNChar 'a') 10000 - , bench "mappend 8 char" $ nf (TL.length . TB.toLazyText . mappend8Char) 'a' - , bench "mappend text" $ nf (TL.length . TB.toLazyText . mappendNText short) 10000 - ] - ] - where - benchS = bench ("String+" ++ kind) - benchT = bench ("Text+" ++ kind) - benchTL = bench ("LazyText+" ++ kind) - benchBS = bench ("ByteString+" ++ kind) - benchBSL = bench ("LazyByteString+" ++ kind) - - c = 'й' - p0 = (== c) - p1 = (/= 'д') - lw = "право" - bsw = UTF8.fromString lw - blw = BL.fromChunks [bsw] - tsw = T.pack lw - tlw = TL.fromChunks [tsw] - f (C# c#) = C# (chr# (ord# c# +# 1#)) - g (I# i#) (C# c#) = (I# (i# +# 1#), C# (chr# (ord# c# +# i#))) - len l _ = l + (1::Int) - replicat n = concat . 
L.replicate n - short = T.pack "short" - -#if !MIN_VERSION_bytestring(0,10,0) -instance NFData BS.ByteString - -instance NFData BL.ByteString where - rnf BL.Empty = () - rnf (BL.Chunk _ ts) = rnf ts -#endif - -data B where - B :: NFData a => a -> B - -instance NFData B where - rnf (B b) = rnf b - --- | Split a bytestring in chunks --- -chunksOf :: Int -> BS.ByteString -> [BS.ByteString] -chunksOf k = go - where - go t = case BS.splitAt k t of - (a,b) | BS.null a -> [] - | otherwise -> a : go b - --- | Append a character n times --- -mappendNChar :: Char -> Int -> TB.Builder -mappendNChar c n = go 0 - where - go i - | i < n = TB.singleton c `mappend` go (i+1) - | otherwise = mempty - --- | Gives more opportunity for inlining and elimination of unnecesary --- bounds checks. --- -mappend8Char :: Char -> TB.Builder -mappend8Char c = TB.singleton c `mappend` TB.singleton c `mappend` - TB.singleton c `mappend` TB.singleton c `mappend` - TB.singleton c `mappend` TB.singleton c `mappend` - TB.singleton c `mappend` TB.singleton c - --- | Append a text N times --- -mappendNText :: T.Text -> Int -> TB.Builder -mappendNText t n = go 0 - where - go i - | i < n = TB.fromText t `mappend` go (i+1) - | otherwise = mempty diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/ReadNumbers.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/ReadNumbers.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/ReadNumbers.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/ReadNumbers.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,96 +0,0 @@ --- | Read numbers from a file with a just a number on each line, find the --- minimum of those numbers. 
The file contains different kinds of numbers: --- --- * Decimals --- --- * Hexadecimals --- --- * Floating point numbers --- --- * Floating point numbers in scientific notation --- --- The different benchmarks will only take into account the values they can --- parse. --- --- Tested in this benchmark: --- --- * Lexing/parsing of different numerical types --- -module Benchmarks.ReadNumbers - ( benchmark - ) where - -import Criterion (Benchmark, bgroup, bench, whnf) -import Data.List (foldl') -import Numeric (readDec, readFloat, readHex) -import qualified Data.ByteString.Char8 as B -import qualified Data.ByteString.Lazy.Char8 as BL -import qualified Data.ByteString.Lex.Double as B -import qualified Data.ByteString.Lex.Lazy.Double as BL -import qualified Data.Text as T -import qualified Data.Text.IO as T -import qualified Data.Text.Lazy as TL -import qualified Data.Text.Lazy.IO as TL -import qualified Data.Text.Lazy.Read as TL -import qualified Data.Text.Read as T - -benchmark :: FilePath -> IO Benchmark -benchmark fp = do - -- Read all files into lines: string, text, lazy text, bytestring, lazy - -- bytestring - s <- lines `fmap` readFile fp - t <- T.lines `fmap` T.readFile fp - tl <- TL.lines `fmap` TL.readFile fp - b <- B.lines `fmap` B.readFile fp - bl <- BL.lines `fmap` BL.readFile fp - return $ bgroup "ReadNumbers" - [ bench "DecimalString" $ whnf (int . string readDec) s - , bench "HexadecimalString" $ whnf (int . string readHex) s - , bench "DoubleString" $ whnf (double . string readFloat) s - - , bench "DecimalText" $ whnf (int . text (T.signed T.decimal)) t - , bench "HexadecimalText" $ whnf (int . text (T.signed T.hexadecimal)) t - , bench "DoubleText" $ whnf (double . text T.double) t - , bench "RationalText" $ whnf (double . text T.rational) t - - , bench "DecimalLazyText" $ - whnf (int . text (TL.signed TL.decimal)) tl - , bench "HexadecimalLazyText" $ - whnf (int . text (TL.signed TL.hexadecimal)) tl - , bench "DoubleLazyText" $ - whnf (double . 
text TL.double) tl - , bench "RationalLazyText" $ - whnf (double . text TL.rational) tl - - , bench "DecimalByteString" $ whnf (int . byteString B.readInt) b - , bench "DoubleByteString" $ whnf (double . byteString B.readDouble) b - - , bench "DecimalLazyByteString" $ - whnf (int . byteString BL.readInt) bl - , bench "DoubleLazyByteString" $ - whnf (double . byteString BL.readDouble) bl - ] - where - -- Used for fixing types - int :: Int -> Int - int = id - double :: Double -> Double - double = id - -string :: (Ord a, Num a) => (t -> [(a, t)]) -> [t] -> a -string reader = foldl' go 1000000 - where - go z t = case reader t of [(n, _)] -> min n z - _ -> z - -text :: (Ord a, Num a) => (t -> Either String (a,t)) -> [t] -> a -text reader = foldl' go 1000000 - where - go z t = case reader t of Left _ -> z - Right (n, _) -> min n z - -byteString :: (Ord a, Num a) => (t -> Maybe (a,t)) -> [t] -> a -byteString reader = foldl' go 1000000 - where - go z t = case reader t of Nothing -> z - Just (n, _) -> min n z diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Replace.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Replace.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Replace.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Replace.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,43 +0,0 @@ -{-# LANGUAGE BangPatterns #-} --- | Replace a string by another string --- --- Tested in this benchmark: --- --- * Search and replace of a pattern in a text --- -module Benchmarks.Replace - ( benchmark - ) where - -import Criterion (Benchmark, bgroup, bench, nf) -import qualified Data.ByteString.Char8 as B -import qualified Data.ByteString.Lazy as BL -import qualified Data.ByteString.Lazy.Search as BL -import qualified Data.ByteString.Search as B -import qualified Data.Text as T -import qualified Data.Text.Encoding as T -import 
qualified Data.Text.Lazy as TL -import qualified Data.Text.Lazy.Encoding as TL -import qualified Data.Text.Lazy.IO as TL - -benchmark :: FilePath -> String -> String -> IO Benchmark -benchmark fp pat sub = do - tl <- TL.readFile fp - bl <- BL.readFile fp - let !t = TL.toStrict tl - !b = T.encodeUtf8 t - return $ bgroup "Replace" [ - bench "Text" $ nf (T.length . T.replace tpat tsub) t - , bench "ByteString" $ nf (BL.length . B.replace bpat bsub) b - , bench "LazyText" $ nf (TL.length . TL.replace tlpat tlsub) tl - , bench "LazyByteString" $ nf (BL.length . BL.replace blpat blsub) bl - ] - where - tpat = T.pack pat - tsub = T.pack sub - tlpat = TL.pack pat - tlsub = TL.pack sub - bpat = T.encodeUtf8 tpat - bsub = T.encodeUtf8 tsub - blpat = B.concat $ BL.toChunks $ TL.encodeUtf8 tlpat - blsub = B.concat $ BL.toChunks $ TL.encodeUtf8 tlsub diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Search.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Search.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Search.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Search.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,48 +0,0 @@ --- | Search for a pattern in a file, find the number of occurences --- --- Tested in this benchmark: --- --- * Searching all occurences of a pattern using library routines --- -module Benchmarks.Search - ( benchmark - ) where - -import Criterion (Benchmark, bench, bgroup, whnf) -import qualified Data.ByteString as B -import qualified Data.ByteString.Lazy as BL -import qualified Data.ByteString.Lazy.Search as BL -import qualified Data.ByteString.Search as B -import qualified Data.Text as T -import qualified Data.Text.Encoding as T -import qualified Data.Text.IO as T -import qualified Data.Text.Lazy as TL -import qualified Data.Text.Lazy.IO as TL - -benchmark :: FilePath -> T.Text -> IO Benchmark 
-benchmark fp needleT = do - b <- B.readFile fp - bl <- BL.readFile fp - t <- T.readFile fp - tl <- TL.readFile fp - return $ bgroup "FileIndices" - [ bench "ByteString" $ whnf (byteString needleB) b - , bench "LazyByteString" $ whnf (lazyByteString needleB) bl - , bench "Text" $ whnf (text needleT) t - , bench "LazyText" $ whnf (lazyText needleTL) tl - ] - where - needleB = T.encodeUtf8 needleT - needleTL = TL.fromChunks [needleT] - -byteString :: B.ByteString -> B.ByteString -> Int -byteString needle = length . B.indices needle - -lazyByteString :: B.ByteString -> BL.ByteString -> Int -lazyByteString needle = length . BL.indices needle - -text :: T.Text -> T.Text -> Int -text = T.count - -lazyText :: TL.Text -> TL.Text -> Int -lazyText needle = fromIntegral . TL.count needle diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Stream.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Stream.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Stream.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/Stream.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,93 +0,0 @@ --- | This module contains a number of benchmarks for the different streaming --- functions --- --- Tested in this benchmark: --- --- * Most streaming functions --- -{-# LANGUAGE BangPatterns #-} -{-# OPTIONS_GHC -fno-warn-orphans #-} -module Benchmarks.Stream - ( benchmark - ) where - -import Control.DeepSeq (NFData (..)) -import Criterion (Benchmark, bgroup, bench, nf) -import Data.Text.Internal.Fusion.Types (Step (..), Stream (..)) -import qualified Data.Text.Encoding as T -import qualified Data.Text.Encoding.Error as E -import qualified Data.Text.Internal.Encoding.Fusion as T -import qualified Data.Text.Internal.Encoding.Fusion.Common as F -import qualified Data.Text.Internal.Fusion as T -import qualified Data.Text.IO as T -import qualified 
Data.Text.Lazy.Encoding as TL -import qualified Data.Text.Internal.Lazy.Encoding.Fusion as TL -import qualified Data.Text.Internal.Lazy.Fusion as TL -import qualified Data.Text.Lazy.IO as TL - -instance NFData a => NFData (Stream a) where - -- Currently, this implementation does not force evaluation of the size hint - rnf (Stream next s0 _) = go s0 - where - go !s = case next s of - Done -> () - Skip s' -> go s' - Yield x s' -> rnf x `seq` go s' - -benchmark :: FilePath -> IO Benchmark -benchmark fp = do - -- Different formats - t <- T.readFile fp - let !utf8 = T.encodeUtf8 t - !utf16le = T.encodeUtf16LE t - !utf16be = T.encodeUtf16BE t - !utf32le = T.encodeUtf32LE t - !utf32be = T.encodeUtf32BE t - - -- Once again for the lazy variants - tl <- TL.readFile fp - let !utf8L = TL.encodeUtf8 tl - !utf16leL = TL.encodeUtf16LE tl - !utf16beL = TL.encodeUtf16BE tl - !utf32leL = TL.encodeUtf32LE tl - !utf32beL = TL.encodeUtf32BE tl - - -- For the functions which operate on streams - let !s = T.stream t - - return $ bgroup "Stream" - - -- Fusion - [ bgroup "stream" $ - [ bench "Text" $ nf T.stream t - , bench "LazyText" $ nf TL.stream tl - ] - - -- Encoding.Fusion - , bgroup "streamUtf8" - [ bench "Text" $ nf (T.streamUtf8 E.lenientDecode) utf8 - , bench "LazyText" $ nf (TL.streamUtf8 E.lenientDecode) utf8L - ] - , bgroup "streamUtf16LE" - [ bench "Text" $ nf (T.streamUtf16LE E.lenientDecode) utf16le - , bench "LazyText" $ nf (TL.streamUtf16LE E.lenientDecode) utf16leL - ] - , bgroup "streamUtf16BE" - [ bench "Text" $ nf (T.streamUtf16BE E.lenientDecode) utf16be - , bench "LazyText" $ nf (TL.streamUtf16BE E.lenientDecode) utf16beL - ] - , bgroup "streamUtf32LE" - [ bench "Text" $ nf (T.streamUtf32LE E.lenientDecode) utf32le - , bench "LazyText" $ nf (TL.streamUtf32LE E.lenientDecode) utf32leL - ] - , bgroup "streamUtf32BE" - [ bench "Text" $ nf (T.streamUtf32BE E.lenientDecode) utf32be - , bench "LazyText" $ nf (TL.streamUtf32BE E.lenientDecode) utf32beL - ] - - -- 
Encoding.Fusion.Common - , bench "restreamUtf16LE" $ nf F.restreamUtf16LE s - , bench "restreamUtf16BE" $ nf F.restreamUtf16BE s - , bench "restreamUtf32LE" $ nf F.restreamUtf32LE s - , bench "restreamUtf32BE" $ nf F.restreamUtf32BE s - ] diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/WordFrequencies.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/WordFrequencies.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/WordFrequencies.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks/WordFrequencies.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,36 +0,0 @@ --- | A word frequency count using the different string types --- --- Tested in this benchmark: --- --- * Splitting into words --- --- * Converting to lowercase --- --- * Comparing: Eq/Ord instances --- -module Benchmarks.WordFrequencies - ( benchmark - ) where - -import Criterion (Benchmark, bench, bgroup, whnf) -import Data.Char (toLower) -import Data.List (foldl') -import Data.Map (Map) -import qualified Data.ByteString.Char8 as B -import qualified Data.Map as M -import qualified Data.Text as T -import qualified Data.Text.IO as T - -benchmark :: FilePath -> IO Benchmark -benchmark fp = do - s <- readFile fp - b <- B.readFile fp - t <- T.readFile fp - return $ bgroup "WordFrequencies" - [ bench "String" $ whnf (frequencies . words . map toLower) s - , bench "ByteString" $ whnf (frequencies . B.words . B.map toLower) b - , bench "Text" $ whnf (frequencies . T.words . 
T.toLower) t - ] - -frequencies :: Ord a => [a] -> Map a Int -frequencies = foldl' (\m k -> M.insertWith (+) k 1 m) M.empty diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Benchmarks.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,79 +0,0 @@ --- | Main module to run the micro benchmarks --- -{-# LANGUAGE OverloadedStrings #-} -module Main - ( main - ) where - -import Criterion.Main (Benchmark, defaultMain, bgroup) -import System.FilePath (()) -import System.IO (IOMode (WriteMode), openFile, hSetEncoding, utf8) - -import qualified Benchmarks.Builder as Builder -import qualified Benchmarks.DecodeUtf8 as DecodeUtf8 -import qualified Benchmarks.EncodeUtf8 as EncodeUtf8 -import qualified Benchmarks.Equality as Equality -import qualified Benchmarks.FileRead as FileRead -import qualified Benchmarks.FoldLines as FoldLines -import qualified Benchmarks.Mul as Mul -import qualified Benchmarks.Pure as Pure -import qualified Benchmarks.ReadNumbers as ReadNumbers -import qualified Benchmarks.Replace as Replace -import qualified Benchmarks.Search as Search -import qualified Benchmarks.Stream as Stream -import qualified Benchmarks.WordFrequencies as WordFrequencies - -import qualified Benchmarks.Programs.BigTable as Programs.BigTable -import qualified Benchmarks.Programs.Cut as Programs.Cut -import qualified Benchmarks.Programs.Fold as Programs.Fold -import qualified Benchmarks.Programs.Sort as Programs.Sort -import qualified Benchmarks.Programs.StripTags as Programs.StripTags -import qualified Benchmarks.Programs.Throughput as Programs.Throughput - -main :: IO () -main = benchmarks >>= defaultMain - -benchmarks :: IO [Benchmark] -benchmarks = do - sink <- openFile "/dev/null" WriteMode - 
hSetEncoding sink utf8 - - -- Traditional benchmarks - bs <- sequence - [ Builder.benchmark - , DecodeUtf8.benchmark "html" (tf "libya-chinese.html") - , DecodeUtf8.benchmark "xml" (tf "yiwiki.xml") - , DecodeUtf8.benchmark "ascii" (tf "ascii.txt") - , DecodeUtf8.benchmark "russian" (tf "russian.txt") - , DecodeUtf8.benchmark "japanese" (tf "japanese.txt") - , EncodeUtf8.benchmark "επανάληψη 竺法蘭共譯" - , Equality.benchmark (tf "japanese.txt") - , FileRead.benchmark (tf "russian.txt") - , FoldLines.benchmark (tf "russian.txt") - , Mul.benchmark - , Pure.benchmark "tiny" (tf "tiny.txt") - , Pure.benchmark "ascii" (tf "ascii-small.txt") - , Pure.benchmark "france" (tf "france.html") - , Pure.benchmark "russian" (tf "russian-small.txt") - , Pure.benchmark "japanese" (tf "japanese.txt") - , ReadNumbers.benchmark (tf "numbers.txt") - , Replace.benchmark (tf "russian.txt") "принимая" "своем" - , Search.benchmark (tf "russian.txt") "принимая" - , Stream.benchmark (tf "russian.txt") - , WordFrequencies.benchmark (tf "russian.txt") - ] - - -- Program-like benchmarks - ps <- bgroup "Programs" `fmap` sequence - [ Programs.BigTable.benchmark sink - , Programs.Cut.benchmark (tf "russian.txt") sink 20 40 - , Programs.Fold.benchmark (tf "russian.txt") sink - , Programs.Sort.benchmark (tf "russian.txt") sink - , Programs.StripTags.benchmark (tf "yiwiki.xml") sink - , Programs.Throughput.benchmark (tf "russian.txt") sink - ] - - return $ bs ++ [ps] - where - -- Location of a test file - tf = ("../tests/text-test-data" ) diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Multilang.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Multilang.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Multilang.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Multilang.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,32 +0,0 @@ -{-# LANGUAGE BangPatterns, OverloadedStrings, RankNTypes #-} 
- -module Main ( - main - ) where - -import Control.Monad (forM_) -import qualified Data.ByteString as B -import qualified Data.Text as Text -import Data.Text.Encoding (decodeUtf8) -import Data.Text (Text) -import System.IO (hFlush, stdout) -import Timer (timer) - -type BM = Text -> () - -bm :: forall a. (Text -> a) -> BM -bm f t = f t `seq` () - -benchmarks :: [(String, Text.Text -> ())] -benchmarks = [ - ("find_first", bm $ Text.isInfixOf "en:Benin") - , ("find_index", bm $ Text.findIndex (=='c')) - ] - -main :: IO () -main = do - !contents <- decodeUtf8 `fmap` B.readFile "../tests/text-test-data/yiwiki.xml" - forM_ benchmarks $ \(name, bmark) -> do - putStr $ name ++ " " - hFlush stdout - putStrLn =<< (timer 100 contents bmark) diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Timer.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Timer.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/haskell/Timer.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/haskell/Timer.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,30 +0,0 @@ -{-# LANGUAGE BangPatterns #-} - -module Timer (timer) where - -import Control.Exception (evaluate) -import Data.Time.Clock.POSIX (getPOSIXTime) -import GHC.Float (FFFormat(..), formatRealFloat) - -ickyRound :: Int -> Double -> String -ickyRound k = formatRealFloat FFFixed (Just k) - -timer :: Int -> a -> (a -> b) -> IO String -timer count a0 f = do - let loop !k !fastest - | k <= 0 = return fastest - | otherwise = do - start <- getPOSIXTime - let inner a i - | i <= 0 = return () - | otherwise = evaluate (f a) >> inner a (i-1) - inner a0 count - end <- getPOSIXTime - let elapsed = end - start - loop (k-1) (min fastest (elapsed / fromIntegral count)) - t <- loop (3::Int) 1e300 - let log10 x = log x / log 10 - ft = realToFrac t - prec = round (log10 (fromIntegral count) - log10 ft) - return $! 
ickyRound prec ft -{-# NOINLINE timer #-} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/python/cut.py cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/python/cut.py --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/python/cut.py 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/python/cut.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ -#!/usr/bin/env python - -import utils, sys, codecs - -def cut(filename, l, r): - content = open(filename, encoding='utf-8') - for line in content: - print(line[l:r]) - -for f in sys.argv[1:]: - t = utils.benchmark(lambda: cut(f, 20, 40)) - sys.stderr.write('{0}: {1}\n'.format(f, t)) diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/python/multilang.py cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/python/multilang.py --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/python/multilang.py 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/python/multilang.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,50 +0,0 @@ -#!/usr/bin/env python - -import math -import sys -import time - -def find_first(): - cf = contents.find - return timer(lambda: cf("en:Benin")) - -def timer(f, count=100): - a = 1e300 - def g(): - return - for i in xrange(3): - start = time.time() - for j in xrange(count): - g() - a = min(a, (time.time() - start) / count) - - b = 1e300 - for i in xrange(3): - start = time.time() - for j in xrange(count): - f() - b = min(b, (time.time() - start) / count) - - return round(b - a, int(round(math.log(count, 10) - math.log(b - a, 10)))) - -contents = open('../../tests/text-test-data/yiwiki.xml', 'r').read() -contents = contents.decode('utf-8') - -benchmarks = ( - find_first, - ) - -to_run = sys.argv[1:] -bms = [] -if to_run: - for r in to_run: - for b in benchmarks: - if b.__name__.startswith(r): - bms.append(b) -else: - bms = benchmarks - -for b in bms: - 
sys.stdout.write(b.__name__ + ' ') - sys.stdout.flush() - print b() diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/python/sort.py cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/python/sort.py --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/python/sort.py 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/python/sort.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ -#!/usr/bin/env python - -import utils, sys, codecs - -def sort(filename): - content = open(filename, encoding='utf-8').read() - lines = content.splitlines() - lines.sort() - print('\n'.join(lines)) - -for f in sys.argv[1:]: - t = utils.benchmark(lambda: sort(f)) - sys.stderr.write('{0}: {1}\n'.format(f, t)) diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/python/strip_tags.py cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/python/strip_tags.py --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/python/strip_tags.py 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/python/strip_tags.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -#!/usr/bin/env python - -import utils, sys - -def strip_tags(filename): - string = open(filename, encoding='utf-8').read() - - d = 0 - out = [] - - for c in string: - if c == '<': d += 1 - - if d > 0: - out += ' ' - else: - out += c - - if c == '>': d -= 1 - - print(''.join(out)) - -for f in sys.argv[1:]: - t = utils.benchmark(lambda: strip_tags(f)) - sys.stderr.write('{0}: {1}\n'.format(f, t)) diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/python/utils.py cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/python/utils.py --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/python/utils.py 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/python/utils.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,18 +0,0 @@ -#!/usr/bin/env python - -import 
sys, time - -def benchmark_once(f): - start = time.time() - f() - end = time.time() - return end - start - -def benchmark(f): - runs = 100 - total = 0.0 - for i in range(runs): - result = benchmark_once(f) - sys.stderr.write('Run {0}: {1}\n'.format(i, result)) - total += result - return total / runs diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/ruby/cut.rb cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/ruby/cut.rb --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/ruby/cut.rb 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/ruby/cut.rb 1970-01-01 00:00:00.000000000 +0000 @@ -1,16 +0,0 @@ -#!/usr/bin/env ruby - -require './utils.rb' - -def cut(filename, l, r) - File.open(filename, 'r:utf-8') do |file| - file.each_line do |line| - puts line[l, r - l] - end - end -end - -ARGV.each do |f| - t = benchmark { cut(f, 20, 40) } - STDERR.puts "#{f}: #{t}" -end diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/ruby/fold.rb cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/ruby/fold.rb --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/ruby/fold.rb 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/ruby/fold.rb 1970-01-01 00:00:00.000000000 +0000 @@ -1,50 +0,0 @@ -#!/usr/bin/env ruby - -require './utils.rb' - -def fold(filename, max_width) - File.open(filename, 'r:utf-8') do |file| - # Words in this paragraph - paragraph = [] - - file.each_line do |line| - # If we encounter an empty line, we reformat and dump the current - # paragraph - if line.strip.empty? - puts fold_paragraph(paragraph, max_width) - puts - paragraph = [] - # Otherwise, we append the words found in the line to the paragraph - else - paragraph.concat line.split - end - end - - # Last paragraph - puts fold_paragraph(paragraph, max_width) unless paragraph.empty? 
- end -end - -# Fold a single paragraph to the desired width -def fold_paragraph(paragraph, max_width) - # Gradually build our output - str, *rest = paragraph - width = str.length - - rest.each do |word| - if width + word.length + 1 <= max_width - str << ' ' << word - width += word.length + 1 - else - str << "\n" << word - width = word.length - end - end - - str -end - -ARGV.each do |f| - t = benchmark { fold(f, 80) } - STDERR.puts "#{f}: #{t}" -end diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/ruby/sort.rb cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/ruby/sort.rb --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/ruby/sort.rb 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/ruby/sort.rb 1970-01-01 00:00:00.000000000 +0000 @@ -1,15 +0,0 @@ -#!/usr/bin/env ruby - -require './utils.rb' - -def sort(filename) - File.open(filename, 'r:utf-8') do |file| - content = file.read - puts content.lines.sort.join - end -end - -ARGV.each do |f| - t = benchmark { sort(f) } - STDERR.puts "#{f}: #{t}" -end diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/ruby/strip_tags.rb cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/ruby/strip_tags.rb --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/ruby/strip_tags.rb 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/ruby/strip_tags.rb 1970-01-01 00:00:00.000000000 +0000 @@ -1,22 +0,0 @@ -#!/usr/bin/env ruby - -require './utils.rb' - -def strip_tags(filename) - File.open(filename, 'r:utf-8') do |file| - str = file.read - - d = 0 - - str.each_char do |c| - d += 1 if c == '<' - putc(if d > 0 then ' ' else c end) - d -= 1 if c == '>' - end - end -end - -ARGV.each do |f| - t = benchmark { strip_tags(f) } - STDERR.puts "#{f}: #{t}" -end diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/ruby/utils.rb cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/ruby/utils.rb --- 
cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/ruby/utils.rb 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/ruby/utils.rb 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ -require 'benchmark' - -def benchmark(&block) - runs = 100 - total = 0 - - runs.times do |i| - result = Benchmark.measure(&block).total - $stderr.puts "Run #{i}: #{result}" - total += result - end - - total / runs -end diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/Setup.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/Setup.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/Setup.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/Setup.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -import Distribution.Simple -main = defaultMain diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/text-benchmarks.cabal cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/text-benchmarks.cabal --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/benchmarks/text-benchmarks.cabal 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/benchmarks/text-benchmarks.cabal 1970-01-01 00:00:00.000000000 +0000 @@ -1,56 +0,0 @@ -name: text-benchmarks -version: 0.0.0.0 -synopsis: Benchmarks for the text package -description: Benchmarks for the text package -homepage: https://bitbucket.org/bos/text -license: BSD3 -license-file: ../LICENSE -author: Jasper Van der Jeugt , - Bryan O'Sullivan , - Tom Harper , - Duncan Coutts -maintainer: jaspervdj@gmail.com -category: Text -build-type: Simple - -cabal-version: >=1.2 - -flag llvm - description: use LLVM - default: False - manual: True - -executable text-benchmarks - hs-source-dirs: haskell .. 
- c-sources: ../cbits/cbits.c - cbits/time_iconv.c - include-dirs: ../include - main-is: Benchmarks.hs - ghc-options: -Wall -O2 -rtsopts - if flag(llvm) - ghc-options: -fllvm - cpp-options: -DHAVE_DEEPSEQ -DINTEGER_GMP - build-depends: base == 4.*, - binary, - blaze-builder, - bytestring, - bytestring-lexing, - containers, - criterion >= 0.10.0.0, - deepseq, - directory, - filepath, - ghc-prim, - integer-gmp, - stringsearch, - utf8-string, - vector - -executable text-multilang - hs-source-dirs: haskell - main-is: Multilang.hs - ghc-options: -Wall -O2 - build-depends: base == 4.*, - bytestring, - text, - time diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/cbits/cbits.c cabal-install-1.22-1.22.9.0/=unpacked-tar8=/cbits/cbits.c --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/cbits/cbits.c 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/cbits/cbits.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,311 +0,0 @@ -/* - * Copyright (c) 2011 Bryan O'Sullivan . - * - * Portions copyright (c) 2008-2010 Björn Höhrmann . - * - * See http://bjoern.hoehrmann.de/utf-8/decoder/dfa/ for details. - */ - -#include -#include -#include -#include "text_cbits.h" - -void _hs_text_memcpy(void *dest, size_t doff, const void *src, size_t soff, - size_t n) -{ - memcpy(dest + (doff<<1), src + (soff<<1), n<<1); -} - -int _hs_text_memcmp(const void *a, size_t aoff, const void *b, size_t boff, - size_t n) -{ - return memcmp(a + (aoff<<1), b + (boff<<1), n<<1); -} - -#define UTF8_ACCEPT 0 -#define UTF8_REJECT 12 - -static const uint8_t utf8d[] = { - /* - * The first part of the table maps bytes to character classes that - * to reduce the size of the transition table and create bitmasks. 
- */ - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, - 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, - 8,8,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, - 10,3,3,3,3,3,3,3,3,3,3,3,3,4,3,3, 11,6,6,6,5,8,8,8,8,8,8,8,8,8,8,8, - - /* - * The second part is a transition table that maps a combination of - * a state of the automaton and a character class to a state. - */ - 0,12,24,36,60,96,84,12,12,12,48,72, 12,12,12,12,12,12,12,12,12,12,12,12, - 12, 0,12,12,12,12,12, 0,12, 0,12,12, 12,24,12,12,12,12,12,24,12,24,12,12, - 12,12,12,12,12,12,12,24,12,12,12,12, 12,24,12,12,12,12,12,12,12,24,12,12, - 12,12,12,12,12,12,12,36,12,36,12,12, 12,36,12,12,12,12,12,36,12,36,12,12, - 12,36,12,12,12,12,12,12,12,12,12,12, -}; - -static inline uint32_t -decode(uint32_t *state, uint32_t* codep, uint32_t byte) { - uint32_t type = utf8d[byte]; - - *codep = (*state != UTF8_ACCEPT) ? 
- (byte & 0x3fu) | (*codep << 6) : - (0xff >> type) & (byte); - - return *state = utf8d[256 + *state + type]; -} - -/* - * The ISO 8859-1 (aka latin-1) code points correspond exactly to the first 256 unicode - * code-points, therefore we can trivially convert from a latin-1 encoded bytestring to - * an UTF16 array - */ -void -_hs_text_decode_latin1(uint16_t *dest, const uint8_t *src, - const uint8_t *srcend) -{ - const uint8_t *p = src; - -#if defined(__i386__) || defined(__x86_64__) - /* This optimization works on a little-endian systems by using - (aligned) 32-bit loads instead of 8-bit loads - */ - - /* consume unaligned prefix */ - while (p != srcend && (uintptr_t)p & 0x3) - *dest++ = *p++; - - /* iterate over 32-bit aligned loads */ - while (p < srcend - 3) { - const uint32_t w = *((const uint32_t *)p); - - *dest++ = w & 0xff; - *dest++ = (w >> 8) & 0xff; - *dest++ = (w >> 16) & 0xff; - *dest++ = (w >> 24) & 0xff; - - p += 4; - } -#endif - - /* handle unaligned suffix */ - while (p != srcend) - *dest++ = *p++; -} - -/* - * A best-effort decoder. Runs until it hits either end of input or - * the start of an invalid byte sequence. - * - * At exit, we update *destoff with the next offset to write to, *src - * with the next source location past the last one successfully - * decoded, and return the next source location to read from. - * - * Moreover, we expose the internal decoder state (state0 and - * codepoint0), allowing one to restart the decoder after it - * terminates (say, due to a partial codepoint). 
- * - * In particular, there are a few possible outcomes, - * - * 1) We decoded the buffer entirely: - * In this case we return srcend - * state0 == UTF8_ACCEPT - * - * 2) We met an invalid encoding - * In this case we return the address of the first invalid byte - * state0 == UTF8_REJECT - * - * 3) We reached the end of the buffer while decoding a codepoint - * In this case we return a pointer to the first byte of the partial codepoint - * state0 != UTF8_ACCEPT, UTF8_REJECT - * - */ -#if defined(__GNUC__) || defined(__clang__) -static inline uint8_t const * -_hs_text_decode_utf8_int(uint16_t *const dest, size_t *destoff, - const uint8_t **src, const uint8_t *srcend, - uint32_t *codepoint0, uint32_t *state0) - __attribute((always_inline)); -#endif - -static inline uint8_t const * -_hs_text_decode_utf8_int(uint16_t *const dest, size_t *destoff, - const uint8_t **src, const uint8_t *srcend, - uint32_t *codepoint0, uint32_t *state0) -{ - uint16_t *d = dest + *destoff; - const uint8_t *s = *src, *last = *src; - uint32_t state = *state0; - uint32_t codepoint = *codepoint0; - - while (s < srcend) { -#if defined(__i386__) || defined(__x86_64__) - /* - * This code will only work on a little-endian system that - * supports unaligned loads. - * - * It gives a substantial speed win on data that is purely or - * partly ASCII (e.g. HTML), at only a slight cost on purely - * non-ASCII text. - */ - - if (state == UTF8_ACCEPT) { - while (s < srcend - 4) { - codepoint = *((uint32_t *) s); - if ((codepoint & 0x80808080) != 0) - break; - s += 4; - - /* - * Tried 32-bit stores here, but the extra bit-twiddling - * slowed the code down. 
- */ - - *d++ = (uint16_t) (codepoint & 0xff); - *d++ = (uint16_t) ((codepoint >> 8) & 0xff); - *d++ = (uint16_t) ((codepoint >> 16) & 0xff); - *d++ = (uint16_t) ((codepoint >> 24) & 0xff); - } - last = s; - } -#endif - - if (decode(&state, &codepoint, *s++) != UTF8_ACCEPT) { - if (state != UTF8_REJECT) - continue; - break; - } - - if (codepoint <= 0xffff) - *d++ = (uint16_t) codepoint; - else { - *d++ = (uint16_t) (0xD7C0 + (codepoint >> 10)); - *d++ = (uint16_t) (0xDC00 + (codepoint & 0x3FF)); - } - last = s; - } - - *destoff = d - dest; - *codepoint0 = codepoint; - *state0 = state; - *src = last; - - return s; -} - -uint8_t const * -_hs_text_decode_utf8_state(uint16_t *const dest, size_t *destoff, - const uint8_t **src, - const uint8_t *srcend, - uint32_t *codepoint0, uint32_t *state0) -{ - uint8_t const *ret = _hs_text_decode_utf8_int(dest, destoff, src, srcend, - codepoint0, state0); - if (*state0 == UTF8_REJECT) - ret -=1; - return ret; -} - -/* - * Helper to decode buffer and discard final decoder state - */ -const uint8_t * -_hs_text_decode_utf8(uint16_t *const dest, size_t *destoff, - const uint8_t *src, const uint8_t *const srcend) -{ - uint32_t codepoint; - uint32_t state = UTF8_ACCEPT; - uint8_t const *ret = _hs_text_decode_utf8_int(dest, destoff, &src, srcend, - &codepoint, &state); - /* Back up if we have an incomplete or invalid encoding */ - if (state != UTF8_ACCEPT) - ret -= 1; - return ret; -} - -void -_hs_text_encode_utf8(uint8_t **destp, const uint16_t *src, size_t srcoff, - size_t srclen) -{ - const uint16_t *srcend; - uint8_t *dest = *destp; - - src += srcoff; - srcend = src + srclen; - - ascii: -#if defined(__x86_64__) - while (srcend - src >= 4) { - uint64_t w = *((uint64_t *) src); - - if (w & 0xFF80FF80FF80FF80ULL) { - if (!(w & 0x000000000000FF80ULL)) { - *dest++ = w & 0xFFFF; - src++; - if (!(w & 0x00000000FF800000ULL)) { - *dest++ = (w >> 16) & 0xFFFF; - src++; - if (!(w & 0x0000FF8000000000ULL)) { - *dest++ = (w >> 32) & 0xFFFF; - 
src++; - } - } - } - break; - } - *dest++ = w & 0xFFFF; - *dest++ = (w >> 16) & 0xFFFF; - *dest++ = (w >> 32) & 0xFFFF; - *dest++ = w >> 48; - src += 4; - } -#endif - -#if defined(__i386__) - while (srcend - src >= 2) { - uint32_t w = *((uint32_t *) src); - - if (w & 0xFF80FF80) - break; - *dest++ = w & 0xFFFF; - *dest++ = w >> 16; - src += 2; - } -#endif - - while (src < srcend) { - uint16_t w = *src++; - - if (w <= 0x7F) { - *dest++ = w; - /* An ASCII byte is likely to begin a run of ASCII bytes. - Falling back into the fast path really helps performance. */ - goto ascii; - } - else if (w <= 0x7FF) { - *dest++ = (w >> 6) | 0xC0; - *dest++ = (w & 0x3f) | 0x80; - } - else if (w < 0xD800 || w > 0xDBFF) { - *dest++ = (w >> 12) | 0xE0; - *dest++ = ((w >> 6) & 0x3F) | 0x80; - *dest++ = (w & 0x3F) | 0x80; - } else { - uint32_t c = ((((uint32_t) w) - 0xD800) << 10) + - (((uint32_t) *src++) - 0xDC00) + 0x10000; - *dest++ = (c >> 18) | 0xF0; - *dest++ = ((c >> 12) & 0x3F) | 0x80; - *dest++ = ((c >> 6) & 0x3F) | 0x80; - *dest++ = (c & 0x3F) | 0x80; - } - } - - *destp = dest; -} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/changelog.md cabal-install-1.22-1.22.9.0/=unpacked-tar8=/changelog.md --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/changelog.md 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/changelog.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,83 +0,0 @@ -1.2.0.3 - -* Update formatRealFloat to correspond to the definition in versions - of base newer than 4.5 (https://github.com/bos/text/issues/105) - -1.2.0.2 - -* Bumped lower bound on deepseq to 1.4 for compatibility with the - upcoming GHC 7.10 - -1.2.0.1 - -* Fixed a buffer overflow in rendering of large Integers - (https://github.com/bos/text/issues/99) - -1.2.0.0 - -* Fixed an integer overflow in the replace function - (https://github.com/bos/text/issues/81) - -* Fixed a hang in lazy decodeUtf8With - (https://github.com/bos/text/issues/87) - -* Reduced codegen bloat 
caused by use of empty and single-character - literals - -* Added an instance of IsList for GHC 7.8 and above - -1.1.1.0 - -* The Data.Data instance now allows gunfold to work, via a virtual - pack constructor - -* dropEnd, takeEnd: new functions - -* Comparing the length of a Text against a number can now - short-circuit in more cases - -1.1.0.1 - -* streamDecodeUtf8: fixed gh-70, did not return all unconsumed bytes - in single-byte chunks - -1.1.0.0 - -* encodeUtf8: Performance is improved by up to 4x. - -* encodeUtf8Builder, encodeUtf8BuilderEscaped: new functions, - available only if bytestring >= 0.10.4.0 is installed, that allow - very fast and flexible encoding of a Text value to a bytestring - Builder. - - As an example of the performance gain to be had, the - encodeUtf8BuilderEscaped function helps to double the speed of JSON - encoding in the latest version of aeson! (Note: if all you need is a - plain ByteString, encodeUtf8 is still the faster way to go.) - -* All of the internal module hierarchy is now publicly exposed. If a - module is in the .Internal hierarchy, or is documented as internal, - use at your own risk - there are no API stability guarantees for - internal modules! 
- -1.0.0.1 - -* decodeUtf8: Fixed a regression that caused us to incorrectly - identify truncated UTF-8 as valid (gh-61) - -1.0.0.0 - -* Added support for Unicode 6.3.0 to case conversion functions - -* New function toTitle converts words in a string to title case - -* New functions peekCStringLen and withCStringLen simplify - interoperability with C functionns - -* Added support for decoding UTF-8 in stream-friendly fashion - -* Fixed a bug in mapAccumL - -* Added trusted Haskell support - -* Removed support for GHC 6.10 (released in 2008) and older diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Array.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Array.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Array.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Array.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,245 +0,0 @@ -{-# LANGUAGE BangPatterns, CPP, ForeignFunctionInterface, MagicHash, Rank2Types, - RecordWildCards, UnboxedTuples, UnliftedFFITypes #-} -{-# OPTIONS_GHC -fno-warn-unused-matches #-} --- | --- Module : Data.Text.Array --- Copyright : (c) 2009, 2010, 2011 Bryan O'Sullivan --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : portable --- --- Packed, unboxed, heap-resident arrays. Suitable for performance --- critical use, both in terms of large data quantities and high --- speed. --- --- This module is intended to be imported @qualified@, to avoid name --- clashes with "Prelude" functions, e.g. --- --- > import qualified Data.Text.Array as A --- --- The names in this module resemble those in the 'Data.Array' family --- of modules, but are shorter due to the assumption of qualifid --- naming. 
-module Data.Text.Array - ( - -- * Types - Array(Array, aBA) - , MArray(MArray, maBA) - - -- * Functions - , copyM - , copyI - , empty - , equal -#if defined(ASSERTS) - , length -#endif - , run - , run2 - , toList - , unsafeFreeze - , unsafeIndex - , new - , unsafeWrite - ) where - -#if defined(ASSERTS) --- This fugly hack is brought by GHC's apparent reluctance to deal --- with MagicHash and UnboxedTuples when inferring types. Eek! -# define CHECK_BOUNDS(_func_,_len_,_k_) \ -if (_k_) < 0 || (_k_) >= (_len_) then error ("Data.Text.Array." ++ (_func_) ++ ": bounds error, offset " ++ show (_k_) ++ ", length " ++ show (_len_)) else -#else -# define CHECK_BOUNDS(_func_,_len_,_k_) -#endif - -#include "MachDeps.h" - -#if defined(ASSERTS) -import Control.Exception (assert) -#endif -#if __GLASGOW_HASKELL__ >= 702 -import Control.Monad.ST.Unsafe (unsafeIOToST) -#else -import Control.Monad.ST (unsafeIOToST) -#endif -import Data.Bits ((.&.), xor) -import Data.Text.Internal.Unsafe (inlinePerformIO) -import Data.Text.Internal.Unsafe.Shift (shiftL, shiftR) -#if __GLASGOW_HASKELL__ >= 703 -import Foreign.C.Types (CInt(CInt), CSize(CSize)) -#else -import Foreign.C.Types (CInt, CSize) -#endif -import GHC.Base (ByteArray#, MutableByteArray#, Int(..), - indexWord16Array#, newByteArray#, - unsafeCoerce#, writeWord16Array#) -import GHC.ST (ST(..), runST) -import GHC.Word (Word16(..)) -import Prelude hiding (length, read) - --- | Immutable array type. -data Array = Array { - aBA :: ByteArray# -#if defined(ASSERTS) - , aLen :: {-# UNPACK #-} !Int -- length (in units of Word16, not bytes) -#endif - } - --- | Mutable array type, for use in the ST monad. -data MArray s = MArray { - maBA :: MutableByteArray# s -#if defined(ASSERTS) - , maLen :: {-# UNPACK #-} !Int -- length (in units of Word16, not bytes) -#endif - } - -#if defined(ASSERTS) --- | Operations supported by all arrays. -class IArray a where - -- | Return the length of an array. 
- length :: a -> Int - -instance IArray Array where - length = aLen - {-# INLINE length #-} - -instance IArray (MArray s) where - length = maLen - {-# INLINE length #-} -#endif - --- | Create an uninitialized mutable array. -new :: forall s. Int -> ST s (MArray s) -new n - | n < 0 || n .&. highBit /= 0 = array_size_error - | otherwise = ST $ \s1# -> - case newByteArray# len# s1# of - (# s2#, marr# #) -> (# s2#, MArray marr# -#if defined(ASSERTS) - n -#endif - #) - where !(I# len#) = bytesInArray n - highBit = maxBound `xor` (maxBound `shiftR` 1) -{-# INLINE new #-} - -array_size_error :: a -array_size_error = error "Data.Text.Array.new: size overflow" - --- | Freeze a mutable array. Do not mutate the 'MArray' afterwards! -unsafeFreeze :: MArray s -> ST s Array -unsafeFreeze MArray{..} = ST $ \s# -> - (# s#, Array (unsafeCoerce# maBA) -#if defined(ASSERTS) - maLen -#endif - #) -{-# INLINE unsafeFreeze #-} - --- | Indicate how many bytes would be used for an array of the given --- size. -bytesInArray :: Int -> Int -bytesInArray n = n `shiftL` 1 -{-# INLINE bytesInArray #-} - --- | Unchecked read of an immutable array. May return garbage or --- crash on an out-of-bounds access. -unsafeIndex :: Array -> Int -> Word16 -unsafeIndex Array{..} i@(I# i#) = - CHECK_BOUNDS("unsafeIndex",aLen,i) - case indexWord16Array# aBA i# of r# -> (W16# r#) -{-# INLINE unsafeIndex #-} - --- | Unchecked write of a mutable array. May return garbage or crash --- on an out-of-bounds access. -unsafeWrite :: MArray s -> Int -> Word16 -> ST s () -unsafeWrite MArray{..} i@(I# i#) (W16# e#) = ST $ \s1# -> - CHECK_BOUNDS("unsafeWrite",maLen,i) - case writeWord16Array# maBA i# e# s1# of - s2# -> (# s2#, () #) -{-# INLINE unsafeWrite #-} - --- | Convert an immutable array to a list. -toList :: Array -> Int -> Int -> [Word16] -toList ary off len = loop 0 - where loop i | i < len = unsafeIndex ary (off+i) : loop (i+1) - | otherwise = [] - --- | An empty immutable array. 
-empty :: Array -empty = runST (new 0 >>= unsafeFreeze) - --- | Run an action in the ST monad and return an immutable array of --- its result. -run :: (forall s. ST s (MArray s)) -> Array -run k = runST (k >>= unsafeFreeze) - --- | Run an action in the ST monad and return an immutable array of --- its result paired with whatever else the action returns. -run2 :: (forall s. ST s (MArray s, a)) -> (Array, a) -run2 k = runST (do - (marr,b) <- k - arr <- unsafeFreeze marr - return (arr,b)) -{-# INLINE run2 #-} - --- | Copy some elements of a mutable array. -copyM :: MArray s -- ^ Destination - -> Int -- ^ Destination offset - -> MArray s -- ^ Source - -> Int -- ^ Source offset - -> Int -- ^ Count - -> ST s () -copyM dest didx src sidx count - | count <= 0 = return () - | otherwise = -#if defined(ASSERTS) - assert (sidx + count <= length src) . - assert (didx + count <= length dest) . -#endif - unsafeIOToST $ memcpyM (maBA dest) (fromIntegral didx) - (maBA src) (fromIntegral sidx) - (fromIntegral count) -{-# INLINE copyM #-} - --- | Copy some elements of an immutable array. -copyI :: MArray s -- ^ Destination - -> Int -- ^ Destination offset - -> Array -- ^ Source - -> Int -- ^ Source offset - -> Int -- ^ First offset in destination /not/ to - -- copy (i.e. /not/ length) - -> ST s () -copyI dest i0 src j0 top - | i0 >= top = return () - | otherwise = unsafeIOToST $ - memcpyI (maBA dest) (fromIntegral i0) - (aBA src) (fromIntegral j0) - (fromIntegral (top-i0)) -{-# INLINE copyI #-} - --- | Compare portions of two arrays for equality. No bounds checking --- is performed. -equal :: Array -- ^ First - -> Int -- ^ Offset into first - -> Array -- ^ Second - -> Int -- ^ Offset into second - -> Int -- ^ Count - -> Bool -equal arrA offA arrB offB count = inlinePerformIO $ do - i <- memcmp (aBA arrA) (fromIntegral offA) - (aBA arrB) (fromIntegral offB) (fromIntegral count) - return $! 
i == 0 -{-# INLINE equal #-} - -foreign import ccall unsafe "_hs_text_memcpy" memcpyI - :: MutableByteArray# s -> CSize -> ByteArray# -> CSize -> CSize -> IO () - -foreign import ccall unsafe "_hs_text_memcmp" memcmp - :: ByteArray# -> CSize -> ByteArray# -> CSize -> CSize -> IO CInt - -foreign import ccall unsafe "_hs_text_memcpy" memcpyM - :: MutableByteArray# s -> CSize -> MutableByteArray# s -> CSize -> CSize - -> IO () diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Encoding/Error.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Encoding/Error.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Encoding/Error.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Encoding/Error.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,119 +0,0 @@ -{-# LANGUAGE CPP, DeriveDataTypeable #-} -#if __GLASGOW_HASKELL__ >= 702 -{-# LANGUAGE Trustworthy #-} -#endif --- | --- Module : Data.Text.Encoding.Error --- Copyright : (c) Bryan O'Sullivan 2009 --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- Types and functions for dealing with encoding and decoding errors --- in Unicode text. --- --- The standard functions for encoding and decoding text are strict, --- which is to say that they throw exceptions on invalid input. This --- is often unhelpful on real world input, so alternative functions --- exist that accept custom handlers for dealing with invalid inputs. --- These 'OnError' handlers are normal Haskell functions. You can use --- one of the presupplied functions in this module, or you can write a --- custom handler of your own. - -module Data.Text.Encoding.Error - ( - -- * Error handling types - UnicodeException(..) 
- , OnError - , OnDecodeError - , OnEncodeError - -- * Useful error handling functions - , lenientDecode - , strictDecode - , strictEncode - , ignore - , replace - ) where - -import Control.DeepSeq (NFData (..)) -import Control.Exception (Exception, throw) -import Data.Typeable (Typeable) -import Data.Word (Word8) -import Numeric (showHex) - --- | Function type for handling a coding error. It is supplied with --- two inputs: --- --- * A 'String' that describes the error. --- --- * The input value that caused the error. If the error arose --- because the end of input was reached or could not be identified --- precisely, this value will be 'Nothing'. --- --- If the handler returns a value wrapped with 'Just', that value will --- be used in the output as the replacement for the invalid input. If --- it returns 'Nothing', no value will be used in the output. --- --- Should the handler need to abort processing, it should use 'error' --- or 'throw' an exception (preferably a 'UnicodeException'). It may --- use the description provided to construct a more helpful error --- report. -type OnError a b = String -> Maybe a -> Maybe b - --- | A handler for a decoding error. -type OnDecodeError = OnError Word8 Char - --- | A handler for an encoding error. -type OnEncodeError = OnError Char Word8 - --- | An exception type for representing Unicode encoding errors. -data UnicodeException = - DecodeError String (Maybe Word8) - -- ^ Could not decode a byte sequence because it was invalid under - -- the given encoding, or ran out of input in mid-decode. - | EncodeError String (Maybe Char) - -- ^ Tried to encode a character that could not be represented - -- under the given encoding, or ran out of input in mid-encode. 
- deriving (Eq, Typeable) - -showUnicodeException :: UnicodeException -> String -showUnicodeException (DecodeError desc (Just w)) - = "Cannot decode byte '\\x" ++ showHex w ("': " ++ desc) -showUnicodeException (DecodeError desc Nothing) - = "Cannot decode input: " ++ desc -showUnicodeException (EncodeError desc (Just c)) - = "Cannot encode character '\\x" ++ showHex (fromEnum c) ("': " ++ desc) -showUnicodeException (EncodeError desc Nothing) - = "Cannot encode input: " ++ desc - -instance Show UnicodeException where - show = showUnicodeException - -instance Exception UnicodeException - -instance NFData UnicodeException where - rnf (DecodeError desc w) = rnf desc `seq` rnf w `seq` () - rnf (EncodeError desc c) = rnf desc `seq` rnf c `seq` () - --- | Throw a 'UnicodeException' if decoding fails. -strictDecode :: OnDecodeError -strictDecode desc c = throw (DecodeError desc c) - --- | Replace an invalid input byte with the Unicode replacement --- character U+FFFD. -lenientDecode :: OnDecodeError -lenientDecode _ _ = Just '\xfffd' - --- | Throw a 'UnicodeException' if encoding fails. -strictEncode :: OnEncodeError -strictEncode desc c = throw (EncodeError desc c) - --- | Ignore an invalid input, substituting nothing in the output. -ignore :: OnError a b -ignore _ _ = Nothing - --- | Replace an invalid input with a valid output. 
-replace :: b -> OnError a b -replace c _ _ = Just c diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Encoding.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Encoding.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Encoding.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Encoding.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,490 +0,0 @@ -{-# LANGUAGE BangPatterns, CPP, ForeignFunctionInterface, GeneralizedNewtypeDeriving, MagicHash, - UnliftedFFITypes #-} -#if __GLASGOW_HASKELL__ >= 702 -{-# LANGUAGE Trustworthy #-} -#endif --- | --- Module : Data.Text.Encoding --- Copyright : (c) 2009, 2010, 2011 Bryan O'Sullivan, --- (c) 2009 Duncan Coutts, --- (c) 2008, 2009 Tom Harper --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : portable --- --- Functions for converting 'Text' values to and from 'ByteString', --- using several standard encodings. --- --- To gain access to a much larger family of encodings, use the --- @text-icu@ package: - -module Data.Text.Encoding - ( - -- * Decoding ByteStrings to Text - -- $strict - decodeASCII - , decodeLatin1 - , decodeUtf8 - , decodeUtf16LE - , decodeUtf16BE - , decodeUtf32LE - , decodeUtf32BE - - -- ** Catchable failure - , decodeUtf8' - - -- ** Controllable error handling - , decodeUtf8With - , decodeUtf16LEWith - , decodeUtf16BEWith - , decodeUtf32LEWith - , decodeUtf32BEWith - - -- ** Stream oriented decoding - -- $stream - , streamDecodeUtf8 - , streamDecodeUtf8With - , Decoding(..) - - -- * Encoding Text to ByteStrings - , encodeUtf8 - , encodeUtf16LE - , encodeUtf16BE - , encodeUtf32LE - , encodeUtf32BE - -#if MIN_VERSION_bytestring(0,10,4) - -- * Encoding Text using ByteString Builders - -- | /Note/ that these functions are only available if built against - -- @bytestring >= 0.10.4.0@. 
- , encodeUtf8Builder - , encodeUtf8BuilderEscaped -#endif - ) where - -#if __GLASGOW_HASKELL__ >= 702 -import Control.Monad.ST.Unsafe (unsafeIOToST, unsafeSTToIO) -#else -import Control.Monad.ST (unsafeIOToST, unsafeSTToIO) -#endif - -#if MIN_VERSION_bytestring(0,10,4) -import Data.Bits ((.&.)) -import Data.Text.Internal.Unsafe.Char (ord) -import qualified Data.ByteString.Builder as B -import qualified Data.ByteString.Builder.Internal as B hiding (empty, append) -import qualified Data.ByteString.Builder.Prim as BP -import qualified Data.ByteString.Builder.Prim.Internal as BP -import qualified Data.Text.Internal.Encoding.Utf16 as U16 -#endif - -import Control.Exception (evaluate, try) -import Control.Monad.ST (runST) -import Data.ByteString as B -import Data.ByteString.Internal as B hiding (c2w) -import Data.Text () -import Data.Text.Encoding.Error (OnDecodeError, UnicodeException, strictDecode) -import Data.Text.Internal (Text(..), safe, text) -import Data.Text.Internal.Private (runText) -import Data.Text.Internal.Unsafe.Char (unsafeWrite) -import Data.Text.Internal.Unsafe.Shift (shiftR) -import Data.Text.Unsafe (unsafeDupablePerformIO) -import Data.Word (Word8, Word32) -import Foreign.C.Types (CSize(..)) -import Foreign.ForeignPtr (withForeignPtr) -import Foreign.Marshal.Utils (with) -import Foreign.Ptr (Ptr, minusPtr, nullPtr, plusPtr) -import Foreign.Storable (Storable, peek, poke) -import GHC.Base (ByteArray#, MutableByteArray#) -import qualified Data.Text.Array as A -import qualified Data.Text.Internal.Encoding.Fusion as E -import qualified Data.Text.Internal.Fusion as F - -#include "text_cbits.h" - --- $strict --- --- All of the single-parameter functions for decoding bytestrings --- encoded in one of the Unicode Transformation Formats (UTF) operate --- in a /strict/ mode: each will throw an exception if given invalid --- input. 
--- --- Each function has a variant, whose name is suffixed with -'With', --- that gives greater control over the handling of decoding errors. --- For instance, 'decodeUtf8' will throw an exception, but --- 'decodeUtf8With' allows the programmer to determine what to do on a --- decoding error. - --- | /Deprecated/. Decode a 'ByteString' containing 7-bit ASCII --- encoded text. -decodeASCII :: ByteString -> Text -decodeASCII = decodeUtf8 -{-# DEPRECATED decodeASCII "Use decodeUtf8 instead" #-} - --- | Decode a 'ByteString' containing Latin-1 (aka ISO-8859-1) encoded text. --- --- 'decodeLatin1' is semantically equivalent to --- @Data.Text.pack . Data.ByteString.Char8.unpack@ -decodeLatin1 :: ByteString -> Text -decodeLatin1 (PS fp off len) = text a 0 len - where - a = A.run (A.new len >>= unsafeIOToST . go) - go dest = withForeignPtr fp $ \ptr -> do - c_decode_latin1 (A.maBA dest) (ptr `plusPtr` off) (ptr `plusPtr` (off+len)) - return dest - --- | Decode a 'ByteString' containing UTF-8 encoded text. -decodeUtf8With :: OnDecodeError -> ByteString -> Text -decodeUtf8With onErr (PS fp off len) = runText $ \done -> do - let go dest = withForeignPtr fp $ \ptr -> - with (0::CSize) $ \destOffPtr -> do - let end = ptr `plusPtr` (off + len) - loop curPtr = do - curPtr' <- c_decode_utf8 (A.maBA dest) destOffPtr curPtr end - if curPtr' == end - then do - n <- peek destOffPtr - unsafeSTToIO (done dest (fromIntegral n)) - else do - x <- peek curPtr' - case onErr desc (Just x) of - Nothing -> loop $ curPtr' `plusPtr` 1 - Just c -> do - destOff <- peek destOffPtr - w <- unsafeSTToIO $ - unsafeWrite dest (fromIntegral destOff) (safe c) - poke destOffPtr (destOff + fromIntegral w) - loop $ curPtr' `plusPtr` 1 - loop (ptr `plusPtr` off) - (unsafeIOToST . 
go) =<< A.new len - where - desc = "Data.Text.Internal.Encoding.decodeUtf8: Invalid UTF-8 stream" -{- INLINE[0] decodeUtf8With #-} - --- $stream --- --- The 'streamDecodeUtf8' and 'streamDecodeUtf8With' functions accept --- a 'ByteString' that represents a possibly incomplete input (e.g. a --- packet from a network stream) that may not end on a UTF-8 boundary. --- --- 1. The maximal prefix of 'Text' that could be decoded from the --- given input. --- --- 2. The suffix of the 'ByteString' that could not be decoded due to --- insufficient input. --- --- 3. A function that accepts another 'ByteString'. That string will --- be assumed to directly follow the string that was passed as --- input to the original function, and it will in turn be decoded. --- --- To help understand the use of these functions, consider the Unicode --- string @\"hi ☃\"@. If encoded as UTF-8, this becomes @\"hi --- \\xe2\\x98\\x83\"@; the final @\'☃\'@ is encoded as 3 bytes. --- --- Now suppose that we receive this encoded string as 3 packets that --- are split up on untidy boundaries: @[\"hi \\xe2\", \"\\x98\", --- \"\\x83\"]@. We cannot decode the entire Unicode string until we --- have received all three packets, but we would like to make progress --- as we receive each one. --- --- @ --- ghci> let s0\@('Some' _ _ f0) = 'streamDecodeUtf8' \"hi \\xe2\" --- ghci> s0 --- 'Some' \"hi \" \"\\xe2\" _ --- @ --- --- We use the continuation @f0@ to decode our second packet. --- --- @ --- ghci> let s1\@('Some' _ _ f1) = f0 \"\\x98\" --- ghci> s1 --- 'Some' \"\" \"\\xe2\\x98\" --- @ --- --- We could not give @f0@ enough input to decode anything, so it --- returned an empty string. Once we feed our second continuation @f1@ --- the last byte of input, it will make progress. --- --- @ --- ghci> let s2\@('Some' _ _ f2) = f1 \"\\x83\" --- ghci> s2 --- 'Some' \"\\x2603\" \"\" _ --- @ --- --- If given invalid input, an exception will be thrown by the function --- or continuation where it is encountered. 
- --- | A stream oriented decoding result. -data Decoding = Some Text ByteString (ByteString -> Decoding) - -instance Show Decoding where - showsPrec d (Some t bs _) = showParen (d > prec) $ - showString "Some " . showsPrec prec' t . - showChar ' ' . showsPrec prec' bs . - showString " _" - where prec = 10; prec' = prec + 1 - -newtype CodePoint = CodePoint Word32 deriving (Eq, Show, Num, Storable) -newtype DecoderState = DecoderState Word32 deriving (Eq, Show, Num, Storable) - --- | Decode, in a stream oriented way, a 'ByteString' containing UTF-8 --- encoded text that is known to be valid. --- --- If the input contains any invalid UTF-8 data, an exception will be --- thrown (either by this function or a continuation) that cannot be --- caught in pure code. For more control over the handling of invalid --- data, use 'streamDecodeUtf8With'. -streamDecodeUtf8 :: ByteString -> Decoding -streamDecodeUtf8 = streamDecodeUtf8With strictDecode - --- | Decode, in a stream oriented way, a 'ByteString' containing UTF-8 --- encoded text. -streamDecodeUtf8With :: OnDecodeError -> ByteString -> Decoding -streamDecodeUtf8With onErr = decodeChunk B.empty 0 0 - where - -- We create a slightly larger than necessary buffer to accommodate a - -- potential surrogate pair started in the last buffer - decodeChunk :: ByteString -> CodePoint -> DecoderState -> ByteString - -> Decoding - decodeChunk undecoded0 codepoint0 state0 bs@(PS fp off len) = - runST $ (unsafeIOToST . 
decodeChunkToBuffer) =<< A.new (len+1) - where - decodeChunkToBuffer :: A.MArray s -> IO Decoding - decodeChunkToBuffer dest = withForeignPtr fp $ \ptr -> - with (0::CSize) $ \destOffPtr -> - with codepoint0 $ \codepointPtr -> - with state0 $ \statePtr -> - with nullPtr $ \curPtrPtr -> - let end = ptr `plusPtr` (off + len) - loop curPtr = do - poke curPtrPtr curPtr - curPtr' <- c_decode_utf8_with_state (A.maBA dest) destOffPtr - curPtrPtr end codepointPtr statePtr - state <- peek statePtr - case state of - UTF8_REJECT -> do - -- We encountered an encoding error - x <- peek curPtr' - poke statePtr 0 - case onErr desc (Just x) of - Nothing -> loop $ curPtr' `plusPtr` 1 - Just c -> do - destOff <- peek destOffPtr - w <- unsafeSTToIO $ - unsafeWrite dest (fromIntegral destOff) (safe c) - poke destOffPtr (destOff + fromIntegral w) - loop $ curPtr' `plusPtr` 1 - - _ -> do - -- We encountered the end of the buffer while decoding - n <- peek destOffPtr - codepoint <- peek codepointPtr - chunkText <- unsafeSTToIO $ do - arr <- A.unsafeFreeze dest - return $! text arr 0 (fromIntegral n) - lastPtr <- peek curPtrPtr - let left = lastPtr `minusPtr` curPtr - undecoded = case state of - UTF8_ACCEPT -> B.empty - _ -> B.append undecoded0 (B.drop left bs) - return $ Some chunkText undecoded - (decodeChunk undecoded codepoint state) - in loop (ptr `plusPtr` off) - desc = "Data.Text.Internal.Encoding.streamDecodeUtf8With: Invalid UTF-8 stream" - --- | Decode a 'ByteString' containing UTF-8 encoded text that is known --- to be valid. --- --- If the input contains any invalid UTF-8 data, an exception will be --- thrown that cannot be caught in pure code. For more control over --- the handling of invalid data, use 'decodeUtf8'' or --- 'decodeUtf8With'. -decodeUtf8 :: ByteString -> Text -decodeUtf8 = decodeUtf8With strictDecode -{-# INLINE[0] decodeUtf8 #-} -{-# RULES "STREAM stream/decodeUtf8 fusion" [1] - forall bs. 
F.stream (decodeUtf8 bs) = E.streamUtf8 strictDecode bs #-} - --- | Decode a 'ByteString' containing UTF-8 encoded text. --- --- If the input contains any invalid UTF-8 data, the relevant --- exception will be returned, otherwise the decoded text. -decodeUtf8' :: ByteString -> Either UnicodeException Text -decodeUtf8' = unsafeDupablePerformIO . try . evaluate . decodeUtf8With strictDecode -{-# INLINE decodeUtf8' #-} - -#if MIN_VERSION_bytestring(0,10,4) - --- | Encode text to a ByteString 'B.Builder' using UTF-8 encoding. -encodeUtf8Builder :: Text -> B.Builder -encodeUtf8Builder = encodeUtf8BuilderEscaped (BP.liftFixedToBounded BP.word8) - --- | Encode text using UTF-8 encoding and escape the ASCII characters using --- a 'BP.BoundedPrim'. --- --- Use this function is to implement efficient encoders for text-based formats --- like JSON or HTML. -{-# INLINE encodeUtf8BuilderEscaped #-} --- TODO: Extend documentation with references to source code in @blaze-html@ --- or @aeson@ that uses this function. -encodeUtf8BuilderEscaped :: BP.BoundedPrim Word8 -> Text -> B.Builder -encodeUtf8BuilderEscaped be = - -- manual eta-expansion to ensure inlining works as expected - \txt -> B.builder (mkBuildstep txt) - where - bound = max 4 $ BP.sizeBound be - - mkBuildstep (Text arr off len) !k = - outerLoop off - where - iend = off + len - - outerLoop !i0 !br@(B.BufferRange op0 ope) - | i0 >= iend = k br - | outRemaining > 0 = goPartial (i0 + min outRemaining inpRemaining) - -- TODO: Use a loop with an integrated bound's check if outRemaining - -- is smaller than 8, as this will save on divisions. 
- | otherwise = return $ B.bufferFull bound op0 (outerLoop i0) - where - outRemaining = (ope `minusPtr` op0) `div` bound - inpRemaining = iend - i0 - - goPartial !iendTmp = go i0 op0 - where - go !i !op - | i < iendTmp = case A.unsafeIndex arr i of - w | w <= 0x7F -> do - BP.runB be (fromIntegral w) op >>= go (i + 1) - | w <= 0x7FF -> do - poke8 0 $ (w `shiftR` 6) + 0xC0 - poke8 1 $ (w .&. 0x3f) + 0x80 - go (i + 1) (op `plusPtr` 2) - | 0xD800 <= w && w <= 0xDBFF -> do - let c = ord $ U16.chr2 w (A.unsafeIndex arr (i+1)) - poke8 0 $ (c `shiftR` 18) + 0xF0 - poke8 1 $ ((c `shiftR` 12) .&. 0x3F) + 0x80 - poke8 2 $ ((c `shiftR` 6) .&. 0x3F) + 0x80 - poke8 3 $ (c .&. 0x3F) + 0x80 - go (i + 2) (op `plusPtr` 4) - | otherwise -> do - poke8 0 $ (w `shiftR` 12) + 0xE0 - poke8 1 $ ((w `shiftR` 6) .&. 0x3F) + 0x80 - poke8 2 $ (w .&. 0x3F) + 0x80 - go (i + 1) (op `plusPtr` 3) - | otherwise = - outerLoop i (B.BufferRange op ope) - where - poke8 j v = poke (op `plusPtr` j) (fromIntegral v :: Word8) -#endif - --- | Encode text using UTF-8 encoding. -encodeUtf8 :: Text -> ByteString -encodeUtf8 (Text arr off len) - | len == 0 = B.empty - | otherwise = unsafeDupablePerformIO $ do - fp <- mallocByteString (len*4) - withForeignPtr fp $ \ptr -> - with ptr $ \destPtr -> do - c_encode_utf8 destPtr (A.aBA arr) (fromIntegral off) (fromIntegral len) - newDest <- peek destPtr - let utf8len = newDest `minusPtr` ptr - if utf8len >= len `shiftR` 1 - then return (PS fp 0 utf8len) - else do - fp' <- mallocByteString utf8len - withForeignPtr fp' $ \ptr' -> do - memcpy ptr' ptr (fromIntegral utf8len) - return (PS fp' 0 utf8len) - --- | Decode text from little endian UTF-16 encoding. -decodeUtf16LEWith :: OnDecodeError -> ByteString -> Text -decodeUtf16LEWith onErr bs = F.unstream (E.streamUtf16LE onErr bs) -{-# INLINE decodeUtf16LEWith #-} - --- | Decode text from little endian UTF-16 encoding. --- --- If the input contains any invalid little endian UTF-16 data, an --- exception will be thrown. 
For more control over the handling of --- invalid data, use 'decodeUtf16LEWith'. -decodeUtf16LE :: ByteString -> Text -decodeUtf16LE = decodeUtf16LEWith strictDecode -{-# INLINE decodeUtf16LE #-} - --- | Decode text from big endian UTF-16 encoding. -decodeUtf16BEWith :: OnDecodeError -> ByteString -> Text -decodeUtf16BEWith onErr bs = F.unstream (E.streamUtf16BE onErr bs) -{-# INLINE decodeUtf16BEWith #-} - --- | Decode text from big endian UTF-16 encoding. --- --- If the input contains any invalid big endian UTF-16 data, an --- exception will be thrown. For more control over the handling of --- invalid data, use 'decodeUtf16BEWith'. -decodeUtf16BE :: ByteString -> Text -decodeUtf16BE = decodeUtf16BEWith strictDecode -{-# INLINE decodeUtf16BE #-} - --- | Encode text using little endian UTF-16 encoding. -encodeUtf16LE :: Text -> ByteString -encodeUtf16LE txt = E.unstream (E.restreamUtf16LE (F.stream txt)) -{-# INLINE encodeUtf16LE #-} - --- | Encode text using big endian UTF-16 encoding. -encodeUtf16BE :: Text -> ByteString -encodeUtf16BE txt = E.unstream (E.restreamUtf16BE (F.stream txt)) -{-# INLINE encodeUtf16BE #-} - --- | Decode text from little endian UTF-32 encoding. -decodeUtf32LEWith :: OnDecodeError -> ByteString -> Text -decodeUtf32LEWith onErr bs = F.unstream (E.streamUtf32LE onErr bs) -{-# INLINE decodeUtf32LEWith #-} - --- | Decode text from little endian UTF-32 encoding. --- --- If the input contains any invalid little endian UTF-32 data, an --- exception will be thrown. For more control over the handling of --- invalid data, use 'decodeUtf32LEWith'. -decodeUtf32LE :: ByteString -> Text -decodeUtf32LE = decodeUtf32LEWith strictDecode -{-# INLINE decodeUtf32LE #-} - --- | Decode text from big endian UTF-32 encoding. -decodeUtf32BEWith :: OnDecodeError -> ByteString -> Text -decodeUtf32BEWith onErr bs = F.unstream (E.streamUtf32BE onErr bs) -{-# INLINE decodeUtf32BEWith #-} - --- | Decode text from big endian UTF-32 encoding. 
--- --- If the input contains any invalid big endian UTF-32 data, an --- exception will be thrown. For more control over the handling of --- invalid data, use 'decodeUtf32BEWith'. -decodeUtf32BE :: ByteString -> Text -decodeUtf32BE = decodeUtf32BEWith strictDecode -{-# INLINE decodeUtf32BE #-} - --- | Encode text using little endian UTF-32 encoding. -encodeUtf32LE :: Text -> ByteString -encodeUtf32LE txt = E.unstream (E.restreamUtf32LE (F.stream txt)) -{-# INLINE encodeUtf32LE #-} - --- | Encode text using big endian UTF-32 encoding. -encodeUtf32BE :: Text -> ByteString -encodeUtf32BE txt = E.unstream (E.restreamUtf32BE (F.stream txt)) -{-# INLINE encodeUtf32BE #-} - -foreign import ccall unsafe "_hs_text_decode_utf8" c_decode_utf8 - :: MutableByteArray# s -> Ptr CSize - -> Ptr Word8 -> Ptr Word8 -> IO (Ptr Word8) - -foreign import ccall unsafe "_hs_text_decode_utf8_state" c_decode_utf8_with_state - :: MutableByteArray# s -> Ptr CSize - -> Ptr (Ptr Word8) -> Ptr Word8 - -> Ptr CodePoint -> Ptr DecoderState -> IO (Ptr Word8) - -foreign import ccall unsafe "_hs_text_decode_latin1" c_decode_latin1 - :: MutableByteArray# s -> Ptr Word8 -> Ptr Word8 -> IO () - -foreign import ccall unsafe "_hs_text_encode_utf8" c_encode_utf8 - :: Ptr (Ptr Word8) -> ByteArray# -> CSize -> CSize -> IO () diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Foreign.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Foreign.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Foreign.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Foreign.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,173 +0,0 @@ -{-# LANGUAGE BangPatterns, CPP, GeneralizedNewtypeDeriving #-} --- | --- Module : Data.Text.Foreign --- Copyright : (c) 2009, 2010 Bryan O'Sullivan --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- Support for using 'Text' data with native code via the 
Haskell --- foreign function interface. - -module Data.Text.Foreign - ( - -- * Interoperability with native code - -- $interop - I16 - -- * Safe conversion functions - , fromPtr - , useAsPtr - , asForeignPtr - -- ** Encoding as UTF-8 - , peekCStringLen - , withCStringLen - -- * Unsafe conversion code - , lengthWord16 - , unsafeCopyToPtr - -- * Low-level manipulation - -- $lowlevel - , dropWord16 - , takeWord16 - ) where - -#if defined(ASSERTS) -import Control.Exception (assert) -#endif -#if __GLASGOW_HASKELL__ >= 702 -import Control.Monad.ST.Unsafe (unsafeIOToST) -#else -import Control.Monad.ST (unsafeIOToST) -#endif -import Data.ByteString.Unsafe (unsafePackCStringLen, unsafeUseAsCStringLen) -import Data.Text.Encoding (decodeUtf8, encodeUtf8) -import Data.Text.Internal (Text(..), empty) -import Data.Text.Unsafe (lengthWord16) -import Data.Word (Word16) -import Foreign.C.String (CStringLen) -import Foreign.ForeignPtr (ForeignPtr, mallocForeignPtrArray, withForeignPtr) -import Foreign.Marshal.Alloc (allocaBytes) -import Foreign.Ptr (Ptr, castPtr, plusPtr) -import Foreign.Storable (peek, poke) -import qualified Data.Text.Array as A - --- $interop --- --- The 'Text' type is implemented using arrays that are not guaranteed --- to have a fixed address in the Haskell heap. All communication with --- native code must thus occur by copying data back and forth. --- --- The 'Text' type's internal representation is UTF-16, using the --- platform's native endianness. This makes copied data suitable for --- use with native libraries that use a similar representation, such --- as ICU. To interoperate with native libraries that use different --- internal representations, such as UTF-8 or UTF-32, consider using --- the functions in the 'Data.Text.Encoding' module. - --- | A type representing a number of UTF-16 code units. 
-newtype I16 = I16 Int - deriving (Bounded, Enum, Eq, Integral, Num, Ord, Read, Real, Show) - --- | /O(n)/ Create a new 'Text' from a 'Ptr' 'Word16' by copying the --- contents of the array. -fromPtr :: Ptr Word16 -- ^ source array - -> I16 -- ^ length of source array (in 'Word16' units) - -> IO Text -fromPtr _ (I16 0) = return empty -fromPtr ptr (I16 len) = -#if defined(ASSERTS) - assert (len > 0) $ -#endif - return $! Text arr 0 len - where - arr = A.run (A.new len >>= copy) - copy marr = loop ptr 0 - where - loop !p !i | i == len = return marr - | otherwise = do - A.unsafeWrite marr i =<< unsafeIOToST (peek p) - loop (p `plusPtr` 2) (i + 1) - --- $lowlevel --- --- Foreign functions that use UTF-16 internally may return indices in --- units of 'Word16' instead of characters. These functions may --- safely be used with such indices, as they will adjust offsets if --- necessary to preserve the validity of a Unicode string. - --- | /O(1)/ Return the prefix of the 'Text' of @n@ 'Word16' units in --- length. --- --- If @n@ would cause the 'Text' to end inside a surrogate pair, the --- end of the prefix will be advanced by one additional 'Word16' unit --- to maintain its validity. -takeWord16 :: I16 -> Text -> Text -takeWord16 (I16 n) t@(Text arr off len) - | n <= 0 = empty - | n >= len || m >= len = t - | otherwise = Text arr off m - where - m | w < 0xDB00 || w > 0xD8FF = n - | otherwise = n+1 - w = A.unsafeIndex arr (off+n-1) - --- | /O(1)/ Return the suffix of the 'Text', with @n@ 'Word16' units --- dropped from its beginning. --- --- If @n@ would cause the 'Text' to begin inside a surrogate pair, the --- beginning of the suffix will be advanced by one additional 'Word16' --- unit to maintain its validity. 
-dropWord16 :: I16 -> Text -> Text -dropWord16 (I16 n) t@(Text arr off len) - | n <= 0 = t - | n >= len || m >= len = empty - | otherwise = Text arr (off+m) (len-m) - where - m | w < 0xD800 || w > 0xDBFF = n - | otherwise = n+1 - w = A.unsafeIndex arr (off+n-1) - --- | /O(n)/ Copy a 'Text' to an array. The array is assumed to be big --- enough to hold the contents of the entire 'Text'. -unsafeCopyToPtr :: Text -> Ptr Word16 -> IO () -unsafeCopyToPtr (Text arr off len) ptr = loop ptr off - where - end = off + len - loop !p !i | i == end = return () - | otherwise = do - poke p (A.unsafeIndex arr i) - loop (p `plusPtr` 2) (i + 1) - --- | /O(n)/ Perform an action on a temporary, mutable copy of a --- 'Text'. The copy is freed as soon as the action returns. -useAsPtr :: Text -> (Ptr Word16 -> I16 -> IO a) -> IO a -useAsPtr t@(Text _arr _off len) action = - allocaBytes (len * 2) $ \buf -> do - unsafeCopyToPtr t buf - action (castPtr buf) (fromIntegral len) - --- | /O(n)/ Make a mutable copy of a 'Text'. -asForeignPtr :: Text -> IO (ForeignPtr Word16, I16) -asForeignPtr t@(Text _arr _off len) = do - fp <- mallocForeignPtrArray len - withForeignPtr fp $ unsafeCopyToPtr t - return (fp, I16 len) - --- | /O(n)/ Decode a C string with explicit length, which is assumed --- to have been encoded as UTF-8. If decoding fails, a --- 'UnicodeException' is thrown. -peekCStringLen :: CStringLen -> IO Text -peekCStringLen cs = do - bs <- unsafePackCStringLen cs - return $! decodeUtf8 bs - --- | Marshal a 'Text' into a C string encoded as UTF-8 in temporary --- storage, with explicit length information. The encoded string may --- contain NUL bytes, and is not followed by a trailing NUL byte. --- --- The temporary storage is freed when the subcomputation terminates --- (either normally or via an exception), so the pointer to the --- temporary storage must /not/ be used after this function returns. 
-withCStringLen :: Text -> (CStringLen -> IO a) -> IO a -withCStringLen t act = unsafeUseAsCStringLen (encodeUtf8 t) act diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Builder/Functions.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Builder/Functions.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Builder/Functions.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Builder/Functions.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,39 +0,0 @@ -{-# LANGUAGE MagicHash #-} - --- | --- Module : Data.Text.Internal.Builder.Functions --- Copyright : (c) 2011 MailRank, Inc. --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- Useful functions and combinators. - -module Data.Text.Internal.Builder.Functions - ( - (<>) - , i2d - ) where - -import Data.Monoid (mappend) -import Data.Text.Lazy.Builder (Builder) -import GHC.Base - --- | Unsafe conversion for decimal digits. -{-# INLINE i2d #-} -i2d :: Int -> Char -i2d (I# i#) = C# (chr# (ord# '0'# +# i#)) - --- | The normal 'mappend' function with right associativity instead of --- left. 
-(<>) :: Builder -> Builder -> Builder -(<>) = mappend -{-# INLINE (<>) #-} - -infixr 4 <> diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Builder/Int/Digits.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Builder/Int/Digits.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Builder/Int/Digits.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Builder/Int/Digits.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,26 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} - --- Module: Data.Text.Internal.Builder.Int.Digits --- Copyright: (c) 2013 Bryan O'Sullivan --- License: BSD3 --- Maintainer: Bryan O'Sullivan --- Stability: experimental --- Portability: portable --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- This module exists because the C preprocessor does things that we --- shall not speak of when confronted with Haskell multiline strings. 
- -module Data.Text.Internal.Builder.Int.Digits (digits) where - -import Data.ByteString.Char8 (ByteString) - -digits :: ByteString -digits = "0001020304050607080910111213141516171819\ - \2021222324252627282930313233343536373839\ - \4041424344454647484950515253545556575859\ - \6061626364656667686970717273747576777879\ - \8081828384858687888990919293949596979899" diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Builder/RealFloat/Functions.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Builder/RealFloat/Functions.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Builder/RealFloat/Functions.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Builder/RealFloat/Functions.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,57 +0,0 @@ -{-# LANGUAGE CPP #-} - --- | --- Module: Data.Text.Internal.Builder.RealFloat.Functions --- Copyright: (c) The University of Glasgow 1994-2002 --- License: see libraries/base/LICENSE --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! 
- -module Data.Text.Internal.Builder.RealFloat.Functions - ( - roundTo - ) where - -roundTo :: Int -> [Int] -> (Int,[Int]) - -#if MIN_VERSION_base(4,6,0) - -roundTo d is = - case f d True is of - x@(0,_) -> x - (1,xs) -> (1, 1:xs) - _ -> error "roundTo: bad Value" - where - b2 = base `quot` 2 - - f n _ [] = (0, replicate n 0) - f 0 e (x:xs) | x == b2 && e && all (== 0) xs = (0, []) -- Round to even when at exactly half the base - | otherwise = (if x >= b2 then 1 else 0, []) - f n _ (i:xs) - | i' == base = (1,0:ds) - | otherwise = (0,i':ds) - where - (c,ds) = f (n-1) (even i) xs - i' = c + i - base = 10 - -#else - -roundTo d is = - case f d is of - x@(0,_) -> x - (1,xs) -> (1, 1:xs) - _ -> error "roundTo: bad Value" - where - f n [] = (0, replicate n 0) - f 0 (x:_) = (if x >= 5 then 1 else 0, []) - f n (i:xs) - | i' == 10 = (1,0:ds) - | otherwise = (0,i':ds) - where - (c,ds) = f (n-1) xs - i' = c + i - -#endif diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Builder.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Builder.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Builder.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Builder.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,314 +0,0 @@ -{-# LANGUAGE BangPatterns, CPP, Rank2Types #-} -{-# OPTIONS_HADDOCK not-home #-} - ------------------------------------------------------------------------------ --- | --- Module : Data.Text.Internal.Builder --- Copyright : (c) 2013 Bryan O'Sullivan --- (c) 2010 Johan Tibell --- License : BSD3-style (see LICENSE) --- --- Maintainer : Johan Tibell --- Stability : experimental --- Portability : portable to Hugs and GHC --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! 
--- --- Efficient construction of lazy @Text@ values. The principal --- operations on a @Builder@ are @singleton@, @fromText@, and --- @fromLazyText@, which construct new builders, and 'mappend', which --- concatenates two builders. --- --- To get maximum performance when building lazy @Text@ values using a --- builder, associate @mappend@ calls to the right. For example, --- prefer --- --- > singleton 'a' `mappend` (singleton 'b' `mappend` singleton 'c') --- --- to --- --- > singleton 'a' `mappend` singleton 'b' `mappend` singleton 'c' --- --- as the latter associates @mappend@ to the left. --- ------------------------------------------------------------------------------ - -module Data.Text.Internal.Builder - ( -- * Public API - -- ** The Builder type - Builder - , toLazyText - , toLazyTextWith - - -- ** Constructing Builders - , singleton - , fromText - , fromLazyText - , fromString - - -- ** Flushing the buffer state - , flush - - -- * Internal functions - , append' - , ensureFree - , writeN - ) where - -import Control.Monad.ST (ST, runST) -import Data.Monoid (Monoid(..)) -import Data.Text.Internal (Text(..)) -import Data.Text.Internal.Lazy (smallChunkSize) -import Data.Text.Unsafe (inlineInterleaveST) -import Data.Text.Internal.Unsafe.Char (unsafeWrite) -import Prelude hiding (map, putChar) - -import qualified Data.String as String -import qualified Data.Text as S -import qualified Data.Text.Array as A -import qualified Data.Text.Lazy as L - ------------------------------------------------------------------------- - --- | A @Builder@ is an efficient way to build lazy @Text@ values. --- There are several functions for constructing builders, but only one --- to inspect them: to extract any data, you have to turn them into --- lazy @Text@ values using @toLazyText@. --- --- Internally, a builder constructs a lazy @Text@ by filling arrays --- piece by piece. As each buffer is filled, it is \'popped\' off, to --- become a new chunk of the resulting lazy @Text@. 
All this is --- hidden from the user of the @Builder@. -newtype Builder = Builder { - -- Invariant (from Data.Text.Lazy): - -- The lists include no null Texts. - runBuilder :: forall s. (Buffer s -> ST s [S.Text]) - -> Buffer s - -> ST s [S.Text] - } - -instance Monoid Builder where - mempty = empty - {-# INLINE mempty #-} - mappend = append - {-# INLINE mappend #-} - mconcat = foldr mappend mempty - {-# INLINE mconcat #-} - -instance String.IsString Builder where - fromString = fromString - {-# INLINE fromString #-} - -instance Show Builder where - show = show . toLazyText - -instance Eq Builder where - a == b = toLazyText a == toLazyText b - -instance Ord Builder where - a <= b = toLazyText a <= toLazyText b - ------------------------------------------------------------------------- - --- | /O(1)./ The empty @Builder@, satisfying --- --- * @'toLazyText' 'empty' = 'L.empty'@ --- -empty :: Builder -empty = Builder (\ k buf -> k buf) -{-# INLINE empty #-} - --- | /O(1)./ A @Builder@ taking a single character, satisfying --- --- * @'toLazyText' ('singleton' c) = 'L.singleton' c@ --- -singleton :: Char -> Builder -singleton c = writeAtMost 2 $ \ marr o -> unsafeWrite marr o c -{-# INLINE singleton #-} - ------------------------------------------------------------------------- - --- | /O(1)./ The concatenation of two builders, an associative --- operation with identity 'empty', satisfying --- --- * @'toLazyText' ('append' x y) = 'L.append' ('toLazyText' x) ('toLazyText' y)@ --- -append :: Builder -> Builder -> Builder -append (Builder f) (Builder g) = Builder (f . g) -{-# INLINE [0] append #-} - --- TODO: Experiment to find the right threshold. -copyLimit :: Int -copyLimit = 128 - --- This function attempts to merge small @Text@ values instead of --- treating each value as its own chunk. We may not always want this. 
- --- | /O(1)./ A @Builder@ taking a 'S.Text', satisfying --- --- * @'toLazyText' ('fromText' t) = 'L.fromChunks' [t]@ --- -fromText :: S.Text -> Builder -fromText t@(Text arr off l) - | S.null t = empty - | l <= copyLimit = writeN l $ \marr o -> A.copyI marr o arr off (l+o) - | otherwise = flush `append` mapBuilder (t :) -{-# INLINE [1] fromText #-} - -{-# RULES -"fromText/pack" forall s . - fromText (S.pack s) = fromString s - #-} - --- | /O(1)./ A Builder taking a @String@, satisfying --- --- * @'toLazyText' ('fromString' s) = 'L.fromChunks' [S.pack s]@ --- -fromString :: String -> Builder -fromString str = Builder $ \k (Buffer p0 o0 u0 l0) -> - let loop !marr !o !u !l [] = k (Buffer marr o u l) - loop marr o u l s@(c:cs) - | l <= 1 = do - arr <- A.unsafeFreeze marr - let !t = Text arr o u - marr' <- A.new chunkSize - ts <- inlineInterleaveST (loop marr' 0 0 chunkSize s) - return $ t : ts - | otherwise = do - n <- unsafeWrite marr (o+u) c - loop marr o (u+n) (l-n) cs - in loop p0 o0 u0 l0 str - where - chunkSize = smallChunkSize -{-# INLINE fromString #-} - --- | /O(1)./ A @Builder@ taking a lazy @Text@, satisfying --- --- * @'toLazyText' ('fromLazyText' t) = t@ --- -fromLazyText :: L.Text -> Builder -fromLazyText ts = flush `append` mapBuilder (L.toChunks ts ++) -{-# INLINE fromLazyText #-} - ------------------------------------------------------------------------- - --- Our internal buffer type -data Buffer s = Buffer {-# UNPACK #-} !(A.MArray s) - {-# UNPACK #-} !Int -- offset - {-# UNPACK #-} !Int -- used units - {-# UNPACK #-} !Int -- length left - ------------------------------------------------------------------------- - --- | /O(n)./ Extract a lazy @Text@ from a @Builder@ with a default --- buffer size. The construction work takes place if and when the --- relevant part of the lazy @Text@ is demanded. 
-toLazyText :: Builder -> L.Text -toLazyText = toLazyTextWith smallChunkSize - --- | /O(n)./ Extract a lazy @Text@ from a @Builder@, using the given --- size for the initial buffer. The construction work takes place if --- and when the relevant part of the lazy @Text@ is demanded. --- --- If the initial buffer is too small to hold all data, subsequent --- buffers will be the default buffer size. -toLazyTextWith :: Int -> Builder -> L.Text -toLazyTextWith chunkSize m = L.fromChunks (runST $ - newBuffer chunkSize >>= runBuilder (m `append` flush) (const (return []))) - --- | /O(1)./ Pop the strict @Text@ we have constructed so far, if any, --- yielding a new chunk in the result lazy @Text@. -flush :: Builder -flush = Builder $ \ k buf@(Buffer p o u l) -> - if u == 0 - then k buf - else do arr <- A.unsafeFreeze p - let !b = Buffer p (o+u) 0 l - !t = Text arr o u - ts <- inlineInterleaveST (k b) - return $! t : ts - ------------------------------------------------------------------------- - --- | Sequence an ST operation on the buffer -withBuffer :: (forall s. Buffer s -> ST s (Buffer s)) -> Builder -withBuffer f = Builder $ \k buf -> f buf >>= k -{-# INLINE withBuffer #-} - --- | Get the size of the buffer -withSize :: (Int -> Builder) -> Builder -withSize f = Builder $ \ k buf@(Buffer _ _ _ l) -> - runBuilder (f l) k buf -{-# INLINE withSize #-} - --- | Map the resulting list of texts. -mapBuilder :: ([S.Text] -> [S.Text]) -> Builder -mapBuilder f = Builder (fmap f .) - ------------------------------------------------------------------------- - --- | Ensure that there are at least @n@ many elements available. -ensureFree :: Int -> Builder -ensureFree !n = withSize $ \ l -> - if n <= l - then empty - else flush `append'` withBuffer (const (newBuffer (max n smallChunkSize))) -{-# INLINE [0] ensureFree #-} - -writeAtMost :: Int -> (forall s. 
A.MArray s -> Int -> ST s Int) -> Builder -writeAtMost n f = ensureFree n `append'` withBuffer (writeBuffer f) -{-# INLINE [0] writeAtMost #-} - --- | Ensure that @n@ many elements are available, and then use @f@ to --- write some elements into the memory. -writeN :: Int -> (forall s. A.MArray s -> Int -> ST s ()) -> Builder -writeN n f = writeAtMost n (\ p o -> f p o >> return n) -{-# INLINE writeN #-} - -writeBuffer :: (A.MArray s -> Int -> ST s Int) -> Buffer s -> ST s (Buffer s) -writeBuffer f (Buffer p o u l) = do - n <- f p (o+u) - return $! Buffer p o (u+n) (l-n) -{-# INLINE writeBuffer #-} - -newBuffer :: Int -> ST s (Buffer s) -newBuffer size = do - arr <- A.new size - return $! Buffer arr 0 0 size -{-# INLINE newBuffer #-} - ------------------------------------------------------------------------- --- Some nice rules for Builder - --- This function makes GHC understand that 'writeN' and 'ensureFree' --- are *not* recursive in the precense of the rewrite rules below. --- This is not needed with GHC 7+. -append' :: Builder -> Builder -> Builder -append' (Builder f) (Builder g) = Builder (f . g) -{-# INLINE append' #-} - -{-# RULES - -"append/writeAtMost" forall a b (f::forall s. A.MArray s -> Int -> ST s Int) - (g::forall s. A.MArray s -> Int -> ST s Int) ws. - append (writeAtMost a f) (append (writeAtMost b g) ws) = - append (writeAtMost (a+b) (\marr o -> f marr o >>= \ n -> - g marr (o+n) >>= \ m -> - let s = n+m in s `seq` return s)) ws - -"writeAtMost/writeAtMost" forall a b (f::forall s. A.MArray s -> Int -> ST s Int) - (g::forall s. A.MArray s -> Int -> ST s Int). - append (writeAtMost a f) (writeAtMost b g) = - writeAtMost (a+b) (\marr o -> f marr o >>= \ n -> - g marr (o+n) >>= \ m -> - let s = n+m in s `seq` return s) - -"ensureFree/ensureFree" forall a b . 
- append (ensureFree a) (ensureFree b) = ensureFree (max a b) - -"flush/flush" - append flush flush = flush - - #-} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Encoding/Fusion/Common.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Encoding/Fusion/Common.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Encoding/Fusion/Common.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Encoding/Fusion/Common.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,123 +0,0 @@ -{-# LANGUAGE BangPatterns #-} - --- | --- Module : Data.Text.Internal.Encoding.Fusion.Common --- Copyright : (c) Tom Harper 2008-2009, --- (c) Bryan O'Sullivan 2009, --- (c) Duncan Coutts 2009, --- (c) Jasper Van der Jeugt 2011 --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : portable --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Use at your own risk! --- --- Fusible 'Stream'-oriented functions for converting between 'Text' --- and several common encodings. - -module Data.Text.Internal.Encoding.Fusion.Common - ( - -- * Restreaming - -- Restreaming is the act of converting from one 'Stream' - -- representation to another. 
- restreamUtf16LE - , restreamUtf16BE - , restreamUtf32LE - , restreamUtf32BE - ) where - -import Data.Bits ((.&.)) -import Data.Text.Internal.Fusion (Step(..), Stream(..)) -import Data.Text.Internal.Fusion.Types (RS(..)) -import Data.Text.Internal.Unsafe.Char (ord) -import Data.Text.Internal.Unsafe.Shift (shiftR) -import Data.Word (Word8) - -restreamUtf16BE :: Stream Char -> Stream Word8 -restreamUtf16BE (Stream next0 s0 len) = Stream next (RS0 s0) (len * 2) - where - next (RS0 s) = case next0 s of - Done -> Done - Skip s' -> Skip (RS0 s') - Yield x s' - | n < 0x10000 -> Yield (fromIntegral $ n `shiftR` 8) $ - RS1 s' (fromIntegral n) - | otherwise -> Yield c1 $ RS3 s' c2 c3 c4 - where - n = ord x - n1 = n - 0x10000 - c1 = fromIntegral (n1 `shiftR` 18 + 0xD8) - c2 = fromIntegral (n1 `shiftR` 10) - n2 = n1 .&. 0x3FF - c3 = fromIntegral (n2 `shiftR` 8 + 0xDC) - c4 = fromIntegral n2 - next (RS1 s x2) = Yield x2 (RS0 s) - next (RS2 s x2 x3) = Yield x2 (RS1 s x3) - next (RS3 s x2 x3 x4) = Yield x2 (RS2 s x3 x4) - {-# INLINE next #-} -{-# INLINE restreamUtf16BE #-} - -restreamUtf16LE :: Stream Char -> Stream Word8 -restreamUtf16LE (Stream next0 s0 len) = Stream next (RS0 s0) (len * 2) - where - next (RS0 s) = case next0 s of - Done -> Done - Skip s' -> Skip (RS0 s') - Yield x s' - | n < 0x10000 -> Yield (fromIntegral n) $ - RS1 s' (fromIntegral $ shiftR n 8) - | otherwise -> Yield c1 $ RS3 s' c2 c3 c4 - where - n = ord x - n1 = n - 0x10000 - c2 = fromIntegral (shiftR n1 18 + 0xD8) - c1 = fromIntegral (shiftR n1 10) - n2 = n1 .&. 
0x3FF - c4 = fromIntegral (shiftR n2 8 + 0xDC) - c3 = fromIntegral n2 - next (RS1 s x2) = Yield x2 (RS0 s) - next (RS2 s x2 x3) = Yield x2 (RS1 s x3) - next (RS3 s x2 x3 x4) = Yield x2 (RS2 s x3 x4) - {-# INLINE next #-} -{-# INLINE restreamUtf16LE #-} - -restreamUtf32BE :: Stream Char -> Stream Word8 -restreamUtf32BE (Stream next0 s0 len) = Stream next (RS0 s0) (len * 2) - where - next (RS0 s) = case next0 s of - Done -> Done - Skip s' -> Skip (RS0 s') - Yield x s' -> Yield c1 (RS3 s' c2 c3 c4) - where - n = ord x - c1 = fromIntegral $ shiftR n 24 - c2 = fromIntegral $ shiftR n 16 - c3 = fromIntegral $ shiftR n 8 - c4 = fromIntegral n - next (RS1 s x2) = Yield x2 (RS0 s) - next (RS2 s x2 x3) = Yield x2 (RS1 s x3) - next (RS3 s x2 x3 x4) = Yield x2 (RS2 s x3 x4) - {-# INLINE next #-} -{-# INLINE restreamUtf32BE #-} - -restreamUtf32LE :: Stream Char -> Stream Word8 -restreamUtf32LE (Stream next0 s0 len) = Stream next (RS0 s0) (len * 2) - where - next (RS0 s) = case next0 s of - Done -> Done - Skip s' -> Skip (RS0 s') - Yield x s' -> Yield c1 (RS3 s' c2 c3 c4) - where - n = ord x - c4 = fromIntegral $ shiftR n 24 - c3 = fromIntegral $ shiftR n 16 - c2 = fromIntegral $ shiftR n 8 - c1 = fromIntegral n - next (RS1 s x2) = Yield x2 (RS0 s) - next (RS2 s x2 x3) = Yield x2 (RS1 s x3) - next (RS3 s x2 x3 x4) = Yield x2 (RS2 s x3 x4) - {-# INLINE next #-} -{-# INLINE restreamUtf32LE #-} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Encoding/Fusion.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Encoding/Fusion.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Encoding/Fusion.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Encoding/Fusion.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,208 +0,0 @@ -{-# LANGUAGE BangPatterns, CPP, Rank2Types #-} - --- | --- Module : Data.Text.Internal.Encoding.Fusion --- Copyright : (c) Tom Harper 2008-2009, --- (c) Bryan 
O'Sullivan 2009, --- (c) Duncan Coutts 2009 --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : portable --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- Fusible 'Stream'-oriented functions for converting between 'Text' --- and several common encodings. - -module Data.Text.Internal.Encoding.Fusion - ( - -- * Streaming - streamASCII - , streamUtf8 - , streamUtf16LE - , streamUtf16BE - , streamUtf32LE - , streamUtf32BE - - -- * Unstreaming - , unstream - - , module Data.Text.Internal.Encoding.Fusion.Common - ) where - -#if defined(ASSERTS) -import Control.Exception (assert) -#endif -import Data.ByteString.Internal (ByteString(..), mallocByteString, memcpy) -import Data.Text.Internal.Fusion (Step(..), Stream(..)) -import Data.Text.Internal.Fusion.Size -import Data.Text.Encoding.Error -import Data.Text.Internal.Encoding.Fusion.Common -import Data.Text.Internal.Unsafe.Char (unsafeChr, unsafeChr8, unsafeChr32) -import Data.Text.Internal.Unsafe.Shift (shiftL, shiftR) -import Data.Word (Word8, Word16, Word32) -import Foreign.ForeignPtr (withForeignPtr, ForeignPtr) -import Foreign.Storable (pokeByteOff) -import qualified Data.ByteString as B -import qualified Data.ByteString.Unsafe as B -import qualified Data.Text.Internal.Encoding.Utf8 as U8 -import qualified Data.Text.Internal.Encoding.Utf16 as U16 -import qualified Data.Text.Internal.Encoding.Utf32 as U32 -import Data.Text.Unsafe (unsafeDupablePerformIO) - -streamASCII :: ByteString -> Stream Char -streamASCII bs = Stream next 0 (maxSize l) - where - l = B.length bs - {-# INLINE next #-} - next i - | i >= l = Done - | otherwise = Yield (unsafeChr8 x1) (i+1) - where - x1 = B.unsafeIndex bs i -{-# DEPRECATED streamASCII "Do not use this function" #-} -{-# INLINE [0] streamASCII #-} - --- | /O(n)/ 
Convert a 'ByteString' into a 'Stream Char', using UTF-8 --- encoding. -streamUtf8 :: OnDecodeError -> ByteString -> Stream Char -streamUtf8 onErr bs = Stream next 0 (maxSize l) - where - l = B.length bs - next i - | i >= l = Done - | U8.validate1 x1 = Yield (unsafeChr8 x1) (i+1) - | i+1 < l && U8.validate2 x1 x2 = Yield (U8.chr2 x1 x2) (i+2) - | i+2 < l && U8.validate3 x1 x2 x3 = Yield (U8.chr3 x1 x2 x3) (i+3) - | i+3 < l && U8.validate4 x1 x2 x3 x4 = Yield (U8.chr4 x1 x2 x3 x4) (i+4) - | otherwise = decodeError "streamUtf8" "UTF-8" onErr (Just x1) (i+1) - where - x1 = idx i - x2 = idx (i + 1) - x3 = idx (i + 2) - x4 = idx (i + 3) - idx = B.unsafeIndex bs -{-# INLINE [0] streamUtf8 #-} - --- | /O(n)/ Convert a 'ByteString' into a 'Stream Char', using little --- endian UTF-16 encoding. -streamUtf16LE :: OnDecodeError -> ByteString -> Stream Char -streamUtf16LE onErr bs = Stream next 0 (maxSize (l `shiftR` 1)) - where - l = B.length bs - {-# INLINE next #-} - next i - | i >= l = Done - | i+1 < l && U16.validate1 x1 = Yield (unsafeChr x1) (i+2) - | i+3 < l && U16.validate2 x1 x2 = Yield (U16.chr2 x1 x2) (i+4) - | otherwise = decodeError "streamUtf16LE" "UTF-16LE" onErr Nothing (i+1) - where - x1 = idx i + (idx (i + 1) `shiftL` 8) - x2 = idx (i + 2) + (idx (i + 3) `shiftL` 8) - idx = fromIntegral . B.unsafeIndex bs :: Int -> Word16 -{-# INLINE [0] streamUtf16LE #-} - --- | /O(n)/ Convert a 'ByteString' into a 'Stream Char', using big --- endian UTF-16 encoding. 
-streamUtf16BE :: OnDecodeError -> ByteString -> Stream Char -streamUtf16BE onErr bs = Stream next 0 (maxSize (l `shiftR` 1)) - where - l = B.length bs - {-# INLINE next #-} - next i - | i >= l = Done - | i+1 < l && U16.validate1 x1 = Yield (unsafeChr x1) (i+2) - | i+3 < l && U16.validate2 x1 x2 = Yield (U16.chr2 x1 x2) (i+4) - | otherwise = decodeError "streamUtf16BE" "UTF-16BE" onErr Nothing (i+1) - where - x1 = (idx i `shiftL` 8) + idx (i + 1) - x2 = (idx (i + 2) `shiftL` 8) + idx (i + 3) - idx = fromIntegral . B.unsafeIndex bs :: Int -> Word16 -{-# INLINE [0] streamUtf16BE #-} - --- | /O(n)/ Convert a 'ByteString' into a 'Stream Char', using big --- endian UTF-32 encoding. -streamUtf32BE :: OnDecodeError -> ByteString -> Stream Char -streamUtf32BE onErr bs = Stream next 0 (maxSize (l `shiftR` 2)) - where - l = B.length bs - {-# INLINE next #-} - next i - | i >= l = Done - | i+3 < l && U32.validate x = Yield (unsafeChr32 x) (i+4) - | otherwise = decodeError "streamUtf32BE" "UTF-32BE" onErr Nothing (i+1) - where - x = shiftL x1 24 + shiftL x2 16 + shiftL x3 8 + x4 - x1 = idx i - x2 = idx (i+1) - x3 = idx (i+2) - x4 = idx (i+3) - idx = fromIntegral . B.unsafeIndex bs :: Int -> Word32 -{-# INLINE [0] streamUtf32BE #-} - --- | /O(n)/ Convert a 'ByteString' into a 'Stream Char', using little --- endian UTF-32 encoding. -streamUtf32LE :: OnDecodeError -> ByteString -> Stream Char -streamUtf32LE onErr bs = Stream next 0 (maxSize (l `shiftR` 2)) - where - l = B.length bs - {-# INLINE next #-} - next i - | i >= l = Done - | i+3 < l && U32.validate x = Yield (unsafeChr32 x) (i+4) - | otherwise = decodeError "streamUtf32LE" "UTF-32LE" onErr Nothing (i+1) - where - x = shiftL x4 24 + shiftL x3 16 + shiftL x2 8 + x1 - x1 = idx i - x2 = idx $ i+1 - x3 = idx $ i+2 - x4 = idx $ i+3 - idx = fromIntegral . B.unsafeIndex bs :: Int -> Word32 -{-# INLINE [0] streamUtf32LE #-} - --- | /O(n)/ Convert a 'Stream' 'Word8' to a 'ByteString'. 
-unstream :: Stream Word8 -> ByteString -unstream (Stream next s0 len) = unsafeDupablePerformIO $ do - let mlen = upperBound 4 len - mallocByteString mlen >>= loop mlen 0 s0 - where - loop !n !off !s fp = case next s of - Done -> trimUp fp n off - Skip s' -> loop n off s' fp - Yield x s' - | off == n -> realloc fp n off s' x - | otherwise -> do - withForeignPtr fp $ \p -> pokeByteOff p off x - loop n (off+1) s' fp - {-# NOINLINE realloc #-} - realloc fp n off s x = do - let n' = n+n - fp' <- copy0 fp n n' - withForeignPtr fp' $ \p -> pokeByteOff p off x - loop n' (off+1) s fp' - {-# NOINLINE trimUp #-} - trimUp fp _ off = return $! PS fp 0 off - copy0 :: ForeignPtr Word8 -> Int -> Int -> IO (ForeignPtr Word8) - copy0 !src !srcLen !destLen = -#if defined(ASSERTS) - assert (srcLen <= destLen) $ -#endif - do - dest <- mallocByteString destLen - withForeignPtr src $ \src' -> - withForeignPtr dest $ \dest' -> - memcpy dest' src' (fromIntegral srcLen) - return dest - -decodeError :: forall s. String -> String -> OnDecodeError -> Maybe Word8 - -> s -> Step s Char -decodeError func kind onErr mb i = - case onErr desc mb of - Nothing -> Skip i - Just c -> Yield c i - where desc = "Data.Text.Internal.Encoding.Fusion." 
++ func ++ ": Invalid " ++ - kind ++ " stream" diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Encoding/Utf16.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Encoding/Utf16.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Encoding/Utf16.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Encoding/Utf16.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,45 +0,0 @@ -{-# LANGUAGE MagicHash, BangPatterns #-} - --- | --- Module : Data.Text.Internal.Encoding.Utf16 --- Copyright : (c) 2008, 2009 Tom Harper, --- (c) 2009 Bryan O'Sullivan, --- (c) 2009 Duncan Coutts --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- Basic UTF-16 validation and character manipulation. 
-module Data.Text.Internal.Encoding.Utf16 - ( - chr2 - , validate1 - , validate2 - ) where - -import GHC.Exts -import GHC.Word (Word16(..)) - -chr2 :: Word16 -> Word16 -> Char -chr2 (W16# a#) (W16# b#) = C# (chr# (upper# +# lower# +# 0x10000#)) - where - !x# = word2Int# a# - !y# = word2Int# b# - !upper# = uncheckedIShiftL# (x# -# 0xD800#) 10# - !lower# = y# -# 0xDC00# -{-# INLINE chr2 #-} - -validate1 :: Word16 -> Bool -validate1 x1 = x1 < 0xD800 || x1 > 0xDFFF -{-# INLINE validate1 #-} - -validate2 :: Word16 -> Word16 -> Bool -validate2 x1 x2 = x1 >= 0xD800 && x1 <= 0xDBFF && - x2 >= 0xDC00 && x2 <= 0xDFFF -{-# INLINE validate2 #-} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Encoding/Utf32.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Encoding/Utf32.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Encoding/Utf32.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Encoding/Utf32.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,26 +0,0 @@ --- | --- Module : Data.Text.Internal.Encoding.Utf32 --- Copyright : (c) 2008, 2009 Tom Harper, --- (c) 2009, 2010 Bryan O'Sullivan, --- (c) 2009 Duncan Coutts --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : portable --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- Basic UTF-32 validation. 
-module Data.Text.Internal.Encoding.Utf32 - ( - validate - ) where - -import Data.Word (Word32) - -validate :: Word32 -> Bool -validate x1 = x1 < 0xD800 || (x1 > 0xDFFF && x1 <= 0x10FFFF) -{-# INLINE validate #-} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Encoding/Utf8.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Encoding/Utf8.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Encoding/Utf8.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Encoding/Utf8.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,168 +0,0 @@ -{-# LANGUAGE CPP, MagicHash, BangPatterns #-} - --- | --- Module : Data.Text.Internal.Encoding.Utf8 --- Copyright : (c) 2008, 2009 Tom Harper, --- (c) 2009, 2010 Bryan O'Sullivan, --- (c) 2009 Duncan Coutts --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- Basic UTF-8 validation and character manipulation. 
-module Data.Text.Internal.Encoding.Utf8 - ( - -- Decomposition - ord2 - , ord3 - , ord4 - -- Construction - , chr2 - , chr3 - , chr4 - -- * Validation - , validate1 - , validate2 - , validate3 - , validate4 - ) where - -#if defined(TEST_SUITE) -# undef ASSERTS -#endif - -#if defined(ASSERTS) -import Control.Exception (assert) -#endif -import Data.Bits ((.&.)) -import Data.Text.Internal.Unsafe.Char (ord) -import Data.Text.Internal.Unsafe.Shift (shiftR) -import GHC.Exts -import GHC.Word (Word8(..)) - -default(Int) - -between :: Word8 -- ^ byte to check - -> Word8 -- ^ lower bound - -> Word8 -- ^ upper bound - -> Bool -between x y z = x >= y && x <= z -{-# INLINE between #-} - -ord2 :: Char -> (Word8,Word8) -ord2 c = -#if defined(ASSERTS) - assert (n >= 0x80 && n <= 0x07ff) -#endif - (x1,x2) - where - n = ord c - x1 = fromIntegral $ (n `shiftR` 6) + 0xC0 - x2 = fromIntegral $ (n .&. 0x3F) + 0x80 - -ord3 :: Char -> (Word8,Word8,Word8) -ord3 c = -#if defined(ASSERTS) - assert (n >= 0x0800 && n <= 0xffff) -#endif - (x1,x2,x3) - where - n = ord c - x1 = fromIntegral $ (n `shiftR` 12) + 0xE0 - x2 = fromIntegral $ ((n `shiftR` 6) .&. 0x3F) + 0x80 - x3 = fromIntegral $ (n .&. 0x3F) + 0x80 - -ord4 :: Char -> (Word8,Word8,Word8,Word8) -ord4 c = -#if defined(ASSERTS) - assert (n >= 0x10000) -#endif - (x1,x2,x3,x4) - where - n = ord c - x1 = fromIntegral $ (n `shiftR` 18) + 0xF0 - x2 = fromIntegral $ ((n `shiftR` 12) .&. 0x3F) + 0x80 - x3 = fromIntegral $ ((n `shiftR` 6) .&. 0x3F) + 0x80 - x4 = fromIntegral $ (n .&. 
0x3F) + 0x80 - -chr2 :: Word8 -> Word8 -> Char -chr2 (W8# x1#) (W8# x2#) = C# (chr# (z1# +# z2#)) - where - !y1# = word2Int# x1# - !y2# = word2Int# x2# - !z1# = uncheckedIShiftL# (y1# -# 0xC0#) 6# - !z2# = y2# -# 0x80# -{-# INLINE chr2 #-} - -chr3 :: Word8 -> Word8 -> Word8 -> Char -chr3 (W8# x1#) (W8# x2#) (W8# x3#) = C# (chr# (z1# +# z2# +# z3#)) - where - !y1# = word2Int# x1# - !y2# = word2Int# x2# - !y3# = word2Int# x3# - !z1# = uncheckedIShiftL# (y1# -# 0xE0#) 12# - !z2# = uncheckedIShiftL# (y2# -# 0x80#) 6# - !z3# = y3# -# 0x80# -{-# INLINE chr3 #-} - -chr4 :: Word8 -> Word8 -> Word8 -> Word8 -> Char -chr4 (W8# x1#) (W8# x2#) (W8# x3#) (W8# x4#) = - C# (chr# (z1# +# z2# +# z3# +# z4#)) - where - !y1# = word2Int# x1# - !y2# = word2Int# x2# - !y3# = word2Int# x3# - !y4# = word2Int# x4# - !z1# = uncheckedIShiftL# (y1# -# 0xF0#) 18# - !z2# = uncheckedIShiftL# (y2# -# 0x80#) 12# - !z3# = uncheckedIShiftL# (y3# -# 0x80#) 6# - !z4# = y4# -# 0x80# -{-# INLINE chr4 #-} - -validate1 :: Word8 -> Bool -validate1 x1 = x1 <= 0x7F -{-# INLINE validate1 #-} - -validate2 :: Word8 -> Word8 -> Bool -validate2 x1 x2 = between x1 0xC2 0xDF && between x2 0x80 0xBF -{-# INLINE validate2 #-} - -validate3 :: Word8 -> Word8 -> Word8 -> Bool -{-# INLINE validate3 #-} -validate3 x1 x2 x3 = validate3_1 || validate3_2 || validate3_3 || validate3_4 - where - validate3_1 = (x1 == 0xE0) && - between x2 0xA0 0xBF && - between x3 0x80 0xBF - validate3_2 = between x1 0xE1 0xEC && - between x2 0x80 0xBF && - between x3 0x80 0xBF - validate3_3 = x1 == 0xED && - between x2 0x80 0x9F && - between x3 0x80 0xBF - validate3_4 = between x1 0xEE 0xEF && - between x2 0x80 0xBF && - between x3 0x80 0xBF - -validate4 :: Word8 -> Word8 -> Word8 -> Word8 -> Bool -{-# INLINE validate4 #-} -validate4 x1 x2 x3 x4 = validate4_1 || validate4_2 || validate4_3 - where - validate4_1 = x1 == 0xF0 && - between x2 0x90 0xBF && - between x3 0x80 0xBF && - between x4 0x80 0xBF - validate4_2 = between x1 0xF1 0xF3 && - 
between x2 0x80 0xBF && - between x3 0x80 0xBF && - between x4 0x80 0xBF - validate4_3 = x1 == 0xF4 && - between x2 0x80 0x8F && - between x3 0x80 0xBF && - between x4 0x80 0xBF diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Functions.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Functions.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Functions.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Functions.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,31 +0,0 @@ -{-# LANGUAGE CPP, DeriveDataTypeable #-} - --- | --- Module : Data.Text.Internal.Functions --- Copyright : 2010 Bryan O'Sullivan --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- Useful functions. - -module Data.Text.Internal.Functions - ( - intersperse - ) where - --- | A lazier version of Data.List.intersperse. The other version --- causes space leaks! 
-intersperse :: a -> [a] -> [a] -intersperse _ [] = [] -intersperse sep (x:xs) = x : go xs - where - go [] = [] - go (y:ys) = sep : y: go ys -{-# INLINE intersperse #-} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Fusion/CaseMapping.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Fusion/CaseMapping.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Fusion/CaseMapping.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Fusion/CaseMapping.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,570 +0,0 @@ -{-# LANGUAGE Rank2Types #-} --- AUTOMATICALLY GENERATED - DO NOT EDIT --- Generated by scripts/SpecialCasing.hs --- CaseFolding-6.3.0.txt --- Date: 2012-12-20, 22:14:35 GMT [MD] --- SpecialCasing-6.3.0.txt --- Date: 2013-05-08, 13:54:51 GMT [MD] - -module Data.Text.Internal.Fusion.CaseMapping where -import Data.Char -import Data.Text.Internal.Fusion.Types - -upperMapping :: forall s. 
Char -> s -> Step (CC s) Char -{-# INLINE upperMapping #-} --- LATIN SMALL LETTER SHARP S -upperMapping '\x00df' s = Yield '\x0053' (CC s '\x0053' '\x0000') --- LATIN SMALL LIGATURE FF -upperMapping '\xfb00' s = Yield '\x0046' (CC s '\x0046' '\x0000') --- LATIN SMALL LIGATURE FI -upperMapping '\xfb01' s = Yield '\x0046' (CC s '\x0049' '\x0000') --- LATIN SMALL LIGATURE FL -upperMapping '\xfb02' s = Yield '\x0046' (CC s '\x004c' '\x0000') --- LATIN SMALL LIGATURE FFI -upperMapping '\xfb03' s = Yield '\x0046' (CC s '\x0046' '\x0049') --- LATIN SMALL LIGATURE FFL -upperMapping '\xfb04' s = Yield '\x0046' (CC s '\x0046' '\x004c') --- LATIN SMALL LIGATURE LONG S T -upperMapping '\xfb05' s = Yield '\x0053' (CC s '\x0054' '\x0000') --- LATIN SMALL LIGATURE ST -upperMapping '\xfb06' s = Yield '\x0053' (CC s '\x0054' '\x0000') --- ARMENIAN SMALL LIGATURE ECH YIWN -upperMapping '\x0587' s = Yield '\x0535' (CC s '\x0552' '\x0000') --- ARMENIAN SMALL LIGATURE MEN NOW -upperMapping '\xfb13' s = Yield '\x0544' (CC s '\x0546' '\x0000') --- ARMENIAN SMALL LIGATURE MEN ECH -upperMapping '\xfb14' s = Yield '\x0544' (CC s '\x0535' '\x0000') --- ARMENIAN SMALL LIGATURE MEN INI -upperMapping '\xfb15' s = Yield '\x0544' (CC s '\x053b' '\x0000') --- ARMENIAN SMALL LIGATURE VEW NOW -upperMapping '\xfb16' s = Yield '\x054e' (CC s '\x0546' '\x0000') --- ARMENIAN SMALL LIGATURE MEN XEH -upperMapping '\xfb17' s = Yield '\x0544' (CC s '\x053d' '\x0000') --- LATIN SMALL LETTER N PRECEDED BY APOSTROPHE -upperMapping '\x0149' s = Yield '\x02bc' (CC s '\x004e' '\x0000') --- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS -upperMapping '\x0390' s = Yield '\x0399' (CC s '\x0308' '\x0301') --- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS -upperMapping '\x03b0' s = Yield '\x03a5' (CC s '\x0308' '\x0301') --- LATIN SMALL LETTER J WITH CARON -upperMapping '\x01f0' s = Yield '\x004a' (CC s '\x030c' '\x0000') --- LATIN SMALL LETTER H WITH LINE BELOW -upperMapping '\x1e96' s = Yield '\x0048' (CC s 
'\x0331' '\x0000') --- LATIN SMALL LETTER T WITH DIAERESIS -upperMapping '\x1e97' s = Yield '\x0054' (CC s '\x0308' '\x0000') --- LATIN SMALL LETTER W WITH RING ABOVE -upperMapping '\x1e98' s = Yield '\x0057' (CC s '\x030a' '\x0000') --- LATIN SMALL LETTER Y WITH RING ABOVE -upperMapping '\x1e99' s = Yield '\x0059' (CC s '\x030a' '\x0000') --- LATIN SMALL LETTER A WITH RIGHT HALF RING -upperMapping '\x1e9a' s = Yield '\x0041' (CC s '\x02be' '\x0000') --- GREEK SMALL LETTER UPSILON WITH PSILI -upperMapping '\x1f50' s = Yield '\x03a5' (CC s '\x0313' '\x0000') --- GREEK SMALL LETTER UPSILON WITH PSILI AND VARIA -upperMapping '\x1f52' s = Yield '\x03a5' (CC s '\x0313' '\x0300') --- GREEK SMALL LETTER UPSILON WITH PSILI AND OXIA -upperMapping '\x1f54' s = Yield '\x03a5' (CC s '\x0313' '\x0301') --- GREEK SMALL LETTER UPSILON WITH PSILI AND PERISPOMENI -upperMapping '\x1f56' s = Yield '\x03a5' (CC s '\x0313' '\x0342') --- GREEK SMALL LETTER ALPHA WITH PERISPOMENI -upperMapping '\x1fb6' s = Yield '\x0391' (CC s '\x0342' '\x0000') --- GREEK SMALL LETTER ETA WITH PERISPOMENI -upperMapping '\x1fc6' s = Yield '\x0397' (CC s '\x0342' '\x0000') --- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND VARIA -upperMapping '\x1fd2' s = Yield '\x0399' (CC s '\x0308' '\x0300') --- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND OXIA -upperMapping '\x1fd3' s = Yield '\x0399' (CC s '\x0308' '\x0301') --- GREEK SMALL LETTER IOTA WITH PERISPOMENI -upperMapping '\x1fd6' s = Yield '\x0399' (CC s '\x0342' '\x0000') --- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND PERISPOMENI -upperMapping '\x1fd7' s = Yield '\x0399' (CC s '\x0308' '\x0342') --- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND VARIA -upperMapping '\x1fe2' s = Yield '\x03a5' (CC s '\x0308' '\x0300') --- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND OXIA -upperMapping '\x1fe3' s = Yield '\x03a5' (CC s '\x0308' '\x0301') --- GREEK SMALL LETTER RHO WITH PSILI -upperMapping '\x1fe4' s = Yield '\x03a1' (CC s '\x0313' '\x0000') --- GREEK SMALL LETTER 
UPSILON WITH PERISPOMENI -upperMapping '\x1fe6' s = Yield '\x03a5' (CC s '\x0342' '\x0000') --- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND PERISPOMENI -upperMapping '\x1fe7' s = Yield '\x03a5' (CC s '\x0308' '\x0342') --- GREEK SMALL LETTER OMEGA WITH PERISPOMENI -upperMapping '\x1ff6' s = Yield '\x03a9' (CC s '\x0342' '\x0000') --- GREEK SMALL LETTER ALPHA WITH PSILI AND YPOGEGRAMMENI -upperMapping '\x1f80' s = Yield '\x1f08' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ALPHA WITH DASIA AND YPOGEGRAMMENI -upperMapping '\x1f81' s = Yield '\x1f09' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ALPHA WITH PSILI AND VARIA AND YPOGEGRAMMENI -upperMapping '\x1f82' s = Yield '\x1f0a' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ALPHA WITH DASIA AND VARIA AND YPOGEGRAMMENI -upperMapping '\x1f83' s = Yield '\x1f0b' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ALPHA WITH PSILI AND OXIA AND YPOGEGRAMMENI -upperMapping '\x1f84' s = Yield '\x1f0c' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ALPHA WITH DASIA AND OXIA AND YPOGEGRAMMENI -upperMapping '\x1f85' s = Yield '\x1f0d' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ALPHA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI -upperMapping '\x1f86' s = Yield '\x1f0e' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ALPHA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI -upperMapping '\x1f87' s = Yield '\x1f0f' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ALPHA WITH PSILI AND PROSGEGRAMMENI -upperMapping '\x1f88' s = Yield '\x1f08' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ALPHA WITH DASIA AND PROSGEGRAMMENI -upperMapping '\x1f89' s = Yield '\x1f09' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ALPHA WITH PSILI AND VARIA AND PROSGEGRAMMENI -upperMapping '\x1f8a' s = Yield '\x1f0a' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ALPHA WITH DASIA AND VARIA AND PROSGEGRAMMENI -upperMapping '\x1f8b' s = Yield '\x1f0b' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ALPHA WITH PSILI AND OXIA AND 
PROSGEGRAMMENI -upperMapping '\x1f8c' s = Yield '\x1f0c' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ALPHA WITH DASIA AND OXIA AND PROSGEGRAMMENI -upperMapping '\x1f8d' s = Yield '\x1f0d' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ALPHA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI -upperMapping '\x1f8e' s = Yield '\x1f0e' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ALPHA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI -upperMapping '\x1f8f' s = Yield '\x1f0f' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ETA WITH PSILI AND YPOGEGRAMMENI -upperMapping '\x1f90' s = Yield '\x1f28' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ETA WITH DASIA AND YPOGEGRAMMENI -upperMapping '\x1f91' s = Yield '\x1f29' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ETA WITH PSILI AND VARIA AND YPOGEGRAMMENI -upperMapping '\x1f92' s = Yield '\x1f2a' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ETA WITH DASIA AND VARIA AND YPOGEGRAMMENI -upperMapping '\x1f93' s = Yield '\x1f2b' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ETA WITH PSILI AND OXIA AND YPOGEGRAMMENI -upperMapping '\x1f94' s = Yield '\x1f2c' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ETA WITH DASIA AND OXIA AND YPOGEGRAMMENI -upperMapping '\x1f95' s = Yield '\x1f2d' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ETA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI -upperMapping '\x1f96' s = Yield '\x1f2e' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ETA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI -upperMapping '\x1f97' s = Yield '\x1f2f' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ETA WITH PSILI AND PROSGEGRAMMENI -upperMapping '\x1f98' s = Yield '\x1f28' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ETA WITH DASIA AND PROSGEGRAMMENI -upperMapping '\x1f99' s = Yield '\x1f29' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ETA WITH PSILI AND VARIA AND PROSGEGRAMMENI -upperMapping '\x1f9a' s = Yield '\x1f2a' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ETA WITH DASIA AND VARIA 
AND PROSGEGRAMMENI -upperMapping '\x1f9b' s = Yield '\x1f2b' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ETA WITH PSILI AND OXIA AND PROSGEGRAMMENI -upperMapping '\x1f9c' s = Yield '\x1f2c' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ETA WITH DASIA AND OXIA AND PROSGEGRAMMENI -upperMapping '\x1f9d' s = Yield '\x1f2d' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ETA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI -upperMapping '\x1f9e' s = Yield '\x1f2e' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ETA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI -upperMapping '\x1f9f' s = Yield '\x1f2f' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER OMEGA WITH PSILI AND YPOGEGRAMMENI -upperMapping '\x1fa0' s = Yield '\x1f68' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER OMEGA WITH DASIA AND YPOGEGRAMMENI -upperMapping '\x1fa1' s = Yield '\x1f69' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER OMEGA WITH PSILI AND VARIA AND YPOGEGRAMMENI -upperMapping '\x1fa2' s = Yield '\x1f6a' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER OMEGA WITH DASIA AND VARIA AND YPOGEGRAMMENI -upperMapping '\x1fa3' s = Yield '\x1f6b' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER OMEGA WITH PSILI AND OXIA AND YPOGEGRAMMENI -upperMapping '\x1fa4' s = Yield '\x1f6c' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER OMEGA WITH DASIA AND OXIA AND YPOGEGRAMMENI -upperMapping '\x1fa5' s = Yield '\x1f6d' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER OMEGA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI -upperMapping '\x1fa6' s = Yield '\x1f6e' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER OMEGA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI -upperMapping '\x1fa7' s = Yield '\x1f6f' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER OMEGA WITH PSILI AND PROSGEGRAMMENI -upperMapping '\x1fa8' s = Yield '\x1f68' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER OMEGA WITH DASIA AND PROSGEGRAMMENI -upperMapping '\x1fa9' s = Yield '\x1f69' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER OMEGA 
WITH PSILI AND VARIA AND PROSGEGRAMMENI -upperMapping '\x1faa' s = Yield '\x1f6a' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER OMEGA WITH DASIA AND VARIA AND PROSGEGRAMMENI -upperMapping '\x1fab' s = Yield '\x1f6b' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER OMEGA WITH PSILI AND OXIA AND PROSGEGRAMMENI -upperMapping '\x1fac' s = Yield '\x1f6c' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER OMEGA WITH DASIA AND OXIA AND PROSGEGRAMMENI -upperMapping '\x1fad' s = Yield '\x1f6d' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER OMEGA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI -upperMapping '\x1fae' s = Yield '\x1f6e' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER OMEGA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI -upperMapping '\x1faf' s = Yield '\x1f6f' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ALPHA WITH YPOGEGRAMMENI -upperMapping '\x1fb3' s = Yield '\x0391' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ALPHA WITH PROSGEGRAMMENI -upperMapping '\x1fbc' s = Yield '\x0391' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ETA WITH YPOGEGRAMMENI -upperMapping '\x1fc3' s = Yield '\x0397' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ETA WITH PROSGEGRAMMENI -upperMapping '\x1fcc' s = Yield '\x0397' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER OMEGA WITH YPOGEGRAMMENI -upperMapping '\x1ff3' s = Yield '\x03a9' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER OMEGA WITH PROSGEGRAMMENI -upperMapping '\x1ffc' s = Yield '\x03a9' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ALPHA WITH VARIA AND YPOGEGRAMMENI -upperMapping '\x1fb2' s = Yield '\x1fba' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ALPHA WITH OXIA AND YPOGEGRAMMENI -upperMapping '\x1fb4' s = Yield '\x0386' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ETA WITH VARIA AND YPOGEGRAMMENI -upperMapping '\x1fc2' s = Yield '\x1fca' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ETA WITH OXIA AND YPOGEGRAMMENI -upperMapping '\x1fc4' s = Yield '\x0389' (CC s '\x0399' '\x0000') --- 
GREEK SMALL LETTER OMEGA WITH VARIA AND YPOGEGRAMMENI -upperMapping '\x1ff2' s = Yield '\x1ffa' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER OMEGA WITH OXIA AND YPOGEGRAMMENI -upperMapping '\x1ff4' s = Yield '\x038f' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ALPHA WITH PERISPOMENI AND YPOGEGRAMMENI -upperMapping '\x1fb7' s = Yield '\x0391' (CC s '\x0342' '\x0399') --- GREEK SMALL LETTER ETA WITH PERISPOMENI AND YPOGEGRAMMENI -upperMapping '\x1fc7' s = Yield '\x0397' (CC s '\x0342' '\x0399') --- GREEK SMALL LETTER OMEGA WITH PERISPOMENI AND YPOGEGRAMMENI -upperMapping '\x1ff7' s = Yield '\x03a9' (CC s '\x0342' '\x0399') -upperMapping c s = Yield (toUpper c) (CC s '\0' '\0') -lowerMapping :: forall s. Char -> s -> Step (CC s) Char -{-# INLINE lowerMapping #-} --- LATIN CAPITAL LETTER I WITH DOT ABOVE -lowerMapping '\x0130' s = Yield '\x0069' (CC s '\x0307' '\x0000') -lowerMapping c s = Yield (toLower c) (CC s '\0' '\0') -titleMapping :: forall s. Char -> s -> Step (CC s) Char -{-# INLINE titleMapping #-} --- LATIN SMALL LETTER SHARP S -titleMapping '\x00df' s = Yield '\x0053' (CC s '\x0073' '\x0000') --- LATIN SMALL LIGATURE FF -titleMapping '\xfb00' s = Yield '\x0046' (CC s '\x0066' '\x0000') --- LATIN SMALL LIGATURE FI -titleMapping '\xfb01' s = Yield '\x0046' (CC s '\x0069' '\x0000') --- LATIN SMALL LIGATURE FL -titleMapping '\xfb02' s = Yield '\x0046' (CC s '\x006c' '\x0000') --- LATIN SMALL LIGATURE FFI -titleMapping '\xfb03' s = Yield '\x0046' (CC s '\x0066' '\x0069') --- LATIN SMALL LIGATURE FFL -titleMapping '\xfb04' s = Yield '\x0046' (CC s '\x0066' '\x006c') --- LATIN SMALL LIGATURE LONG S T -titleMapping '\xfb05' s = Yield '\x0053' (CC s '\x0074' '\x0000') --- LATIN SMALL LIGATURE ST -titleMapping '\xfb06' s = Yield '\x0053' (CC s '\x0074' '\x0000') --- ARMENIAN SMALL LIGATURE ECH YIWN -titleMapping '\x0587' s = Yield '\x0535' (CC s '\x0582' '\x0000') --- ARMENIAN SMALL LIGATURE MEN NOW -titleMapping '\xfb13' s = Yield '\x0544' (CC s '\x0576' 
'\x0000') --- ARMENIAN SMALL LIGATURE MEN ECH -titleMapping '\xfb14' s = Yield '\x0544' (CC s '\x0565' '\x0000') --- ARMENIAN SMALL LIGATURE MEN INI -titleMapping '\xfb15' s = Yield '\x0544' (CC s '\x056b' '\x0000') --- ARMENIAN SMALL LIGATURE VEW NOW -titleMapping '\xfb16' s = Yield '\x054e' (CC s '\x0576' '\x0000') --- ARMENIAN SMALL LIGATURE MEN XEH -titleMapping '\xfb17' s = Yield '\x0544' (CC s '\x056d' '\x0000') --- LATIN SMALL LETTER N PRECEDED BY APOSTROPHE -titleMapping '\x0149' s = Yield '\x02bc' (CC s '\x004e' '\x0000') --- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS -titleMapping '\x0390' s = Yield '\x0399' (CC s '\x0308' '\x0301') --- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS -titleMapping '\x03b0' s = Yield '\x03a5' (CC s '\x0308' '\x0301') --- LATIN SMALL LETTER J WITH CARON -titleMapping '\x01f0' s = Yield '\x004a' (CC s '\x030c' '\x0000') --- LATIN SMALL LETTER H WITH LINE BELOW -titleMapping '\x1e96' s = Yield '\x0048' (CC s '\x0331' '\x0000') --- LATIN SMALL LETTER T WITH DIAERESIS -titleMapping '\x1e97' s = Yield '\x0054' (CC s '\x0308' '\x0000') --- LATIN SMALL LETTER W WITH RING ABOVE -titleMapping '\x1e98' s = Yield '\x0057' (CC s '\x030a' '\x0000') --- LATIN SMALL LETTER Y WITH RING ABOVE -titleMapping '\x1e99' s = Yield '\x0059' (CC s '\x030a' '\x0000') --- LATIN SMALL LETTER A WITH RIGHT HALF RING -titleMapping '\x1e9a' s = Yield '\x0041' (CC s '\x02be' '\x0000') --- GREEK SMALL LETTER UPSILON WITH PSILI -titleMapping '\x1f50' s = Yield '\x03a5' (CC s '\x0313' '\x0000') --- GREEK SMALL LETTER UPSILON WITH PSILI AND VARIA -titleMapping '\x1f52' s = Yield '\x03a5' (CC s '\x0313' '\x0300') --- GREEK SMALL LETTER UPSILON WITH PSILI AND OXIA -titleMapping '\x1f54' s = Yield '\x03a5' (CC s '\x0313' '\x0301') --- GREEK SMALL LETTER UPSILON WITH PSILI AND PERISPOMENI -titleMapping '\x1f56' s = Yield '\x03a5' (CC s '\x0313' '\x0342') --- GREEK SMALL LETTER ALPHA WITH PERISPOMENI -titleMapping '\x1fb6' s = Yield '\x0391' (CC s 
'\x0342' '\x0000') --- GREEK SMALL LETTER ETA WITH PERISPOMENI -titleMapping '\x1fc6' s = Yield '\x0397' (CC s '\x0342' '\x0000') --- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND VARIA -titleMapping '\x1fd2' s = Yield '\x0399' (CC s '\x0308' '\x0300') --- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND OXIA -titleMapping '\x1fd3' s = Yield '\x0399' (CC s '\x0308' '\x0301') --- GREEK SMALL LETTER IOTA WITH PERISPOMENI -titleMapping '\x1fd6' s = Yield '\x0399' (CC s '\x0342' '\x0000') --- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND PERISPOMENI -titleMapping '\x1fd7' s = Yield '\x0399' (CC s '\x0308' '\x0342') --- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND VARIA -titleMapping '\x1fe2' s = Yield '\x03a5' (CC s '\x0308' '\x0300') --- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND OXIA -titleMapping '\x1fe3' s = Yield '\x03a5' (CC s '\x0308' '\x0301') --- GREEK SMALL LETTER RHO WITH PSILI -titleMapping '\x1fe4' s = Yield '\x03a1' (CC s '\x0313' '\x0000') --- GREEK SMALL LETTER UPSILON WITH PERISPOMENI -titleMapping '\x1fe6' s = Yield '\x03a5' (CC s '\x0342' '\x0000') --- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND PERISPOMENI -titleMapping '\x1fe7' s = Yield '\x03a5' (CC s '\x0308' '\x0342') --- GREEK SMALL LETTER OMEGA WITH PERISPOMENI -titleMapping '\x1ff6' s = Yield '\x03a9' (CC s '\x0342' '\x0000') --- GREEK SMALL LETTER ALPHA WITH VARIA AND YPOGEGRAMMENI -titleMapping '\x1fb2' s = Yield '\x1fba' (CC s '\x0345' '\x0000') --- GREEK SMALL LETTER ALPHA WITH OXIA AND YPOGEGRAMMENI -titleMapping '\x1fb4' s = Yield '\x0386' (CC s '\x0345' '\x0000') --- GREEK SMALL LETTER ETA WITH VARIA AND YPOGEGRAMMENI -titleMapping '\x1fc2' s = Yield '\x1fca' (CC s '\x0345' '\x0000') --- GREEK SMALL LETTER ETA WITH OXIA AND YPOGEGRAMMENI -titleMapping '\x1fc4' s = Yield '\x0389' (CC s '\x0345' '\x0000') --- GREEK SMALL LETTER OMEGA WITH VARIA AND YPOGEGRAMMENI -titleMapping '\x1ff2' s = Yield '\x1ffa' (CC s '\x0345' '\x0000') --- GREEK SMALL LETTER OMEGA WITH OXIA AND YPOGEGRAMMENI 
-titleMapping '\x1ff4' s = Yield '\x038f' (CC s '\x0345' '\x0000') --- GREEK SMALL LETTER ALPHA WITH PERISPOMENI AND YPOGEGRAMMENI -titleMapping '\x1fb7' s = Yield '\x0391' (CC s '\x0342' '\x0345') --- GREEK SMALL LETTER ETA WITH PERISPOMENI AND YPOGEGRAMMENI -titleMapping '\x1fc7' s = Yield '\x0397' (CC s '\x0342' '\x0345') --- GREEK SMALL LETTER OMEGA WITH PERISPOMENI AND YPOGEGRAMMENI -titleMapping '\x1ff7' s = Yield '\x03a9' (CC s '\x0342' '\x0345') -titleMapping c s = Yield (toTitle c) (CC s '\0' '\0') -foldMapping :: forall s. Char -> s -> Step (CC s) Char -{-# INLINE foldMapping #-} --- MICRO SIGN -foldMapping '\x00b5' s = Yield '\x03bc' (CC s '\x0000' '\x0000') --- LATIN SMALL LETTER SHARP S -foldMapping '\x00df' s = Yield '\x0073' (CC s '\x0073' '\x0000') --- LATIN CAPITAL LETTER I WITH DOT ABOVE -foldMapping '\x0130' s = Yield '\x0069' (CC s '\x0307' '\x0000') --- LATIN SMALL LETTER N PRECEDED BY APOSTROPHE -foldMapping '\x0149' s = Yield '\x02bc' (CC s '\x006e' '\x0000') --- LATIN SMALL LETTER LONG S -foldMapping '\x017f' s = Yield '\x0073' (CC s '\x0000' '\x0000') --- LATIN SMALL LETTER J WITH CARON -foldMapping '\x01f0' s = Yield '\x006a' (CC s '\x030c' '\x0000') --- COMBINING GREEK YPOGEGRAMMENI -foldMapping '\x0345' s = Yield '\x03b9' (CC s '\x0000' '\x0000') --- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS -foldMapping '\x0390' s = Yield '\x03b9' (CC s '\x0308' '\x0301') --- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS -foldMapping '\x03b0' s = Yield '\x03c5' (CC s '\x0308' '\x0301') --- GREEK SMALL LETTER FINAL SIGMA -foldMapping '\x03c2' s = Yield '\x03c3' (CC s '\x0000' '\x0000') --- GREEK BETA SYMBOL -foldMapping '\x03d0' s = Yield '\x03b2' (CC s '\x0000' '\x0000') --- GREEK THETA SYMBOL -foldMapping '\x03d1' s = Yield '\x03b8' (CC s '\x0000' '\x0000') --- GREEK PHI SYMBOL -foldMapping '\x03d5' s = Yield '\x03c6' (CC s '\x0000' '\x0000') --- GREEK PI SYMBOL -foldMapping '\x03d6' s = Yield '\x03c0' (CC s '\x0000' '\x0000') --- GREEK 
KAPPA SYMBOL -foldMapping '\x03f0' s = Yield '\x03ba' (CC s '\x0000' '\x0000') --- GREEK RHO SYMBOL -foldMapping '\x03f1' s = Yield '\x03c1' (CC s '\x0000' '\x0000') --- GREEK LUNATE EPSILON SYMBOL -foldMapping '\x03f5' s = Yield '\x03b5' (CC s '\x0000' '\x0000') --- ARMENIAN SMALL LIGATURE ECH YIWN -foldMapping '\x0587' s = Yield '\x0565' (CC s '\x0582' '\x0000') --- GEORGIAN CAPITAL LETTER YN -foldMapping '\x10c7' s = Yield '\x2d27' (CC s '\x0000' '\x0000') --- GEORGIAN CAPITAL LETTER AEN -foldMapping '\x10cd' s = Yield '\x2d2d' (CC s '\x0000' '\x0000') --- LATIN SMALL LETTER H WITH LINE BELOW -foldMapping '\x1e96' s = Yield '\x0068' (CC s '\x0331' '\x0000') --- LATIN SMALL LETTER T WITH DIAERESIS -foldMapping '\x1e97' s = Yield '\x0074' (CC s '\x0308' '\x0000') --- LATIN SMALL LETTER W WITH RING ABOVE -foldMapping '\x1e98' s = Yield '\x0077' (CC s '\x030a' '\x0000') --- LATIN SMALL LETTER Y WITH RING ABOVE -foldMapping '\x1e99' s = Yield '\x0079' (CC s '\x030a' '\x0000') --- LATIN SMALL LETTER A WITH RIGHT HALF RING -foldMapping '\x1e9a' s = Yield '\x0061' (CC s '\x02be' '\x0000') --- LATIN SMALL LETTER LONG S WITH DOT ABOVE -foldMapping '\x1e9b' s = Yield '\x1e61' (CC s '\x0000' '\x0000') --- LATIN CAPITAL LETTER SHARP S -foldMapping '\x1e9e' s = Yield '\x0073' (CC s '\x0073' '\x0000') --- GREEK SMALL LETTER UPSILON WITH PSILI -foldMapping '\x1f50' s = Yield '\x03c5' (CC s '\x0313' '\x0000') --- GREEK SMALL LETTER UPSILON WITH PSILI AND VARIA -foldMapping '\x1f52' s = Yield '\x03c5' (CC s '\x0313' '\x0300') --- GREEK SMALL LETTER UPSILON WITH PSILI AND OXIA -foldMapping '\x1f54' s = Yield '\x03c5' (CC s '\x0313' '\x0301') --- GREEK SMALL LETTER UPSILON WITH PSILI AND PERISPOMENI -foldMapping '\x1f56' s = Yield '\x03c5' (CC s '\x0313' '\x0342') --- GREEK SMALL LETTER ALPHA WITH PSILI AND YPOGEGRAMMENI -foldMapping '\x1f80' s = Yield '\x1f00' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ALPHA WITH DASIA AND YPOGEGRAMMENI -foldMapping '\x1f81' s = Yield 
'\x1f01' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ALPHA WITH PSILI AND VARIA AND YPOGEGRAMMENI -foldMapping '\x1f82' s = Yield '\x1f02' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ALPHA WITH DASIA AND VARIA AND YPOGEGRAMMENI -foldMapping '\x1f83' s = Yield '\x1f03' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ALPHA WITH PSILI AND OXIA AND YPOGEGRAMMENI -foldMapping '\x1f84' s = Yield '\x1f04' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ALPHA WITH DASIA AND OXIA AND YPOGEGRAMMENI -foldMapping '\x1f85' s = Yield '\x1f05' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ALPHA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI -foldMapping '\x1f86' s = Yield '\x1f06' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ALPHA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI -foldMapping '\x1f87' s = Yield '\x1f07' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER ALPHA WITH PSILI AND PROSGEGRAMMENI -foldMapping '\x1f88' s = Yield '\x1f00' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER ALPHA WITH DASIA AND PROSGEGRAMMENI -foldMapping '\x1f89' s = Yield '\x1f01' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER ALPHA WITH PSILI AND VARIA AND PROSGEGRAMMENI -foldMapping '\x1f8a' s = Yield '\x1f02' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER ALPHA WITH DASIA AND VARIA AND PROSGEGRAMMENI -foldMapping '\x1f8b' s = Yield '\x1f03' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER ALPHA WITH PSILI AND OXIA AND PROSGEGRAMMENI -foldMapping '\x1f8c' s = Yield '\x1f04' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER ALPHA WITH DASIA AND OXIA AND PROSGEGRAMMENI -foldMapping '\x1f8d' s = Yield '\x1f05' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER ALPHA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI -foldMapping '\x1f8e' s = Yield '\x1f06' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER ALPHA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI -foldMapping '\x1f8f' s = Yield '\x1f07' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ETA WITH PSILI AND YPOGEGRAMMENI 
-foldMapping '\x1f90' s = Yield '\x1f20' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ETA WITH DASIA AND YPOGEGRAMMENI -foldMapping '\x1f91' s = Yield '\x1f21' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ETA WITH PSILI AND VARIA AND YPOGEGRAMMENI -foldMapping '\x1f92' s = Yield '\x1f22' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ETA WITH DASIA AND VARIA AND YPOGEGRAMMENI -foldMapping '\x1f93' s = Yield '\x1f23' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ETA WITH PSILI AND OXIA AND YPOGEGRAMMENI -foldMapping '\x1f94' s = Yield '\x1f24' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ETA WITH DASIA AND OXIA AND YPOGEGRAMMENI -foldMapping '\x1f95' s = Yield '\x1f25' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ETA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI -foldMapping '\x1f96' s = Yield '\x1f26' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ETA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI -foldMapping '\x1f97' s = Yield '\x1f27' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER ETA WITH PSILI AND PROSGEGRAMMENI -foldMapping '\x1f98' s = Yield '\x1f20' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER ETA WITH DASIA AND PROSGEGRAMMENI -foldMapping '\x1f99' s = Yield '\x1f21' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER ETA WITH PSILI AND VARIA AND PROSGEGRAMMENI -foldMapping '\x1f9a' s = Yield '\x1f22' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER ETA WITH DASIA AND VARIA AND PROSGEGRAMMENI -foldMapping '\x1f9b' s = Yield '\x1f23' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER ETA WITH PSILI AND OXIA AND PROSGEGRAMMENI -foldMapping '\x1f9c' s = Yield '\x1f24' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER ETA WITH DASIA AND OXIA AND PROSGEGRAMMENI -foldMapping '\x1f9d' s = Yield '\x1f25' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER ETA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI -foldMapping '\x1f9e' s = Yield '\x1f26' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER ETA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI 
-foldMapping '\x1f9f' s = Yield '\x1f27' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER OMEGA WITH PSILI AND YPOGEGRAMMENI -foldMapping '\x1fa0' s = Yield '\x1f60' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER OMEGA WITH DASIA AND YPOGEGRAMMENI -foldMapping '\x1fa1' s = Yield '\x1f61' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER OMEGA WITH PSILI AND VARIA AND YPOGEGRAMMENI -foldMapping '\x1fa2' s = Yield '\x1f62' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER OMEGA WITH DASIA AND VARIA AND YPOGEGRAMMENI -foldMapping '\x1fa3' s = Yield '\x1f63' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER OMEGA WITH PSILI AND OXIA AND YPOGEGRAMMENI -foldMapping '\x1fa4' s = Yield '\x1f64' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER OMEGA WITH DASIA AND OXIA AND YPOGEGRAMMENI -foldMapping '\x1fa5' s = Yield '\x1f65' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER OMEGA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI -foldMapping '\x1fa6' s = Yield '\x1f66' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER OMEGA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI -foldMapping '\x1fa7' s = Yield '\x1f67' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER OMEGA WITH PSILI AND PROSGEGRAMMENI -foldMapping '\x1fa8' s = Yield '\x1f60' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER OMEGA WITH DASIA AND PROSGEGRAMMENI -foldMapping '\x1fa9' s = Yield '\x1f61' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER OMEGA WITH PSILI AND VARIA AND PROSGEGRAMMENI -foldMapping '\x1faa' s = Yield '\x1f62' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER OMEGA WITH DASIA AND VARIA AND PROSGEGRAMMENI -foldMapping '\x1fab' s = Yield '\x1f63' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER OMEGA WITH PSILI AND OXIA AND PROSGEGRAMMENI -foldMapping '\x1fac' s = Yield '\x1f64' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER OMEGA WITH DASIA AND OXIA AND PROSGEGRAMMENI -foldMapping '\x1fad' s = Yield '\x1f65' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER OMEGA WITH PSILI AND PERISPOMENI AND 
PROSGEGRAMMENI -foldMapping '\x1fae' s = Yield '\x1f66' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER OMEGA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI -foldMapping '\x1faf' s = Yield '\x1f67' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ALPHA WITH VARIA AND YPOGEGRAMMENI -foldMapping '\x1fb2' s = Yield '\x1f70' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ALPHA WITH YPOGEGRAMMENI -foldMapping '\x1fb3' s = Yield '\x03b1' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ALPHA WITH OXIA AND YPOGEGRAMMENI -foldMapping '\x1fb4' s = Yield '\x03ac' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ALPHA WITH PERISPOMENI -foldMapping '\x1fb6' s = Yield '\x03b1' (CC s '\x0342' '\x0000') --- GREEK SMALL LETTER ALPHA WITH PERISPOMENI AND YPOGEGRAMMENI -foldMapping '\x1fb7' s = Yield '\x03b1' (CC s '\x0342' '\x03b9') --- GREEK CAPITAL LETTER ALPHA WITH PROSGEGRAMMENI -foldMapping '\x1fbc' s = Yield '\x03b1' (CC s '\x03b9' '\x0000') --- GREEK PROSGEGRAMMENI -foldMapping '\x1fbe' s = Yield '\x03b9' (CC s '\x0000' '\x0000') --- GREEK SMALL LETTER ETA WITH VARIA AND YPOGEGRAMMENI -foldMapping '\x1fc2' s = Yield '\x1f74' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ETA WITH YPOGEGRAMMENI -foldMapping '\x1fc3' s = Yield '\x03b7' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ETA WITH OXIA AND YPOGEGRAMMENI -foldMapping '\x1fc4' s = Yield '\x03ae' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ETA WITH PERISPOMENI -foldMapping '\x1fc6' s = Yield '\x03b7' (CC s '\x0342' '\x0000') --- GREEK SMALL LETTER ETA WITH PERISPOMENI AND YPOGEGRAMMENI -foldMapping '\x1fc7' s = Yield '\x03b7' (CC s '\x0342' '\x03b9') --- GREEK CAPITAL LETTER ETA WITH PROSGEGRAMMENI -foldMapping '\x1fcc' s = Yield '\x03b7' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND VARIA -foldMapping '\x1fd2' s = Yield '\x03b9' (CC s '\x0308' '\x0300') --- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND OXIA -foldMapping '\x1fd3' s = Yield '\x03b9' (CC s '\x0308' '\x0301') --- GREEK SMALL 
LETTER IOTA WITH PERISPOMENI -foldMapping '\x1fd6' s = Yield '\x03b9' (CC s '\x0342' '\x0000') --- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND PERISPOMENI -foldMapping '\x1fd7' s = Yield '\x03b9' (CC s '\x0308' '\x0342') --- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND VARIA -foldMapping '\x1fe2' s = Yield '\x03c5' (CC s '\x0308' '\x0300') --- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND OXIA -foldMapping '\x1fe3' s = Yield '\x03c5' (CC s '\x0308' '\x0301') --- GREEK SMALL LETTER RHO WITH PSILI -foldMapping '\x1fe4' s = Yield '\x03c1' (CC s '\x0313' '\x0000') --- GREEK SMALL LETTER UPSILON WITH PERISPOMENI -foldMapping '\x1fe6' s = Yield '\x03c5' (CC s '\x0342' '\x0000') --- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND PERISPOMENI -foldMapping '\x1fe7' s = Yield '\x03c5' (CC s '\x0308' '\x0342') --- GREEK SMALL LETTER OMEGA WITH VARIA AND YPOGEGRAMMENI -foldMapping '\x1ff2' s = Yield '\x1f7c' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER OMEGA WITH YPOGEGRAMMENI -foldMapping '\x1ff3' s = Yield '\x03c9' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER OMEGA WITH OXIA AND YPOGEGRAMMENI -foldMapping '\x1ff4' s = Yield '\x03ce' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER OMEGA WITH PERISPOMENI -foldMapping '\x1ff6' s = Yield '\x03c9' (CC s '\x0342' '\x0000') --- GREEK SMALL LETTER OMEGA WITH PERISPOMENI AND YPOGEGRAMMENI -foldMapping '\x1ff7' s = Yield '\x03c9' (CC s '\x0342' '\x03b9') --- GREEK CAPITAL LETTER OMEGA WITH PROSGEGRAMMENI -foldMapping '\x1ffc' s = Yield '\x03c9' (CC s '\x03b9' '\x0000') --- COPTIC CAPITAL LETTER BOHAIRIC KHEI -foldMapping '\x2cf2' s = Yield '\x2cf3' (CC s '\x0000' '\x0000') --- LATIN CAPITAL LETTER C WITH BAR -foldMapping '\xa792' s = Yield '\xa793' (CC s '\x0000' '\x0000') --- LATIN CAPITAL LETTER H WITH HOOK -foldMapping '\xa7aa' s = Yield '\x0266' (CC s '\x0000' '\x0000') --- LATIN SMALL LIGATURE FF -foldMapping '\xfb00' s = Yield '\x0066' (CC s '\x0066' '\x0000') --- LATIN SMALL LIGATURE FI -foldMapping '\xfb01' s = Yield 
'\x0066' (CC s '\x0069' '\x0000') --- LATIN SMALL LIGATURE FL -foldMapping '\xfb02' s = Yield '\x0066' (CC s '\x006c' '\x0000') --- LATIN SMALL LIGATURE FFI -foldMapping '\xfb03' s = Yield '\x0066' (CC s '\x0066' '\x0069') --- LATIN SMALL LIGATURE FFL -foldMapping '\xfb04' s = Yield '\x0066' (CC s '\x0066' '\x006c') --- LATIN SMALL LIGATURE LONG S T -foldMapping '\xfb05' s = Yield '\x0073' (CC s '\x0074' '\x0000') --- LATIN SMALL LIGATURE ST -foldMapping '\xfb06' s = Yield '\x0073' (CC s '\x0074' '\x0000') --- ARMENIAN SMALL LIGATURE MEN NOW -foldMapping '\xfb13' s = Yield '\x0574' (CC s '\x0576' '\x0000') --- ARMENIAN SMALL LIGATURE MEN ECH -foldMapping '\xfb14' s = Yield '\x0574' (CC s '\x0565' '\x0000') --- ARMENIAN SMALL LIGATURE MEN INI -foldMapping '\xfb15' s = Yield '\x0574' (CC s '\x056b' '\x0000') --- ARMENIAN SMALL LIGATURE VEW NOW -foldMapping '\xfb16' s = Yield '\x057e' (CC s '\x0576' '\x0000') --- ARMENIAN SMALL LIGATURE MEN XEH -foldMapping '\xfb17' s = Yield '\x0574' (CC s '\x056d' '\x0000') -foldMapping c s = Yield (toLower c) (CC s '\0' '\0') diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Fusion/Common.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Fusion/Common.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Fusion/Common.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Fusion/Common.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,951 +0,0 @@ -{-# LANGUAGE BangPatterns, MagicHash, Rank2Types #-} --- | --- Module : Data.Text.Internal.Fusion.Common --- Copyright : (c) Bryan O'Sullivan 2009, 2012 --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! 
--- --- Common stream fusion functionality for text. - -module Data.Text.Internal.Fusion.Common - ( - -- * Creation and elimination - singleton - , streamList - , unstreamList - , streamCString# - - -- * Basic interface - , cons - , snoc - , append - , head - , uncons - , last - , tail - , init - , null - , lengthI - , compareLengthI - , isSingleton - - -- * Transformations - , map - , intercalate - , intersperse - - -- ** Case conversion - -- $case - , toCaseFold - , toLower - , toTitle - , toUpper - - -- ** Justification - , justifyLeftI - - -- * Folds - , foldl - , foldl' - , foldl1 - , foldl1' - , foldr - , foldr1 - - -- ** Special folds - , concat - , concatMap - , any - , all - , maximum - , minimum - - -- * Construction - -- ** Scans - , scanl - - -- ** Accumulating maps - -- , mapAccumL - - -- ** Generation and unfolding - , replicateCharI - , replicateI - , unfoldr - , unfoldrNI - - -- * Substrings - -- ** Breaking strings - , take - , drop - , takeWhile - , dropWhile - - -- * Predicates - , isPrefixOf - - -- * Searching - , elem - , filter - - -- * Indexing - , findBy - , indexI - , findIndexI - , countCharI - - -- * Zipping and unzipping - , zipWith - ) where - -import Prelude (Bool(..), Char, Eq(..), Int, Integral, Maybe(..), - Ord(..), Ordering(..), String, (.), ($), (+), (-), (*), (++), - (&&), fromIntegral, otherwise) -import qualified Data.List as L -import qualified Prelude as P -import Data.Bits (shiftL) -import Data.Char (isLetter) -import Data.Int (Int64) -import Data.Text.Internal.Fusion.Types -import Data.Text.Internal.Fusion.CaseMapping (foldMapping, lowerMapping, titleMapping, - upperMapping) -import Data.Text.Internal.Fusion.Size -import GHC.Prim (Addr#, chr#, indexCharOffAddr#, ord#) -import GHC.Types (Char(..), Int(..)) - -singleton :: Char -> Stream Char -singleton c = Stream next False 1 - where next False = Yield c True - next True = Done -{-# INLINE [0] singleton #-} - -streamList :: [a] -> Stream a -{-# INLINE [0] streamList #-} 
-streamList s = Stream next s unknownSize - where next [] = Done - next (x:xs) = Yield x xs - -unstreamList :: Stream a -> [a] -unstreamList (Stream next s0 _len) = unfold s0 - where unfold !s = case next s of - Done -> [] - Skip s' -> unfold s' - Yield x s' -> x : unfold s' -{-# INLINE [0] unstreamList #-} - -{-# RULES "STREAM streamList/unstreamList fusion" forall s. streamList (unstreamList s) = s #-} - --- | Stream the UTF-8-like packed encoding used by GHC to represent --- constant strings in generated code. --- --- This encoding uses the byte sequence "\xc0\x80" to represent NUL, --- and the string is NUL-terminated. -streamCString# :: Addr# -> Stream Char -streamCString# addr = Stream step 0 unknownSize - where - step !i - | b == 0 = Done - | b <= 0x7f = Yield (C# b#) (i+1) - | b <= 0xdf = let !c = chr $ ((b-0xc0) `shiftL` 6) + next 1 - in Yield c (i+2) - | b <= 0xef = let !c = chr $ ((b-0xe0) `shiftL` 12) + - (next 1 `shiftL` 6) + - next 2 - in Yield c (i+3) - | otherwise = let !c = chr $ ((b-0xf0) `shiftL` 18) + - (next 1 `shiftL` 12) + - (next 2 `shiftL` 6) + - next 3 - in Yield c (i+4) - where b = I# (ord# b#) - next n = I# (ord# (at# (i+n))) - 0x80 - !b# = at# i - at# (I# i#) = indexCharOffAddr# addr i# - chr (I# i#) = C# (chr# i#) -{-# INLINE [0] streamCString# #-} - --- ---------------------------------------------------------------------------- --- * Basic stream functions - -data C s = C0 !s - | C1 !s - --- | /O(n)/ Adds a character to the front of a Stream Char. -cons :: Char -> Stream Char -> Stream Char -cons !w (Stream next0 s0 len) = Stream next (C1 s0) (len+1) - where - next (C1 s) = Yield w (C0 s) - next (C0 s) = case next0 s of - Done -> Done - Skip s' -> Skip (C0 s') - Yield x s' -> Yield x (C0 s') -{-# INLINE [0] cons #-} - --- | /O(n)/ Adds a character to the end of a stream. 
-snoc :: Stream Char -> Char -> Stream Char -snoc (Stream next0 xs0 len) w = Stream next (J xs0) (len+1) - where - next (J xs) = case next0 xs of - Done -> Yield w N - Skip xs' -> Skip (J xs') - Yield x xs' -> Yield x (J xs') - next N = Done -{-# INLINE [0] snoc #-} - -data E l r = L !l - | R !r - --- | /O(n)/ Appends one Stream to the other. -append :: Stream Char -> Stream Char -> Stream Char -append (Stream next0 s01 len1) (Stream next1 s02 len2) = - Stream next (L s01) (len1 + len2) - where - next (L s1) = case next0 s1 of - Done -> Skip (R s02) - Skip s1' -> Skip (L s1') - Yield x s1' -> Yield x (L s1') - next (R s2) = case next1 s2 of - Done -> Done - Skip s2' -> Skip (R s2') - Yield x s2' -> Yield x (R s2') -{-# INLINE [0] append #-} - --- | /O(1)/ Returns the first character of a Text, which must be non-empty. --- Subject to array fusion. -head :: Stream Char -> Char -head (Stream next s0 _len) = loop_head s0 - where - loop_head !s = case next s of - Yield x _ -> x - Skip s' -> loop_head s' - Done -> head_empty -{-# INLINE [0] head #-} - -head_empty :: a -head_empty = streamError "head" "Empty stream" -{-# NOINLINE head_empty #-} - --- | /O(1)/ Returns the first character and remainder of a 'Stream --- Char', or 'Nothing' if empty. Subject to array fusion. -uncons :: Stream Char -> Maybe (Char, Stream Char) -uncons (Stream next s0 len) = loop_uncons s0 - where - loop_uncons !s = case next s of - Yield x s1 -> Just (x, Stream next s1 (len-1)) - Skip s' -> loop_uncons s' - Done -> Nothing -{-# INLINE [0] uncons #-} - --- | /O(n)/ Returns the last character of a 'Stream Char', which must --- be non-empty. 
-last :: Stream Char -> Char -last (Stream next s0 _len) = loop0_last s0 - where - loop0_last !s = case next s of - Done -> emptyError "last" - Skip s' -> loop0_last s' - Yield x s' -> loop_last x s' - loop_last !x !s = case next s of - Done -> x - Skip s' -> loop_last x s' - Yield x' s' -> loop_last x' s' -{-# INLINE[0] last #-} - --- | /O(1)/ Returns all characters after the head of a Stream Char, which must --- be non-empty. -tail :: Stream Char -> Stream Char -tail (Stream next0 s0 len) = Stream next (C0 s0) (len-1) - where - next (C0 s) = case next0 s of - Done -> emptyError "tail" - Skip s' -> Skip (C0 s') - Yield _ s' -> Skip (C1 s') - next (C1 s) = case next0 s of - Done -> Done - Skip s' -> Skip (C1 s') - Yield x s' -> Yield x (C1 s') -{-# INLINE [0] tail #-} - -data Init s = Init0 !s - | Init1 {-# UNPACK #-} !Char !s - --- | /O(1)/ Returns all but the last character of a Stream Char, which --- must be non-empty. -init :: Stream Char -> Stream Char -init (Stream next0 s0 len) = Stream next (Init0 s0) (len-1) - where - next (Init0 s) = case next0 s of - Done -> emptyError "init" - Skip s' -> Skip (Init0 s') - Yield x s' -> Skip (Init1 x s') - next (Init1 x s) = case next0 s of - Done -> Done - Skip s' -> Skip (Init1 x s') - Yield x' s' -> Yield x (Init1 x' s') -{-# INLINE [0] init #-} - --- | /O(1)/ Tests whether a Stream Char is empty or not. -null :: Stream Char -> Bool -null (Stream next s0 _len) = loop_null s0 - where - loop_null !s = case next s of - Done -> True - Yield _ _ -> False - Skip s' -> loop_null s' -{-# INLINE[0] null #-} - --- | /O(n)/ Returns the number of characters in a string. -lengthI :: Integral a => Stream Char -> a -lengthI (Stream next s0 _len) = loop_length 0 s0 - where - loop_length !z s = case next s of - Done -> z - Skip s' -> loop_length z s' - Yield _ s' -> loop_length (z + 1) s' -{-# INLINE[0] lengthI #-} - --- | /O(n)/ Compares the count of characters in a string to a number. --- Subject to fusion. 
--- --- This function gives the same answer as comparing against the result --- of 'lengthI', but can short circuit if the count of characters is --- greater than the number or if the stream can't possibly be as long --- as the number supplied, and hence be more efficient. -compareLengthI :: Integral a => Stream Char -> a -> Ordering -compareLengthI (Stream next s0 len) n = - case compareSize len (fromIntegral n) of - Just o -> o - Nothing -> loop_cmp 0 s0 - where - loop_cmp !z s = case next s of - Done -> compare z n - Skip s' -> loop_cmp z s' - Yield _ s' | z > n -> GT - | otherwise -> loop_cmp (z + 1) s' -{-# INLINE[0] compareLengthI #-} - --- | /O(n)/ Indicate whether a string contains exactly one element. -isSingleton :: Stream Char -> Bool -isSingleton (Stream next s0 _len) = loop 0 s0 - where - loop !z s = case next s of - Done -> z == (1::Int) - Skip s' -> loop z s' - Yield _ s' - | z >= 1 -> False - | otherwise -> loop (z+1) s' -{-# INLINE[0] isSingleton #-} - --- ---------------------------------------------------------------------------- --- * Stream transformations - --- | /O(n)/ 'map' @f @xs is the Stream Char obtained by applying @f@ --- to each element of @xs@. -map :: (Char -> Char) -> Stream Char -> Stream Char -map f (Stream next0 s0 len) = Stream next s0 len - where - next !s = case next0 s of - Done -> Done - Skip s' -> Skip s' - Yield x s' -> Yield (f x) s' -{-# INLINE [0] map #-} - -{-# - RULES "STREAM map/map fusion" forall f g s. - map f (map g s) = map (\x -> f (g x)) s - #-} - -data I s = I1 !s - | I2 !s {-# UNPACK #-} !Char - | I3 !s - --- | /O(n)/ Take a character and place it between each of the --- characters of a 'Stream Char'. 
-intersperse :: Char -> Stream Char -> Stream Char -intersperse c (Stream next0 s0 len) = Stream next (I1 s0) len - where - next (I1 s) = case next0 s of - Done -> Done - Skip s' -> Skip (I1 s') - Yield x s' -> Skip (I2 s' x) - next (I2 s x) = Yield x (I3 s) - next (I3 s) = case next0 s of - Done -> Done - Skip s' -> Skip (I3 s') - Yield x s' -> Yield c (I2 s' x) -{-# INLINE [0] intersperse #-} - --- ---------------------------------------------------------------------------- --- ** Case conversions (folds) - --- $case --- --- With Unicode text, it is incorrect to use combinators like @map --- toUpper@ to case convert each character of a string individually. --- Instead, use the whole-string case conversion functions from this --- module. For correctness in different writing systems, these --- functions may map one input character to two or three output --- characters. - -caseConvert :: (forall s. Char -> s -> Step (CC s) Char) - -> Stream Char -> Stream Char -caseConvert remap (Stream next0 s0 len) = Stream next (CC s0 '\0' '\0') len - where - next (CC s '\0' _) = - case next0 s of - Done -> Done - Skip s' -> Skip (CC s' '\0' '\0') - Yield c s' -> remap c s' - next (CC s a b) = Yield a (CC s b '\0') - --- | /O(n)/ Convert a string to folded case. This function is mainly --- useful for performing caseless (or case insensitive) string --- comparisons. --- --- A string @x@ is a caseless match for a string @y@ if and only if: --- --- @toCaseFold x == toCaseFold y@ --- --- The result string may be longer than the input string, and may --- differ from applying 'toLower' to the input string. For instance, --- the Armenian small ligature men now (U+FB13) is case folded to the --- bigram men now (U+0574 U+0576), while the micro sign (U+00B5) is --- case folded to the Greek small letter letter mu (U+03BC) instead of --- itself. 
-toCaseFold :: Stream Char -> Stream Char -toCaseFold = caseConvert foldMapping -{-# INLINE [0] toCaseFold #-} - --- | /O(n)/ Convert a string to upper case, using simple case --- conversion. The result string may be longer than the input string. --- For instance, the German eszett (U+00DF) maps to the two-letter --- sequence SS. -toUpper :: Stream Char -> Stream Char -toUpper = caseConvert upperMapping -{-# INLINE [0] toUpper #-} - --- | /O(n)/ Convert a string to lower case, using simple case --- conversion. The result string may be longer than the input string. --- For instance, the Latin capital letter I with dot above (U+0130) --- maps to the sequence Latin small letter i (U+0069) followed by --- combining dot above (U+0307). -toLower :: Stream Char -> Stream Char -toLower = caseConvert lowerMapping -{-# INLINE [0] toLower #-} - --- | /O(n)/ Convert a string to title case, using simple case --- conversion. --- --- The first letter of the input is converted to title case, as is --- every subsequent letter that immediately follows a non-letter. --- Every letter that immediately follows another letter is converted --- to lower case. --- --- The result string may be longer than the input string. For example, --- the Latin small ligature fl (U+FB02) is converted to the --- sequence Latin capital letter F (U+0046) followed by Latin small --- letter l (U+006C). --- --- /Note/: this function does not take language or culture specific --- rules into account. For instance, in English, different style --- guides disagree on whether the book name \"The Hill of the Red --- Fox\" is correctly title cased—but this function will --- capitalize /every/ word. 
-toTitle :: Stream Char -> Stream Char -toTitle (Stream next0 s0 len) = Stream next (CC (False :*: s0) '\0' '\0') len - where - next (CC (letter :*: s) '\0' _) = - case next0 s of - Done -> Done - Skip s' -> Skip (CC (letter :*: s') '\0' '\0') - Yield c s' - | letter' -> if letter - then lowerMapping c (letter' :*: s') - else titleMapping c (letter' :*: s') - | otherwise -> Yield c (CC (letter' :*: s') '\0' '\0') - where letter' = isLetter c - next (CC s a b) = Yield a (CC s b '\0') -{-# INLINE [0] toTitle #-} - -justifyLeftI :: Integral a => a -> Char -> Stream Char -> Stream Char -justifyLeftI k c (Stream next0 s0 len) = - Stream next (s0 :*: S1 :*: 0) (larger (fromIntegral k) len) - where - next (s :*: S1 :*: n) = - case next0 s of - Done -> next (s :*: S2 :*: n) - Skip s' -> Skip (s' :*: S1 :*: n) - Yield x s' -> Yield x (s' :*: S1 :*: n+1) - next (s :*: S2 :*: n) - | n < k = Yield c (s :*: S2 :*: n+1) - | otherwise = Done - {-# INLINE next #-} -{-# INLINE [0] justifyLeftI #-} - --- ---------------------------------------------------------------------------- --- * Reducing Streams (folds) - --- | foldl, applied to a binary operator, a starting value (typically the --- left-identity of the operator), and a Stream, reduces the Stream using the --- binary operator, from left to right. -foldl :: (b -> Char -> b) -> b -> Stream Char -> b -foldl f z0 (Stream next s0 _len) = loop_foldl z0 s0 - where - loop_foldl z !s = case next s of - Done -> z - Skip s' -> loop_foldl z s' - Yield x s' -> loop_foldl (f z x) s' -{-# INLINE [0] foldl #-} - --- | A strict version of foldl. -foldl' :: (b -> Char -> b) -> b -> Stream Char -> b -foldl' f z0 (Stream next s0 _len) = loop_foldl' z0 s0 - where - loop_foldl' !z !s = case next s of - Done -> z - Skip s' -> loop_foldl' z s' - Yield x s' -> loop_foldl' (f z x) s' -{-# INLINE [0] foldl' #-} - --- | foldl1 is a variant of foldl that has no starting value argument, --- and thus must be applied to non-empty Streams. 
-foldl1 :: (Char -> Char -> Char) -> Stream Char -> Char -foldl1 f (Stream next s0 _len) = loop0_foldl1 s0 - where - loop0_foldl1 !s = case next s of - Skip s' -> loop0_foldl1 s' - Yield x s' -> loop_foldl1 x s' - Done -> emptyError "foldl1" - loop_foldl1 z !s = case next s of - Done -> z - Skip s' -> loop_foldl1 z s' - Yield x s' -> loop_foldl1 (f z x) s' -{-# INLINE [0] foldl1 #-} - --- | A strict version of foldl1. -foldl1' :: (Char -> Char -> Char) -> Stream Char -> Char -foldl1' f (Stream next s0 _len) = loop0_foldl1' s0 - where - loop0_foldl1' !s = case next s of - Skip s' -> loop0_foldl1' s' - Yield x s' -> loop_foldl1' x s' - Done -> emptyError "foldl1" - loop_foldl1' !z !s = case next s of - Done -> z - Skip s' -> loop_foldl1' z s' - Yield x s' -> loop_foldl1' (f z x) s' -{-# INLINE [0] foldl1' #-} - --- | 'foldr', applied to a binary operator, a starting value (typically the --- right-identity of the operator), and a stream, reduces the stream using the --- binary operator, from right to left. -foldr :: (Char -> b -> b) -> b -> Stream Char -> b -foldr f z (Stream next s0 _len) = loop_foldr s0 - where - loop_foldr !s = case next s of - Done -> z - Skip s' -> loop_foldr s' - Yield x s' -> f x (loop_foldr s') -{-# INLINE [0] foldr #-} - --- | foldr1 is a variant of 'foldr' that has no starting value argument, --- and thus must be applied to non-empty streams. --- Subject to array fusion. -foldr1 :: (Char -> Char -> Char) -> Stream Char -> Char -foldr1 f (Stream next s0 _len) = loop0_foldr1 s0 - where - loop0_foldr1 !s = case next s of - Done -> emptyError "foldr1" - Skip s' -> loop0_foldr1 s' - Yield x s' -> loop_foldr1 x s' - - loop_foldr1 x !s = case next s of - Done -> x - Skip s' -> loop_foldr1 x s' - Yield x' s' -> f x (loop_foldr1 x' s') -{-# INLINE [0] foldr1 #-} - -intercalate :: Stream Char -> [Stream Char] -> Stream Char -intercalate s = concat . 
(L.intersperse s) -{-# INLINE [0] intercalate #-} - --- ---------------------------------------------------------------------------- --- ** Special folds - --- | /O(n)/ Concatenate a list of streams. Subject to array fusion. -concat :: [Stream Char] -> Stream Char -concat = L.foldr append empty -{-# INLINE [0] concat #-} - --- | Map a function over a stream that results in a stream and concatenate the --- results. -concatMap :: (Char -> Stream Char) -> Stream Char -> Stream Char -concatMap f = foldr (append . f) empty -{-# INLINE [0] concatMap #-} - --- | /O(n)/ any @p @xs determines if any character in the stream --- @xs@ satisifes the predicate @p@. -any :: (Char -> Bool) -> Stream Char -> Bool -any p (Stream next0 s0 _len) = loop_any s0 - where - loop_any !s = case next0 s of - Done -> False - Skip s' -> loop_any s' - Yield x s' | p x -> True - | otherwise -> loop_any s' -{-# INLINE [0] any #-} - --- | /O(n)/ all @p @xs determines if all characters in the 'Text' --- @xs@ satisify the predicate @p@. -all :: (Char -> Bool) -> Stream Char -> Bool -all p (Stream next0 s0 _len) = loop_all s0 - where - loop_all !s = case next0 s of - Done -> True - Skip s' -> loop_all s' - Yield x s' | p x -> loop_all s' - | otherwise -> False -{-# INLINE [0] all #-} - --- | /O(n)/ maximum returns the maximum value from a stream, which must be --- non-empty. -maximum :: Stream Char -> Char -maximum (Stream next0 s0 _len) = loop0_maximum s0 - where - loop0_maximum !s = case next0 s of - Done -> emptyError "maximum" - Skip s' -> loop0_maximum s' - Yield x s' -> loop_maximum x s' - loop_maximum !z !s = case next0 s of - Done -> z - Skip s' -> loop_maximum z s' - Yield x s' - | x > z -> loop_maximum x s' - | otherwise -> loop_maximum z s' -{-# INLINE [0] maximum #-} - --- | /O(n)/ minimum returns the minimum value from a 'Text', which must be --- non-empty. 
-minimum :: Stream Char -> Char -minimum (Stream next0 s0 _len) = loop0_minimum s0 - where - loop0_minimum !s = case next0 s of - Done -> emptyError "minimum" - Skip s' -> loop0_minimum s' - Yield x s' -> loop_minimum x s' - loop_minimum !z !s = case next0 s of - Done -> z - Skip s' -> loop_minimum z s' - Yield x s' - | x < z -> loop_minimum x s' - | otherwise -> loop_minimum z s' -{-# INLINE [0] minimum #-} - --- ----------------------------------------------------------------------------- --- * Building streams - -scanl :: (Char -> Char -> Char) -> Char -> Stream Char -> Stream Char -scanl f z0 (Stream next0 s0 len) = Stream next (S1 :*: z0 :*: s0) (len+1) -- HINT maybe too low - where - {-# INLINE next #-} - next (S1 :*: z :*: s) = Yield z (S2 :*: z :*: s) - next (S2 :*: z :*: s) = case next0 s of - Yield x s' -> let !x' = f z x - in Yield x' (S2 :*: x' :*: s') - Skip s' -> Skip (S2 :*: z :*: s') - Done -> Done -{-# INLINE [0] scanl #-} - --- ----------------------------------------------------------------------------- --- ** Accumulating maps - -{- --- | /O(n)/ Like a combination of 'map' and 'foldl'. Applies a --- function to each element of a stream, passing an accumulating --- parameter from left to right, and returns a final stream. --- --- /Note/: Unlike the version over lists, this function does not --- return a final value for the accumulator, because the nature of --- streams precludes it. 
-mapAccumL :: (a -> b -> (a,b)) -> a -> Stream b -> Stream b -mapAccumL f z0 (Stream next0 s0 len) = Stream next (s0 :*: z0) len -- HINT depends on f - where - {-# INLINE next #-} - next (s :*: z) = case next0 s of - Yield x s' -> let (z',y) = f z x - in Yield y (s' :*: z') - Skip s' -> Skip (s' :*: z) - Done -> Done -{-# INLINE [0] mapAccumL #-} --} - --- ----------------------------------------------------------------------------- --- ** Generating and unfolding streams - -replicateCharI :: Integral a => a -> Char -> Stream Char -replicateCharI n c - | n < 0 = empty - | otherwise = Stream next 0 (fromIntegral n) -- HINT maybe too low - where - next i | i >= n = Done - | otherwise = Yield c (i + 1) -{-# INLINE [0] replicateCharI #-} - -data RI s = RI !s {-# UNPACK #-} !Int64 - -replicateI :: Int64 -> Stream Char -> Stream Char -replicateI n (Stream next0 s0 len) = - Stream next (RI s0 0) (fromIntegral (max 0 n) * len) - where - next (RI s k) - | k >= n = Done - | otherwise = case next0 s of - Done -> Skip (RI s0 (k+1)) - Skip s' -> Skip (RI s' k) - Yield x s' -> Yield x (RI s' k) -{-# INLINE [0] replicateI #-} - --- | /O(n)/, where @n@ is the length of the result. The unfoldr function --- is analogous to the List 'unfoldr'. unfoldr builds a stream --- from a seed value. The function takes the element and returns --- Nothing if it is done producing the stream or returns Just --- (a,b), in which case, a is the next Char in the string, and b is --- the seed value for further production. -unfoldr :: (a -> Maybe (Char,a)) -> a -> Stream Char -unfoldr f s0 = Stream next s0 1 -- HINT maybe too low - where - {-# INLINE next #-} - next !s = case f s of - Nothing -> Done - Just (w, s') -> Yield w s' -{-# INLINE [0] unfoldr #-} - --- | /O(n)/ Like 'unfoldr', 'unfoldrNI' builds a stream from a seed --- value. However, the length of the result is limited by the --- first argument to 'unfoldrNI'. 
This function is more efficient than --- 'unfoldr' when the length of the result is known. -unfoldrNI :: Integral a => a -> (b -> Maybe (Char,b)) -> b -> Stream Char -unfoldrNI n f s0 | n < 0 = empty - | otherwise = Stream next (0 :*: s0) (fromIntegral (n*2)) -- HINT maybe too high - where - {-# INLINE next #-} - next (z :*: s) = case f s of - Nothing -> Done - Just (w, s') | z >= n -> Done - | otherwise -> Yield w ((z + 1) :*: s') -{-# INLINE unfoldrNI #-} - -------------------------------------------------------------------------------- --- * Substreams - --- | /O(n)/ take n, applied to a stream, returns the prefix of the --- stream of length @n@, or the stream itself if @n@ is greater than the --- length of the stream. -take :: Integral a => a -> Stream Char -> Stream Char -take n0 (Stream next0 s0 len) = - Stream next (n0 :*: s0) (smaller len (fromIntegral (max 0 n0))) - where - {-# INLINE next #-} - next (n :*: s) | n <= 0 = Done - | otherwise = case next0 s of - Done -> Done - Skip s' -> Skip (n :*: s') - Yield x s' -> Yield x ((n-1) :*: s') -{-# INLINE [0] take #-} - --- | /O(n)/ drop n, applied to a stream, returns the suffix of the --- stream after the first @n@ characters, or the empty stream if @n@ --- is greater than the length of the stream. -drop :: Integral a => a -> Stream Char -> Stream Char -drop n0 (Stream next0 s0 len) = - Stream next (J n0 :*: s0) (len - fromIntegral (max 0 n0)) - where - {-# INLINE next #-} - next (J n :*: s) - | n <= 0 = Skip (N :*: s) - | otherwise = case next0 s of - Done -> Done - Skip s' -> Skip (J n :*: s') - Yield _ s' -> Skip (J (n-1) :*: s') - next (N :*: s) = case next0 s of - Done -> Done - Skip s' -> Skip (N :*: s') - Yield x s' -> Yield x (N :*: s') -{-# INLINE [0] drop #-} - --- | takeWhile, applied to a predicate @p@ and a stream, returns the --- longest prefix (possibly empty) of elements that satisfy p. 
-takeWhile :: (Char -> Bool) -> Stream Char -> Stream Char -takeWhile p (Stream next0 s0 len) = Stream next s0 len -- HINT maybe too high - where - {-# INLINE next #-} - next !s = case next0 s of - Done -> Done - Skip s' -> Skip s' - Yield x s' | p x -> Yield x s' - | otherwise -> Done -{-# INLINE [0] takeWhile #-} - --- | dropWhile @p @xs returns the suffix remaining after takeWhile @p @xs. -dropWhile :: (Char -> Bool) -> Stream Char -> Stream Char -dropWhile p (Stream next0 s0 len) = Stream next (S1 :*: s0) len -- HINT maybe too high - where - {-# INLINE next #-} - next (S1 :*: s) = case next0 s of - Done -> Done - Skip s' -> Skip (S1 :*: s') - Yield x s' | p x -> Skip (S1 :*: s') - | otherwise -> Yield x (S2 :*: s') - next (S2 :*: s) = case next0 s of - Done -> Done - Skip s' -> Skip (S2 :*: s') - Yield x s' -> Yield x (S2 :*: s') -{-# INLINE [0] dropWhile #-} - --- | /O(n)/ The 'isPrefixOf' function takes two 'Stream's and returns --- 'True' iff the first is a prefix of the second. -isPrefixOf :: (Eq a) => Stream a -> Stream a -> Bool -isPrefixOf (Stream next1 s1 _) (Stream next2 s2 _) = loop (next1 s1) (next2 s2) - where - loop Done _ = True - loop _ Done = False - loop (Skip s1') (Skip s2') = loop (next1 s1') (next2 s2') - loop (Skip s1') x2 = loop (next1 s1') x2 - loop x1 (Skip s2') = loop x1 (next2 s2') - loop (Yield x1 s1') (Yield x2 s2') = x1 == x2 && - loop (next1 s1') (next2 s2') -{-# INLINE [0] isPrefixOf #-} - --- ---------------------------------------------------------------------------- --- * Searching - -------------------------------------------------------------------------------- --- ** Searching by equality - --- | /O(n)/ elem is the stream membership predicate. 
-elem :: Char -> Stream Char -> Bool -elem w (Stream next s0 _len) = loop_elem s0 - where - loop_elem !s = case next s of - Done -> False - Skip s' -> loop_elem s' - Yield x s' | x == w -> True - | otherwise -> loop_elem s' -{-# INLINE [0] elem #-} - -------------------------------------------------------------------------------- --- ** Searching with a predicate - --- | /O(n)/ The 'findBy' function takes a predicate and a stream, --- and returns the first element in matching the predicate, or 'Nothing' --- if there is no such element. - -findBy :: (Char -> Bool) -> Stream Char -> Maybe Char -findBy p (Stream next s0 _len) = loop_find s0 - where - loop_find !s = case next s of - Done -> Nothing - Skip s' -> loop_find s' - Yield x s' | p x -> Just x - | otherwise -> loop_find s' -{-# INLINE [0] findBy #-} - --- | /O(n)/ Stream index (subscript) operator, starting from 0. -indexI :: Integral a => Stream Char -> a -> Char -indexI (Stream next s0 _len) n0 - | n0 < 0 = streamError "index" "Negative index" - | otherwise = loop_index n0 s0 - where - loop_index !n !s = case next s of - Done -> streamError "index" "Index too large" - Skip s' -> loop_index n s' - Yield x s' | n == 0 -> x - | otherwise -> loop_index (n-1) s' -{-# INLINE [0] indexI #-} - --- | /O(n)/ 'filter', applied to a predicate and a stream, --- returns a stream containing those characters that satisfy the --- predicate. -filter :: (Char -> Bool) -> Stream Char -> Stream Char -filter p (Stream next0 s0 len) = Stream next s0 len -- HINT maybe too high - where - next !s = case next0 s of - Done -> Done - Skip s' -> Skip s' - Yield x s' | p x -> Yield x s' - | otherwise -> Skip s' -{-# INLINE [0] filter #-} - -{-# RULES - "STREAM filter/filter fusion" forall p q s. - filter p (filter q s) = filter (\x -> q x && p x) s - #-} - --- | The 'findIndexI' function takes a predicate and a stream and --- returns the index of the first element in the stream satisfying the --- predicate. 
-findIndexI :: Integral a => (Char -> Bool) -> Stream Char -> Maybe a -findIndexI p s = case findIndicesI p s of - (i:_) -> Just i - _ -> Nothing -{-# INLINE [0] findIndexI #-} - --- | The 'findIndicesI' function takes a predicate and a stream and --- returns all indices of the elements in the stream satisfying the --- predicate. -findIndicesI :: Integral a => (Char -> Bool) -> Stream Char -> [a] -findIndicesI p (Stream next s0 _len) = loop_findIndex 0 s0 - where - loop_findIndex !i !s = case next s of - Done -> [] - Skip s' -> loop_findIndex i s' -- hmm. not caught by QC - Yield x s' | p x -> i : loop_findIndex (i+1) s' - | otherwise -> loop_findIndex (i+1) s' -{-# INLINE [0] findIndicesI #-} - -------------------------------------------------------------------------------- --- * Zipping - --- | zipWith generalises 'zip' by zipping with the function given as --- the first argument, instead of a tupling function. -zipWith :: (a -> a -> b) -> Stream a -> Stream a -> Stream b -zipWith f (Stream next0 sa0 len1) (Stream next1 sb0 len2) = - Stream next (sa0 :*: sb0 :*: N) (smaller len1 len2) - where - next (sa :*: sb :*: N) = case next0 sa of - Done -> Done - Skip sa' -> Skip (sa' :*: sb :*: N) - Yield a sa' -> Skip (sa' :*: sb :*: J a) - - next (sa' :*: sb :*: J a) = case next1 sb of - Done -> Done - Skip sb' -> Skip (sa' :*: sb' :*: J a) - Yield b sb' -> Yield (f a b) (sa' :*: sb' :*: N) -{-# INLINE [0] zipWith #-} - --- | /O(n)/ The 'countCharI' function returns the number of times the --- query element appears in the given stream. -countCharI :: Integral a => Char -> Stream Char -> a -countCharI a (Stream next s0 _len) = loop 0 s0 - where - loop !i !s = case next s of - Done -> i - Skip s' -> loop i s' - Yield x s' | a == x -> loop (i+1) s' - | otherwise -> loop i s' -{-# INLINE [0] countCharI #-} - -streamError :: String -> String -> a -streamError func msg = P.error $ "Data.Text.Internal.Fusion.Common." 
++ func ++ ": " ++ msg - -emptyError :: String -> a -emptyError func = internalError func "Empty input" - -internalError :: String -> a -internalError func = streamError func "Internal error" diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Fusion/Size.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Fusion/Size.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Fusion/Size.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Fusion/Size.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,157 +0,0 @@ -{-# LANGUAGE CPP #-} -{-# OPTIONS_GHC -fno-warn-missing-methods #-} --- | --- Module : Data.Text.Internal.Fusion.Internal --- Copyright : (c) Roman Leshchinskiy 2008, --- (c) Bryan O'Sullivan 2009 --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : portable --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- Size hints. - -module Data.Text.Internal.Fusion.Size - ( - Size - , exactly - , exactSize - , maxSize - , betweenSize - , unknownSize - , smaller - , larger - , upperBound - , lowerBound - , compareSize - , isEmpty - ) where - -import Data.Text.Internal (mul) -#if defined(ASSERTS) -import Control.Exception (assert) -#endif - -data Size = Between {-# UNPACK #-} !Int {-# UNPACK #-} !Int -- ^ Lower and upper bounds on size. - | Unknown -- ^ Unknown size. 
- deriving (Eq, Show) - -exactly :: Size -> Maybe Int -exactly (Between na nb) | na == nb = Just na -exactly _ = Nothing -{-# INLINE exactly #-} - -exactSize :: Int -> Size -exactSize n = -#if defined(ASSERTS) - assert (n >= 0) -#endif - Between n n -{-# INLINE exactSize #-} - -maxSize :: Int -> Size -maxSize n = -#if defined(ASSERTS) - assert (n >= 0) -#endif - Between 0 n -{-# INLINE maxSize #-} - -betweenSize :: Int -> Int -> Size -betweenSize m n = -#if defined(ASSERTS) - assert (m >= 0) - assert (n >= m) -#endif - Between m n -{-# INLINE betweenSize #-} - -unknownSize :: Size -unknownSize = Unknown -{-# INLINE unknownSize #-} - -instance Num Size where - (+) = addSize - (-) = subtractSize - (*) = mulSize - - fromInteger = f where f = exactSize . fromInteger - {-# INLINE f #-} - -add :: Int -> Int -> Int -add m n | mn >= 0 = mn - | otherwise = overflowError - where mn = m + n -{-# INLINE add #-} - -addSize :: Size -> Size -> Size -addSize (Between ma mb) (Between na nb) = Between (add ma na) (add mb nb) -addSize _ _ = Unknown -{-# INLINE addSize #-} - -subtractSize :: Size -> Size -> Size -subtractSize (Between ma mb) (Between na nb) = Between (max (ma-nb) 0) (max (mb-na) 0) -subtractSize a@(Between 0 _) Unknown = a -subtractSize (Between _ mb) Unknown = Between 0 mb -subtractSize _ _ = Unknown -{-# INLINE subtractSize #-} - -mulSize :: Size -> Size -> Size -mulSize (Between ma mb) (Between na nb) = Between (mul ma na) (mul mb nb) -mulSize _ _ = Unknown -{-# INLINE mulSize #-} - --- | Minimum of two size hints. -smaller :: Size -> Size -> Size -smaller a@(Between ma mb) b@(Between na nb) - | mb <= na = a - | nb <= ma = b - | otherwise = Between (ma `min` na) (mb `min` nb) -smaller a@(Between 0 _) Unknown = a -smaller (Between _ mb) Unknown = Between 0 mb -smaller Unknown b@(Between 0 _) = b -smaller Unknown (Between _ nb) = Between 0 nb -smaller Unknown Unknown = Unknown -{-# INLINE smaller #-} - --- | Maximum of two size hints. 
-larger :: Size -> Size -> Size -larger a@(Between ma mb) b@(Between na nb) - | ma >= nb = a - | na >= mb = b - | otherwise = Between (ma `max` na) (mb `max` nb) -larger _ _ = Unknown -{-# INLINE larger #-} - --- | Compute the maximum size from a size hint, if possible. -upperBound :: Int -> Size -> Int -upperBound _ (Between _ n) = n -upperBound k _ = k -{-# INLINE upperBound #-} - --- | Compute the maximum size from a size hint, if possible. -lowerBound :: Int -> Size -> Int -lowerBound _ (Between n _) = n -lowerBound k _ = k -{-# INLINE lowerBound #-} - -compareSize :: Size -> Int -> Maybe Ordering -compareSize (Between ma mb) n - | mb < n = Just LT - | ma > n = Just GT - | ma == n && mb == n = Just EQ -compareSize _ _ = Nothing - - -isEmpty :: Size -> Bool -isEmpty (Between _ n) = n <= 0 -isEmpty _ = False -{-# INLINE isEmpty #-} - -overflowError :: Int -overflowError = error "Data.Text.Internal.Fusion.Size: size overflow" diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Fusion/Types.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Fusion/Types.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Fusion/Types.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Fusion/Types.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,127 +0,0 @@ -{-# LANGUAGE BangPatterns, ExistentialQuantification #-} --- | --- Module : Data.Text.Internal.Fusion.Types --- Copyright : (c) Tom Harper 2008-2009, --- (c) Bryan O'Sullivan 2009, --- (c) Duncan Coutts 2009, --- (c) Jasper Van der Jeugt 2011 --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- Core stream fusion functionality for text. 
- -module Data.Text.Internal.Fusion.Types - ( - CC(..) - , M(..) - , M8 - , PairS(..) - , RS(..) - , Step(..) - , Stream(..) - , Switch(..) - , empty - ) where - -import Data.Text.Internal.Fusion.Size -import Data.Word (Word8) - --- | Specialised tuple for case conversion. -data CC s = CC !s {-# UNPACK #-} !Char {-# UNPACK #-} !Char - --- | Specialised, strict Maybe-like type. -data M a = N - | J !a - -type M8 = M Word8 - --- Restreaming state. -data RS s - = RS0 !s - | RS1 !s {-# UNPACK #-} !Word8 - | RS2 !s {-# UNPACK #-} !Word8 {-# UNPACK #-} !Word8 - | RS3 !s {-# UNPACK #-} !Word8 {-# UNPACK #-} !Word8 {-# UNPACK #-} !Word8 - -infixl 2 :*: -data PairS a b = !a :*: !b - -- deriving (Eq, Ord, Show) - --- | Allow a function over a stream to switch between two states. -data Switch = S1 | S2 - -data Step s a = Done - | Skip !s - | Yield !a !s - -{- -instance (Show a) => Show (Step s a) - where show Done = "Done" - show (Skip _) = "Skip" - show (Yield x _) = "Yield " ++ show x --} - -instance (Eq a) => Eq (Stream a) where - (==) = eq - -instance (Ord a) => Ord (Stream a) where - compare = cmp - --- The length hint in a Stream has two roles. If its value is zero, --- we trust it, and treat the stream as empty. Otherwise, we treat it --- as a hint: it should usually be accurate, so we use it when --- unstreaming to decide what size array to allocate. However, the --- unstreaming functions must be able to cope with the hint being too --- small or too large. --- --- The size hint tries to track the UTF-16 code points in a stream, --- but often counts the number of characters instead. It can easily --- undercount if, for instance, a transformed stream contains astral --- plane characters (those above 0x10000). - -data Stream a = - forall s. Stream - (s -> Step s a) -- stepper function - !s -- current state - !Size -- size hint - --- | /O(n)/ Determines if two streams are equal. 
-eq :: (Eq a) => Stream a -> Stream a -> Bool -eq (Stream next1 s1 _) (Stream next2 s2 _) = loop (next1 s1) (next2 s2) - where - loop Done Done = True - loop (Skip s1') (Skip s2') = loop (next1 s1') (next2 s2') - loop (Skip s1') x2 = loop (next1 s1') x2 - loop x1 (Skip s2') = loop x1 (next2 s2') - loop Done _ = False - loop _ Done = False - loop (Yield x1 s1') (Yield x2 s2') = x1 == x2 && - loop (next1 s1') (next2 s2') -{-# INLINE [0] eq #-} - -cmp :: (Ord a) => Stream a -> Stream a -> Ordering -cmp (Stream next1 s1 _) (Stream next2 s2 _) = loop (next1 s1) (next2 s2) - where - loop Done Done = EQ - loop (Skip s1') (Skip s2') = loop (next1 s1') (next2 s2') - loop (Skip s1') x2 = loop (next1 s1') x2 - loop x1 (Skip s2') = loop x1 (next2 s2') - loop Done _ = LT - loop _ Done = GT - loop (Yield x1 s1') (Yield x2 s2') = - case compare x1 x2 of - EQ -> loop (next1 s1') (next2 s2') - other -> other -{-# INLINE [0] cmp #-} - --- | The empty stream. -empty :: Stream a -empty = Stream next () 0 - where next _ = Done -{-# INLINE [0] empty #-} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Fusion.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Fusion.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Fusion.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Fusion.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,234 +0,0 @@ -{-# LANGUAGE BangPatterns, MagicHash #-} - --- | --- Module : Data.Text.Internal.Fusion --- Copyright : (c) Tom Harper 2008-2009, --- (c) Bryan O'Sullivan 2009-2010, --- (c) Duncan Coutts 2009 --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! 
--- --- Text manipulation functions represented as fusible operations over --- streams. -module Data.Text.Internal.Fusion - ( - -- * Types - Stream(..) - , Step(..) - - -- * Creation and elimination - , stream - , unstream - , reverseStream - - , length - - -- * Transformations - , reverse - - -- * Construction - -- ** Scans - , reverseScanr - - -- ** Accumulating maps - , mapAccumL - - -- ** Generation and unfolding - , unfoldrN - - -- * Indexing - , index - , findIndex - , countChar - ) where - -import Prelude (Bool(..), Char, Maybe(..), Monad(..), Int, - Num(..), Ord(..), ($), (&&), - fromIntegral, otherwise) -import Data.Bits ((.&.)) -import Data.Text.Internal (Text(..)) -import Data.Text.Internal.Private (runText) -import Data.Text.Internal.Unsafe.Char (ord, unsafeChr, unsafeWrite) -import Data.Text.Internal.Unsafe.Shift (shiftL, shiftR) -import qualified Data.Text.Array as A -import qualified Data.Text.Internal.Fusion.Common as S -import Data.Text.Internal.Fusion.Types -import Data.Text.Internal.Fusion.Size -import qualified Data.Text.Internal as I -import qualified Data.Text.Internal.Encoding.Utf16 as U16 - -default(Int) - --- | /O(n)/ Convert a 'Text' into a 'Stream Char'. -stream :: Text -> Stream Char -stream (Text arr off len) = Stream next off (betweenSize (len `shiftR` 1) len) - where - !end = off+len - next !i - | i >= end = Done - | n >= 0xD800 && n <= 0xDBFF = Yield (U16.chr2 n n2) (i + 2) - | otherwise = Yield (unsafeChr n) (i + 1) - where - n = A.unsafeIndex arr i - n2 = A.unsafeIndex arr (i + 1) -{-# INLINE [0] stream #-} - --- | /O(n)/ Convert a 'Text' into a 'Stream Char', but iterate --- backwards. 
-reverseStream :: Text -> Stream Char -reverseStream (Text arr off len) = Stream next (off+len-1) (betweenSize (len `shiftR` 1) len) - where - {-# INLINE next #-} - next !i - | i < off = Done - | n >= 0xDC00 && n <= 0xDFFF = Yield (U16.chr2 n2 n) (i - 2) - | otherwise = Yield (unsafeChr n) (i - 1) - where - n = A.unsafeIndex arr i - n2 = A.unsafeIndex arr (i - 1) -{-# INLINE [0] reverseStream #-} - --- | /O(n)/ Convert a 'Stream Char' into a 'Text'. -unstream :: Stream Char -> Text -unstream (Stream next0 s0 len) = runText $ \done -> do - let mlen = upperBound 4 len - arr0 <- A.new mlen - let outer arr top = loop - where - loop !s !i = - case next0 s of - Done -> done arr i - Skip s' -> loop s' i - Yield x s' - | j >= top -> {-# SCC "unstream/resize" #-} do - let top' = (top + 1) `shiftL` 1 - arr' <- A.new top' - A.copyM arr' 0 arr 0 top - outer arr' top' s i - | otherwise -> do d <- unsafeWrite arr i x - loop s' (i+d) - where j | ord x < 0x10000 = i - | otherwise = i + 1 - outer arr0 mlen s0 0 -{-# INLINE [0] unstream #-} -{-# RULES "STREAM stream/unstream fusion" forall s. stream (unstream s) = s #-} - - --- ---------------------------------------------------------------------------- --- * Basic stream functions - -length :: Stream Char -> Int -length = S.lengthI -{-# INLINE[0] length #-} - --- | /O(n)/ Reverse the characters of a string. 
-reverse :: Stream Char -> Text -reverse (Stream next s len0) - | isEmpty len0 = I.empty - | otherwise = I.text arr off' len' - where - len0' = upperBound 4 (larger len0 4) - (arr, (off', len')) = A.run2 (A.new len0' >>= loop s (len0'-1) len0') - loop !s0 !i !len marr = - case next s0 of - Done -> return (marr, (j, len-j)) - where j = i + 1 - Skip s1 -> loop s1 i len marr - Yield x s1 | i < least -> {-# SCC "reverse/resize" #-} do - let newLen = len `shiftL` 1 - marr' <- A.new newLen - A.copyM marr' (newLen-len) marr 0 len - write s1 (len+i) newLen marr' - | otherwise -> write s1 i len marr - where n = ord x - least | n < 0x10000 = 0 - | otherwise = 1 - m = n - 0x10000 - lo = fromIntegral $ (m `shiftR` 10) + 0xD800 - hi = fromIntegral $ (m .&. 0x3FF) + 0xDC00 - write t j l mar - | n < 0x10000 = do - A.unsafeWrite mar j (fromIntegral n) - loop t (j-1) l mar - | otherwise = do - A.unsafeWrite mar (j-1) lo - A.unsafeWrite mar j hi - loop t (j-2) l mar -{-# INLINE [0] reverse #-} - --- | /O(n)/ Perform the equivalent of 'scanr' over a list, only with --- the input and result reversed. -reverseScanr :: (Char -> Char -> Char) -> Char -> Stream Char -> Stream Char -reverseScanr f z0 (Stream next0 s0 len) = Stream next (S1 :*: z0 :*: s0) (len+1) -- HINT maybe too low - where - {-# INLINE next #-} - next (S1 :*: z :*: s) = Yield z (S2 :*: z :*: s) - next (S2 :*: z :*: s) = case next0 s of - Yield x s' -> let !x' = f x z - in Yield x' (S2 :*: x' :*: s') - Skip s' -> Skip (S2 :*: z :*: s') - Done -> Done -{-# INLINE reverseScanr #-} - --- | /O(n)/ Like 'unfoldr', 'unfoldrN' builds a stream from a seed --- value. However, the length of the result is limited by the --- first argument to 'unfoldrN'. This function is more efficient than --- 'unfoldr' when the length of the result is known. 
-unfoldrN :: Int -> (a -> Maybe (Char,a)) -> a -> Stream Char -unfoldrN n = S.unfoldrNI n -{-# INLINE [0] unfoldrN #-} - -------------------------------------------------------------------------------- --- ** Indexing streams - --- | /O(n)/ stream index (subscript) operator, starting from 0. -index :: Stream Char -> Int -> Char -index = S.indexI -{-# INLINE [0] index #-} - --- | The 'findIndex' function takes a predicate and a stream and --- returns the index of the first element in the stream --- satisfying the predicate. -findIndex :: (Char -> Bool) -> Stream Char -> Maybe Int -findIndex = S.findIndexI -{-# INLINE [0] findIndex #-} - --- | /O(n)/ The 'count' function returns the number of times the query --- element appears in the given stream. -countChar :: Char -> Stream Char -> Int -countChar = S.countCharI -{-# INLINE [0] countChar #-} - --- | /O(n)/ Like a combination of 'map' and 'foldl''. Applies a --- function to each element of a 'Text', passing an accumulating --- parameter from left to right, and returns a final 'Text'. 
-mapAccumL :: (a -> Char -> (a,Char)) -> a -> Stream Char -> (a, Text) -mapAccumL f z0 (Stream next0 s0 len) = (nz, I.text na 0 nl) - where - (na,(nz,nl)) = A.run2 (A.new mlen >>= \arr -> outer arr mlen z0 s0 0) - where mlen = upperBound 4 len - outer arr top = loop - where - loop !z !s !i = - case next0 s of - Done -> return (arr, (z,i)) - Skip s' -> loop z s' i - Yield x s' - | j >= top -> {-# SCC "mapAccumL/resize" #-} do - let top' = (top + 1) `shiftL` 1 - arr' <- A.new top' - A.copyM arr' 0 arr 0 top - outer arr' top' z s i - | otherwise -> do d <- unsafeWrite arr i c - loop z' s' (i+d) - where (z',c) = f z x - j | ord c < 0x10000 = i - | otherwise = i + 1 -{-# INLINE [0] mapAccumL #-} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/IO.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/IO.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/IO.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/IO.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,166 +0,0 @@ -{-# LANGUAGE BangPatterns, CPP, RecordWildCards #-} --- | --- Module : Data.Text.Internal.IO --- Copyright : (c) 2009, 2010 Bryan O'Sullivan, --- (c) 2009 Simon Marlow --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- Low-level support for text I\/O. 
- -module Data.Text.Internal.IO - ( - hGetLineWith - , readChunk - ) where - -import qualified Control.Exception as E -import Data.IORef (readIORef, writeIORef) -import Data.Text (Text) -import Data.Text.Internal.Fusion (unstream) -import Data.Text.Internal.Fusion.Types (Step(..), Stream(..)) -import Data.Text.Internal.Fusion.Size (exactSize, maxSize) -import Data.Text.Unsafe (inlinePerformIO) -import Foreign.Storable (peekElemOff) -import GHC.IO.Buffer (Buffer(..), CharBuffer, RawCharBuffer, bufferAdjustL, - bufferElems, charSize, isEmptyBuffer, readCharBuf, - withRawBuffer, writeCharBuf) -import GHC.IO.Handle.Internals (ioe_EOF, readTextDevice, wantReadableHandle_) -import GHC.IO.Handle.Types (Handle__(..), Newline(..)) -import System.IO (Handle) -import System.IO.Error (isEOFError) -import qualified Data.Text as T - --- | Read a single line of input from a handle, constructing a list of --- decoded chunks as we go. When we're done, transform them into the --- destination type. -hGetLineWith :: ([Text] -> t) -> Handle -> IO t -hGetLineWith f h = wantReadableHandle_ "hGetLine" h go - where - go hh@Handle__{..} = readIORef haCharBuffer >>= fmap f . hGetLineLoop hh [] - -hGetLineLoop :: Handle__ -> [Text] -> CharBuffer -> IO [Text] -hGetLineLoop hh@Handle__{..} = go where - go ts buf@Buffer{ bufL=r0, bufR=w, bufRaw=raw0 } = do - let findEOL raw r | r == w = return (False, w) - | otherwise = do - (c,r') <- readCharBuf raw r - if c == '\n' - then return (True, r) - else findEOL raw r' - (eol, off) <- findEOL raw0 r0 - (t,r') <- if haInputNL == CRLF - then unpack_nl raw0 r0 off - else do t <- unpack raw0 r0 off - return (t,off) - if eol - then do writeIORef haCharBuffer (bufferAdjustL (off+1) buf) - return $ reverse (t:ts) - else do - let buf1 = bufferAdjustL r' buf - maybe_buf <- maybeFillReadBuffer hh buf1 - case maybe_buf of - -- Nothing indicates we caught an EOF, and we may have a - -- partial line to return. - Nothing -> do - -- we reached EOF. 
There might be a lone \r left - -- in the buffer, so check for that and - -- append it to the line if necessary. - let pre | isEmptyBuffer buf1 = T.empty - | otherwise = T.singleton '\r' - writeIORef haCharBuffer buf1{ bufL=0, bufR=0 } - let str = reverse . filter (not . T.null) $ pre:t:ts - if null str - then ioe_EOF - else return str - Just new_buf -> go (t:ts) new_buf - --- This function is lifted almost verbatim from GHC.IO.Handle.Text. -maybeFillReadBuffer :: Handle__ -> CharBuffer -> IO (Maybe CharBuffer) -maybeFillReadBuffer handle_ buf - = E.catch (Just `fmap` getSomeCharacters handle_ buf) $ \e -> - if isEOFError e - then return Nothing - else ioError e - -unpack :: RawCharBuffer -> Int -> Int -> IO Text -unpack !buf !r !w - | charSize /= 4 = sizeError "unpack" - | r >= w = return T.empty - | otherwise = withRawBuffer buf go - where - go pbuf = return $! unstream (Stream next r (exactSize (w-r))) - where - next !i | i >= w = Done - | otherwise = Yield (ix i) (i+1) - ix i = inlinePerformIO $ peekElemOff pbuf i - -unpack_nl :: RawCharBuffer -> Int -> Int -> IO (Text, Int) -unpack_nl !buf !r !w - | charSize /= 4 = sizeError "unpack_nl" - | r >= w = return (T.empty, 0) - | otherwise = withRawBuffer buf $ go - where - go pbuf = do - let !t = unstream (Stream next r (maxSize (w-r))) - w' = w - 1 - return $ if ix w' == '\r' - then (t,w') - else (t,w) - where - next !i | i >= w = Done - | c == '\r' = let i' = i + 1 - in if i' < w - then if ix i' == '\n' - then Yield '\n' (i+2) - else Yield '\n' i' - else Done - | otherwise = Yield c (i+1) - where c = ix i - ix i = inlinePerformIO $ peekElemOff pbuf i - --- This function is completely lifted from GHC.IO.Handle.Text. 
-getSomeCharacters :: Handle__ -> CharBuffer -> IO CharBuffer -getSomeCharacters handle_@Handle__{..} buf@Buffer{..} = - case bufferElems buf of - -- buffer empty: read some more - 0 -> {-# SCC "readTextDevice" #-} readTextDevice handle_ buf - - -- if the buffer has a single '\r' in it and we're doing newline - -- translation: read some more - 1 | haInputNL == CRLF -> do - (c,_) <- readCharBuf bufRaw bufL - if c == '\r' - then do -- shuffle the '\r' to the beginning. This is only safe - -- if we're about to call readTextDevice, otherwise it - -- would mess up flushCharBuffer. - -- See [note Buffer Flushing], GHC.IO.Handle.Types - _ <- writeCharBuf bufRaw 0 '\r' - let buf' = buf{ bufL=0, bufR=1 } - readTextDevice handle_ buf' - else do - return buf - - -- buffer has some chars in it already: just return it - _otherwise -> {-# SCC "otherwise" #-} return buf - --- | Read a single chunk of strict text from a buffer. Used by both --- the strict and lazy implementations of hGetContents. -readChunk :: Handle__ -> CharBuffer -> IO Text -readChunk hh@Handle__{..} buf = do - buf'@Buffer{..} <- getSomeCharacters hh buf - (t,r) <- if haInputNL == CRLF - then unpack_nl bufRaw bufL bufR - else do t <- unpack bufRaw bufL bufR - return (t,bufR) - writeIORef haCharBuffer (bufferAdjustL r buf') - return t - -sizeError :: String -> a -sizeError loc = error $ "Data.Text.IO." 
++ loc ++ ": bad internal buffer size" diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Lazy/Encoding/Fusion.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Lazy/Encoding/Fusion.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Lazy/Encoding/Fusion.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Lazy/Encoding/Fusion.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,324 +0,0 @@ -{-# LANGUAGE BangPatterns, CPP, Rank2Types #-} - --- | --- Module : Data.Text.Lazy.Encoding.Fusion --- Copyright : (c) 2009, 2010 Bryan O'Sullivan --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : portable --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- Fusible 'Stream'-oriented functions for converting between lazy --- 'Text' and several common encodings. 
- -module Data.Text.Internal.Lazy.Encoding.Fusion - ( - -- * Streaming - -- streamASCII - streamUtf8 - , streamUtf16LE - , streamUtf16BE - , streamUtf32LE - , streamUtf32BE - - -- * Unstreaming - , unstream - - , module Data.Text.Internal.Encoding.Fusion.Common - ) where - -import Data.ByteString.Lazy.Internal (ByteString(..), defaultChunkSize) -import qualified Data.ByteString as B -import qualified Data.ByteString.Unsafe as B -import Data.Text.Internal.Encoding.Fusion.Common -import Data.Text.Encoding.Error -import Data.Text.Internal.Fusion (Step(..), Stream(..)) -import Data.Text.Internal.Fusion.Size -import Data.Text.Internal.Unsafe.Char (unsafeChr, unsafeChr8, unsafeChr32) -import Data.Text.Internal.Unsafe.Shift (shiftL) -import Data.Word (Word8, Word16, Word32) -import qualified Data.Text.Internal.Encoding.Utf8 as U8 -import qualified Data.Text.Internal.Encoding.Utf16 as U16 -import qualified Data.Text.Internal.Encoding.Utf32 as U32 -import Data.Text.Unsafe (unsafeDupablePerformIO) -import Foreign.ForeignPtr (withForeignPtr, ForeignPtr) -import Foreign.Storable (pokeByteOff) -import Data.ByteString.Internal (mallocByteString, memcpy) -#if defined(ASSERTS) -import Control.Exception (assert) -#endif -import qualified Data.ByteString.Internal as B - -data S = S0 - | S1 {-# UNPACK #-} !Word8 - | S2 {-# UNPACK #-} !Word8 {-# UNPACK #-} !Word8 - | S3 {-# UNPACK #-} !Word8 {-# UNPACK #-} !Word8 {-# UNPACK #-} !Word8 - | S4 {-# UNPACK #-} !Word8 {-# UNPACK #-} !Word8 {-# UNPACK #-} !Word8 {-# UNPACK #-} !Word8 - -data T = T !ByteString !S {-# UNPACK #-} !Int - --- | /O(n)/ Convert a lazy 'ByteString' into a 'Stream Char', using --- UTF-8 encoding. 
-streamUtf8 :: OnDecodeError -> ByteString -> Stream Char -streamUtf8 onErr bs0 = Stream next (T bs0 S0 0) unknownSize - where - next (T bs@(Chunk ps _) S0 i) - | i < len && U8.validate1 a = - Yield (unsafeChr8 a) (T bs S0 (i+1)) - | i + 1 < len && U8.validate2 a b = - Yield (U8.chr2 a b) (T bs S0 (i+2)) - | i + 2 < len && U8.validate3 a b c = - Yield (U8.chr3 a b c) (T bs S0 (i+3)) - | i + 3 < len && U8.validate4 a b c d = - Yield (U8.chr4 a b c d) (T bs S0 (i+4)) - where len = B.length ps - a = B.unsafeIndex ps i - b = B.unsafeIndex ps (i+1) - c = B.unsafeIndex ps (i+2) - d = B.unsafeIndex ps (i+3) - next st@(T bs s i) = - case s of - S1 a | U8.validate1 a -> Yield (unsafeChr8 a) es - S2 a b | U8.validate2 a b -> Yield (U8.chr2 a b) es - S3 a b c | U8.validate3 a b c -> Yield (U8.chr3 a b c) es - S4 a b c d | U8.validate4 a b c d -> Yield (U8.chr4 a b c d) es - _ -> consume st - where es = T bs S0 i - consume (T bs@(Chunk ps rest) s i) - | i >= B.length ps = consume (T rest s 0) - | otherwise = - case s of - S0 -> next (T bs (S1 x) (i+1)) - S1 a -> next (T bs (S2 a x) (i+1)) - S2 a b -> next (T bs (S3 a b x) (i+1)) - S3 a b c -> next (T bs (S4 a b c x) (i+1)) - S4 a b c d -> decodeError "streamUtf8" "UTF-8" onErr (Just a) - (T bs (S3 b c d) (i+1)) - where x = B.unsafeIndex ps i - consume (T Empty S0 _) = Done - consume st = decodeError "streamUtf8" "UTF-8" onErr Nothing st -{-# INLINE [0] streamUtf8 #-} - --- | /O(n)/ Convert a 'ByteString' into a 'Stream Char', using little --- endian UTF-16 encoding. 
-streamUtf16LE :: OnDecodeError -> ByteString -> Stream Char -streamUtf16LE onErr bs0 = Stream next (T bs0 S0 0) unknownSize - where - next (T bs@(Chunk ps _) S0 i) - | i + 1 < len && U16.validate1 x1 = - Yield (unsafeChr x1) (T bs S0 (i+2)) - | i + 3 < len && U16.validate2 x1 x2 = - Yield (U16.chr2 x1 x2) (T bs S0 (i+4)) - where len = B.length ps - x1 = c (idx i) (idx (i + 1)) - x2 = c (idx (i + 2)) (idx (i + 3)) - c w1 w2 = w1 + (w2 `shiftL` 8) - idx = fromIntegral . B.unsafeIndex ps :: Int -> Word16 - next st@(T bs s i) = - case s of - S2 w1 w2 | U16.validate1 (c w1 w2) -> - Yield (unsafeChr (c w1 w2)) es - S4 w1 w2 w3 w4 | U16.validate2 (c w1 w2) (c w3 w4) -> - Yield (U16.chr2 (c w1 w2) (c w3 w4)) es - _ -> consume st - where es = T bs S0 i - c :: Word8 -> Word8 -> Word16 - c w1 w2 = fromIntegral w1 + (fromIntegral w2 `shiftL` 8) - consume (T bs@(Chunk ps rest) s i) - | i >= B.length ps = consume (T rest s 0) - | otherwise = - case s of - S0 -> next (T bs (S1 x) (i+1)) - S1 w1 -> next (T bs (S2 w1 x) (i+1)) - S2 w1 w2 -> next (T bs (S3 w1 w2 x) (i+1)) - S3 w1 w2 w3 -> next (T bs (S4 w1 w2 w3 x) (i+1)) - S4 w1 w2 w3 w4 -> decodeError "streamUtf16LE" "UTF-16LE" onErr (Just w1) - (T bs (S3 w2 w3 w4) (i+1)) - where x = B.unsafeIndex ps i - consume (T Empty S0 _) = Done - consume st = decodeError "streamUtf16LE" "UTF-16LE" onErr Nothing st -{-# INLINE [0] streamUtf16LE #-} - --- | /O(n)/ Convert a 'ByteString' into a 'Stream Char', using big --- endian UTF-16 encoding. -streamUtf16BE :: OnDecodeError -> ByteString -> Stream Char -streamUtf16BE onErr bs0 = Stream next (T bs0 S0 0) unknownSize - where - next (T bs@(Chunk ps _) S0 i) - | i + 1 < len && U16.validate1 x1 = - Yield (unsafeChr x1) (T bs S0 (i+2)) - | i + 3 < len && U16.validate2 x1 x2 = - Yield (U16.chr2 x1 x2) (T bs S0 (i+4)) - where len = B.length ps - x1 = c (idx i) (idx (i + 1)) - x2 = c (idx (i + 2)) (idx (i + 3)) - c w1 w2 = (w1 `shiftL` 8) + w2 - idx = fromIntegral . 
B.unsafeIndex ps :: Int -> Word16 - next st@(T bs s i) = - case s of - S2 w1 w2 | U16.validate1 (c w1 w2) -> - Yield (unsafeChr (c w1 w2)) es - S4 w1 w2 w3 w4 | U16.validate2 (c w1 w2) (c w3 w4) -> - Yield (U16.chr2 (c w1 w2) (c w3 w4)) es - _ -> consume st - where es = T bs S0 i - c :: Word8 -> Word8 -> Word16 - c w1 w2 = (fromIntegral w1 `shiftL` 8) + fromIntegral w2 - consume (T bs@(Chunk ps rest) s i) - | i >= B.length ps = consume (T rest s 0) - | otherwise = - case s of - S0 -> next (T bs (S1 x) (i+1)) - S1 w1 -> next (T bs (S2 w1 x) (i+1)) - S2 w1 w2 -> next (T bs (S3 w1 w2 x) (i+1)) - S3 w1 w2 w3 -> next (T bs (S4 w1 w2 w3 x) (i+1)) - S4 w1 w2 w3 w4 -> decodeError "streamUtf16BE" "UTF-16BE" onErr (Just w1) - (T bs (S3 w2 w3 w4) (i+1)) - where x = B.unsafeIndex ps i - consume (T Empty S0 _) = Done - consume st = decodeError "streamUtf16BE" "UTF-16BE" onErr Nothing st -{-# INLINE [0] streamUtf16BE #-} - --- | /O(n)/ Convert a 'ByteString' into a 'Stream Char', using big --- endian UTF-32 encoding. -streamUtf32BE :: OnDecodeError -> ByteString -> Stream Char -streamUtf32BE onErr bs0 = Stream next (T bs0 S0 0) unknownSize - where - next (T bs@(Chunk ps _) S0 i) - | i + 3 < len && U32.validate x = - Yield (unsafeChr32 x) (T bs S0 (i+4)) - where len = B.length ps - x = shiftL x1 24 + shiftL x2 16 + shiftL x3 8 + x4 - x1 = idx i - x2 = idx (i+1) - x3 = idx (i+2) - x4 = idx (i+3) - idx = fromIntegral . 
B.unsafeIndex ps :: Int -> Word32 - next st@(T bs s i) = - case s of - S4 w1 w2 w3 w4 | U32.validate (c w1 w2 w3 w4) -> - Yield (unsafeChr32 (c w1 w2 w3 w4)) es - _ -> consume st - where es = T bs S0 i - c :: Word8 -> Word8 -> Word8 -> Word8 -> Word32 - c w1 w2 w3 w4 = shifted - where - shifted = shiftL x1 24 + shiftL x2 16 + shiftL x3 8 + x4 - x1 = fromIntegral w1 - x2 = fromIntegral w2 - x3 = fromIntegral w3 - x4 = fromIntegral w4 - consume (T bs@(Chunk ps rest) s i) - | i >= B.length ps = consume (T rest s 0) - | otherwise = - case s of - S0 -> next (T bs (S1 x) (i+1)) - S1 w1 -> next (T bs (S2 w1 x) (i+1)) - S2 w1 w2 -> next (T bs (S3 w1 w2 x) (i+1)) - S3 w1 w2 w3 -> next (T bs (S4 w1 w2 w3 x) (i+1)) - S4 w1 w2 w3 w4 -> decodeError "streamUtf32BE" "UTF-32BE" onErr (Just w1) - (T bs (S3 w2 w3 w4) (i+1)) - where x = B.unsafeIndex ps i - consume (T Empty S0 _) = Done - consume st = decodeError "streamUtf32BE" "UTF-32BE" onErr Nothing st -{-# INLINE [0] streamUtf32BE #-} - --- | /O(n)/ Convert a 'ByteString' into a 'Stream Char', using little --- endian UTF-32 encoding. -streamUtf32LE :: OnDecodeError -> ByteString -> Stream Char -streamUtf32LE onErr bs0 = Stream next (T bs0 S0 0) unknownSize - where - next (T bs@(Chunk ps _) S0 i) - | i + 3 < len && U32.validate x = - Yield (unsafeChr32 x) (T bs S0 (i+4)) - where len = B.length ps - x = shiftL x4 24 + shiftL x3 16 + shiftL x2 8 + x1 - x1 = idx i - x2 = idx (i+1) - x3 = idx (i+2) - x4 = idx (i+3) - idx = fromIntegral . 
B.unsafeIndex ps :: Int -> Word32 - next st@(T bs s i) = - case s of - S4 w1 w2 w3 w4 | U32.validate (c w1 w2 w3 w4) -> - Yield (unsafeChr32 (c w1 w2 w3 w4)) es - _ -> consume st - where es = T bs S0 i - c :: Word8 -> Word8 -> Word8 -> Word8 -> Word32 - c w1 w2 w3 w4 = shifted - where - shifted = shiftL x4 24 + shiftL x3 16 + shiftL x2 8 + x1 - x1 = fromIntegral w1 - x2 = fromIntegral w2 - x3 = fromIntegral w3 - x4 = fromIntegral w4 - consume (T bs@(Chunk ps rest) s i) - | i >= B.length ps = consume (T rest s 0) - | otherwise = - case s of - S0 -> next (T bs (S1 x) (i+1)) - S1 w1 -> next (T bs (S2 w1 x) (i+1)) - S2 w1 w2 -> next (T bs (S3 w1 w2 x) (i+1)) - S3 w1 w2 w3 -> next (T bs (S4 w1 w2 w3 x) (i+1)) - S4 w1 w2 w3 w4 -> decodeError "streamUtf32LE" "UTF-32LE" onErr (Just w1) - (T bs (S3 w2 w3 w4) (i+1)) - where x = B.unsafeIndex ps i - consume (T Empty S0 _) = Done - consume st = decodeError "streamUtf32LE" "UTF-32LE" onErr Nothing st -{-# INLINE [0] streamUtf32LE #-} - --- | /O(n)/ Convert a 'Stream' 'Word8' to a lazy 'ByteString'. -unstreamChunks :: Int -> Stream Word8 -> ByteString -unstreamChunks chunkSize (Stream next s0 len0) = chunk s0 (upperBound 4 len0) - where chunk s1 len1 = unsafeDupablePerformIO $ do - let len = max 4 (min len1 chunkSize) - mallocByteString len >>= loop len 0 s1 - where - loop !n !off !s fp = case next s of - Done | off == 0 -> return Empty - | otherwise -> return $! Chunk (trimUp fp off) Empty - Skip s' -> loop n off s' fp - Yield x s' - | off == chunkSize -> do - let !newLen = n - off - return $! 
Chunk (trimUp fp off) (chunk s newLen) - | off == n -> realloc fp n off s' x - | otherwise -> do - withForeignPtr fp $ \p -> pokeByteOff p off x - loop n (off+1) s' fp - {-# NOINLINE realloc #-} - realloc fp n off s x = do - let n' = min (n+n) chunkSize - fp' <- copy0 fp n n' - withForeignPtr fp' $ \p -> pokeByteOff p off x - loop n' (off+1) s fp' - trimUp fp off = B.PS fp 0 off - copy0 :: ForeignPtr Word8 -> Int -> Int -> IO (ForeignPtr Word8) - copy0 !src !srcLen !destLen = -#if defined(ASSERTS) - assert (srcLen <= destLen) $ -#endif - do - dest <- mallocByteString destLen - withForeignPtr src $ \src' -> - withForeignPtr dest $ \dest' -> - memcpy dest' src' (fromIntegral srcLen) - return dest - --- | /O(n)/ Convert a 'Stream' 'Word8' to a lazy 'ByteString'. -unstream :: Stream Word8 -> ByteString -unstream = unstreamChunks defaultChunkSize - -decodeError :: forall s. String -> String -> OnDecodeError -> Maybe Word8 - -> s -> Step s Char -decodeError func kind onErr mb i = - case onErr desc mb of - Nothing -> Skip i - Just c -> Yield c i - where desc = "Data.Text.Lazy.Encoding.Fusion." ++ func ++ ": Invalid " ++ - kind ++ " stream" diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Lazy/Fusion.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Lazy/Fusion.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Lazy/Fusion.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Lazy/Fusion.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,120 +0,0 @@ -{-# LANGUAGE BangPatterns #-} --- | --- Module : Data.Text.Lazy.Fusion --- Copyright : (c) 2009, 2010 Bryan O'Sullivan --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. 
Use at your own risk! --- --- Core stream fusion functionality for text. - -module Data.Text.Internal.Lazy.Fusion - ( - stream - , unstream - , unstreamChunks - , length - , unfoldrN - , index - , countChar - ) where - -import Prelude hiding (length) -import qualified Data.Text.Internal.Fusion.Common as S -import Control.Monad.ST (runST) -import Data.Text.Internal.Fusion.Types -import Data.Text.Internal.Fusion.Size (isEmpty, unknownSize) -import Data.Text.Internal.Lazy -import qualified Data.Text.Internal as I -import qualified Data.Text.Array as A -import Data.Text.Internal.Unsafe.Char (unsafeWrite) -import Data.Text.Internal.Unsafe.Shift (shiftL) -import Data.Text.Unsafe (Iter(..), iter) -import Data.Int (Int64) - -default(Int64) - --- | /O(n)/ Convert a 'Text' into a 'Stream Char'. -stream :: Text -> Stream Char -stream text = Stream next (text :*: 0) unknownSize - where - next (Empty :*: _) = Done - next (txt@(Chunk t@(I.Text _ _ len) ts) :*: i) - | i >= len = next (ts :*: 0) - | otherwise = Yield c (txt :*: i+d) - where Iter c d = iter t i -{-# INLINE [0] stream #-} - --- | /O(n)/ Convert a 'Stream Char' into a 'Text', using the given --- chunk size. 
-unstreamChunks :: Int -> Stream Char -> Text -unstreamChunks !chunkSize (Stream next s0 len0) - | isEmpty len0 = Empty - | otherwise = outer s0 - where - outer so = {-# SCC "unstreamChunks/outer" #-} - case next so of - Done -> Empty - Skip s' -> outer s' - Yield x s' -> runST $ do - a <- A.new unknownLength - unsafeWrite a 0 x >>= inner a unknownLength s' - where unknownLength = 4 - where - inner marr !len s !i - | i + 1 >= chunkSize = finish marr i s - | i + 1 >= len = {-# SCC "unstreamChunks/resize" #-} do - let newLen = min (len `shiftL` 1) chunkSize - marr' <- A.new newLen - A.copyM marr' 0 marr 0 len - inner marr' newLen s i - | otherwise = - {-# SCC "unstreamChunks/inner" #-} - case next s of - Done -> finish marr i s - Skip s' -> inner marr len s' i - Yield x s' -> do d <- unsafeWrite marr i x - inner marr len s' (i+d) - finish marr len s' = do - arr <- A.unsafeFreeze marr - return (I.Text arr 0 len `Chunk` outer s') -{-# INLINE [0] unstreamChunks #-} - --- | /O(n)/ Convert a 'Stream Char' into a 'Text', using --- 'defaultChunkSize'. -unstream :: Stream Char -> Text -unstream = unstreamChunks defaultChunkSize -{-# INLINE [0] unstream #-} - --- | /O(n)/ Returns the number of characters in a text. -length :: Stream Char -> Int64 -length = S.lengthI -{-# INLINE[0] length #-} - -{-# RULES "LAZY STREAM stream/unstream fusion" forall s. - stream (unstream s) = s #-} - --- | /O(n)/ Like 'unfoldr', 'unfoldrN' builds a stream from a seed --- value. However, the length of the result is limited by the --- first argument to 'unfoldrN'. This function is more efficient than --- 'unfoldr' when the length of the result is known. -unfoldrN :: Int64 -> (a -> Maybe (Char,a)) -> a -> Stream Char -unfoldrN n = S.unfoldrNI n -{-# INLINE [0] unfoldrN #-} - --- | /O(n)/ stream index (subscript) operator, starting from 0. 
-index :: Stream Char -> Int64 -> Char -index = S.indexI -{-# INLINE [0] index #-} - --- | /O(n)/ The 'count' function returns the number of times the query --- element appears in the given stream. -countChar :: Char -> Stream Char -> Int64 -countChar = S.countCharI -{-# INLINE [0] countChar #-} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Lazy/Search.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Lazy/Search.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Lazy/Search.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Lazy/Search.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,134 +0,0 @@ -{-# LANGUAGE BangPatterns, ScopedTypeVariables #-} - --- | --- Module : Data.Text.Lazy.Search --- Copyright : (c) 2009, 2010 Bryan O'Sullivan --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- Fast substring search for lazy 'Text', based on work by Boyer, --- Moore, Horspool, Sunday, and Lundh. Adapted from the strict --- implementation. - -module Data.Text.Internal.Lazy.Search - ( - indices - ) where - -import qualified Data.Text.Array as A -import Data.Int (Int64) -import Data.Word (Word16, Word64) -import qualified Data.Text.Internal as T -import Data.Text.Internal.Fusion.Types (PairS(..)) -import Data.Text.Internal.Lazy (Text(..), foldlChunks) -import Data.Bits ((.|.), (.&.)) -import Data.Text.Internal.Unsafe.Shift (shiftL) - --- | /O(n+m)/ Find the offsets of all non-overlapping indices of --- @needle@ within @haystack@. --- --- This function is strict in @needle@, and lazy (as far as possible) --- in the chunks of @haystack@. 
--- --- In (unlikely) bad cases, this algorithm's complexity degrades --- towards /O(n*m)/. -indices :: Text -- ^ Substring to search for (@needle@) - -> Text -- ^ Text to search in (@haystack@) - -> [Int64] -indices needle@(Chunk n ns) _haystack@(Chunk k ks) - | nlen <= 0 = [] - | nlen == 1 = indicesOne (nindex 0) 0 k ks - | otherwise = advance k ks 0 0 - where - advance x@(T.Text _ _ l) xs = scan - where - scan !g !i - | i >= m = case xs of - Empty -> [] - Chunk y ys -> advance y ys g (i-m) - | lackingHay (i + nlen) x xs = [] - | c == z && candidateMatch 0 = g : scan (g+nlen) (i+nlen) - | otherwise = scan (g+delta) (i+delta) - where - m = fromIntegral l - c = hindex (i + nlast) - delta | nextInPattern = nlen + 1 - | c == z = skip + 1 - | otherwise = 1 - nextInPattern = mask .&. swizzle (hindex (i+nlen)) == 0 - candidateMatch !j - | j >= nlast = True - | hindex (i+j) /= nindex j = False - | otherwise = candidateMatch (j+1) - hindex = index x xs - nlen = wordLength needle - nlast = nlen - 1 - nindex = index n ns - z = foldlChunks fin 0 needle - where fin _ (T.Text farr foff flen) = A.unsafeIndex farr (foff+flen-1) - (mask :: Word64) :*: skip = buildTable n ns 0 0 0 (nlen-2) - swizzle w = 1 `shiftL` (fromIntegral w .&. 0x3f) - buildTable (T.Text xarr xoff xlen) xs = go - where - go !(g::Int64) !i !msk !skp - | i >= xlast = case xs of - Empty -> (msk .|. swizzle z) :*: skp - Chunk y ys -> buildTable y ys g 0 msk' skp' - | otherwise = go (g+1) (i+1) msk' skp' - where c = A.unsafeIndex xarr (xoff+i) - msk' = msk .|. swizzle c - skp' | c == z = nlen - g - 2 - | otherwise = skp - xlast = xlen - 1 - -- | Check whether an attempt to index into the haystack at the - -- given offset would fail. - lackingHay q = go 0 - where - go p (T.Text _ _ l) ps = p' < q && case ps of - Empty -> True - Chunk r rs -> go p' r rs - where p' = p + fromIntegral l -indices _ _ = [] - --- | Fast index into a partly unpacked 'Text'. 
We take into account --- the possibility that the caller might try to access one element --- past the end. -index :: T.Text -> Text -> Int64 -> Word16 -index (T.Text arr off len) xs !i - | j < len = A.unsafeIndex arr (off+j) - | otherwise = case xs of - Empty - -- out of bounds, but legal - | j == len -> 0 - -- should never happen, due to lackingHay above - | otherwise -> emptyError "index" - Chunk c cs -> index c cs (i-fromIntegral len) - where j = fromIntegral i - --- | A variant of 'indices' that scans linearly for a single 'Word16'. -indicesOne :: Word16 -> Int64 -> T.Text -> Text -> [Int64] -indicesOne c = chunk - where - chunk !i (T.Text oarr ooff olen) os = go 0 - where - go h | h >= olen = case os of - Empty -> [] - Chunk y ys -> chunk (i+fromIntegral olen) y ys - | on == c = i + fromIntegral h : go (h+1) - | otherwise = go (h+1) - where on = A.unsafeIndex oarr (ooff+h) - --- | The number of 'Word16' values in a 'Text'. -wordLength :: Text -> Int64 -wordLength = foldlChunks sumLength 0 - where sumLength i (T.Text _ _ l) = i + fromIntegral l - -emptyError :: String -> a -emptyError fun = error ("Data.Text.Lazy.Search." ++ fun ++ ": empty input") diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Lazy.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Lazy.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Lazy.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Lazy.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,119 +0,0 @@ -{-# LANGUAGE BangPatterns, DeriveDataTypeable #-} -{-# OPTIONS_HADDOCK not-home #-} - --- | --- Module : Data.Text.Internal.Lazy --- Copyright : (c) 2009, 2010 Bryan O'Sullivan --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. 
Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- A module containing private 'Text' internals. This exposes the --- 'Text' representation and low level construction functions. --- Modules which extend the 'Text' system may need to use this module. - -module Data.Text.Internal.Lazy - ( - Text(..) - , chunk - , empty - , foldrChunks - , foldlChunks - -- * Data type invariant and abstraction functions - - -- $invariant - , strictInvariant - , lazyInvariant - , showStructure - - -- * Chunk allocation sizes - , defaultChunkSize - , smallChunkSize - , chunkOverhead - ) where - -import Data.Text () -import Data.Text.Internal.Unsafe.Shift (shiftL) -import Data.Typeable (Typeable) -import Foreign.Storable (sizeOf) -import qualified Data.Text.Internal as T - -data Text = Empty - | Chunk {-# UNPACK #-} !T.Text Text - deriving (Typeable) - --- $invariant --- --- The data type invariant for lazy 'Text': Every 'Text' is either 'Empty' or --- consists of non-null 'T.Text's. All functions must preserve this, --- and the QC properties must check this. - --- | Check the invariant strictly. -strictInvariant :: Text -> Bool -strictInvariant Empty = True -strictInvariant x@(Chunk (T.Text _ _ len) cs) - | len > 0 = strictInvariant cs - | otherwise = error $ "Data.Text.Lazy: invariant violation: " - ++ showStructure x - --- | Check the invariant lazily. -lazyInvariant :: Text -> Text -lazyInvariant Empty = Empty -lazyInvariant x@(Chunk c@(T.Text _ _ len) cs) - | len > 0 = Chunk c (lazyInvariant cs) - | otherwise = error $ "Data.Text.Lazy: invariant violation: " - ++ showStructure x - --- | Display the internal structure of a lazy 'Text'. -showStructure :: Text -> String -showStructure Empty = "Empty" -showStructure (Chunk t Empty) = "Chunk " ++ show t ++ " Empty" -showStructure (Chunk t ts) = - "Chunk " ++ show t ++ " (" ++ showStructure ts ++ ")" - --- | Smart constructor for 'Chunk'. 
Guarantees the data type invariant. -chunk :: T.Text -> Text -> Text -{-# INLINE chunk #-} -chunk t@(T.Text _ _ len) ts | len == 0 = ts - | otherwise = Chunk t ts - --- | Smart constructor for 'Empty'. -empty :: Text -{-# INLINE [0] empty #-} -empty = Empty - --- | Consume the chunks of a lazy 'Text' with a natural right fold. -foldrChunks :: (T.Text -> a -> a) -> a -> Text -> a -foldrChunks f z = go - where go Empty = z - go (Chunk c cs) = f c (go cs) -{-# INLINE foldrChunks #-} - --- | Consume the chunks of a lazy 'Text' with a strict, tail-recursive, --- accumulating left fold. -foldlChunks :: (a -> T.Text -> a) -> a -> Text -> a -foldlChunks f z = go z - where go !a Empty = a - go !a (Chunk c cs) = go (f a c) cs -{-# INLINE foldlChunks #-} - --- | Currently set to 16 KiB, less the memory management overhead. -defaultChunkSize :: Int -defaultChunkSize = 16384 - chunkOverhead -{-# INLINE defaultChunkSize #-} - --- | Currently set to 128 bytes, less the memory management overhead. -smallChunkSize :: Int -smallChunkSize = 128 - chunkOverhead -{-# INLINE smallChunkSize #-} - --- | The memory management overhead. Currently this is tuned for GHC only. 
-chunkOverhead :: Int -chunkOverhead = sizeOf (undefined :: Int) `shiftL` 1 -{-# INLINE chunkOverhead #-} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Private.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Private.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Private.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Private.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,37 +0,0 @@ -{-# LANGUAGE BangPatterns, Rank2Types, UnboxedTuples #-} - --- | --- Module : Data.Text.Internal.Private --- Copyright : (c) 2011 Bryan O'Sullivan --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC - -module Data.Text.Internal.Private - ( - runText - , span_ - ) where - -import Control.Monad.ST (ST, runST) -import Data.Text.Internal (Text(..), text) -import Data.Text.Unsafe (Iter(..), iter) -import qualified Data.Text.Array as A - -span_ :: (Char -> Bool) -> Text -> (# Text, Text #) -span_ p t@(Text arr off len) = (# hd,tl #) - where hd = text arr off k - tl = text arr (off+k) (len-k) - !k = loop 0 - loop !i | i < len && p c = loop (i+d) - | otherwise = i - where Iter c d = iter t i -{-# INLINE span_ #-} - -runText :: (forall s. (A.MArray s -> Int -> ST s Text) -> ST s Text) -> Text -runText act = runST (act $ \ !marr !len -> do - arr <- A.unsafeFreeze marr - return $! 
text arr 0 len) -{-# INLINE runText #-} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Read.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Read.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Read.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Read.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,62 +0,0 @@ --- | --- Module : Data.Text.Internal.Read --- Copyright : (c) 2014 Bryan O'Sullivan --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- Common internal functiopns for reading textual data. -module Data.Text.Internal.Read - ( - IReader - , IParser(..) - , T(..) - , digitToInt - , hexDigitToInt - , perhaps - ) where - -import Control.Applicative (Applicative(..)) -import Control.Arrow (first) -import Control.Monad (ap) -import Data.Char (ord) - -type IReader t a = t -> Either String (a,t) - -newtype IParser t a = P { - runP :: IReader t a - } - -instance Functor (IParser t) where - fmap f m = P $ fmap (first f) . 
runP m - -instance Applicative (IParser t) where - pure a = P $ \t -> Right (a,t) - {-# INLINE pure #-} - (<*>) = ap - -instance Monad (IParser t) where - return = pure - m >>= k = P $ \t -> case runP m t of - Left err -> Left err - Right (a,t') -> runP (k a) t' - {-# INLINE (>>=) #-} - fail msg = P $ \_ -> Left msg - -data T = T !Integer !Int - -perhaps :: a -> IParser t a -> IParser t a -perhaps def m = P $ \t -> case runP m t of - Left _ -> Right (def,t) - r@(Right _) -> r - -hexDigitToInt :: Char -> Int -hexDigitToInt c - | c >= '0' && c <= '9' = ord c - ord '0' - | c >= 'a' && c <= 'f' = ord c - (ord 'a' - 10) - | otherwise = ord c - (ord 'A' - 10) - -digitToInt :: Char -> Int -digitToInt c = ord c - ord '0' diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Search.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Search.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Search.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Search.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,89 +0,0 @@ -{-# LANGUAGE BangPatterns, ScopedTypeVariables #-} - --- | --- Module : Data.Text.Internal.Search --- Copyright : (c) Bryan O'Sullivan 2009 --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- Fast substring search for 'Text', based on work by Boyer, Moore, --- Horspool, Sunday, and Lundh. --- --- References: --- --- * R. S. Boyer, J. S. Moore: A Fast String Searching Algorithm. --- Communications of the ACM, 20, 10, 762-772 (1977) --- --- * R. N. Horspool: Practical Fast Searching in Strings. Software - --- Practice and Experience 10, 501-506 (1980) --- --- * D. M. Sunday: A Very Fast Substring Search Algorithm. --- Communications of the ACM, 33, 8, 132-142 (1990) --- --- * F. Lundh: The Fast Search Algorithm. 
--- (2006) - -module Data.Text.Internal.Search - ( - indices - ) where - -import qualified Data.Text.Array as A -import Data.Word (Word64) -import Data.Text.Internal (Text(..)) -import Data.Bits ((.|.), (.&.)) -import Data.Text.Internal.Unsafe.Shift (shiftL) - -data T = {-# UNPACK #-} !Word64 :* {-# UNPACK #-} !Int - --- | /O(n+m)/ Find the offsets of all non-overlapping indices of --- @needle@ within @haystack@. The offsets returned represent --- uncorrected indices in the low-level \"needle\" array, to which its --- offset must be added. --- --- In (unlikely) bad cases, this algorithm's complexity degrades --- towards /O(n*m)/. -indices :: Text -- ^ Substring to search for (@needle@) - -> Text -- ^ Text to search in (@haystack@) - -> [Int] -indices _needle@(Text narr noff nlen) _haystack@(Text harr hoff hlen) - | nlen == 1 = scanOne (nindex 0) - | nlen <= 0 || ldiff < 0 = [] - | otherwise = scan 0 - where - ldiff = hlen - nlen - nlast = nlen - 1 - z = nindex nlast - nindex k = A.unsafeIndex narr (noff+k) - hindex k = A.unsafeIndex harr (hoff+k) - hindex' k | k == hlen = 0 - | otherwise = A.unsafeIndex harr (hoff+k) - buildTable !i !msk !skp - | i >= nlast = (msk .|. swizzle z) :* skp - | otherwise = buildTable (i+1) (msk .|. swizzle c) skp' - where c = nindex i - skp' | c == z = nlen - i - 2 - | otherwise = skp - swizzle k = 1 `shiftL` (fromIntegral k .&. 0x3f) - scan !i - | i > ldiff = [] - | c == z && candidateMatch 0 = i : scan (i + nlen) - | otherwise = scan (i + delta) - where c = hindex (i + nlast) - candidateMatch !j - | j >= nlast = True - | hindex (i+j) /= nindex j = False - | otherwise = candidateMatch (j+1) - delta | nextInPattern = nlen + 1 - | c == z = skip + 1 - | otherwise = 1 - where nextInPattern = mask .&. 
swizzle (hindex' (i+nlen)) == 0 - !(mask :* skip) = buildTable 0 0 (nlen-2) - scanOne c = loop 0 - where loop !i | i >= hlen = [] - | hindex i == c = i : loop (i+1) - | otherwise = loop (i+1) -{-# INLINE indices #-} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Unsafe/Char.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Unsafe/Char.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Unsafe/Char.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Unsafe/Char.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,95 +0,0 @@ -{-# LANGUAGE CPP, MagicHash #-} - --- | --- Module : Data.Text.Internal.Unsafe.Char --- Copyright : (c) 2008, 2009 Tom Harper, --- (c) 2009, 2010 Bryan O'Sullivan, --- (c) 2009 Duncan Coutts --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- Fast character manipulation functions. 
-module Data.Text.Internal.Unsafe.Char - ( - ord - , unsafeChr - , unsafeChr8 - , unsafeChr32 - , unsafeWrite - -- , unsafeWriteRev - ) where - -#ifdef ASSERTS -import Control.Exception (assert) -#endif -import Control.Monad.ST (ST) -import Data.Bits ((.&.)) -import Data.Text.Internal.Unsafe.Shift (shiftR) -import GHC.Exts (Char(..), Int(..), chr#, ord#, word2Int#) -import GHC.Word (Word8(..), Word16(..), Word32(..)) -import qualified Data.Text.Array as A - -ord :: Char -> Int -ord (C# c#) = I# (ord# c#) -{-# INLINE ord #-} - -unsafeChr :: Word16 -> Char -unsafeChr (W16# w#) = C# (chr# (word2Int# w#)) -{-# INLINE unsafeChr #-} - -unsafeChr8 :: Word8 -> Char -unsafeChr8 (W8# w#) = C# (chr# (word2Int# w#)) -{-# INLINE unsafeChr8 #-} - -unsafeChr32 :: Word32 -> Char -unsafeChr32 (W32# w#) = C# (chr# (word2Int# w#)) -{-# INLINE unsafeChr32 #-} - --- | Write a character into the array at the given offset. Returns --- the number of 'Word16's written. -unsafeWrite :: A.MArray s -> Int -> Char -> ST s Int -unsafeWrite marr i c - | n < 0x10000 = do -#if defined(ASSERTS) - assert (i >= 0) . assert (i < A.length marr) $ return () -#endif - A.unsafeWrite marr i (fromIntegral n) - return 1 - | otherwise = do -#if defined(ASSERTS) - assert (i >= 0) . assert (i < A.length marr - 1) $ return () -#endif - A.unsafeWrite marr i lo - A.unsafeWrite marr (i+1) hi - return 2 - where n = ord c - m = n - 0x10000 - lo = fromIntegral $ (m `shiftR` 10) + 0xD800 - hi = fromIntegral $ (m .&. 0x3FF) + 0xDC00 -{-# INLINE unsafeWrite #-} - -{- -unsafeWriteRev :: A.MArray s Word16 -> Int -> Char -> ST s Int -unsafeWriteRev marr i c - | n < 0x10000 = do - assert (i >= 0) . assert (i < A.length marr) $ - A.unsafeWrite marr i (fromIntegral n) - return (i-1) - | otherwise = do - assert (i >= 1) . 
assert (i < A.length marr) $ - A.unsafeWrite marr (i-1) lo - A.unsafeWrite marr i hi - return (i-2) - where n = ord c - m = n - 0x10000 - lo = fromIntegral $ (m `shiftR` 10) + 0xD800 - hi = fromIntegral $ (m .&. 0x3FF) + 0xDC00 -{-# INLINE unsafeWriteRev #-} --} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Unsafe/Shift.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Unsafe/Shift.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Unsafe/Shift.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Unsafe/Shift.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,72 +0,0 @@ -{-# LANGUAGE MagicHash #-} - --- | --- Module : Data.Text.Internal.Unsafe.Shift --- Copyright : (c) Bryan O'Sullivan 2009 --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- Fast, unchecked bit shifting functions. - -module Data.Text.Internal.Unsafe.Shift - ( - UnsafeShift(..) - ) where - --- import qualified Data.Bits as Bits -import GHC.Base -import GHC.Word - --- | This is a workaround for poor optimisation in GHC 6.8.2. It --- fails to notice constant-width shifts, and adds a test and branch --- to every shift. This imposes about a 10% performance hit. --- --- These functions are undefined when the amount being shifted by is --- greater than the size in bits of a machine Int#. 
-class UnsafeShift a where - shiftL :: a -> Int -> a - shiftR :: a -> Int -> a - -instance UnsafeShift Word16 where - {-# INLINE shiftL #-} - shiftL (W16# x#) (I# i#) = W16# (narrow16Word# (x# `uncheckedShiftL#` i#)) - - {-# INLINE shiftR #-} - shiftR (W16# x#) (I# i#) = W16# (x# `uncheckedShiftRL#` i#) - -instance UnsafeShift Word32 where - {-# INLINE shiftL #-} - shiftL (W32# x#) (I# i#) = W32# (narrow32Word# (x# `uncheckedShiftL#` i#)) - - {-# INLINE shiftR #-} - shiftR (W32# x#) (I# i#) = W32# (x# `uncheckedShiftRL#` i#) - -instance UnsafeShift Word64 where - {-# INLINE shiftL #-} - shiftL (W64# x#) (I# i#) = W64# (x# `uncheckedShiftL64#` i#) - - {-# INLINE shiftR #-} - shiftR (W64# x#) (I# i#) = W64# (x# `uncheckedShiftRL64#` i#) - -instance UnsafeShift Int where - {-# INLINE shiftL #-} - shiftL (I# x#) (I# i#) = I# (x# `iShiftL#` i#) - - {-# INLINE shiftR #-} - shiftR (I# x#) (I# i#) = I# (x# `iShiftRA#` i#) - -{- -instance UnsafeShift Integer where - {-# INLINE shiftL #-} - shiftL = Bits.shiftL - - {-# INLINE shiftR #-} - shiftR = Bits.shiftR --} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Unsafe.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Unsafe.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal/Unsafe.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal/Unsafe.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,56 +0,0 @@ -{-# LANGUAGE CPP, MagicHash, UnboxedTuples #-} -{-# OPTIONS_HADDOCK not-home #-} - --- | --- Module : Data.Text.Internal.Unsafe --- Copyright : (c) 2009, 2010, 2011 Bryan O'Sullivan --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : portable --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! 
--- --- A module containing /unsafe/ operations, for /very very careful/ use --- in /heavily tested/ code. -module Data.Text.Internal.Unsafe - ( - inlineInterleaveST - , inlinePerformIO - ) where - -import GHC.ST (ST(..)) -#if defined(__GLASGOW_HASKELL__) -import GHC.IO (IO(IO)) -import GHC.Base (realWorld#) -#endif - - --- | Just like unsafePerformIO, but we inline it. Big performance gains as --- it exposes lots of things to further inlining. /Very unsafe/. In --- particular, you should do no memory allocation inside an --- 'inlinePerformIO' block. On Hugs this is just @unsafePerformIO@. --- -{-# INLINE inlinePerformIO #-} -inlinePerformIO :: IO a -> a -#if defined(__GLASGOW_HASKELL__) -inlinePerformIO (IO m) = case m realWorld# of (# _, r #) -> r -#else -inlinePerformIO = unsafePerformIO -#endif - --- | Allow an 'ST' computation to be deferred lazily. When passed an --- action of type 'ST' @s@ @a@, the action will only be performed when --- the value of @a@ is demanded. --- --- This function is identical to the normal unsafeInterleaveST, but is --- inlined and hence faster. --- --- /Note/: This operation is highly unsafe, as it can introduce --- externally visible non-determinism into an 'ST' action. 
-inlineInterleaveST :: ST s a -> ST s a -inlineInterleaveST (ST m) = ST $ \ s -> - let r = case m s of (# _, res #) -> res in (# s, r #) -{-# INLINE inlineInterleaveST #-} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Internal.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Internal.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,188 +0,0 @@ -{-# LANGUAGE CPP, DeriveDataTypeable, UnboxedTuples #-} -{-# OPTIONS_HADDOCK not-home #-} - --- | --- Module : Data.Text.Internal --- Copyright : (c) 2008, 2009 Tom Harper, --- (c) 2009, 2010 Bryan O'Sullivan, --- (c) 2009 Duncan Coutts --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- A module containing private 'Text' internals. This exposes the --- 'Text' representation and low level construction functions. --- Modules which extend the 'Text' system may need to use this module. --- --- You should not use this module unless you are determined to monkey --- with the internals, as the functions here do just about nothing to --- preserve data invariants. You have been warned! - -#if defined(__GLASGOW_HASKELL__) && !defined(__HADDOCK__) -#include "MachDeps.h" -#endif - -module Data.Text.Internal - ( - -- * Types - -- $internals - Text(..) 
- -- * Construction - , text - , textP - -- * Safety - , safe - -- * Code that must be here for accessibility - , empty - , empty_ - -- * Utilities - , firstf - -- * Checked multiplication - , mul - , mul32 - , mul64 - -- * Debugging - , showText - ) where - -#if defined(ASSERTS) -import Control.Exception (assert) -#endif -import Data.Bits -import Data.Int (Int32, Int64) -import Data.Text.Internal.Unsafe.Char (ord) -import Data.Typeable (Typeable) -import qualified Data.Text.Array as A - --- | A space efficient, packed, unboxed Unicode text type. -data Text = Text - {-# UNPACK #-} !A.Array -- payload (Word16 elements) - {-# UNPACK #-} !Int -- offset (units of Word16, not Char) - {-# UNPACK #-} !Int -- length (units of Word16, not Char) - deriving (Typeable) - --- | Smart constructor. -text_ :: A.Array -> Int -> Int -> Text -text_ arr off len = -#if defined(ASSERTS) - let c = A.unsafeIndex arr off - alen = A.length arr - in assert (len >= 0) . - assert (off >= 0) . - assert (alen == 0 || len == 0 || off < alen) . - assert (len == 0 || c < 0xDC00 || c > 0xDFFF) $ -#endif - Text arr off len -{-# INLINE text_ #-} - --- | /O(1)/ The empty 'Text'. -empty :: Text -empty = Text A.empty 0 0 -{-# INLINE [1] empty #-} - --- | A non-inlined version of 'empty'. -empty_ :: Text -empty_ = Text A.empty 0 0 -{-# NOINLINE empty_ #-} - --- | Construct a 'Text' without invisibly pinning its byte array in --- memory if its length has dwindled to zero. -text :: A.Array -> Int -> Int -> Text -text arr off len | len == 0 = empty - | otherwise = text_ arr off len -{-# INLINE text #-} - -textP :: A.Array -> Int -> Int -> Text -{-# DEPRECATED textP "Use text instead" #-} -textP = text - --- | A useful 'show'-like function for debugging purposes. -showText :: Text -> String -showText (Text arr off len) = - "Text " ++ show (A.toList arr off len) ++ ' ' : - show off ++ ' ' : show len - --- | Map a 'Char' to a 'Text'-safe value. 
--- --- UTF-16 surrogate code points are not included in the set of Unicode --- scalar values, but are unfortunately admitted as valid 'Char' --- values by Haskell. They cannot be represented in a 'Text'. This --- function remaps those code points to the Unicode replacement --- character (U+FFFD, \'�\'), and leaves other code points --- unchanged. -safe :: Char -> Char -safe c - | ord c .&. 0x1ff800 /= 0xd800 = c - | otherwise = '\xfffd' -{-# INLINE [0] safe #-} - --- | Apply a function to the first element of an optional pair. -firstf :: (a -> c) -> Maybe (a,b) -> Maybe (c,b) -firstf f (Just (a, b)) = Just (f a, b) -firstf _ Nothing = Nothing - --- | Checked multiplication. Calls 'error' if the result would --- overflow. -mul :: Int -> Int -> Int -#if WORD_SIZE_IN_BITS == 64 -mul a b = fromIntegral $ fromIntegral a `mul64` fromIntegral b -#else -mul a b = fromIntegral $ fromIntegral a `mul32` fromIntegral b -#endif -{-# INLINE mul #-} -infixl 7 `mul` - --- | Checked multiplication. Calls 'error' if the result would --- overflow. -mul64 :: Int64 -> Int64 -> Int64 -mul64 a b - | a >= 0 && b >= 0 = mul64_ a b - | a >= 0 = -mul64_ a (-b) - | b >= 0 = -mul64_ (-a) b - | otherwise = mul64_ (-a) (-b) -{-# INLINE mul64 #-} -infixl 7 `mul64` - -mul64_ :: Int64 -> Int64 -> Int64 -mul64_ a b - | ahi > 0 && bhi > 0 = error "overflow" - | top > 0x7fffffff = error "overflow" - | total < 0 = error "overflow" - | otherwise = total - where (# ahi, alo #) = (# a `shiftR` 32, a .&. 0xffffffff #) - (# bhi, blo #) = (# b `shiftR` 32, b .&. 0xffffffff #) - top = ahi * blo + alo * bhi - total = (top `shiftL` 32) + alo * blo -{-# INLINE mul64_ #-} - --- | Checked multiplication. Calls 'error' if the result would --- overflow. 
-mul32 :: Int32 -> Int32 -> Int32 -mul32 a b = case fromIntegral a * fromIntegral b of - ab | ab < min32 || ab > max32 -> error "overflow" - | otherwise -> fromIntegral ab - where min32 = -0x80000000 :: Int64 - max32 = 0x7fffffff -{-# INLINE mul32 #-} -infixl 7 `mul32` - --- $internals --- --- Internally, the 'Text' type is represented as an array of 'Word16' --- UTF-16 code units. The offset and length fields in the constructor --- are in these units, /not/ units of 'Char'. --- --- Invariants that all functions must maintain: --- --- * Since the 'Text' type uses UTF-16 internally, it cannot represent --- characters in the reserved surrogate code point range U+D800 to --- U+DFFF. To maintain this invariant, the 'safe' function maps --- 'Char' values in this range to the replacement character (U+FFFD, --- \'�\'). --- --- * A leading (or \"high\") surrogate code unit (0xD800–0xDBFF) must --- always be followed by a trailing (or \"low\") surrogate code unit --- (0xDC00-0xDFFF). A trailing surrogate code unit must always be --- preceded by a leading surrogate code unit. diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/IO.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/IO.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/IO.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/IO.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,338 +0,0 @@ -{-# LANGUAGE BangPatterns, CPP, RecordWildCards, ScopedTypeVariables #-} -#if __GLASGOW_HASKELL__ >= 702 -{-# LANGUAGE Trustworthy #-} -#endif --- | --- Module : Data.Text.IO --- Copyright : (c) 2009, 2010 Bryan O'Sullivan, --- (c) 2009 Simon Marlow --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- Efficient locale-sensitive support for text I\/O. --- --- Skip past the synopsis for some important notes on performance and --- portability across different versions of GHC. 
- -module Data.Text.IO - ( - -- * Performance - -- $performance - - -- * Locale support - -- $locale - -- * File-at-a-time operations - readFile - , writeFile - , appendFile - -- * Operations on handles - , hGetContents - , hGetChunk - , hGetLine - , hPutStr - , hPutStrLn - -- * Special cases for standard input and output - , interact - , getContents - , getLine - , putStr - , putStrLn - ) where - -import Data.Text (Text) -import Prelude hiding (appendFile, getContents, getLine, interact, - putStr, putStrLn, readFile, writeFile) -import System.IO (Handle, IOMode(..), hPutChar, openFile, stdin, stdout, - withFile) -import qualified Control.Exception as E -import Control.Monad (liftM2, when) -import Data.IORef (readIORef, writeIORef) -import qualified Data.Text as T -import Data.Text.Internal.Fusion (stream) -import Data.Text.Internal.Fusion.Types (Step(..), Stream(..)) -import Data.Text.Internal.IO (hGetLineWith, readChunk) -import GHC.IO.Buffer (Buffer(..), BufferState(..), CharBufElem, CharBuffer, - RawCharBuffer, emptyBuffer, isEmptyBuffer, newCharBuffer, - writeCharBuf) -import GHC.IO.Exception (IOException(ioe_type), IOErrorType(InappropriateType)) -import GHC.IO.Handle.Internals (augmentIOError, hClose_help, wantReadableHandle, - wantWritableHandle) -import GHC.IO.Handle.Text (commitBuffer') -import GHC.IO.Handle.Types (BufferList(..), BufferMode(..), Handle__(..), - HandleType(..), Newline(..)) -import System.IO (hGetBuffering, hFileSize, hSetBuffering, hTell) -import System.IO.Error (isEOFError) - --- $performance --- #performance# --- --- The functions in this module obey the runtime system's locale, --- character set encoding, and line ending conversion settings. --- --- If you know in advance that you will be working with data that has --- a specific encoding (e.g. 
UTF-8), and your application is highly --- performance sensitive, you may find that it is faster to perform --- I\/O with bytestrings and to encode and decode yourself than to use --- the functions in this module. --- --- Whether this will hold depends on the version of GHC you are using, --- the platform you are working on, the data you are working with, and --- the encodings you are using, so be sure to test for yourself. - --- | The 'readFile' function reads a file and returns the contents of --- the file as a string. The entire file is read strictly, as with --- 'getContents'. -readFile :: FilePath -> IO Text -readFile name = openFile name ReadMode >>= hGetContents - --- | Write a string to a file. The file is truncated to zero length --- before writing begins. -writeFile :: FilePath -> Text -> IO () -writeFile p = withFile p WriteMode . flip hPutStr - --- | Write a string the end of a file. -appendFile :: FilePath -> Text -> IO () -appendFile p = withFile p AppendMode . flip hPutStr - -catchError :: String -> Handle -> Handle__ -> IOError -> IO Text -catchError caller h Handle__{..} err - | isEOFError err = do - buf <- readIORef haCharBuffer - return $ if isEmptyBuffer buf - then T.empty - else T.singleton '\r' - | otherwise = E.throwIO (augmentIOError err caller h) - --- | /Experimental./ Read a single chunk of strict text from a --- 'Handle'. The size of the chunk depends on the amount of input --- currently buffered. --- --- This function blocks only if there is no data available, and EOF --- has not yet been reached. Once EOF is reached, this function --- returns an empty string instead of throwing an exception. -hGetChunk :: Handle -> IO Text -hGetChunk h = wantReadableHandle "hGetChunk" h readSingleChunk - where - readSingleChunk hh@Handle__{..} = do - buf <- readIORef haCharBuffer - t <- readChunk hh buf `E.catch` catchError "hGetChunk" h hh - return (hh, t) - --- | Read the remaining contents of a 'Handle' as a string. 
The --- 'Handle' is closed once the contents have been read, or if an --- exception is thrown. --- --- Internally, this function reads a chunk at a time from the --- lower-level buffering abstraction, and concatenates the chunks into --- a single string once the entire file has been read. --- --- As a result, it requires approximately twice as much memory as its --- result to construct its result. For files more than a half of --- available RAM in size, this may result in memory exhaustion. -hGetContents :: Handle -> IO Text -hGetContents h = do - chooseGoodBuffering h - wantReadableHandle "hGetContents" h readAll - where - readAll hh@Handle__{..} = do - let readChunks = do - buf <- readIORef haCharBuffer - t <- readChunk hh buf `E.catch` catchError "hGetContents" h hh - if T.null t - then return [t] - else (t:) `fmap` readChunks - ts <- readChunks - (hh', _) <- hClose_help hh - return (hh'{haType=ClosedHandle}, T.concat ts) - --- | Use a more efficient buffer size if we're reading in --- block-buffered mode with the default buffer size. When we can --- determine the size of the handle we're reading, set the buffer size --- to that, so that we can read the entire file in one chunk. --- Otherwise, use a buffer size of at least 16KB. -chooseGoodBuffering :: Handle -> IO () -chooseGoodBuffering h = do - bufMode <- hGetBuffering h - case bufMode of - BlockBuffering Nothing -> do - d <- E.catch (liftM2 (-) (hFileSize h) (hTell h)) $ \(e::IOException) -> - if ioe_type e == InappropriateType - then return 16384 -- faster than the 2KB default - else E.throwIO e - when (d > 0) . hSetBuffering h . BlockBuffering . Just . fromIntegral $ d - _ -> return () - --- | Read a single line from a handle. -hGetLine :: Handle -> IO Text -hGetLine = hGetLineWith T.concat - --- | Write a string to a handle. -hPutStr :: Handle -> Text -> IO () --- This function is lifted almost verbatim from GHC.IO.Handle.Text. 
-hPutStr h t = do - (buffer_mode, nl) <- - wantWritableHandle "hPutStr" h $ \h_ -> do - bmode <- getSpareBuffer h_ - return (bmode, haOutputNL h_) - let str = stream t - case buffer_mode of - (NoBuffering, _) -> hPutChars h str - (LineBuffering, buf) -> writeLines h nl buf str - (BlockBuffering _, buf) - | nl == CRLF -> writeBlocksCRLF h buf str - | otherwise -> writeBlocksRaw h buf str - -hPutChars :: Handle -> Stream Char -> IO () -hPutChars h (Stream next0 s0 _len) = loop s0 - where - loop !s = case next0 s of - Done -> return () - Skip s' -> loop s' - Yield x s' -> hPutChar h x >> loop s' - --- The following functions are largely lifted from GHC.IO.Handle.Text, --- but adapted to a coinductive stream of data instead of an inductive --- list. --- --- We have several variations of more or less the same code for --- performance reasons. Splitting the original buffered write --- function into line- and block-oriented versions gave us a 2.1x --- performance improvement. Lifting out the raw/cooked newline --- handling gave a few more percent on top. 
- -writeLines :: Handle -> Newline -> Buffer CharBufElem -> Stream Char -> IO () -writeLines h nl buf0 (Stream next0 s0 _len) = outer s0 buf0 - where - outer s1 Buffer{bufRaw=raw, bufSize=len} = inner s1 (0::Int) - where - inner !s !n = - case next0 s of - Done -> commit n False{-no flush-} True{-release-} >> return () - Skip s' -> inner s' n - Yield x s' - | n + 1 >= len -> commit n True{-needs flush-} False >>= outer s - | x == '\n' -> do - n' <- if nl == CRLF - then do n1 <- writeCharBuf raw n '\r' - writeCharBuf raw n1 '\n' - else writeCharBuf raw n x - commit n' True{-needs flush-} False >>= outer s' - | otherwise -> writeCharBuf raw n x >>= inner s' - commit = commitBuffer h raw len - -writeBlocksCRLF :: Handle -> Buffer CharBufElem -> Stream Char -> IO () -writeBlocksCRLF h buf0 (Stream next0 s0 _len) = outer s0 buf0 - where - outer s1 Buffer{bufRaw=raw, bufSize=len} = inner s1 (0::Int) - where - inner !s !n = - case next0 s of - Done -> commit n False{-no flush-} True{-release-} >> return () - Skip s' -> inner s' n - Yield x s' - | n + 1 >= len -> commit n True{-needs flush-} False >>= outer s - | x == '\n' -> do n1 <- writeCharBuf raw n '\r' - writeCharBuf raw n1 '\n' >>= inner s' - | otherwise -> writeCharBuf raw n x >>= inner s' - commit = commitBuffer h raw len - -writeBlocksRaw :: Handle -> Buffer CharBufElem -> Stream Char -> IO () -writeBlocksRaw h buf0 (Stream next0 s0 _len) = outer s0 buf0 - where - outer s1 Buffer{bufRaw=raw, bufSize=len} = inner s1 (0::Int) - where - inner !s !n = - case next0 s of - Done -> commit n False{-no flush-} True{-release-} >> return () - Skip s' -> inner s' n - Yield x s' - | n + 1 >= len -> commit n True{-needs flush-} False >>= outer s - | otherwise -> writeCharBuf raw n x >>= inner s' - commit = commitBuffer h raw len - --- This function is completely lifted from GHC.IO.Handle.Text. 
-getSpareBuffer :: Handle__ -> IO (BufferMode, CharBuffer) -getSpareBuffer Handle__{haCharBuffer=ref, - haBuffers=spare_ref, - haBufferMode=mode} - = do - case mode of - NoBuffering -> return (mode, error "no buffer!") - _ -> do - bufs <- readIORef spare_ref - buf <- readIORef ref - case bufs of - BufferListCons b rest -> do - writeIORef spare_ref rest - return ( mode, emptyBuffer b (bufSize buf) WriteBuffer) - BufferListNil -> do - new_buf <- newCharBuffer (bufSize buf) WriteBuffer - return (mode, new_buf) - - --- This function is completely lifted from GHC.IO.Handle.Text. -commitBuffer :: Handle -> RawCharBuffer -> Int -> Int -> Bool -> Bool - -> IO CharBuffer -commitBuffer hdl !raw !sz !count flush release = - wantWritableHandle "commitAndReleaseBuffer" hdl $ - commitBuffer' raw sz count flush release -{-# INLINE commitBuffer #-} - --- | Write a string to a handle, followed by a newline. -hPutStrLn :: Handle -> Text -> IO () -hPutStrLn h t = hPutStr h t >> hPutChar h '\n' - --- | The 'interact' function takes a function of type @Text -> Text@ --- as its argument. The entire input from the standard input device is --- passed to this function as its argument, and the resulting string --- is output on the standard output device. -interact :: (Text -> Text) -> IO () -interact f = putStr . f =<< getContents - --- | Read all user input on 'stdin' as a single string. -getContents :: IO Text -getContents = hGetContents stdin - --- | Read a single line of user input from 'stdin'. -getLine :: IO Text -getLine = hGetLine stdin - --- | Write a string to 'stdout'. -putStr :: Text -> IO () -putStr = hPutStr stdout - --- | Write a string to 'stdout', followed by a newline. -putStrLn :: Text -> IO () -putStrLn = hPutStrLn stdout - --- $locale --- --- /Note/: The behaviour of functions in this module depends on the --- version of GHC you are using. --- --- Beginning with GHC 6.12, text I\/O is performed using the system or --- handle's current locale and line ending conventions. 
--- --- Under GHC 6.10 and earlier, the system I\/O libraries do not --- support locale-sensitive I\/O or line ending conversion. On these --- versions of GHC, functions in this library all use UTF-8. What --- does this mean in practice? --- --- * All data that is read will be decoded as UTF-8. --- --- * Before data is written, it is first encoded as UTF-8. --- --- * On both reading and writing, the platform's native newline --- conversion is performed. --- --- If you must use a non-UTF-8 locale on an older version of GHC, you --- will have to perform the transcoding yourself, e.g. as follows: --- --- > import qualified Data.ByteString as B --- > import Data.Text (Text) --- > import Data.Text.Encoding (encodeUtf16) --- > --- > putStr_Utf16LE :: Text -> IO () --- > putStr_Utf16LE t = B.putStr (encodeUtf16LE t) diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Lazy/Builder/Int.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Lazy/Builder/Int.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Lazy/Builder/Int.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Lazy/Builder/Int.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,265 +0,0 @@ -{-# LANGUAGE BangPatterns, CPP, MagicHash, RankNTypes, ScopedTypeVariables, - UnboxedTuples #-} -#if __GLASGOW_HASKELL__ >= 702 -{-# LANGUAGE Trustworthy #-} -#endif - --- Module: Data.Text.Lazy.Builder.Int --- Copyright: (c) 2013 Bryan O'Sullivan --- (c) 2011 MailRank, Inc. --- License: BSD3 --- Maintainer: Bryan O'Sullivan --- Stability: experimental --- Portability: portable --- --- Efficiently write an integral value to a 'Builder'. 
- -module Data.Text.Lazy.Builder.Int - ( - decimal - , hexadecimal - ) where - -import Data.Int (Int8, Int16, Int32, Int64) -import Data.Monoid (mempty) -import qualified Data.ByteString.Unsafe as B -import Data.Text.Internal.Builder.Functions ((<>), i2d) -import Data.Text.Internal.Builder -import Data.Text.Internal.Builder.Int.Digits (digits) -import Data.Text.Array -import Data.Word (Word, Word8, Word16, Word32, Word64) -import GHC.Base (quotInt, remInt) -import GHC.Num (quotRemInteger) -import GHC.Types (Int(..)) -import Control.Monad.ST - -#ifdef __GLASGOW_HASKELL__ -# if defined(INTEGER_GMP) -import GHC.Integer.GMP.Internals -# elif defined(INTEGER_SIMPLE) -import GHC.Integer -# else -# error "You need to use either GMP or integer-simple." -# endif -#endif - -#if defined(INTEGER_GMP) || defined(INTEGER_SIMPLE) -# define PAIR(a,b) (# a,b #) -#else -# define PAIR(a,b) (a,b) -#endif - -decimal :: Integral a => a -> Builder -{-# RULES "decimal/Int8" decimal = boundedDecimal :: Int8 -> Builder #-} -{-# RULES "decimal/Int" decimal = boundedDecimal :: Int -> Builder #-} -{-# RULES "decimal/Int16" decimal = boundedDecimal :: Int16 -> Builder #-} -{-# RULES "decimal/Int32" decimal = boundedDecimal :: Int32 -> Builder #-} -{-# RULES "decimal/Int64" decimal = boundedDecimal :: Int64 -> Builder #-} -{-# RULES "decimal/Word" decimal = positive :: Word -> Builder #-} -{-# RULES "decimal/Word8" decimal = positive :: Word8 -> Builder #-} -{-# RULES "decimal/Word16" decimal = positive :: Word16 -> Builder #-} -{-# RULES "decimal/Word32" decimal = positive :: Word32 -> Builder #-} -{-# RULES "decimal/Word64" decimal = positive :: Word64 -> Builder #-} -{-# RULES "decimal/Integer" decimal = integer 10 :: Integer -> Builder #-} -decimal i = decimal' (<= -128) i -{-# NOINLINE decimal #-} - -boundedDecimal :: (Integral a, Bounded a) => a -> Builder -{-# SPECIALIZE boundedDecimal :: Int -> Builder #-} -{-# SPECIALIZE boundedDecimal :: Int8 -> Builder #-} -{-# SPECIALIZE 
boundedDecimal :: Int16 -> Builder #-} -{-# SPECIALIZE boundedDecimal :: Int32 -> Builder #-} -{-# SPECIALIZE boundedDecimal :: Int64 -> Builder #-} -boundedDecimal i = decimal' (== minBound) i - -decimal' :: (Integral a) => (a -> Bool) -> a -> Builder -{-# INLINE decimal' #-} -decimal' p i - | i < 0 = if p i - then let (q, r) = i `quotRem` 10 - qq = -q - !n = countDigits qq - in writeN (n + 2) $ \marr off -> do - unsafeWrite marr off minus - posDecimal marr (off+1) n qq - unsafeWrite marr (off+n+1) (i2w (-r)) - else let j = -i - !n = countDigits j - in writeN (n + 1) $ \marr off -> - unsafeWrite marr off minus >> posDecimal marr (off+1) n j - | otherwise = positive i - -positive :: (Integral a) => a -> Builder -{-# SPECIALIZE positive :: Int -> Builder #-} -{-# SPECIALIZE positive :: Int8 -> Builder #-} -{-# SPECIALIZE positive :: Int16 -> Builder #-} -{-# SPECIALIZE positive :: Int32 -> Builder #-} -{-# SPECIALIZE positive :: Int64 -> Builder #-} -{-# SPECIALIZE positive :: Word -> Builder #-} -{-# SPECIALIZE positive :: Word8 -> Builder #-} -{-# SPECIALIZE positive :: Word16 -> Builder #-} -{-# SPECIALIZE positive :: Word32 -> Builder #-} -{-# SPECIALIZE positive :: Word64 -> Builder #-} -positive i - | i < 10 = writeN 1 $ \marr off -> unsafeWrite marr off (i2w i) - | otherwise = let !n = countDigits i - in writeN n $ \marr off -> posDecimal marr off n i - -posDecimal :: (Integral a) => - forall s. MArray s -> Int -> Int -> a -> ST s () -{-# INLINE posDecimal #-} -posDecimal marr off0 ds v0 = go (off0 + ds - 1) v0 - where go off v - | v >= 100 = do - let (q, r) = v `quotRem` 100 - write2 off r - go (off - 2) q - | v < 10 = unsafeWrite marr off (i2w v) - | otherwise = write2 off v - write2 off i0 = do - let i = fromIntegral i0; j = i + i - unsafeWrite marr off $ get (j + 1) - unsafeWrite marr (off - 1) $ get j - get = fromIntegral . 
B.unsafeIndex digits - -minus, zero :: Word16 -{-# INLINE minus #-} -{-# INLINE zero #-} -minus = 45 -zero = 48 - -i2w :: (Integral a) => a -> Word16 -{-# INLINE i2w #-} -i2w v = zero + fromIntegral v - -countDigits :: (Integral a) => a -> Int -{-# INLINE countDigits #-} -countDigits v0 - | fromIntegral v64 == v0 = go 1 v64 - | otherwise = goBig 1 (fromIntegral v0) - where v64 = fromIntegral v0 - goBig !k (v :: Integer) - | v > big = goBig (k + 19) (v `quot` big) - | otherwise = go k (fromIntegral v) - big = 10000000000000000000 - go !k (v :: Word64) - | v < 10 = k - | v < 100 = k + 1 - | v < 1000 = k + 2 - | v < 1000000000000 = - k + if v < 100000000 - then if v < 1000000 - then if v < 10000 - then 3 - else 4 + fin v 100000 - else 6 + fin v 10000000 - else if v < 10000000000 - then 8 + fin v 1000000000 - else 10 + fin v 100000000000 - | otherwise = go (k + 12) (v `quot` 1000000000000) - fin v n = if v >= n then 1 else 0 - -hexadecimal :: Integral a => a -> Builder -{-# SPECIALIZE hexadecimal :: Int -> Builder #-} -{-# SPECIALIZE hexadecimal :: Int8 -> Builder #-} -{-# SPECIALIZE hexadecimal :: Int16 -> Builder #-} -{-# SPECIALIZE hexadecimal :: Int32 -> Builder #-} -{-# SPECIALIZE hexadecimal :: Int64 -> Builder #-} -{-# SPECIALIZE hexadecimal :: Word -> Builder #-} -{-# SPECIALIZE hexadecimal :: Word8 -> Builder #-} -{-# SPECIALIZE hexadecimal :: Word16 -> Builder #-} -{-# SPECIALIZE hexadecimal :: Word32 -> Builder #-} -{-# SPECIALIZE hexadecimal :: Word64 -> Builder #-} -{-# RULES "hexadecimal/Integer" - hexadecimal = hexInteger :: Integer -> Builder #-} -hexadecimal i - | i < 0 = error hexErrMsg - | otherwise = go i - where - go n | n < 16 = hexDigit n - | otherwise = go (n `quot` 16) <> hexDigit (n `rem` 16) -{-# NOINLINE[0] hexadecimal #-} - -hexInteger :: Integer -> Builder -hexInteger i - | i < 0 = error hexErrMsg - | otherwise = integer 16 i - -hexErrMsg :: String -hexErrMsg = "Data.Text.Lazy.Builder.Int.hexadecimal: applied to negative number" - 
-hexDigit :: Integral a => a -> Builder -hexDigit n - | n <= 9 = singleton $! i2d (fromIntegral n) - | otherwise = singleton $! toEnum (fromIntegral n + 87) -{-# INLINE hexDigit #-} - -data T = T !Integer !Int - -integer :: Int -> Integer -> Builder -#ifdef INTEGER_GMP -integer 10 (S# i#) = decimal (I# i#) -integer 16 (S# i#) = hexadecimal (I# i#) -#else -integer 10 i = decimal i -integer 16 i = hexadecimal i -#endif -integer base i - | i < 0 = singleton '-' <> go (-i) - | otherwise = go i - where - go n | n < maxInt = int (fromInteger n) - | otherwise = putH (splitf (maxInt * maxInt) n) - - splitf p n - | p > n = [n] - | otherwise = splith p (splitf (p*p) n) - - splith p (n:ns) = case n `quotRemInteger` p of - PAIR(q,r) | q > 0 -> q : r : splitb p ns - | otherwise -> r : splitb p ns - splith _ _ = error "splith: the impossible happened." - - splitb p (n:ns) = case n `quotRemInteger` p of - PAIR(q,r) -> q : r : splitb p ns - splitb _ _ = [] - - T maxInt10 maxDigits10 = - until ((>mi) . (*10) . fstT) (\(T n d) -> T (n*10) (d+1)) (T 10 1) - where mi = fromIntegral (maxBound :: Int) - T maxInt16 maxDigits16 = - until ((>mi) . (*16) . 
fstT) (\(T n d) -> T (n*16) (d+1)) (T 16 1) - where mi = fromIntegral (maxBound :: Int) - - fstT (T a _) = a - - maxInt | base == 10 = maxInt10 - | otherwise = maxInt16 - maxDigits | base == 10 = maxDigits10 - | otherwise = maxDigits16 - - putH (n:ns) = case n `quotRemInteger` maxInt of - PAIR(x,y) - | q > 0 -> int q <> pblock r <> putB ns - | otherwise -> int r <> putB ns - where q = fromInteger x - r = fromInteger y - putH _ = error "putH: the impossible happened" - - putB (n:ns) = case n `quotRemInteger` maxInt of - PAIR(x,y) -> pblock q <> pblock r <> putB ns - where q = fromInteger x - r = fromInteger y - putB _ = mempty - - int :: Int -> Builder - int x | base == 10 = decimal x - | otherwise = hexadecimal x - - pblock = loop maxDigits - where - loop !d !n - | d == 1 = hexDigit n - | otherwise = loop (d-1) q <> hexDigit r - where q = n `quotInt` base - r = n `remInt` base diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Lazy/Builder/RealFloat.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Lazy/Builder/RealFloat.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Lazy/Builder/RealFloat.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Lazy/Builder/RealFloat.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,242 +0,0 @@ -{-# LANGUAGE CPP, OverloadedStrings #-} -#if __GLASGOW_HASKELL__ >= 702 -{-# LANGUAGE Trustworthy #-} -#endif - --- | --- Module: Data.Text.Lazy.Builder.RealFloat --- Copyright: (c) The University of Glasgow 1994-2002 --- License: see libraries/base/LICENSE --- --- Write a floating point value to a 'Builder'. - -module Data.Text.Lazy.Builder.RealFloat - ( - FPFormat(..) 
- , realFloat - , formatRealFloat - ) where - -import Data.Array.Base (unsafeAt) -import Data.Array.IArray -import Data.Text.Internal.Builder.Functions ((<>), i2d) -import Data.Text.Lazy.Builder.Int (decimal) -import Data.Text.Internal.Builder.RealFloat.Functions (roundTo) -import Data.Text.Lazy.Builder -import qualified Data.Text as T - --- | Control the rendering of floating point numbers. -data FPFormat = Exponent - -- ^ Scientific notation (e.g. @2.3e123@). - | Fixed - -- ^ Standard decimal notation. - | Generic - -- ^ Use decimal notation for values between @0.1@ and - -- @9,999,999@, and scientific notation otherwise. - deriving (Enum, Read, Show) - --- | Show a signed 'RealFloat' value to full precision, --- using standard decimal notation for arguments whose absolute value lies --- between @0.1@ and @9,999,999@, and scientific notation otherwise. -realFloat :: (RealFloat a) => a -> Builder -{-# SPECIALIZE realFloat :: Float -> Builder #-} -{-# SPECIALIZE realFloat :: Double -> Builder #-} -realFloat x = formatRealFloat Generic Nothing x - -formatRealFloat :: (RealFloat a) => - FPFormat - -> Maybe Int -- ^ Number of decimal places to render. - -> a - -> Builder -{-# SPECIALIZE formatRealFloat :: FPFormat -> Maybe Int -> Float -> Builder #-} -{-# SPECIALIZE formatRealFloat :: FPFormat -> Maybe Int -> Double -> Builder #-} -formatRealFloat fmt decs x - | isNaN x = "NaN" - | isInfinite x = if x < 0 then "-Infinity" else "Infinity" - | x < 0 || isNegativeZero x = singleton '-' <> doFmt fmt (floatToDigits (-x)) - | otherwise = doFmt fmt (floatToDigits x) - where - doFmt format (is, e) = - let ds = map i2d is in - case format of - Generic -> - doFmt (if e < 0 || e > 7 then Exponent else Fixed) - (is,e) - Exponent -> - case decs of - Nothing -> - let show_e' = decimal (e-1) in - case ds of - "0" -> "0.0e0" - [d] -> singleton d <> ".0e" <> show_e' - (d:ds') -> singleton d <> singleton '.' 
<> fromString ds' <> singleton 'e' <> show_e' - [] -> error "formatRealFloat/doFmt/Exponent: []" - Just dec -> - let dec' = max dec 1 in - case is of - [0] -> "0." <> fromText (T.replicate dec' "0") <> "e0" - _ -> - let - (ei,is') = roundTo (dec'+1) is - (d:ds') = map i2d (if ei > 0 then init is' else is') - in - singleton d <> singleton '.' <> fromString ds' <> singleton 'e' <> decimal (e-1+ei) - Fixed -> - let - mk0 ls = case ls of { "" -> "0" ; _ -> fromString ls} - in - case decs of - Nothing - | e <= 0 -> "0." <> fromText (T.replicate (-e) "0") <> fromString ds - | otherwise -> - let - f 0 s rs = mk0 (reverse s) <> singleton '.' <> mk0 rs - f n s "" = f (n-1) ('0':s) "" - f n s (r:rs) = f (n-1) (r:s) rs - in - f e "" ds - Just dec -> - let dec' = max dec 0 in - if e >= 0 then - let - (ei,is') = roundTo (dec' + e) is - (ls,rs) = splitAt (e+ei) (map i2d is') - in - mk0 ls <> (if null rs then "" else singleton '.' <> fromString rs) - else - let - (ei,is') = roundTo dec' (replicate (-e) 0 ++ is) - d:ds' = map i2d (if ei > 0 then is' else 0:is') - in - singleton d <> (if null ds' then "" else singleton '.' <> fromString ds') - - --- Based on "Printing Floating-Point Numbers Quickly and Accurately" --- by R.G. Burger and R.K. Dybvig in PLDI 96. --- This version uses a much slower logarithm estimator. It should be improved. - --- | 'floatToDigits' takes a base and a non-negative 'RealFloat' number, --- and returns a list of digits and an exponent. 
--- In particular, if @x>=0@, and --- --- > floatToDigits base x = ([d1,d2,...,dn], e) --- --- then --- --- (1) @n >= 1@ --- --- (2) @x = 0.d1d2...dn * (base**e)@ --- --- (3) @0 <= di <= base-1@ - -floatToDigits :: (RealFloat a) => a -> ([Int], Int) -{-# SPECIALIZE floatToDigits :: Float -> ([Int], Int) #-} -{-# SPECIALIZE floatToDigits :: Double -> ([Int], Int) #-} -floatToDigits 0 = ([0], 0) -floatToDigits x = - let - (f0, e0) = decodeFloat x - (minExp0, _) = floatRange x - p = floatDigits x - b = floatRadix x - minExp = minExp0 - p -- the real minimum exponent - -- Haskell requires that f be adjusted so denormalized numbers - -- will have an impossibly low exponent. Adjust for this. - (f, e) = - let n = minExp - e0 in - if n > 0 then (f0 `quot` (expt b n), e0+n) else (f0, e0) - (r, s, mUp, mDn) = - if e >= 0 then - let be = expt b e in - if f == expt b (p-1) then - (f*be*b*2, 2*b, be*b, be) -- according to Burger and Dybvig - else - (f*be*2, 2, be, be) - else - if e > minExp && f == expt b (p-1) then - (f*b*2, expt b (-e+1)*2, b, 1) - else - (f*2, expt b (-e)*2, 1, 1) - k :: Int - k = - let - k0 :: Int - k0 = - if b == 2 then - -- logBase 10 2 is very slightly larger than 8651/28738 - -- (about 5.3558e-10), so if log x >= 0, the approximation - -- k1 is too small, hence we add one and need one fixup step less. - -- If log x < 0, the approximation errs rather on the high side. - -- That is usually more than compensated for by ignoring the - -- fractional part of logBase 2 x, but when x is a power of 1/2 - -- or slightly larger and the exponent is a multiple of the - -- denominator of the rational approximation to logBase 10 2, - -- k1 is larger than logBase 10 x. If k1 > 1 + logBase 10 x, - -- we get a leading zero-digit we don't want. - -- With the approximation 3/10, this happened for - -- 0.5^1030, 0.5^1040, ..., 0.5^1070 and values close above. 
- -- The approximation 8651/28738 guarantees k1 < 1 + logBase 10 x - -- for IEEE-ish floating point types with exponent fields - -- <= 17 bits and mantissae of several thousand bits, earlier - -- convergents to logBase 10 2 would fail for long double. - -- Using quot instead of div is a little faster and requires - -- fewer fixup steps for negative lx. - let lx = p - 1 + e0 - k1 = (lx * 8651) `quot` 28738 - in if lx >= 0 then k1 + 1 else k1 - else - -- f :: Integer, log :: Float -> Float, - -- ceiling :: Float -> Int - ceiling ((log (fromInteger (f+1) :: Float) + - fromIntegral e * log (fromInteger b)) / - log 10) ---WAS: fromInt e * log (fromInteger b)) - - fixup n = - if n >= 0 then - if r + mUp <= expt 10 n * s then n else fixup (n+1) - else - if expt 10 (-n) * (r + mUp) <= s then n else fixup (n+1) - in - fixup k0 - - gen ds rn sN mUpN mDnN = - let - (dn, rn') = (rn * 10) `quotRem` sN - mUpN' = mUpN * 10 - mDnN' = mDnN * 10 - in - case (rn' < mDnN', rn' + mUpN' > sN) of - (True, False) -> dn : ds - (False, True) -> dn+1 : ds - (True, True) -> if rn' * 2 < sN then dn : ds else dn+1 : ds - (False, False) -> gen (dn:ds) rn' sN mUpN' mDnN' - - rds = - if k >= 0 then - gen [] r (s * expt 10 k) mUp mDn - else - let bk = expt 10 (-k) in - gen [] (r * bk) s (mUp * bk) (mDn * bk) - in - (map fromIntegral (reverse rds), k) - --- Exponentiation with a cache for the most common numbers. -minExpt, maxExpt :: Int -minExpt = 0 -maxExpt = 1100 - -expt :: Integer -> Int -> Integer -expt base n - | base == 2 && n >= minExpt && n <= maxExpt = expts `unsafeAt` n - | base == 10 && n <= maxExpt10 = expts10 `unsafeAt` n - | otherwise = base^n - -expts :: Array Int Integer -expts = array (minExpt,maxExpt) [(n,2^n) | n <- [minExpt .. maxExpt]] - -maxExpt10 :: Int -maxExpt10 = 324 - -expts10 :: Array Int Integer -expts10 = array (minExpt,maxExpt10) [(n,10^n) | n <- [minExpt .. 
maxExpt10]] diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Lazy/Builder.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Lazy/Builder.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Lazy/Builder.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Lazy/Builder.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,58 +0,0 @@ -{-# LANGUAGE BangPatterns, CPP, Rank2Types #-} -#if __GLASGOW_HASKELL__ >= 702 -{-# LANGUAGE Trustworthy #-} -#endif - ------------------------------------------------------------------------------ --- | --- Module : Data.Text.Lazy.Builder --- Copyright : (c) 2013 Bryan O'Sullivan --- (c) 2010 Johan Tibell --- License : BSD3-style (see LICENSE) --- --- Maintainer : Johan Tibell --- Stability : experimental --- Portability : portable to Hugs and GHC --- --- Efficient construction of lazy @Text@ values. The principal --- operations on a @Builder@ are @singleton@, @fromText@, and --- @fromLazyText@, which construct new builders, and 'mappend', which --- concatenates two builders. --- --- To get maximum performance when building lazy @Text@ values using a --- builder, associate @mappend@ calls to the right. For example, --- prefer --- --- > singleton 'a' `mappend` (singleton 'b' `mappend` singleton 'c') --- --- to --- --- > singleton 'a' `mappend` singleton 'b' `mappend` singleton 'c' --- --- as the latter associates @mappend@ to the left. Or, equivalently, --- prefer --- --- > singleton 'a' <> singleton 'b' <> singleton 'c' --- --- since the '<>' from recent versions of 'Data.Monoid' associates --- to the right. 
- ------------------------------------------------------------------------------ - -module Data.Text.Lazy.Builder - ( -- * The Builder type - Builder - , toLazyText - , toLazyTextWith - - -- * Constructing Builders - , singleton - , fromText - , fromLazyText - , fromString - - -- * Flushing the buffer state - , flush - ) where - -import Data.Text.Internal.Builder diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Lazy/Encoding.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Lazy/Encoding.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Lazy/Encoding.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Lazy/Encoding.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,250 +0,0 @@ -{-# LANGUAGE BangPatterns,CPP #-} -#if __GLASGOW_HASKELL__ >= 702 -{-# LANGUAGE Trustworthy #-} -#endif --- | --- Module : Data.Text.Lazy.Encoding --- Copyright : (c) 2009, 2010 Bryan O'Sullivan --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : portable --- --- Functions for converting lazy 'Text' values to and from lazy --- 'ByteString', using several standard encodings. 
--- --- To gain access to a much larger variety of encodings, use the --- @text-icu@ package: - -module Data.Text.Lazy.Encoding - ( - -- * Decoding ByteStrings to Text - -- $strict - decodeASCII - , decodeLatin1 - , decodeUtf8 - , decodeUtf16LE - , decodeUtf16BE - , decodeUtf32LE - , decodeUtf32BE - - -- ** Catchable failure - , decodeUtf8' - - -- ** Controllable error handling - , decodeUtf8With - , decodeUtf16LEWith - , decodeUtf16BEWith - , decodeUtf32LEWith - , decodeUtf32BEWith - - -- * Encoding Text to ByteStrings - , encodeUtf8 - , encodeUtf16LE - , encodeUtf16BE - , encodeUtf32LE - , encodeUtf32BE - -#if MIN_VERSION_bytestring(0,10,4) - -- * Encoding Text using ByteString Builders - , encodeUtf8Builder - , encodeUtf8BuilderEscaped -#endif - ) where - -import Control.Exception (evaluate, try) -import Data.Text.Encoding.Error (OnDecodeError, UnicodeException, strictDecode) -import Data.Text.Internal.Lazy (Text(..), chunk, empty, foldrChunks) -import qualified Data.ByteString as S -import qualified Data.ByteString.Lazy as B -import qualified Data.ByteString.Lazy.Internal as B -import qualified Data.ByteString.Unsafe as B -#if MIN_VERSION_bytestring(0,10,4) -import Data.Word (Word8) -import Data.Monoid (mempty, (<>)) -import qualified Data.ByteString.Builder as B -import qualified Data.ByteString.Builder.Extra as B (safeStrategy, toLazyByteStringWith) -import qualified Data.ByteString.Builder.Prim as BP -import qualified Data.Text as T -#endif -import qualified Data.Text.Encoding as TE -import qualified Data.Text.Lazy as L -import qualified Data.Text.Internal.Lazy.Encoding.Fusion as E -import qualified Data.Text.Internal.Lazy.Fusion as F -import Data.Text.Unsafe (unsafeDupablePerformIO) - --- $strict --- --- All of the single-parameter functions for decoding bytestrings --- encoded in one of the Unicode Transformation Formats (UTF) operate --- in a /strict/ mode: each will throw an exception if given invalid --- input. 
--- --- Each function has a variant, whose name is suffixed with -'With', --- that gives greater control over the handling of decoding errors. --- For instance, 'decodeUtf8' will throw an exception, but --- 'decodeUtf8With' allows the programmer to determine what to do on a --- decoding error. - --- | /Deprecated/. Decode a 'ByteString' containing 7-bit ASCII --- encoded text. -decodeASCII :: B.ByteString -> Text -decodeASCII = decodeUtf8 -{-# DEPRECATED decodeASCII "Use decodeUtf8 instead" #-} - --- | Decode a 'ByteString' containing Latin-1 (aka ISO-8859-1) encoded text. -decodeLatin1 :: B.ByteString -> Text -decodeLatin1 = foldr (chunk . TE.decodeLatin1) empty . B.toChunks - --- | Decode a 'ByteString' containing UTF-8 encoded text. -decodeUtf8With :: OnDecodeError -> B.ByteString -> Text -decodeUtf8With onErr (B.Chunk b0 bs0) = - case TE.streamDecodeUtf8With onErr b0 of - TE.Some t l f -> chunk t (go f l bs0) - where - go f0 _ (B.Chunk b bs) = - case f0 b of - TE.Some t l f -> chunk t (go f l bs) - go _ l _ - | S.null l = empty - | otherwise = case onErr desc (Just (B.unsafeHead l)) of - Nothing -> empty - Just c -> L.singleton c - desc = "Data.Text.Lazy.Encoding.decodeUtf8With: Invalid UTF-8 stream" -decodeUtf8With _ _ = empty - --- | Decode a 'ByteString' containing UTF-8 encoded text that is known --- to be valid. --- --- If the input contains any invalid UTF-8 data, an exception will be --- thrown that cannot be caught in pure code. For more control over --- the handling of invalid data, use 'decodeUtf8'' or --- 'decodeUtf8With'. -decodeUtf8 :: B.ByteString -> Text -decodeUtf8 = decodeUtf8With strictDecode -{-# INLINE[0] decodeUtf8 #-} - --- This rule seems to cause performance loss. -{- RULES "LAZY STREAM stream/decodeUtf8' fusion" [1] - forall bs. F.stream (decodeUtf8' bs) = E.streamUtf8 strictDecode bs #-} - --- | Decode a 'ByteString' containing UTF-8 encoded text.. 
--- --- If the input contains any invalid UTF-8 data, the relevant --- exception will be returned, otherwise the decoded text. --- --- /Note/: this function is /not/ lazy, as it must decode its entire --- input before it can return a result. If you need lazy (streaming) --- decoding, use 'decodeUtf8With' in lenient mode. -decodeUtf8' :: B.ByteString -> Either UnicodeException Text -decodeUtf8' bs = unsafeDupablePerformIO $ do - let t = decodeUtf8 bs - try (evaluate (rnf t `seq` t)) - where - rnf Empty = () - rnf (Chunk _ ts) = rnf ts -{-# INLINE decodeUtf8' #-} - -encodeUtf8 :: Text -> B.ByteString -#if MIN_VERSION_bytestring(0,10,4) -encodeUtf8 Empty = B.empty -encodeUtf8 lt@(Chunk t _) = - B.toLazyByteStringWith strategy B.empty $ encodeUtf8Builder lt - where - -- To improve our small string performance, we use a strategy that - -- allocates a buffer that is guaranteed to be large enough for the - -- encoding of the first chunk, but not larger than the default - -- B.smallChunkSize. We clamp the firstChunkSize to ensure that we don't - -- generate too large buffers which hamper streaming. - firstChunkSize = min B.smallChunkSize (4 * (T.length t + 1)) - strategy = B.safeStrategy firstChunkSize B.defaultChunkSize - -encodeUtf8Builder :: Text -> B.Builder -encodeUtf8Builder = foldrChunks (\c b -> TE.encodeUtf8Builder c <> b) mempty - -{-# INLINE encodeUtf8BuilderEscaped #-} -encodeUtf8BuilderEscaped :: BP.BoundedPrim Word8 -> Text -> B.Builder -encodeUtf8BuilderEscaped prim = - foldrChunks (\c b -> TE.encodeUtf8BuilderEscaped prim c <> b) mempty - -#else -encodeUtf8 (Chunk c cs) = B.Chunk (TE.encodeUtf8 c) (encodeUtf8 cs) -encodeUtf8 Empty = B.Empty -#endif - --- | Decode text from little endian UTF-16 encoding. -decodeUtf16LEWith :: OnDecodeError -> B.ByteString -> Text -decodeUtf16LEWith onErr bs = F.unstream (E.streamUtf16LE onErr bs) -{-# INLINE decodeUtf16LEWith #-} - --- | Decode text from little endian UTF-16 encoding. 
--- --- If the input contains any invalid little endian UTF-16 data, an --- exception will be thrown. For more control over the handling of --- invalid data, use 'decodeUtf16LEWith'. -decodeUtf16LE :: B.ByteString -> Text -decodeUtf16LE = decodeUtf16LEWith strictDecode -{-# INLINE decodeUtf16LE #-} - --- | Decode text from big endian UTF-16 encoding. -decodeUtf16BEWith :: OnDecodeError -> B.ByteString -> Text -decodeUtf16BEWith onErr bs = F.unstream (E.streamUtf16BE onErr bs) -{-# INLINE decodeUtf16BEWith #-} - --- | Decode text from big endian UTF-16 encoding. --- --- If the input contains any invalid big endian UTF-16 data, an --- exception will be thrown. For more control over the handling of --- invalid data, use 'decodeUtf16BEWith'. -decodeUtf16BE :: B.ByteString -> Text -decodeUtf16BE = decodeUtf16BEWith strictDecode -{-# INLINE decodeUtf16BE #-} - --- | Encode text using little endian UTF-16 encoding. -encodeUtf16LE :: Text -> B.ByteString -encodeUtf16LE txt = B.fromChunks (foldrChunks ((:) . TE.encodeUtf16LE) [] txt) -{-# INLINE encodeUtf16LE #-} - --- | Encode text using big endian UTF-16 encoding. -encodeUtf16BE :: Text -> B.ByteString -encodeUtf16BE txt = B.fromChunks (foldrChunks ((:) . TE.encodeUtf16BE) [] txt) -{-# INLINE encodeUtf16BE #-} - --- | Decode text from little endian UTF-32 encoding. -decodeUtf32LEWith :: OnDecodeError -> B.ByteString -> Text -decodeUtf32LEWith onErr bs = F.unstream (E.streamUtf32LE onErr bs) -{-# INLINE decodeUtf32LEWith #-} - --- | Decode text from little endian UTF-32 encoding. --- --- If the input contains any invalid little endian UTF-32 data, an --- exception will be thrown. For more control over the handling of --- invalid data, use 'decodeUtf32LEWith'. -decodeUtf32LE :: B.ByteString -> Text -decodeUtf32LE = decodeUtf32LEWith strictDecode -{-# INLINE decodeUtf32LE #-} - --- | Decode text from big endian UTF-32 encoding. 
-decodeUtf32BEWith :: OnDecodeError -> B.ByteString -> Text -decodeUtf32BEWith onErr bs = F.unstream (E.streamUtf32BE onErr bs) -{-# INLINE decodeUtf32BEWith #-} - --- | Decode text from big endian UTF-32 encoding. --- --- If the input contains any invalid big endian UTF-32 data, an --- exception will be thrown. For more control over the handling of --- invalid data, use 'decodeUtf32BEWith'. -decodeUtf32BE :: B.ByteString -> Text -decodeUtf32BE = decodeUtf32BEWith strictDecode -{-# INLINE decodeUtf32BE #-} - --- | Encode text using little endian UTF-32 encoding. -encodeUtf32LE :: Text -> B.ByteString -encodeUtf32LE txt = B.fromChunks (foldrChunks ((:) . TE.encodeUtf32LE) [] txt) -{-# INLINE encodeUtf32LE #-} - --- | Encode text using big endian UTF-32 encoding. -encodeUtf32BE :: Text -> B.ByteString -encodeUtf32BE txt = B.fromChunks (foldrChunks ((:) . TE.encodeUtf32BE) [] txt) -{-# INLINE encodeUtf32BE #-} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Lazy/Internal.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Lazy/Internal.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Lazy/Internal.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Lazy/Internal.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,20 +0,0 @@ -{-# LANGUAGE BangPatterns, DeriveDataTypeable #-} --- | --- Module : Data.Text.Lazy.Internal --- Copyright : (c) 2013 Bryan O'Sullivan --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- This module has been renamed to 'Data.Text.Internal.Lazy'. This --- name for the module will be removed in the next major release. 
- -module Data.Text.Lazy.Internal - {-# DEPRECATED "Use Data.Text.Internal.Lazy instead" #-} - ( - module Data.Text.Internal.Lazy - ) where - -import Data.Text.Internal.Lazy diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Lazy/IO.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Lazy/IO.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Lazy/IO.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Lazy/IO.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,196 +0,0 @@ -{-# LANGUAGE BangPatterns, CPP, RecordWildCards #-} -#if __GLASGOW_HASKELL__ >= 702 -{-# LANGUAGE Trustworthy #-} -#endif --- | --- Module : Data.Text.Lazy.IO --- Copyright : (c) 2009, 2010 Bryan O'Sullivan, --- (c) 2009 Simon Marlow --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- Efficient locale-sensitive support for lazy text I\/O. --- --- Skip past the synopsis for some important notes on performance and --- portability across different versions of GHC. 
- -module Data.Text.Lazy.IO - ( - -- * Performance - -- $performance - - -- * Locale support - -- $locale - -- * File-at-a-time operations - readFile - , writeFile - , appendFile - -- * Operations on handles - , hGetContents - , hGetLine - , hPutStr - , hPutStrLn - -- * Special cases for standard input and output - , interact - , getContents - , getLine - , putStr - , putStrLn - ) where - -import Data.Text.Lazy (Text) -import Prelude hiding (appendFile, getContents, getLine, interact, - putStr, putStrLn, readFile, writeFile) -import System.IO (Handle, IOMode(..), hPutChar, openFile, stdin, stdout, - withFile) -import qualified Data.Text.IO as T -import qualified Data.Text.Lazy as L -import qualified Control.Exception as E -import Control.Monad (when) -import Data.IORef (readIORef) -import Data.Text.Internal.IO (hGetLineWith, readChunk) -import Data.Text.Internal.Lazy (chunk, empty) -import GHC.IO.Buffer (isEmptyBuffer) -import GHC.IO.Exception (IOException(..), IOErrorType(..), ioException) -import GHC.IO.Handle.Internals (augmentIOError, hClose_help, - wantReadableHandle, withHandle) -import GHC.IO.Handle.Types (Handle__(..), HandleType(..)) -import System.IO (BufferMode(..), hGetBuffering, hSetBuffering) -import System.IO.Error (isEOFError) -import System.IO.Unsafe (unsafeInterleaveIO) - --- $performance --- --- The functions in this module obey the runtime system's locale, --- character set encoding, and line ending conversion settings. --- --- If you know in advance that you will be working with data that has --- a specific encoding (e.g. UTF-8), and your application is highly --- performance sensitive, you may find that it is faster to perform --- I\/O with bytestrings and to encode and decode yourself than to use --- the functions in this module. --- --- Whether this will hold depends on the version of GHC you are using, --- the platform you are working on, the data you are working with, and --- the encodings you are using, so be sure to test for yourself. 
- --- | Read a file and return its contents as a string. The file is --- read lazily, as with 'getContents'. -readFile :: FilePath -> IO Text -readFile name = openFile name ReadMode >>= hGetContents - --- | Write a string to a file. The file is truncated to zero length --- before writing begins. -writeFile :: FilePath -> Text -> IO () -writeFile p = withFile p WriteMode . flip hPutStr - --- | Write a string the end of a file. -appendFile :: FilePath -> Text -> IO () -appendFile p = withFile p AppendMode . flip hPutStr - --- | Lazily read the remaining contents of a 'Handle'. The 'Handle' --- will be closed after the read completes, or on error. -hGetContents :: Handle -> IO Text -hGetContents h = do - chooseGoodBuffering h - wantReadableHandle "hGetContents" h $ \hh -> do - ts <- lazyRead h - return (hh{haType=SemiClosedHandle}, ts) - --- | Use a more efficient buffer size if we're reading in --- block-buffered mode with the default buffer size. -chooseGoodBuffering :: Handle -> IO () -chooseGoodBuffering h = do - bufMode <- hGetBuffering h - when (bufMode == BlockBuffering Nothing) $ - hSetBuffering h (BlockBuffering (Just 16384)) - -lazyRead :: Handle -> IO Text -lazyRead h = unsafeInterleaveIO $ - withHandle "hGetContents" h $ \hh -> do - case haType hh of - ClosedHandle -> return (hh, L.empty) - SemiClosedHandle -> lazyReadBuffered h hh - _ -> ioException - (IOError (Just h) IllegalOperation "hGetContents" - "illegal handle type" Nothing Nothing) - -lazyReadBuffered :: Handle -> Handle__ -> IO (Handle__, Text) -lazyReadBuffered h hh@Handle__{..} = do - buf <- readIORef haCharBuffer - (do t <- readChunk hh buf - ts <- lazyRead h - return (hh, chunk t ts)) `E.catch` \e -> do - (hh', _) <- hClose_help hh - if isEOFError e - then return $ if isEmptyBuffer buf - then (hh', empty) - else (hh', L.singleton '\r') - else E.throwIO (augmentIOError e "hGetContents" h) - --- | Read a single line from a handle. 
-hGetLine :: Handle -> IO Text -hGetLine = hGetLineWith L.fromChunks - --- | Write a string to a handle. -hPutStr :: Handle -> Text -> IO () -hPutStr h = mapM_ (T.hPutStr h) . L.toChunks - --- | Write a string to a handle, followed by a newline. -hPutStrLn :: Handle -> Text -> IO () -hPutStrLn h t = hPutStr h t >> hPutChar h '\n' - --- | The 'interact' function takes a function of type @Text -> Text@ --- as its argument. The entire input from the standard input device is --- passed (lazily) to this function as its argument, and the resulting --- string is output on the standard output device. -interact :: (Text -> Text) -> IO () -interact f = putStr . f =<< getContents - --- | Lazily read all user input on 'stdin' as a single string. -getContents :: IO Text -getContents = hGetContents stdin - --- | Read a single line of user input from 'stdin'. -getLine :: IO Text -getLine = hGetLine stdin - --- | Write a string to 'stdout'. -putStr :: Text -> IO () -putStr = hPutStr stdout - --- | Write a string to 'stdout', followed by a newline. -putStrLn :: Text -> IO () -putStrLn = hPutStrLn stdout - --- $locale --- --- /Note/: The behaviour of functions in this module depends on the --- version of GHC you are using. --- --- Beginning with GHC 6.12, text I\/O is performed using the system or --- handle's current locale and line ending conventions. --- --- Under GHC 6.10 and earlier, the system I\/O libraries /do not --- support/ locale-sensitive I\/O or line ending conversion. On these --- versions of GHC, functions in this library all use UTF-8. What --- does this mean in practice? --- --- * All data that is read will be decoded as UTF-8. --- --- * Before data is written, it is first encoded as UTF-8. --- --- * On both reading and writing, the platform's native newline --- conversion is performed. --- --- If you must use a non-UTF-8 locale on an older version of GHC, you --- will have to perform the transcoding yourself, e.g. 
as follows: --- --- > import qualified Data.ByteString.Lazy as B --- > import Data.Text.Lazy (Text) --- > import Data.Text.Lazy.Encoding (encodeUtf16) --- > --- > putStr_Utf16LE :: Text -> IO () --- > putStr_Utf16LE t = B.putStr (encodeUtf16LE t) diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Lazy/Read.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Lazy/Read.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Lazy/Read.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Lazy/Read.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,191 +0,0 @@ -{-# LANGUAGE OverloadedStrings, CPP #-} -#if __GLASGOW_HASKELL__ >= 702 -{-# LANGUAGE Trustworthy #-} -#endif - --- | --- Module : Data.Text.Lazy.Read --- Copyright : (c) 2010, 2011 Bryan O'Sullivan --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- Functions used frequently when reading textual data. -module Data.Text.Lazy.Read - ( - Reader - , decimal - , hexadecimal - , signed - , rational - , double - ) where - -import Control.Monad (liftM) -import Data.Char (isDigit, isHexDigit) -import Data.Int (Int8, Int16, Int32, Int64) -import Data.Ratio ((%)) -import Data.Text.Internal.Read -import Data.Text.Lazy as T -import Data.Word (Word, Word8, Word16, Word32, Word64) - --- | Read some text. If the read succeeds, return its value and the --- remaining text, otherwise an error message. -type Reader a = IReader Text a -type Parser = IParser Text - --- | Read a decimal integer. The input must begin with at least one --- decimal digit, and is consumed until a non-digit or end of string --- is reached. --- --- This function does not handle leading sign characters. If you need --- to handle signed input, use @'signed' 'decimal'@. --- --- /Note/: For fixed-width integer types, this function does not --- attempt to detect overflow, so a sufficiently long input may give --- incorrect results. 
If you are worried about overflow, use --- 'Integer' for your result type. -decimal :: Integral a => Reader a -{-# SPECIALIZE decimal :: Reader Int #-} -{-# SPECIALIZE decimal :: Reader Int8 #-} -{-# SPECIALIZE decimal :: Reader Int16 #-} -{-# SPECIALIZE decimal :: Reader Int32 #-} -{-# SPECIALIZE decimal :: Reader Int64 #-} -{-# SPECIALIZE decimal :: Reader Integer #-} -{-# SPECIALIZE decimal :: Reader Word #-} -{-# SPECIALIZE decimal :: Reader Word8 #-} -{-# SPECIALIZE decimal :: Reader Word16 #-} -{-# SPECIALIZE decimal :: Reader Word32 #-} -{-# SPECIALIZE decimal :: Reader Word64 #-} -decimal txt - | T.null h = Left "input does not start with a digit" - | otherwise = Right (T.foldl' go 0 h, t) - where (h,t) = T.span isDigit txt - go n d = (n * 10 + fromIntegral (digitToInt d)) - --- | Read a hexadecimal integer, consisting of an optional leading --- @\"0x\"@ followed by at least one decimal digit. Input is consumed --- until a non-hex-digit or end of string is reached. This function --- is case insensitive. --- --- This function does not handle leading sign characters. If you need --- to handle signed input, use @'signed' 'hexadecimal'@. --- --- /Note/: For fixed-width integer types, this function does not --- attempt to detect overflow, so a sufficiently long input may give --- incorrect results. If you are worried about overflow, use --- 'Integer' for your result type. 
-hexadecimal :: Integral a => Reader a -{-# SPECIALIZE hexadecimal :: Reader Int #-} -{-# SPECIALIZE hexadecimal :: Reader Integer #-} -hexadecimal txt - | h == "0x" || h == "0X" = hex t - | otherwise = hex txt - where (h,t) = T.splitAt 2 txt - -hex :: Integral a => Reader a -{-# SPECIALIZE hexadecimal :: Reader Int #-} -{-# SPECIALIZE hexadecimal :: Reader Int8 #-} -{-# SPECIALIZE hexadecimal :: Reader Int16 #-} -{-# SPECIALIZE hexadecimal :: Reader Int32 #-} -{-# SPECIALIZE hexadecimal :: Reader Int64 #-} -{-# SPECIALIZE hexadecimal :: Reader Integer #-} -{-# SPECIALIZE hexadecimal :: Reader Word #-} -{-# SPECIALIZE hexadecimal :: Reader Word8 #-} -{-# SPECIALIZE hexadecimal :: Reader Word16 #-} -{-# SPECIALIZE hexadecimal :: Reader Word32 #-} -{-# SPECIALIZE hexadecimal :: Reader Word64 #-} -hex txt - | T.null h = Left "input does not start with a hexadecimal digit" - | otherwise = Right (T.foldl' go 0 h, t) - where (h,t) = T.span isHexDigit txt - go n d = (n * 16 + fromIntegral (hexDigitToInt d)) - --- | Read an optional leading sign character (@\'-\'@ or @\'+\'@) and --- apply it to the result of applying the given reader. -signed :: Num a => Reader a -> Reader a -{-# INLINE signed #-} -signed f = runP (signa (P f)) - --- | Read a rational number. --- --- This function accepts an optional leading sign character, followed --- by at least one decimal digit. The syntax similar to that accepted --- by the 'read' function, with the exception that a trailing @\'.\'@ --- or @\'e\'@ /not/ followed by a number is not consumed. 
--- --- Examples: --- --- >rational "3" == Right (3.0, "") --- >rational "3.1" == Right (3.1, "") --- >rational "3e4" == Right (30000.0, "") --- >rational "3.1e4" == Right (31000.0, "") --- >rational ".3" == Left "input does not start with a digit" --- >rational "e3" == Left "input does not start with a digit" --- --- Examples of differences from 'read': --- --- >rational "3.foo" == Right (3.0, ".foo") --- >rational "3e" == Right (3.0, "e") -rational :: Fractional a => Reader a -{-# SPECIALIZE rational :: Reader Double #-} -rational = floaty $ \real frac fracDenom -> fromRational $ - real % 1 + frac % fracDenom - --- | Read a rational number. --- --- The syntax accepted by this function is the same as for 'rational'. --- --- /Note/: This function is almost ten times faster than 'rational', --- but is slightly less accurate. --- --- The 'Double' type supports about 16 decimal places of accuracy. --- For 94.2% of numbers, this function and 'rational' give identical --- results, but for the remaining 5.8%, this function loses precision --- around the 15th decimal place. For 0.001% of numbers, this --- function will lose precision at the 13th or 14th decimal place. 
-double :: Reader Double -double = floaty $ \real frac fracDenom -> - fromIntegral real + - fromIntegral frac / fromIntegral fracDenom - -signa :: Num a => Parser a -> Parser a -{-# SPECIALIZE signa :: Parser Int -> Parser Int #-} -{-# SPECIALIZE signa :: Parser Int8 -> Parser Int8 #-} -{-# SPECIALIZE signa :: Parser Int16 -> Parser Int16 #-} -{-# SPECIALIZE signa :: Parser Int32 -> Parser Int32 #-} -{-# SPECIALIZE signa :: Parser Int64 -> Parser Int64 #-} -{-# SPECIALIZE signa :: Parser Integer -> Parser Integer #-} -signa p = do - sign <- perhaps '+' $ char (\c -> c == '-' || c == '+') - if sign == '+' then p else negate `liftM` p - -char :: (Char -> Bool) -> Parser Char -char p = P $ \t -> case T.uncons t of - Just (c,t') | p c -> Right (c,t') - _ -> Left "character does not match" - -floaty :: Fractional a => (Integer -> Integer -> Integer -> a) -> Reader a -{-# INLINE floaty #-} -floaty f = runP $ do - sign <- perhaps '+' $ char (\c -> c == '-' || c == '+') - real <- P decimal - T fraction fracDigits <- perhaps (T 0 0) $ do - _ <- char (=='.') - digits <- P $ \t -> Right (fromIntegral . T.length $ T.takeWhile isDigit t, t) - n <- P decimal - return $ T n digits - let e c = c == 'e' || c == 'E' - power <- perhaps 0 (char e >> signa (P decimal) :: Parser Int) - let n = if fracDigits == 0 - then if power == 0 - then fromIntegral real - else fromIntegral real * (10 ^^ power) - else if power == 0 - then f real fraction (10 ^ fracDigits) - else f real fraction (10 ^ fracDigits) * (10 ^^ power) - return $! 
if sign == '+' - then n - else -n diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Lazy.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Lazy.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Lazy.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Lazy.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,1606 +0,0 @@ -{-# OPTIONS_GHC -fno-warn-orphans #-} -{-# LANGUAGE BangPatterns, MagicHash, CPP #-} -#if __GLASGOW_HASKELL__ >= 702 -{-# LANGUAGE Trustworthy #-} -#endif -#if __GLASGOW_HASKELL__ >= 708 -{-# LANGUAGE TypeFamilies #-} -#endif - --- | --- Module : Data.Text.Lazy --- Copyright : (c) 2009, 2010, 2012 Bryan O'Sullivan --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- A time and space-efficient implementation of Unicode text using --- lists of packed arrays. --- --- /Note/: Read below the synopsis for important notes on the use of --- this module. --- --- The representation used by this module is suitable for high --- performance use and for streaming large quantities of data. It --- provides a means to manipulate a large body of text without --- requiring that the entire content be resident in memory. --- --- Some operations, such as 'concat', 'append', 'reverse' and 'cons', --- have better time complexity than their "Data.Text" equivalents, due --- to the underlying representation being a list of chunks. For other --- operations, lazy 'Text's are usually within a few percent of strict --- ones, but often with better heap usage if used in a streaming --- fashion. For data larger than available memory, or if you have --- tight memory constraints, this module will be the only option. --- --- This module is intended to be imported @qualified@, to avoid name --- clashes with "Prelude" functions. eg. 
--- --- > import qualified Data.Text.Lazy as L - -module Data.Text.Lazy - ( - -- * Fusion - -- $fusion - - -- * Acceptable data - -- $replacement - - -- * Types - Text - - -- * Creation and elimination - , pack - , unpack - , singleton - , empty - , fromChunks - , toChunks - , toStrict - , fromStrict - , foldrChunks - , foldlChunks - - -- * Basic interface - , cons - , snoc - , append - , uncons - , head - , last - , tail - , init - , null - , length - , compareLength - - -- * Transformations - , map - , intercalate - , intersperse - , transpose - , reverse - , replace - - -- ** Case conversion - -- $case - , toCaseFold - , toLower - , toUpper - , toTitle - - -- ** Justification - , justifyLeft - , justifyRight - , center - - -- * Folds - , foldl - , foldl' - , foldl1 - , foldl1' - , foldr - , foldr1 - - -- ** Special folds - , concat - , concatMap - , any - , all - , maximum - , minimum - - -- * Construction - - -- ** Scans - , scanl - , scanl1 - , scanr - , scanr1 - - -- ** Accumulating maps - , mapAccumL - , mapAccumR - - -- ** Generation and unfolding - , replicate - , unfoldr - , unfoldrN - - -- * Substrings - - -- ** Breaking strings - , take - , takeEnd - , drop - , dropEnd - , takeWhile - , dropWhile - , dropWhileEnd - , dropAround - , strip - , stripStart - , stripEnd - , splitAt - , span - , breakOn - , breakOnEnd - , break - , group - , groupBy - , inits - , tails - - -- ** Breaking into many substrings - -- $split - , splitOn - , split - , chunksOf - -- , breakSubstring - - -- ** Breaking into lines and words - , lines - , words - , unlines - , unwords - - -- * Predicates - , isPrefixOf - , isSuffixOf - , isInfixOf - - -- ** View patterns - , stripPrefix - , stripSuffix - , commonPrefixes - - -- * Searching - , filter - , find - , breakOnAll - , partition - - -- , findSubstring - - -- * Indexing - , index - , count - - -- * Zipping and unzipping - , zip - , zipWith - - -- -* Ordered text - -- , sort - ) where - -import Prelude (Char, Bool(..), 
Maybe(..), String, - Eq(..), Ord(..), Ordering(..), Read(..), Show(..), - (&&), (||), (+), (-), (.), ($), (++), - error, flip, fmap, fromIntegral, not, otherwise, quot) -import qualified Prelude as P -#if defined(HAVE_DEEPSEQ) -import Control.DeepSeq (NFData(..)) -#endif -import Data.Int (Int64) -import qualified Data.List as L -import Data.Char (isSpace) -import Data.Data (Data(gfoldl, toConstr, gunfold, dataTypeOf)) -import Data.Data (mkNoRepType) -import Data.Monoid (Monoid(..)) -import Data.String (IsString(..)) -import qualified Data.Text as T -import qualified Data.Text.Internal as T -import qualified Data.Text.Internal.Fusion.Common as S -import qualified Data.Text.Unsafe as T -import qualified Data.Text.Internal.Lazy.Fusion as S -import Data.Text.Internal.Fusion.Types (PairS(..)) -import Data.Text.Internal.Lazy.Fusion (stream, unstream) -import Data.Text.Internal.Lazy (Text(..), chunk, empty, foldlChunks, foldrChunks) -import Data.Text.Internal (firstf, safe, text) -import qualified Data.Text.Internal.Functions as F -import Data.Text.Internal.Lazy.Search (indices) -#if __GLASGOW_HASKELL__ >= 702 -import qualified GHC.CString as GHC -#else -import qualified GHC.Base as GHC -#endif -#if __GLASGOW_HASKELL__ >= 708 -import qualified GHC.Exts as Exts -#endif -import GHC.Prim (Addr#) - --- $fusion --- --- Most of the functions in this module are subject to /fusion/, --- meaning that a pipeline of such functions will usually allocate at --- most one 'Text' value. --- --- As an example, consider the following pipeline: --- --- > import Data.Text.Lazy as T --- > import Data.Text.Lazy.Encoding as E --- > import Data.ByteString.Lazy (ByteString) --- > --- > countChars :: ByteString -> Int --- > countChars = T.length . T.toUpper . E.decodeUtf8 --- --- From the type signatures involved, this looks like it should --- allocate one 'ByteString' value, and two 'Text' values. 
However, --- when a module is compiled with optimisation enabled under GHC, the --- two intermediate 'Text' values will be optimised away, and the --- function will be compiled down to a single loop over the source --- 'ByteString'. --- --- Functions that can be fused by the compiler are documented with the --- phrase \"Subject to fusion\". - --- $replacement --- --- A 'Text' value is a sequence of Unicode scalar values, as defined --- in §3.9, definition D76 of the Unicode 5.2 standard: --- . As --- such, a 'Text' cannot contain values in the range U+D800 to U+DFFF --- inclusive. Haskell implementations admit all Unicode code points --- (§3.4, definition D10) as 'Char' values, including code points --- from this invalid range. This means that there are some 'Char' --- values that are not valid Unicode scalar values, and the functions --- in this module must handle those cases. --- --- Within this module, many functions construct a 'Text' from one or --- more 'Char' values. Those functions will substitute 'Char' values --- that are not valid Unicode scalar values with the replacement --- character \"�\" (U+FFFD). Functions that perform this --- inspection and replacement are documented with the phrase --- \"Performs replacement on invalid scalar values\". --- --- (One reason for this policy of replacement is that internally, a --- 'Text' value is represented as packed UTF-16 data. Values in the --- range U+D800 through U+DFFF are used by UTF-16 to denote surrogate --- code points, and so cannot be represented. The functions replace --- invalid scalar values, instead of dropping them, as a security --- measure. 
For details, see Unicode Technical Report 36, §3.5: --- ) - -equal :: Text -> Text -> Bool -equal Empty Empty = True -equal Empty _ = False -equal _ Empty = False -equal (Chunk a as) (Chunk b bs) = - case compare lenA lenB of - LT -> a == (T.takeWord16 lenA b) && - as `equal` Chunk (T.dropWord16 lenA b) bs - EQ -> a == b && as `equal` bs - GT -> T.takeWord16 lenB a == b && - Chunk (T.dropWord16 lenB a) as `equal` bs - where lenA = T.lengthWord16 a - lenB = T.lengthWord16 b - -instance Eq Text where - (==) = equal - {-# INLINE (==) #-} - -instance Ord Text where - compare = compareText - -compareText :: Text -> Text -> Ordering -compareText Empty Empty = EQ -compareText Empty _ = LT -compareText _ Empty = GT -compareText (Chunk a0 as) (Chunk b0 bs) = outer a0 b0 - where - outer ta@(T.Text arrA offA lenA) tb@(T.Text arrB offB lenB) = go 0 0 - where - go !i !j - | i >= lenA = compareText as (chunk (T.Text arrB (offB+j) (lenB-j)) bs) - | j >= lenB = compareText (chunk (T.Text arrA (offA+i) (lenA-i)) as) bs - | a < b = LT - | a > b = GT - | otherwise = go (i+di) (j+dj) - where T.Iter a di = T.iter ta i - T.Iter b dj = T.iter tb j - -instance Show Text where - showsPrec p ps r = showsPrec p (unpack ps) r - -instance Read Text where - readsPrec p str = [(pack x,y) | (x,y) <- readsPrec p str] - -instance Monoid Text where - mempty = empty - mappend = append - mconcat = concat - -instance IsString Text where - fromString = pack - -#if __GLASGOW_HASKELL__ >= 708 -instance Exts.IsList Text where - type Item Text = Char - fromList = pack - toList = unpack -#endif - -#if defined(HAVE_DEEPSEQ) -instance NFData Text where - rnf Empty = () - rnf (Chunk _ ts) = rnf ts -#endif - -instance Data Text where - gfoldl f z txt = z pack `f` (unpack txt) - toConstr _ = error "Data.Text.Lazy.Text.toConstr" - gunfold _ _ = error "Data.Text.Lazy.Text.gunfold" - dataTypeOf _ = mkNoRepType "Data.Text.Lazy.Text" - --- | /O(n)/ Convert a 'String' into a 'Text'. --- --- Subject to fusion. 
Performs replacement on invalid scalar values. -pack :: String -> Text -pack = unstream . S.streamList . L.map safe -{-# INLINE [1] pack #-} - --- | /O(n)/ Convert a 'Text' into a 'String'. --- Subject to fusion. -unpack :: Text -> String -unpack t = S.unstreamList (stream t) -{-# INLINE [1] unpack #-} - --- | /O(n)/ Convert a literal string into a Text. -unpackCString# :: Addr# -> Text -unpackCString# addr# = unstream (S.streamCString# addr#) -{-# NOINLINE unpackCString# #-} - -{-# RULES "TEXT literal" forall a. - unstream (S.streamList (L.map safe (GHC.unpackCString# a))) - = unpackCString# a #-} - -{-# RULES "TEXT literal UTF8" forall a. - unstream (S.streamList (L.map safe (GHC.unpackCStringUtf8# a))) - = unpackCString# a #-} - -{-# RULES "LAZY TEXT empty literal" - unstream (S.streamList (L.map safe [])) - = Empty #-} - -{-# RULES "LAZY TEXT empty literal" forall a. - unstream (S.streamList (L.map safe [a])) - = Chunk (T.singleton a) Empty #-} - --- | /O(1)/ Convert a character into a Text. Subject to fusion. --- Performs replacement on invalid scalar values. -singleton :: Char -> Text -singleton c = Chunk (T.singleton c) Empty -{-# INLINE [1] singleton #-} - -{-# RULES -"LAZY TEXT singleton -> fused" [~1] forall c. - singleton c = unstream (S.singleton c) -"LAZY TEXT singleton -> unfused" [1] forall c. - unstream (S.singleton c) = singleton c - #-} - --- | /O(c)/ Convert a list of strict 'T.Text's into a lazy 'Text'. -fromChunks :: [T.Text] -> Text -fromChunks cs = L.foldr chunk Empty cs - --- | /O(n)/ Convert a lazy 'Text' into a list of strict 'T.Text's. -toChunks :: Text -> [T.Text] -toChunks cs = foldrChunks (:) [] cs - --- | /O(n)/ Convert a lazy 'Text' into a strict 'T.Text'. -toStrict :: Text -> T.Text -toStrict t = T.concat (toChunks t) -{-# INLINE [1] toStrict #-} - --- | /O(c)/ Convert a strict 'T.Text' into a lazy 'Text'. 
-fromStrict :: T.Text -> Text -fromStrict t = chunk t Empty -{-# INLINE [1] fromStrict #-} - --- ----------------------------------------------------------------------------- --- * Basic functions - --- | /O(n)/ Adds a character to the front of a 'Text'. This function --- is more costly than its 'List' counterpart because it requires --- copying a new array. Subject to fusion. -cons :: Char -> Text -> Text -cons c t = Chunk (T.singleton c) t -{-# INLINE [1] cons #-} - -infixr 5 `cons` - -{-# RULES -"LAZY TEXT cons -> fused" [~1] forall c t. - cons c t = unstream (S.cons c (stream t)) -"LAZY TEXT cons -> unfused" [1] forall c t. - unstream (S.cons c (stream t)) = cons c t - #-} - --- | /O(n)/ Adds a character to the end of a 'Text'. This copies the --- entire array in the process, unless fused. Subject to fusion. -snoc :: Text -> Char -> Text -snoc t c = foldrChunks Chunk (singleton c) t -{-# INLINE [1] snoc #-} - -{-# RULES -"LAZY TEXT snoc -> fused" [~1] forall t c. - snoc t c = unstream (S.snoc (stream t) c) -"LAZY TEXT snoc -> unfused" [1] forall t c. - unstream (S.snoc (stream t) c) = snoc t c - #-} - --- | /O(n\/c)/ Appends one 'Text' to another. Subject to fusion. -append :: Text -> Text -> Text -append xs ys = foldrChunks Chunk ys xs -{-# INLINE [1] append #-} - -{-# RULES -"LAZY TEXT append -> fused" [~1] forall t1 t2. - append t1 t2 = unstream (S.append (stream t1) (stream t2)) -"LAZY TEXT append -> unfused" [1] forall t1 t2. - unstream (S.append (stream t1) (stream t2)) = append t1 t2 - #-} - --- | /O(1)/ Returns the first character and rest of a 'Text', or --- 'Nothing' if empty. Subject to fusion. -uncons :: Text -> Maybe (Char, Text) -uncons Empty = Nothing -uncons (Chunk t ts) = Just (T.unsafeHead t, ts') - where ts' | T.compareLength t 1 == EQ = ts - | otherwise = Chunk (T.unsafeTail t) ts -{-# INLINE uncons #-} - --- | /O(1)/ Returns the first character of a 'Text', which must be --- non-empty. Subject to fusion. 
-head :: Text -> Char -head t = S.head (stream t) -{-# INLINE head #-} - --- | /O(1)/ Returns all characters after the head of a 'Text', which --- must be non-empty. Subject to fusion. -tail :: Text -> Text -tail (Chunk t ts) = chunk (T.tail t) ts -tail Empty = emptyError "tail" -{-# INLINE [1] tail #-} - -{-# RULES -"LAZY TEXT tail -> fused" [~1] forall t. - tail t = unstream (S.tail (stream t)) -"LAZY TEXT tail -> unfused" [1] forall t. - unstream (S.tail (stream t)) = tail t - #-} - --- | /O(1)/ Returns all but the last character of a 'Text', which must --- be non-empty. Subject to fusion. -init :: Text -> Text -init (Chunk t0 ts0) = go t0 ts0 - where go t (Chunk t' ts) = Chunk t (go t' ts) - go t Empty = chunk (T.init t) Empty -init Empty = emptyError "init" -{-# INLINE [1] init #-} - -{-# RULES -"LAZY TEXT init -> fused" [~1] forall t. - init t = unstream (S.init (stream t)) -"LAZY TEXT init -> unfused" [1] forall t. - unstream (S.init (stream t)) = init t - #-} - --- | /O(1)/ Tests whether a 'Text' is empty or not. Subject to --- fusion. -null :: Text -> Bool -null Empty = True -null _ = False -{-# INLINE [1] null #-} - -{-# RULES -"LAZY TEXT null -> fused" [~1] forall t. - null t = S.null (stream t) -"LAZY TEXT null -> unfused" [1] forall t. - S.null (stream t) = null t - #-} - --- | /O(1)/ Tests whether a 'Text' contains exactly one character. --- Subject to fusion. -isSingleton :: Text -> Bool -isSingleton = S.isSingleton . stream -{-# INLINE isSingleton #-} - --- | /O(1)/ Returns the last character of a 'Text', which must be --- non-empty. Subject to fusion. -last :: Text -> Char -last Empty = emptyError "last" -last (Chunk t ts) = go t ts - where go _ (Chunk t' ts') = go t' ts' - go t' Empty = T.last t' -{-# INLINE [1] last #-} - -{-# RULES -"LAZY TEXT last -> fused" [~1] forall t. - last t = S.last (stream t) -"LAZY TEXT last -> unfused" [1] forall t. - S.last (stream t) = last t - #-} - --- | /O(n)/ Returns the number of characters in a 'Text'. 
--- Subject to fusion. -length :: Text -> Int64 -length = foldlChunks go 0 - where go l t = l + fromIntegral (T.length t) -{-# INLINE [1] length #-} - -{-# RULES -"LAZY TEXT length -> fused" [~1] forall t. - length t = S.length (stream t) -"LAZY TEXT length -> unfused" [1] forall t. - S.length (stream t) = length t - #-} - --- | /O(n)/ Compare the count of characters in a 'Text' to a number. --- Subject to fusion. --- --- This function gives the same answer as comparing against the result --- of 'length', but can short circuit if the count of characters is --- greater than the number, and hence be more efficient. -compareLength :: Text -> Int64 -> Ordering -compareLength t n = S.compareLengthI (stream t) n -{-# INLINE [1] compareLength #-} - --- We don't apply those otherwise appealing length-to-compareLength --- rewrite rules here, because they can change the strictness --- properties of code. - --- | /O(n)/ 'map' @f@ @t@ is the 'Text' obtained by applying @f@ to --- each element of @t@. Subject to fusion. Performs replacement on --- invalid scalar values. -map :: (Char -> Char) -> Text -> Text -map f t = unstream (S.map (safe . f) (stream t)) -{-# INLINE [1] map #-} - --- | /O(n)/ The 'intercalate' function takes a 'Text' and a list of --- 'Text's and concatenates the list after interspersing the first --- argument between each element of the list. -intercalate :: Text -> [Text] -> Text -intercalate t = concat . (F.intersperse t) -{-# INLINE intercalate #-} - --- | /O(n)/ The 'intersperse' function takes a character and places it --- between the characters of a 'Text'. Subject to fusion. Performs --- replacement on invalid scalar values. -intersperse :: Char -> Text -> Text -intersperse c t = unstream (S.intersperse (safe c) (stream t)) -{-# INLINE intersperse #-} - --- | /O(n)/ Left-justify a string to the given length, using the --- specified fill character on the right. Subject to fusion. Performs --- replacement on invalid scalar values. 
--- --- Examples: --- --- > justifyLeft 7 'x' "foo" == "fooxxxx" --- > justifyLeft 3 'x' "foobar" == "foobar" -justifyLeft :: Int64 -> Char -> Text -> Text -justifyLeft k c t - | len >= k = t - | otherwise = t `append` replicateChar (k-len) c - where len = length t -{-# INLINE [1] justifyLeft #-} - -{-# RULES -"LAZY TEXT justifyLeft -> fused" [~1] forall k c t. - justifyLeft k c t = unstream (S.justifyLeftI k c (stream t)) -"LAZY TEXT justifyLeft -> unfused" [1] forall k c t. - unstream (S.justifyLeftI k c (stream t)) = justifyLeft k c t - #-} - --- | /O(n)/ Right-justify a string to the given length, using the --- specified fill character on the left. Performs replacement on --- invalid scalar values. --- --- Examples: --- --- > justifyRight 7 'x' "bar" == "xxxxbar" --- > justifyRight 3 'x' "foobar" == "foobar" -justifyRight :: Int64 -> Char -> Text -> Text -justifyRight k c t - | len >= k = t - | otherwise = replicateChar (k-len) c `append` t - where len = length t -{-# INLINE justifyRight #-} - --- | /O(n)/ Center a string to the given length, using the specified --- fill character on either side. Performs replacement on invalid --- scalar values. --- --- Examples: --- --- > center 8 'x' "HS" = "xxxHSxxx" -center :: Int64 -> Char -> Text -> Text -center k c t - | len >= k = t - | otherwise = replicateChar l c `append` t `append` replicateChar r c - where len = length t - d = k - len - r = d `quot` 2 - l = d - r -{-# INLINE center #-} - --- | /O(n)/ The 'transpose' function transposes the rows and columns --- of its 'Text' argument. Note that this function uses 'pack', --- 'unpack', and the list version of transpose, and is thus not very --- efficient. -transpose :: [Text] -> [Text] -transpose ts = L.map (\ss -> Chunk (T.pack ss) Empty) - (L.transpose (L.map unpack ts)) --- TODO: make this fast - --- | /O(n)/ 'reverse' @t@ returns the elements of @t@ in reverse order. 
-reverse :: Text -> Text -reverse = rev Empty - where rev a Empty = a - rev a (Chunk t ts) = rev (Chunk (T.reverse t) a) ts - --- | /O(m+n)/ Replace every non-overlapping occurrence of @needle@ in --- @haystack@ with @replacement@. --- --- This function behaves as though it was defined as follows: --- --- @ --- replace needle replacement haystack = --- 'intercalate' replacement ('splitOn' needle haystack) --- @ --- --- As this suggests, each occurrence is replaced exactly once. So if --- @needle@ occurs in @replacement@, that occurrence will /not/ itself --- be replaced recursively: --- --- > replace "oo" "foo" "oo" == "foo" --- --- In cases where several instances of @needle@ overlap, only the --- first one will be replaced: --- --- > replace "ofo" "bar" "ofofo" == "barfo" --- --- In (unlikely) bad cases, this function's time complexity degrades --- towards /O(n*m)/. -replace :: Text - -- ^ @needle@ to search for. If this string is empty, an - -- error will occur. - -> Text - -- ^ @replacement@ to replace @needle@ with. - -> Text - -- ^ @haystack@ in which to search. - -> Text -replace s d = intercalate d . splitOn s -{-# INLINE replace #-} - --- ---------------------------------------------------------------------------- --- ** Case conversions (folds) - --- $case --- --- With Unicode text, it is incorrect to use combinators like @map --- toUpper@ to case convert each character of a string individually. --- Instead, use the whole-string case conversion functions from this --- module. For correctness in different writing systems, these --- functions may map one input character to two or three output --- characters. - --- | /O(n)/ Convert a string to folded case. Subject to fusion. --- --- This function is mainly useful for performing caseless (or case --- insensitive) string comparisons. 
--- --- A string @x@ is a caseless match for a string @y@ if and only if: --- --- @toCaseFold x == toCaseFold y@ --- --- The result string may be longer than the input string, and may --- differ from applying 'toLower' to the input string. For instance, --- the Armenian small ligature men now (U+FB13) is case folded to the --- bigram men now (U+0574 U+0576), while the micro sign (U+00B5) is --- case folded to the Greek small letter letter mu (U+03BC) instead of --- itself. -toCaseFold :: Text -> Text -toCaseFold t = unstream (S.toCaseFold (stream t)) -{-# INLINE [0] toCaseFold #-} - --- | /O(n)/ Convert a string to lower case, using simple case --- conversion. Subject to fusion. --- --- The result string may be longer than the input string. For --- instance, the Latin capital letter I with dot above (U+0130) maps --- to the sequence Latin small letter i (U+0069) followed by combining --- dot above (U+0307). -toLower :: Text -> Text -toLower t = unstream (S.toLower (stream t)) -{-# INLINE toLower #-} - --- | /O(n)/ Convert a string to upper case, using simple case --- conversion. Subject to fusion. --- --- The result string may be longer than the input string. For --- instance, the German eszett (U+00DF) maps to the two-letter --- sequence SS. -toUpper :: Text -> Text -toUpper t = unstream (S.toUpper (stream t)) -{-# INLINE toUpper #-} - - --- | /O(n)/ Convert a string to title case, using simple case --- conversion. Subject to fusion. --- --- The first letter of the input is converted to title case, as is --- every subsequent letter that immediately follows a non-letter. --- Every letter that immediately follows another letter is converted --- to lower case. --- --- The result string may be longer than the input string. For example, --- the Latin small ligature fl (U+FB02) is converted to the --- sequence Latin capital letter F (U+0046) followed by Latin small --- letter l (U+006C). 
--- --- /Note/: this function does not take language or culture specific --- rules into account. For instance, in English, different style --- guides disagree on whether the book name \"The Hill of the Red --- Fox\" is correctly title cased—but this function will --- capitalize /every/ word. -toTitle :: Text -> Text -toTitle t = unstream (S.toTitle (stream t)) -{-# INLINE toTitle #-} - --- | /O(n)/ 'foldl', applied to a binary operator, a starting value --- (typically the left-identity of the operator), and a 'Text', --- reduces the 'Text' using the binary operator, from left to right. --- Subject to fusion. -foldl :: (a -> Char -> a) -> a -> Text -> a -foldl f z t = S.foldl f z (stream t) -{-# INLINE foldl #-} - --- | /O(n)/ A strict version of 'foldl'. --- Subject to fusion. -foldl' :: (a -> Char -> a) -> a -> Text -> a -foldl' f z t = S.foldl' f z (stream t) -{-# INLINE foldl' #-} - --- | /O(n)/ A variant of 'foldl' that has no starting value argument, --- and thus must be applied to a non-empty 'Text'. Subject to fusion. -foldl1 :: (Char -> Char -> Char) -> Text -> Char -foldl1 f t = S.foldl1 f (stream t) -{-# INLINE foldl1 #-} - --- | /O(n)/ A strict version of 'foldl1'. Subject to fusion. -foldl1' :: (Char -> Char -> Char) -> Text -> Char -foldl1' f t = S.foldl1' f (stream t) -{-# INLINE foldl1' #-} - --- | /O(n)/ 'foldr', applied to a binary operator, a starting value --- (typically the right-identity of the operator), and a 'Text', --- reduces the 'Text' using the binary operator, from right to left. --- Subject to fusion. -foldr :: (Char -> a -> a) -> a -> Text -> a -foldr f z t = S.foldr f z (stream t) -{-# INLINE foldr #-} - --- | /O(n)/ A variant of 'foldr' that has no starting value argument, --- and thus must be applied to a non-empty 'Text'. Subject to --- fusion. -foldr1 :: (Char -> Char -> Char) -> Text -> Char -foldr1 f t = S.foldr1 f (stream t) -{-# INLINE foldr1 #-} - --- | /O(n)/ Concatenate a list of 'Text's. 
-concat :: [Text] -> Text -concat = to - where - go Empty css = to css - go (Chunk c cs) css = Chunk c (go cs css) - to [] = Empty - to (cs:css) = go cs css -{-# INLINE concat #-} - --- | /O(n)/ Map a function over a 'Text' that results in a 'Text', and --- concatenate the results. -concatMap :: (Char -> Text) -> Text -> Text -concatMap f = concat . foldr ((:) . f) [] -{-# INLINE concatMap #-} - --- | /O(n)/ 'any' @p@ @t@ determines whether any character in the --- 'Text' @t@ satisifes the predicate @p@. Subject to fusion. -any :: (Char -> Bool) -> Text -> Bool -any p t = S.any p (stream t) -{-# INLINE any #-} - --- | /O(n)/ 'all' @p@ @t@ determines whether all characters in the --- 'Text' @t@ satisify the predicate @p@. Subject to fusion. -all :: (Char -> Bool) -> Text -> Bool -all p t = S.all p (stream t) -{-# INLINE all #-} - --- | /O(n)/ 'maximum' returns the maximum value from a 'Text', which --- must be non-empty. Subject to fusion. -maximum :: Text -> Char -maximum t = S.maximum (stream t) -{-# INLINE maximum #-} - --- | /O(n)/ 'minimum' returns the minimum value from a 'Text', which --- must be non-empty. Subject to fusion. -minimum :: Text -> Char -minimum t = S.minimum (stream t) -{-# INLINE minimum #-} - --- | /O(n)/ 'scanl' is similar to 'foldl', but returns a list of --- successive reduced values from the left. Subject to fusion. --- Performs replacement on invalid scalar values. --- --- > scanl f z [x1, x2, ...] == [z, z `f` x1, (z `f` x1) `f` x2, ...] --- --- Note that --- --- > last (scanl f z xs) == foldl f z xs. -scanl :: (Char -> Char -> Char) -> Char -> Text -> Text -scanl f z t = unstream (S.scanl g z (stream t)) - where g a b = safe (f a b) -{-# INLINE scanl #-} - --- | /O(n)/ 'scanl1' is a variant of 'scanl' that has no starting --- value argument. Subject to fusion. Performs replacement on --- invalid scalar values. --- --- > scanl1 f [x1, x2, ...] == [x1, x1 `f` x2, ...] 
-scanl1 :: (Char -> Char -> Char) -> Text -> Text -scanl1 f t0 = case uncons t0 of - Nothing -> empty - Just (t,ts) -> scanl f t ts -{-# INLINE scanl1 #-} - --- | /O(n)/ 'scanr' is the right-to-left dual of 'scanl'. Performs --- replacement on invalid scalar values. --- --- > scanr f v == reverse . scanl (flip f) v . reverse -scanr :: (Char -> Char -> Char) -> Char -> Text -> Text -scanr f v = reverse . scanl g v . reverse - where g a b = safe (f b a) - --- | /O(n)/ 'scanr1' is a variant of 'scanr' that has no starting --- value argument. Performs replacement on invalid scalar values. -scanr1 :: (Char -> Char -> Char) -> Text -> Text -scanr1 f t | null t = empty - | otherwise = scanr f (last t) (init t) - --- | /O(n)/ Like a combination of 'map' and 'foldl''. Applies a --- function to each element of a 'Text', passing an accumulating --- parameter from left to right, and returns a final 'Text'. Performs --- replacement on invalid scalar values. -mapAccumL :: (a -> Char -> (a,Char)) -> a -> Text -> (a, Text) -mapAccumL f = go - where - go z (Chunk c cs) = (z'', Chunk c' cs') - where (z', c') = T.mapAccumL f z c - (z'', cs') = go z' cs - go z Empty = (z, Empty) -{-# INLINE mapAccumL #-} - --- | The 'mapAccumR' function behaves like a combination of 'map' and --- a strict 'foldr'; it applies a function to each element of a --- 'Text', passing an accumulating parameter from right to left, and --- returning a final value of this accumulator together with the new --- 'Text'. Performs replacement on invalid scalar values. -mapAccumR :: (a -> Char -> (a,Char)) -> a -> Text -> (a, Text) -mapAccumR f = go - where - go z (Chunk c cs) = (z'', Chunk c' cs') - where (z'', c') = T.mapAccumR f z' c - (z', cs') = go z cs - go z Empty = (z, Empty) -{-# INLINE mapAccumR #-} - --- | /O(n*m)/ 'replicate' @n@ @t@ is a 'Text' consisting of the input --- @t@ repeated @n@ times. 
-replicate :: Int64 -> Text -> Text -replicate n t - | null t || n <= 0 = empty - | isSingleton t = replicateChar n (head t) - | otherwise = concat (rep 0) - where rep !i | i >= n = [] - | otherwise = t : rep (i+1) -{-# INLINE [1] replicate #-} - --- | /O(n)/ 'replicateChar' @n@ @c@ is a 'Text' of length @n@ with @c@ the --- value of every element. Subject to fusion. -replicateChar :: Int64 -> Char -> Text -replicateChar n c = unstream (S.replicateCharI n (safe c)) -{-# INLINE replicateChar #-} - -{-# RULES -"LAZY TEXT replicate/singleton -> replicateChar" [~1] forall n c. - replicate n (singleton c) = replicateChar n c - #-} - --- | /O(n)/, where @n@ is the length of the result. The 'unfoldr' --- function is analogous to the List 'L.unfoldr'. 'unfoldr' builds a --- 'Text' from a seed value. The function takes the element and --- returns 'Nothing' if it is done producing the 'Text', otherwise --- 'Just' @(a,b)@. In this case, @a@ is the next 'Char' in the --- string, and @b@ is the seed value for further production. Performs --- replacement on invalid scalar values. -unfoldr :: (a -> Maybe (Char,a)) -> a -> Text -unfoldr f s = unstream (S.unfoldr (firstf safe . f) s) -{-# INLINE unfoldr #-} - --- | /O(n)/ Like 'unfoldr', 'unfoldrN' builds a 'Text' from a seed --- value. However, the length of the result should be limited by the --- first argument to 'unfoldrN'. This function is more efficient than --- 'unfoldr' when the maximum length of the result is known and --- correct, otherwise its performance is similar to 'unfoldr'. --- Performs replacement on invalid scalar values. -unfoldrN :: Int64 -> (a -> Maybe (Char,a)) -> a -> Text -unfoldrN n f s = unstream (S.unfoldrN n (firstf safe . f) s) -{-# INLINE unfoldrN #-} - --- | /O(n)/ 'take' @n@, applied to a 'Text', returns the prefix of the --- 'Text' of length @n@, or the 'Text' itself if @n@ is greater than --- the length of the Text. Subject to fusion. 
-take :: Int64 -> Text -> Text -take i _ | i <= 0 = Empty -take i t0 = take' i t0 - where take' 0 _ = Empty - take' _ Empty = Empty - take' n (Chunk t ts) - | n < len = Chunk (T.take (fromIntegral n) t) Empty - | otherwise = Chunk t (take' (n - len) ts) - where len = fromIntegral (T.length t) -{-# INLINE [1] take #-} - -{-# RULES -"LAZY TEXT take -> fused" [~1] forall n t. - take n t = unstream (S.take n (stream t)) -"LAZY TEXT take -> unfused" [1] forall n t. - unstream (S.take n (stream t)) = take n t - #-} - --- | /O(n)/ 'takeEnd' @n@ @t@ returns the suffix remaining after --- taking @n@ characters from the end of @t@. --- --- Examples: --- --- > takeEnd 3 "foobar" == "bar" -takeEnd :: Int64 -> Text -> Text -takeEnd n t0 - | n <= 0 = empty - | otherwise = takeChunk n empty . L.reverse . toChunks $ t0 - where takeChunk _ acc [] = acc - takeChunk i acc (t:ts) - | i <= l = chunk (T.takeEnd (fromIntegral i) t) acc - | otherwise = takeChunk (i-l) (Chunk t acc) ts - where l = fromIntegral (T.length t) - --- | /O(n)/ 'drop' @n@, applied to a 'Text', returns the suffix of the --- 'Text' after the first @n@ characters, or the empty 'Text' if @n@ --- is greater than the length of the 'Text'. Subject to fusion. -drop :: Int64 -> Text -> Text -drop i t0 - | i <= 0 = t0 - | otherwise = drop' i t0 - where drop' 0 ts = ts - drop' _ Empty = Empty - drop' n (Chunk t ts) - | n < len = Chunk (T.drop (fromIntegral n) t) ts - | otherwise = drop' (n - len) ts - where len = fromIntegral (T.length t) -{-# INLINE [1] drop #-} - -{-# RULES -"LAZY TEXT drop -> fused" [~1] forall n t. - drop n t = unstream (S.drop n (stream t)) -"LAZY TEXT drop -> unfused" [1] forall n t. - unstream (S.drop n (stream t)) = drop n t - #-} - --- | /O(n)/ 'dropEnd' @n@ @t@ returns the prefix remaining after --- dropping @n@ characters from the end of @t@. --- --- Examples: --- --- > dropEnd 3 "foobar" == "foo" -dropEnd :: Int64 -> Text -> Text -dropEnd n t0 - | n <= 0 = t0 - | otherwise = dropChunk n . 
L.reverse . toChunks $ t0 - where dropChunk _ [] = empty - dropChunk m (t:ts) - | m >= l = dropChunk (m-l) ts - | otherwise = fromChunks . L.reverse $ - T.dropEnd (fromIntegral m) t : ts - where l = fromIntegral (T.length t) - --- | /O(n)/ 'dropWords' @n@ returns the suffix with @n@ 'Word16' --- values dropped, or the empty 'Text' if @n@ is greater than the --- number of 'Word16' values present. -dropWords :: Int64 -> Text -> Text -dropWords i t0 - | i <= 0 = t0 - | otherwise = drop' i t0 - where drop' 0 ts = ts - drop' _ Empty = Empty - drop' n (Chunk (T.Text arr off len) ts) - | n < len' = chunk (text arr (off+n') (len-n')) ts - | otherwise = drop' (n - len') ts - where len' = fromIntegral len - n' = fromIntegral n - --- | /O(n)/ 'takeWhile', applied to a predicate @p@ and a 'Text', --- returns the longest prefix (possibly empty) of elements that --- satisfy @p@. Subject to fusion. -takeWhile :: (Char -> Bool) -> Text -> Text -takeWhile p t0 = takeWhile' t0 - where takeWhile' Empty = Empty - takeWhile' (Chunk t ts) = - case T.findIndex (not . p) t of - Just n | n > 0 -> Chunk (T.take n t) Empty - | otherwise -> Empty - Nothing -> Chunk t (takeWhile' ts) -{-# INLINE [1] takeWhile #-} - -{-# RULES -"LAZY TEXT takeWhile -> fused" [~1] forall p t. - takeWhile p t = unstream (S.takeWhile p (stream t)) -"LAZY TEXT takeWhile -> unfused" [1] forall p t. - unstream (S.takeWhile p (stream t)) = takeWhile p t - #-} - --- | /O(n)/ 'dropWhile' @p@ @t@ returns the suffix remaining after --- 'takeWhile' @p@ @t@. Subject to fusion. -dropWhile :: (Char -> Bool) -> Text -> Text -dropWhile p t0 = dropWhile' t0 - where dropWhile' Empty = Empty - dropWhile' (Chunk t ts) = - case T.findIndex (not . p) t of - Just n -> Chunk (T.drop n t) ts - Nothing -> dropWhile' ts -{-# INLINE [1] dropWhile #-} - -{-# RULES -"LAZY TEXT dropWhile -> fused" [~1] forall p t. - dropWhile p t = unstream (S.dropWhile p (stream t)) -"LAZY TEXT dropWhile -> unfused" [1] forall p t. 
- unstream (S.dropWhile p (stream t)) = dropWhile p t - #-} --- | /O(n)/ 'dropWhileEnd' @p@ @t@ returns the prefix remaining after --- dropping characters that fail the predicate @p@ from the end of --- @t@. --- Examples: --- --- > dropWhileEnd (=='.') "foo..." == "foo" -dropWhileEnd :: (Char -> Bool) -> Text -> Text -dropWhileEnd p = go - where go Empty = Empty - go (Chunk t Empty) = if T.null t' - then Empty - else Chunk t' Empty - where t' = T.dropWhileEnd p t - go (Chunk t ts) = case go ts of - Empty -> go (Chunk t Empty) - ts' -> Chunk t ts' -{-# INLINE dropWhileEnd #-} - --- | /O(n)/ 'dropAround' @p@ @t@ returns the substring remaining after --- dropping characters that fail the predicate @p@ from both the --- beginning and end of @t@. Subject to fusion. -dropAround :: (Char -> Bool) -> Text -> Text -dropAround p = dropWhile p . dropWhileEnd p -{-# INLINE [1] dropAround #-} - --- | /O(n)/ Remove leading white space from a string. Equivalent to: --- --- > dropWhile isSpace -stripStart :: Text -> Text -stripStart = dropWhile isSpace -{-# INLINE [1] stripStart #-} - --- | /O(n)/ Remove trailing white space from a string. Equivalent to: --- --- > dropWhileEnd isSpace -stripEnd :: Text -> Text -stripEnd = dropWhileEnd isSpace -{-# INLINE [1] stripEnd #-} - --- | /O(n)/ Remove leading and trailing white space from a string. --- Equivalent to: --- --- > dropAround isSpace -strip :: Text -> Text -strip = dropAround isSpace -{-# INLINE [1] strip #-} - --- | /O(n)/ 'splitAt' @n t@ returns a pair whose first element is a --- prefix of @t@ of length @n@, and whose second is the remainder of --- the string. It is equivalent to @('take' n t, 'drop' n t)@. 
-splitAt :: Int64 -> Text -> (Text, Text) -splitAt = loop - where loop _ Empty = (empty, empty) - loop n t | n <= 0 = (empty, t) - loop n (Chunk t ts) - | n < len = let (t',t'') = T.splitAt (fromIntegral n) t - in (Chunk t' Empty, Chunk t'' ts) - | otherwise = let (ts',ts'') = loop (n - len) ts - in (Chunk t ts', ts'') - where len = fromIntegral (T.length t) - --- | /O(n)/ 'splitAtWord' @n t@ returns a strict pair whose first --- element is a prefix of @t@ whose chunks contain @n@ 'Word16' --- values, and whose second is the remainder of the string. -splitAtWord :: Int64 -> Text -> PairS Text Text -splitAtWord _ Empty = empty :*: empty -splitAtWord x (Chunk c@(T.Text arr off len) cs) - | y >= len = let h :*: t = splitAtWord (x-fromIntegral len) cs - in Chunk c h :*: t - | otherwise = chunk (text arr off y) empty :*: - chunk (text arr (off+y) (len-y)) cs - where y = fromIntegral x - --- | /O(n+m)/ Find the first instance of @needle@ (which must be --- non-'null') in @haystack@. The first element of the returned tuple --- is the prefix of @haystack@ before @needle@ is matched. The second --- is the remainder of @haystack@, starting with the match. --- --- Examples: --- --- > breakOn "::" "a::b::c" ==> ("a", "::b::c") --- > breakOn "/" "foobar" ==> ("foobar", "") --- --- Laws: --- --- > append prefix match == haystack --- > where (prefix, match) = breakOn needle haystack --- --- If you need to break a string by a substring repeatedly (e.g. you --- want to break on every instance of a substring), use 'breakOnAll' --- instead, as it has lower startup overhead. --- --- This function is strict in its first argument, and lazy in its --- second. --- --- In (unlikely) bad cases, this function's time complexity degrades --- towards /O(n*m)/. 
-breakOn :: Text -> Text -> (Text, Text) -breakOn pat src - | null pat = emptyError "breakOn" - | otherwise = case indices pat src of - [] -> (src, empty) - (x:_) -> let h :*: t = splitAtWord x src - in (h, t) - --- | /O(n+m)/ Similar to 'breakOn', but searches from the end of the string. --- --- The first element of the returned tuple is the prefix of @haystack@ --- up to and including the last match of @needle@. The second is the --- remainder of @haystack@, following the match. --- --- > breakOnEnd "::" "a::b::c" ==> ("a::b::", "c") -breakOnEnd :: Text -> Text -> (Text, Text) -breakOnEnd pat src = let (a,b) = breakOn (reverse pat) (reverse src) - in (reverse b, reverse a) -{-# INLINE breakOnEnd #-} - --- | /O(n+m)/ Find all non-overlapping instances of @needle@ in --- @haystack@. Each element of the returned list consists of a pair: --- --- * The entire string prior to the /k/th match (i.e. the prefix) --- --- * The /k/th match, followed by the remainder of the string --- --- Examples: --- --- > breakOnAll "::" "" --- > ==> [] --- > breakOnAll "/" "a/b/c/" --- > ==> [("a", "/b/c/"), ("a/b", "/c/"), ("a/b/c", "/")] --- --- This function is strict in its first argument, and lazy in its --- second. --- --- In (unlikely) bad cases, this function's time complexity degrades --- towards /O(n*m)/. --- --- The @needle@ parameter may not be empty. -breakOnAll :: Text -- ^ @needle@ to search for - -> Text -- ^ @haystack@ in which to search - -> [(Text, Text)] -breakOnAll pat src - | null pat = emptyError "breakOnAll" - | otherwise = go 0 empty src (indices pat src) - where - go !n p s (x:xs) = let h :*: t = splitAtWord (x-n) s - h' = append p h - in (h',t) : go x h' t xs - go _ _ _ _ = [] - --- | /O(n)/ 'break' is like 'span', but the prefix returned is over --- elements that fail the predicate @p@. 
-break :: (Char -> Bool) -> Text -> (Text, Text) -break p t0 = break' t0 - where break' Empty = (empty, empty) - break' c@(Chunk t ts) = - case T.findIndex p t of - Nothing -> let (ts', ts'') = break' ts - in (Chunk t ts', ts'') - Just n | n == 0 -> (Empty, c) - | otherwise -> let (a,b) = T.splitAt n t - in (Chunk a Empty, Chunk b ts) - --- | /O(n)/ 'span', applied to a predicate @p@ and text @t@, returns --- a pair whose first element is the longest prefix (possibly empty) --- of @t@ of elements that satisfy @p@, and whose second is the --- remainder of the list. -span :: (Char -> Bool) -> Text -> (Text, Text) -span p = break (not . p) -{-# INLINE span #-} - --- | The 'group' function takes a 'Text' and returns a list of 'Text's --- such that the concatenation of the result is equal to the argument. --- Moreover, each sublist in the result contains only equal elements. --- For example, --- --- > group "Mississippi" = ["M","i","ss","i","ss","i","pp","i"] --- --- It is a special case of 'groupBy', which allows the programmer to --- supply their own equality test. -group :: Text -> [Text] -group = groupBy (==) -{-# INLINE group #-} - --- | The 'groupBy' function is the non-overloaded version of 'group'. -groupBy :: (Char -> Char -> Bool) -> Text -> [Text] -groupBy _ Empty = [] -groupBy eq (Chunk t ts) = cons x ys : groupBy eq zs - where (ys,zs) = span (eq x) xs - x = T.unsafeHead t - xs = chunk (T.unsafeTail t) ts - --- | /O(n)/ Return all initial segments of the given 'Text', --- shortest first. -inits :: Text -> [Text] -inits = (Empty :) . inits' - where inits' Empty = [] - inits' (Chunk t ts) = L.map (\t' -> Chunk t' Empty) (L.tail (T.inits t)) - ++ L.map (Chunk t) (inits' ts) - --- | /O(n)/ Return all final segments of the given 'Text', longest --- first. 
-tails :: Text -> [Text] -tails Empty = Empty : [] -tails ts@(Chunk t ts') - | T.length t == 1 = ts : tails ts' - | otherwise = ts : tails (Chunk (T.unsafeTail t) ts') - --- $split --- --- Splitting functions in this library do not perform character-wise --- copies to create substrings; they just construct new 'Text's that --- are slices of the original. - --- | /O(m+n)/ Break a 'Text' into pieces separated by the first 'Text' --- argument (which cannot be an empty string), consuming the --- delimiter. An empty delimiter is invalid, and will cause an error --- to be raised. --- --- Examples: --- --- > splitOn "\r\n" "a\r\nb\r\nd\r\ne" == ["a","b","d","e"] --- > splitOn "aaa" "aaaXaaaXaaaXaaa" == ["","X","X","X",""] --- > splitOn "x" "x" == ["",""] --- --- and --- --- > intercalate s . splitOn s == id --- > splitOn (singleton c) == split (==c) --- --- (Note: the string @s@ to split on above cannot be empty.) --- --- This function is strict in its first argument, and lazy in its --- second. --- --- In (unlikely) bad cases, this function's time complexity degrades --- towards /O(n*m)/. -splitOn :: Text - -- ^ String to split on. If this string is empty, an error - -- will occur. - -> Text - -- ^ Input text. - -> [Text] -splitOn pat src - | null pat = emptyError "splitOn" - | isSingleton pat = split (== head pat) src - | otherwise = go 0 (indices pat src) src - where - go _ [] cs = [cs] - go !i (x:xs) cs = let h :*: t = splitAtWord (x-i) cs - in h : go (x+l) xs (dropWords l t) - l = foldlChunks (\a (T.Text _ _ b) -> a + fromIntegral b) 0 pat -{-# INLINE [1] splitOn #-} - -{-# RULES -"LAZY TEXT splitOn/singleton -> split/==" [~1] forall c t. - splitOn (singleton c) t = split (==c) t - #-} - --- | /O(n)/ Splits a 'Text' into components delimited by separators, --- where the predicate returns True for a separator element. The --- resulting components do not contain the separators. Two adjacent --- separators result in an empty component in the output. eg. 
--- --- > split (=='a') "aabbaca" == ["","","bb","c",""] --- > split (=='a') [] == [""] -split :: (Char -> Bool) -> Text -> [Text] -split _ Empty = [Empty] -split p (Chunk t0 ts0) = comb [] (T.split p t0) ts0 - where comb acc (s:[]) Empty = revChunks (s:acc) : [] - comb acc (s:[]) (Chunk t ts) = comb (s:acc) (T.split p t) ts - comb acc (s:ss) ts = revChunks (s:acc) : comb [] ss ts - comb _ [] _ = impossibleError "split" -{-# INLINE split #-} - --- | /O(n)/ Splits a 'Text' into components of length @k@. The last --- element may be shorter than the other chunks, depending on the --- length of the input. Examples: --- --- > chunksOf 3 "foobarbaz" == ["foo","bar","baz"] --- > chunksOf 4 "haskell.org" == ["hask","ell.","org"] -chunksOf :: Int64 -> Text -> [Text] -chunksOf k = go - where - go t = case splitAt k t of - (a,b) | null a -> [] - | otherwise -> a : go b -{-# INLINE chunksOf #-} - --- | /O(n)/ Breaks a 'Text' up into a list of 'Text's at --- newline 'Char's. The resulting strings do not contain newlines. -lines :: Text -> [Text] -lines Empty = [] -lines t = let (l,t') = break ((==) '\n') t - in l : if null t' then [] - else lines (tail t') - --- | /O(n)/ Breaks a 'Text' up into a list of words, delimited by 'Char's --- representing white space. -words :: Text -> [Text] -words = L.filter (not . null) . split isSpace -{-# INLINE words #-} - --- | /O(n)/ Joins lines, after appending a terminating newline to --- each. -unlines :: [Text] -> Text -unlines = concat . L.map (`snoc` '\n') -{-# INLINE unlines #-} - --- | /O(n)/ Joins words using single space characters. -unwords :: [Text] -> Text -unwords = intercalate (singleton ' ') -{-# INLINE unwords #-} - --- | /O(n)/ The 'isPrefixOf' function takes two 'Text's and returns --- 'True' iff the first is a prefix of the second. Subject to fusion. 
-isPrefixOf :: Text -> Text -> Bool -isPrefixOf Empty _ = True -isPrefixOf _ Empty = False -isPrefixOf (Chunk x xs) (Chunk y ys) - | lx == ly = x == y && isPrefixOf xs ys - | lx < ly = x == yh && isPrefixOf xs (Chunk yt ys) - | otherwise = xh == y && isPrefixOf (Chunk xt xs) ys - where (xh,xt) = T.splitAt ly x - (yh,yt) = T.splitAt lx y - lx = T.length x - ly = T.length y -{-# INLINE [1] isPrefixOf #-} - -{-# RULES -"LAZY TEXT isPrefixOf -> fused" [~1] forall s t. - isPrefixOf s t = S.isPrefixOf (stream s) (stream t) -"LAZY TEXT isPrefixOf -> unfused" [1] forall s t. - S.isPrefixOf (stream s) (stream t) = isPrefixOf s t - #-} - --- | /O(n)/ The 'isSuffixOf' function takes two 'Text's and returns --- 'True' iff the first is a suffix of the second. -isSuffixOf :: Text -> Text -> Bool -isSuffixOf x y = reverse x `isPrefixOf` reverse y -{-# INLINE isSuffixOf #-} --- TODO: a better implementation - --- | /O(n+m)/ The 'isInfixOf' function takes two 'Text's and returns --- 'True' iff the first is contained, wholly and intact, anywhere --- within the second. --- --- This function is strict in its first argument, and lazy in its --- second. --- --- In (unlikely) bad cases, this function's time complexity degrades --- towards /O(n*m)/. -isInfixOf :: Text -> Text -> Bool -isInfixOf needle haystack - | null needle = True - | isSingleton needle = S.elem (head needle) . S.stream $ haystack - | otherwise = not . L.null . indices needle $ haystack -{-# INLINE [1] isInfixOf #-} - -{-# RULES -"LAZY TEXT isInfixOf/singleton -> S.elem/S.stream" [~1] forall n h. - isInfixOf (singleton n) h = S.elem n (S.stream h) - #-} - -------------------------------------------------------------------------------- --- * View patterns - --- | /O(n)/ Return the suffix of the second string if its prefix --- matches the entire first string. 
--- --- Examples: --- --- > stripPrefix "foo" "foobar" == Just "bar" --- > stripPrefix "" "baz" == Just "baz" --- > stripPrefix "foo" "quux" == Nothing --- --- This is particularly useful with the @ViewPatterns@ extension to --- GHC, as follows: --- --- > {-# LANGUAGE ViewPatterns #-} --- > import Data.Text.Lazy as T --- > --- > fnordLength :: Text -> Int --- > fnordLength (stripPrefix "fnord" -> Just suf) = T.length suf --- > fnordLength _ = -1 -stripPrefix :: Text -> Text -> Maybe Text -stripPrefix p t - | null p = Just t - | otherwise = case commonPrefixes p t of - Just (_,c,r) | null c -> Just r - _ -> Nothing - --- | /O(n)/ Find the longest non-empty common prefix of two strings --- and return it, along with the suffixes of each string at which they --- no longer match. --- --- If the strings do not have a common prefix or either one is empty, --- this function returns 'Nothing'. --- --- Examples: --- --- > commonPrefixes "foobar" "fooquux" == Just ("foo","bar","quux") --- > commonPrefixes "veeble" "fetzer" == Nothing --- > commonPrefixes "" "baz" == Nothing -commonPrefixes :: Text -> Text -> Maybe (Text,Text,Text) -commonPrefixes Empty _ = Nothing -commonPrefixes _ Empty = Nothing -commonPrefixes a0 b0 = Just (go a0 b0 []) - where - go t0@(Chunk x xs) t1@(Chunk y ys) ps - = case T.commonPrefixes x y of - Just (p,a,b) - | T.null a -> go xs (chunk b ys) (p:ps) - | T.null b -> go (chunk a xs) ys (p:ps) - | otherwise -> (fromChunks (L.reverse (p:ps)),chunk a xs, chunk b ys) - Nothing -> (fromChunks (L.reverse ps),t0,t1) - go t0 t1 ps = (fromChunks (L.reverse ps),t0,t1) - --- | /O(n)/ Return the prefix of the second string if its suffix --- matches the entire first string. 
--- --- Examples: --- --- > stripSuffix "bar" "foobar" == Just "foo" --- > stripSuffix "" "baz" == Just "baz" --- > stripSuffix "foo" "quux" == Nothing --- --- This is particularly useful with the @ViewPatterns@ extension to --- GHC, as follows: --- --- > {-# LANGUAGE ViewPatterns #-} --- > import Data.Text.Lazy as T --- > --- > quuxLength :: Text -> Int --- > quuxLength (stripSuffix "quux" -> Just pre) = T.length pre --- > quuxLength _ = -1 -stripSuffix :: Text -> Text -> Maybe Text -stripSuffix p t = reverse `fmap` stripPrefix (reverse p) (reverse t) - --- | /O(n)/ 'filter', applied to a predicate and a 'Text', --- returns a 'Text' containing those characters that satisfy the --- predicate. -filter :: (Char -> Bool) -> Text -> Text -filter p t = unstream (S.filter p (stream t)) -{-# INLINE filter #-} - --- | /O(n)/ The 'find' function takes a predicate and a 'Text', and --- returns the first element in matching the predicate, or 'Nothing' --- if there is no such element. -find :: (Char -> Bool) -> Text -> Maybe Char -find p t = S.findBy p (stream t) -{-# INLINE find #-} - --- | /O(n)/ The 'partition' function takes a predicate and a 'Text', --- and returns the pair of 'Text's with elements which do and do not --- satisfy the predicate, respectively; i.e. --- --- > partition p t == (filter p t, filter (not . p) t) -partition :: (Char -> Bool) -> Text -> (Text, Text) -partition p t = (filter p t, filter (not . p) t) -{-# INLINE partition #-} - --- | /O(n)/ 'Text' index (subscript) operator, starting from 0. -index :: Text -> Int64 -> Char -index t n = S.index (stream t) n -{-# INLINE index #-} - --- | /O(n+m)/ The 'count' function returns the number of times the --- query string appears in the given 'Text'. An empty query string is --- invalid, and will cause an error to be raised. --- --- In (unlikely) bad cases, this function's time complexity degrades --- towards /O(n*m)/. 
-count :: Text -> Text -> Int64 -count pat src - | null pat = emptyError "count" - | otherwise = go 0 (indices pat src) - where go !n [] = n - go !n (_:xs) = go (n+1) xs -{-# INLINE [1] count #-} - -{-# RULES -"LAZY TEXT count/singleton -> countChar" [~1] forall c t. - count (singleton c) t = countChar c t - #-} - --- | /O(n)/ The 'countChar' function returns the number of times the --- query element appears in the given 'Text'. Subject to fusion. -countChar :: Char -> Text -> Int64 -countChar c t = S.countChar c (stream t) - --- | /O(n)/ 'zip' takes two 'Text's and returns a list of --- corresponding pairs of bytes. If one input 'Text' is short, --- excess elements of the longer 'Text' are discarded. This is --- equivalent to a pair of 'unpack' operations. -zip :: Text -> Text -> [(Char,Char)] -zip a b = S.unstreamList $ S.zipWith (,) (stream a) (stream b) -{-# INLINE [0] zip #-} - --- | /O(n)/ 'zipWith' generalises 'zip' by zipping with the function --- given as the first argument, instead of a tupling function. --- Performs replacement on invalid scalar values. -zipWith :: (Char -> Char -> Char) -> Text -> Text -> Text -zipWith f t1 t2 = unstream (S.zipWith g (stream t1) (stream t2)) - where g a b = safe (f a b) -{-# INLINE [0] zipWith #-} - -revChunks :: [T.Text] -> Text -revChunks = L.foldl' (flip chunk) Empty - -emptyError :: String -> a -emptyError fun = P.error ("Data.Text.Lazy." ++ fun ++ ": empty input") - -impossibleError :: String -> a -impossibleError fun = P.error ("Data.Text.Lazy." 
++ fun ++ ": impossible case") diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Read.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Read.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Read.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Read.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,201 +0,0 @@ -{-# LANGUAGE OverloadedStrings, UnboxedTuples, CPP #-} -#if __GLASGOW_HASKELL__ >= 702 -{-# LANGUAGE Trustworthy #-} -#endif - --- | --- Module : Data.Text.Read --- Copyright : (c) 2010, 2011 Bryan O'Sullivan --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- Functions used frequently when reading textual data. -module Data.Text.Read - ( - Reader - , decimal - , hexadecimal - , signed - , rational - , double - ) where - -import Control.Monad (liftM) -import Data.Char (isDigit, isHexDigit) -import Data.Int (Int8, Int16, Int32, Int64) -import Data.Ratio ((%)) -import Data.Text as T -import Data.Text.Internal.Private (span_) -import Data.Text.Internal.Read -import Data.Word (Word, Word8, Word16, Word32, Word64) - --- | Read some text. If the read succeeds, return its value and the --- remaining text, otherwise an error message. -type Reader a = IReader Text a -type Parser a = IParser Text a - --- | Read a decimal integer. The input must begin with at least one --- decimal digit, and is consumed until a non-digit or end of string --- is reached. --- --- This function does not handle leading sign characters. If you need --- to handle signed input, use @'signed' 'decimal'@. --- --- /Note/: For fixed-width integer types, this function does not --- attempt to detect overflow, so a sufficiently long input may give --- incorrect results. If you are worried about overflow, use --- 'Integer' for your result type. 
-decimal :: Integral a => Reader a -{-# SPECIALIZE decimal :: Reader Int #-} -{-# SPECIALIZE decimal :: Reader Int8 #-} -{-# SPECIALIZE decimal :: Reader Int16 #-} -{-# SPECIALIZE decimal :: Reader Int32 #-} -{-# SPECIALIZE decimal :: Reader Int64 #-} -{-# SPECIALIZE decimal :: Reader Integer #-} -{-# SPECIALIZE decimal :: Reader Word #-} -{-# SPECIALIZE decimal :: Reader Word8 #-} -{-# SPECIALIZE decimal :: Reader Word16 #-} -{-# SPECIALIZE decimal :: Reader Word32 #-} -{-# SPECIALIZE decimal :: Reader Word64 #-} -decimal txt - | T.null h = Left "input does not start with a digit" - | otherwise = Right (T.foldl' go 0 h, t) - where (# h,t #) = span_ isDigit txt - go n d = (n * 10 + fromIntegral (digitToInt d)) - --- | Read a hexadecimal integer, consisting of an optional leading --- @\"0x\"@ followed by at least one decimal digit. Input is consumed --- until a non-hex-digit or end of string is reached. This function --- is case insensitive. --- --- This function does not handle leading sign characters. If you need --- to handle signed input, use @'signed' 'hexadecimal'@. --- --- /Note/: For fixed-width integer types, this function does not --- attempt to detect overflow, so a sufficiently long input may give --- incorrect results. If you are worried about overflow, use --- 'Integer' for your result type. 
-hexadecimal :: Integral a => Reader a -{-# SPECIALIZE hexadecimal :: Reader Int #-} -{-# SPECIALIZE hexadecimal :: Reader Int8 #-} -{-# SPECIALIZE hexadecimal :: Reader Int16 #-} -{-# SPECIALIZE hexadecimal :: Reader Int32 #-} -{-# SPECIALIZE hexadecimal :: Reader Int64 #-} -{-# SPECIALIZE hexadecimal :: Reader Integer #-} -{-# SPECIALIZE hexadecimal :: Reader Word #-} -{-# SPECIALIZE hexadecimal :: Reader Word8 #-} -{-# SPECIALIZE hexadecimal :: Reader Word16 #-} -{-# SPECIALIZE hexadecimal :: Reader Word32 #-} -{-# SPECIALIZE hexadecimal :: Reader Word64 #-} -hexadecimal txt - | h == "0x" || h == "0X" = hex t - | otherwise = hex txt - where (h,t) = T.splitAt 2 txt - -hex :: Integral a => Reader a -{-# SPECIALIZE hex :: Reader Int #-} -{-# SPECIALIZE hex :: Reader Int8 #-} -{-# SPECIALIZE hex :: Reader Int16 #-} -{-# SPECIALIZE hex :: Reader Int32 #-} -{-# SPECIALIZE hex :: Reader Int64 #-} -{-# SPECIALIZE hex :: Reader Integer #-} -{-# SPECIALIZE hex :: Reader Word #-} -{-# SPECIALIZE hex :: Reader Word8 #-} -{-# SPECIALIZE hex :: Reader Word16 #-} -{-# SPECIALIZE hex :: Reader Word32 #-} -{-# SPECIALIZE hex :: Reader Word64 #-} -hex txt - | T.null h = Left "input does not start with a hexadecimal digit" - | otherwise = Right (T.foldl' go 0 h, t) - where (# h,t #) = span_ isHexDigit txt - go n d = (n * 16 + fromIntegral (hexDigitToInt d)) - --- | Read an optional leading sign character (@\'-\'@ or @\'+\'@) and --- apply it to the result of applying the given reader. -signed :: Num a => Reader a -> Reader a -{-# INLINE signed #-} -signed f = runP (signa (P f)) - --- | Read a rational number. --- --- This function accepts an optional leading sign character, followed --- by at least one decimal digit. The syntax similar to that accepted --- by the 'read' function, with the exception that a trailing @\'.\'@ --- or @\'e\'@ /not/ followed by a number is not consumed. 
--- --- Examples (with behaviour identical to 'read'): --- --- >rational "3" == Right (3.0, "") --- >rational "3.1" == Right (3.1, "") --- >rational "3e4" == Right (30000.0, "") --- >rational "3.1e4" == Right (31000.0, "") --- >rational ".3" == Left "input does not start with a digit" --- >rational "e3" == Left "input does not start with a digit" --- --- Examples of differences from 'read': --- --- >rational "3.foo" == Right (3.0, ".foo") --- >rational "3e" == Right (3.0, "e") -rational :: Fractional a => Reader a -{-# SPECIALIZE rational :: Reader Double #-} -rational = floaty $ \real frac fracDenom -> fromRational $ - real % 1 + frac % fracDenom - --- | Read a rational number. --- --- The syntax accepted by this function is the same as for 'rational'. --- --- /Note/: This function is almost ten times faster than 'rational', --- but is slightly less accurate. --- --- The 'Double' type supports about 16 decimal places of accuracy. --- For 94.2% of numbers, this function and 'rational' give identical --- results, but for the remaining 5.8%, this function loses precision --- around the 15th decimal place. For 0.001% of numbers, this --- function will lose precision at the 13th or 14th decimal place. 
-double :: Reader Double -double = floaty $ \real frac fracDenom -> - fromIntegral real + - fromIntegral frac / fromIntegral fracDenom - -signa :: Num a => Parser a -> Parser a -{-# SPECIALIZE signa :: Parser Int -> Parser Int #-} -{-# SPECIALIZE signa :: Parser Int8 -> Parser Int8 #-} -{-# SPECIALIZE signa :: Parser Int16 -> Parser Int16 #-} -{-# SPECIALIZE signa :: Parser Int32 -> Parser Int32 #-} -{-# SPECIALIZE signa :: Parser Int64 -> Parser Int64 #-} -{-# SPECIALIZE signa :: Parser Integer -> Parser Integer #-} -signa p = do - sign <- perhaps '+' $ char (\c -> c == '-' || c == '+') - if sign == '+' then p else negate `liftM` p - -char :: (Char -> Bool) -> Parser Char -char p = P $ \t -> case T.uncons t of - Just (c,t') | p c -> Right (c,t') - _ -> Left "character does not match" - -floaty :: Fractional a => (Integer -> Integer -> Integer -> a) -> Reader a -{-# INLINE floaty #-} -floaty f = runP $ do - sign <- perhaps '+' $ char (\c -> c == '-' || c == '+') - real <- P decimal - T fraction fracDigits <- perhaps (T 0 0) $ do - _ <- char (=='.') - digits <- P $ \t -> Right (T.length $ T.takeWhile isDigit t, t) - n <- P decimal - return $ T n digits - let e c = c == 'e' || c == 'E' - power <- perhaps 0 (char e >> signa (P decimal) :: Parser Int) - let n = if fracDigits == 0 - then if power == 0 - then fromIntegral real - else fromIntegral real * (10 ^^ power) - else if power == 0 - then f real fraction (10 ^ fracDigits) - else f real fraction (10 ^ fracDigits) * (10 ^^ power) - return $! 
if sign == '+' - then n - else -n diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Unsafe.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Unsafe.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text/Unsafe.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text/Unsafe.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,123 +0,0 @@ -{-# LANGUAGE CPP, MagicHash, UnboxedTuples #-} --- | --- Module : Data.Text.Unsafe --- Copyright : (c) 2009, 2010, 2011 Bryan O'Sullivan --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : portable --- --- A module containing unsafe 'Text' operations, for very very careful --- use in heavily tested code. -module Data.Text.Unsafe - ( - inlineInterleaveST - , inlinePerformIO - , unsafeDupablePerformIO - , Iter(..) - , iter - , iter_ - , reverseIter - , reverseIter_ - , unsafeHead - , unsafeTail - , lengthWord16 - , takeWord16 - , dropWord16 - ) where - -#if defined(ASSERTS) -import Control.Exception (assert) -#endif -import Data.Text.Internal.Encoding.Utf16 (chr2) -import Data.Text.Internal (Text(..)) -import Data.Text.Internal.Unsafe (inlineInterleaveST, inlinePerformIO) -import Data.Text.Internal.Unsafe.Char (unsafeChr) -import qualified Data.Text.Array as A -import GHC.IO (unsafeDupablePerformIO) - --- | /O(1)/ A variant of 'head' for non-empty 'Text'. 'unsafeHead' --- omits the check for the empty case, so there is an obligation on --- the programmer to provide a proof that the 'Text' is non-empty. -unsafeHead :: Text -> Char -unsafeHead (Text arr off _len) - | m < 0xD800 || m > 0xDBFF = unsafeChr m - | otherwise = chr2 m n - where m = A.unsafeIndex arr off - n = A.unsafeIndex arr (off+1) -{-# INLINE unsafeHead #-} - --- | /O(1)/ A variant of 'tail' for non-empty 'Text'. 'unsafeTail' --- omits the check for the empty case, so there is an obligation on --- the programmer to provide a proof that the 'Text' is non-empty. 
-unsafeTail :: Text -> Text -unsafeTail t@(Text arr off len) = -#if defined(ASSERTS) - assert (d <= len) $ -#endif - Text arr (off+d) (len-d) - where d = iter_ t 0 -{-# INLINE unsafeTail #-} - -data Iter = Iter {-# UNPACK #-} !Char {-# UNPACK #-} !Int - --- | /O(1)/ Iterate (unsafely) one step forwards through a UTF-16 --- array, returning the current character and the delta to add to give --- the next offset to iterate at. -iter :: Text -> Int -> Iter -iter (Text arr off _len) i - | m < 0xD800 || m > 0xDBFF = Iter (unsafeChr m) 1 - | otherwise = Iter (chr2 m n) 2 - where m = A.unsafeIndex arr j - n = A.unsafeIndex arr k - j = off + i - k = j + 1 -{-# INLINE iter #-} - --- | /O(1)/ Iterate one step through a UTF-16 array, returning the --- delta to add to give the next offset to iterate at. -iter_ :: Text -> Int -> Int -iter_ (Text arr off _len) i | m < 0xD800 || m > 0xDBFF = 1 - | otherwise = 2 - where m = A.unsafeIndex arr (off+i) -{-# INLINE iter_ #-} - --- | /O(1)/ Iterate one step backwards through a UTF-16 array, --- returning the current character and the delta to add (i.e. a --- negative number) to give the next offset to iterate at. -reverseIter :: Text -> Int -> (Char,Int) -reverseIter (Text arr off _len) i - | m < 0xDC00 || m > 0xDFFF = (unsafeChr m, -1) - | otherwise = (chr2 n m, -2) - where m = A.unsafeIndex arr j - n = A.unsafeIndex arr k - j = off + i - k = j - 1 -{-# INLINE reverseIter #-} - --- | /O(1)/ Iterate one step backwards through a UTF-16 array, --- returning the delta to add (i.e. a negative number) to give the --- next offset to iterate at. -reverseIter_ :: Text -> Int -> Int -reverseIter_ (Text arr off _len) i - | m < 0xDC00 || m > 0xDFFF = -1 - | otherwise = -2 - where m = A.unsafeIndex arr (off+i) -{-# INLINE reverseIter_ #-} - --- | /O(1)/ Return the length of a 'Text' in units of 'Word16'. This --- is useful for sizing a target array appropriately before using --- 'unsafeCopyToPtr'. 
-lengthWord16 :: Text -> Int -lengthWord16 (Text _arr _off len) = len -{-# INLINE lengthWord16 #-} - --- | /O(1)/ Unchecked take of 'k' 'Word16's from the front of a 'Text'. -takeWord16 :: Int -> Text -> Text -takeWord16 k (Text arr off _len) = Text arr off k -{-# INLINE takeWord16 #-} - --- | /O(1)/ Unchecked drop of 'k' 'Word16's from the front of a 'Text'. -dropWord16 :: Int -> Text -> Text -dropWord16 k (Text arr off len) = Text arr (off+k) (len-k) -{-# INLINE dropWord16 #-} diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Data/Text.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Data/Text.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,1750 +0,0 @@ -{-# LANGUAGE BangPatterns, CPP, MagicHash, Rank2Types, UnboxedTuples #-} -{-# OPTIONS_GHC -fno-warn-orphans #-} -#if __GLASGOW_HASKELL__ >= 702 -{-# LANGUAGE Trustworthy #-} -#endif -#if __GLASGOW_HASKELL__ >= 708 -{-# LANGUAGE TypeFamilies #-} -#endif - --- | --- Module : Data.Text --- Copyright : (c) 2009, 2010, 2011, 2012 Bryan O'Sullivan, --- (c) 2009 Duncan Coutts, --- (c) 2008, 2009 Tom Harper --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- A time and space-efficient implementation of Unicode text. --- Suitable for performance critical use, both in terms of large data --- quantities and high speed. --- --- /Note/: Read below the synopsis for important notes on the use of --- this module. --- --- This module is intended to be imported @qualified@, to avoid name --- clashes with "Prelude" functions, e.g. --- --- > import qualified Data.Text as T --- --- To use an extended and very rich family of functions for working --- with Unicode text (including normalization, regular expressions, --- non-standard encodings, text breaking, and locales), see --- . 
- -module Data.Text - ( - -- * Strict vs lazy types - -- $strict - - -- * Acceptable data - -- $replacement - - -- * Fusion - -- $fusion - - -- * Types - Text - - -- * Creation and elimination - , pack - , unpack - , singleton - , empty - - -- * Basic interface - , cons - , snoc - , append - , uncons - , head - , last - , tail - , init - , null - , length - , compareLength - - -- * Transformations - , map - , intercalate - , intersperse - , transpose - , reverse - , replace - - -- ** Case conversion - -- $case - , toCaseFold - , toLower - , toUpper - , toTitle - - -- ** Justification - , justifyLeft - , justifyRight - , center - - -- * Folds - , foldl - , foldl' - , foldl1 - , foldl1' - , foldr - , foldr1 - - -- ** Special folds - , concat - , concatMap - , any - , all - , maximum - , minimum - - -- * Construction - - -- ** Scans - , scanl - , scanl1 - , scanr - , scanr1 - - -- ** Accumulating maps - , mapAccumL - , mapAccumR - - -- ** Generation and unfolding - , replicate - , unfoldr - , unfoldrN - - -- * Substrings - - -- ** Breaking strings - , take - , takeEnd - , drop - , dropEnd - , takeWhile - , dropWhile - , dropWhileEnd - , dropAround - , strip - , stripStart - , stripEnd - , splitAt - , breakOn - , breakOnEnd - , break - , span - , group - , groupBy - , inits - , tails - - -- ** Breaking into many substrings - -- $split - , splitOn - , split - , chunksOf - - -- ** Breaking into lines and words - , lines - --, lines' - , words - , unlines - , unwords - - -- * Predicates - , isPrefixOf - , isSuffixOf - , isInfixOf - - -- ** View patterns - , stripPrefix - , stripSuffix - , commonPrefixes - - -- * Searching - , filter - , breakOnAll - , find - , partition - - -- , findSubstring - - -- * Indexing - -- $index - , index - , findIndex - , count - - -- * Zipping - , zip - , zipWith - - -- -* Ordered text - -- , sort - - -- * Low level operations - , copy - ) where - -import Prelude (Char, Bool(..), Int, Maybe(..), String, - Eq(..), Ord(..), Ordering(..), (++), - 
Read(..), Show(..), - (&&), (||), (+), (-), (.), ($), ($!), (>>), - not, return, otherwise, quot) -#if defined(HAVE_DEEPSEQ) -import Control.DeepSeq (NFData(rnf)) -#endif -#if defined(ASSERTS) -import Control.Exception (assert) -#endif -import Data.Char (isSpace) -import Data.Data (Data(gfoldl, toConstr, gunfold, dataTypeOf), constrIndex, - Constr, mkConstr, DataType, mkDataType, Fixity(Prefix)) -import Control.Monad (foldM) -import Control.Monad.ST (ST) -import qualified Data.Text.Array as A -import qualified Data.List as L -import Data.Monoid (Monoid(..)) -import Data.String (IsString(..)) -import qualified Data.Text.Internal.Fusion as S -import qualified Data.Text.Internal.Fusion.Common as S -import Data.Text.Internal.Fusion (stream, reverseStream, unstream) -import Data.Text.Internal.Private (span_) -import Data.Text.Internal (Text(..), empty, empty_, firstf, mul, safe, text) -import qualified Prelude as P -import Data.Text.Unsafe (Iter(..), iter, iter_, lengthWord16, reverseIter, - reverseIter_, unsafeHead, unsafeTail) -import Data.Text.Internal.Unsafe.Char (unsafeChr, unsafeWrite) -import qualified Data.Text.Internal.Functions as F -import qualified Data.Text.Internal.Encoding.Utf16 as U16 -import Data.Text.Internal.Search (indices) -#if defined(__HADDOCK__) -import Data.ByteString (ByteString) -import qualified Data.Text.Lazy as L -import Data.Int (Int64) -#endif -#if __GLASGOW_HASKELL__ >= 702 -import qualified GHC.CString as GHC -#else -import qualified GHC.Base as GHC -#endif -#if __GLASGOW_HASKELL__ >= 708 -import qualified GHC.Exts as Exts -#endif -import GHC.Prim (Addr#) - --- $strict --- --- This package provides both strict and lazy 'Text' types. The --- strict type is provided by the "Data.Text" module, while the lazy --- type is provided by the "Data.Text.Lazy" module. Internally, the --- lazy @Text@ type consists of a list of strict chunks. --- --- The strict 'Text' type requires that an entire string fit into --- memory at once. 
The lazy 'Data.Text.Lazy.Text' type is capable of --- streaming strings that are larger than memory using a small memory --- footprint. In many cases, the overhead of chunked streaming makes --- the lazy 'Data.Text.Lazy.Text' type slower than its strict --- counterpart, but this is not always the case. Sometimes, the time --- complexity of a function in one module may be different from the --- other, due to their differing internal structures. --- --- Each module provides an almost identical API, with the main --- difference being that the strict module uses 'Int' values for --- lengths and counts, while the lazy module uses 'Data.Int.Int64' --- lengths. - --- $replacement --- --- A 'Text' value is a sequence of Unicode scalar values, as defined --- in --- . --- As such, a 'Text' cannot contain values in the range U+D800 to --- U+DFFF inclusive. Haskell implementations admit all Unicode code --- points --- () --- as 'Char' values, including code points from this invalid range. --- This means that there are some 'Char' values that are not valid --- Unicode scalar values, and the functions in this module must handle --- those cases. --- --- Within this module, many functions construct a 'Text' from one or --- more 'Char' values. Those functions will substitute 'Char' values --- that are not valid Unicode scalar values with the replacement --- character \"�\" (U+FFFD). Functions that perform this --- inspection and replacement are documented with the phrase --- \"Performs replacement on invalid scalar values\". --- --- (One reason for this policy of replacement is that internally, a --- 'Text' value is represented as packed UTF-16 data. Values in the --- range U+D800 through U+DFFF are used by UTF-16 to denote surrogate --- code points, and so cannot be represented. The functions replace --- invalid scalar values, instead of dropping them, as a security --- measure. For details, see --- .) 
- --- $fusion --- --- Most of the functions in this module are subject to /fusion/, --- meaning that a pipeline of such functions will usually allocate at --- most one 'Text' value. --- --- As an example, consider the following pipeline: --- --- > import Data.Text as T --- > import Data.Text.Encoding as E --- > import Data.ByteString (ByteString) --- > --- > countChars :: ByteString -> Int --- > countChars = T.length . T.toUpper . E.decodeUtf8 --- --- From the type signatures involved, this looks like it should --- allocate one 'Data.ByteString.ByteString' value, and two 'Text' --- values. However, when a module is compiled with optimisation --- enabled under GHC, the two intermediate 'Text' values will be --- optimised away, and the function will be compiled down to a single --- loop over the source 'Data.ByteString.ByteString'. --- --- Functions that can be fused by the compiler are documented with the --- phrase \"Subject to fusion\". - -instance Eq Text where - Text arrA offA lenA == Text arrB offB lenB - | lenA == lenB = A.equal arrA offA arrB offB lenA - | otherwise = False - {-# INLINE (==) #-} - -instance Ord Text where - compare = compareText - -instance Show Text where - showsPrec p ps r = showsPrec p (unpack ps) r - -instance Read Text where - readsPrec p str = [(pack x,y) | (x,y) <- readsPrec p str] - -instance Monoid Text where - mempty = empty - mappend = append - mconcat = concat - -instance IsString Text where - fromString = pack - -#if __GLASGOW_HASKELL__ >= 708 -instance Exts.IsList Text where - type Item Text = Char - fromList = pack - toList = unpack -#endif - -#if defined(HAVE_DEEPSEQ) -instance NFData Text where rnf !_ = () -#endif - --- | This instance preserves data abstraction at the cost of inefficiency. --- We omit reflection services for the sake of data abstraction. --- --- This instance was created by copying the updated behavior of --- @"Data.Set".@'Data.Set.Set' and @"Data.Map".@'Data.Map.Map'. 
If you --- feel a mistake has been made, please feel free to submit --- improvements. --- --- The original discussion is archived here: --- --- --- The followup discussion that changed the behavior of 'Data.Set.Set' --- and 'Data.Map.Map' is archived here: --- - -instance Data Text where - gfoldl f z txt = z pack `f` (unpack txt) - toConstr _ = packConstr - gunfold k z c = case constrIndex c of - 1 -> k (z pack) - _ -> P.error "gunfold" - dataTypeOf _ = textDataType - -packConstr :: Constr -packConstr = mkConstr textDataType "pack" [] Prefix - -textDataType :: DataType -textDataType = mkDataType "Data.Text.Text" [packConstr] - --- | /O(n)/ Compare two 'Text' values lexicographically. -compareText :: Text -> Text -> Ordering -compareText ta@(Text _arrA _offA lenA) tb@(Text _arrB _offB lenB) - | lenA == 0 && lenB == 0 = EQ - | otherwise = go 0 0 - where - go !i !j - | i >= lenA || j >= lenB = compare lenA lenB - | a < b = LT - | a > b = GT - | otherwise = go (i+di) (j+dj) - where Iter a di = iter ta i - Iter b dj = iter tb j - --- ----------------------------------------------------------------------------- --- * Conversion to/from 'Text' - --- | /O(n)/ Convert a 'String' into a 'Text'. Subject to --- fusion. Performs replacement on invalid scalar values. -pack :: String -> Text -pack = unstream . S.map safe . S.streamList -{-# INLINE [1] pack #-} - --- | /O(n)/ Convert a 'Text' into a 'String'. Subject to fusion. -unpack :: Text -> String -unpack = S.unstreamList . stream -{-# INLINE [1] unpack #-} - --- | /O(n)/ Convert a literal string into a Text. Subject to fusion. -unpackCString# :: Addr# -> Text -unpackCString# addr# = unstream (S.streamCString# addr#) -{-# NOINLINE unpackCString# #-} - -{-# RULES "TEXT literal" forall a. - unstream (S.map safe (S.streamList (GHC.unpackCString# a))) - = unpackCString# a #-} - -{-# RULES "TEXT literal UTF8" forall a. 
- unstream (S.map safe (S.streamList (GHC.unpackCStringUtf8# a))) - = unpackCString# a #-} - -{-# RULES "TEXT empty literal" - unstream (S.map safe (S.streamList [])) - = empty_ #-} - -{-# RULES "TEXT singleton literal" forall a. - unstream (S.map safe (S.streamList [a])) - = singleton_ a #-} - --- | /O(1)/ Convert a character into a Text. Subject to fusion. --- Performs replacement on invalid scalar values. -singleton :: Char -> Text -singleton = unstream . S.singleton . safe -{-# INLINE [1] singleton #-} - -{-# RULES "TEXT singleton" forall a. - unstream (S.singleton (safe a)) - = singleton_ a #-} - --- This is intended to reduce inlining bloat. -singleton_ :: Char -> Text -singleton_ c = Text (A.run x) 0 len - where x :: ST s (A.MArray s) - x = do arr <- A.new len - _ <- unsafeWrite arr 0 d - return arr - len | d < '\x10000' = 1 - | otherwise = 2 - d = safe c -{-# NOINLINE singleton_ #-} - --- ----------------------------------------------------------------------------- --- * Basic functions - --- | /O(n)/ Adds a character to the front of a 'Text'. This function --- is more costly than its 'List' counterpart because it requires --- copying a new array. Subject to fusion. Performs replacement on --- invalid scalar values. -cons :: Char -> Text -> Text -cons c t = unstream (S.cons (safe c) (stream t)) -{-# INLINE cons #-} - -infixr 5 `cons` - --- | /O(n)/ Adds a character to the end of a 'Text'. This copies the --- entire array in the process, unless fused. Subject to fusion. --- Performs replacement on invalid scalar values. -snoc :: Text -> Char -> Text -snoc t c = unstream (S.snoc (stream t) (safe c)) -{-# INLINE snoc #-} - --- | /O(n)/ Appends one 'Text' to the other by copying both of them --- into a new 'Text'. Subject to fusion. 
-append :: Text -> Text -> Text -append a@(Text arr1 off1 len1) b@(Text arr2 off2 len2) - | len1 == 0 = b - | len2 == 0 = a - | len > 0 = Text (A.run x) 0 len - | otherwise = overflowError "append" - where - len = len1+len2 - x :: ST s (A.MArray s) - x = do - arr <- A.new len - A.copyI arr 0 arr1 off1 len1 - A.copyI arr len1 arr2 off2 len - return arr -{-# NOINLINE append #-} - -{-# RULES -"TEXT append -> fused" [~1] forall t1 t2. - append t1 t2 = unstream (S.append (stream t1) (stream t2)) -"TEXT append -> unfused" [1] forall t1 t2. - unstream (S.append (stream t1) (stream t2)) = append t1 t2 - #-} - --- | /O(1)/ Returns the first character of a 'Text', which must be --- non-empty. Subject to fusion. -head :: Text -> Char -head t = S.head (stream t) -{-# INLINE head #-} - --- | /O(1)/ Returns the first character and rest of a 'Text', or --- 'Nothing' if empty. Subject to fusion. -uncons :: Text -> Maybe (Char, Text) -uncons t@(Text arr off len) - | len <= 0 = Nothing - | otherwise = Just (c, text arr (off+d) (len-d)) - where Iter c d = iter t 0 -{-# INLINE [1] uncons #-} - --- | Lifted from Control.Arrow and specialized. -second :: (b -> c) -> (a,b) -> (a,c) -second f (a, b) = (a, f b) - --- | /O(1)/ Returns the last character of a 'Text', which must be --- non-empty. Subject to fusion. -last :: Text -> Char -last (Text arr off len) - | len <= 0 = emptyError "last" - | n < 0xDC00 || n > 0xDFFF = unsafeChr n - | otherwise = U16.chr2 n0 n - where n = A.unsafeIndex arr (off+len-1) - n0 = A.unsafeIndex arr (off+len-2) -{-# INLINE [1] last #-} - -{-# RULES -"TEXT last -> fused" [~1] forall t. - last t = S.last (stream t) -"TEXT last -> unfused" [1] forall t. - S.last (stream t) = last t - #-} - --- | /O(1)/ Returns all characters after the head of a 'Text', which --- must be non-empty. Subject to fusion. 
-tail :: Text -> Text -tail t@(Text arr off len) - | len <= 0 = emptyError "tail" - | otherwise = text arr (off+d) (len-d) - where d = iter_ t 0 -{-# INLINE [1] tail #-} - -{-# RULES -"TEXT tail -> fused" [~1] forall t. - tail t = unstream (S.tail (stream t)) -"TEXT tail -> unfused" [1] forall t. - unstream (S.tail (stream t)) = tail t - #-} - --- | /O(1)/ Returns all but the last character of a 'Text', which must --- be non-empty. Subject to fusion. -init :: Text -> Text -init (Text arr off len) | len <= 0 = emptyError "init" - | n >= 0xDC00 && n <= 0xDFFF = text arr off (len-2) - | otherwise = text arr off (len-1) - where - n = A.unsafeIndex arr (off+len-1) -{-# INLINE [1] init #-} - -{-# RULES -"TEXT init -> fused" [~1] forall t. - init t = unstream (S.init (stream t)) -"TEXT init -> unfused" [1] forall t. - unstream (S.init (stream t)) = init t - #-} - --- | /O(1)/ Tests whether a 'Text' is empty or not. Subject to --- fusion. -null :: Text -> Bool -null (Text _arr _off len) = -#if defined(ASSERTS) - assert (len >= 0) $ -#endif - len <= 0 -{-# INLINE [1] null #-} - -{-# RULES -"TEXT null -> fused" [~1] forall t. - null t = S.null (stream t) -"TEXT null -> unfused" [1] forall t. - S.null (stream t) = null t - #-} - --- | /O(1)/ Tests whether a 'Text' contains exactly one character. --- Subject to fusion. -isSingleton :: Text -> Bool -isSingleton = S.isSingleton . stream -{-# INLINE isSingleton #-} - --- | /O(n)/ Returns the number of characters in a 'Text'. --- Subject to fusion. -length :: Text -> Int -length t = S.length (stream t) -{-# INLINE length #-} - --- | /O(n)/ Compare the count of characters in a 'Text' to a number. --- Subject to fusion. --- --- This function gives the same answer as comparing against the result --- of 'length', but can short circuit if the count of characters is --- greater than the number, and hence be more efficient. 
-compareLength :: Text -> Int -> Ordering -compareLength t n = S.compareLengthI (stream t) n -{-# INLINE [1] compareLength #-} - -{-# RULES -"TEXT compareN/length -> compareLength" [~1] forall t n. - compare (length t) n = compareLength t n - #-} - -{-# RULES -"TEXT ==N/length -> compareLength/==EQ" [~1] forall t n. - (==) (length t) n = compareLength t n == EQ - #-} - -{-# RULES -"TEXT /=N/length -> compareLength//=EQ" [~1] forall t n. - (/=) (length t) n = compareLength t n /= EQ - #-} - -{-# RULES -"TEXT compareLength/==LT" [~1] forall t n. - (<) (length t) n = compareLength t n == LT - #-} - -{-# RULES -"TEXT <=N/length -> compareLength//=GT" [~1] forall t n. - (<=) (length t) n = compareLength t n /= GT - #-} - -{-# RULES -"TEXT >N/length -> compareLength/==GT" [~1] forall t n. - (>) (length t) n = compareLength t n == GT - #-} - -{-# RULES -"TEXT >=N/length -> compareLength//=LT" [~1] forall t n. - (>=) (length t) n = compareLength t n /= LT - #-} - --- ----------------------------------------------------------------------------- --- * Transformations --- | /O(n)/ 'map' @f@ @t@ is the 'Text' obtained by applying @f@ to --- each element of @t@. Subject to fusion. Performs replacement on --- invalid scalar values. -map :: (Char -> Char) -> Text -> Text -map f t = unstream (S.map (safe . f) (stream t)) -{-# INLINE [1] map #-} - --- | /O(n)/ The 'intercalate' function takes a 'Text' and a list of --- 'Text's and concatenates the list after interspersing the first --- argument between each element of the list. -intercalate :: Text -> [Text] -> Text -intercalate t = concat . (F.intersperse t) -{-# INLINE intercalate #-} - --- | /O(n)/ The 'intersperse' function takes a character and places it --- between the characters of a 'Text'. Subject to fusion. Performs --- replacement on invalid scalar values. 
-intersperse :: Char -> Text -> Text -intersperse c t = unstream (S.intersperse (safe c) (stream t)) -{-# INLINE intersperse #-} - --- | /O(n)/ Reverse the characters of a string. Subject to fusion. -reverse :: Text -> Text -reverse t = S.reverse (stream t) -{-# INLINE reverse #-} - --- | /O(m+n)/ Replace every non-overlapping occurrence of @needle@ in --- @haystack@ with @replacement@. --- --- This function behaves as though it was defined as follows: --- --- @ --- replace needle replacement haystack = --- 'intercalate' replacement ('splitOn' needle haystack) --- @ --- --- As this suggests, each occurrence is replaced exactly once. So if --- @needle@ occurs in @replacement@, that occurrence will /not/ itself --- be replaced recursively: --- --- > replace "oo" "foo" "oo" == "foo" --- --- In cases where several instances of @needle@ overlap, only the --- first one will be replaced: --- --- > replace "ofo" "bar" "ofofo" == "barfo" --- --- In (unlikely) bad cases, this function's time complexity degrades --- towards /O(n*m)/. -replace :: Text - -- ^ @needle@ to search for. If this string is empty, an - -- error will occur. - -> Text - -- ^ @replacement@ to replace @needle@ with. - -> Text - -- ^ @haystack@ in which to search. 
- -> Text -replace needle@(Text _ _ neeLen) - (Text repArr repOff repLen) - haystack@(Text hayArr hayOff hayLen) - | neeLen == 0 = emptyError "replace" - | L.null ixs = haystack - | len > 0 = Text (A.run x) 0 len - | otherwise = empty - where - ixs = indices needle haystack - len = hayLen - (neeLen - repLen) `mul` L.length ixs - x :: ST s (A.MArray s) - x = do - marr <- A.new len - let loop (i:is) o d = do - let d0 = d + i - o - d1 = d0 + repLen - A.copyI marr d hayArr (hayOff+o) d0 - A.copyI marr d0 repArr repOff d1 - loop is (i + neeLen) d1 - loop [] o d = A.copyI marr d hayArr (hayOff+o) len - loop ixs 0 0 - return marr - --- ---------------------------------------------------------------------------- --- ** Case conversions (folds) - --- $case --- --- When case converting 'Text' values, do not use combinators like --- @map toUpper@ to case convert each character of a string --- individually, as this gives incorrect results according to the --- rules of some writing systems. The whole-string case conversion --- functions from this module, such as @toUpper@, obey the correct --- case conversion rules. As a result, these functions may map one --- input character to two or three output characters. For examples, --- see the documentation of each function. --- --- /Note/: In some languages, case conversion is a locale- and --- context-dependent operation. The case conversion functions in this --- module are /not/ locale sensitive. Programs that require locale --- sensitivity should use appropriate versions of the --- . - --- | /O(n)/ Convert a string to folded case. Subject to fusion. --- --- This function is mainly useful for performing caseless (also known --- as case insensitive) string comparisons. --- --- A string @x@ is a caseless match for a string @y@ if and only if: --- --- @toCaseFold x == toCaseFold y@ --- --- The result string may be longer than the input string, and may --- differ from applying 'toLower' to the input string. 
For instance, --- the Armenian small ligature \"ﬓ\" (men now, U+FB13) is case --- folded to the sequence \"մ\" (men, U+0574) followed by --- \"ն\" (now, U+0576), while the Greek \"µ\" (micro sign, --- U+00B5) is case folded to \"μ\" (small letter mu, U+03BC) --- instead of itself. -toCaseFold :: Text -> Text -toCaseFold t = unstream (S.toCaseFold (stream t)) -{-# INLINE [0] toCaseFold #-} - --- | /O(n)/ Convert a string to lower case, using simple case --- conversion. Subject to fusion. --- --- The result string may be longer than the input string. For --- instance, \"İ\" (Latin capital letter I with dot above, --- U+0130) maps to the sequence \"i\" (Latin small letter i, U+0069) --- followed by \" ̇\" (combining dot above, U+0307). -toLower :: Text -> Text -toLower t = unstream (S.toLower (stream t)) -{-# INLINE toLower #-} - --- | /O(n)/ Convert a string to upper case, using simple case --- conversion. Subject to fusion. --- --- The result string may be longer than the input string. For --- instance, the German \"ß\" (eszett, U+00DF) maps to the --- two-letter sequence \"SS\". -toUpper :: Text -> Text -toUpper t = unstream (S.toUpper (stream t)) -{-# INLINE toUpper #-} - --- | /O(n)/ Convert a string to title case, using simple case --- conversion. Subject to fusion. --- --- The first letter of the input is converted to title case, as is --- every subsequent letter that immediately follows a non-letter. --- Every letter that immediately follows another letter is converted --- to lower case. --- --- The result string may be longer than the input string. For example, --- the Latin small ligature fl (U+FB02) is converted to the --- sequence Latin capital letter F (U+0046) followed by Latin small --- letter l (U+006C). --- --- /Note/: this function does not take language or culture specific --- rules into account. 
For instance, in English, different style --- guides disagree on whether the book name \"The Hill of the Red --- Fox\" is correctly title cased—but this function will --- capitalize /every/ word. -toTitle :: Text -> Text -toTitle t = unstream (S.toTitle (stream t)) -{-# INLINE toTitle #-} - --- | /O(n)/ Left-justify a string to the given length, using the --- specified fill character on the right. Subject to fusion. --- Performs replacement on invalid scalar values. --- --- Examples: --- --- > justifyLeft 7 'x' "foo" == "fooxxxx" --- > justifyLeft 3 'x' "foobar" == "foobar" -justifyLeft :: Int -> Char -> Text -> Text -justifyLeft k c t - | len >= k = t - | otherwise = t `append` replicateChar (k-len) c - where len = length t -{-# INLINE [1] justifyLeft #-} - -{-# RULES -"TEXT justifyLeft -> fused" [~1] forall k c t. - justifyLeft k c t = unstream (S.justifyLeftI k c (stream t)) -"TEXT justifyLeft -> unfused" [1] forall k c t. - unstream (S.justifyLeftI k c (stream t)) = justifyLeft k c t - #-} - --- | /O(n)/ Right-justify a string to the given length, using the --- specified fill character on the left. Performs replacement on --- invalid scalar values. --- --- Examples: --- --- > justifyRight 7 'x' "bar" == "xxxxbar" --- > justifyRight 3 'x' "foobar" == "foobar" -justifyRight :: Int -> Char -> Text -> Text -justifyRight k c t - | len >= k = t - | otherwise = replicateChar (k-len) c `append` t - where len = length t -{-# INLINE justifyRight #-} - --- | /O(n)/ Center a string to the given length, using the specified --- fill character on either side. Performs replacement on invalid --- scalar values. 
--- --- Examples: --- --- > center 8 'x' "HS" = "xxxHSxxx" -center :: Int -> Char -> Text -> Text -center k c t - | len >= k = t - | otherwise = replicateChar l c `append` t `append` replicateChar r c - where len = length t - d = k - len - r = d `quot` 2 - l = d - r -{-# INLINE center #-} - --- | /O(n)/ The 'transpose' function transposes the rows and columns --- of its 'Text' argument. Note that this function uses 'pack', --- 'unpack', and the list version of transpose, and is thus not very --- efficient. -transpose :: [Text] -> [Text] -transpose ts = P.map pack (L.transpose (P.map unpack ts)) - --- ----------------------------------------------------------------------------- --- * Reducing 'Text's (folds) - --- | /O(n)/ 'foldl', applied to a binary operator, a starting value --- (typically the left-identity of the operator), and a 'Text', --- reduces the 'Text' using the binary operator, from left to right. --- Subject to fusion. -foldl :: (a -> Char -> a) -> a -> Text -> a -foldl f z t = S.foldl f z (stream t) -{-# INLINE foldl #-} - --- | /O(n)/ A strict version of 'foldl'. Subject to fusion. -foldl' :: (a -> Char -> a) -> a -> Text -> a -foldl' f z t = S.foldl' f z (stream t) -{-# INLINE foldl' #-} - --- | /O(n)/ A variant of 'foldl' that has no starting value argument, --- and thus must be applied to a non-empty 'Text'. Subject to fusion. -foldl1 :: (Char -> Char -> Char) -> Text -> Char -foldl1 f t = S.foldl1 f (stream t) -{-# INLINE foldl1 #-} - --- | /O(n)/ A strict version of 'foldl1'. Subject to fusion. -foldl1' :: (Char -> Char -> Char) -> Text -> Char -foldl1' f t = S.foldl1' f (stream t) -{-# INLINE foldl1' #-} - --- | /O(n)/ 'foldr', applied to a binary operator, a starting value --- (typically the right-identity of the operator), and a 'Text', --- reduces the 'Text' using the binary operator, from right to left. --- Subject to fusion. 
-foldr :: (Char -> a -> a) -> a -> Text -> a -foldr f z t = S.foldr f z (stream t) -{-# INLINE foldr #-} - --- | /O(n)/ A variant of 'foldr' that has no starting value argument, --- and thus must be applied to a non-empty 'Text'. Subject to --- fusion. -foldr1 :: (Char -> Char -> Char) -> Text -> Char -foldr1 f t = S.foldr1 f (stream t) -{-# INLINE foldr1 #-} - --- ----------------------------------------------------------------------------- --- ** Special folds - --- | /O(n)/ Concatenate a list of 'Text's. -concat :: [Text] -> Text -concat ts = case ts' of - [] -> empty - [t] -> t - _ -> Text (A.run go) 0 len - where - ts' = L.filter (not . null) ts - len = sumP "concat" $ L.map lengthWord16 ts' - go :: ST s (A.MArray s) - go = do - arr <- A.new len - let step i (Text a o l) = - let !j = i + l in A.copyI arr i a o j >> return j - foldM step 0 ts' >> return arr - --- | /O(n)/ Map a function over a 'Text' that results in a 'Text', and --- concatenate the results. -concatMap :: (Char -> Text) -> Text -> Text -concatMap f = concat . foldr ((:) . f) [] -{-# INLINE concatMap #-} - --- | /O(n)/ 'any' @p@ @t@ determines whether any character in the --- 'Text' @t@ satisifes the predicate @p@. Subject to fusion. -any :: (Char -> Bool) -> Text -> Bool -any p t = S.any p (stream t) -{-# INLINE any #-} - --- | /O(n)/ 'all' @p@ @t@ determines whether all characters in the --- 'Text' @t@ satisify the predicate @p@. Subject to fusion. -all :: (Char -> Bool) -> Text -> Bool -all p t = S.all p (stream t) -{-# INLINE all #-} - --- | /O(n)/ 'maximum' returns the maximum value from a 'Text', which --- must be non-empty. Subject to fusion. -maximum :: Text -> Char -maximum t = S.maximum (stream t) -{-# INLINE maximum #-} - --- | /O(n)/ 'minimum' returns the minimum value from a 'Text', which --- must be non-empty. Subject to fusion. 
-minimum :: Text -> Char -minimum t = S.minimum (stream t) -{-# INLINE minimum #-} - --- ----------------------------------------------------------------------------- --- * Building 'Text's - --- | /O(n)/ 'scanl' is similar to 'foldl', but returns a list of --- successive reduced values from the left. Subject to fusion. --- Performs replacement on invalid scalar values. --- --- > scanl f z [x1, x2, ...] == [z, z `f` x1, (z `f` x1) `f` x2, ...] --- --- Note that --- --- > last (scanl f z xs) == foldl f z xs. -scanl :: (Char -> Char -> Char) -> Char -> Text -> Text -scanl f z t = unstream (S.scanl g z (stream t)) - where g a b = safe (f a b) -{-# INLINE scanl #-} - --- | /O(n)/ 'scanl1' is a variant of 'scanl' that has no starting --- value argument. Subject to fusion. Performs replacement on --- invalid scalar values. --- --- > scanl1 f [x1, x2, ...] == [x1, x1 `f` x2, ...] -scanl1 :: (Char -> Char -> Char) -> Text -> Text -scanl1 f t | null t = empty - | otherwise = scanl f (unsafeHead t) (unsafeTail t) -{-# INLINE scanl1 #-} - --- | /O(n)/ 'scanr' is the right-to-left dual of 'scanl'. Performs --- replacement on invalid scalar values. --- --- > scanr f v == reverse . scanl (flip f) v . reverse -scanr :: (Char -> Char -> Char) -> Char -> Text -> Text -scanr f z = S.reverse . S.reverseScanr g z . reverseStream - where g a b = safe (f a b) -{-# INLINE scanr #-} - --- | /O(n)/ 'scanr1' is a variant of 'scanr' that has no starting --- value argument. Subject to fusion. Performs replacement on --- invalid scalar values. -scanr1 :: (Char -> Char -> Char) -> Text -> Text -scanr1 f t | null t = empty - | otherwise = scanr f (last t) (init t) -{-# INLINE scanr1 #-} - --- | /O(n)/ Like a combination of 'map' and 'foldl''. Applies a --- function to each element of a 'Text', passing an accumulating --- parameter from left to right, and returns a final 'Text'. Performs --- replacement on invalid scalar values. 
-mapAccumL :: (a -> Char -> (a,Char)) -> a -> Text -> (a, Text) -mapAccumL f z0 = S.mapAccumL g z0 . stream - where g a b = second safe (f a b) -{-# INLINE mapAccumL #-} - --- | The 'mapAccumR' function behaves like a combination of 'map' and --- a strict 'foldr'; it applies a function to each element of a --- 'Text', passing an accumulating parameter from right to left, and --- returning a final value of this accumulator together with the new --- 'Text'. --- Performs replacement on invalid scalar values. -mapAccumR :: (a -> Char -> (a,Char)) -> a -> Text -> (a, Text) -mapAccumR f z0 = second reverse . S.mapAccumL g z0 . reverseStream - where g a b = second safe (f a b) -{-# INLINE mapAccumR #-} - --- ----------------------------------------------------------------------------- --- ** Generating and unfolding 'Text's - --- | /O(n*m)/ 'replicate' @n@ @t@ is a 'Text' consisting of the input --- @t@ repeated @n@ times. -replicate :: Int -> Text -> Text -replicate n t@(Text a o l) - | n <= 0 || l <= 0 = empty - | n == 1 = t - | isSingleton t = replicateChar n (unsafeHead t) - | otherwise = Text (A.run x) 0 len - where - len = l `mul` n - x :: ST s (A.MArray s) - x = do - arr <- A.new len - let loop !d !i | i >= n = return arr - | otherwise = let m = d + l - in A.copyI arr d a o m >> loop m (i+1) - loop 0 0 -{-# INLINE [1] replicate #-} - -{-# RULES -"TEXT replicate/singleton -> replicateChar" [~1] forall n c. - replicate n (singleton c) = replicateChar n c - #-} - --- | /O(n)/ 'replicateChar' @n@ @c@ is a 'Text' of length @n@ with @c@ the --- value of every element. Subject to fusion. -replicateChar :: Int -> Char -> Text -replicateChar n c = unstream (S.replicateCharI n (safe c)) -{-# INLINE replicateChar #-} - --- | /O(n)/, where @n@ is the length of the result. The 'unfoldr' --- function is analogous to the List 'L.unfoldr'. 'unfoldr' builds a --- 'Text' from a seed value. 
The function takes the element and --- returns 'Nothing' if it is done producing the 'Text', otherwise --- 'Just' @(a,b)@. In this case, @a@ is the next 'Char' in the --- string, and @b@ is the seed value for further production. Subject --- to fusion. Performs replacement on invalid scalar values. -unfoldr :: (a -> Maybe (Char,a)) -> a -> Text -unfoldr f s = unstream (S.unfoldr (firstf safe . f) s) -{-# INLINE unfoldr #-} - --- | /O(n)/ Like 'unfoldr', 'unfoldrN' builds a 'Text' from a seed --- value. However, the length of the result should be limited by the --- first argument to 'unfoldrN'. This function is more efficient than --- 'unfoldr' when the maximum length of the result is known and --- correct, otherwise its performance is similar to 'unfoldr'. Subject --- to fusion. Performs replacement on invalid scalar values. -unfoldrN :: Int -> (a -> Maybe (Char,a)) -> a -> Text -unfoldrN n f s = unstream (S.unfoldrN n (firstf safe . f) s) -{-# INLINE unfoldrN #-} - --- ----------------------------------------------------------------------------- --- * Substrings - --- | /O(n)/ 'take' @n@, applied to a 'Text', returns the prefix of the --- 'Text' of length @n@, or the 'Text' itself if @n@ is greater than --- the length of the Text. Subject to fusion. -take :: Int -> Text -> Text -take n t@(Text arr off len) - | n <= 0 = empty - | n >= len = t - | otherwise = text arr off (iterN n t) -{-# INLINE [1] take #-} - -iterN :: Int -> Text -> Int -iterN n t@(Text _arr _off len) = loop 0 0 - where loop !i !cnt - | i >= len || cnt >= n = i - | otherwise = loop (i+d) (cnt+1) - where d = iter_ t i - -{-# RULES -"TEXT take -> fused" [~1] forall n t. - take n t = unstream (S.take n (stream t)) -"TEXT take -> unfused" [1] forall n t. - unstream (S.take n (stream t)) = take n t - #-} - --- | /O(n)/ 'takeEnd' @n@ @t@ returns the suffix remaining after --- taking @n@ characters from the end of @t@. 
--- --- Examples: --- --- > takeEnd 3 "foobar" == "bar" -takeEnd :: Int -> Text -> Text -takeEnd n t@(Text arr off len) - | n <= 0 = empty - | n >= len = t - | otherwise = text arr (off+i) (len-i) - where i = iterNEnd n t - -iterNEnd :: Int -> Text -> Int -iterNEnd n t@(Text _arr _off len) = loop (len-1) n - where loop i !m - | i <= 0 = 0 - | m <= 1 = i - | otherwise = loop (i+d) (m-1) - where d = reverseIter_ t i - --- | /O(n)/ 'drop' @n@, applied to a 'Text', returns the suffix of the --- 'Text' after the first @n@ characters, or the empty 'Text' if @n@ --- is greater than the length of the 'Text'. Subject to fusion. -drop :: Int -> Text -> Text -drop n t@(Text arr off len) - | n <= 0 = t - | n >= len = empty - | otherwise = text arr (off+i) (len-i) - where i = iterN n t -{-# INLINE [1] drop #-} - -{-# RULES -"TEXT drop -> fused" [~1] forall n t. - drop n t = unstream (S.drop n (stream t)) -"TEXT drop -> unfused" [1] forall n t. - unstream (S.drop n (stream t)) = drop n t - #-} - --- | /O(n)/ 'dropEnd' @n@ @t@ returns the prefix remaining after --- dropping @n@ characters from the end of @t@. --- --- Examples: --- --- > dropEnd 3 "foobar" == "foo" -dropEnd :: Int -> Text -> Text -dropEnd n t@(Text arr off len) - | n <= 0 = t - | n >= len = empty - | otherwise = text arr off (iterNEnd n t) - --- | /O(n)/ 'takeWhile', applied to a predicate @p@ and a 'Text', --- returns the longest prefix (possibly empty) of elements that --- satisfy @p@. Subject to fusion. -takeWhile :: (Char -> Bool) -> Text -> Text -takeWhile p t@(Text arr off len) = loop 0 - where loop !i | i >= len = t - | p c = loop (i+d) - | otherwise = text arr off i - where Iter c d = iter t i -{-# INLINE [1] takeWhile #-} - -{-# RULES -"TEXT takeWhile -> fused" [~1] forall p t. - takeWhile p t = unstream (S.takeWhile p (stream t)) -"TEXT takeWhile -> unfused" [1] forall p t. 
- unstream (S.takeWhile p (stream t)) = takeWhile p t - #-} - --- | /O(n)/ 'dropWhile' @p@ @t@ returns the suffix remaining after --- 'takeWhile' @p@ @t@. Subject to fusion. -dropWhile :: (Char -> Bool) -> Text -> Text -dropWhile p t@(Text arr off len) = loop 0 0 - where loop !i !l | l >= len = empty - | p c = loop (i+d) (l+d) - | otherwise = Text arr (off+i) (len-l) - where Iter c d = iter t i -{-# INLINE [1] dropWhile #-} - -{-# RULES -"TEXT dropWhile -> fused" [~1] forall p t. - dropWhile p t = unstream (S.dropWhile p (stream t)) -"TEXT dropWhile -> unfused" [1] forall p t. - unstream (S.dropWhile p (stream t)) = dropWhile p t - #-} - --- | /O(n)/ 'dropWhileEnd' @p@ @t@ returns the prefix remaining after --- dropping characters that fail the predicate @p@ from the end of --- @t@. Subject to fusion. --- Examples: --- --- > dropWhileEnd (=='.') "foo..." == "foo" -dropWhileEnd :: (Char -> Bool) -> Text -> Text -dropWhileEnd p t@(Text arr off len) = loop (len-1) len - where loop !i !l | l <= 0 = empty - | p c = loop (i+d) (l+d) - | otherwise = Text arr off l - where (c,d) = reverseIter t i -{-# INLINE [1] dropWhileEnd #-} - -{-# RULES -"TEXT dropWhileEnd -> fused" [~1] forall p t. - dropWhileEnd p t = S.reverse (S.dropWhile p (S.reverseStream t)) -"TEXT dropWhileEnd -> unfused" [1] forall p t. - S.reverse (S.dropWhile p (S.reverseStream t)) = dropWhileEnd p t - #-} - --- | /O(n)/ 'dropAround' @p@ @t@ returns the substring remaining after --- dropping characters that fail the predicate @p@ from both the --- beginning and end of @t@. Subject to fusion. -dropAround :: (Char -> Bool) -> Text -> Text -dropAround p = dropWhile p . dropWhileEnd p -{-# INLINE [1] dropAround #-} - --- | /O(n)/ Remove leading white space from a string. Equivalent to: --- --- > dropWhile isSpace -stripStart :: Text -> Text -stripStart = dropWhile isSpace -{-# INLINE [1] stripStart #-} - --- | /O(n)/ Remove trailing white space from a string. 
Equivalent to: --- --- > dropWhileEnd isSpace -stripEnd :: Text -> Text -stripEnd = dropWhileEnd isSpace -{-# INLINE [1] stripEnd #-} - --- | /O(n)/ Remove leading and trailing white space from a string. --- Equivalent to: --- --- > dropAround isSpace -strip :: Text -> Text -strip = dropAround isSpace -{-# INLINE [1] strip #-} - --- | /O(n)/ 'splitAt' @n t@ returns a pair whose first element is a --- prefix of @t@ of length @n@, and whose second is the remainder of --- the string. It is equivalent to @('take' n t, 'drop' n t)@. -splitAt :: Int -> Text -> (Text, Text) -splitAt n t@(Text arr off len) - | n <= 0 = (empty, t) - | n >= len = (t, empty) - | otherwise = let k = iterN n t - in (text arr off k, text arr (off+k) (len-k)) - --- | /O(n)/ 'span', applied to a predicate @p@ and text @t@, returns --- a pair whose first element is the longest prefix (possibly empty) --- of @t@ of elements that satisfy @p@, and whose second is the --- remainder of the list. -span :: (Char -> Bool) -> Text -> (Text, Text) -span p t = case span_ p t of - (# hd,tl #) -> (hd,tl) -{-# INLINE span #-} - --- | /O(n)/ 'break' is like 'span', but the prefix returned is --- over elements that fail the predicate @p@. -break :: (Char -> Bool) -> Text -> (Text, Text) -break p = span (not . p) -{-# INLINE break #-} - --- | /O(n)/ Group characters in a string according to a predicate. -groupBy :: (Char -> Char -> Bool) -> Text -> [Text] -groupBy p = loop - where - loop t@(Text arr off len) - | null t = [] - | otherwise = text arr off n : loop (text arr (off+n) (len-n)) - where Iter c d = iter t 0 - n = d + findAIndexOrEnd (not . p c) (Text arr (off+d) (len-d)) - --- | Returns the /array/ index (in units of 'Word16') at which a --- character may be found. This is /not/ the same as the logical --- index returned by e.g. 'findIndex'. 
-findAIndexOrEnd :: (Char -> Bool) -> Text -> Int -findAIndexOrEnd q t@(Text _arr _off len) = go 0 - where go !i | i >= len || q c = i - | otherwise = go (i+d) - where Iter c d = iter t i - --- | /O(n)/ Group characters in a string by equality. -group :: Text -> [Text] -group = groupBy (==) - --- | /O(n)/ Return all initial segments of the given 'Text', shortest --- first. -inits :: Text -> [Text] -inits t@(Text arr off len) = loop 0 - where loop i | i >= len = [t] - | otherwise = Text arr off i : loop (i + iter_ t i) - --- | /O(n)/ Return all final segments of the given 'Text', longest --- first. -tails :: Text -> [Text] -tails t | null t = [empty] - | otherwise = t : tails (unsafeTail t) - --- $split --- --- Splitting functions in this library do not perform character-wise --- copies to create substrings; they just construct new 'Text's that --- are slices of the original. - --- | /O(m+n)/ Break a 'Text' into pieces separated by the first 'Text' --- argument (which cannot be empty), consuming the delimiter. An empty --- delimiter is invalid, and will cause an error to be raised. --- --- Examples: --- --- > splitOn "\r\n" "a\r\nb\r\nd\r\ne" == ["a","b","d","e"] --- > splitOn "aaa" "aaaXaaaXaaaXaaa" == ["","X","X","X",""] --- > splitOn "x" "x" == ["",""] --- --- and --- --- > intercalate s . splitOn s == id --- > splitOn (singleton c) == split (==c) --- --- (Note: the string @s@ to split on above cannot be empty.) --- --- In (unlikely) bad cases, this function's time complexity degrades --- towards /O(n*m)/. -splitOn :: Text - -- ^ String to split on. If this string is empty, an error - -- will occur. - -> Text - -- ^ Input text. 
- -> [Text] -splitOn pat@(Text _ _ l) src@(Text arr off len) - | l <= 0 = emptyError "splitOn" - | isSingleton pat = split (== unsafeHead pat) src - | otherwise = go 0 (indices pat src) - where - go !s (x:xs) = text arr (s+off) (x-s) : go (x+l) xs - go s _ = [text arr (s+off) (len-s)] -{-# INLINE [1] splitOn #-} - -{-# RULES -"TEXT splitOn/singleton -> split/==" [~1] forall c t. - splitOn (singleton c) t = split (==c) t - #-} - --- | /O(n)/ Splits a 'Text' into components delimited by separators, --- where the predicate returns True for a separator element. The --- resulting components do not contain the separators. Two adjacent --- separators result in an empty component in the output. eg. --- --- > split (=='a') "aabbaca" == ["","","bb","c",""] --- > split (=='a') "" == [""] -split :: (Char -> Bool) -> Text -> [Text] -split _ t@(Text _off _arr 0) = [t] -split p t = loop t - where loop s | null s' = [l] - | otherwise = l : loop (unsafeTail s') - where (# l, s' #) = span_ (not . p) s -{-# INLINE split #-} - --- | /O(n)/ Splits a 'Text' into components of length @k@. The last --- element may be shorter than the other chunks, depending on the --- length of the input. Examples: --- --- > chunksOf 3 "foobarbaz" == ["foo","bar","baz"] --- > chunksOf 4 "haskell.org" == ["hask","ell.","org"] -chunksOf :: Int -> Text -> [Text] -chunksOf k = go - where - go t = case splitAt k t of - (a,b) | null a -> [] - | otherwise -> a : go b -{-# INLINE chunksOf #-} - --- ---------------------------------------------------------------------------- --- * Searching - -------------------------------------------------------------------------------- --- ** Searching with a predicate - --- | /O(n)/ The 'find' function takes a predicate and a 'Text', and --- returns the first element matching the predicate, or 'Nothing' if --- there is no such element. 
-find :: (Char -> Bool) -> Text -> Maybe Char -find p t = S.findBy p (stream t) -{-# INLINE find #-} - --- | /O(n)/ The 'partition' function takes a predicate and a 'Text', --- and returns the pair of 'Text's with elements which do and do not --- satisfy the predicate, respectively; i.e. --- --- > partition p t == (filter p t, filter (not . p) t) -partition :: (Char -> Bool) -> Text -> (Text, Text) -partition p t = (filter p t, filter (not . p) t) -{-# INLINE partition #-} - --- | /O(n)/ 'filter', applied to a predicate and a 'Text', --- returns a 'Text' containing those characters that satisfy the --- predicate. -filter :: (Char -> Bool) -> Text -> Text -filter p t = unstream (S.filter p (stream t)) -{-# INLINE filter #-} - --- | /O(n+m)/ Find the first instance of @needle@ (which must be --- non-'null') in @haystack@. The first element of the returned tuple --- is the prefix of @haystack@ before @needle@ is matched. The second --- is the remainder of @haystack@, starting with the match. --- --- Examples: --- --- > breakOn "::" "a::b::c" ==> ("a", "::b::c") --- > breakOn "/" "foobar" ==> ("foobar", "") --- --- Laws: --- --- > append prefix match == haystack --- > where (prefix, match) = breakOn needle haystack --- --- If you need to break a string by a substring repeatedly (e.g. you --- want to break on every instance of a substring), use 'breakOnAll' --- instead, as it has lower startup overhead. --- --- In (unlikely) bad cases, this function's time complexity degrades --- towards /O(n*m)/. -breakOn :: Text -> Text -> (Text, Text) -breakOn pat src@(Text arr off len) - | null pat = emptyError "breakOn" - | otherwise = case indices pat src of - [] -> (src, empty) - (x:_) -> (text arr off x, text arr (off+x) (len-x)) -{-# INLINE breakOn #-} - --- | /O(n+m)/ Similar to 'breakOn', but searches from the end of the --- string. --- --- The first element of the returned tuple is the prefix of @haystack@ --- up to and including the last match of @needle@. 
The second is the --- remainder of @haystack@, following the match. --- --- > breakOnEnd "::" "a::b::c" ==> ("a::b::", "c") -breakOnEnd :: Text -> Text -> (Text, Text) -breakOnEnd pat src = (reverse b, reverse a) - where (a,b) = breakOn (reverse pat) (reverse src) -{-# INLINE breakOnEnd #-} - --- | /O(n+m)/ Find all non-overlapping instances of @needle@ in --- @haystack@. Each element of the returned list consists of a pair: --- --- * The entire string prior to the /k/th match (i.e. the prefix) --- --- * The /k/th match, followed by the remainder of the string --- --- Examples: --- --- > breakOnAll "::" "" --- > ==> [] --- > breakOnAll "/" "a/b/c/" --- > ==> [("a", "/b/c/"), ("a/b", "/c/"), ("a/b/c", "/")] --- --- In (unlikely) bad cases, this function's time complexity degrades --- towards /O(n*m)/. --- --- The @needle@ parameter may not be empty. -breakOnAll :: Text -- ^ @needle@ to search for - -> Text -- ^ @haystack@ in which to search - -> [(Text, Text)] -breakOnAll pat src@(Text arr off slen) - | null pat = emptyError "breakOnAll" - | otherwise = L.map step (indices pat src) - where - step x = (chunk 0 x, chunk x (slen-x)) - chunk !n !l = text arr (n+off) l -{-# INLINE breakOnAll #-} - -------------------------------------------------------------------------------- --- ** Indexing 'Text's - --- $index --- --- If you think of a 'Text' value as an array of 'Char' values (which --- it is not), you run the risk of writing inefficient code. --- --- An idiom that is common in some languages is to find the numeric --- offset of a character or substring, then use that number to split --- or trim the searched string. With a 'Text' value, this approach --- would require two /O(n)/ operations: one to perform the search, and --- one to operate from wherever the search ended. --- --- For example, suppose you have a string that you want to split on --- the substring @\"::\"@, such as @\"foo::bar::quux\"@. 
Instead of --- searching for the index of @\"::\"@ and taking the substrings --- before and after that index, you would instead use @breakOnAll \"::\"@. - --- | /O(n)/ 'Text' index (subscript) operator, starting from 0. -index :: Text -> Int -> Char -index t n = S.index (stream t) n -{-# INLINE index #-} - --- | /O(n)/ The 'findIndex' function takes a predicate and a 'Text' --- and returns the index of the first element in the 'Text' satisfying --- the predicate. Subject to fusion. -findIndex :: (Char -> Bool) -> Text -> Maybe Int -findIndex p t = S.findIndex p (stream t) -{-# INLINE findIndex #-} - --- | /O(n+m)/ The 'count' function returns the number of times the --- query string appears in the given 'Text'. An empty query string is --- invalid, and will cause an error to be raised. --- --- In (unlikely) bad cases, this function's time complexity degrades --- towards /O(n*m)/. -count :: Text -> Text -> Int -count pat src - | null pat = emptyError "count" - | isSingleton pat = countChar (unsafeHead pat) src - | otherwise = L.length (indices pat src) -{-# INLINE [1] count #-} - -{-# RULES -"TEXT count/singleton -> countChar" [~1] forall c t. - count (singleton c) t = countChar c t - #-} - --- | /O(n)/ The 'countChar' function returns the number of times the --- query element appears in the given 'Text'. Subject to fusion. -countChar :: Char -> Text -> Int -countChar c t = S.countChar c (stream t) -{-# INLINE countChar #-} - -------------------------------------------------------------------------------- --- * Zipping - --- | /O(n)/ 'zip' takes two 'Text's and returns a list of --- corresponding pairs of bytes. If one input 'Text' is short, --- excess elements of the longer 'Text' are discarded. This is --- equivalent to a pair of 'unpack' operations. 
-zip :: Text -> Text -> [(Char,Char)] -zip a b = S.unstreamList $ S.zipWith (,) (stream a) (stream b) -{-# INLINE [0] zip #-} - --- | /O(n)/ 'zipWith' generalises 'zip' by zipping with the function --- given as the first argument, instead of a tupling function. --- Performs replacement on invalid scalar values. -zipWith :: (Char -> Char -> Char) -> Text -> Text -> Text -zipWith f t1 t2 = unstream (S.zipWith g (stream t1) (stream t2)) - where g a b = safe (f a b) -{-# INLINE [0] zipWith #-} - --- | /O(n)/ Breaks a 'Text' up into a list of words, delimited by 'Char's --- representing white space. -words :: Text -> [Text] -words t@(Text arr off len) = loop 0 0 - where - loop !start !n - | n >= len = if start == n - then [] - else [Text arr (start+off) (n-start)] - | isSpace c = - if start == n - then loop (start+1) (start+1) - else Text arr (start+off) (n-start) : loop (n+d) (n+d) - | otherwise = loop start (n+d) - where Iter c d = iter t n -{-# INLINE words #-} - --- | /O(n)/ Breaks a 'Text' up into a list of 'Text's at --- newline 'Char's. The resulting strings do not contain newlines. -lines :: Text -> [Text] -lines ps | null ps = [] - | otherwise = h : if null t - then [] - else lines (unsafeTail t) - where (# h,t #) = span_ (/= '\n') ps -{-# INLINE lines #-} - -{- --- | /O(n)/ Portably breaks a 'Text' up into a list of 'Text's at line --- boundaries. --- --- A line boundary is considered to be either a line feed, a carriage --- return immediately followed by a line feed, or a carriage return. --- This accounts for both Unix and Windows line ending conventions, --- and for the old convention used on Mac OS 9 and earlier. 
-lines' :: Text -> [Text] -lines' ps | null ps = [] - | otherwise = h : case uncons t of - Nothing -> [] - Just (c,t') - | c == '\n' -> lines t' - | c == '\r' -> case uncons t' of - Just ('\n',t'') -> lines t'' - _ -> lines t' - where (h,t) = span notEOL ps - notEOL c = c /= '\n' && c /= '\r' -{-# INLINE lines' #-} --} - --- | /O(n)/ Joins lines, after appending a terminating newline to --- each. -unlines :: [Text] -> Text -unlines = concat . L.map (`snoc` '\n') -{-# INLINE unlines #-} - --- | /O(n)/ Joins words using single space characters. -unwords :: [Text] -> Text -unwords = intercalate (singleton ' ') -{-# INLINE unwords #-} - --- | /O(n)/ The 'isPrefixOf' function takes two 'Text's and returns --- 'True' iff the first is a prefix of the second. Subject to fusion. -isPrefixOf :: Text -> Text -> Bool -isPrefixOf a@(Text _ _ alen) b@(Text _ _ blen) = - alen <= blen && S.isPrefixOf (stream a) (stream b) -{-# INLINE [1] isPrefixOf #-} - -{-# RULES -"TEXT isPrefixOf -> fused" [~1] forall s t. - isPrefixOf s t = S.isPrefixOf (stream s) (stream t) - #-} - --- | /O(n)/ The 'isSuffixOf' function takes two 'Text's and returns --- 'True' iff the first is a suffix of the second. -isSuffixOf :: Text -> Text -> Bool -isSuffixOf a@(Text _aarr _aoff alen) b@(Text barr boff blen) = - d >= 0 && a == b' - where d = blen - alen - b' | d == 0 = b - | otherwise = Text barr (boff+d) alen -{-# INLINE isSuffixOf #-} - --- | /O(n+m)/ The 'isInfixOf' function takes two 'Text's and returns --- 'True' iff the first is contained, wholly and intact, anywhere --- within the second. --- --- In (unlikely) bad cases, this function's time complexity degrades --- towards /O(n*m)/. -isInfixOf :: Text -> Text -> Bool -isInfixOf needle haystack - | null needle = True - | isSingleton needle = S.elem (unsafeHead needle) . S.stream $ haystack - | otherwise = not . L.null . 
indices needle $ haystack -{-# INLINE [1] isInfixOf #-} - -{-# RULES -"TEXT isInfixOf/singleton -> S.elem/S.stream" [~1] forall n h. - isInfixOf (singleton n) h = S.elem n (S.stream h) - #-} - -------------------------------------------------------------------------------- --- * View patterns - --- | /O(n)/ Return the suffix of the second string if its prefix --- matches the entire first string. --- --- Examples: --- --- > stripPrefix "foo" "foobar" == Just "bar" --- > stripPrefix "" "baz" == Just "baz" --- > stripPrefix "foo" "quux" == Nothing --- --- This is particularly useful with the @ViewPatterns@ extension to --- GHC, as follows: --- --- > {-# LANGUAGE ViewPatterns #-} --- > import Data.Text as T --- > --- > fnordLength :: Text -> Int --- > fnordLength (stripPrefix "fnord" -> Just suf) = T.length suf --- > fnordLength _ = -1 -stripPrefix :: Text -> Text -> Maybe Text -stripPrefix p@(Text _arr _off plen) t@(Text arr off len) - | p `isPrefixOf` t = Just $! text arr (off+plen) (len-plen) - | otherwise = Nothing - --- | /O(n)/ Find the longest non-empty common prefix of two strings --- and return it, along with the suffixes of each string at which they --- no longer match. --- --- If the strings do not have a common prefix or either one is empty, --- this function returns 'Nothing'. --- --- Examples: --- --- > commonPrefixes "foobar" "fooquux" == Just ("foo","bar","quux") --- > commonPrefixes "veeble" "fetzer" == Nothing --- > commonPrefixes "" "baz" == Nothing -commonPrefixes :: Text -> Text -> Maybe (Text,Text,Text) -commonPrefixes t0@(Text arr0 off0 len0) t1@(Text arr1 off1 len1) = go 0 0 - where - go !i !j | i < len0 && j < len1 && a == b = go (i+d0) (j+d1) - | i > 0 = Just (Text arr0 off0 i, - text arr0 (off0+i) (len0-i), - text arr1 (off1+j) (len1-j)) - | otherwise = Nothing - where Iter a d0 = iter t0 i - Iter b d1 = iter t1 j - --- | /O(n)/ Return the prefix of the second string if its suffix --- matches the entire first string. 
--- --- Examples: --- --- > stripSuffix "bar" "foobar" == Just "foo" --- > stripSuffix "" "baz" == Just "baz" --- > stripSuffix "foo" "quux" == Nothing --- --- This is particularly useful with the @ViewPatterns@ extension to --- GHC, as follows: --- --- > {-# LANGUAGE ViewPatterns #-} --- > import Data.Text as T --- > --- > quuxLength :: Text -> Int --- > quuxLength (stripSuffix "quux" -> Just pre) = T.length pre --- > quuxLength _ = -1 -stripSuffix :: Text -> Text -> Maybe Text -stripSuffix p@(Text _arr _off plen) t@(Text arr off len) - | p `isSuffixOf` t = Just $! text arr off (len-plen) - | otherwise = Nothing - --- | Add a list of non-negative numbers. Errors out on overflow. -sumP :: String -> [Int] -> Int -sumP fun = go 0 - where go !a (x:xs) - | ax >= 0 = go ax xs - | otherwise = overflowError fun - where ax = a + x - go a _ = a - -emptyError :: String -> a -emptyError fun = P.error $ "Data.Text." ++ fun ++ ": empty input" - -overflowError :: String -> a -overflowError fun = P.error $ "Data.Text." ++ fun ++ ": size overflow" - --- | /O(n)/ Make a distinct copy of the given string, sharing no --- storage with the original string. --- --- As an example, suppose you read a large string, of which you need --- only a small portion. If you do not use 'copy', the entire original --- array will be kept alive in memory by the smaller string. Making a --- copy \"breaks the link\" to the original array, allowing it to be --- garbage collected if there are no other live references to it. 
-copy :: Text -> Text -copy (Text arr off len) = Text (A.run go) 0 len - where - go :: ST s (A.MArray s) - go = do - marr <- A.new len - A.copyI marr 0 arr off len - return marr diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/include/text_cbits.h cabal-install-1.22-1.22.9.0/=unpacked-tar8=/include/text_cbits.h --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/include/text_cbits.h 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/include/text_cbits.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,11 +0,0 @@ -/* - * Copyright (c) 2013 Bryan O'Sullivan . - */ - -#ifndef _text_cbits_h -#define _text_cbits_h - -#define UTF8_ACCEPT 0 -#define UTF8_REJECT 12 - -#endif diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/LICENSE cabal-install-1.22-1.22.9.0/=unpacked-tar8=/LICENSE --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/LICENSE 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/LICENSE 1970-01-01 00:00:00.000000000 +0000 @@ -1,26 +0,0 @@ -Copyright (c) 2008-2009, Tom Harper -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/README.markdown cabal-install-1.22-1.22.9.0/=unpacked-tar8=/README.markdown --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/README.markdown 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/README.markdown 1970-01-01 00:00:00.000000000 +0000 @@ -1,42 +0,0 @@ -# Text: Fast, packed Unicode strings, using stream fusion - -This package provides the Data.Text library, a library for the space- -and time-efficient manipulation of Unicode text in Haskell. - - -# Normalization, conversion, and collation, oh my! - -This library intentionally provides a simple API based on the -Haskell prelude's list manipulation functions. For more complicated -real-world tasks, such as Unicode normalization, conversion to and -from a larger variety of encodings, and collation, use the [text-icu -package](http://hackage.haskell.org/cgi-bin/hackage-scripts/package/text-icu). - -That library uses the well-respected and liberally licensed ICU -library to provide these facilities. - - -# Get involved! - -Please report bugs via the -[github issue tracker](https://github.com/bos/text/issues). - -Master [git repository](https://github.com/bos/text): - -* `git clone git://github.com/bos/text.git` - -There's also a [Mercurial mirror](https://bitbucket.org/bos/text): - -* `hg clone https://bitbucket.org/bos/text` - -(You can create and contribute changes using either Mercurial or git.) 
- - -# Authors - -The base code for this library was originally written by Tom Harper, -based on the stream fusion framework developed by Roman Leshchinskiy, -Duncan Coutts, and Don Stewart. - -The core library was fleshed out, debugged, and tested by Bryan -O'Sullivan , and he is the current maintainer. diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/scripts/ApiCompare.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/scripts/ApiCompare.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/scripts/ApiCompare.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/scripts/ApiCompare.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,28 +0,0 @@ --- This script compares the strict and lazy Text APIs to ensure that --- they're reasonably in sync. - -{-# LANGUAGE OverloadedStrings #-} - -import qualified Data.Set as S -import qualified Data.Text as T -import System.Process - -main = do - let tidy pkg = (S.fromList . filter (T.isInfixOf "::") . T.lines . - T.replace "GHC.Int.Int64" "Int" . - T.replace "\n " "" . - T.replace (T.append (T.pack pkg) ".") "" . T.pack) `fmap` - readProcess "ghci" [] (":browse " ++ pkg) - let diff a b = mapM_ (putStrLn . (" "++) . T.unpack) . 
S.toList $ - S.difference a b - text <- tidy "Data.Text" - lazy <- tidy "Data.Text.Lazy" - list <- tidy "Data.List" - putStrLn "Text \\ List:" - diff text list - putStrLn "" - putStrLn "Text \\ Lazy:" - diff text lazy - putStrLn "" - putStrLn "Lazy \\ Text:" - diff lazy text diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/scripts/Arsec.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/scripts/Arsec.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/scripts/Arsec.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/scripts/Arsec.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,44 +0,0 @@ -module Arsec - ( - Comment - , comment - , semi - , showC - , unichar - , unichars - , module Control.Applicative - , module Control.Monad - , module Data.Char - , module Text.ParserCombinators.Parsec.Char - , module Text.ParserCombinators.Parsec.Combinator - , module Text.ParserCombinators.Parsec.Error - , module Text.ParserCombinators.Parsec.Prim - ) where - -import Control.Monad -import Control.Applicative -import Data.Char -import Numeric -import Text.ParserCombinators.Parsec.Char hiding (lower, upper) -import Text.ParserCombinators.Parsec.Combinator hiding (optional) -import Text.ParserCombinators.Parsec.Error -import Text.ParserCombinators.Parsec.Prim hiding ((<|>), many) - -type Comment = String - -unichar :: Parser Char -unichar = chr . fst . head . 
readHex <$> many1 hexDigit - -unichars :: Parser [Char] -unichars = manyTill (unichar <* spaces) semi - -semi :: Parser () -semi = char ';' *> spaces *> pure () - -comment :: Parser Comment -comment = (char '#' *> manyTill anyToken (char '\n')) <|> string "\n" - -showC :: Char -> String -showC c = "'\\x" ++ d ++ "'" - where h = showHex (ord c) "" - d = replicate (4 - length h) '0' ++ h diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/scripts/CaseFolding.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/scripts/CaseFolding.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/scripts/CaseFolding.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/scripts/CaseFolding.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,46 +0,0 @@ --- This script processes the following source file: --- --- http://unicode.org/Public/UNIDATA/CaseFolding.txt - -module CaseFolding - ( - CaseFolding(..) - , Fold(..) - , parseCF - , mapCF - ) where - -import Arsec - -data Fold = Fold { - code :: Char - , status :: Char - , mapping :: [Char] - , name :: String - } deriving (Eq, Ord, Show) - -data CaseFolding = CF { cfComments :: [Comment], cfFolding :: [Fold] } - deriving (Show) - -entries :: Parser CaseFolding -entries = CF <$> many comment <*> many (entry <* many comment) - where - entry = Fold <$> unichar <* semi - <*> oneOf "CFST" <* semi - <*> unichars - <*> (string "# " *> manyTill anyToken (char '\n')) - -parseCF :: FilePath -> IO (Either ParseError CaseFolding) -parseCF name = parse entries name <$> readFile name - -mapCF :: CaseFolding -> [String] -mapCF (CF _ ms) = typ ++ (map nice . filter p $ ms) ++ [last] - where - typ = ["foldMapping :: forall s. Char -> s -> Step (CC s) Char" - ,"{-# INLINE foldMapping #-}"] - last = "foldMapping c s = Yield (toLower c) (CC s '\\0' '\\0')" - nice c = "-- " ++ name c ++ "\n" ++ - "foldMapping " ++ showC (code c) ++ " s = Yield " ++ x ++ " (CC s " ++ y ++ " " ++ z ++ ")" - where [x,y,z] = (map showC . 
take 3) (mapping c ++ repeat '\0') - p f = status f `elem` "CF" && - mapping f /= [toLower (code f)] diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/scripts/CaseMapping.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/scripts/CaseMapping.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/scripts/CaseMapping.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/scripts/CaseMapping.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,38 +0,0 @@ -import System.Environment -import System.IO - -import Arsec -import CaseFolding -import SpecialCasing - -main = do - args <- getArgs - let oname = case args of - [] -> "../Data/Text/Fusion/CaseMapping.hs" - [o] -> o - psc <- parseSC "SpecialCasing.txt" - pcf <- parseCF "CaseFolding.txt" - scs <- case psc of - Left err -> print err >> return undefined - Right ms -> return ms - cfs <- case pcf of - Left err -> print err >> return undefined - Right ms -> return ms - h <- openFile oname WriteMode - let comments = map ("--" ++) $ - take 2 (cfComments cfs) ++ take 2 (scComments scs) - mapM_ (hPutStrLn h) $ - ["{-# LANGUAGE Rank2Types #-}" - ,"-- AUTOMATICALLY GENERATED - DO NOT EDIT" - ,"-- Generated by scripts/SpecialCasing.hs"] ++ - comments ++ - ["" - ,"module Data.Text.Fusion.CaseMapping where" - ,"import Data.Char" - ,"import Data.Text.Internal.Fusion.Types" - ,""] - mapM_ (hPutStrLn h) (mapSC "upper" upper toUpper scs) - mapM_ (hPutStrLn h) (mapSC "lower" lower toLower scs) - mapM_ (hPutStrLn h) (mapSC "title" title toTitle scs) - mapM_ (hPutStrLn h) (mapCF cfs) - hClose h diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/scripts/SpecialCasing.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/scripts/SpecialCasing.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/scripts/SpecialCasing.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/scripts/SpecialCasing.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,56 +0,0 @@ --- This script processes the following source 
file: --- --- http://unicode.org/Public/UNIDATA/SpecialCasing.txt - -module SpecialCasing - ( - SpecialCasing(..) - , Case(..) - , parseSC - , mapSC - ) where - -import Arsec - -data SpecialCasing = SC { scComments :: [Comment], scCasing :: [Case] } - deriving (Show) - -data Case = Case { - code :: Char - , lower :: [Char] - , title :: [Char] - , upper :: [Char] - , conditions :: String - , name :: String - } deriving (Eq, Ord, Show) - -entries :: Parser SpecialCasing -entries = SC <$> many comment <*> many (entry <* many comment) - where - entry = Case <$> unichar <* semi - <*> unichars - <*> unichars - <*> unichars - <*> manyTill anyToken (string "# ") - <*> manyTill anyToken (char '\n') - -parseSC :: FilePath -> IO (Either ParseError SpecialCasing) -parseSC name = parse entries name <$> readFile name - -mapSC :: String -> (Case -> String) -> (Char -> Char) -> SpecialCasing - -> [String] -mapSC which access twiddle (SC _ ms) = - typ ++ (map nice . filter p $ ms) ++ [last] - where - typ = [which ++ "Mapping :: forall s. Char -> s -> Step (CC s) Char" - ,"{-# INLINE " ++ which ++ "Mapping #-}"] - last = which ++ "Mapping c s = Yield (to" ++ ucFirst which ++ " c) (CC s '\\0' '\\0')" - nice c = "-- " ++ name c ++ "\n" ++ - which ++ "Mapping " ++ showC (code c) ++ " s = Yield " ++ x ++ " (CC s " ++ y ++ " " ++ z ++ ")" - where [x,y,z] = (map showC . 
take 3) (access c ++ repeat '\0') - p c = [k] /= a && a /= [twiddle k] && null (conditions c) - where a = access c - k = code c - -ucFirst (c:cs) = toUpper c : cs -ucFirst [] = [] diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Setup.lhs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Setup.lhs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/Setup.lhs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/Setup.lhs 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -#!/usr/bin/env runhaskell -> import Distribution.Simple -> main = defaultMain diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/tests/cabal.config cabal-install-1.22-1.22.9.0/=unpacked-tar8=/tests/cabal.config --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/tests/cabal.config 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/tests/cabal.config 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ --- These flags help to speed up building the test suite. - -documentation: False -executable-profiling: False -executable-stripping: False -flags: developer -library-profiling: False diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/tests/.ghci cabal-install-1.22-1.22.9.0/=unpacked-tar8=/tests/.ghci --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/tests/.ghci 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/tests/.ghci 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -:set -DHAVE_DEEPSEQ -isrc -i../.. 
diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/tests/Makefile cabal-install-1.22-1.22.9.0/=unpacked-tar8=/tests/Makefile --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/tests/Makefile 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/tests/Makefile 1970-01-01 00:00:00.000000000 +0000 @@ -1,35 +0,0 @@ -count = 1000 - -coverage: build coverage/hpc_index.html - -build: text-test-data - cabal configure -fhpc - cabal build - -text-test-data: - hg clone https://bitbucket.org/bos/text-test-data - $(MAKE) -C text-test-data - -coverage/text-tests.tix: - -mkdir -p coverage - ./dist/build/text-tests/text-tests -a $(count) - mv text-tests.tix $@ - -coverage/text-tests-stdio.tix: - -mkdir -p coverage - ./scripts/cover-stdio.sh ./dist/build/text-tests-stdio/text-tests-stdio - mv text-tests-stdio.tix $@ - -coverage/coverage.tix: coverage/text-tests.tix coverage/text-tests-stdio.tix - hpc combine --output=$@ \ - --exclude=Main \ - coverage/text-tests.tix \ - coverage/text-tests-stdio.tix - -coverage/hpc_index.html: coverage/coverage.tix - hpc markup --destdir=coverage coverage/coverage.tix - -clean: - rm -rf dist coverage .hpc - -.PHONY: build coverage diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/tests/scripts/cover-stdio.sh cabal-install-1.22-1.22.9.0/=unpacked-tar8=/tests/scripts/cover-stdio.sh --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/tests/scripts/cover-stdio.sh 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/tests/scripts/cover-stdio.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,62 +0,0 @@ -#!/bin/bash - -if [[ $# < 1 ]]; then - echo "Usage: $0 " - exit 1 -fi - -exe=$1 - -rm -f $exe.tix - -f=$(mktemp stdio-f.XXXXXX) -g=$(mktemp stdio-g.XXXXXX) - -for t in T TL; do - echo $t.readFile > $f - $exe $t.readFile $f > $g - if ! diff -u $f $g; then - errs=$((errs+1)) - echo FAIL: $t.readFile 1>&2 - fi - - $exe $t.writeFile $f $t.writeFile - echo -n $t.writeFile > $g - if ! 
diff -u $f $g; then - errs=$((errs+1)) - echo FAIL: $t.writeFile 1>&2 - fi - - echo -n quux > $f - $exe $t.appendFile $f $t.appendFile - echo -n quux$t.appendFile > $g - if ! diff -u $f $g; then - errs=$((errs+1)) - echo FAIL: $t.appendFile 1>&2 - fi - - echo $t.interact | $exe $t.interact > $f - echo $t.interact > $g - if ! diff -u $f $g; then - errs=$((errs+1)) - echo FAIL: $t.interact 1>&2 - fi - - echo $t.getContents | $exe $t.getContents > $f - echo $t.getContents > $g - if ! diff -u $f $g; then - errs=$((errs+1)) - echo FAIL: $t.getContents 1>&2 - fi - - echo $t.getLine | $exe $t.getLine > $f - echo $t.getLine > $g - if ! diff -u $f $g; then - errs=$((errs+1)) - echo FAIL: $t.getLine 1>&2 - fi -done - -rm -f $f $g - -exit $errs diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/tests/Tests/IO.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/tests/Tests/IO.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/tests/Tests/IO.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/tests/Tests/IO.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,34 +0,0 @@ --- | Program which exposes some haskell functions as an exutable. The results --- and coverage of this module is meant to be checked using a shell script. 
--- -module Main - ( - main - ) where - -import System.Environment (getArgs) -import System.Exit (exitFailure) -import System.IO (hPutStrLn, stderr) -import qualified Data.Text as T -import qualified Data.Text.IO as T -import qualified Data.Text.Lazy as TL -import qualified Data.Text.Lazy.IO as TL - -main :: IO () -main = do - args <- getArgs - case args of - ["T.readFile", name] -> T.putStr =<< T.readFile name - ["T.writeFile", name, t] -> T.writeFile name (T.pack t) - ["T.appendFile", name, t] -> T.appendFile name (T.pack t) - ["T.interact"] -> T.interact id - ["T.getContents"] -> T.putStr =<< T.getContents - ["T.getLine"] -> T.putStrLn =<< T.getLine - - ["TL.readFile", name] -> TL.putStr =<< TL.readFile name - ["TL.writeFile", name, t] -> TL.writeFile name (TL.pack t) - ["TL.appendFile", name, t] -> TL.appendFile name (TL.pack t) - ["TL.interact"] -> TL.interact id - ["TL.getContents"] -> TL.putStr =<< TL.getContents - ["TL.getLine"] -> TL.putStrLn =<< TL.getLine - _ -> hPutStrLn stderr "invalid directive!" 
>> exitFailure diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/tests/Tests/Properties/Mul.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/tests/Tests/Properties/Mul.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/tests/Tests/Properties/Mul.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/tests/Tests/Properties/Mul.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,40 +0,0 @@ -{-# LANGUAGE ScopedTypeVariables #-} - -module Tests.Properties.Mul (tests) where - -import Control.Applicative ((<$>), pure) -import Control.Exception as E (SomeException, catch, evaluate) -import Data.Int (Int32, Int64) -import Data.Text.Internal (mul, mul32, mul64) -import System.IO.Unsafe (unsafePerformIO) -import Test.Framework (Test) -import Test.Framework.Providers.QuickCheck2 (testProperty) -import Test.QuickCheck hiding ((.&.)) - -mulRef :: (Integral a, Bounded a) => a -> a -> Maybe a -mulRef a b - | ab < bot || ab > top = Nothing - | otherwise = Just (fromIntegral ab) - where ab = fromIntegral a * fromIntegral b - top = fromIntegral (maxBound `asTypeOf` a) :: Integer - bot = fromIntegral (minBound `asTypeOf` a) :: Integer - -eval :: (a -> b -> c) -> a -> b -> Maybe c -eval f a b = unsafePerformIO $ - (Just <$> evaluate (f a b)) `E.catch` (\(_::SomeException) -> pure Nothing) - -t_mul32 :: Int32 -> Int32 -> Property -t_mul32 a b = mulRef a b === eval mul32 a b - -t_mul64 :: Int64 -> Int64 -> Property -t_mul64 a b = mulRef a b === eval mul64 a b - -t_mul :: Int -> Int -> Property -t_mul a b = mulRef a b === eval mul a b - -tests :: [Test] -tests = [ - testProperty "t_mul" t_mul - , testProperty "t_mul32" t_mul32 - , testProperty "t_mul64" t_mul64 - ] diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/tests/Tests/Properties.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/tests/Tests/Properties.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/tests/Tests/Properties.hs 2014-12-12 21:23:19.000000000 +0000 +++ 
cabal-install-1.22-1.22.9.0/=unpacked-tar8=/tests/Tests/Properties.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,1342 +0,0 @@ --- | General quicktest properties for the text library --- -{-# LANGUAGE BangPatterns, FlexibleInstances, OverloadedStrings, - ScopedTypeVariables, TypeSynonymInstances #-} -{-# OPTIONS_GHC -fno-enable-rewrite-rules #-} -{-# OPTIONS_GHC -fno-warn-missing-signatures #-} -module Tests.Properties - ( - tests - ) where - -import Control.Applicative ((<$>), (<*>)) -import Control.Arrow ((***), second) -import Data.Bits ((.&.)) -import Data.Char (chr, isDigit, isHexDigit, isLower, isSpace, isUpper, ord) -import Data.Int (Int8, Int16, Int32, Int64) -import Data.Monoid (Monoid(..)) -import Data.String (fromString) -import Data.Text.Encoding.Error -import Data.Text.Foreign -import Data.Text.Internal.Encoding.Utf8 -import Data.Text.Internal.Fusion.Size -import Data.Text.Internal.Search (indices) -import Data.Text.Lazy.Read as TL -import Data.Text.Read as T -import Data.Word (Word, Word8, Word16, Word32, Word64) -import Numeric (showEFloat, showFFloat, showGFloat, showHex) -import Prelude hiding (replicate) -import Test.Framework (Test, testGroup) -import Test.Framework.Providers.QuickCheck2 (testProperty) -import Test.QuickCheck hiding ((.&.)) -import Test.QuickCheck.Monadic -import Test.QuickCheck.Property (Property(..)) -import Tests.QuickCheckUtils -import Tests.Utils -import Text.Show.Functions () -import qualified Control.Exception as Exception -import qualified Data.Bits as Bits (shiftL, shiftR) -import qualified Data.ByteString as B -import qualified Data.ByteString.Lazy as BL -import qualified Data.List as L -import qualified Data.Text as T -import qualified Data.Text.Encoding as E -import qualified Data.Text.IO as T -import qualified Data.Text.Internal.Fusion as S -import qualified Data.Text.Internal.Fusion.Common as S -import qualified Data.Text.Internal.Lazy.Fusion as SL -import qualified Data.Text.Internal.Lazy.Search as S (indices) 
-import qualified Data.Text.Internal.Unsafe.Shift as U -import qualified Data.Text.Lazy as TL -import qualified Data.Text.Lazy.Builder as TB -import qualified Data.Text.Lazy.Builder.Int as TB -import qualified Data.Text.Lazy.Builder.RealFloat as TB -import qualified Data.Text.Lazy.Encoding as EL -import qualified Data.Text.Lazy.IO as TL -import qualified System.IO as IO -import qualified Tests.Properties.Mul as Mul -import qualified Tests.SlowFunctions as Slow - -t_pack_unpack = (T.unpack . T.pack) `eq` id -tl_pack_unpack = (TL.unpack . TL.pack) `eq` id -t_stream_unstream = (S.unstream . S.stream) `eq` id -tl_stream_unstream = (SL.unstream . SL.stream) `eq` id -t_reverse_stream t = (S.reverse . S.reverseStream) t == t -t_singleton c = [c] == (T.unpack . T.singleton) c -tl_singleton c = [c] == (TL.unpack . TL.singleton) c -tl_unstreamChunks x = f 11 x == f 1000 x - where f n = SL.unstreamChunks n . S.streamList -tl_chunk_unchunk = (TL.fromChunks . TL.toChunks) `eq` id -tl_from_to_strict = (TL.fromStrict . TL.toStrict) `eq` id - --- Note: this silently truncates code-points > 255 to 8-bit due to 'B.pack' -encodeL1 :: T.Text -> B.ByteString -encodeL1 = B.pack . map (fromIntegral . fromEnum) . T.unpack -encodeLazyL1 :: TL.Text -> BL.ByteString -encodeLazyL1 = BL.fromChunks . map encodeL1 . TL.toChunks - -t_ascii t = E.decodeASCII (E.encodeUtf8 a) == a - where a = T.map (\c -> chr (ord c `mod` 128)) t -tl_ascii t = EL.decodeASCII (EL.encodeUtf8 a) == a - where a = TL.map (\c -> chr (ord c `mod` 128)) t -t_latin1 t = E.decodeLatin1 (encodeL1 a) == a - where a = T.map (\c -> chr (ord c `mod` 256)) t -tl_latin1 t = EL.decodeLatin1 (encodeLazyL1 a) == a - where a = TL.map (\c -> chr (ord c `mod` 256)) t -t_utf8 = forAll genUnicode $ (E.decodeUtf8 . E.encodeUtf8) `eq` id -t_utf8' = forAll genUnicode $ (E.decodeUtf8' . E.encodeUtf8) `eq` (id . Right) -tl_utf8 = forAll genUnicode $ (EL.decodeUtf8 . EL.encodeUtf8) `eq` id -tl_utf8' = forAll genUnicode $ (EL.decodeUtf8' . 
EL.encodeUtf8) `eq` (id . Right) -t_utf16LE = forAll genUnicode $ (E.decodeUtf16LE . E.encodeUtf16LE) `eq` id -tl_utf16LE = forAll genUnicode $ (EL.decodeUtf16LE . EL.encodeUtf16LE) `eq` id -t_utf16BE = forAll genUnicode $ (E.decodeUtf16BE . E.encodeUtf16BE) `eq` id -tl_utf16BE = forAll genUnicode $ (EL.decodeUtf16BE . EL.encodeUtf16BE) `eq` id -t_utf32LE = forAll genUnicode $ (E.decodeUtf32LE . E.encodeUtf32LE) `eq` id -tl_utf32LE = forAll genUnicode $ (EL.decodeUtf32LE . EL.encodeUtf32LE) `eq` id -t_utf32BE = forAll genUnicode $ (E.decodeUtf32BE . E.encodeUtf32BE) `eq` id -tl_utf32BE = forAll genUnicode $ (EL.decodeUtf32BE . EL.encodeUtf32BE) `eq` id - -t_utf8_incr = forAll genUnicode $ \s (Positive n) -> (recode n `eq` id) s - where recode n = T.concat . map fst . feedChunksOf n E.streamDecodeUtf8 . - E.encodeUtf8 - -feedChunksOf :: Int -> (B.ByteString -> E.Decoding) -> B.ByteString - -> [(T.Text, B.ByteString)] -feedChunksOf n f bs - | B.null bs = [] - | otherwise = let (x,y) = B.splitAt n bs - E.Some t b f' = f x - in (t,b) : feedChunksOf n f' y - -t_utf8_undecoded = forAll genUnicode $ \t -> - let b = E.encodeUtf8 t - ls = concatMap (leftover . E.encodeUtf8 . T.singleton) . T.unpack $ t - leftover = (++ [B.empty]) . init . tail . B.inits - in (map snd . feedChunksOf 1 E.streamDecodeUtf8) b == ls - -data Badness = Solo | Leading | Trailing - deriving (Eq, Show) - -instance Arbitrary Badness where - arbitrary = elements [Solo, Leading, Trailing] - -t_utf8_err :: Badness -> DecodeErr -> Property -t_utf8_err bad de = do - let gen = case bad of - Solo -> genInvalidUTF8 - Leading -> B.append <$> genInvalidUTF8 <*> genUTF8 - Trailing -> B.append <$> genUTF8 <*> genInvalidUTF8 - genUTF8 = E.encodeUtf8 <$> genUnicode - forAll gen $ \bs -> MkProperty $ do - onErr <- genDecodeErr de - unProperty . 
monadicIO $ do - l <- run $ let len = T.length (E.decodeUtf8With onErr bs) - in (len `seq` return (Right len)) `Exception.catch` - (\(e::UnicodeException) -> return (Left e)) - assert $ case l of - Left err -> length (show err) >= 0 - Right _ -> de /= Strict - -t_utf8_err' :: B.ByteString -> Property -t_utf8_err' bs = monadicIO . assert $ case E.decodeUtf8' bs of - Left err -> length (show err) >= 0 - Right t -> T.length t >= 0 - -genInvalidUTF8 :: Gen B.ByteString -genInvalidUTF8 = B.pack <$> oneof [ - -- invalid leading byte of a 2-byte sequence - (:) <$> choose (0xC0, 0xC1) <*> upTo 1 contByte - -- invalid leading byte of a 4-byte sequence - , (:) <$> choose (0xF5, 0xFF) <*> upTo 3 contByte - -- 4-byte sequence greater than U+10FFFF - , do k <- choose (0x11, 0x13) - let w0 = 0xF0 + (k `Bits.shiftR` 2) - w1 = 0x80 + ((k .&. 3) `Bits.shiftL` 4) - ([w0,w1]++) <$> vectorOf 2 contByte - -- continuation bytes without a start byte - , listOf1 contByte - -- short 2-byte sequence - , (:[]) <$> choose (0xC2, 0xDF) - -- short 3-byte sequence - , (:) <$> choose (0xE0, 0xEF) <*> upTo 1 contByte - -- short 4-byte sequence - , (:) <$> choose (0xF0, 0xF4) <*> upTo 2 contByte - -- overlong encoding - , do k <- choose (0,0xFFFF) - let c = chr k - case k of - _ | k < 0x80 -> oneof [ let (w,x) = ord2 c in return [w,x] - , let (w,x,y) = ord3 c in return [w,x,y] - , let (w,x,y,z) = ord4 c in return [w,x,y,z] ] - | k < 0x7FF -> oneof [ let (w,x,y) = ord3 c in return [w,x,y] - , let (w,x,y,z) = ord4 c in return [w,x,y,z] ] - | otherwise -> let (w,x,y,z) = ord4 c in return [w,x,y,z] - ] - where - contByte = (0x80 +) <$> choose (0, 0x3f) - upTo n gen = do - k <- choose (0,n) - vectorOf k gen - -s_Eq s = (s==) `eq` ((S.streamList s==) . S.streamList) - where _types = s :: String -sf_Eq p s = - ((L.filter p s==) . L.filter p) `eq` - (((S.filter p $ S.streamList s)==) . S.filter p . S.streamList) -t_Eq s = (s==) `eq` ((T.pack s==) . T.pack) -tl_Eq s = (s==) `eq` ((TL.pack s==) . 
TL.pack) -s_Ord s = (compare s) `eq` (compare (S.streamList s) . S.streamList) - where _types = s :: String -sf_Ord p s = - ((compare $ L.filter p s) . L.filter p) `eq` - (compare (S.filter p $ S.streamList s) . S.filter p . S.streamList) -t_Ord s = (compare s) `eq` (compare (T.pack s) . T.pack) -tl_Ord s = (compare s) `eq` (compare (TL.pack s) . TL.pack) -t_Read = id `eq` (T.unpack . read . show) -tl_Read = id `eq` (TL.unpack . read . show) -t_Show = show `eq` (show . T.pack) -tl_Show = show `eq` (show . TL.pack) -t_mappend s = mappend s`eqP` (unpackS . mappend (T.pack s)) -tl_mappend s = mappend s`eqP` (unpackS . mappend (TL.pack s)) -t_mconcat = unsquare $ - mconcat `eq` (unpackS . mconcat . L.map T.pack) -tl_mconcat = unsquare $ - mconcat `eq` (unpackS . mconcat . L.map TL.pack) -t_mempty = mempty == (unpackS (mempty :: T.Text)) -tl_mempty = mempty == (unpackS (mempty :: TL.Text)) -t_IsString = fromString `eqP` (T.unpack . fromString) -tl_IsString = fromString `eqP` (TL.unpack . fromString) - -s_cons x = (x:) `eqP` (unpackS . S.cons x) -s_cons_s x = (x:) `eqP` (unpackS . S.unstream . S.cons x) -sf_cons p x = ((x:) . L.filter p) `eqP` (unpackS . S.cons x . S.filter p) -t_cons x = (x:) `eqP` (unpackS . T.cons x) -tl_cons x = (x:) `eqP` (unpackS . TL.cons x) -s_snoc x = (++ [x]) `eqP` (unpackS . (flip S.snoc) x) -t_snoc x = (++ [x]) `eqP` (unpackS . (flip T.snoc) x) -tl_snoc x = (++ [x]) `eqP` (unpackS . (flip TL.snoc) x) -s_append s = (s++) `eqP` (unpackS . S.append (S.streamList s)) -s_append_s s = (s++) `eqP` - (unpackS . S.unstream . S.append (S.streamList s)) -sf_append p s = (L.filter p s++) `eqP` - (unpackS . S.append (S.filter p $ S.streamList s)) -t_append s = (s++) `eqP` (unpackS . T.append (packS s)) - -uncons (x:xs) = Just (x,xs) -uncons _ = Nothing - -s_uncons = uncons `eqP` (fmap (second unpackS) . S.uncons) -sf_uncons p = (uncons . L.filter p) `eqP` - (fmap (second unpackS) . S.uncons . S.filter p) -t_uncons = uncons `eqP` (fmap (second unpackS) . 
T.uncons) -tl_uncons = uncons `eqP` (fmap (second unpackS) . TL.uncons) -s_head = head `eqP` S.head -sf_head p = (head . L.filter p) `eqP` (S.head . S.filter p) -t_head = head `eqP` T.head -tl_head = head `eqP` TL.head -s_last = last `eqP` S.last -sf_last p = (last . L.filter p) `eqP` (S.last . S.filter p) -t_last = last `eqP` T.last -tl_last = last `eqP` TL.last -s_tail = tail `eqP` (unpackS . S.tail) -s_tail_s = tail `eqP` (unpackS . S.unstream . S.tail) -sf_tail p = (tail . L.filter p) `eqP` (unpackS . S.tail . S.filter p) -t_tail = tail `eqP` (unpackS . T.tail) -tl_tail = tail `eqP` (unpackS . TL.tail) -s_init = init `eqP` (unpackS . S.init) -s_init_s = init `eqP` (unpackS . S.unstream . S.init) -sf_init p = (init . L.filter p) `eqP` (unpackS . S.init . S.filter p) -t_init = init `eqP` (unpackS . T.init) -tl_init = init `eqP` (unpackS . TL.init) -s_null = null `eqP` S.null -sf_null p = (null . L.filter p) `eqP` (S.null . S.filter p) -t_null = null `eqP` T.null -tl_null = null `eqP` TL.null -s_length = length `eqP` S.length -sf_length p = (length . L.filter p) `eqP` (S.length . S.filter p) -sl_length = (fromIntegral . length) `eqP` SL.length -t_length = length `eqP` T.length -tl_length = L.genericLength `eqP` TL.length -t_compareLength t = (compare (T.length t)) `eq` T.compareLength t -tl_compareLength t= (compare (TL.length t)) `eq` TL.compareLength t - -s_map f = map f `eqP` (unpackS . S.map f) -s_map_s f = map f `eqP` (unpackS . S.unstream . S.map f) -sf_map p f = (map f . L.filter p) `eqP` (unpackS . S.map f . S.filter p) -t_map f = map f `eqP` (unpackS . T.map f) -tl_map f = map f `eqP` (unpackS . TL.map f) -s_intercalate c = unsquare $ - L.intercalate c `eq` - (unpackS . S.intercalate (packS c) . map packS) -t_intercalate c = unsquare $ - L.intercalate c `eq` - (unpackS . T.intercalate (packS c) . map packS) -tl_intercalate c = unsquare $ - L.intercalate c `eq` - (unpackS . TL.intercalate (TL.pack c) . 
map TL.pack) -s_intersperse c = L.intersperse c `eqP` - (unpackS . S.intersperse c) -s_intersperse_s c = L.intersperse c `eqP` - (unpackS . S.unstream . S.intersperse c) -sf_intersperse p c= (L.intersperse c . L.filter p) `eqP` - (unpackS . S.intersperse c . S.filter p) -t_intersperse c = unsquare $ - L.intersperse c `eqP` (unpackS . T.intersperse c) -tl_intersperse c = unsquare $ - L.intersperse c `eqP` (unpackS . TL.intersperse c) -t_transpose = unsquare $ - L.transpose `eq` (map unpackS . T.transpose . map packS) -tl_transpose = unsquare $ - L.transpose `eq` (map unpackS . TL.transpose . map TL.pack) -t_reverse = L.reverse `eqP` (unpackS . T.reverse) -tl_reverse = L.reverse `eqP` (unpackS . TL.reverse) -t_reverse_short n = L.reverse `eqP` (unpackS . S.reverse . shorten n . S.stream) - -t_replace s d = (L.intercalate d . splitOn s) `eqP` - (unpackS . T.replace (T.pack s) (T.pack d)) -tl_replace s d = (L.intercalate d . splitOn s) `eqP` - (unpackS . TL.replace (TL.pack s) (TL.pack d)) - -splitOn :: (Eq a) => [a] -> [a] -> [[a]] -splitOn pat src0 - | l == 0 = error "splitOn: empty" - | otherwise = go src0 - where - l = length pat - go src = search 0 src - where - search _ [] = [src] - search !n s@(_:s') - | pat `L.isPrefixOf` s = take n src : go (drop l s) - | otherwise = search (n+1) s' - -s_toCaseFold_length xs = S.length (S.toCaseFold s) >= length xs - where s = S.streamList xs -sf_toCaseFold_length p xs = - (S.length . S.toCaseFold . S.filter p $ s) >= (length . L.filter p $ xs) - where s = S.streamList xs -t_toCaseFold_length t = T.length (T.toCaseFold t) >= T.length t -tl_toCaseFold_length t = TL.length (TL.toCaseFold t) >= TL.length t -t_toLower_length t = T.length (T.toLower t) >= T.length t -t_toLower_lower t = p (T.toLower t) >= p t - where p = T.length . T.filter isLower -tl_toLower_lower t = p (TL.toLower t) >= p t - where p = TL.length . 
TL.filter isLower -t_toUpper_length t = T.length (T.toUpper t) >= T.length t -t_toUpper_upper t = p (T.toUpper t) >= p t - where p = T.length . T.filter isUpper -tl_toUpper_upper t = p (TL.toUpper t) >= p t - where p = TL.length . TL.filter isUpper - -justifyLeft k c xs = xs ++ L.replicate (k - length xs) c -justifyRight m n xs = L.replicate (m - length xs) n ++ xs -center k c xs - | len >= k = xs - | otherwise = L.replicate l c ++ xs ++ L.replicate r c - where len = length xs - d = k - len - r = d `div` 2 - l = d - r - -s_justifyLeft k c = justifyLeft j c `eqP` (unpackS . S.justifyLeftI j c) - where j = fromIntegral (k :: Word8) -s_justifyLeft_s k c = justifyLeft j c `eqP` - (unpackS . S.unstream . S.justifyLeftI j c) - where j = fromIntegral (k :: Word8) -sf_justifyLeft p k c = (justifyLeft j c . L.filter p) `eqP` - (unpackS . S.justifyLeftI j c . S.filter p) - where j = fromIntegral (k :: Word8) -t_justifyLeft k c = justifyLeft j c `eqP` (unpackS . T.justifyLeft j c) - where j = fromIntegral (k :: Word8) -tl_justifyLeft k c = justifyLeft j c `eqP` - (unpackS . TL.justifyLeft (fromIntegral j) c) - where j = fromIntegral (k :: Word8) -t_justifyRight k c = justifyRight j c `eqP` (unpackS . T.justifyRight j c) - where j = fromIntegral (k :: Word8) -tl_justifyRight k c = justifyRight j c `eqP` - (unpackS . TL.justifyRight (fromIntegral j) c) - where j = fromIntegral (k :: Word8) -t_center k c = center j c `eqP` (unpackS . T.center j c) - where j = fromIntegral (k :: Word8) -tl_center k c = center j c `eqP` (unpackS . TL.center (fromIntegral j) c) - where j = fromIntegral (k :: Word8) - -sf_foldl p f z = (L.foldl f z . L.filter p) `eqP` (S.foldl f z . S.filter p) - where _types = f :: Char -> Char -> Char -t_foldl f z = L.foldl f z `eqP` (T.foldl f z) - where _types = f :: Char -> Char -> Char -tl_foldl f z = L.foldl f z `eqP` (TL.foldl f z) - where _types = f :: Char -> Char -> Char -sf_foldl' p f z = (L.foldl' f z . L.filter p) `eqP` - (S.foldl' f z . 
S.filter p) - where _types = f :: Char -> Char -> Char -t_foldl' f z = L.foldl' f z `eqP` T.foldl' f z - where _types = f :: Char -> Char -> Char -tl_foldl' f z = L.foldl' f z `eqP` TL.foldl' f z - where _types = f :: Char -> Char -> Char -sf_foldl1 p f = (L.foldl1 f . L.filter p) `eqP` (S.foldl1 f . S.filter p) -t_foldl1 f = L.foldl1 f `eqP` T.foldl1 f -tl_foldl1 f = L.foldl1 f `eqP` TL.foldl1 f -sf_foldl1' p f = (L.foldl1' f . L.filter p) `eqP` (S.foldl1' f . S.filter p) -t_foldl1' f = L.foldl1' f `eqP` T.foldl1' f -tl_foldl1' f = L.foldl1' f `eqP` TL.foldl1' f -sf_foldr p f z = (L.foldr f z . L.filter p) `eqP` (S.foldr f z . S.filter p) - where _types = f :: Char -> Char -> Char -t_foldr f z = L.foldr f z `eqP` T.foldr f z - where _types = f :: Char -> Char -> Char -tl_foldr f z = unsquare $ - L.foldr f z `eqP` TL.foldr f z - where _types = f :: Char -> Char -> Char -sf_foldr1 p f = unsquare $ - (L.foldr1 f . L.filter p) `eqP` (S.foldr1 f . S.filter p) -t_foldr1 f = L.foldr1 f `eqP` T.foldr1 f -tl_foldr1 f = unsquare $ - L.foldr1 f `eqP` TL.foldr1 f - -s_concat_s = unsquare $ - L.concat `eq` (unpackS . S.unstream . S.concat . map packS) -sf_concat p = unsquare $ - (L.concat . map (L.filter p)) `eq` - (unpackS . S.concat . map (S.filter p . packS)) -t_concat = unsquare $ - L.concat `eq` (unpackS . T.concat . map packS) -tl_concat = unsquare $ - L.concat `eq` (unpackS . TL.concat . map TL.pack) -sf_concatMap p f = unsquare $ (L.concatMap f . L.filter p) `eqP` - (unpackS . S.concatMap (packS . f) . S.filter p) -t_concatMap f = unsquare $ - L.concatMap f `eqP` (unpackS . T.concatMap (packS . f)) -tl_concatMap f = unsquare $ - L.concatMap f `eqP` (unpackS . TL.concatMap (TL.pack . f)) -sf_any q p = (L.any p . L.filter q) `eqP` (S.any p . S.filter q) -t_any p = L.any p `eqP` T.any p -tl_any p = L.any p `eqP` TL.any p -sf_all q p = (L.all p . L.filter q) `eqP` (S.all p . 
S.filter q) -t_all p = L.all p `eqP` T.all p -tl_all p = L.all p `eqP` TL.all p -sf_maximum p = (L.maximum . L.filter p) `eqP` (S.maximum . S.filter p) -t_maximum = L.maximum `eqP` T.maximum -tl_maximum = L.maximum `eqP` TL.maximum -sf_minimum p = (L.minimum . L.filter p) `eqP` (S.minimum . S.filter p) -t_minimum = L.minimum `eqP` T.minimum -tl_minimum = L.minimum `eqP` TL.minimum - -sf_scanl p f z = (L.scanl f z . L.filter p) `eqP` - (unpackS . S.scanl f z . S.filter p) -t_scanl f z = L.scanl f z `eqP` (unpackS . T.scanl f z) -tl_scanl f z = L.scanl f z `eqP` (unpackS . TL.scanl f z) -t_scanl1 f = L.scanl1 f `eqP` (unpackS . T.scanl1 f) -tl_scanl1 f = L.scanl1 f `eqP` (unpackS . TL.scanl1 f) -t_scanr f z = L.scanr f z `eqP` (unpackS . T.scanr f z) -tl_scanr f z = L.scanr f z `eqP` (unpackS . TL.scanr f z) -t_scanr1 f = L.scanr1 f `eqP` (unpackS . T.scanr1 f) -tl_scanr1 f = L.scanr1 f `eqP` (unpackS . TL.scanr1 f) - -t_mapAccumL f z = L.mapAccumL f z `eqP` (second unpackS . T.mapAccumL f z) - where _types = f :: Int -> Char -> (Int,Char) -tl_mapAccumL f z = L.mapAccumL f z `eqP` (second unpackS . TL.mapAccumL f z) - where _types = f :: Int -> Char -> (Int,Char) -t_mapAccumR f z = L.mapAccumR f z `eqP` (second unpackS . T.mapAccumR f z) - where _types = f :: Int -> Char -> (Int,Char) -tl_mapAccumR f z = L.mapAccumR f z `eqP` (second unpackS . TL.mapAccumR f z) - where _types = f :: Int -> Char -> (Int,Char) - -replicate n l = concat (L.replicate n l) - -s_replicate n = replicate m `eq` - (unpackS . S.replicateI (fromIntegral m) . packS) - where m = fromIntegral (n :: Word8) -t_replicate n = replicate m `eq` (unpackS . T.replicate m . packS) - where m = fromIntegral (n :: Word8) -tl_replicate n = replicate m `eq` - (unpackS . TL.replicate (fromIntegral m) . 
packS) - where m = fromIntegral (n :: Word8) - -unf :: Int -> Char -> Maybe (Char, Char) -unf n c | fromEnum c * 100 > n = Nothing - | otherwise = Just (c, succ c) - -t_unfoldr n = L.unfoldr (unf m) `eq` (unpackS . T.unfoldr (unf m)) - where m = fromIntegral (n :: Word16) -tl_unfoldr n = L.unfoldr (unf m) `eq` (unpackS . TL.unfoldr (unf m)) - where m = fromIntegral (n :: Word16) -t_unfoldrN n m = (L.take i . L.unfoldr (unf j)) `eq` - (unpackS . T.unfoldrN i (unf j)) - where i = fromIntegral (n :: Word16) - j = fromIntegral (m :: Word16) -tl_unfoldrN n m = (L.take i . L.unfoldr (unf j)) `eq` - (unpackS . TL.unfoldrN (fromIntegral i) (unf j)) - where i = fromIntegral (n :: Word16) - j = fromIntegral (m :: Word16) - -unpack2 :: (Stringy s) => (s,s) -> (String,String) -unpack2 = unpackS *** unpackS - -s_take n = L.take n `eqP` (unpackS . S.take n) -s_take_s m = L.take n `eqP` (unpackS . S.unstream . S.take n) - where n = small m -sf_take p n = (L.take n . L.filter p) `eqP` - (unpackS . S.take n . S.filter p) -t_take n = L.take n `eqP` (unpackS . T.take n) -t_takeEnd n = (L.reverse . L.take n . L.reverse) `eqP` - (unpackS . T.takeEnd n) -tl_take n = L.take n `eqP` (unpackS . TL.take (fromIntegral n)) -tl_takeEnd n = (L.reverse . L.take (fromIntegral n) . L.reverse) `eqP` - (unpackS . TL.takeEnd n) -s_drop n = L.drop n `eqP` (unpackS . S.drop n) -s_drop_s m = L.drop n `eqP` (unpackS . S.unstream . S.drop n) - where n = small m -sf_drop p n = (L.drop n . L.filter p) `eqP` - (unpackS . S.drop n . S.filter p) -t_drop n = L.drop n `eqP` (unpackS . T.drop n) -t_dropEnd n = (L.reverse . L.drop n . L.reverse) `eqP` - (unpackS . T.dropEnd n) -tl_drop n = L.drop n `eqP` (unpackS . TL.drop (fromIntegral n)) -tl_dropEnd n = (L.reverse . L.drop n . L.reverse) `eqP` - (unpackS . TL.dropEnd (fromIntegral n)) -s_take_drop m = (L.take n . L.drop n) `eqP` (unpackS . S.take n . S.drop n) - where n = small m -s_take_drop_s m = (L.take n . L.drop n) `eqP` - (unpackS . S.unstream . 
S.take n . S.drop n) - where n = small m -s_takeWhile p = L.takeWhile p `eqP` (unpackS . S.takeWhile p) -s_takeWhile_s p = L.takeWhile p `eqP` (unpackS . S.unstream . S.takeWhile p) -sf_takeWhile q p = (L.takeWhile p . L.filter q) `eqP` - (unpackS . S.takeWhile p . S.filter q) -t_takeWhile p = L.takeWhile p `eqP` (unpackS . T.takeWhile p) -tl_takeWhile p = L.takeWhile p `eqP` (unpackS . TL.takeWhile p) -s_dropWhile p = L.dropWhile p `eqP` (unpackS . S.dropWhile p) -s_dropWhile_s p = L.dropWhile p `eqP` (unpackS . S.unstream . S.dropWhile p) -sf_dropWhile q p = (L.dropWhile p . L.filter q) `eqP` - (unpackS . S.dropWhile p . S.filter q) -t_dropWhile p = L.dropWhile p `eqP` (unpackS . T.dropWhile p) -tl_dropWhile p = L.dropWhile p `eqP` (unpackS . S.dropWhile p) -t_dropWhileEnd p = (L.reverse . L.dropWhile p . L.reverse) `eqP` - (unpackS . T.dropWhileEnd p) -tl_dropWhileEnd p = (L.reverse . L.dropWhile p . L.reverse) `eqP` - (unpackS . TL.dropWhileEnd p) -t_dropAround p = (L.dropWhile p . L.reverse . L.dropWhile p . L.reverse) - `eqP` (unpackS . T.dropAround p) -tl_dropAround p = (L.dropWhile p . L.reverse . L.dropWhile p . L.reverse) - `eqP` (unpackS . TL.dropAround p) -t_stripStart = T.dropWhile isSpace `eq` T.stripStart -tl_stripStart = TL.dropWhile isSpace `eq` TL.stripStart -t_stripEnd = T.dropWhileEnd isSpace `eq` T.stripEnd -tl_stripEnd = TL.dropWhileEnd isSpace `eq` TL.stripEnd -t_strip = T.dropAround isSpace `eq` T.strip -tl_strip = TL.dropAround isSpace `eq` TL.strip -t_splitAt n = L.splitAt n `eqP` (unpack2 . T.splitAt n) -tl_splitAt n = L.splitAt n `eqP` (unpack2 . TL.splitAt (fromIntegral n)) -t_span p = L.span p `eqP` (unpack2 . T.span p) -tl_span p = L.span p `eqP` (unpack2 . TL.span p) - -t_breakOn_id s = squid `eq` (uncurry T.append . T.breakOn s) - where squid t | T.null s = error "empty" - | otherwise = t -tl_breakOn_id s = squid `eq` (uncurry TL.append . 
TL.breakOn s) - where squid t | TL.null s = error "empty" - | otherwise = t -t_breakOn_start (NotEmpty s) t = - let (k,m) = T.breakOn s t - in k `T.isPrefixOf` t && (T.null m || s `T.isPrefixOf` m) -tl_breakOn_start (NotEmpty s) t = - let (k,m) = TL.breakOn s t - in k `TL.isPrefixOf` t && TL.null m || s `TL.isPrefixOf` m -t_breakOnEnd_end (NotEmpty s) t = - let (m,k) = T.breakOnEnd s t - in k `T.isSuffixOf` t && (T.null m || s `T.isSuffixOf` m) -tl_breakOnEnd_end (NotEmpty s) t = - let (m,k) = TL.breakOnEnd s t - in k `TL.isSuffixOf` t && (TL.null m || s `TL.isSuffixOf` m) -t_break p = L.break p `eqP` (unpack2 . T.break p) -tl_break p = L.break p `eqP` (unpack2 . TL.break p) -t_group = L.group `eqP` (map unpackS . T.group) -tl_group = L.group `eqP` (map unpackS . TL.group) -t_groupBy p = L.groupBy p `eqP` (map unpackS . T.groupBy p) -tl_groupBy p = L.groupBy p `eqP` (map unpackS . TL.groupBy p) -t_inits = L.inits `eqP` (map unpackS . T.inits) -tl_inits = L.inits `eqP` (map unpackS . TL.inits) -t_tails = L.tails `eqP` (map unpackS . T.tails) -tl_tails = unsquare $ - L.tails `eqP` (map unpackS . TL.tails) -t_findAppendId = unsquare $ \(NotEmpty s) ts -> - let t = T.intercalate s ts - in all (==t) $ map (uncurry T.append) (T.breakOnAll s t) -tl_findAppendId = unsquare $ \(NotEmpty s) ts -> - let t = TL.intercalate s ts - in all (==t) $ map (uncurry TL.append) (TL.breakOnAll s t) -t_findContains = unsquare $ \(NotEmpty s) -> - all (T.isPrefixOf s . snd) . T.breakOnAll s . T.intercalate s -tl_findContains = unsquare $ \(NotEmpty s) -> all (TL.isPrefixOf s . snd) . - TL.breakOnAll s . TL.intercalate s -sl_filterCount c = (L.genericLength . L.filter (==c)) `eqP` SL.countChar c -t_findCount s = (L.length . T.breakOnAll s) `eq` T.count s -tl_findCount s = (L.genericLength . TL.breakOnAll s) `eq` TL.count s - -t_splitOn_split s = unsquare $ - (T.splitOn s `eq` Slow.splitOn s) . T.intercalate s -tl_splitOn_split s = unsquare $ - ((TL.splitOn (TL.fromStrict s) . 
TL.fromStrict) `eq` - (map TL.fromStrict . T.splitOn s)) . T.intercalate s -t_splitOn_i (NotEmpty t) = id `eq` (T.intercalate t . T.splitOn t) -tl_splitOn_i (NotEmpty t) = id `eq` (TL.intercalate t . TL.splitOn t) - -t_split p = split p `eqP` (map unpackS . T.split p) -t_split_count c = (L.length . T.split (==c)) `eq` - ((1+) . T.count (T.singleton c)) -t_split_splitOn c = T.split (==c) `eq` T.splitOn (T.singleton c) -tl_split p = split p `eqP` (map unpackS . TL.split p) - -split :: (a -> Bool) -> [a] -> [[a]] -split _ [] = [[]] -split p xs = loop xs - where loop s | null s' = [l] - | otherwise = l : loop (tail s') - where (l, s') = break p s - -t_chunksOf_same_lengths k = all ((==k) . T.length) . ini . T.chunksOf k - where ini [] = [] - ini xs = init xs - -t_chunksOf_length k t = len == T.length t || (k <= 0 && len == 0) - where len = L.sum . L.map T.length $ T.chunksOf k t - -tl_chunksOf k = T.chunksOf k `eq` (map (T.concat . TL.toChunks) . - TL.chunksOf (fromIntegral k) . TL.fromStrict) - -t_lines = L.lines `eqP` (map unpackS . T.lines) -tl_lines = L.lines `eqP` (map unpackS . TL.lines) -{- -t_lines' = lines' `eqP` (map unpackS . T.lines') - where lines' "" = [] - lines' s = let (l, s') = break eol s - in l : case s' of - [] -> [] - ('\r':'\n':s'') -> lines' s'' - (_:s'') -> lines' s'' - eol c = c == '\r' || c == '\n' --} -t_words = L.words `eqP` (map unpackS . T.words) - -tl_words = L.words `eqP` (map unpackS . TL.words) -t_unlines = unsquare $ - L.unlines `eq` (unpackS . T.unlines . map packS) -tl_unlines = unsquare $ - L.unlines `eq` (unpackS . TL.unlines . map packS) -t_unwords = unsquare $ - L.unwords `eq` (unpackS . T.unwords . map packS) -tl_unwords = unsquare $ - L.unwords `eq` (unpackS . TL.unwords . map packS) - -s_isPrefixOf s = L.isPrefixOf s `eqP` - (S.isPrefixOf (S.stream $ packS s) . S.stream) -sf_isPrefixOf p s = (L.isPrefixOf s . L.filter p) `eqP` - (S.isPrefixOf (S.stream $ packS s) . S.filter p . 
S.stream) -t_isPrefixOf s = L.isPrefixOf s`eqP` T.isPrefixOf (packS s) -tl_isPrefixOf s = L.isPrefixOf s`eqP` TL.isPrefixOf (packS s) -t_isSuffixOf s = L.isSuffixOf s`eqP` T.isSuffixOf (packS s) -tl_isSuffixOf s = L.isSuffixOf s`eqP` TL.isSuffixOf (packS s) -t_isInfixOf s = L.isInfixOf s `eqP` T.isInfixOf (packS s) -tl_isInfixOf s = L.isInfixOf s `eqP` TL.isInfixOf (packS s) - -t_stripPrefix s = (fmap packS . L.stripPrefix s) `eqP` T.stripPrefix (packS s) -tl_stripPrefix s = (fmap packS . L.stripPrefix s) `eqP` TL.stripPrefix (packS s) - -stripSuffix p t = reverse `fmap` L.stripPrefix (reverse p) (reverse t) - -t_stripSuffix s = (fmap packS . stripSuffix s) `eqP` T.stripSuffix (packS s) -tl_stripSuffix s = (fmap packS . stripSuffix s) `eqP` TL.stripSuffix (packS s) - -commonPrefixes a0@(_:_) b0@(_:_) = Just (go a0 b0 []) - where go (a:as) (b:bs) ps - | a == b = go as bs (a:ps) - go as bs ps = (reverse ps,as,bs) -commonPrefixes _ _ = Nothing - -t_commonPrefixes a b (NonEmpty p) - = commonPrefixes pa pb == - repack `fmap` T.commonPrefixes (packS pa) (packS pb) - where repack (x,y,z) = (unpackS x,unpackS y,unpackS z) - pa = p ++ a - pb = p ++ b - -tl_commonPrefixes a b (NonEmpty p) - = commonPrefixes pa pb == - repack `fmap` TL.commonPrefixes (packS pa) (packS pb) - where repack (x,y,z) = (unpackS x,unpackS y,unpackS z) - pa = p ++ a - pb = p ++ b - -sf_elem p c = (L.elem c . L.filter p) `eqP` (S.elem c . S.filter p) -sf_filter q p = (L.filter p . L.filter q) `eqP` - (unpackS . S.filter p . S.filter q) -t_filter p = L.filter p `eqP` (unpackS . T.filter p) -tl_filter p = L.filter p `eqP` (unpackS . TL.filter p) -sf_findBy q p = (L.find p . L.filter q) `eqP` (S.findBy p . S.filter q) -t_find p = L.find p `eqP` T.find p -tl_find p = L.find p `eqP` TL.find p -t_partition p = L.partition p `eqP` (unpack2 . T.partition p) -tl_partition p = L.partition p `eqP` (unpack2 . TL.partition p) - -sf_index p s = forAll (choose (-l,l*2)) - ((L.filter p s L.!!) 
`eq` S.index (S.filter p $ packS s)) - where l = L.length s -t_index s = forAll (choose (-l,l*2)) ((s L.!!) `eq` T.index (packS s)) - where l = L.length s - -tl_index s = forAll (choose (-l,l*2)) - ((s L.!!) `eq` (TL.index (packS s) . fromIntegral)) - where l = L.length s - -t_findIndex p = L.findIndex p `eqP` T.findIndex p -t_count (NotEmpty t) = (subtract 1 . L.length . T.splitOn t) `eq` T.count t -tl_count (NotEmpty t) = (subtract 1 . L.genericLength . TL.splitOn t) `eq` - TL.count t -t_zip s = L.zip s `eqP` T.zip (packS s) -tl_zip s = L.zip s `eqP` TL.zip (packS s) -sf_zipWith p c s = (L.zipWith c (L.filter p s) . L.filter p) `eqP` - (unpackS . S.zipWith c (S.filter p $ packS s) . S.filter p) -t_zipWith c s = L.zipWith c s `eqP` (unpackS . T.zipWith c (packS s)) -tl_zipWith c s = L.zipWith c s `eqP` (unpackS . TL.zipWith c (packS s)) - -t_indices (NotEmpty s) = Slow.indices s `eq` indices s -tl_indices (NotEmpty s) = lazyIndices s `eq` S.indices s - where lazyIndices ss t = map fromIntegral $ Slow.indices (conc ss) (conc t) - conc = T.concat . TL.toChunks -t_indices_occurs = unsquare $ \(NotEmpty t) ts -> - let s = T.intercalate t ts - in Slow.indices t s == indices t s - --- Bit shifts. -shiftL w = forAll (choose (0,width-1)) $ \k -> Bits.shiftL w k == U.shiftL w k - where width = round (log (fromIntegral m) / log 2 :: Double) - (m,_) = (maxBound, m == w) -shiftR w = forAll (choose (0,width-1)) $ \k -> Bits.shiftR w k == U.shiftR w k - where width = round (log (fromIntegral m) / log 2 :: Double) - (m,_) = (maxBound, m == w) - -shiftL_Int = shiftL :: Int -> Property -shiftL_Word16 = shiftL :: Word16 -> Property -shiftL_Word32 = shiftL :: Word32 -> Property -shiftR_Int = shiftR :: Int -> Property -shiftR_Word16 = shiftR :: Word16 -> Property -shiftR_Word32 = shiftR :: Word32 -> Property - --- Builder. - -tb_singleton = id `eqP` - (unpackS . TB.toLazyText . mconcat . map TB.singleton) -tb_fromText = L.concat `eq` (unpackS . TB.toLazyText . mconcat . 
- map (TB.fromText . packS)) -tb_associative s1 s2 s3 = - TB.toLazyText (b1 `mappend` (b2 `mappend` b3)) == - TB.toLazyText ((b1 `mappend` b2) `mappend` b3) - where b1 = TB.fromText (packS s1) - b2 = TB.fromText (packS s2) - b3 = TB.fromText (packS s3) - --- Numeric builder stuff. - -tb_decimal :: (Integral a, Show a) => a -> Bool -tb_decimal = (TB.toLazyText . TB.decimal) `eq` (TL.pack . show) - -tb_decimal_integer (a::Integer) = tb_decimal a -tb_decimal_integer_big (Big a) = tb_decimal a -tb_decimal_int (a::Int) = tb_decimal a -tb_decimal_int8 (a::Int8) = tb_decimal a -tb_decimal_int16 (a::Int16) = tb_decimal a -tb_decimal_int32 (a::Int32) = tb_decimal a -tb_decimal_int64 (a::Int64) = tb_decimal a -tb_decimal_word (a::Word) = tb_decimal a -tb_decimal_word8 (a::Word8) = tb_decimal a -tb_decimal_word16 (a::Word16) = tb_decimal a -tb_decimal_word32 (a::Word32) = tb_decimal a -tb_decimal_word64 (a::Word64) = tb_decimal a - -tb_decimal_big_int (BigBounded (a::Int)) = tb_decimal a -tb_decimal_big_int64 (BigBounded (a::Int64)) = tb_decimal a -tb_decimal_big_word (BigBounded (a::Word)) = tb_decimal a -tb_decimal_big_word64 (BigBounded (a::Word64)) = tb_decimal a - -tb_hex :: (Integral a, Show a) => a -> Bool -tb_hex = (TB.toLazyText . TB.hexadecimal) `eq` (TL.pack . flip showHex "") - -tb_hexadecimal_integer (a::Integer) = tb_hex a -tb_hexadecimal_int (a::Int) = tb_hex a -tb_hexadecimal_int8 (a::Int8) = tb_hex a -tb_hexadecimal_int16 (a::Int16) = tb_hex a -tb_hexadecimal_int32 (a::Int32) = tb_hex a -tb_hexadecimal_int64 (a::Int64) = tb_hex a -tb_hexadecimal_word (a::Word) = tb_hex a -tb_hexadecimal_word8 (a::Word8) = tb_hex a -tb_hexadecimal_word16 (a::Word16) = tb_hex a -tb_hexadecimal_word32 (a::Word32) = tb_hex a -tb_hexadecimal_word64 (a::Word64) = tb_hex a - -tb_realfloat :: (RealFloat a, Show a) => a -> Bool -tb_realfloat = (TB.toLazyText . TB.realFloat) `eq` (TL.pack . 
show) - -tb_realfloat_float (a::Float) = tb_realfloat a -tb_realfloat_double (a::Double) = tb_realfloat a - -showFloat :: (RealFloat a) => TB.FPFormat -> Maybe Int -> a -> ShowS -showFloat TB.Exponent = showEFloat -showFloat TB.Fixed = showFFloat -showFloat TB.Generic = showGFloat - -tb_formatRealFloat :: (RealFloat a, Show a) => a -> TB.FPFormat -> Precision a -> Property -tb_formatRealFloat a fmt prec = - TB.formatRealFloat fmt p a === - TB.fromString (showFloat fmt p a "") - where p = precision a prec - -tb_formatRealFloat_float (a::Float) = tb_formatRealFloat a -tb_formatRealFloat_double (a::Double) = tb_formatRealFloat a - --- Reading. - -t_decimal (n::Int) s = - T.signed T.decimal (T.pack (show n) `T.append` t) == Right (n,t) - where t = T.dropWhile isDigit s -tl_decimal (n::Int) s = - TL.signed TL.decimal (TL.pack (show n) `TL.append` t) == Right (n,t) - where t = TL.dropWhile isDigit s -t_hexadecimal m s ox = - T.hexadecimal (T.concat [p, T.pack (showHex n ""), t]) == Right (n,t) - where t = T.dropWhile isHexDigit s - p = if ox then "0x" else "" - n = getPositive m :: Int -tl_hexadecimal m s ox = - TL.hexadecimal (TL.concat [p, TL.pack (showHex n ""), t]) == Right (n,t) - where t = TL.dropWhile isHexDigit s - p = if ox then "0x" else "" - n = getPositive m :: Int - -isFloaty c = c `elem` "+-.0123456789eE" - -t_read_rational p tol (n::Double) s = - case p (T.pack (show n) `T.append` t) of - Left _err -> False - Right (n',t') -> t == t' && abs (n-n') <= tol - where t = T.dropWhile isFloaty s - -tl_read_rational p tol (n::Double) s = - case p (TL.pack (show n) `TL.append` t) of - Left _err -> False - Right (n',t') -> t == t' && abs (n-n') <= tol - where t = TL.dropWhile isFloaty s - -t_double = t_read_rational T.double 1e-13 -tl_double = tl_read_rational TL.double 1e-13 -t_rational = t_read_rational T.rational 1e-16 -tl_rational = tl_read_rational TL.rational 1e-16 - --- Input and output. 
- -t_put_get = write_read T.unlines T.filter put get - where put h = withRedirect h IO.stdout . T.putStr - get h = withRedirect h IO.stdin T.getContents -tl_put_get = write_read TL.unlines TL.filter put get - where put h = withRedirect h IO.stdout . TL.putStr - get h = withRedirect h IO.stdin TL.getContents -t_write_read = write_read T.unlines T.filter T.hPutStr T.hGetContents -tl_write_read = write_read TL.unlines TL.filter TL.hPutStr TL.hGetContents - -t_write_read_line e m b t = write_read head T.filter T.hPutStrLn - T.hGetLine e m b [t] -tl_write_read_line e m b t = write_read head TL.filter TL.hPutStrLn - TL.hGetLine e m b [t] - --- Low-level. - -t_dropWord16 m t = dropWord16 m t `T.isSuffixOf` t -t_takeWord16 m t = takeWord16 m t `T.isPrefixOf` t -t_take_drop_16 m t = T.append (takeWord16 n t) (dropWord16 n t) == t - where n = small m -t_use_from t = monadicIO $ assert . (==t) =<< run (useAsPtr t fromPtr) - -t_copy t = T.copy t == t - --- Regression tests. -s_filter_eq s = S.filter p t == S.streamList (filter p s) - where p = (/= S.last t) - t = S.streamList s - --- Make a stream appear shorter than it really is, to ensure that --- functions that consume inaccurately sized streams behave --- themselves. 
-shorten :: Int -> S.Stream a -> S.Stream a -shorten n t@(S.Stream arr off len) - | n > 0 = S.Stream arr off (smaller (exactSize n) len) - | otherwise = t - -tests :: Test -tests = - testGroup "Properties" [ - testGroup "creation/elimination" [ - testProperty "t_pack_unpack" t_pack_unpack, - testProperty "tl_pack_unpack" tl_pack_unpack, - testProperty "t_stream_unstream" t_stream_unstream, - testProperty "tl_stream_unstream" tl_stream_unstream, - testProperty "t_reverse_stream" t_reverse_stream, - testProperty "t_singleton" t_singleton, - testProperty "tl_singleton" tl_singleton, - testProperty "tl_unstreamChunks" tl_unstreamChunks, - testProperty "tl_chunk_unchunk" tl_chunk_unchunk, - testProperty "tl_from_to_strict" tl_from_to_strict - ], - - testGroup "transcoding" [ - testProperty "t_ascii" t_ascii, - testProperty "tl_ascii" tl_ascii, - testProperty "t_latin1" t_latin1, - testProperty "tl_latin1" tl_latin1, - testProperty "t_utf8" t_utf8, - testProperty "t_utf8'" t_utf8', - testProperty "t_utf8_incr" t_utf8_incr, - testProperty "t_utf8_undecoded" t_utf8_undecoded, - testProperty "tl_utf8" tl_utf8, - testProperty "tl_utf8'" tl_utf8', - testProperty "t_utf16LE" t_utf16LE, - testProperty "tl_utf16LE" tl_utf16LE, - testProperty "t_utf16BE" t_utf16BE, - testProperty "tl_utf16BE" tl_utf16BE, - testProperty "t_utf32LE" t_utf32LE, - testProperty "tl_utf32LE" tl_utf32LE, - testProperty "t_utf32BE" t_utf32BE, - testProperty "tl_utf32BE" tl_utf32BE, - testGroup "errors" [ - testProperty "t_utf8_err" t_utf8_err, - testProperty "t_utf8_err'" t_utf8_err' - ] - ], - - testGroup "instances" [ - testProperty "s_Eq" s_Eq, - testProperty "sf_Eq" sf_Eq, - testProperty "t_Eq" t_Eq, - testProperty "tl_Eq" tl_Eq, - testProperty "s_Ord" s_Ord, - testProperty "sf_Ord" sf_Ord, - testProperty "t_Ord" t_Ord, - testProperty "tl_Ord" tl_Ord, - testProperty "t_Read" t_Read, - testProperty "tl_Read" tl_Read, - testProperty "t_Show" t_Show, - testProperty "tl_Show" tl_Show, - testProperty 
"t_mappend" t_mappend, - testProperty "tl_mappend" tl_mappend, - testProperty "t_mconcat" t_mconcat, - testProperty "tl_mconcat" tl_mconcat, - testProperty "t_mempty" t_mempty, - testProperty "tl_mempty" tl_mempty, - testProperty "t_IsString" t_IsString, - testProperty "tl_IsString" tl_IsString - ], - - testGroup "basics" [ - testProperty "s_cons" s_cons, - testProperty "s_cons_s" s_cons_s, - testProperty "sf_cons" sf_cons, - testProperty "t_cons" t_cons, - testProperty "tl_cons" tl_cons, - testProperty "s_snoc" s_snoc, - testProperty "t_snoc" t_snoc, - testProperty "tl_snoc" tl_snoc, - testProperty "s_append" s_append, - testProperty "s_append_s" s_append_s, - testProperty "sf_append" sf_append, - testProperty "t_append" t_append, - testProperty "s_uncons" s_uncons, - testProperty "sf_uncons" sf_uncons, - testProperty "t_uncons" t_uncons, - testProperty "tl_uncons" tl_uncons, - testProperty "s_head" s_head, - testProperty "sf_head" sf_head, - testProperty "t_head" t_head, - testProperty "tl_head" tl_head, - testProperty "s_last" s_last, - testProperty "sf_last" sf_last, - testProperty "t_last" t_last, - testProperty "tl_last" tl_last, - testProperty "s_tail" s_tail, - testProperty "s_tail_s" s_tail_s, - testProperty "sf_tail" sf_tail, - testProperty "t_tail" t_tail, - testProperty "tl_tail" tl_tail, - testProperty "s_init" s_init, - testProperty "s_init_s" s_init_s, - testProperty "sf_init" sf_init, - testProperty "t_init" t_init, - testProperty "tl_init" tl_init, - testProperty "s_null" s_null, - testProperty "sf_null" sf_null, - testProperty "t_null" t_null, - testProperty "tl_null" tl_null, - testProperty "s_length" s_length, - testProperty "sf_length" sf_length, - testProperty "sl_length" sl_length, - testProperty "t_length" t_length, - testProperty "tl_length" tl_length, - testProperty "t_compareLength" t_compareLength, - testProperty "tl_compareLength" tl_compareLength - ], - - testGroup "transformations" [ - testProperty "s_map" s_map, - testProperty 
"s_map_s" s_map_s, - testProperty "sf_map" sf_map, - testProperty "t_map" t_map, - testProperty "tl_map" tl_map, - testProperty "s_intercalate" s_intercalate, - testProperty "t_intercalate" t_intercalate, - testProperty "tl_intercalate" tl_intercalate, - testProperty "s_intersperse" s_intersperse, - testProperty "s_intersperse_s" s_intersperse_s, - testProperty "sf_intersperse" sf_intersperse, - testProperty "t_intersperse" t_intersperse, - testProperty "tl_intersperse" tl_intersperse, - testProperty "t_transpose" t_transpose, - testProperty "tl_transpose" tl_transpose, - testProperty "t_reverse" t_reverse, - testProperty "tl_reverse" tl_reverse, - testProperty "t_reverse_short" t_reverse_short, - testProperty "t_replace" t_replace, - testProperty "tl_replace" tl_replace, - - testGroup "case conversion" [ - testProperty "s_toCaseFold_length" s_toCaseFold_length, - testProperty "sf_toCaseFold_length" sf_toCaseFold_length, - testProperty "t_toCaseFold_length" t_toCaseFold_length, - testProperty "tl_toCaseFold_length" tl_toCaseFold_length, - testProperty "t_toLower_length" t_toLower_length, - testProperty "t_toLower_lower" t_toLower_lower, - testProperty "tl_toLower_lower" tl_toLower_lower, - testProperty "t_toUpper_length" t_toUpper_length, - testProperty "t_toUpper_upper" t_toUpper_upper, - testProperty "tl_toUpper_upper" tl_toUpper_upper - ], - - testGroup "justification" [ - testProperty "s_justifyLeft" s_justifyLeft, - testProperty "s_justifyLeft_s" s_justifyLeft_s, - testProperty "sf_justifyLeft" sf_justifyLeft, - testProperty "t_justifyLeft" t_justifyLeft, - testProperty "tl_justifyLeft" tl_justifyLeft, - testProperty "t_justifyRight" t_justifyRight, - testProperty "tl_justifyRight" tl_justifyRight, - testProperty "t_center" t_center, - testProperty "tl_center" tl_center - ] - ], - - testGroup "folds" [ - testProperty "sf_foldl" sf_foldl, - testProperty "t_foldl" t_foldl, - testProperty "tl_foldl" tl_foldl, - testProperty "sf_foldl'" sf_foldl', - testProperty 
"t_foldl'" t_foldl', - testProperty "tl_foldl'" tl_foldl', - testProperty "sf_foldl1" sf_foldl1, - testProperty "t_foldl1" t_foldl1, - testProperty "tl_foldl1" tl_foldl1, - testProperty "t_foldl1'" t_foldl1', - testProperty "sf_foldl1'" sf_foldl1', - testProperty "tl_foldl1'" tl_foldl1', - testProperty "sf_foldr" sf_foldr, - testProperty "t_foldr" t_foldr, - testProperty "tl_foldr" tl_foldr, - testProperty "sf_foldr1" sf_foldr1, - testProperty "t_foldr1" t_foldr1, - testProperty "tl_foldr1" tl_foldr1, - - testGroup "special" [ - testProperty "s_concat_s" s_concat_s, - testProperty "sf_concat" sf_concat, - testProperty "t_concat" t_concat, - testProperty "tl_concat" tl_concat, - testProperty "sf_concatMap" sf_concatMap, - testProperty "t_concatMap" t_concatMap, - testProperty "tl_concatMap" tl_concatMap, - testProperty "sf_any" sf_any, - testProperty "t_any" t_any, - testProperty "tl_any" tl_any, - testProperty "sf_all" sf_all, - testProperty "t_all" t_all, - testProperty "tl_all" tl_all, - testProperty "sf_maximum" sf_maximum, - testProperty "t_maximum" t_maximum, - testProperty "tl_maximum" tl_maximum, - testProperty "sf_minimum" sf_minimum, - testProperty "t_minimum" t_minimum, - testProperty "tl_minimum" tl_minimum - ] - ], - - testGroup "construction" [ - testGroup "scans" [ - testProperty "sf_scanl" sf_scanl, - testProperty "t_scanl" t_scanl, - testProperty "tl_scanl" tl_scanl, - testProperty "t_scanl1" t_scanl1, - testProperty "tl_scanl1" tl_scanl1, - testProperty "t_scanr" t_scanr, - testProperty "tl_scanr" tl_scanr, - testProperty "t_scanr1" t_scanr1, - testProperty "tl_scanr1" tl_scanr1 - ], - - testGroup "mapAccum" [ - testProperty "t_mapAccumL" t_mapAccumL, - testProperty "tl_mapAccumL" tl_mapAccumL, - testProperty "t_mapAccumR" t_mapAccumR, - testProperty "tl_mapAccumR" tl_mapAccumR - ], - - testGroup "unfolds" [ - testProperty "s_replicate" s_replicate, - testProperty "t_replicate" t_replicate, - testProperty "tl_replicate" tl_replicate, - testProperty 
"t_unfoldr" t_unfoldr, - testProperty "tl_unfoldr" tl_unfoldr, - testProperty "t_unfoldrN" t_unfoldrN, - testProperty "tl_unfoldrN" tl_unfoldrN - ] - ], - - testGroup "substrings" [ - testGroup "breaking" [ - testProperty "s_take" s_take, - testProperty "s_take_s" s_take_s, - testProperty "sf_take" sf_take, - testProperty "t_take" t_take, - testProperty "t_takeEnd" t_takeEnd, - testProperty "tl_take" tl_take, - testProperty "tl_takeEnd" tl_takeEnd, - testProperty "s_drop" s_drop, - testProperty "s_drop_s" s_drop_s, - testProperty "sf_drop" sf_drop, - testProperty "t_drop" t_drop, - testProperty "t_dropEnd" t_dropEnd, - testProperty "tl_drop" tl_drop, - testProperty "tl_dropEnd" tl_dropEnd, - testProperty "s_take_drop" s_take_drop, - testProperty "s_take_drop_s" s_take_drop_s, - testProperty "s_takeWhile" s_takeWhile, - testProperty "s_takeWhile_s" s_takeWhile_s, - testProperty "sf_takeWhile" sf_takeWhile, - testProperty "t_takeWhile" t_takeWhile, - testProperty "tl_takeWhile" tl_takeWhile, - testProperty "sf_dropWhile" sf_dropWhile, - testProperty "s_dropWhile" s_dropWhile, - testProperty "s_dropWhile_s" s_dropWhile_s, - testProperty "t_dropWhile" t_dropWhile, - testProperty "tl_dropWhile" tl_dropWhile, - testProperty "t_dropWhileEnd" t_dropWhileEnd, - testProperty "tl_dropWhileEnd" tl_dropWhileEnd, - testProperty "t_dropAround" t_dropAround, - testProperty "tl_dropAround" tl_dropAround, - testProperty "t_stripStart" t_stripStart, - testProperty "tl_stripStart" tl_stripStart, - testProperty "t_stripEnd" t_stripEnd, - testProperty "tl_stripEnd" tl_stripEnd, - testProperty "t_strip" t_strip, - testProperty "tl_strip" tl_strip, - testProperty "t_splitAt" t_splitAt, - testProperty "tl_splitAt" tl_splitAt, - testProperty "t_span" t_span, - testProperty "tl_span" tl_span, - testProperty "t_breakOn_id" t_breakOn_id, - testProperty "tl_breakOn_id" tl_breakOn_id, - testProperty "t_breakOn_start" t_breakOn_start, - testProperty "tl_breakOn_start" tl_breakOn_start, - 
testProperty "t_breakOnEnd_end" t_breakOnEnd_end, - testProperty "tl_breakOnEnd_end" tl_breakOnEnd_end, - testProperty "t_break" t_break, - testProperty "tl_break" tl_break, - testProperty "t_group" t_group, - testProperty "tl_group" tl_group, - testProperty "t_groupBy" t_groupBy, - testProperty "tl_groupBy" tl_groupBy, - testProperty "t_inits" t_inits, - testProperty "tl_inits" tl_inits, - testProperty "t_tails" t_tails, - testProperty "tl_tails" tl_tails - ], - - testGroup "breaking many" [ - testProperty "t_findAppendId" t_findAppendId, - testProperty "tl_findAppendId" tl_findAppendId, - testProperty "t_findContains" t_findContains, - testProperty "tl_findContains" tl_findContains, - testProperty "sl_filterCount" sl_filterCount, - testProperty "t_findCount" t_findCount, - testProperty "tl_findCount" tl_findCount, - testProperty "t_splitOn_split" t_splitOn_split, - testProperty "tl_splitOn_split" tl_splitOn_split, - testProperty "t_splitOn_i" t_splitOn_i, - testProperty "tl_splitOn_i" tl_splitOn_i, - testProperty "t_split" t_split, - testProperty "t_split_count" t_split_count, - testProperty "t_split_splitOn" t_split_splitOn, - testProperty "tl_split" tl_split, - testProperty "t_chunksOf_same_lengths" t_chunksOf_same_lengths, - testProperty "t_chunksOf_length" t_chunksOf_length, - testProperty "tl_chunksOf" tl_chunksOf - ], - - testGroup "lines and words" [ - testProperty "t_lines" t_lines, - testProperty "tl_lines" tl_lines, - --testProperty "t_lines'" t_lines', - testProperty "t_words" t_words, - testProperty "tl_words" tl_words, - testProperty "t_unlines" t_unlines, - testProperty "tl_unlines" tl_unlines, - testProperty "t_unwords" t_unwords, - testProperty "tl_unwords" tl_unwords - ] - ], - - testGroup "predicates" [ - testProperty "s_isPrefixOf" s_isPrefixOf, - testProperty "sf_isPrefixOf" sf_isPrefixOf, - testProperty "t_isPrefixOf" t_isPrefixOf, - testProperty "tl_isPrefixOf" tl_isPrefixOf, - testProperty "t_isSuffixOf" t_isSuffixOf, - testProperty 
"tl_isSuffixOf" tl_isSuffixOf, - testProperty "t_isInfixOf" t_isInfixOf, - testProperty "tl_isInfixOf" tl_isInfixOf, - - testGroup "view" [ - testProperty "t_stripPrefix" t_stripPrefix, - testProperty "tl_stripPrefix" tl_stripPrefix, - testProperty "t_stripSuffix" t_stripSuffix, - testProperty "tl_stripSuffix" tl_stripSuffix, - testProperty "t_commonPrefixes" t_commonPrefixes, - testProperty "tl_commonPrefixes" tl_commonPrefixes - ] - ], - - testGroup "searching" [ - testProperty "sf_elem" sf_elem, - testProperty "sf_filter" sf_filter, - testProperty "t_filter" t_filter, - testProperty "tl_filter" tl_filter, - testProperty "sf_findBy" sf_findBy, - testProperty "t_find" t_find, - testProperty "tl_find" tl_find, - testProperty "t_partition" t_partition, - testProperty "tl_partition" tl_partition - ], - - testGroup "indexing" [ - testProperty "sf_index" sf_index, - testProperty "t_index" t_index, - testProperty "tl_index" tl_index, - testProperty "t_findIndex" t_findIndex, - testProperty "t_count" t_count, - testProperty "tl_count" tl_count, - testProperty "t_indices" t_indices, - testProperty "tl_indices" tl_indices, - testProperty "t_indices_occurs" t_indices_occurs - ], - - testGroup "zips" [ - testProperty "t_zip" t_zip, - testProperty "tl_zip" tl_zip, - testProperty "sf_zipWith" sf_zipWith, - testProperty "t_zipWith" t_zipWith, - testProperty "tl_zipWith" tl_zipWith - ], - - testGroup "regressions" [ - testProperty "s_filter_eq" s_filter_eq - ], - - testGroup "shifts" [ - testProperty "shiftL_Int" shiftL_Int, - testProperty "shiftL_Word16" shiftL_Word16, - testProperty "shiftL_Word32" shiftL_Word32, - testProperty "shiftR_Int" shiftR_Int, - testProperty "shiftR_Word16" shiftR_Word16, - testProperty "shiftR_Word32" shiftR_Word32 - ], - - testGroup "builder" [ - testProperty "tb_associative" tb_associative, - testGroup "decimal" [ - testProperty "tb_decimal_int" tb_decimal_int, - testProperty "tb_decimal_int8" tb_decimal_int8, - testProperty "tb_decimal_int16" 
tb_decimal_int16, - testProperty "tb_decimal_int32" tb_decimal_int32, - testProperty "tb_decimal_int64" tb_decimal_int64, - testProperty "tb_decimal_integer" tb_decimal_integer, - testProperty "tb_decimal_integer_big" tb_decimal_integer_big, - testProperty "tb_decimal_word" tb_decimal_word, - testProperty "tb_decimal_word8" tb_decimal_word8, - testProperty "tb_decimal_word16" tb_decimal_word16, - testProperty "tb_decimal_word32" tb_decimal_word32, - testProperty "tb_decimal_word64" tb_decimal_word64, - testProperty "tb_decimal_big_int" tb_decimal_big_int, - testProperty "tb_decimal_big_word" tb_decimal_big_word, - testProperty "tb_decimal_big_int64" tb_decimal_big_int64, - testProperty "tb_decimal_big_word64" tb_decimal_big_word64 - ], - testGroup "hexadecimal" [ - testProperty "tb_hexadecimal_int" tb_hexadecimal_int, - testProperty "tb_hexadecimal_int8" tb_hexadecimal_int8, - testProperty "tb_hexadecimal_int16" tb_hexadecimal_int16, - testProperty "tb_hexadecimal_int32" tb_hexadecimal_int32, - testProperty "tb_hexadecimal_int64" tb_hexadecimal_int64, - testProperty "tb_hexadecimal_integer" tb_hexadecimal_integer, - testProperty "tb_hexadecimal_word" tb_hexadecimal_word, - testProperty "tb_hexadecimal_word8" tb_hexadecimal_word8, - testProperty "tb_hexadecimal_word16" tb_hexadecimal_word16, - testProperty "tb_hexadecimal_word32" tb_hexadecimal_word32, - testProperty "tb_hexadecimal_word64" tb_hexadecimal_word64 - ], - testGroup "realfloat" [ - testProperty "tb_realfloat_double" tb_realfloat_double, - testProperty "tb_realfloat_float" tb_realfloat_float, - testProperty "tb_formatRealFloat_float" tb_formatRealFloat_float, - testProperty "tb_formatRealFloat_double" tb_formatRealFloat_double - ], - testProperty "tb_fromText" tb_fromText, - testProperty "tb_singleton" tb_singleton - ], - - testGroup "read" [ - testProperty "t_decimal" t_decimal, - testProperty "tl_decimal" tl_decimal, - testProperty "t_hexadecimal" t_hexadecimal, - testProperty "tl_hexadecimal" 
tl_hexadecimal, - testProperty "t_double" t_double, - testProperty "tl_double" tl_double, - testProperty "t_rational" t_rational, - testProperty "tl_rational" tl_rational - ], - - {- - testGroup "input-output" [ - testProperty "t_write_read" t_write_read, - testProperty "tl_write_read" tl_write_read, - testProperty "t_write_read_line" t_write_read_line, - testProperty "tl_write_read_line" tl_write_read_line - -- These tests are subject to I/O race conditions when run under - -- test-framework-quickcheck2. - -- testProperty "t_put_get" t_put_get - -- testProperty "tl_put_get" tl_put_get - ], - -} - - testGroup "lowlevel" [ - testProperty "t_dropWord16" t_dropWord16, - testProperty "t_takeWord16" t_takeWord16, - testProperty "t_take_drop_16" t_take_drop_16, - testProperty "t_use_from" t_use_from, - testProperty "t_copy" t_copy - ], - - testGroup "mul" Mul.tests - ] diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/tests/Tests/QuickCheckUtils.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/tests/Tests/QuickCheckUtils.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/tests/Tests/QuickCheckUtils.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/tests/Tests/QuickCheckUtils.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,360 +0,0 @@ --- | This module provides quickcheck utilities, e.g. arbitrary and show --- instances, and comparison functions, so we can focus on the actual properties --- in the 'Tests.Properties' module. --- -{-# LANGUAGE CPP, FlexibleInstances, TypeSynonymInstances #-} -{-# OPTIONS_GHC -fno-warn-orphans #-} -module Tests.QuickCheckUtils - ( - genUnicode - , unsquare - , smallArbitrary - - , BigBounded(..) - , BigInt(..) - , NotEmpty (..) - - , Small (..) - , small - - , Precision(..) - , precision - - , integralRandomR - - , DecodeErr (..) - , genDecodeErr - - , Stringy (..) - , eq - , eqP - - , Encoding (..) 
- - , write_read - ) where - -import Control.Applicative ((<$>)) -import Control.Arrow (first, (***)) -import Control.DeepSeq (NFData (..), deepseq) -import Control.Exception (bracket) -import Data.String (IsString, fromString) -import Data.Text.Foreign (I16) -import Data.Text.Lazy.Builder.RealFloat (FPFormat(..)) -import Data.Word (Word8, Word16) -import Debug.Trace (trace) -import System.Random (Random (..), RandomGen) -import Test.QuickCheck hiding (Fixed(..), Small (..), (.&.)) -import Test.QuickCheck.Monadic (assert, monadicIO, run) -import Test.QuickCheck.Unicode (string) -import Tests.Utils -import qualified Data.ByteString as B -import qualified Data.Text as T -import qualified Data.Text.Encoding.Error as T -import qualified Data.Text.Internal.Fusion as TF -import qualified Data.Text.Internal.Fusion.Common as TF -import qualified Data.Text.Internal.Lazy as TL -import qualified Data.Text.Internal.Lazy.Fusion as TLF -import qualified Data.Text.Lazy as TL -import qualified System.IO as IO - -#if !MIN_VERSION_base(4,4,0) -import Data.Int (Int64) -import Data.Word (Word, Word64) -#endif - -genUnicode :: IsString a => Gen a -genUnicode = fromString <$> string - -instance Random I16 where - randomR = integralRandomR - random = randomR (minBound,maxBound) - -instance Arbitrary I16 where - arbitrary = arbitrarySizedIntegral - shrink = shrinkIntegral - -instance Arbitrary B.ByteString where - arbitrary = B.pack `fmap` arbitrary - shrink = map B.pack . shrink . 
B.unpack - -#if !MIN_VERSION_base(4,4,0) -instance Random Int64 where - randomR = integralRandomR - random = randomR (minBound,maxBound) - -instance Random Word where - randomR = integralRandomR - random = randomR (minBound,maxBound) - -instance Random Word8 where - randomR = integralRandomR - random = randomR (minBound,maxBound) - -instance Random Word64 where - randomR = integralRandomR - random = randomR (minBound,maxBound) -#endif - --- For tests that have O(n^2) running times or input sizes, resize --- their inputs to the square root of the originals. -unsquare :: (Arbitrary a, Show a, Testable b) => (a -> b) -> Property -unsquare = forAll smallArbitrary - -smallArbitrary :: (Arbitrary a, Show a) => Gen a -smallArbitrary = sized $ \n -> resize (smallish n) arbitrary - where smallish = round . (sqrt :: Double -> Double) . fromIntegral . abs - -instance Arbitrary T.Text where - arbitrary = T.pack `fmap` arbitrary - shrink = map T.pack . shrink . T.unpack - -instance Arbitrary TL.Text where - arbitrary = (TL.fromChunks . map notEmpty) `fmap` smallArbitrary - shrink = map TL.pack . shrink . 
TL.unpack - -newtype BigInt = Big Integer - deriving (Eq, Show) - -instance Arbitrary BigInt where - arbitrary = choose (1::Int,200) >>= \e -> Big <$> choose (10^(e-1),10^e) - shrink (Big a) = [Big (a `div` 2^(l-e)) | e <- shrink l] - where l = truncate (log (fromIntegral a) / log 2 :: Double) :: Integer - -newtype BigBounded a = BigBounded a - deriving (Eq, Show) - -instance (Bounded a, Random a, Arbitrary a) => Arbitrary (BigBounded a) where - arbitrary = BigBounded <$> choose (minBound, maxBound) - -newtype NotEmpty a = NotEmpty { notEmpty :: a } - deriving (Eq, Ord) - -instance Show a => Show (NotEmpty a) where - show (NotEmpty a) = show a - -instance Functor NotEmpty where - fmap f (NotEmpty a) = NotEmpty (f a) - -instance Arbitrary a => Arbitrary (NotEmpty [a]) where - arbitrary = sized (\n -> NotEmpty `fmap` (choose (1,n+1) >>= vector)) - shrink = shrinkNotEmpty null - -instance Arbitrary (NotEmpty T.Text) where - arbitrary = (fmap T.pack) `fmap` arbitrary - shrink = shrinkNotEmpty T.null - -instance Arbitrary (NotEmpty TL.Text) where - arbitrary = (fmap TL.pack) `fmap` arbitrary - shrink = shrinkNotEmpty TL.null - -instance Arbitrary (NotEmpty B.ByteString) where - arbitrary = (fmap B.pack) `fmap` arbitrary - shrink = shrinkNotEmpty B.null - -shrinkNotEmpty :: Arbitrary a => (a -> Bool) -> NotEmpty a -> [NotEmpty a] -shrinkNotEmpty isNull (NotEmpty xs) = - [ NotEmpty xs' | xs' <- shrink xs, not (isNull xs') ] - -data Small = S0 | S1 | S2 | S3 | S4 | S5 | S6 | S7 - | S8 | S9 | S10 | S11 | S12 | S13 | S14 | S15 - | S16 | S17 | S18 | S19 | S20 | S21 | S22 | S23 - | S24 | S25 | S26 | S27 | S28 | S29 | S30 | S31 - deriving (Eq, Ord, Enum, Bounded) - -small :: Integral a => Small -> a -small = fromIntegral . fromEnum - -intf :: (Int -> Int -> Int) -> Small -> Small -> Small -intf f a b = toEnum ((fromEnum a `f` fromEnum b) `mod` 32) - -instance Show Small where - show = show . fromEnum - -instance Read Small where - readsPrec n = map (first toEnum) . 
readsPrec n - -instance Num Small where - fromInteger = toEnum . fromIntegral - signum _ = 1 - abs = id - (+) = intf (+) - (-) = intf (-) - (*) = intf (*) - -instance Real Small where - toRational = toRational . fromEnum - -instance Integral Small where - toInteger = toInteger . fromEnum - quotRem a b = (toEnum x, toEnum y) - where (x, y) = fromEnum a `quotRem` fromEnum b - -instance Random Small where - randomR = integralRandomR - random = randomR (minBound,maxBound) - -instance Arbitrary Small where - arbitrary = choose (minBound, maxBound) - shrink = shrinkIntegral - -integralRandomR :: (Integral a, RandomGen g) => (a,a) -> g -> (a,g) -integralRandomR (a,b) g = case randomR (fromIntegral a :: Integer, - fromIntegral b :: Integer) g of - (x,h) -> (fromIntegral x, h) - -data DecodeErr = Lenient | Ignore | Strict | Replace - deriving (Show, Eq) - -genDecodeErr :: DecodeErr -> Gen T.OnDecodeError -genDecodeErr Lenient = return T.lenientDecode -genDecodeErr Ignore = return T.ignore -genDecodeErr Strict = return T.strictDecode -genDecodeErr Replace = arbitrary - -instance Arbitrary DecodeErr where - arbitrary = elements [Lenient, Ignore, Strict, Replace] - -class Stringy s where - packS :: String -> s - unpackS :: s -> String - splitAtS :: Int -> s -> (s,s) - packSChunkSize :: Int -> String -> s - packSChunkSize _ = packS - -instance Stringy String where - packS = id - unpackS = id - splitAtS = splitAt - -instance Stringy (TF.Stream Char) where - packS = TF.streamList - unpackS = TF.unstreamList - splitAtS n s = (TF.take n s, TF.drop n s) - -instance Stringy T.Text where - packS = T.pack - unpackS = T.unpack - splitAtS = T.splitAt - -instance Stringy TL.Text where - packSChunkSize k = TLF.unstreamChunks k . TF.streamList - packS = TL.pack - unpackS = TL.unpack - splitAtS = ((TL.lazyInvariant *** TL.lazyInvariant) .) . - TL.splitAt . fromIntegral - --- Do two functions give the same answer? 
-eq :: (Eq a, Show a) => (t -> a) -> (t -> a) -> t -> Bool -eq a b s = a s =^= b s - --- What about with the RHS packed? -eqP :: (Eq a, Show a, Stringy s) => - (String -> a) -> (s -> a) -> String -> Word8 -> Bool -eqP f g s w = eql "orig" (f s) (g t) && - eql "mini" (f s) (g mini) && - eql "head" (f sa) (g ta) && - eql "tail" (f sb) (g tb) - where t = packS s - mini = packSChunkSize 10 s - (sa,sb) = splitAt m s - (ta,tb) = splitAtS m t - l = length s - m | l == 0 = n - | otherwise = n `mod` l - n = fromIntegral w - eql d a b - | a =^= b = True - | otherwise = trace (d ++ ": " ++ show a ++ " /= " ++ show b) False - -instance Arbitrary FPFormat where - arbitrary = elements [Exponent, Fixed, Generic] - -newtype Precision a = Precision (Maybe Int) - deriving (Eq, Show) - -precision :: a -> Precision a -> Maybe Int -precision _ (Precision prec) = prec - -arbitraryPrecision :: Int -> Gen (Precision a) -arbitraryPrecision maxDigits = Precision <$> do - n <- choose (-1,maxDigits) - return $ if n == -1 - then Nothing - else Just n - -instance Arbitrary (Precision Float) where - arbitrary = arbitraryPrecision 11 - shrink = map Precision . shrink . precision undefined - -instance Arbitrary (Precision Double) where - arbitrary = arbitraryPrecision 22 - shrink = map Precision . shrink . precision undefined - --- Work around lack of Show instance for TextEncoding. -data Encoding = E String IO.TextEncoding - -instance Show Encoding where show (E n _) = "utf" ++ n - -instance Arbitrary Encoding where - arbitrary = oneof . map return $ - [ E "8" IO.utf8, E "8_bom" IO.utf8_bom, E "16" IO.utf16 - , E "16le" IO.utf16le, E "16be" IO.utf16be, E "32" IO.utf32 - , E "32le" IO.utf32le, E "32be" IO.utf32be - ] - -windowsNewlineMode :: IO.NewlineMode -windowsNewlineMode = IO.NewlineMode - { IO.inputNL = IO.CRLF, IO.outputNL = IO.CRLF - } - -instance Arbitrary IO.NewlineMode where - arbitrary = oneof . 
map return $ - [ IO.noNewlineTranslation, IO.universalNewlineMode, IO.nativeNewlineMode - , windowsNewlineMode - ] - -instance Arbitrary IO.BufferMode where - arbitrary = oneof [ return IO.NoBuffering, - return IO.LineBuffering, - return (IO.BlockBuffering Nothing), - (IO.BlockBuffering . Just . (+1) . fromIntegral) `fmap` - (arbitrary :: Gen Word16) ] - --- This test harness is complex! What property are we checking? --- --- Reading after writing a multi-line file should give the same --- results as were written. --- --- What do we vary while checking this property? --- * The lines themselves, scrubbed to contain neither CR nor LF. (By --- working with a list of lines, we ensure that the data will --- sometimes contain line endings.) --- * Encoding. --- * Newline translation mode. --- * Buffering. -write_read :: (NFData a, Eq a) - => ([b] -> a) - -> ((Char -> Bool) -> a -> b) - -> (IO.Handle -> a -> IO ()) - -> (IO.Handle -> IO a) - -> Encoding - -> IO.NewlineMode - -> IO.BufferMode - -> [a] - -> Property -write_read unline filt writer reader (E _ _) nl buf ts = - monadicIO $ assert . (==t) =<< run act - where t = unline . map (filt (not . (`elem` "\r\n"))) $ ts - act = withTempFile $ \path h -> do - -- hSetEncoding h enc - IO.hSetNewlineMode h nl - IO.hSetBuffering h buf - () <- writer h t - IO.hClose h - bracket (IO.openFile path IO.ReadMode) IO.hClose $ \h' -> do - -- hSetEncoding h' enc - IO.hSetNewlineMode h' nl - IO.hSetBuffering h' buf - r <- reader h' - r `deepseq` return r diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/tests/Tests/Regressions.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/tests/Tests/Regressions.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/tests/Tests/Regressions.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/tests/Tests/Regressions.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,82 +0,0 @@ --- | Regression tests for specific bugs. 
--- -{-# LANGUAGE OverloadedStrings, ScopedTypeVariables #-} -module Tests.Regressions - ( - tests - ) where - -import Control.Exception (SomeException, handle) -import System.IO -import Test.HUnit (assertBool, assertEqual, assertFailure) -import qualified Data.ByteString as B -import Data.ByteString.Char8 () -import qualified Data.ByteString.Lazy as LB -import qualified Data.Text as T -import qualified Data.Text.Encoding as TE -import qualified Data.Text.IO as T -import qualified Data.Text.Lazy as LT -import qualified Data.Text.Lazy.Encoding as LE -import qualified Data.Text.Unsafe as T -import qualified Test.Framework as F -import qualified Test.Framework.Providers.HUnit as F - -import Tests.Utils (withTempFile) - --- Reported by Michael Snoyman: UTF-8 encoding a large lazy bytestring --- caused either a segfault or attempt to allocate a negative number --- of bytes. -lazy_encode_crash :: IO () -lazy_encode_crash = withTempFile $ \ _ h -> - LB.hPut h . LE.encodeUtf8 . LT.pack . replicate 100000 $ 'a' - --- Reported by Pieter Laeremans: attempting to read an incorrectly --- encoded file can result in a crash in the RTS (i.e. not merely an --- exception). -hGetContents_crash :: IO () -hGetContents_crash = withTempFile $ \ path h -> do - B.hPut h (B.pack [0x78, 0xc4 ,0x0a]) >> hClose h - h' <- openFile path ReadMode - hSetEncoding h' utf8 - handle (\(_::SomeException) -> return ()) $ - T.hGetContents h' >> assertFailure "T.hGetContents should crash" - --- Reported by Ian Lynagh: attempting to allocate a sufficiently large --- string (via either Array.new or Text.replicate) could result in an --- integer overflow. 
-replicate_crash :: IO () -replicate_crash = handle (\(_::SomeException) -> return ()) $ - T.replicate (2^power) "0123456789abcdef" `seq` - assertFailure "T.replicate should crash" - where - power | maxBound == (2147483647::Int) = 28 - | otherwise = 60 :: Int - --- Reported by John Millikin: a UTF-8 decode error handler could --- return a bogus substitution character, which we would write without --- checking. -utf8_decode_unsafe :: IO () -utf8_decode_unsafe = do - let t = TE.decodeUtf8With (\_ _ -> Just '\xdc00') "\x80" - assertBool "broken error recovery shouldn't break us" (t == "\xfffd") - --- Reported by Eric Seidel: we mishandled mapping Chars that fit in a --- single Word16 to Chars that require two. -mapAccumL_resize :: IO () -mapAccumL_resize = do - let f a _ = (a, '\65536') - count = 5 - val = T.mapAccumL f (0::Int) (T.replicate count "a") - assertEqual "mapAccumL should correctly fill buffers for two-word results" - (0, T.replicate count "\65536") val - assertEqual "mapAccumL should correctly size buffers for two-word results" - (count * 2) (T.lengthWord16 (snd val)) - -tests :: F.Test -tests = F.testGroup "Regressions" - [ F.testCase "hGetContents_crash" hGetContents_crash - , F.testCase "lazy_encode_crash" lazy_encode_crash - , F.testCase "mapAccumL_resize" mapAccumL_resize - , F.testCase "replicate_crash" replicate_crash - , F.testCase "utf8_decode_unsafe" utf8_decode_unsafe - ] diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/tests/Tests/SlowFunctions.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/tests/Tests/SlowFunctions.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/tests/Tests/SlowFunctions.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/tests/Tests/SlowFunctions.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,39 +0,0 @@ -{-# LANGUAGE BangPatterns #-} -module Tests.SlowFunctions - ( - indices - , splitOn - ) where - -import qualified Data.Text as T -import Data.Text.Internal (Text(..)) -import 
Data.Text.Unsafe (iter_, unsafeHead, unsafeTail) - -indices :: T.Text -- ^ Substring to search for (@needle@) - -> T.Text -- ^ Text to search in (@haystack@) - -> [Int] -indices needle@(Text _narr _noff nlen) haystack@(Text harr hoff hlen) - | T.null needle = [] - | otherwise = scan 0 - where - scan i | i >= hlen = [] - | needle `T.isPrefixOf` t = i : scan (i+nlen) - | otherwise = scan (i+d) - where t = Text harr (hoff+i) (hlen-i) - d = iter_ haystack i - -splitOn :: T.Text -- ^ Text to split on - -> T.Text -- ^ Input text - -> [T.Text] -splitOn pat src0 - | T.null pat = error "splitOn: empty" - | l == 1 = T.split (== (unsafeHead pat)) src0 - | otherwise = go src0 - where - l = T.length pat - go src = search 0 src - where - search !n !s - | T.null s = [src] -- not found - | pat `T.isPrefixOf` s = T.take n src : go (T.drop l s) - | otherwise = search (n+1) (unsafeTail s) diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/tests/Tests/Utils.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/tests/Tests/Utils.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/tests/Tests/Utils.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/tests/Tests/Utils.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,52 +0,0 @@ --- | Miscellaneous testing utilities --- -{-# LANGUAGE ScopedTypeVariables #-} -module Tests.Utils - ( - (=^=) - , withRedirect - , withTempFile - ) where - -import Control.Exception (SomeException, bracket, bracket_, evaluate, try) -import Control.Monad (when) -import Debug.Trace (trace) -import GHC.IO.Handle.Internals (withHandle) -import System.Directory (removeFile) -import System.IO (Handle, hClose, hFlush, hIsOpen, hIsWritable, openTempFile) -import System.IO.Unsafe (unsafePerformIO) - --- Ensure that two potentially bottom values (in the sense of crashing --- for some inputs, not looping infinitely) either both crash, or both --- give comparable results for some input. 
-(=^=) :: (Eq a, Show a) => a -> a -> Bool -i =^= j = unsafePerformIO $ do - x <- try (evaluate i) - y <- try (evaluate j) - case (x,y) of - (Left (_ :: SomeException), Left (_ :: SomeException)) - -> return True - (Right a, Right b) -> return (a == b) - e -> trace ("*** Divergence: " ++ show e) return False -infix 4 =^= -{-# NOINLINE (=^=) #-} - -withTempFile :: (FilePath -> Handle -> IO a) -> IO a -withTempFile = bracket (openTempFile "." "crashy.txt") cleanupTemp . uncurry - where - cleanupTemp (path,h) = do - open <- hIsOpen h - when open (hClose h) - removeFile path - -withRedirect :: Handle -> Handle -> IO a -> IO a -withRedirect tmp h = bracket_ swap swap - where - whenM p a = p >>= (`when` a) - swap = do - whenM (hIsOpen tmp) $ whenM (hIsWritable tmp) $ hFlush tmp - whenM (hIsOpen h) $ whenM (hIsWritable h) $ hFlush h - withHandle "spam" tmp $ \tmph -> do - hh <- withHandle "spam" h $ \hh -> - return (tmph,hh) - return (hh,()) diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/tests/Tests.hs cabal-install-1.22-1.22.9.0/=unpacked-tar8=/tests/Tests.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/tests/Tests.hs 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/tests/Tests.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ --- | Provides a simple main function which runs all the tests --- -module Main - ( main - ) where - -import Test.Framework (defaultMain) - -import qualified Tests.Properties as Properties -import qualified Tests.Regressions as Regressions - -main :: IO () -main = defaultMain [Properties.tests, Regressions.tests] diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/tests/text-tests.cabal cabal-install-1.22-1.22.9.0/=unpacked-tar8=/tests/text-tests.cabal --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/tests/text-tests.cabal 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/tests/text-tests.cabal 1970-01-01 00:00:00.000000000 +0000 @@ -1,141 +0,0 @@ -name: 
text-tests -version: 0.0.0.0 -synopsis: Functional tests for the text package -description: Functional tests for the text package -homepage: https://github.com/bos/text -license: BSD3 -license-file: ../LICENSE -author: Jasper Van der Jeugt , - Bryan O'Sullivan , - Tom Harper , - Duncan Coutts -maintainer: Bryan O'Sullivan -category: Text -build-type: Simple - -cabal-version: >=1.8 - -flag hpc - description: Enable HPC to generate coverage reports - default: False - manual: True - -executable text-tests - main-is: Tests.hs - - other-modules: - Tests.IO - Tests.Properties - Tests.Properties.Mul - Tests.QuickCheckUtils - Tests.Regressions - Tests.SlowFunctions - Tests.Utils - - ghc-options: - -Wall -threaded -O0 -rtsopts - - if flag(hpc) - ghc-options: - -fhpc - - cpp-options: - -DTEST_SUITE - -DASSERTS - -DHAVE_DEEPSEQ - - build-depends: - HUnit >= 1.2, - QuickCheck >= 2.7, - base == 4.*, - bytestring, - deepseq, - directory, - quickcheck-unicode, - random, - test-framework >= 0.4, - test-framework-hunit >= 0.2, - test-framework-quickcheck2 >= 0.2, - text-tests - -executable text-tests-stdio - main-is: Tests/IO.hs - - ghc-options: - -Wall -threaded -rtsopts - - -- Optional HPC support - if flag(hpc) - ghc-options: - -fhpc - - build-depends: - text-tests, - base >= 4 && < 5 - -library - hs-source-dirs: .. 
- c-sources: ../cbits/cbits.c - include-dirs: ../include - ghc-options: -Wall - exposed-modules: - Data.Text - Data.Text.Array - Data.Text.Encoding - Data.Text.Encoding.Error - Data.Text.Internal.Encoding.Fusion - Data.Text.Internal.Encoding.Fusion.Common - Data.Text.Internal.Encoding.Utf16 - Data.Text.Internal.Encoding.Utf32 - Data.Text.Internal.Encoding.Utf8 - Data.Text.Foreign - Data.Text.Internal.Fusion - Data.Text.Internal.Fusion.CaseMapping - Data.Text.Internal.Fusion.Common - Data.Text.Internal.Fusion.Size - Data.Text.Internal.Fusion.Types - Data.Text.IO - Data.Text.Internal.IO - Data.Text.Internal - Data.Text.Lazy - Data.Text.Lazy.Builder - Data.Text.Internal.Builder.Functions - Data.Text.Lazy.Builder.Int - Data.Text.Internal.Builder.Int.Digits - Data.Text.Internal.Builder - Data.Text.Lazy.Builder.RealFloat - Data.Text.Internal.Builder.RealFloat.Functions - Data.Text.Lazy.Encoding - Data.Text.Internal.Lazy.Encoding.Fusion - Data.Text.Internal.Lazy.Fusion - Data.Text.Lazy.IO - Data.Text.Internal.Lazy - Data.Text.Lazy.Read - Data.Text.Internal.Lazy.Search - Data.Text.Internal.Private - Data.Text.Read - Data.Text.Internal.Read - Data.Text.Internal.Search - Data.Text.Unsafe - Data.Text.Internal.Unsafe - Data.Text.Internal.Unsafe.Char - Data.Text.Internal.Unsafe.Shift - Data.Text.Internal.Functions - - if flag(hpc) - ghc-options: - -fhpc - - cpp-options: - -DTEST_SUITE - -DHAVE_DEEPSEQ - -DASSERTS - -DINTEGER_GMP - - build-depends: - array, - base == 4.*, - bytestring, - deepseq, - ghc-prim, - integer-gmp diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/tests-and-benchmarks.markdown cabal-install-1.22-1.22.9.0/=unpacked-tar8=/tests-and-benchmarks.markdown --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/tests-and-benchmarks.markdown 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/tests-and-benchmarks.markdown 1970-01-01 00:00:00.000000000 +0000 @@ -1,63 +0,0 @@ -Tests and benchmarks -==================== - -Prerequisites 
-------------- - -To run the tests and benchmarks, you will need the test data, which -you can clone from one of the following locations: - -* Mercurial master repository: - [bitbucket.org/bos/text-test-data](https://bitbucket.org/bos/text-test-data) - -* Git mirror repository: - [github.com/bos/text-test-data](https://github.com/bos/text-test-data) - -You should clone that repository into the `tests` subdirectory (your -clone must be named `text-test-data` locally), then run `make -C -tests/text-test-data` to uncompress the test files. Many tests and -benchmarks will fail if the test files are missing. - -Functional tests ----------------- - -The functional tests are located in the `tests` subdirectory. An overview of -what's in that directory: - - Makefile Has targets for common tasks - Tests Source files of the testing code - scripts Various utility scripts - text-tests.cabal Cabal file that compiles all benchmarks - -The `text-tests.cabal` builds: - -- A copy of the text library, sharing the source code, but exposing all internal - modules, for testing purposes -- The different test suites - -To compile, run all tests, and generate a coverage report, simply use `make`. - -Benchmarks ----------- - -The benchmarks are located in the `benchmarks` subdirectory. An overview of -what's in that directory: - - Makefile Has targets for common tasks - haskell Source files of the haskell benchmarks - python Python implementations of some benchmarks - ruby Ruby implementations of some benchmarks - text-benchmarks.cabal Cabal file which compiles all benchmarks - -To compile the benchmarks, navigate to the `benchmarks` subdirectory and run -`cabal configure && cabal build`. Then, you can run the benchmarks using: - - ./dist/build/text-benchmarks/text-benchmarks - -However, since there's quite a lot of benchmarks, you usually don't want to -run them all. 
Instead, use the `-l` flag to get a list of benchmarks: - - ./dist/build/text-benchmarks/text-benchmarks - -And run the ones you want to inspect. If you want to configure the benchmarks -further, the exact parameters can be changed in `Benchmarks.hs`. diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar8=/text.cabal cabal-install-1.22-1.22.9.0/=unpacked-tar8=/text.cabal --- cabal-install-1.22-1.22.6.0/=unpacked-tar8=/text.cabal 2014-12-12 21:23:19.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar8=/text.cabal 1970-01-01 00:00:00.000000000 +0000 @@ -1,191 +0,0 @@ -name: text -version: 1.2.0.3 -homepage: https://github.com/bos/text -bug-reports: https://github.com/bos/text/issues -synopsis: An efficient packed Unicode text type. -description: - . - An efficient packed, immutable Unicode text type (both strict and - lazy), with a powerful loop fusion optimization framework. - . - The 'Text' type represents Unicode character strings, in a time and - space-efficient manner. This package provides text processing - capabilities that are optimized for performance critical use, both - in terms of large data quantities and high speed. - . - The 'Text' type provides character-encoding, type-safe case - conversion via whole-string case conversion functions. It also - provides a range of functions for converting 'Text' values to and from - 'ByteStrings', using several standard encodings. - . - Efficient locale-sensitive support for text IO is also supported. - . - These modules are intended to be imported qualified, to avoid name - clashes with Prelude functions, e.g. - . - > import qualified Data.Text as T - . 
- To use an extended and very rich family of functions for working - with Unicode text (including normalization, regular expressions, - non-standard encodings, text breaking, and locales), see - the @text-icu@ package: - - -license: BSD3 -license-file: LICENSE -author: Bryan O'Sullivan -maintainer: Bryan O'Sullivan -copyright: 2009-2011 Bryan O'Sullivan, 2008-2009 Tom Harper -category: Data, Text -build-type: Simple -cabal-version: >= 1.8 -extra-source-files: - -- scripts/CaseFolding.txt - -- scripts/SpecialCasing.txt - README.markdown - benchmarks/Setup.hs - benchmarks/cbits/*.c - benchmarks/haskell/*.hs - benchmarks/haskell/Benchmarks/*.hs - benchmarks/haskell/Benchmarks/Programs/*.hs - benchmarks/python/*.py - benchmarks/ruby/*.rb - benchmarks/text-benchmarks.cabal - changelog.md - include/*.h - scripts/*.hs - tests-and-benchmarks.markdown - tests/*.hs - tests/.ghci - tests/Makefile - tests/Tests/*.hs - tests/Tests/Properties/*.hs - tests/cabal.config - tests/scripts/*.sh - tests/text-tests.cabal - -flag developer - description: operate in developer mode - default: False - manual: True - -flag integer-simple - description: Use the simple integer library instead of GMP - default: False - -library - c-sources: cbits/cbits.c - include-dirs: include - - exposed-modules: - Data.Text - Data.Text.Array - Data.Text.Encoding - Data.Text.Encoding.Error - Data.Text.Foreign - Data.Text.IO - Data.Text.Internal - Data.Text.Internal.Builder - Data.Text.Internal.Builder.Functions - Data.Text.Internal.Builder.Int.Digits - Data.Text.Internal.Builder.RealFloat.Functions - Data.Text.Internal.Encoding.Fusion - Data.Text.Internal.Encoding.Fusion.Common - Data.Text.Internal.Encoding.Utf16 - Data.Text.Internal.Encoding.Utf32 - Data.Text.Internal.Encoding.Utf8 - Data.Text.Internal.Functions - Data.Text.Internal.Fusion - Data.Text.Internal.Fusion.CaseMapping - Data.Text.Internal.Fusion.Common - Data.Text.Internal.Fusion.Size - Data.Text.Internal.Fusion.Types - Data.Text.Internal.IO - 
Data.Text.Internal.Lazy - Data.Text.Internal.Lazy.Encoding.Fusion - Data.Text.Internal.Lazy.Fusion - Data.Text.Internal.Lazy.Search - Data.Text.Internal.Private - Data.Text.Internal.Read - Data.Text.Internal.Search - Data.Text.Internal.Unsafe - Data.Text.Internal.Unsafe.Char - Data.Text.Internal.Unsafe.Shift - Data.Text.Lazy - Data.Text.Lazy.Builder - Data.Text.Lazy.Builder.Int - Data.Text.Lazy.Builder.RealFloat - Data.Text.Lazy.Encoding - Data.Text.Lazy.IO - Data.Text.Lazy.Internal - Data.Text.Lazy.Read - Data.Text.Read - Data.Text.Unsafe - - build-depends: - array >= 0.3, - base >= 4.2 && < 5, - deepseq >= 1.1.0.0, - ghc-prim >= 0.2 - - if impl(ghc >= 7.7) - build-depends: bytestring >= 0.10.4.0 - else - build-depends: bytestring >= 0.9 - - cpp-options: -DHAVE_DEEPSEQ - ghc-options: -Wall -fwarn-tabs -funbox-strict-fields -O2 - if flag(developer) - ghc-prof-options: -auto-all - ghc-options: -Werror - cpp-options: -DASSERTS - - if flag(integer-simple) - cpp-options: -DINTEGER_SIMPLE - build-depends: integer-simple >= 0.1 && < 0.5 - else - cpp-options: -DINTEGER_GMP - build-depends: integer-gmp >= 0.2 - -test-suite tests - type: exitcode-stdio-1.0 - hs-source-dirs: tests . 
- main-is: Tests.hs - c-sources: cbits/cbits.c - include-dirs: include - - ghc-options: - -Wall -threaded -O0 -rtsopts - - cpp-options: - -DASSERTS -DHAVE_DEEPSEQ -DTEST_SUITE - - build-depends: - HUnit >= 1.2, - QuickCheck >= 2.7, - array, - base, - bytestring, - deepseq, - directory, - ghc-prim, - quickcheck-unicode, - random, - test-framework >= 0.4, - test-framework-hunit >= 0.2, - test-framework-quickcheck2 >= 0.2 - - if flag(integer-simple) - cpp-options: -DINTEGER_SIMPLE - build-depends: integer-simple >= 0.1 && < 0.5 - else - cpp-options: -DINTEGER_GMP - build-depends: integer-gmp >= 0.2 - -source-repository head - type: git - location: https://github.com/bos/text - -source-repository head - type: mercurial - location: https://bitbucket.org/bos/text diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar9=/changelog.md cabal-install-1.22-1.22.9.0/=unpacked-tar9=/changelog.md --- cabal-install-1.22-1.22.6.0/=unpacked-tar9=/changelog.md 2014-12-17 11:00:31.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar9=/changelog.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,48 +0,0 @@ -# Changelog for [`stm` package](http://hackage.haskell.org/package/stm) - -## 2.4.4 *Dec 2014* - - * Add support for `base-4.8.0.0` - - * Tighten Safe Haskell bounds - - * Add `mkWeakTMVar` to `Control.Concurrent.STM.TMVar` - - * Add `@since`-annotations - -## 2.4.3 *Mar 2014* - - * Update behaviour of `newBroadcastTChanIO` to match - `newBroadcastTChan` in causing an error on a read from the - broadcast channel - - * Add `mkWeakTVar` - - * Add `isFullTBQueue` - - * Fix `TChan` created via `newBroadcastTChanIO` to throw same - exception on a `readTChan` as when created via `newBroadcastTChan` - - * Update to Cabal 1.10 format - -## 2.4.2 *Nov 2012* - - * Add `Control.Concurrent.STM.TSem` (transactional semaphore) - - * Add Applicative/Alternative instances of STM for GHC <7.0 - - * Throw proper exception when `readTChan` called on a broadcast `TChan` - -## 2.4 *Jul 2012* - - * Add 
`Control.Concurrent.STM.TQueue` (a faster `TChan`) - - * Add `Control.Concurrent.STM.TBQueue` (a bounded channel based on `TQueue`) - - * Add `Eq` instance for `TChan` - - * Add `newBroadcastTChan` and `newBroadcastTChanIO` - - * Some performance improvements for `TChan` - - * Add `cloneTChan` diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar9=/Control/Concurrent/STM/TArray.hs cabal-install-1.22-1.22.9.0/=unpacked-tar9=/Control/Concurrent/STM/TArray.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar9=/Control/Concurrent/STM/TArray.hs 2014-12-17 11:00:31.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar9=/Control/Concurrent/STM/TArray.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,68 +0,0 @@ -{-# LANGUAGE CPP, DeriveDataTypeable, FlexibleInstances, MultiParamTypeClasses #-} - -#if __GLASGOW_HASKELL__ >= 701 -{-# LANGUAGE Trustworthy #-} -#endif - ------------------------------------------------------------------------------ --- | --- Module : Control.Concurrent.STM.TArray --- Copyright : (c) The University of Glasgow 2005 --- License : BSD-style (see the file libraries/base/LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (requires STM) --- --- TArrays: transactional arrays, for use in the STM monad --- ------------------------------------------------------------------------------ - -module Control.Concurrent.STM.TArray ( - TArray -) where - -import Data.Array (Array, bounds) -import Data.Array.Base (listArray, arrEleBottom, unsafeAt, MArray(..), - IArray(numElements)) -import Data.Ix (rangeSize) -import Data.Typeable (Typeable) -import Control.Concurrent.STM.TVar (TVar, newTVar, readTVar, writeTVar) -#ifdef __GLASGOW_HASKELL__ -import GHC.Conc (STM) -#else -import Control.Sequential.STM (STM) -#endif - --- |TArray is a transactional array, supporting the usual 'MArray' --- interface for mutable arrays. 
--- --- It is currently implemented as @Array ix (TVar e)@, --- but it may be replaced by a more efficient implementation in the future --- (the interface will remain the same, however). --- -newtype TArray i e = TArray (Array i (TVar e)) deriving (Eq, Typeable) - -instance MArray TArray e STM where - getBounds (TArray a) = return (bounds a) - newArray b e = do - a <- rep (rangeSize b) (newTVar e) - return $ TArray (listArray b a) - newArray_ b = do - a <- rep (rangeSize b) (newTVar arrEleBottom) - return $ TArray (listArray b a) - unsafeRead (TArray a) i = readTVar $ unsafeAt a i - unsafeWrite (TArray a) i e = writeTVar (unsafeAt a i) e - getNumElements (TArray a) = return (numElements a) - --- | Like 'replicateM' but uses an accumulator to prevent stack overflows. --- Unlike 'replicateM' the returned list is in reversed order. --- This doesn't matter though since this function is only used to create --- arrays with identical elements. -rep :: Monad m => Int -> m a -> m [a] -rep n m = go n [] - where - go 0 xs = return xs - go i xs = do - x <- m - go (i-1) (x:xs) diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar9=/Control/Concurrent/STM/TBQueue.hs cabal-install-1.22-1.22.9.0/=unpacked-tar9=/Control/Concurrent/STM/TBQueue.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar9=/Control/Concurrent/STM/TBQueue.hs 2014-12-17 11:00:31.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar9=/Control/Concurrent/STM/TBQueue.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,197 +0,0 @@ -{-# OPTIONS_GHC -fno-warn-name-shadowing #-} -{-# LANGUAGE CPP, DeriveDataTypeable #-} - -#if __GLASGOW_HASKELL__ >= 701 -{-# LANGUAGE Trustworthy #-} -#endif - ------------------------------------------------------------------------------ --- | --- Module : Control.Concurrent.STM.TBQueue --- Copyright : (c) The University of Glasgow 2012 --- License : BSD-style (see the file libraries/base/LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : 
non-portable (requires STM) --- --- 'TBQueue' is a bounded version of 'TQueue'. The queue has a maximum --- capacity set when it is created. If the queue already contains the --- maximum number of elements, then 'writeTBQueue' blocks until an --- element is removed from the queue. --- --- The implementation is based on the traditional purely-functional --- queue representation that uses two lists to obtain amortised /O(1)/ --- enqueue and dequeue operations. --- --- @since 2.4 ------------------------------------------------------------------------------ - -module Control.Concurrent.STM.TBQueue ( - -- * TBQueue - TBQueue, - newTBQueue, - newTBQueueIO, - readTBQueue, - tryReadTBQueue, - peekTBQueue, - tryPeekTBQueue, - writeTBQueue, - unGetTBQueue, - isEmptyTBQueue, - isFullTBQueue, - ) where - -import Data.Typeable -import GHC.Conc - -#define _UPK_(x) {-# UNPACK #-} !(x) - --- | 'TBQueue' is an abstract type representing a bounded FIFO channel. --- --- @since 2.4 -data TBQueue a - = TBQueue _UPK_(TVar Int) -- CR: read capacity - _UPK_(TVar [a]) -- R: elements waiting to be read - _UPK_(TVar Int) -- CW: write capacity - _UPK_(TVar [a]) -- W: elements written (head is most recent) - deriving Typeable - -instance Eq (TBQueue a) where - TBQueue a _ _ _ == TBQueue b _ _ _ = a == b - --- Total channel capacity remaining is CR + CW. Reads only need to --- access CR, writes usually need to access only CW but sometimes need --- CR. So in the common case we avoid contention between CR and CW. 
--- --- - when removing an element from R: --- CR := CR + 1 --- --- - when adding an element to W: --- if CW is non-zero --- then CW := CW - 1 --- then if CR is non-zero --- then CW := CR - 1; CR := 0 --- else **FULL** - --- |Build and returns a new instance of 'TBQueue' -newTBQueue :: Int -- ^ maximum number of elements the queue can hold - -> STM (TBQueue a) -newTBQueue size = do - read <- newTVar [] - write <- newTVar [] - rsize <- newTVar 0 - wsize <- newTVar size - return (TBQueue rsize read wsize write) - --- |@IO@ version of 'newTBQueue'. This is useful for creating top-level --- 'TBQueue's using 'System.IO.Unsafe.unsafePerformIO', because using --- 'atomically' inside 'System.IO.Unsafe.unsafePerformIO' isn't --- possible. -newTBQueueIO :: Int -> IO (TBQueue a) -newTBQueueIO size = do - read <- newTVarIO [] - write <- newTVarIO [] - rsize <- newTVarIO 0 - wsize <- newTVarIO size - return (TBQueue rsize read wsize write) - --- |Write a value to a 'TBQueue'; blocks if the queue is full. -writeTBQueue :: TBQueue a -> a -> STM () -writeTBQueue (TBQueue rsize _read wsize write) a = do - w <- readTVar wsize - if (w /= 0) - then do writeTVar wsize (w - 1) - else do - r <- readTVar rsize - if (r /= 0) - then do writeTVar rsize 0 - writeTVar wsize (r - 1) - else retry - listend <- readTVar write - writeTVar write (a:listend) - --- |Read the next value from the 'TBQueue'. -readTBQueue :: TBQueue a -> STM a -readTBQueue (TBQueue rsize read _wsize write) = do - xs <- readTVar read - r <- readTVar rsize - writeTVar rsize (r + 1) - case xs of - (x:xs') -> do - writeTVar read xs' - return x - [] -> do - ys <- readTVar write - case ys of - [] -> retry - _ -> do - let (z:zs) = reverse ys -- NB. lazy: we want the transaction to be - -- short, otherwise it will conflict - writeTVar write [] - writeTVar read zs - return z - --- | A version of 'readTBQueue' which does not retry. Instead it --- returns @Nothing@ if no value is available. 
-tryReadTBQueue :: TBQueue a -> STM (Maybe a) -tryReadTBQueue c = fmap Just (readTBQueue c) `orElse` return Nothing - --- | Get the next value from the @TBQueue@ without removing it, --- retrying if the channel is empty. -peekTBQueue :: TBQueue a -> STM a -peekTBQueue c = do - x <- readTBQueue c - unGetTBQueue c x - return x - --- | A version of 'peekTBQueue' which does not retry. Instead it --- returns @Nothing@ if no value is available. -tryPeekTBQueue :: TBQueue a -> STM (Maybe a) -tryPeekTBQueue c = do - m <- tryReadTBQueue c - case m of - Nothing -> return Nothing - Just x -> do - unGetTBQueue c x - return m - --- |Put a data item back onto a channel, where it will be the next item read. --- Blocks if the queue is full. -unGetTBQueue :: TBQueue a -> a -> STM () -unGetTBQueue (TBQueue rsize read wsize _write) a = do - r <- readTVar rsize - if (r > 0) - then do writeTVar rsize (r - 1) - else do - w <- readTVar wsize - if (w > 0) - then writeTVar wsize (w - 1) - else retry - xs <- readTVar read - writeTVar read (a:xs) - --- |Returns 'True' if the supplied 'TBQueue' is empty. -isEmptyTBQueue :: TBQueue a -> STM Bool -isEmptyTBQueue (TBQueue _rsize read _wsize write) = do - xs <- readTVar read - case xs of - (_:_) -> return False - [] -> do ys <- readTVar write - case ys of - [] -> return True - _ -> return False - --- |Returns 'True' if the supplied 'TBQueue' is full. 
--- --- @since 2.4.3 -isFullTBQueue :: TBQueue a -> STM Bool -isFullTBQueue (TBQueue rsize _read wsize _write) = do - w <- readTVar wsize - if (w > 0) - then return False - else do - r <- readTVar rsize - if (r > 0) - then return False - else return True diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar9=/Control/Concurrent/STM/TChan.hs cabal-install-1.22-1.22.9.0/=unpacked-tar9=/Control/Concurrent/STM/TChan.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar9=/Control/Concurrent/STM/TChan.hs 2014-12-17 11:00:31.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar9=/Control/Concurrent/STM/TChan.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,203 +0,0 @@ -{-# OPTIONS_GHC -fno-warn-name-shadowing #-} -{-# LANGUAGE CPP, DeriveDataTypeable #-} - -#if __GLASGOW_HASKELL__ >= 701 -{-# LANGUAGE Trustworthy #-} -#endif - ------------------------------------------------------------------------------ --- | --- Module : Control.Concurrent.STM.TChan --- Copyright : (c) The University of Glasgow 2004 --- License : BSD-style (see the file libraries/base/LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (requires STM) --- --- TChan: Transactional channels --- (GHC only) --- ------------------------------------------------------------------------------ - -module Control.Concurrent.STM.TChan ( -#ifdef __GLASGOW_HASKELL__ - -- * TChans - TChan, - - -- ** Construction - newTChan, - newTChanIO, - newBroadcastTChan, - newBroadcastTChanIO, - dupTChan, - cloneTChan, - - -- ** Reading and writing - readTChan, - tryReadTChan, - peekTChan, - tryPeekTChan, - writeTChan, - unGetTChan, - isEmptyTChan -#endif - ) where - -#ifdef __GLASGOW_HASKELL__ -import GHC.Conc - -import Data.Typeable (Typeable) - -#define _UPK_(x) {-# UNPACK #-} !(x) - --- | 'TChan' is an abstract type representing an unbounded FIFO channel. 
-data TChan a = TChan _UPK_(TVar (TVarList a)) - _UPK_(TVar (TVarList a)) - deriving (Eq, Typeable) - -type TVarList a = TVar (TList a) -data TList a = TNil | TCons a _UPK_(TVarList a) - --- |Build and return a new instance of 'TChan' -newTChan :: STM (TChan a) -newTChan = do - hole <- newTVar TNil - read <- newTVar hole - write <- newTVar hole - return (TChan read write) - --- |@IO@ version of 'newTChan'. This is useful for creating top-level --- 'TChan's using 'System.IO.Unsafe.unsafePerformIO', because using --- 'atomically' inside 'System.IO.Unsafe.unsafePerformIO' isn't --- possible. -newTChanIO :: IO (TChan a) -newTChanIO = do - hole <- newTVarIO TNil - read <- newTVarIO hole - write <- newTVarIO hole - return (TChan read write) - --- | Create a write-only 'TChan'. More precisely, 'readTChan' will 'retry' --- even after items have been written to the channel. The only way to read --- a broadcast channel is to duplicate it with 'dupTChan'. --- --- Consider a server that broadcasts messages to clients: --- --- >serve :: TChan Message -> Client -> IO loop --- >serve broadcastChan client = do --- > myChan <- dupTChan broadcastChan --- > forever $ do --- > message <- readTChan myChan --- > send client message --- --- The problem with using 'newTChan' to create the broadcast channel is that if --- it is only written to and never read, items will pile up in memory. By --- using 'newBroadcastTChan' to create the broadcast channel, items can be --- garbage collected after clients have seen them. --- --- @since 2.4 -newBroadcastTChan :: STM (TChan a) -newBroadcastTChan = do - write_hole <- newTVar TNil - read <- newTVar (error "reading from a TChan created by newBroadcastTChan; use dupTChan first") - write <- newTVar write_hole - return (TChan read write) - --- | @IO@ version of 'newBroadcastTChan'. 
--- --- @since 2.4 -newBroadcastTChanIO :: IO (TChan a) -newBroadcastTChanIO = do - write_hole <- newTVarIO TNil - read <- newTVarIO (error "reading from a TChan created by newBroadcastTChanIO; use dupTChan first") - write <- newTVarIO write_hole - return (TChan read write) - --- |Write a value to a 'TChan'. -writeTChan :: TChan a -> a -> STM () -writeTChan (TChan _read write) a = do - listend <- readTVar write -- listend == TVar pointing to TNil - new_listend <- newTVar TNil - writeTVar listend (TCons a new_listend) - writeTVar write new_listend - --- |Read the next value from the 'TChan'. -readTChan :: TChan a -> STM a -readTChan (TChan read _write) = do - listhead <- readTVar read - head <- readTVar listhead - case head of - TNil -> retry - TCons a tail -> do - writeTVar read tail - return a - --- | A version of 'readTChan' which does not retry. Instead it --- returns @Nothing@ if no value is available. -tryReadTChan :: TChan a -> STM (Maybe a) -tryReadTChan (TChan read _write) = do - listhead <- readTVar read - head <- readTVar listhead - case head of - TNil -> return Nothing - TCons a tl -> do - writeTVar read tl - return (Just a) - --- | Get the next value from the @TChan@ without removing it, --- retrying if the channel is empty. -peekTChan :: TChan a -> STM a -peekTChan (TChan read _write) = do - listhead <- readTVar read - head <- readTVar listhead - case head of - TNil -> retry - TCons a _ -> return a - --- | A version of 'peekTChan' which does not retry. Instead it --- returns @Nothing@ if no value is available. -tryPeekTChan :: TChan a -> STM (Maybe a) -tryPeekTChan (TChan read _write) = do - listhead <- readTVar read - head <- readTVar listhead - case head of - TNil -> return Nothing - TCons a _ -> return (Just a) - --- |Duplicate a 'TChan': the duplicate channel begins empty, but data written to --- either channel from then on will be available from both. 
Hence this creates --- a kind of broadcast channel, where data written by anyone is seen by --- everyone else. -dupTChan :: TChan a -> STM (TChan a) -dupTChan (TChan _read write) = do - hole <- readTVar write - new_read <- newTVar hole - return (TChan new_read write) - --- |Put a data item back onto a channel, where it will be the next item read. -unGetTChan :: TChan a -> a -> STM () -unGetTChan (TChan read _write) a = do - listhead <- readTVar read - newhead <- newTVar (TCons a listhead) - writeTVar read newhead - --- |Returns 'True' if the supplied 'TChan' is empty. -isEmptyTChan :: TChan a -> STM Bool -isEmptyTChan (TChan read _write) = do - listhead <- readTVar read - head <- readTVar listhead - case head of - TNil -> return True - TCons _ _ -> return False - --- |Clone a 'TChan': similar to dupTChan, but the cloned channel starts with the --- same content available as the original channel. --- --- @since 2.4 -cloneTChan :: TChan a -> STM (TChan a) -cloneTChan (TChan read write) = do - readpos <- readTVar read - new_read <- newTVar readpos - return (TChan new_read write) -#endif diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar9=/Control/Concurrent/STM/TMVar.hs cabal-install-1.22-1.22.9.0/=unpacked-tar9=/Control/Concurrent/STM/TMVar.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar9=/Control/Concurrent/STM/TMVar.hs 2014-12-17 11:00:31.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar9=/Control/Concurrent/STM/TMVar.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,164 +0,0 @@ -{-# LANGUAGE CPP, DeriveDataTypeable, MagicHash, UnboxedTuples #-} - -#if __GLASGOW_HASKELL__ >= 701 -{-# LANGUAGE Trustworthy #-} -#endif - ------------------------------------------------------------------------------ --- | --- Module : Control.Concurrent.STM.TMVar --- Copyright : (c) The University of Glasgow 2004 --- License : BSD-style (see the file libraries/base/LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable 
(requires STM) --- --- TMVar: Transactional MVars, for use in the STM monad --- (GHC only) --- ------------------------------------------------------------------------------ - -module Control.Concurrent.STM.TMVar ( -#ifdef __GLASGOW_HASKELL__ - -- * TMVars - TMVar, - newTMVar, - newEmptyTMVar, - newTMVarIO, - newEmptyTMVarIO, - takeTMVar, - putTMVar, - readTMVar, - tryReadTMVar, - swapTMVar, - tryTakeTMVar, - tryPutTMVar, - isEmptyTMVar, - mkWeakTMVar -#endif - ) where - -#ifdef __GLASGOW_HASKELL__ -import GHC.Base -import GHC.Conc -import GHC.Weak - -import Data.Typeable (Typeable) - -newtype TMVar a = TMVar (TVar (Maybe a)) deriving (Eq, Typeable) -{- ^ -A 'TMVar' is a synchronising variable, used -for communication between concurrent threads. It can be thought of -as a box, which may be empty or full. --} - --- |Create a 'TMVar' which contains the supplied value. -newTMVar :: a -> STM (TMVar a) -newTMVar a = do - t <- newTVar (Just a) - return (TMVar t) - --- |@IO@ version of 'newTMVar'. This is useful for creating top-level --- 'TMVar's using 'System.IO.Unsafe.unsafePerformIO', because using --- 'atomically' inside 'System.IO.Unsafe.unsafePerformIO' isn't --- possible. -newTMVarIO :: a -> IO (TMVar a) -newTMVarIO a = do - t <- newTVarIO (Just a) - return (TMVar t) - --- |Create a 'TMVar' which is initially empty. -newEmptyTMVar :: STM (TMVar a) -newEmptyTMVar = do - t <- newTVar Nothing - return (TMVar t) - --- |@IO@ version of 'newEmptyTMVar'. This is useful for creating top-level --- 'TMVar's using 'System.IO.Unsafe.unsafePerformIO', because using --- 'atomically' inside 'System.IO.Unsafe.unsafePerformIO' isn't --- possible. -newEmptyTMVarIO :: IO (TMVar a) -newEmptyTMVarIO = do - t <- newTVarIO Nothing - return (TMVar t) - --- |Return the contents of the 'TMVar'. If the 'TMVar' is currently --- empty, the transaction will 'retry'. After a 'takeTMVar', --- the 'TMVar' is left empty. 
-takeTMVar :: TMVar a -> STM a -takeTMVar (TMVar t) = do - m <- readTVar t - case m of - Nothing -> retry - Just a -> do writeTVar t Nothing; return a - --- | A version of 'takeTMVar' that does not 'retry'. The 'tryTakeTMVar' --- function returns 'Nothing' if the 'TMVar' was empty, or @'Just' a@ if --- the 'TMVar' was full with contents @a@. After 'tryTakeTMVar', the --- 'TMVar' is left empty. -tryTakeTMVar :: TMVar a -> STM (Maybe a) -tryTakeTMVar (TMVar t) = do - m <- readTVar t - case m of - Nothing -> return Nothing - Just a -> do writeTVar t Nothing; return (Just a) - --- |Put a value into a 'TMVar'. If the 'TMVar' is currently full, --- 'putTMVar' will 'retry'. -putTMVar :: TMVar a -> a -> STM () -putTMVar (TMVar t) a = do - m <- readTVar t - case m of - Nothing -> do writeTVar t (Just a); return () - Just _ -> retry - --- | A version of 'putTMVar' that does not 'retry'. The 'tryPutTMVar' --- function attempts to put the value @a@ into the 'TMVar', returning --- 'True' if it was successful, or 'False' otherwise. -tryPutTMVar :: TMVar a -> a -> STM Bool -tryPutTMVar (TMVar t) a = do - m <- readTVar t - case m of - Nothing -> do writeTVar t (Just a); return True - Just _ -> return False - --- | This is a combination of 'takeTMVar' and 'putTMVar'; ie. it --- takes the value from the 'TMVar', puts it back, and also returns --- it. -readTMVar :: TMVar a -> STM a -readTMVar (TMVar t) = do - m <- readTVar t - case m of - Nothing -> retry - Just a -> return a - --- | A version of 'readTMVar' which does not retry. Instead it --- returns @Nothing@ if no value is available. -tryReadTMVar :: TMVar a -> STM (Maybe a) -tryReadTMVar (TMVar t) = readTVar t - --- |Swap the contents of a 'TMVar' for a new value. -swapTMVar :: TMVar a -> a -> STM a -swapTMVar (TMVar t) new = do - m <- readTVar t - case m of - Nothing -> retry - Just old -> do writeTVar t (Just new); return old - --- |Check whether a given 'TMVar' is empty. 
-isEmptyTMVar :: TMVar a -> STM Bool -isEmptyTMVar (TMVar t) = do - m <- readTVar t - case m of - Nothing -> return True - Just _ -> return False - --- | Make a 'Weak' pointer to a 'TMVar', using the second argument as --- a finalizer to run when the 'TMVar' is garbage-collected. --- --- @since 2.4.4 -mkWeakTMVar :: TMVar a -> IO () -> IO (Weak (TMVar a)) -mkWeakTMVar tmv@(TMVar (TVar t#)) f = IO $ \s -> - case mkWeak# t# tmv f s of (# s1, w #) -> (# s1, Weak w #) -#endif diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar9=/Control/Concurrent/STM/TQueue.hs cabal-install-1.22-1.22.9.0/=unpacked-tar9=/Control/Concurrent/STM/TQueue.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar9=/Control/Concurrent/STM/TQueue.hs 2014-12-17 11:00:31.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar9=/Control/Concurrent/STM/TQueue.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,140 +0,0 @@ -{-# OPTIONS_GHC -fno-warn-name-shadowing #-} -{-# LANGUAGE CPP, DeriveDataTypeable #-} - -#if __GLASGOW_HASKELL__ >= 701 -{-# LANGUAGE Trustworthy #-} -#endif - ------------------------------------------------------------------------------ --- | --- Module : Control.Concurrent.STM.TQueue --- Copyright : (c) The University of Glasgow 2012 --- License : BSD-style (see the file libraries/base/LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (requires STM) --- --- A 'TQueue' is like a 'TChan', with two important differences: --- --- * it has faster throughput than both 'TChan' and 'Chan' (although --- the costs are amortised, so the cost of individual operations --- can vary a lot). --- --- * it does /not/ provide equivalents of the 'dupTChan' and --- 'cloneTChan' operations. --- --- The implementation is based on the traditional purely-functional --- queue representation that uses two lists to obtain amortised /O(1)/ --- enqueue and dequeue operations. 
--- --- @since 2.4 ------------------------------------------------------------------------------ - -module Control.Concurrent.STM.TQueue ( - -- * TQueue - TQueue, - newTQueue, - newTQueueIO, - readTQueue, - tryReadTQueue, - peekTQueue, - tryPeekTQueue, - writeTQueue, - unGetTQueue, - isEmptyTQueue, - ) where - -import GHC.Conc - -import Data.Typeable (Typeable) - --- | 'TQueue' is an abstract type representing an unbounded FIFO channel. --- --- @since 2.4 -data TQueue a = TQueue {-# UNPACK #-} !(TVar [a]) - {-# UNPACK #-} !(TVar [a]) - deriving Typeable - -instance Eq (TQueue a) where - TQueue a _ == TQueue b _ = a == b - --- |Build and returns a new instance of 'TQueue' -newTQueue :: STM (TQueue a) -newTQueue = do - read <- newTVar [] - write <- newTVar [] - return (TQueue read write) - --- |@IO@ version of 'newTQueue'. This is useful for creating top-level --- 'TQueue's using 'System.IO.Unsafe.unsafePerformIO', because using --- 'atomically' inside 'System.IO.Unsafe.unsafePerformIO' isn't --- possible. -newTQueueIO :: IO (TQueue a) -newTQueueIO = do - read <- newTVarIO [] - write <- newTVarIO [] - return (TQueue read write) - --- |Write a value to a 'TQueue'. -writeTQueue :: TQueue a -> a -> STM () -writeTQueue (TQueue _read write) a = do - listend <- readTVar write - writeTVar write (a:listend) - --- |Read the next value from the 'TQueue'. -readTQueue :: TQueue a -> STM a -readTQueue (TQueue read write) = do - xs <- readTVar read - case xs of - (x:xs') -> do writeTVar read xs' - return x - [] -> do ys <- readTVar write - case ys of - [] -> retry - _ -> case reverse ys of - [] -> error "readTQueue" - (z:zs) -> do writeTVar write [] - writeTVar read zs - return z - --- | A version of 'readTQueue' which does not retry. Instead it --- returns @Nothing@ if no value is available. 
-tryReadTQueue :: TQueue a -> STM (Maybe a) -tryReadTQueue c = fmap Just (readTQueue c) `orElse` return Nothing - --- | Get the next value from the @TQueue@ without removing it, --- retrying if the channel is empty. -peekTQueue :: TQueue a -> STM a -peekTQueue c = do - x <- readTQueue c - unGetTQueue c x - return x - --- | A version of 'peekTQueue' which does not retry. Instead it --- returns @Nothing@ if no value is available. -tryPeekTQueue :: TQueue a -> STM (Maybe a) -tryPeekTQueue c = do - m <- tryReadTQueue c - case m of - Nothing -> return Nothing - Just x -> do - unGetTQueue c x - return m - --- |Put a data item back onto a channel, where it will be the next item read. -unGetTQueue :: TQueue a -> a -> STM () -unGetTQueue (TQueue read _write) a = do - xs <- readTVar read - writeTVar read (a:xs) - --- |Returns 'True' if the supplied 'TQueue' is empty. -isEmptyTQueue :: TQueue a -> STM Bool -isEmptyTQueue (TQueue read write) = do - xs <- readTVar read - case xs of - (_:_) -> return False - [] -> do ys <- readTVar write - case ys of - [] -> return True - _ -> return False diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar9=/Control/Concurrent/STM/TSem.hs cabal-install-1.22-1.22.9.0/=unpacked-tar9=/Control/Concurrent/STM/TSem.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar9=/Control/Concurrent/STM/TSem.hs 2014-12-17 11:00:31.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar9=/Control/Concurrent/STM/TSem.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,55 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Control.Concurrent.STM.TSem --- Copyright : (c) The University of Glasgow 2012 --- License : BSD-style (see the file libraries/base/LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (requires STM) --- --- 'TSem': transactional semaphores. 
--- --- @since 2.4.2 ------------------------------------------------------------------------------ - -{-# LANGUAGE DeriveDataTypeable #-} -module Control.Concurrent.STM.TSem ( - TSem, newTSem, waitTSem, signalTSem - ) where - -import Control.Concurrent.STM -import Control.Monad -import Data.Typeable - --- | 'TSem' is a transactional semaphore. It holds a certain number --- of units, and units may be acquired or released by 'waitTSem' and --- 'signalTSem' respectively. When the 'TSem' is empty, 'waitTSem' --- blocks. --- --- Note that 'TSem' has no concept of fairness, and there is no --- guarantee that threads blocked in `waitTSem` will be unblocked in --- the same order; in fact they will all be unblocked at the same time --- and will fight over the 'TSem'. Hence 'TSem' is not suitable if --- you expect there to be a high number of threads contending for the --- resource. However, like other STM abstractions, 'TSem' is --- composable. --- --- @since 2.4.2 -newtype TSem = TSem (TVar Int) - deriving (Eq, Typeable) - -newTSem :: Int -> STM TSem -newTSem i = fmap TSem (newTVar i) - -waitTSem :: TSem -> STM () -waitTSem (TSem t) = do - i <- readTVar t - when (i <= 0) retry - writeTVar t $! (i-1) - -signalTSem :: TSem -> STM () -signalTSem (TSem t) = do - i <- readTVar t - writeTVar t $! 
i+1 - diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar9=/Control/Concurrent/STM/TVar.hs cabal-install-1.22-1.22.9.0/=unpacked-tar9=/Control/Concurrent/STM/TVar.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar9=/Control/Concurrent/STM/TVar.hs 2014-12-17 11:00:31.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar9=/Control/Concurrent/STM/TVar.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,80 +0,0 @@ -{-# LANGUAGE CPP, MagicHash, UnboxedTuples #-} - -#if __GLASGOW_HASKELL__ >= 701 -{-# LANGUAGE Trustworthy #-} -#endif - ------------------------------------------------------------------------------ --- | --- Module : Control.Concurrent.STM.TVar --- Copyright : (c) The University of Glasgow 2004 --- License : BSD-style (see the file libraries/base/LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (requires STM) --- --- TVar: Transactional variables --- ------------------------------------------------------------------------------ - -module Control.Concurrent.STM.TVar ( - -- * TVars - TVar, - newTVar, - newTVarIO, - readTVar, - readTVarIO, - writeTVar, - modifyTVar, - modifyTVar', - swapTVar, -#ifdef __GLASGOW_HASKELL__ - registerDelay, -#endif - mkWeakTVar - ) where - -#ifdef __GLASGOW_HASKELL__ -import GHC.Base -import GHC.Conc -import GHC.Weak -#else -import Control.Sequential.STM -#endif - --- Like 'modifyIORef' but for 'TVar'. --- | Mutate the contents of a 'TVar'. /N.B./, this version is --- non-strict. -modifyTVar :: TVar a -> (a -> a) -> STM () -modifyTVar var f = do - x <- readTVar var - writeTVar var (f x) -{-# INLINE modifyTVar #-} - - --- | Strict version of 'modifyTVar'. -modifyTVar' :: TVar a -> (a -> a) -> STM () -modifyTVar' var f = do - x <- readTVar var - writeTVar var $! f x -{-# INLINE modifyTVar' #-} - - --- Like 'swapTMVar' but for 'TVar'. --- | Swap the contents of a 'TVar' for a new value. 
-swapTVar :: TVar a -> a -> STM a -swapTVar var new = do - old <- readTVar var - writeTVar var new - return old -{-# INLINE swapTVar #-} - - --- | Make a 'Weak' pointer to a 'TVar', using the second argument as --- a finalizer to run when 'TVar' is garbage-collected --- --- @since 2.4.3 -mkWeakTVar :: TVar a -> IO () -> IO (Weak (TVar a)) -mkWeakTVar t@(TVar t#) f = IO $ \s -> - case mkWeak# t# t f s of (# s1, w #) -> (# s1, Weak w #) diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar9=/Control/Concurrent/STM.hs cabal-install-1.22-1.22.9.0/=unpacked-tar9=/Control/Concurrent/STM.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar9=/Control/Concurrent/STM.hs 2014-12-17 11:00:31.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar9=/Control/Concurrent/STM.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,49 +0,0 @@ -{-# LANGUAGE CPP #-} - -#if __GLASGOW_HASKELL__ >= 709 -{-# LANGUAGE Safe #-} -#elif __GLASGOW_HASKELL__ >= 701 -{-# LANGUAGE Trustworthy #-} -#endif - ------------------------------------------------------------------------------ --- | --- Module : Control.Concurrent.STM --- Copyright : (c) The University of Glasgow 2004 --- License : BSD-style (see the file libraries/base/LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (requires STM) --- --- Software Transactional Memory: a modular composable concurrency --- abstraction. See --- --- * /Composable memory transactions/, by Tim Harris, Simon Marlow, Simon --- Peyton Jones, and Maurice Herlihy, in /ACM Conference on Principles --- and Practice of Parallel Programming/ 2005. 
--- --- ------------------------------------------------------------------------------ - -module Control.Concurrent.STM ( - module Control.Monad.STM, - module Control.Concurrent.STM.TVar, -#ifdef __GLASGOW_HASKELL__ - module Control.Concurrent.STM.TMVar, - module Control.Concurrent.STM.TChan, - module Control.Concurrent.STM.TQueue, - module Control.Concurrent.STM.TBQueue, -#endif - module Control.Concurrent.STM.TArray - ) where - -import Control.Monad.STM -import Control.Concurrent.STM.TVar -#ifdef __GLASGOW_HASKELL__ -import Control.Concurrent.STM.TMVar -import Control.Concurrent.STM.TChan -#endif -import Control.Concurrent.STM.TArray -import Control.Concurrent.STM.TQueue -import Control.Concurrent.STM.TBQueue diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar9=/Control/Monad/STM.hs cabal-install-1.22-1.22.9.0/=unpacked-tar9=/Control/Monad/STM.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar9=/Control/Monad/STM.hs 2014-12-17 11:00:31.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar9=/Control/Monad/STM.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,126 +0,0 @@ -{-# OPTIONS_GHC -fno-warn-orphans #-} -{-# LANGUAGE CPP, MagicHash, UnboxedTuples #-} - -#if __GLASGOW_HASKELL__ >= 701 -{-# LANGUAGE Trustworthy #-} -#endif - ------------------------------------------------------------------------------ --- | --- Module : Control.Monad.STM --- Copyright : (c) The University of Glasgow 2004 --- License : BSD-style (see the file libraries/base/LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (requires STM) --- --- Software Transactional Memory: a modular composable concurrency --- abstraction. See --- --- * /Composable memory transactions/, by Tim Harris, Simon Marlow, Simon --- Peyton Jones, and Maurice Herlihy, in /ACM Conference on Principles --- and Practice of Parallel Programming/ 2005. 
--- --- --- This module only defines the 'STM' monad; you probably want to --- import "Control.Concurrent.STM" (which exports "Control.Monad.STM"). ------------------------------------------------------------------------------ - -module Control.Monad.STM ( - STM, - atomically, -#ifdef __GLASGOW_HASKELL__ - always, - alwaysSucceeds, - retry, - orElse, - check, -#endif - throwSTM, - catchSTM - ) where - -#ifdef __GLASGOW_HASKELL__ -#if ! (MIN_VERSION_base(4,3,0)) -import GHC.Conc hiding (catchSTM) -import Control.Monad ( MonadPlus(..) ) -import Control.Exception -#else -import GHC.Conc -#endif -import GHC.Exts -import Control.Monad.Fix -#else -import Control.Sequential.STM -#endif - -#ifdef __GLASGOW_HASKELL__ -#if ! (MIN_VERSION_base(4,3,0)) -import Control.Applicative -import Control.Monad (ap) -#endif -#endif - - -#ifdef __GLASGOW_HASKELL__ -#if ! (MIN_VERSION_base(4,3,0)) -instance MonadPlus STM where - mzero = retry - mplus = orElse - -instance Applicative STM where - pure = return - (<*>) = ap - -instance Alternative STM where - empty = retry - (<|>) = orElse -#endif - -check :: Bool -> STM () -check b = if b then return () else retry -#endif - -#if ! (MIN_VERSION_base(4,3,0)) --- |Exception handling within STM actions. -catchSTM :: Exception e => STM a -> (e -> STM a) -> STM a -catchSTM (STM m) handler = STM $ catchSTM# m handler' - where - handler' e = case fromException e of - Just e' -> case handler e' of STM m' -> m' - Nothing -> raiseIO# e - --- | A variant of 'throw' that can only be used within the 'STM' monad. --- --- Throwing an exception in @STM@ aborts the transaction and propagates the --- exception. --- --- Although 'throwSTM' has a type that is an instance of the type of 'throw', the --- two functions are subtly different: --- --- > throw e `seq` x ===> throw e --- > throwSTM e `seq` x ===> x --- --- The first example will cause the exception @e@ to be raised, --- whereas the second one won\'t. 
In fact, 'throwSTM' will only cause --- an exception to be raised when it is used within the 'STM' monad. --- The 'throwSTM' variant should be used in preference to 'throw' to --- raise an exception within the 'STM' monad because it guarantees --- ordering with respect to other 'STM' operations, whereas 'throw' --- does not. -throwSTM :: Exception e => e -> STM a -throwSTM e = STM $ raiseIO# (toException e) -#endif - - -data STMret a = STMret (State# RealWorld) a - -liftSTM :: STM a -> State# RealWorld -> STMret a -liftSTM (STM m) = \s -> case m s of (# s', r #) -> STMret s' r - -instance MonadFix STM where - mfix k = STM $ \s -> - let ans = liftSTM (k r) s - STMret _ r = ans - in case ans of STMret s' x -> (# s', x #) diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar9=/Control/Sequential/STM.hs cabal-install-1.22-1.22.9.0/=unpacked-tar9=/Control/Sequential/STM.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar9=/Control/Sequential/STM.hs 2014-12-17 11:00:31.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar9=/Control/Sequential/STM.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,93 +0,0 @@ --- Transactional memory for sequential implementations. --- Transactions do not run concurrently, but are atomic in the face --- of exceptions. 
- -{-# LANGUAGE CPP #-} - -#if __GLASGOW_HASKELL__ >= 709 -{-# LANGUAGE Safe #-} -#elif __GLASGOW_HASKELL__ >= 701 -{-# LANGUAGE Trustworthy #-} -#endif - --- #hide -module Control.Sequential.STM ( - STM, atomically, throwSTM, catchSTM, - TVar, newTVar, newTVarIO, readTVar, readTVarIO, writeTVar - ) where - -#if __GLASGOW_HASKELL__ < 705 -import Prelude hiding (catch) -#endif -#if __GLASGOW_HASKELL__ < 709 -import Control.Applicative (Applicative(pure, (<*>))) -#endif -import Control.Exception -import Data.IORef - --- The reference contains a rollback action to be executed on exceptions -newtype STM a = STM (IORef (IO ()) -> IO a) - -unSTM :: STM a -> IORef (IO ()) -> IO a -unSTM (STM f) = f - -instance Functor STM where - fmap f (STM m) = STM (fmap f . m) - -instance Applicative STM where - pure = STM . const . pure - STM mf <*> STM mx = STM $ \ r -> mf r <*> mx r - -instance Monad STM where - return = pure - STM m >>= k = STM $ \ r -> do - x <- m r - unSTM (k x) r - -atomically :: STM a -> IO a -atomically (STM m) = do - r <- newIORef (return ()) - m r `onException` do - rollback <- readIORef r - rollback - -throwSTM :: Exception e => e -> STM a -throwSTM = STM . const . 
throwIO - -catchSTM :: Exception e => STM a -> (e -> STM a) -> STM a -catchSTM (STM m) h = STM $ \ r -> do - old_rollback <- readIORef r - writeIORef r (return ()) - res <- try (m r) - rollback_m <- readIORef r - case res of - Left ex -> do - rollback_m - writeIORef r old_rollback - unSTM (h ex) r - Right a -> do - writeIORef r (rollback_m >> old_rollback) - return a - -newtype TVar a = TVar (IORef a) - deriving (Eq) - -newTVar :: a -> STM (TVar a) -newTVar a = STM (const (newTVarIO a)) - -newTVarIO :: a -> IO (TVar a) -newTVarIO a = do - ref <- newIORef a - return (TVar ref) - -readTVar :: TVar a -> STM a -readTVar (TVar ref) = STM (const (readIORef ref)) - -readTVarIO :: TVar a -> IO a -readTVarIO (TVar ref) = readIORef ref - -writeTVar :: TVar a -> a -> STM () -writeTVar (TVar ref) a = STM $ \ r -> do - oldval <- readIORef ref - modifyIORef r (writeIORef ref oldval >>) - writeIORef ref a diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar9=/LICENSE cabal-install-1.22-1.22.9.0/=unpacked-tar9=/LICENSE --- cabal-install-1.22-1.22.6.0/=unpacked-tar9=/LICENSE 2014-12-17 11:00:31.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar9=/LICENSE 1970-01-01 00:00:00.000000000 +0000 @@ -1,31 +0,0 @@ -The Glasgow Haskell Compiler License - -Copyright 2004, The University Court of the University of Glasgow. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -- Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. - -- Redistributions in binary form must reproduce the above copyright notice, -this list of conditions and the following disclaimer in the documentation -and/or other materials provided with the distribution. 
- -- Neither name of the University nor the names of its contributors may be -used to endorse or promote products derived from this software without -specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY COURT OF THE UNIVERSITY OF -GLASGOW AND THE CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, -INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND -FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -UNIVERSITY COURT OF THE UNIVERSITY OF GLASGOW OR THE CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -DAMAGE. diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar9=/Setup.hs cabal-install-1.22-1.22.9.0/=unpacked-tar9=/Setup.hs --- cabal-install-1.22-1.22.6.0/=unpacked-tar9=/Setup.hs 2014-12-17 11:00:31.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar9=/Setup.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -module Main (main) where - -import Distribution.Simple - -main :: IO () -main = defaultMain diff -Nru cabal-install-1.22-1.22.6.0/=unpacked-tar9=/stm.cabal cabal-install-1.22-1.22.9.0/=unpacked-tar9=/stm.cabal --- cabal-install-1.22-1.22.6.0/=unpacked-tar9=/stm.cabal 2014-12-17 11:00:31.000000000 +0000 +++ cabal-install-1.22-1.22.9.0/=unpacked-tar9=/stm.cabal 1970-01-01 00:00:00.000000000 +0000 @@ -1,53 +0,0 @@ -name: stm -version: 2.4.4 --- don't forget to update changelog.md file! 
-license: BSD3 -license-file: LICENSE -maintainer: libraries@haskell.org -bug-reports: http://ghc.haskell.org/trac/ghc/newticket?component=libraries%20%28other%29&keywords=stm -synopsis: Software Transactional Memory -category: Concurrency -description: A modular composable concurrency abstraction. -build-type: Simple -cabal-version: >=1.10 -tested-with: GHC==7.6.3, GHC==7.6.2, GHC==7.6.1, GHC==7.4.2, GHC==7.4.1, GHC==7.2.2, GHC==7.2.1, GHC==7.0.4, GHC==7.0.3, GHC==7.0.2, GHC==7.0.1, GHC==6.12.3 - -extra-source-files: - changelog.md - -source-repository head - type: git - location: http://git.haskell.org/packages/stm.git - -library - default-language: Haskell98 - other-extensions: - CPP - DeriveDataTypeable - FlexibleInstances - MagicHash - MultiParamTypeClasses - UnboxedTuples - if impl(ghc >= 7.2) - other-extensions: Trustworthy - if impl(ghc >= 7.9) - other-extensions: Safe - - build-depends: - base >= 4.2 && < 4.9, - array >= 0.3 && < 0.6 - - exposed-modules: - Control.Concurrent.STM - Control.Concurrent.STM.TArray - Control.Concurrent.STM.TVar - Control.Concurrent.STM.TChan - Control.Concurrent.STM.TMVar - Control.Concurrent.STM.TQueue - Control.Concurrent.STM.TBQueue - Control.Concurrent.STM.TSem - Control.Monad.STM - other-modules: - Control.Sequential.STM - - ghc-options: -Wall Binary files /tmp/tmpVBg9ir/PzIulGlZus/cabal-install-1.22-1.22.6.0/zlib-0.5.4.2.tar.gz and /tmp/tmpVBg9ir/pYw8BlU4Qo/cabal-install-1.22-1.22.9.0/zlib-0.5.4.2.tar.gz differ